├── .gitattributes ├── .github ├── CODEOWNERS ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── docs-redirects.yml │ ├── lint.yml │ └── server-list.yml ├── .gitignore ├── .readthedocs.yaml ├── .salt-lint ├── LICENSE.txt ├── README.rst ├── Vagrantfile ├── conf ├── minion.conf └── vagrant │ └── master.conf ├── dockerfiles ├── Dockerfile.focal ├── Dockerfile.jammy ├── Dockerfile.noble └── etc │ ├── locale.conf │ └── ssl │ └── private │ └── dhparams.pem ├── docs ├── Makefile ├── _static │ └── .empty ├── conf.py ├── guides │ ├── haproxy-registration-guide.md │ ├── images │ │ ├── consul-service.png │ │ └── haproxy-service.png │ ├── index.rst │ ├── migration-recipe.md │ ├── port-guide.md │ └── server.rst ├── index.rst ├── list.rst ├── overview.rst ├── packages.rst ├── requirements.txt ├── salt-server-list.rst ├── security.rst ├── services │ ├── cdn.rst │ ├── discovery.rst │ ├── index.rst │ └── postgresql.rst └── ssl.rst ├── pillar ├── base │ ├── bugs.sls │ ├── codespeed.sls │ ├── fastly-logging.sls │ ├── firewall │ │ ├── bugs.sls │ │ ├── buildbot.sls │ │ ├── codespeed.sls │ │ ├── consul.sls │ │ ├── elasticsearch.sls │ │ ├── fastly-logging.sls │ │ ├── ftp.sls │ │ ├── hg.sls │ │ ├── http.sls │ │ ├── loadbalancer.sls │ │ ├── mail.sls │ │ ├── mailman.sls │ │ ├── planet.sls │ │ ├── postgresql.sls │ │ ├── rs-lb-backend.sls │ │ ├── salt.sls │ │ └── snakebite.sls │ ├── groups │ │ ├── docs.sls │ │ ├── downloads.sls │ │ └── mail.sls │ ├── haproxy.sls │ ├── mail-opt-out.sls │ ├── moin.sls │ ├── planet.sls │ ├── postgresql │ │ └── server.sls │ ├── sudoers │ │ └── init.sls │ ├── tls.sls │ └── users │ │ ├── _admin │ │ ├── benjamin.sls │ │ ├── benjaminws.sls │ │ ├── coderanger.sls │ │ ├── coffee.sls │ │ ├── dstufft.sls │ │ └── ee.sls │ │ ├── adam.sls │ │ ├── ambv.sls │ │ ├── ammar.sls │ │ ├── antoine.sls │ │ ├── barry.sls │ │ ├── brian.sls │ │ ├── diegorusso.sls │ │ ├── encukou.sls │ │ ├── ezio.sls │ │ ├── fijal.sls │ │ ├── gbrandl.sls │ │ ├── haypo.sls │ │ ├── hildeb.sls │ │ ├── hugovk.sls │ │ ├── isidentical.sls │ │ ├── itamaro.sls │ │ ├── jafo.sls │ │ ├── larry.sls │ │ ├── lemburg.sls │ │ ├── loewis.sls │ │ ├── mattip.sls │ │ ├── maxking.sls │ │ ├── mdk.sls │ │ ├── msapiro.sls │ │ ├── nad.sls │ │ ├── offby1.sls │ │ ├── pablogsal.sls │ │ ├── rouilj.sls │ │ ├── schlatterbeck.sls │ │ ├── sdower.sls │ │ ├── sethmlarson.sls │ │ ├── skip.sls │ │ ├── thomas.sls │ │ └── zware.sls ├── dev │ ├── backup │ │ ├── docs.sls │ │ ├── downloads.sls │ │ └── server.sls │ ├── consul.sls │ ├── networking.sls │ ├── pebble.sls │ ├── postgres │ │ ├── clusters.sls │ │ └── databases.sls │ ├── roles.sls │ ├── secrets │ │ ├── bugs.sls │ │ ├── codespeed.sls │ │ ├── docs.sls │ │ ├── fastly.sls │ │ ├── monitoring │ │ │ └── server.sls │ │ ├── postgresql-admin.sls │ │ ├── postgresql-users │ │ │ ├── all.sls │ │ │ ├── codespeed.sls │ │ │ └── replica.sls │ │ └── tls │ │ │ └── certs │ │ │ └── loadbalancer.sls │ └── top.sls └── prod │ ├── backup │ ├── bugs.sls │ ├── buildbot.sls │ ├── docs.sls │ ├── downloads.sls │ ├── gnumailman.sls │ ├── hg.sls │ ├── mail.sls │ ├── moin.sls │ └── server.sls │ ├── consul.sls │ ├── networking.sls │ ├── ocsp.sls │ ├── postgres │ ├── clusters.sls │ └── databases.sls │ ├── roles.sls │ ├── swapfile.sls │ └── top.sls ├── requirements.txt ├── salt ├── _extensions │ ├── modules │ │ └── consul.py │ └── pillar │ │ ├── backup_ssh.py │ │ ├── ca.py │ │ ├── consul.py │ │ ├── dc.py │ │ └── dms.py ├── _grains │ └── detect_virt.py ├── _modules │ └── consul.py ├── _states │ ├── consul.py │ ├── dynect.py │ └── postgres_replica.py 
├── backup │ ├── base.sls │ ├── client │ │ ├── README.md │ │ ├── init.sls │ │ └── templates │ │ │ ├── backup.bash.jinja │ │ │ └── cron.jinja │ └── server │ │ ├── README.md │ │ ├── init.sls │ │ └── templates │ │ └── cron.jinja ├── base │ ├── auto-highstate.sls │ ├── config │ │ ├── APT-GPG-KEY-PSF │ │ ├── known_hosts.jinja │ │ ├── letsencrypt-well-known-nginx.conf │ │ ├── publish-files-nginx.conf │ │ ├── salt-logrotate.conf │ │ ├── salt-roles.conf.jinja │ │ ├── salt-server-list.rst.jinja │ │ ├── sources.list.jinja │ │ └── ssmtp.conf.jinja │ ├── harden │ │ ├── config │ │ │ ├── limits.conf │ │ │ ├── login.defs.jinja │ │ │ ├── pam_passwdqc │ │ │ ├── pam_tally2 │ │ │ └── profile.sh │ │ ├── init.sls │ │ ├── limits.sls │ │ ├── login_defs.sls │ │ ├── minimize_access.sls │ │ ├── pam.sls │ │ └── profile.sls │ ├── mail.sls │ ├── motd.sls │ ├── repo.sls │ ├── salt.sls │ ├── sanity.sls │ └── swap.sls ├── bugs │ ├── config │ │ ├── bpo-suggest.service │ │ ├── config.ini.jinja │ │ ├── cpython │ │ │ ├── tracker-extras.conf │ │ │ └── tracker-upstreams.conf │ │ ├── detector-config.ini.jinja │ │ ├── instance-forward.jinja │ │ ├── instance.service.jinja │ │ ├── instance_wsgi.py.jinja │ │ ├── nginx.conf.jinja │ │ ├── postfix │ │ │ ├── main.cf │ │ │ ├── master.cf │ │ │ ├── reject_recipients │ │ │ └── virtual │ │ ├── postgresql.conf │ │ ├── rietveld.service │ │ └── roundup.service │ ├── cpython.sls │ ├── files │ │ └── postgres-backup.bash │ ├── init.sls │ ├── jython.sls │ ├── postgresql.sls │ ├── requirements.txt │ ├── rietveld-requirements.txt │ └── roundup.sls ├── buildbot │ ├── config │ │ └── nginx.conf.jinja │ └── init.sls ├── cdn-logs │ ├── config │ │ ├── fastly.logrotate.conf │ │ └── fastly.rsyslog.conf │ └── init.sls ├── codespeed │ ├── config │ │ ├── codespeed.logrotate │ │ ├── codespeed.service.jinja │ │ ├── django-settings.py.jinja │ │ └── nginx.conf.jinja │ └── init.sls ├── consul │ ├── etc │ │ ├── _address_list.jinja │ │ ├── acl-master.json.jinja │ │ ├── acl.json.jinja │ │ ├── base.json.jinja │ │ ├── consul-template.conf.jinja │ │ ├── consul-template │ │ │ ├── base.json │ │ │ └── template.json.jinja │ │ ├── encrypt.json.jinja │ │ ├── join.json.jinja │ │ ├── server.json.jinja │ │ └── service.jinja │ ├── init.sls │ ├── init │ │ ├── consul-template.conf.jinja │ │ ├── consul-template.service │ │ └── consul.service │ └── jinja.sls ├── datadog │ ├── config │ │ ├── APT-GPG-KEY-DATADOG │ │ └── datadog.yaml.jinja │ ├── files │ │ └── .gitkeep │ └── init.sls ├── dns │ └── init.sls ├── docker │ └── init.sls ├── docs │ ├── config │ │ ├── docsbuild-scripts │ │ ├── docsbuild.logrotate │ │ ├── nginx.docs-backend.conf │ │ └── nginx.docs-redirects.conf │ └── init.sls ├── downloads │ ├── config │ │ └── nginx.downloads-backend.conf │ └── init.sls ├── elasticsearch │ ├── config │ │ ├── elasticsearch.yml.jinja │ │ └── logging.yml │ └── init.sls ├── firewall │ ├── config │ │ ├── ip6tables.jinja │ │ └── iptables.jinja │ └── init.sls ├── groups │ └── init.sls ├── haproxy │ ├── bin │ │ └── haproxy-ocsp │ ├── config │ │ ├── APT-GPG-KEY-HAPROXY │ │ ├── consul-recursors.json │ │ ├── haproxy-ocsp-logrotate.conf │ │ ├── haproxy.cfg.jinja │ │ ├── nginx-redirect.conf.jinja │ │ └── our_domains.jinja │ └── init.sls ├── hg │ ├── config │ │ ├── apache.logrotate │ │ ├── hg-account-admins │ │ ├── hg.apache.conf.jinja │ │ ├── hgmin.service.jinja │ │ ├── legacy.apache.conf.jinja │ │ ├── legacy │ │ │ ├── REDIRECTS │ │ │ │ ├── releases.conf │ │ │ │ └── sigs.conf │ │ │ └── legacy-redirects.conf │ │ ├── ports.apache.conf.jinja │ │ ├── 
remoteip.apache.conf.jinja │ │ └── svn.apache.conf.jinja │ ├── files │ │ └── hg │ │ │ ├── src │ │ │ ├── app.py │ │ │ ├── hg_commits.json │ │ │ └── requirements.txt │ │ │ └── web │ │ │ ├── 410.html │ │ │ └── robots.txt │ └── init.sls ├── moin │ ├── configs │ │ ├── farmconfig.py │ │ ├── jython.py │ │ ├── logrotate.conf │ │ ├── moin.wsgi │ │ ├── moin_wsgi.py │ │ ├── ports.apache.conf.jinja │ │ ├── psf.py │ │ ├── python.py │ │ ├── shared_intermap.txt │ │ └── wiki.python.org.conf │ ├── init.sls │ └── scripts │ │ ├── moin_maint_cleanpage.sh │ │ ├── moin_maint_cleansessions.sh │ │ ├── moin_maint_cleansessions_all.sh │ │ └── moin_maint_index_rebuild.sh ├── nginx │ ├── config │ │ ├── APT-GPG-KEY-NGINX │ │ ├── fastly_params.jinja │ │ ├── nginx.conf.jinja │ │ └── nginx.logrotate │ └── init.sls ├── nodejs │ ├── APT-GPG-KEY │ └── init.sls ├── pgbouncer │ ├── init.sls │ └── templates │ │ ├── pgbouncer.ini │ │ └── userlist.txt ├── planet │ ├── config │ │ ├── nginx.planet.conf.jinja │ │ └── run-planet.sh.jinja │ └── init.sls ├── postgresql │ ├── admin.sls │ ├── base │ │ ├── APT-GPG-KEY-POSTGRESQL │ │ └── init.sls │ ├── client │ │ ├── init.sls │ │ └── root-certs.crt.jinja │ └── server │ │ ├── configs │ │ ├── gpg.conf.jinja │ │ ├── pg_hba.conf.jinja │ │ ├── pg_ident.conf.jinja │ │ ├── postgresql.conf.jinja │ │ ├── recovery.conf.jinja │ │ └── wal-e.conf.jinja │ │ ├── init.sls │ │ └── wal-e.sls ├── pythontest │ ├── config │ │ ├── inn.conf │ │ ├── nginx.pythontest.conf.jinja │ │ └── vsftpd.conf │ └── init.sls ├── redis │ └── init.sls ├── rsyslog │ └── init.sls ├── ssh │ ├── configs │ │ └── sshd_config.jinja │ └── init.sls ├── sudoers │ ├── config │ │ └── salt.jinja │ └── init.sls ├── tls │ ├── config │ │ ├── lego.conf.jinja │ │ ├── pebble-config.json │ │ └── pebble.service │ ├── init.sls │ ├── lego.sls │ └── pebble.sls ├── top.sls ├── unattended-upgrades │ ├── config │ │ ├── 10periodic │ │ └── 50unattended-upgrades │ └── init.sls └── users │ ├── config │ └── authorized_keys.jinja │ ├── dotfiles │ ├── dstufft.sls │ └── init.sls │ └── init.sls ├── tasks ├── __init__.py ├── salt.py └── utils.py ├── tests └── docs-redirects │ ├── nginx.conf │ ├── specs │ ├── PEP-594.hurl │ ├── default-root.hurl │ ├── devguide.hurl │ ├── ftp-download.hurl │ ├── py2.5.hurl │ ├── py2.7-PEP-3108.hurl │ ├── py2.7-__builtin__.hurl │ ├── py2.7-anydbm.hurl │ ├── py2.7-basehttpserver.hurl │ ├── py2.7-c-api.hurl │ ├── py2.7-cgihttpserver.hurl │ ├── py2.7-cookie.hurl │ ├── py2.7-cookielib.hurl │ ├── py2.7-copy_reg.hurl │ ├── py2.7-docxmlrpcserver.hurl │ ├── py2.7-dumbdbm.hurl │ ├── py2.7-email-examples.hurl │ ├── py2.7-email.util.hurl │ ├── py2.7-gdbm.hurl │ ├── py2.7-howto.hurl │ ├── py2.7-htmlparser.hurl │ ├── py2.7-httplib.hurl │ ├── py2.7-repr.hurl │ ├── py2.7-robotparser.hurl │ ├── py2.7-scrolledtext.hurl │ ├── py2.7-sets.hurl │ ├── py2.7-simplehttpserver.hurl │ ├── py2.7-simplexmlrpcserver.hurl │ ├── py2.7-stringio.hurl │ ├── py2.7-strings.hurl │ ├── py2.7-thread.hurl │ ├── py2.7-tix.hurl │ ├── py2.7-ttk.hurl │ ├── py2.7-urllib2.hurl │ ├── py2.7-urlparse.hurl │ ├── py2.7-userdict.hurl │ ├── py2.7-whichdb.hurl │ ├── py2.7-winreg.hurl │ ├── py2.7-xmlrpclib.hurl │ ├── py3k.hurl │ ├── surrogate-key.hurl │ ├── top-level-directories.hurl │ └── top-level-files.hurl │ └── test.sh └── tox.ini /.gitattributes: -------------------------------------------------------------------------------- 1 | # Add syntax highlighting to NGINX config files 2 | nginx.*.conf linguist-language=Nginx 3 | 
-------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Notify @EWDurbin for all opened Issues and Pull Requests 2 | * @EWDurbin @JacobCoffee 3 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 5 | ## Description 6 | 7 | - 8 | 9 | 16 | ## Closes 17 | 18 | - 19 | 20 | -------------------------------------------------------------------------------- /.github/workflows/docs-redirects.yml: -------------------------------------------------------------------------------- 1 | name: Test d.p.o redirects 2 | 3 | on: 4 | push: 5 | paths: 6 | - ".github/workflows/docs-redirects.yml" 7 | - "salt/docs/**" 8 | - "tests/docs-redirects/**" 9 | pull_request: 10 | paths: 11 | - ".github/workflows/docs-redirects.yml" 12 | - "salt/docs/**" 13 | - "tests/docs-redirects/**" 14 | workflow_dispatch: 15 | 16 | permissions: 17 | contents: read 18 | 19 | concurrency: 20 | group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} 21 | cancel-in-progress: true 22 | 23 | env: 24 | FORCE_COLOR: "1" 25 | 26 | jobs: 27 | docs-redirects: 28 | name: Hurl 29 | runs-on: ubuntu-latest 30 | 31 | steps: 32 | - uses: actions/checkout@v4 33 | - name: Download Hurl ${{ env.HURL_VERSION }} 34 | run: > 35 | curl --no-progress-meter --location --fail 36 | --proto '=https' --tlsv1.2 37 | --output '/tmp/hurl.deb' 38 | "https://github.com/Orange-OpenSource/hurl/releases/download/${{ env.HURL_VERSION }}/hurl_${{ env.HURL_VERSION }}_amd64.deb" 39 | env: 40 | HURL_VERSION: "5.0.1" 41 | 42 | - name: Install Hurl 43 | run: sudo apt install --yes /tmp/hurl.deb 44 | 45 | - name: Run tests 46 | run: bash tests/docs-redirects/test.sh 47 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | 3 | on: 4 | push: 5 | branches: [main] 6 | pull_request: 7 | branches: [main] 8 | 9 | jobs: 10 | salt-lint: 11 | name: Salt Lint 12 | runs-on: ubuntu-latest 13 | permissions: 14 | contents: read 15 | 16 | steps: 17 | - name: Checkout repository 18 | uses: actions/checkout@v3 19 | 20 | - name: Setup Python 21 | uses: actions/setup-python@v4 22 | with: 23 | python-version: "3.10" 24 | 25 | - name: Install tox and any other packages 26 | run: pip install tox 27 | 28 | - name: Run Linter 29 | run: tox -e lint 30 | -------------------------------------------------------------------------------- /.github/workflows/server-list.yml: -------------------------------------------------------------------------------- 1 | name: Update Server List 2 | 3 | on: 4 | schedule: 5 | - cron: '*/15 * * * *' 6 | workflow_dispatch: 7 | 8 | jobs: 9 | update-server-list: 10 | runs-on: ubuntu-latest 11 | permissions: 12 | contents: write 13 | pull-requests: write 14 | 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v4 18 | 19 | - name: Update salt-server-list.rst 20 | run: | 21 | wget --quiet -O docs/salt-server-list.rst https://salt-public.psf.io/salt-server-list.rst 22 | 23 | - name: Create Pull Request 24 | uses: peter-evans/create-pull-request@v5 25 | with: 26 | commit-message: Update docs/salt-server-list.rst 27 | branch: salt-server-list 28 | delete-branch: true 29 | title: 'Update docs/salt-server-list.rst' 30 
| body: | 31 | Update detected from https://salt-public.psf.io/salt-server-list.rst 32 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /pillar/prod/secrets 2 | /ubuntu-bionic-18.04-cloudimg-console.log 3 | 4 | *.py[cod] 5 | 6 | docs/_build 7 | 8 | .tox 9 | .vagrant 10 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 2 | # Required 3 | version: 2 4 | 5 | build: 6 | os: ubuntu-22.04 7 | tools: 8 | python: "3.11" 9 | 10 | sphinx: 11 | configuration: docs/conf.py 12 | 13 | python: 14 | install: 15 | - requirements: docs/requirements.txt 16 | -------------------------------------------------------------------------------- /.salt-lint: -------------------------------------------------------------------------------- 1 | --- 2 | skip_list: 3 | - 204 4 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2014 Benjamin Peterson, Donald Stufft, and contributors 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of 4 | this software and associated documentation files (the "Software"), to deal in 5 | the Software without restriction, including without limitation the rights to 6 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 7 | the Software, and to permit persons to whom the Software is furnished to do so, 8 | subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 15 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 16 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 17 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 18 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
19 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Python Infrastructure 2 | ===================== 3 | 4 | * `Documentation `_ 5 | * `IRC: #python-infra on Libera `_ 6 | -------------------------------------------------------------------------------- /conf/minion.conf: -------------------------------------------------------------------------------- 1 | master: {{ master }} 2 | -------------------------------------------------------------------------------- /conf/vagrant/master.conf: -------------------------------------------------------------------------------- 1 | open_mode: True 2 | 3 | failhard: True 4 | 5 | extension_modules: srv/salt/_extensions 6 | 7 | user: root 8 | 9 | pillar_roots: 10 | base: 11 | - /srv/pillar/dev 12 | - /srv/pillar/base 13 | 14 | ext_pillar: 15 | - dc: 16 | "*.vagrant.psf.io": vagrant 17 | - ca: 18 | name: PSF_CA 19 | cert_opts: 20 | C: US 21 | ST: NH 22 | L: Wolfeboro 23 | O: Python Software Foundation 24 | OU: Infrastructure Team 25 | emailAddress: infrastructure@python.org 26 | - consul: 27 | key_path: /var/lib/consul/encryption_keys/primary.key 28 | acl_path: /var/lib/consul/acl_tokens/ 29 | - backup_ssh: {} 30 | -------------------------------------------------------------------------------- /dockerfiles/etc/locale.conf: -------------------------------------------------------------------------------- 1 | LANG=en_US.UTF-8 2 | -------------------------------------------------------------------------------- /dockerfiles/etc/ssl/private/dhparams.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN DH PARAMETERS----- 2 | MIICCAKCAgEA2/pV4Uvvm/c4DgpcTDg/jO0ndhXkUxwwfeXMxMd/ilvJeRyvtGtS 3 | HnBmYzuGWtthaGV8t/pvUO3rpoUv3SpGvGe9WhxIFM1/Yqsik9sToP8og7o2t9Qz 4 | xB1RxqDMRlXgx1nywiDeMb4dj/Mlxw7iXNrb/K6keoFdoL/YYUtIxzxyyzwCAB7+ 5 | hJVRuRN37U0Opab4kwTjv0+6b4ykvFU9HbPT37mML9Es1kOkkT7K4VUiVV5MFz+4 6 | VAi/2zc1demBAAbU5ZLG7bteFivW11gJm10jXN5gFnODC2vLbQfc84EFgtEuk9oZ 7 | KiwwFGwKk/9gm1bWICTUMv/0gYYgwf6YB53AZ05CMWbWxvBzMjQmVGoseQtSwkzQ 8 | PUcSsYZHCmwkbXf2XbVoQGmMgSJV7fyasftWU1Dmbj2AmsyeRf8VUlNA6Z9YghMj 9 | TrpP1+2uKPsarCLtau8RYIHlKFENbhY+RPZNf3ADoP0SP28Mcq44Byt05A9WajO5 10 | 9UUUIEXTpppNqE0qF/ZD1xdaOzS/Rz5x/5Np0ZuOKwrR6qytTcOOG6LcTJJ2qp2G 11 | sudIOnKI7IieMmRmpyx3ED9Xx+/awQr4jHEIbGH8UirfrkOORo0R0IvQQQd2jGNK 12 | 5AzrHjBnBWVmOvCOczwFsBWUDF8ul+/CQKt9xxzrYaemdUaFvADEFzsCAQI= 13 | -----END DH PARAMETERS----- 14 | -------------------------------------------------------------------------------- /docs/_static/.empty: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/psf-salt/fe2896e7d5b0a651db89a037b1c3aa7c9b5ff6df/docs/_static/.empty -------------------------------------------------------------------------------- /docs/guides/images/consul-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/psf-salt/fe2896e7d5b0a651db89a037b1c3aa7c9b5ff6df/docs/guides/images/consul-service.png -------------------------------------------------------------------------------- /docs/guides/images/haproxy-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/psf-salt/fe2896e7d5b0a651db89a037b1c3aa7c9b5ff6df/docs/guides/images/haproxy-service.png 
-------------------------------------------------------------------------------- /docs/guides/index.rst: -------------------------------------------------------------------------------- 1 | Guides 2 | ====== 3 | 4 | 5 | .. toctree:: 6 | :maxdepth: 2 7 | 8 | server 9 | migration-recipe.md 10 | haproxy-registration-guide.md 11 | port-guide.md 12 | -------------------------------------------------------------------------------- /docs/guides/port-guide.md: -------------------------------------------------------------------------------- 1 | Opening a port 2 | ============== 3 | 4 | 1. Ensure the `salt-master` can be brought up with vagrant locally: 5 | ```console 6 | vagrant up salt-master 7 | ``` 8 | 2. `vagrant ssh` into `salt-master` and confirm the service in question is running and listening on the desired port: 9 | ```console 10 | vagrant ssh salt-master 11 | sudo netstat -tlnp | grep 9001 12 | ``` 13 | Should show something like: 14 | ```console 15 | $ tcp 0 0 0.0.0.0:9001 0.0.0.0:* LISTEN 621968/nginx: maste 16 | ``` 17 | 3. Check the firewall to confirm the desired port is closed: 18 | ```console 19 | sudo iptables -L -xvn 20 | ``` 21 | Should show something like: 22 | ```console 23 | $ 86131 5167860 ACCEPT tcp -- * * 192.168.50.0/24 0.0.0.0/0 state NEW tcp dpts:9000 24 | ``` 25 | 4. In the **local repository**, edit the firewall settings by navigating to 26 | `pillar/base/firewall` and editing the [`salt.sls`][firewall-config] 27 | file to include the desired port: 28 | ```console 29 | vim pillar/base/firewall/salt.sls 30 | ``` 31 | 5. On the `salt-master` run `highstate` to validate your changes and check the firewall to verify those changes: 32 | ```console 33 | vagrant ssh salt-master 34 | sudo salt-call state.highstate 35 | ``` 36 | ```console 37 | sudo iptables -L -xvn 38 | ``` 39 | Should show something like: 40 | ``` 41 | 86131 5167860 ACCEPT tcp -- * * 192.168.50.0/24 0.0.0.0/0 state NEW tcp dpts:9000:9001 42 | ``` 43 | 44 | [//]: # (Quicklink targets) 45 | [firewall-config]: https://github.com/python/psf-salt/blob/main/pillar/base/firewall/salt.sls -------------------------------------------------------------------------------- /docs/guides/server.rst: -------------------------------------------------------------------------------- 1 | Server Guide 2 | ============ 3 | 4 | 5 | Bootstrap a Server 6 | ------------------ 7 | 8 | Unless otherwise required, all machines operated by the PSF Salt infrastructure 9 | should be running the `latest Ubuntu LTS release `_, currently 24.04, 10 | and they will have their configuration managed by `psf-salt <https://github.com/python/psf-salt>`_. 11 | 12 | Each machine should be given a hostname which matches the pattern 13 | ``serviceN.dc.psf.io`` where ``serviceN`` is replaced by a service name (such as 14 | ``pg``) and a unique number, and ``dc`` is replaced by the `PSF DC identifier 15 | `_ for the DC that this machine is 16 | in. A full example would be ``pg0.iad1.psf.io``. You'll need to add this 17 | hostname to ``pillar/prod/roles.sls`` and ``pillar/dev/roles.sls`` to put the 18 | machine in the correct configuration nodegroup. 19 | 20 | Once you have a machine, you can bootstrap it by simply executing 21 | ``inv salt.bootstrap <hostname>``. This will SSH into the machine, 22 | install all the required software, register it with the salt master and run 23 | ``highstate`` on it. Within 15 minutes the salt master will also set up the DNS 24 | for the machine and it will live at the hostname that you have given it under 25 | the ``psf.io`` domain.
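For illustration, a sketch of the kind of entry this adds to ``pillar/prod/roles.sls``
(and mirrored in ``pillar/dev/roles.sls``) is shown below. The hostname comes from the
example above; the ``postgresql`` role name and the exact mapping layout are assumptions,
so copy the structure of an existing entry in those files rather than this sketch::

    # Hypothetical sketch only: associate a role with the new minion's
    # hostname so the correct states are applied on highstate. Check the
    # real roles.sls files for the exact schema before editing.
    postgresql:
      - pg0.iad1.psf.io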
26 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Python Infrastructure 2 | ===================== 3 | 4 | .. toctree:: 5 | :maxdepth: 2 6 | :titlesonly: 7 | 8 | overview.rst 9 | packages.rst 10 | security 11 | list.rst 12 | ssl.rst 13 | services/index.rst 14 | guides/index.rst 15 | -------------------------------------------------------------------------------- /docs/list.rst: -------------------------------------------------------------------------------- 1 | Server List 2 | ===================== 3 | 4 | --------------------- 5 | Hosts managed by Salt 6 | --------------------- 7 | 8 | This list is `automatically generated `_ by our salt server. 9 | 10 | .. 11 | START AUTOMATED SECTION **DO NOT DIRECTLY EDIT - Salt will blow away your changes!!!** 12 | 13 | .. include:: salt-server-list.rst 14 | 15 | .. 16 | END AUTOMATED SECTION **DO NOT DIRECTLY EDIT - Salt will blow away your changes!!!** 17 | 18 | ---------------------- 19 | Manually managed hosts 20 | ---------------------- 21 | 22 | +------------------------------+----------------------------------------------+-------------+---------------+-------------------------+ 23 | | Name | Purpose | Contact | Datacenter | Region | 24 | +==============================+==============================================+=============+===============+=========================+ 25 | | speed-python.osuosl.org | Python Speed | noah | OSUOSL | OSUOSL | 26 | +------------------------------+----------------------------------------------+-------------+---------------+-------------------------+ 27 | 28 | -------------------- 29 | Validating Host Keys 30 | -------------------- 31 | 32 | Salt keeps an ssh `known_hosts` compatible list up to date for download at ``_ 33 | -------------------------------------------------------------------------------- /docs/packages.rst: -------------------------------------------------------------------------------- 1 | APT Packages 2 | ============ 3 | 4 | PSF Infrastructure utilizes an apt repository for serving additional debian 5 | packages or versions not available from our upstream distributions. 6 | 7 | This repository is hosted on `packagecloud `_ at 8 | `https://packagecloud.io/psf/infra `_. 9 | 10 | The psf/infra repository has been added to all servers by default and can be 11 | used to ship things which are not available in Ubuntu or a ppa or for which 12 | there are patched versions required. 13 | 14 | 15 | Install from the PSF repository 16 | ------------------------------- 17 | 18 | Generally nothing different needs to happen to install from the PSF repository, 19 | just a simple ``apt-get install `` or adding it to a salt state 20 | should pick up the package automatically. 21 | 22 | 23 | Uploading to the PSF repository 24 | ------------------------------- 25 | 26 | Access to the packagecloud repository is managed by the PSF Infrastructure 27 | Team. Packages can be uploaded by providing them to the team, or if a 28 | contributor consistently contributes to this repository, they may create a user 29 | with packagecloud.io and have their user added to the team. 
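To illustrate the install path described above, a Salt state that pulls a package from
the PSF repository looks the same as any other package state, since apt resolves it from
whichever configured source provides it. The package name here is a placeholder rather
than a real package in psf/infra::

    # Minimal sketch; substitute a package that actually exists in the
    # psf/infra repository (or in Ubuntu / a PPA).
    example-package:
      pkg.installed: []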
30 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | furo 2 | sphinx 3 | myst-parser 4 | sphinx-copybutton 5 | -------------------------------------------------------------------------------- /docs/security.rst: -------------------------------------------------------------------------------- 1 | Security 2 | ======== 3 | 4 | TLS Cipher Suites 5 | ----------------- 6 | 7 | Any place where TLS is being used and a cipher string can be specified a cipher 8 | string from the ``tls.ciphers`` pillar should be used. Ideally this will be 9 | used in a way like:: 10 | 11 | {{ pillar["tls"]["ciphers"].get("a unique name", pillar["tls"]["ciphers"]["default"]) }} 12 | 13 | This will ensure that the ciphers can all be configured in one place, that by 14 | default the same cipher strings are used everywhere, but still allow overriding 15 | the cipher strings for each service where it makes sense. 16 | 17 | 18 | Service Authenticity 19 | -------------------- 20 | 21 | In order to validate that a particular server is allowed to function as a 22 | particular service the infrastructure makes use of TLS certificates signed by 23 | a custom certificate authority. 24 | 25 | A new service can be given a certificate by editing the ``pillar/base/tls.sls`` 26 | file and adding a new section under the ``gen_certs`` key. This should look 27 | something like:: 28 | 29 | tls: 30 | gen_certs: 31 | postgresql.psf.io: 32 | days: 7 33 | roles: 34 | - postgresql 35 | 36 | Where ``days`` is how many days a particular certificate should be valid for, 37 | and ``roles`` is a list of roles which need access to this certificate. It is 38 | important that the ``days`` argument be kept short so that a compromised key 39 | is only valid for a small window. The system will ensure that it replaces any 40 | soon to expire certificates with new certificates before they expire. 41 | 42 | This certificate will then be available on the servers at 43 | ``/etc/ssl/private/{{ name }}.pem``. That file contains both the certificate 44 | itself and the private key for the certificate. It can be validated against the 45 | ``/etc/ssl/certs/PSF_CA.pem`` file which is available on all servers as well. 46 | 47 | This requires configuration on the master like:: 48 | 49 | extension_modules: srv/salt/_extensions 50 | 51 | ext_pillar: 52 | - ca: 53 | name: PSF_CA 54 | cert_opts: 55 | C: US 56 | ST: Oregon 57 | L: Beaverton 58 | O: Python Software Foundation 59 | OU: Infrastructure Team 60 | emailAddress: infrastructure@python.org 61 | -------------------------------------------------------------------------------- /docs/services/index.rst: -------------------------------------------------------------------------------- 1 | Services 2 | ======== 3 | 4 | 5 | .. 
toctree:: 6 | :maxdepth: 2 7 | 8 | discovery 9 | cdn 10 | postgresql 11 | -------------------------------------------------------------------------------- /pillar/base/codespeed.sls: -------------------------------------------------------------------------------- 1 | codespeed-instances: 2 | cpython: 3 | hostname: speed.python.org 4 | db_user: codespeed-cpython 5 | db_name: codespeed-cpython 6 | port: 9000 7 | source: https://github.com/python/codespeed.git 8 | source_ref: speed.python.org 9 | python_version: python3 10 | module: speed_python 11 | wsgi_app: speed_python.wsgi:application 12 | clones: 13 | git: 14 | cpython: 15 | source: https://github.com/python/cpython.git 16 | hg: {} 17 | pypy: 18 | hostname: speed.pypy.org 19 | db_user: codespeed-pypy 20 | db_name: codespeed-pypy 21 | port: 9001 22 | source: https://github.com/python/codespeed.git 23 | source_ref: speed.pypy.org 24 | python_version: python3 25 | module: speed_pypy 26 | wsgi_app: speed_pypy.wsgi:application 27 | clones: 28 | git: 29 | pypy: 30 | source: https://github.com/pypy/pypy 31 | hg: {} 32 | -------------------------------------------------------------------------------- /pillar/base/fastly-logging.sls: -------------------------------------------------------------------------------- 1 | fastly-logging-names: 2 | - docs 3 | - pythonorg 4 | -------------------------------------------------------------------------------- /pillar/base/firewall/bugs.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | 3 | firewall: 4 | frontend-bugs: 5 | port: 9000:9002 6 | source: *psf_internal_network 7 | postscreen: 8 | port: 20025 9 | source: *psf_internal_network 10 | -------------------------------------------------------------------------------- /pillar/base/firewall/buildbot.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | 3 | firewall: 4 | frontend-traffic: 5 | port: 9000 6 | source: *psf_internal_network 7 | buildbot-worker: 8 | port: 9020 9 | -------------------------------------------------------------------------------- /pillar/base/firewall/codespeed.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | 3 | firewall: 4 | frontend-traffic-cpython: 5 | port: 9000 6 | source: *psf_internal_network 7 | frontend-traffic-pypy: 8 | port: 9001 9 | source: *psf_internal_network 10 | -------------------------------------------------------------------------------- /pillar/base/firewall/consul.sls: -------------------------------------------------------------------------------- 1 | firewall: 2 | consul-tcp: 3 | port: 8300:8302 4 | protocol: tcp 5 | 6 | consul-udp: 7 | port: 8300:8302 8 | protocol: udp 9 | -------------------------------------------------------------------------------- /pillar/base/firewall/elasticsearch.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | firewall: 3 | elasticsearch_psf_internal: 4 | port: 9200 5 | source: *psf_internal_network 6 | -------------------------------------------------------------------------------- /pillar/base/firewall/fastly-logging.sls: -------------------------------------------------------------------------------- 1 | # See https://developer.fastly.com/reference/api/utils/public-ip-list/ 2 | {% set fastly_ip_ranges = salt['http.query']('https://api.fastly.com/public-ip-list', decode=True) %} 3 | 4 
| firewall: 5 | {% for address in fastly_ip_ranges.get('dict', {}).get('addresses', []) %} 6 | fastly_syslog_ipv4_{{ loop.index }}: 7 | source: {{ address }} 8 | port: 514 9 | {% endfor %} 10 | 11 | {% for address in fastly_ip_ranges.get('dict', {}).get('ipv6_addresses', []) %} 12 | fastly_syslog_ipv6_{{ loop.index }}: 13 | source6: {{ address }} 14 | port: 514 15 | {% endfor %} 16 | -------------------------------------------------------------------------------- /pillar/base/firewall/ftp.sls: -------------------------------------------------------------------------------- 1 | firewall: 2 | ftp-20: 3 | port: 20 4 | ftp-21: 5 | port: 21 6 | ftp-incoming: 7 | raw: -A INPUT -p tcp --destination-port 10090:10190 -j ACCEPT 8 | -------------------------------------------------------------------------------- /pillar/base/firewall/hg.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | 3 | firewall: 4 | svn-traffic: 5 | port: 9001 6 | source: *psf_internal_network 7 | legacy-traffic: 8 | port: 9002 9 | source: *psf_internal_network 10 | -------------------------------------------------------------------------------- /pillar/base/firewall/http.sls: -------------------------------------------------------------------------------- 1 | firewall: 2 | http: 3 | port: 80 4 | https: 5 | port: 443 6 | -------------------------------------------------------------------------------- /pillar/base/firewall/loadbalancer.sls: -------------------------------------------------------------------------------- 1 | firewall: 2 | http: 3 | port: 80 4 | 5 | https: 6 | port: 443 7 | 8 | http_0: 9 | port: 20000 10 | 11 | http_2: 12 | port: 20002 13 | 14 | http_3: 15 | port: 20003 16 | 17 | http_4: 18 | port: 20004 19 | 20 | http_5: 21 | port: 20005 22 | 23 | http_6: 24 | port: 20006 25 | 26 | http_map: 27 | port: 20010 28 | 29 | http_proxy: 30 | port: 20001 31 | 32 | http_proxy_map: 33 | port: 20011 34 | 35 | "hg.python.org:ssh": 36 | port: 20100 37 | 38 | "buildbot.python.org:worker": 39 | port: 9020 40 | 41 | "bugs.python.org:smtp": 42 | port: 20025 43 | -------------------------------------------------------------------------------- /pillar/base/firewall/mail.sls: -------------------------------------------------------------------------------- 1 | firewall: 2 | mail-http: 3 | port: 80 4 | mail-https: 5 | port: 443 6 | mail-smtp: 7 | port: 25 8 | mail-smtps: 9 | port: 465 10 | mail-ldap: 11 | port: 143 12 | mail-ldaps: 13 | port: 993 14 | mail-nntp: 15 | port: 119 16 | mail-snntp: 17 | port: 563 18 | mail-submission: 19 | port: 587 20 | mail-ntp-udp: 21 | port: 123 22 | protocol: udp 23 | mail-ntp-tcp: 24 | port: 123 25 | protocol: tcp 26 | -------------------------------------------------------------------------------- /pillar/base/firewall/mailman.sls: -------------------------------------------------------------------------------- 1 | firewall: 2 | mailman-http: 3 | port: 80 4 | mailman-https: 5 | port: 443 6 | mailman-smtp: 7 | port: 25 8 | mailman-smtps: 9 | port: 465 10 | mailman-ldap: 11 | port: 143 12 | mailman-ldaps: 13 | port: 993 14 | mailman-nntp: 15 | port: 119 16 | mailman-snntp: 17 | port: 563 18 | mailman-submission: 19 | port: 587 20 | -------------------------------------------------------------------------------- /pillar/base/firewall/planet.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | 3 | firewall: 4 | frontend-planet: 5 | port: 9000 6 | source: 
*psf_internal_network 7 | -------------------------------------------------------------------------------- /pillar/base/firewall/postgresql.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | 3 | firewall: 4 | postgresql: 5 | port: 5432 6 | -------------------------------------------------------------------------------- /pillar/base/firewall/rs-lb-backend.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | 3 | firewall: 4 | frontend-traffic: 5 | port: 9000 6 | source: *psf_internal_network 7 | -------------------------------------------------------------------------------- /pillar/base/firewall/salt.sls: -------------------------------------------------------------------------------- 1 | {% include "networking.sls" %} 2 | 3 | firewall: 4 | 5 | salt_master_letsencrypt_and_publish_files: 6 | port: 9000:9001 7 | source: *psf_internal_network 8 | 9 | salt_master_psf_internal: 10 | port: 4505:4506 11 | source: *psf_internal_network 12 | 13 | {# NOTE: These hosts do not run in the primary DC (NYC1) so firewall holes are poked for access #} 14 | salt_master_pythontest: 15 | port: 4505:4506 16 | source: 68.183.26.59 17 | 18 | salt_master_backup_server: 19 | port: 4505:4506 20 | source: 159.89.159.168 21 | 22 | salt_master_remote_backup: 23 | port: 4505:4506 24 | source: 138.68.57.99 25 | 26 | salt_master_mail1_ams1: 27 | port: 4505:4506 28 | source: 188.166.95.178 29 | 30 | {# TODO: this is used in development environments #} 31 | salt_master_pebble: 32 | port: 14000 33 | source: *psf_internal_network 34 | -------------------------------------------------------------------------------- /pillar/base/firewall/snakebite.sls: -------------------------------------------------------------------------------- 1 | firewall: 2 | snakebite-blackhole-tcp: 3 | raw: -A INPUT -p tcp --destination-port 56666 -j DROP 4 | snakebite-blackhole-udp: 5 | raw: -A INPUT -p udp --destination-port 56666 -j DROP 6 | snakebite-whitehole-tcp: 7 | raw: -A INPUT -p tcp --destination-port 56667 -j REJECT 8 | snakebite-whitehole-udp: 9 | raw: -A INPUT -p udp --destination-port 56667 -j REJECT 10 | -------------------------------------------------------------------------------- /pillar/base/groups/docs.sls: -------------------------------------------------------------------------------- 1 | groups: 2 | - docs 3 | - docsbuild 4 | -------------------------------------------------------------------------------- /pillar/base/groups/downloads.sls: -------------------------------------------------------------------------------- 1 | groups: 2 | - downloads 3 | -------------------------------------------------------------------------------- /pillar/base/groups/mail.sls: -------------------------------------------------------------------------------- 1 | groups: 2 | - mailman 3 | -------------------------------------------------------------------------------- /pillar/base/mail-opt-out.sls: -------------------------------------------------------------------------------- 1 | system-mail: False 2 | -------------------------------------------------------------------------------- /pillar/base/moin.sls: -------------------------------------------------------------------------------- 1 | moin: 2 | wikis: 3 | python: 4 | name: Python Wiki 5 | regex: '^https?://[^/]+(:\d+)?/moin.*$' 6 | logo: 'Python' 7 | theme: europython 8 | psf: 9 | name: Private PSF Wiki 10 | regex: '^https?://[^/]+(:\d+)?/psf.*$' 11 
| logo: Private PSF Wiki 12 | acls: 13 | before: "AdminGroup:read,write,delete,revert,admin" 14 | default: "" 15 | after: "MembersGroup:read,write,delete,revert All:" 16 | linkspam: False 17 | jython: 18 | name: Jython Wiki 19 | regex: '^https?://[^/]+(:\d+)?/jython.*$' 20 | logo: 'JythonWiki ' 21 | interwiki: JythonWiki 22 | -------------------------------------------------------------------------------- /pillar/base/planet.sls: -------------------------------------------------------------------------------- 1 | planet: 2 | subject_alternative_names: 3 | - planetpython.org 4 | - www.planetpython.org 5 | sites: 6 | planetpython: 7 | domain: planetpython.org 8 | cache: /srv/cache/ 9 | output: /srv/planetpython.org/ 10 | image: ghcr.io/python/planetpython:latest 11 | config: config.ini 12 | planetpython-3: 13 | domain: 3.planetpython.org 14 | cache: /srv/cache3/ 15 | output: /srv/planetpython.org/3/ 16 | image: ghcr.io/python/planetpython-3:latest 17 | config: config.ini 18 | -------------------------------------------------------------------------------- /pillar/base/postgresql/server.sls: -------------------------------------------------------------------------------- 1 | postgresql: 2 | data_dir: /srv/postgresql/11/psf 3 | config_dir: /etc/postgresql/11/psf 4 | config_file: /etc/postgresql/11/psf/postgresql.conf 5 | hba_file: /etc/postgresql/11/psf/pg_hba.conf 6 | ident_file: /etc/postgresql/11/psf/pg_ident.conf 7 | pid_file: /var/run/postgresql/11-psf.pid 8 | recovery_file: /srv/postgresql/11/psf/recovery.conf 9 | 10 | port: 5432 11 | max_connections: 100 12 | replicas: 1 13 | -------------------------------------------------------------------------------- /pillar/base/sudoers/init.sls: -------------------------------------------------------------------------------- 1 | sudoer_groups: 2 | psf-admin: 3 | commands: 4 | - "ALL=(ALL) NOPASSWD: ALL" 5 | -------------------------------------------------------------------------------- /pillar/base/tls.sls: -------------------------------------------------------------------------------- 1 | # This a mapping of role names to TLS certificates that are required for that 2 | # particular role. 
3 | 4 | tls: 5 | ciphers: 6 | default: ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES256:ECDH+AES128:!aNULL:!SHA1:!AESCCM 7 | 8 | gen_certs: 9 | buildbot-master.psf.io: 10 | roles: 11 | - buildbot 12 | 13 | codespeed.psf.io: 14 | roles: 15 | - codespeed 16 | 17 | consul.psf.io: 18 | roles: 19 | - consul 20 | 21 | docs.psf.io: 22 | roles: 23 | - docs 24 | 25 | downloads.psf.io: 26 | roles: 27 | - downloads 28 | 29 | hg.psf.io: 30 | roles: 31 | - hg 32 | 33 | lb.psf.io: 34 | roles: 35 | - loadbalancer 36 | 37 | moin.psf.io: 38 | roles: 39 | - moin 40 | 41 | planet.psf.io: 42 | roles: 43 | - planet 44 | 45 | bugs.psf.io: 46 | roles: 47 | - bugs 48 | 49 | postgresql.psf.io: 50 | roles: 51 | - postgresql 52 | 53 | salt.psf.io: 54 | roles: 55 | - salt-master 56 | 57 | salt-master.vagrant.psf.io: 58 | roles: 59 | - salt-master-vagrant 60 | 61 | svn.psf.io: 62 | roles: 63 | - hg 64 | -------------------------------------------------------------------------------- /pillar/base/users/_admin/benjamin.sls: -------------------------------------------------------------------------------- 1 | users: 2 | benjamin: 3 | access: 4 | docs: 5 | allowed: true 6 | groups: 7 | - docs 8 | downloads: 9 | groups: 10 | - downloads 11 | admin: true 12 | fullname: Benjamin Peterson 13 | ssh_keys: 14 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIC5RZqb06JuJrCClQkFvR/6rSvtFs9MSh7qQxe6gzVpz 15 | Benjamin Peterson 16 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDSSjSCl8VkJWWNLCBizhHOnC8jEjVbt7EDrOzxGJurM 17 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOZr7AjOkEWiJnP2hzmt5tZS53ST3O2WyRyiSXFT0Rde 18 | -------------------------------------------------------------------------------- /pillar/base/users/_admin/benjaminws.sls: -------------------------------------------------------------------------------- 1 | users: 2 | benjaminws: 3 | admin: true 4 | fullname: Benjamin W. 
Smith 5 | ssh_keys: 6 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC1BJUpRtzq1fCntjuNm4YeIDcefBFbkjzFCvN7Zot4UVWpExWqOLJynRYmaAUAFnJNQd5QuXsBIEmC9ySPV0gs+ueX9yg+RLieXcPoym2fMQ7UgmkaJloYgLnWJM3apG0UnGEDRO6Bz4cm+PC5NPfuZlOdYeOmNVKZoOe3via2RABec+hsWRdr2mD7OVL4PUR3AL3IPa9r8WlLhIBG53MkiVU2su8RVnEEyHmc61YQL8sFnI2zt6aSNiFuHvo6sHL3cMsP9XNArOtONZCc3NPvzN9Lh9jCk+JEe47ox/17CxMCOVhn3B9nRh2oGXydYf6LWH2wkhQ5y07dIjULKi9T 7 | benjaminws@macbuntu 8 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDcY/+b32IynLZhF/fjBfGjUjGlS1XCaMYKNxPZNekBv0hWteBh185k3A1yAZWRWAgLsvpHpe5Srs3Wxoz+NF51UWHMYVtpPzXEmpcjsqOe96rKrixFSlrYt89iHklW4FdAV3oJbbQpvXb8c6eFD6dantzmHj8FFRg+f5Bb+lsGhLIzxDcjcKJbySGLHHS+SgQvaXMFd1XE+Gs/SXgQxpbWV347BdOETJplA96jVB74bxoIP+GuCImO34VCu4eG+klnhMeY2MscYgmBa3ePjD86qef0StBu9zzruR5s+y4cYQK8h5Xm2+sC6RdZbZaSeQL+yfYXhPhfvEv4v5WT/QDb 9 | bsmith@bsmith-laptop 10 | -------------------------------------------------------------------------------- /pillar/base/users/_admin/coderanger.sls: -------------------------------------------------------------------------------- 1 | users: 2 | coderanger: 3 | admin: true 4 | fullname: Noah Kantrowitz 5 | ssh_keys: 6 | - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAIEAvV0vQo3OpzqDFDBHW5o5abdbNJNNg9YkiawTpSnOusB+E9Hp2Pae1jT3r+7ZUqIIutXuNOPOefIy6oR0YuZhL6d9uhRIl9LMTB0XXzj5aW5ZbbGO6nsaERQUU3ALYzrwxgX8kLvCcAQmrVhhRH88Zqo4lkRkxXpN1LdPANh9qa0= 7 | coderanger@alfred 8 | -------------------------------------------------------------------------------- /pillar/base/users/_admin/coffee.sls: -------------------------------------------------------------------------------- 1 | users: 2 | coffee: 3 | admin: true 4 | fullname: Jacob Coffee 5 | ssh_keys: 6 | - ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAddLP9ByPv5ZzebYW5D0zVwmugZOlgkvTPOrj2FqpvPtA0lroLur8w606JV0DYQiAkud+/Q7+7fM8StnNld7oA= 7 | Main@secretive.EcheXMBP.local 8 | -------------------------------------------------------------------------------- /pillar/base/users/_admin/dstufft.sls: -------------------------------------------------------------------------------- 1 | users: 2 | dstufft: 3 | admin: true 4 | dotfiles: true 5 | fullname: Donald Stufft 6 | packages: 7 | - zsh 8 | - git 9 | shell: /bin/zsh 10 | ssh_keys: 11 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG7R8y2Ecocj+t5rP7YNmNWM+QmmZ8Wuz0f1btJ3KcA0 12 | donald@stufft.io 13 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5md5DAPib7J+uGHanzgQOJ9GwGYAi7RLbG+rB/0NRk8UbUnwzn0JwkqNTXzeomUapO5Z3cOxQ98jzb0k03hGOzhzjIQpaKI1uKxPPquDevf/PwM5ZQaJzlx/8ah76GzJtEQpIIIbw/fofzywv9pZKTBCDL3wBHB94oByQjr0BG9CfjbMZq6FXcBKfo89L56nLQ8cdvxg2tjNJQElva5gL/xnqjpowtQYjA9MPKFmDwJPcrRF2AstBg5Zpkg+8K4JhJltucXTPEva97alK9prshGFY6XLtVD0mtgbwpHFXjFm7cIQYr8XG3pJdtWki0fLg0o3W1YBukQ+reDblT8SnFaDscgF1gStTra5zXfVF5OJaaRFE8zaYuwC01DQT9sN9G4fV4eK8HRbgpObCJcxnCyTs/SYGVhO1PpOiQYmyswGUlV4vU8G3gl3u0D+gkcpHRkdko0HlFbNUt1wKIZWcGLJcKkNWMKGSf1TaciU+6+2A4QVDxtdab8HdjnbuugX9/FckqjZypaUOwl8U4fYc4JbdUQ78PvcQrSQvhRPZB+1KSvm8rwRuBnFWiNlYmThLhGmKDBXegNF8eFRe4dApzv2DyshasHs4tNv46YIox6FdFEw0voRNPqhbTCF3XIdOQvxkHeZRyGGWv4WCTImM5+3GhXrbOQUtB9NjfM7rhQ== 14 | donald@stufft.io 15 | -------------------------------------------------------------------------------- /pillar/base/users/_admin/ee.sls: -------------------------------------------------------------------------------- 1 | users: 2 | ee: 3 | admin: true 4 | fullname: Ee Durbin 5 | ssh_keys: 6 | - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABgQDvmqF2g0usMZaIc2AZN52BHIc3DhAeKhbUs9RBiwWtro9mplv+lTdwb82qzm5/pH47vsXh7ukFgBimpPCjOjFYknI8L9a5JUGa3fMJwRA1ovF0nVWI0EIlpiKt0n5jFxtnbaDy2wHITFmNBre4yOlLY4tsdGjRbPiWNjIQtFcj3bSel9PRliDHfTSi6DTF/GsU3CQmrUMANjvMlZvLPOcIeqLlg32333hl/7V2GKLndxcLb8YwGGc7oS+GkbB351n/Je06Dq8XzWGpqdpadA3i8uhYtPX9cUnIpj5hrJ8/A/fYyuE2IEVWBu8TGIvDIDDTI6T7KjaP/EoR7vlfVDcx6Hbog5gcnrFyJxDCsWB0rUsxw14dGviNu8c0eyecx1qe+M9UtyqNBlM93Eheqbf+IXuH/PcNSfO40uG5VG1TtCi8avKb/XmeeR0o6egCXsg8nOwQurCV6ECyIjauvJiY+IAiINOjWgs3F4p0HlRg25yvFsf6ZSTmoRx74RKDYB8= 7 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDZOuPNmBeKubAvLrnZlVGzp49Duw/qn8cq4IsOmxKsiVtiTDdLLKr6YBA67CPu/QrVEZYMU/N7RpNOKRfqueYdw8aNB+KgGOy8B/OIiX2obi854q0B81NaYTxdjUmuo67q9gNcdrEv6GmJJzBFqx//d4Nl4F4pcQLFTmxfZg3MDB1zyo2qP8ZpW5jttNty7LpvFlReSHEB/87o7wlNrhnrwSg68NykC2x/DzPv5ZOY5ccW4YiatjS5R2P7MDWv+aabbzZD4G0r3ElaXyW4fRV4KWuyh0ow/5djG0ZIWQSSuqqsnxcmRiV67IV28X6c2uw2zoFO5LLYly8UKMC6OLuFJlrmUPuzXvEeB/AcudgPPFxaip0aSzhowih7Ij0i+rVgma535q6pPqSkPCAcOLpmSi0yk23V9Mr38zg2d33poqV+bFbtSOE3a2gA52rDj4+YMcHQZqxZrgUv0KrSuKwcEm5WyBSOYI1VUoGCxiF27HY+9NaTqfMSHF0AJIVGHsv31uAWa4wqynQVrbZfH4wxlC6yC447HKjizp2LIoZx16LqSHMFfCGQBI4trZPGwZb+OBq2rhFY0GxgQaowrIK0E+VgTiDjcq6VtraglEHJsysK7jj4NiEWiIFVEv0WNW5e2DKVQKZ9BrN7sILcVp+g9VUSRirAZTXzheHLb6+8Hw== 8 | ewd3yubikey2018 9 | - ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBnWofdFIY68N19Q04fiWKPYx+dcpc7nmU10IyOyon2KJa5blCDoxFOemOeWl8doAfFO2qhprQ2YhNABes3pZOc= 10 | ee@chungus 11 | - ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBt9u0P2LhONN5ngUmtU8GupeRXp+BKXnq9Pmx6r6vE8iJ1MQCeRroiNpJ5C8Lr+2yCgsRK5pLndKOowIz5hFPY= 12 | ee@pancack 13 | - ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBSz/Yna8yFAglO9iY/lEdBHDNY91UTfbfGLYcYAZKtYnAnTMyOSQBhubUYr7z5zZv8NBpLiU+10WR8hgDtSsr0= 14 | ee@psf-air 15 | - ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBoop9FbexCHgRpGkLCIbvzoyuBHYuB2vfRmRFvdBot18yYcXBLA/Apl4rO9DhABu6qPp8n784AuKLwJQbnHdeg= 16 | ee@slimjim 17 | -------------------------------------------------------------------------------- /pillar/base/users/adam.sls: -------------------------------------------------------------------------------- 1 | users: 2 | adam: 3 | access: 4 | docs: 5 | allowed: true 6 | groups: 7 | - docs 8 | - docsbuild 9 | sudo: true 10 | moin: 11 | allowed: true 12 | groups: 13 | - moin 14 | sudo: true 15 | fullname: Adam Turner 16 | ssh_keys: 17 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEIGz53UVWECxd1h48KLeMtCIVohay+OA639kMNntQCw 18 | -------------------------------------------------------------------------------- /pillar/base/users/ambv.sls: -------------------------------------------------------------------------------- 1 | users: 2 | ambv: 3 | access: 4 | bugs: 5 | allowed: true 6 | groups: 7 | - roundup 8 | sudo: true 9 | buildbot: 10 | allowed: true 11 | sudo: true 12 | codespeed: 13 | allowed: true 14 | sudo: true 15 | docs: 16 | allowed: true 17 | groups: 18 | - docs 19 | - docsbuild 20 | sudo: true 21 | downloads: 22 | allowed: true 23 | groups: 24 | - downloads 25 | sudo: true 26 | fullname: "\u0141ukasz Langa" 27 | ssh_keys: 28 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMGC2VGofArUyzGR25TbLFtk16bYT+6TuCHui7qgSxvp 29 | lukasz.langa 30 | -------------------------------------------------------------------------------- /pillar/base/users/ammar.sls: -------------------------------------------------------------------------------- 1 | users: 2 | ammar: 3 | access: 4 | bugs: 5 | allowed: true 6 | groups: 7 | - roundup 8 | 
sudo: true 9 | fullname: Ammar Askar 10 | ssh_keys: 11 | - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3puTkwAfNz3oHVQ6nH/7y4iU3rYkyBLs44tYmA99068n8AyMCWNwRIFxVmsYA3NsFD4MmyKL7kH67/3gX3/N5FNyhj6WN72/CeJUyl/JhlruvAdEWSuJQyV1I+cP9yM4KjJ+pGdPBHX3uc1AFXPZ5H3I/WmmZemkO7fis8jIuRTdKsC4PPIaqWcOtyt5wt2HpNoCr/Ji8Sl8yxWwB/HuRzsoEoqluCe2tp5e4Cg5SGFEgCUjzlCe27ixYnKWAw2zrBvx3Sn49MVT3xyQYqaimu437NwQkJBps0tRG7+rlZQ8w7dxlB67WUCCnNtcPPewE17PLvWIhFDWpm7Yiue3Sw== 12 | ammar_laptop 13 | -------------------------------------------------------------------------------- /pillar/base/users/antoine.sls: -------------------------------------------------------------------------------- 1 | users: 2 | antoine: 3 | access: 4 | hg: 5 | allowed: true 6 | sudo: true 7 | fullname: Antoine Pitrou 8 | ssh_keys: 9 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIECocvLw5VQdrDP6WdzVtitcB3qP5vkwVAaUvJWYwRp/ 10 | antoine@fsol 11 | -------------------------------------------------------------------------------- /pillar/base/users/barry.sls: -------------------------------------------------------------------------------- 1 | users: 2 | barry: 3 | access: 4 | docs: 5 | allowed: true 6 | groups: 7 | - docs 8 | downloads: 9 | allowed: true 10 | groups: 11 | - downloads 12 | gnumailman: 13 | allowed: true 14 | sudo: true 15 | mail: 16 | allowed: true 17 | sudo: true 18 | fullname: Barry Warsaw 19 | ssh_keys: 20 | - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3EoQJ+jfbvoG0zBNueWNwq5ElwB4PvXK/MKuEoX1Nr73CP6gwWOBpAmlv13z4BovQXAnOIbIyJIMc6tnaTEn7xX2nc+YsQ7Z6/CFJ2rERUtg85RDH1pRcrPYueEABNI93HRpWn3XHoQYQ3a3oY9mRtVyIx65Ec6Yxqv6H3QlJZKlX9e76V3KUtrV+LT5vODtId/we9K9CvYZwLN5KP3eU01MGaMaF4J+TkVcPa0QfWoPLDWeXOLu68SSB6Gkk2G15id5Z8JKZ71hNCTOiDQTYg+yCyAbSMyvTJueUV2W0ObRh4ziVPzDsf7pv/27jSJXAyO8l3GZDWDFFzekUAaKvQ== 21 | mobile@Digital-Man 22 | - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAgEA5uI0CZ7n7ztfSMqYzjj3UK+4zpcMHoshpMRK/fAw65+gntPSjenZnSQBTEt/Cpo951cFZaTcGWlRxzFyOvbUuEwnfafQlAlQDyWa3bcZ2bXTxmA+rdjpY9TnPwXL5oU/14s2zpxg4mFRyTtl+GquPRGTCY5UAorRqnffV5ht9USdAJ3Ur/Rm5tQ8eL3fXLO56knaHguGhFZypkBGLYlTeLpl9VzSHwhRIng+sHimLkQRrMy/psmsB8z45+BPwbUX+Y7syfbN+EjBZrC7EWTlUn9cdAr4s/WJhFOpyR5FR6M4K+lC7KeypnSnTuTmBFEoglYtl/SbI7GIFaqGld/GAHohSPA7A6Xt6kR8kiqBU4VFsXtwiC5ZJ172CoDyDyF12JDZuKYlTn3Sja+CwBnRMu7RJ22fiB//ORkHIRFd9cgdbEDQFraGGXXvj18lHzGhTPZazmWMSxyUrYZ/2oK9RRXx726g5a7rRZ2wmLpq6grVxFmN+KA8UF+d4a5OLr9lU9xykEPCmV8X5oV9pJPbVeoGIYLYfT26Wra8Nu3PwDwaV3P0kiXBnsfxZaynPA19M+cLL+s68pc+aV7ZVh7ghYC8yVhAWjpycN4xF39g3CCFULElXIG+qD1yxq0ZvdArRyl3diGdQamYhDJ67+efpZEB+wm8TeignviwUr1EVJM= 23 | Keys for VMs 24 | -------------------------------------------------------------------------------- /pillar/base/users/brian.sls: -------------------------------------------------------------------------------- 1 | users: 2 | brian: 3 | access: 4 | cdn-logs: 5 | allowed: true 6 | fullname: Brian Curtin 7 | ssh_keys: 8 | - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAw12z1o+qAtnf8lWK+pQi1spjMZPWbdPRBHaNu2l2O5ZuOkEGr8Qlbcr3M+1CuXUOQ4pQg9uRegUhjVXG9pEmWvEtOHVnpDJamtyO6dTgesLazclQRuarK10QZKj++H+QbNUWqgj0EiQk3cybmDKeMva43pmkjpbgxBjpUMVmvOLqwsWqPdK8gCOvClwN7wT60xpcoh09oigGARsyIMHKB3TBjDnV6hivxL/bQE1q2h8iS3Ch1b9PCfEHRC1sLZxn3DzE/WX4Y80JJhRBGVFOkdzHZf43dtCS3gwZyDRiGz0NGRYhrkxnP1HQNNmvuUqQSSy4t+k5tDiBMIruX6kTcw== 9 | -------------------------------------------------------------------------------- /pillar/base/users/diegorusso.sls: -------------------------------------------------------------------------------- 1 | users: 2 | diegorusso: 3 | access: 4 | buildbot: 5 | allowed: true 6 | sudo: true 7 | codespeed: 8 | allowed: true 9 | sudo: true 10 
| fullname: Diego Russo 11 | ssh_keys: 12 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMyfCtl3i1iUFJO30AVC6hqOqbj2jIauK9HIcw2vAiQ9 13 | diego.russo@arm.com 14 | -------------------------------------------------------------------------------- /pillar/base/users/encukou.sls: -------------------------------------------------------------------------------- 1 | users: 2 | encukou: 3 | access: 4 | buildbot: 5 | allowed: true 6 | sudo: true 7 | fullname: Petr Viktorin 8 | ssh_keys: 9 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCuL7X5HFPwaZFEJxbNhjlD7/nl8ov3R0D0PE0e0YXckgWS3LyKKwHvhB4WbHiGh4qwpj/yp8+V27nRzy+I44MoZEtgUqNC4DGZxUTrU5CQuIcHVbqVz3k+XokGzEy09JqEvWS5JISg+C08n5xuHFzZG3PFZR3jgydxOdlxlPkMipXjfr12ULXGPOIG5JsIfLEAKiS0IH/NHcydCT24q7X+LRCBlfxbQmAtrq6OySqEjyYwxVdEoMJG7yiGgc/t2bcDODMaTKSFRBNL0B7aqrejxf3mtTLleX31ZaW0Uo9bCn87XJxbqC6AUwiDuzzYI/3Jz1zzpwqTlDJLS09JV8BfGyovTt22dKYTriRsTjX8pWl4uZInAiHPZOT2ZhKZhmCl/yd3zerBFW6tCKiw2n6wG7gWDSgVTnMdbtSdnhK9DIUsYd0ZWVcKvIBdcDbxGLtbAPV4Sn1CkuLmM6wD0dqok7qDAJtsnYXjE3GOZOnfnEgrW81S4pHg2fGQ/vYasIk= 10 | encukou@blackbox 11 | -------------------------------------------------------------------------------- /pillar/base/users/ezio.sls: -------------------------------------------------------------------------------- 1 | users: 2 | ezio: 3 | access: 4 | bugs: 5 | allowed: true 6 | groups: 7 | - roundup 8 | sudo: true 9 | fullname: Ezio Melotti 10 | ssh_keys: 11 | - ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAm1FUCa4Q+cKyMIg4QX4dszX+Wnj216HHCkHMgf2L0chYZVPvbWzboJqs+peInkWnXwYBnmJFdiHSxkwY4fnv1P3o+fb5wcl3fv0/kqIwcC+iavKlDOeBI51FP2s++QpvD5cU6rW202b8k2K5COJ5mlkIvgfhiyRYnR4LbHePLG+jd/4I6gCtUcGTQqwNaJOzRzvaviRd395S7+I8L35iWt1zTNck9II1gCvfcCLeM7XYtogFBq1LW5BZ9f+hZtJC0RcIpihHeoSNW7bmqWH5d0noQrBaKQfz5xm2BlQPM82/QWGUSZjPphynrCV8rYtitM3nkBl5zIxHsfoQbsp5ZQ== 12 | ezio-melotti 13 | -------------------------------------------------------------------------------- /pillar/base/users/fijal.sls: -------------------------------------------------------------------------------- 1 | users: 2 | fijal: 3 | access: 4 | codespeed: 5 | allowed: true 6 | sudo: true 7 | fullname: Maciej Fijalkowski 8 | ssh_keys: 9 | - ssh-dss AAAAB3NzaC1kc3MAAACBAJbByTtD76TRTTxJBFRkfMgHKjhQky/9wq0GZoYvUCWpzL/59H1y7V8SWRmEmHRQBuO+Z5/UBz3poUwHyftYSMA1zUiD5ZlWDoFNeiCb/lmO779IBVb98Pg5zZiN4be1niZozmmoL4IdpwHfIZZMA2Ycdlqww8zE90PEgyPZezPbAAAAFQDdlsczacwXg2HidIa7Tu5nVIzxhQAAAIAdoFDvTP3xfJvv9BbRCG65mn80lRwUjTuXGuPMdR4e/N4zEb96OnZuqJEtzfFF6UWDqNHXcN6k07fo0bqLFg706io+5kTQamnUG7+QQRT4df1Dg6qmMpzh3LHyAZe81DZmdSAJMSbwTqT46G3r2VmOXwo/GJYMf7O/m61bPrXcqAAAAIABmglK3TZCOOzFv6P9H/efeb7EmgVy/NMyjvCYHl6Eb6aB8oOn7YQA6cfh1DE++ax746d5fke5LiCcWtjh4TNf6mDaFORXEtqUK7e6A8r2W/fJ1YgEAdY9Qy81ilJEhVTmYDvAIA4fvpmBdXti2Xq28sAEFJU8DPrFBjGx3VDkTg== 10 | fijal@zygmunt 11 | -------------------------------------------------------------------------------- /pillar/base/users/gbrandl.sls: -------------------------------------------------------------------------------- 1 | users: 2 | gbrandl: 3 | access: 4 | docs: 5 | allowed: true 6 | groups: 7 | - docs 8 | sudo: true 9 | downloads: 10 | allowed: true 11 | groups: 12 | - downloads 13 | sudo: true 14 | hg: 15 | allowed: true 16 | sudo: true 17 | fullname: Georg Brandl 18 | ssh_keys: 19 | - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCmbsFom9RaggUlB3k8JQCEFDS0IfOVEUSc4Rp4hn2IZhmmpVM1kz+PG93LosAu5rqYi/tTtjTfqjT1EAiGDe+ynqEUznA8tuH8i9yUb/nuWn2hVa60od3rCbWzOh98fiREihd/ITM38GpHgUt1tAXC8d3U4Q8YwPbzc2qSDvB8aq2sdWhSToPf5JEYDgjWvCHmRB+GFmWuusb7CekQ7o/3WY8KS86A+iXfdPyXa8bd2LnUxBVEkIggXNgP0+1Dk6oyWQTuyYKt9m498Zzz3cqliY5DdGxxDVhPoFCtFw1hTyb/4bRPOsGJGgJjIFaQkoRuxilhUn1VT1OajmuWjJAN 20 | gbr@georg 21 | -------------------------------------------------------------------------------- /pillar/base/users/haypo.sls: -------------------------------------------------------------------------------- 1 | users: 2 | haypo: 3 | access: 4 | buildbot: 5 | allowed: true 6 | sudo: true 7 | codespeed: 8 | allowed: true 9 | sudo: true 10 | fullname: Victor Stinner 11 | ssh_keys: 12 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIEIVzct/vFIVEAQ5aG8WJlJJWPNZYbVDyI5jwljst8mk 13 | haypo2017 14 | -------------------------------------------------------------------------------- /pillar/base/users/hildeb.sls: -------------------------------------------------------------------------------- 1 | users: 2 | hildeb: 3 | access: 4 | mail: 5 | allowed: true 6 | sudo: true 7 | fullname: Ralf Hildebrandt 8 | ssh_keys: 9 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDTLpYGZdP2XPthO2eI2sp1mN8C5gO6jGu3wO2j0uD5xNLid2jyadBDL2JpAgxwYv6uREopdST17qVc9hMmPXHEJN4tvLyHjUcjsL80QkVneyuFRT+oBqahX1+h8aVkk8vLuwKsVO5ARqcvYF846YQnRcdJRkPsdAZ4GD5jXGE6EN/yJLxEiVvCMbi7DvegxS6sUxl6E7380bdas3l46R+kotQXlVIw8NnakHFpdYNFlPF7pW1Kd2pCIQGKkOQaSLvuVKdOpKTyG4337R0c65/PMmDk5Uuqt78RbcSV/rTbuaxNEPLzrRX7DvZ2baZlf2vJMdsl20hFiNa1J4xIrG6T 10 | hildeb@senftopf 11 | -------------------------------------------------------------------------------- /pillar/base/users/hugovk.sls: -------------------------------------------------------------------------------- 1 | users: 2 | hugovk: 3 | access: 4 | docs: 5 | allowed: true 6 | groups: 7 | - docs 8 | - docsbuild 9 | sudo: true 10 | downloads: 11 | allowed: true 12 | groups: 13 | - downloads 14 | sudo: true 15 | fullname: Hugo van Kemenade 16 | ssh_keys: 17 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGTTCyZejfWSW277j177F09VOt8dctuFRpDnsLe+7Ifw 18 | hugovk@users.noreply.github.com 19 | -------------------------------------------------------------------------------- /pillar/base/users/isidentical.sls: -------------------------------------------------------------------------------- 1 | users: 2 | isidentical: 3 | accesss: 4 | codespeed: 5 | allowed: true 6 | sudo: true 7 | fullname: Batuhan Taskaya 8 | ssh_keys: 9 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDI+7nVxFPxT0A5mrLq59YFziAYv2KP9nsjQiKP4139FCYur3CbBkgBD2MSPk5kNy39X+b3xZrZFV72rspY968iFyLXBTfAEoLNAyKC8OHJ9irV2ToWLuWOoek30HrbSYGnuzRFfHNW/8wNuiB2GGZ3hwCAcWaLoGjQuaW47AulkRHWpDzQpJO6zXTo2r5OelLGu2z2zFdtOLvIEnG8FiSKLgMt1UJk7Y1JXQF0+cOOeS/NZHu5efa1Lxpch2qumKJhD4gh1sv9+K+VX70TtZ14uuqo8454b7n3kKeC/RQu6hdXUUcjCywLniNurgTT84OcJluQNHHb0sapOz76oOqr4rF3osFcLav3BbrvdKNH+2TU9feC1BTGwoeQih1jYH3cLJDAvbs+EVkTEjhgfsw88REYJ8srvzLOmeGHNgwscYe2r+q2WB0d4C/Vsud8wea5uSKFn37RkNFcb5zzkEYuyC3rdfgVvFWKVwcKsXtWTDV22M85GRo2F9chXfc4QU0= 10 | -------------------------------------------------------------------------------- /pillar/base/users/itamaro.sls: -------------------------------------------------------------------------------- 1 | users: 2 | itamaro: 3 | access: 4 | buildbot: 5 | allowed: true 6 | sudo: true 7 | fullname: Itamar Oren 8 | ssh_keys: 9 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAK7GQ50VMf3Kekgi6n+JhqPvq+k1aH5Klc1o+kwWdc0 10 | itamarost@gmail.com 11 | 
-------------------------------------------------------------------------------- /pillar/base/users/jafo.sls: -------------------------------------------------------------------------------- 1 | users: 2 | jafo: 3 | access: 4 | mail: 5 | allowed: true 6 | fullname: Sean Reifschneider 7 | ssh_keys: 8 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAEAQDdVGymQaLMlzaHvbEnxR7TvFiad4xrlJTCJvgygNrSbJCm3wTGSW6Yj5NTHtzWCPTYAmgZYLFZ7pJmPefHV6DLdAFJSA4HwhPOFM8vM8ERo0lSgR6ArlG288Gi9dvjh8O/jZwq0uW4lgiW8KFMhnr/HJy8rTZwqIPz680TakUxE6G48zQW+NUvL7Jn5cXLuYARvGHAv6ajkhhuEqCw1kjCnkgCh/Cl0YvIeTBfdGBGC2SiD2BOPzMYyTSzqm9tCRuscpo+JfKQZa288/9XhG0GOEsxg9h5BDJ7CKj7XxlmOcVKPgHeuvHQ5Xb1Eg5X92zpcBvcW7wgpk0lvAa/YqFLI3i8iNDDtfK40s1NvAW9/0+GUI7isNm1+cEvJfrRYbZ8XZqCkv9kS+d+AOua1iboTU7/cdyaRwm0Wji6Id3CswGT3XwxjtoIz1S10twXj5fjSrJ5ZzBxMQ48mcrlyVpBSvvArzgUI4yWs/9fl0BFVjP4VERstIfXVNNQ16RYQDLvtSHLYt/2lXFWMAWEBEHbfcW7NsBoGkLLJzUlh/J2HxdNuHvizUX6ZzF3+ZS7hevUJJnkgLgccdbqrv7UPkMNXaWxQqU9b5XDN+58C+BGa2JVJT8aWSjUQHzndjza1CnWVAvZ9FxrNh6Aey7Wjkat0zKbhY94t7MmVtYvxosg3aNu7vvNT+X21MVn/3oi3XW1GQmJsT70H7HeElY1fi7nTfTQoOfckxxewPpJYJiBeajngjpgMmIzTzP6j0AH8jU/pIpUtn0WYFX7axhiTRE4KmWYdEcBZ9JpYF6Vp/q1teXV7CSKH5e9TqsFng10DpPEkfQ5Xp3hupUpeu/Z3CTHAlIbGL8YwhQHYulwKrBGUAF54B+kRv4a57Ta3xfn6WMiPaz/pFbG6sxScqFffMBlEzAp3RSRBWMl8Y8gYRQdWaJzyU2FTKRJG4LifcQwZ24OrTftLyEEEkmjK5dQS3ldmO7XRIFRSuPRWj2hOCC06ZZzsZF8xvU58grl0oCOPUQbSDeyWZC2xilR+NEmTYw5Alg6Q9AB+MfJWj915UD0IR5QQtLO9BCZEZ1aVxl30/0iVF8MGcS8+PKlYpp51FFXTI90vXkhPVSeOofTfZ4gsamMRoLblLL2WycbY1vMOLC39BcXhOz2PNGr5RhhCcQbUn5D5FlMiznkhdxGobX5mdSxHHVjPW8YtB6phaRFhZon5ycfbihT9ZFnyNTHTta6a0u/0aiHW5eIbn8Bfasr9bka8F7yM75GGkT4hH1NDuK88nJbxmfnds4pELhMX+3J50juHSE3RNaoPFFyB3uZbdv4d67TZtEN5BuIfgdfx8fgLnmWTeGtpn5BSkQLD8/j 9 | jafo@guin 10 | -------------------------------------------------------------------------------- /pillar/base/users/larry.sls: -------------------------------------------------------------------------------- 1 | users: 2 | larry: 3 | access: 4 | docs: 5 | allowed: true 6 | groups: 7 | - docs 8 | downloads: 9 | allowed: true 10 | groups: 11 | - downloads 12 | sudo: true 13 | hg: 14 | allowed: true 15 | sudo: true 16 | fullname: Larry Hastings 17 | ssh_keys: 18 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCzWLZOI56gvPiuow/pR8MCCaK2zglBxpNk8mzim+JrSox06Ilu90YrL2zsM4MvgV8TpWWrXH/UQtz1eko188hKAT98IzBwgFoVvbTh23Tidc8YA2A0l4KlPvgcnc8FiGa9ha2UZMUN3bg46lq/crbp3+RiF3+W4jziAHzv1zihDMeaY/tx8iZamFWwJcIqqh+6ptDJ5COeRqECT7A/JUdPw2f8ajvm5oIaWrIu4fUxiRaQviiTgYfqT8MjmExDgqVLgGy0/JUUEo5rkBMgQLU2A783o4a5qxeZD4VOe0HazDwi6YtyvUiTjOIpyjmxH3lT6RoSo3gIRYkW5q4T+gCv 19 | larry@hastings.org 20 | -------------------------------------------------------------------------------- /pillar/base/users/lemburg.sls: -------------------------------------------------------------------------------- 1 | users: 2 | lemburg: 3 | access: 4 | moin: 5 | allowed: true 6 | groups: 7 | - moin 8 | sudo: true 9 | planet: 10 | allowed: true 11 | sudo: true 12 | fullname: Marc-Andre Lemburg 13 | ssh_keys: 14 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDaPO2fk5o6Z41WALEBvErRsYC0Ozjg4VmhDSisvKAnUKss1jYoA1bJ0wR7jFPL7TU6j2LBcyuHMEylU9OWdWaMsSaFy/Jyiw+/fApSRwU0+1yP8qUaeEwQHRv5g426KExvRtxmPx6bw3drpdBv9vaEhTLYbE4yl27lZIYY9c50l9oSPoFfDZg8XZuXqsHY6/BSFxtw0PDUd/qKYZycX4C9AMg4FQocz6Lc4MFbIZaJOFRyfsX6HPAWkY2O0MDydikA83eWg7AEZHmJUyV9QZcrOLutAQfEOdA7XfDkdUkR6bnR/PIjWo7k+4H4UdSBS1bDbEcUsKAGIhiwJmGBdkjj 15 | marc-andre.lemburg 16 | -------------------------------------------------------------------------------- /pillar/base/users/loewis.sls: 
-------------------------------------------------------------------------------- 1 | users: 2 | loewis: 3 | access: 4 | downloads: 5 | allowed: true 6 | groups: 7 | - downloads 8 | sudo: true 9 | fullname: Martin von Loewis 10 | ssh_keys: 11 | - ssh-dss AAAAB3NzaC1kc3MAAACBALEQSXDzDyY1PENMiRh7mVZsfJTG34ytr/GLMOhpLrFmsQRglkBdK04hEcd6gDT3+7vEUmHndIdj2RLQCG4OR8XISTrQ2Nmq1aktw+MABkmrp3pnPyKUxI0LC3F3ZKRdTefzIz8JeSVrJ7bzxRPX2ogN5i4vz0F8JZidx9tN5MODAAAAFQC7zsu/+K7W0eYMRXrNvvoX/0dCPwAAAIAwJNCVFKZldb//3yo7KMjrIGZrLwjBjc08E+cEFh0VkREL0TLRnHorwPdmkPKLZd0FjN/NJJLYUtAiX4ty9ZEXg0g9psVydhK9YMP6+gfRDtlrdse4N0PHgLeS4d70S6s9Zhtxdgl8T/vc8PDhzDkvqoD6rOlGbDXXTsm1FmzbigAAAIEApPn5qyxaYBrGXsUzW8Q1tnjIJKVTEZNS54cOgXvnGZkStg+e8Usr5d23Ik2cMs5c28NwifVhw9FREi04kEfbKE5XoiKcBYHAVXwriO1CVDznGuPAJmUVc3frY55Rg16vCXsMvJxNKfVil7mhLTwbgwTN8tZbEj5Tlh/mSRVredQ= 12 | martin@mira 13 | - ssh-dss AAAAB3NzaC1kc3MAAACBANVhIMrbKczF76sU4k73Bhh4qR15psW6stle8Zo+/jY7OBGfwf3m/UaH6QEwfQQka6glIkB062oM4ZtzauS6iQNcmeBuAywRM1jfWQ3SqnWUuAevAKQ5NHJ5lvMUWxrQLZch/SNLkQER83XY0PvRr/TR8jqzdQLTdNoB0ErLElVXAAAAFQCm+TYKtE7/pQpVz6td5n9+pZR1aQAAAIB5Rclk9FBfSe1hJ2lKkRcki/t2m4SyI3pV9WAEV2wqWdz4YTrIKwmEGFAFxBd8ElbnLahPTtWsFSgaAyzRr6/903rjSTlDupg28mhKPWoJ5yypzpgEbqvtERrbQVkSVLrOpGECQIKtdWqQcdINnILjA3Jx/+UbcR6hY+8APDZQ1gAAAIEAgYl2sn4Hs/xzGwevZUq9Cqd+t6UcYnpSwutZoewP/yLGNXGzAu6CjD7UHi8vcYupB1ojVaWNlRNYBANRlpcbO1J4u1mW5Ky5tYOax4ts+sdIET/241qgpFq40KVF2uIFkNUUIdIigUQPUhqy+41IUWU8o2QRXmo8I6Cw7vppJTQ= 14 | loewis@kosh 15 | - ssh-dss AAAAB3NzaC1kc3MAAACBALfLPD0tevBjI5aqbV/B8e+9a13o/CQmBeEc7zFiVJ9/ZRPMmKHmuwczpMwzrA5ABS8Ih/HZ864yG+d1IMcJZW4PSzPw0mMWGadR/rGfeZJA03nG90TNJp0DqQXQ8e5RZif9StBpPJ67qQMY3KeyKveJZU99d5oSfxpWOVxE9QwBAAAAFQCmKEkEf4fXceJisqqTEbQ932hRUwAAAIB6Otm8xZtTb1ZF7JqsD2DjfKiu7vHVx204UG78I3Fj9BwIQx4zK+fSgjI3ZyMfJjAcBekQQKE5uWnD0PsrJO7q80C2HZNzY9da7Ou8bvegltn5cME36Xh2DQEEEVBGNnTCgG0Fj8UVCVnjIjvAOOxx8jpV5LXrfty6uAoy+cs6UwAAAIBBI2qoYRr6/V7BTlVAo9Tpg3tkbbYduC64jfcdfqjJhgc9nWoqWqW4k3lMadWOXhMheXOyfndm+qhVYe1rbboUaD4MyFOGyvG/AVH4PdXdKyMVQncO47hNko7bxTCrplNERjTrWrMdOtR1CuDLfRTBkBgW1h6dhsZZkQm6rUeFNw= 16 | loewis@creosote.python.org 17 | -------------------------------------------------------------------------------- /pillar/base/users/mattip.sls: -------------------------------------------------------------------------------- 1 | users: 2 | mattip: 3 | access: 4 | codespeed: 5 | allowed: true 6 | sudo: true 7 | fullname: Matti Picus 8 | ssh_keys: 9 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4NVQIJX7GvaoNUFEGo+dQvpIGx9aCbuVqT4O3T3GQ/cxrikfIDX9VqGm2w5B/1+EvMf43kfBFzD8nuoByCvN+axmHkW4V54pq9jDEAL1SwheOiQLygj3XbcDgsY4TET3hGIbfEQgtRbLyHFrVG97IrkwJ4i/4fr+vnvzR7Us2teX1IWJwsKuNGzW69hJHGMNuXPawU+JC9dP42EfjrQrkiIuvMWrXQer1uVGx7gPwd3AxtTu3fx1QfiaJ5J74jALsgaOGep1I3Ap+KFLPZDf0PAM56aHbOxnsM1R76JRsCZVgXDwShVi9J2yQogcONXEP8O+BAFv0lP03qjiNs2oz 10 | matti@asus 11 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDzePfoLowdG+U7211Xmi1XJJ9acxLzLSYueJDOpnfx/XciyiFbHaJfU1LDTJwkxDAedZn48w7OmHbMsVhGiHun5uJu4EU3QJN3rMxqDDNqWMwo2f09kMhdSjbCR7BVdBUzLQteUDtcXJ7QdH+6sSFRrECyUkehcp49cwVJ0S0w7GWb31enpfUWEoTNgHsnXpabF7EepiK5iWe0j1cHd0GM3rDpVNc8asjRGESb3L0gG6RU+t9ptEvDqnPuPq3vWf+Fb724F+3uSB2ehjPDZwcWZGli/FSjmjft3+7JhAeSTPAptSg6n0ienqLlE0/pom+6BHtMDurmYIFhCDpvq+rv 12 | mattip@tannit 13 | -------------------------------------------------------------------------------- /pillar/base/users/maxking.sls: -------------------------------------------------------------------------------- 1 | users: 2 | maxking: 3 | access: 4 | gnumailman: 5 | allowed: true 6 | groups: 7 | - mailman 8 | sudo: true 9 | mail: 10 | 
allowed: true 11 | groups: 12 | - mailman 13 | sudo: true 14 | fullname: Abhilash Raj 15 | ssh_keys: 16 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZV9XLHJV0bxVyZ6pbKuDaehAx9c7mlw9RZDCs0zaCBK5asyFGCg6ToDMpuBgUUT+j5AyC9tE73srWZWU5tC2VnwuMquOFzGgwon2Nl08+TsgZ5TzVe2JTJLpyvyO6uCJD2qRJ2Euq47PIknIfnnJtlWw0igO4Y9dYpAs32pxS4jjHWdbqjAEPDbMH3WGWQLMcn8A1JR1wYC11Y15w/RHTZBo9yAbJ7GuYIYcVVolPUdfgpnZehmZUydu8UtAvIRQURpnNiULZfxW5HezeUCwYmM/X1lSNZhwsdinqXsrXc2uDG8DRmLKEh2RHzy/bwUcEdJ2KlE63uoUdwphmTpor 17 | maxking@angel 18 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC66iP0ZJvZssvB9ACc3jvK1eCVZtRC+YXGPXbUYHjqLIz4o2AbmjMIHwIZjnTWdP2Y1rU2kb7f/D6uPI9cFhMSc1Hc9BIlwmwO+bzQowUay8j6DQZvMOG0FnxDkWCR0K1qX3CxsETyuHjt61e9jvOdWZCCy2dUcaTmc4j+4CjFHV3sp7pzhs9xV+CW6dPtBSdep4Ob3WSg5tP3XnYVtpUT9vc5Q57NSLhDJRF9PBQUzyp90OVCUJ9GLHh9C2jv++PkmrU7VK+GFzfiZdvNVVxgbvkDrhfcuzxktxkL6w2FyfbpmTUhNXp1u0FkP9mpZ/ouhc/hYs2tnJw115GAsn5d 19 | phoenix 20 | - ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMV/+IM81xKIyZhJHnCqc6mFBQjKxBj0sEx3Gj3N/DPXL1NXtf49Dj/Wx44AKy5fda+mVZZTzQVB9C7F0X+kRJE= 21 | maxking@Abhilashs-MacBook-Pro.local 22 | -------------------------------------------------------------------------------- /pillar/base/users/mdk.sls: -------------------------------------------------------------------------------- 1 | users: 2 | mdk: 3 | access: 4 | docs: 5 | allowed: true 6 | groups: 7 | - docs 8 | - docsbuild 9 | sudo: true 10 | fullname: Julien Palard 11 | ssh_keys: 12 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDKA7DgTQ0G7+kdsX0lIUOAAOllwGSCu8s8TxPvr/61Y8q+pIO5mrZycI0xYcKP5NZaABqlFyXUUNfLj7RLqteBxqq2QZP4NOJ1MutYRIkzJ9YW0f565jHaOqSguz0MY+1sCHtuEPiUUZoNexkKN7SIx60SfoaMEvGjAj46txA7VFbJUuKcJtA1Yvmn0C0KoXUUQ/G+JqvjQ7QuKLQYdTZ8S9OEvNaqNfwNSwvy1/LCnuajFw0O+H5bz7AcS5Iuj+9k8wgHPK1a1rQEdteOcn2XBCvta/VOVlFLv6/9K3iU3EJ1pyaZ88UkuJef8aWnH/AJGaF2gLqUbBuL+UeXyD41 13 | julien+yubikey@palard.fr 14 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8vv8vwmbyhFEa0chj8LklnnY6DRLKj2OM0NgaMTd9SsrtBeLMqTt34pU+kKl6/9EIe9P8Z1/fWFyOiTsE7Khf3rkNsoILPmEV14i18Bvtp4nMtljqZaKVkAcRjPvo7flRWNxxL2Zbo+BEr3wVCl3Sc6YV8oQzCwVPKf34AB39b+PW4f3580Aqcd4Ci6zca0Ol95tLDv1slX1A7QcpoZAne8kj5h6bb4cC7FLBC9+xOSKmzoLOlP7LsyxaUUGRyi/FeMoma1VES65aIJ5U23GtZrzZI3tKz+vpQvOVaozNTDkNLiiJkjd3Ew1I10wArpZixjwSndP8CvGFyJc1XUXZ 15 | julien+yubikey5@palard.fr 16 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE6WpbQWtaYmGwrBo9mR0wGTHiz4hc2o5OXVzLI6A4wB 17 | julien+2024@palard.fr 18 | -------------------------------------------------------------------------------- /pillar/base/users/msapiro.sls: -------------------------------------------------------------------------------- 1 | users: 2 | msapiro: 3 | access: 4 | gnumailman: 5 | allowed: true 6 | groups: 7 | - mailman 8 | sudo: true 9 | mail: 10 | allowed: true 11 | groups: 12 | - mailman 13 | sudo: true 14 | fullname: Mark Sapiro 15 | ssh_keys: 16 | - ssh-dss AAAAB3NzaC1kc3MAAACBAKRFoQ6Ebp1HO9rMhDFIvMrvQST4Q/FgPsLP2rz1cVwJ0NQ7DKB6wl95AdifypksDDClsBFxOtxD49YN4SvQS0tSqyNwOdvpROsEH4e/orDl2oJhOYzZxDkwE0UZ+VHC+XeTTWG4qWlPLMNr/ExRAxJzOKZCs66QggNXwoMfq/IJAAAAFQD7u3RSnWiM6uIYARBlUCthyPqHHQAAAIBK5gA7eGLV5+utFPnWsxGz02ZdoOwMWEPhpVaWS9lU5AhcTck1HJcuq/ktqeILuEfJIj3V4ICDNw4WjEoEv5b3YQNAbHVYzhBhg6nsPmzaF33qcugglFqeQWJzff21qN4tH5GamGj76Dqn6tk/hW+xfEmEZYxnsmk4Q3UQ4oP2cQAAAIBL5PKxDU+DmE8wXGQIoyNpj4ZzYpoUmqOveQd4nYyp02QT5oE0uIsxD1lGhkoAKlaSuJNFUlGckXx2DY+eSkIAcLo5i5AD+S8W9245+V5HwtLLj6dTTEUTN2GzR0KofPFn0MgODkaYcEVp+L8+QQJFj/fBENA5E2WomSUT6Hhz1Q== 17 | msapiro@msapiro 18 | - ssh-rsa 
AAAAB3NzaC1yc2EAAAABIwAAAIEArQw24fE8BZmcrtxwuiMOC3g5YXwi1YVLNUkhVSwrHyhvlcu5jRP7pj05pK1naEz8MQ4crNnrhTAgcpMJUAyJgQ/y0a9qrRWO4H+hVtWpMTgiGFsscoKWdu0xTnlK3EbV/vBeMPnSd0usXR3E0+I6GlWJ4/SmtCx0OwqEtfduZV8= 19 | msapiro@msapiro 20 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGjnfQcLTNj0eXqDkIQo75lXiZw+CJDWSHCD70Ao7P7q 21 | mark@msapiro 22 | -------------------------------------------------------------------------------- /pillar/base/users/nad.sls: -------------------------------------------------------------------------------- 1 | users: 2 | nad: 3 | access: 4 | docs: 5 | allowed: true 6 | groups: 7 | - docs 8 | - docsbuild 9 | sudo: true 10 | downloads: 11 | allowed: true 12 | groups: 13 | - downloads 14 | sudo: true 15 | fullname: Ned Deily 16 | ssh_keys: 17 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBmTFZYEfgTq7pyY/GAF2UYVxBt/xNTfqXchlaGUhOTGQreQidR4UnmdmZmjWr838o6/TSO4wFgyQ9ZrfsMTr3cWwDAXFCiVvp0ky0o8ajWdBgYpX1Zn/CmoBTBFaRQMuppo+UzgHvWucHJjq1dyxCKkH66p4x8NkUTJ4ooKigs7MbYKNgSm8UxcSor08MLKoXI+xf+eTeDDA4Z+IzbVDPTKZmDPTt3PE7QOXardftARtXi1esdt9L36S9zaSjLgJBy62Xt1/FqQ86xngxC7gqmjVj8/E2o/XXxp9c2zf+lAU0gCLAZo/ZKacvz1F2QZ9x1SMfokcELFnX/Oyx6ObT 18 | ned.deily 19 | -------------------------------------------------------------------------------- /pillar/base/users/offby1.sls: -------------------------------------------------------------------------------- 1 | users: 2 | offby1: 3 | access: 4 | planet: 5 | allowed: true 6 | sudo: true 7 | fullname: Chris Rose 8 | ssh_keys: 9 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHrm/qwA/L0zcBLD0EX5N2/JP2kglTEOy6pdSCzKl2Ri offby1@void.local 10 | -------------------------------------------------------------------------------- /pillar/base/users/pablogsal.sls: -------------------------------------------------------------------------------- 1 | users: 2 | pablogsal: 3 | access: 4 | buildbot: 5 | allowed: true 6 | sudo: true 7 | codespeed: 8 | allowed: true 9 | sudo: true 10 | docs: 11 | allowed: true 12 | groups: 13 | - docs 14 | - docsbuild 15 | sudo: true 16 | downloads: 17 | allowed: true 18 | groups: 19 | - downloads 20 | sudo: true 21 | fullname: Pablo Galindo 22 | ssh_keys: 23 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIwJt1t9vGbHVnzcsiFXWEFVS/LgZCbvk7YbZGVHGd2q 24 | -------------------------------------------------------------------------------- /pillar/base/users/rouilj.sls: -------------------------------------------------------------------------------- 1 | users: 2 | rouilj: 3 | access: 4 | bugs: 5 | allowed: true 6 | groups: 7 | - roundup 8 | sudo: true 9 | fullname: John Rouillard 10 | ssh_keys: 11 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDXVQUrZpnvpfSEOoQGrDgZ8jrznUERk/fMQTge5SREXMAZlQs4vRbhA00y53e0KPlYdCahTT88+UVnPP/POSKLquk1E/96DrPP4ZYFgtQQdcw5w5zvY9BeRcazdS2d6a0J2dT4nWO3zjzLiYctyAXbQhTQIbdzJ4YA/Kv9wdVgdo1dO4Jltd9MUdp9/82m6jfZsjZZ9/I7EfPIvxI4AnqmRntWp66cLqhT53Gyncd8M1H7WY4vGXCv1zmYx+Xjei5vMnElhule0+D3N4IPKmizat32D2vx8xjx67Tb/DyHdkXQp8tKfTrI65PwpTBjVL2LTa/QN5A4eM3WPe7Pm4kjsp2nPy/XVA4mgbh1PDBOn31CbqTuDcu4eBC0wmmORavgXLivkGvL6ArudcZpzkXUCqzsuSRAoZ9IaAPzlCRqEkn6srLwmZAzo+tSXseIP+TZJ8sxNNvdFZiP50X+UjbHwgy9/jNhYECL1HDVo8Vo8JRo2xKgGwruF5zlCHeBsy4TC34FdNmlEceidvwOW69EI6EW9a645oWK9y6RWFsWS1bgAHUDtmYpVS21XrayD1vVJgfZmvHiHBXECfdLSAkUUeqFrP/ff9wQyJfcwdPu6rCVM1DGdIV6UdJRwCW+ULWjORNFaHE+C7hhCCwxRIiCI/nUbR1h7wCFBSZcjWp+ew== 12 | rouilj@issues 13 | -------------------------------------------------------------------------------- /pillar/base/users/schlatterbeck.sls: -------------------------------------------------------------------------------- 1 | users: 2 | schlatterbeck: 3 | 
access: 4 | bugs: 5 | allowed: true 6 | groups: 7 | - roundup 8 | sudo: true 9 | fullname: Ralf Schlatterbeck 10 | ssh_keys: 11 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDWL8ig6kjtktAzMKnDkeKdUm1Su/wLIG9knKHEc/pgEy8Z4SZD6Q40oYHIqyeTr5LSupQd9BUn4A2cAWq/ZdplOH6n2T6DqG5wZ0hQlCmxUl4OHoTDyijdxsqk3n9YF+TC3bwsSt7OfRE2we5AL8NLwneGffV4PrYqKQIOTjrSa4S29KIHW+burBqOFFzWiw3ejU1Bwb3n4PlJJ0ceFfbM2ox8OKbN0SvsfviCnzuAszjd77EzF2pFZzWaqA5ur7Yp+EAIR1ENHyMw/1X/do8X5jwgBrA126HfSj2Ns/3PZT4QNLoiXyxN7eeO7sHsfE1s36I3qR71Q0UzDP4g7p3tZ3SrcA+NwVhQauyYX2mwRU6APUyrboeMXYHPQYiXH0zaOSHZCMr5GP70n409pzsQDOSj/F+QUMQes3Uq8CJILGzY8xGmTYV6rY48oHdnNu63rbeSwuMAoYH945eVkEAQy6z32oPmJw/YBoTQmLC0fZNPAmxZGEkqWQktrUKK092/g57IS+k0JC1ZKK2OdFH2WhCerNGnM57iGhWkjavorhHtDsIh0RQ34YSAInsmyuMGXtHxcLGzi5iHfIpFH4on9iEqcez7CoPIGKYh3u3mvXYclWEpU/JvEfNf9bzI3m0rXM0zRYyVBk5g1hZWL89PSDOYqB3hZef2/EAiihycqQ== 12 | rsc@runtux.com 13 | -------------------------------------------------------------------------------- /pillar/base/users/sdower.sls: -------------------------------------------------------------------------------- 1 | users: 2 | sdower: 3 | access: 4 | downloads: 5 | allowed: true 6 | groups: 7 | - downloads 8 | sudo: true 9 | fullname: Steve Dower 10 | ssh_keys: 11 | - ssh-rsa AAAAB3NzaC1yc2EAAAABJQAAAQEAmVmA8k/f9ehIyaPoLVNK58m7s6J3UyoQaRucwVdDZbSU7WXNFyoZj8f7wjep7WrMCqjw1vw9YjRzBUXpkTNQ5VYzx4wECHtAVSicC4y+zHOv51+AP+wVjJH2ATkaMp03L6xGQ/LeLRPRHTMQnLssmmc8OHvtrG/9uWLYTKi/29TSv6uwYJYGw7QFxq8ILb2prKRTujjxn7nHYrvakraGYPjWJR38rATg793yjjWrXCjfLE0BuzUePw2SE1IyHs0WXTbhGIv1KRcE8U6MrDI9b2wS5j4hwrpPXFiiQhQTTxwqXrxWPd1QBCF/rvqtpxLwkaD+IuA9A+pqqiIISc3AXw== 12 | -------------------------------------------------------------------------------- /pillar/base/users/sethmlarson.sls: -------------------------------------------------------------------------------- 1 | users: 2 | sethmlarson: 3 | access: 4 | cdn-logs: 5 | allowed: true 6 | groups: 7 | - adm 8 | fullname: Seth Larson 9 | ssh_keys: 10 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINzmaKr8fHsDNPeht128GMbA+fe53pjYbPgS0V8dhjfK 11 | -------------------------------------------------------------------------------- /pillar/base/users/skip.sls: -------------------------------------------------------------------------------- 1 | users: 2 | skip: 3 | access: 4 | mail: 5 | allowed: true 6 | sudo: true 7 | moin: 8 | allowed: true 9 | groups: 10 | - moin 11 | sudo: true 12 | fullname: Skip Montanaro 13 | ssh_keys: 14 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJA80VH5sZrqYnsFzUiUTKT4Wv7NsfOBIjxCyoHM14L1 15 | skip@montanaro.dyndns.org 16 | - ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJ9EM3GdblHxAdPCtSdFuDO2/8GKhDjtHpdtP+sXfF0F 17 | skip@skips-mbp.lan 18 | -------------------------------------------------------------------------------- /pillar/base/users/thomas.sls: -------------------------------------------------------------------------------- 1 | users: 2 | thomas: 3 | access: 4 | buildbot: 5 | allowed: true 6 | sudo: true 7 | docs: 8 | allowed: true 9 | groups: 10 | - docs 11 | - docsbuild 12 | sudo: true 13 | downloads: 14 | allowed: true 15 | groups: 16 | - downloads 17 | sudo: true 18 | fullname: Thomas Wouters 19 | ssh_keys: 20 | - ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQCjuwxW3m8utxCzMqxVhejtTpsBvyB3YOAh06sq/aRK1pSjPJFcVEvpf2BaIdhoiV6Ib53blbQX/rT23+uNGhbEinYHciwnTujA8caPvLxbQohXy02fvsoNeTOzgsIDe/GO3Aw/yEgQsrf59bXPdyBBOhzuwXzPEMTnOPD8kRZvk+l4o4wtF8lZ+p/UohvcjcbqD2NQyhEbrVZVygOWXb4WvAiNJun5p6HpY/98Sx1+C8lyCS9T+c+dT6E07sLhq/xcYVLPeqPbjz78YY+u7byymecgbT476BOqr+UBh+JigHT/8nUNGxlFebWKU2CJPHw5x72dxR6+zIN4xyifYdKNamLtMomvqY89VglCsNf8qCNspRY3aKXujHUV0bFnqjStsGbbxJMGLS1ISvBvsBDt8tHB+y+MGeFedqpQtgq8mnzIDUd0G/94obm1JBH3bI3guMfXUkZvYeUHa3IEmPrc4u6K/pzMyr+VAveWBijiqQ3ot+4aTsWOVA+kEt0DRpZzgfBmzK2F2qjwM5xPoe/wwjLdrRRagNTU+Mmpphc4P88wIpGRruQFIpGPq/YueqxE/bcCyX+RVan/G/mCUZxyIp/N5fDUE6NuKjVIGQtfOwf3q5n0cmwMY1ByIvu+LlEaF+6LQofLTnmSy7bRcqxZjJNUbEORxqkINpADDP3Y8w== 21 | thomas@python.org 22 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqDLpeSnWYrv9JyOzvrSkLT8DElDy+CfOGj5Zrs9x00a7LG34QFRWqakQD6zdK3B2kDBQcqcS9uZKrqVAqIg+26sLe+TH9H2RBtDB+EfJDkAXfbVtAkAcLw1RHhZ7JslcgVMj6nSQ2eWsmQuZJRYxlrinNjdkJqX90OD99oHTGSE6mbNvipZEDTu4xijwFtEQidXdKOZ/qC9gnMlfdBHjFOQpRL4699nclQnrip1IbcvdObjrtkquT+Ojl3RGif3pyvUe0m0TriqCm3pX/FbTlKinOpWLMF/FoQPa5rMbAme67i9C3HCfk1oXyXLL/8N41E+RmSR0Dki7DhmK6evND 23 | twouters@google.com 24 | -------------------------------------------------------------------------------- /pillar/base/users/zware.sls: -------------------------------------------------------------------------------- 1 | users: 2 | zware: 3 | access: 4 | buildbot: 5 | allowed: true 6 | sudo: true 7 | codespeed: 8 | allowed: true 9 | sudo: true 10 | fullname: Zachary Ware 11 | ssh_keys: 12 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC5vqLruv6gzJgZ7zaKJnaWzzWAW7azAtetqMPVN+67cGMcQtnRmG2ih6UOXc1fA0fuZudKBgqlRw3Yg2UcT8ehP505PoHVuq+j0uZ4ogzQ8BJbZIaMEfbPXdzwUfqU3Ju3sur0XQYu0HHexKUU6ZZjjwl5LOmw9dTtY0cb7N7emePy//c7IaDuNsWg+4zaTDUwEhyWLVw6Ev4e0b1ufDxTvHqRXMVCfq0IYMsRXcg8+88GGF8kIS4QMbX/GcsFfOLHj35aJbAk6dqcCZWXWX/bRL937KYl9zENOkvlRbodEZqufDBsa+7Dm29LeV9JPfKJU3+5qM/LkfYPBiw1rH0L 13 | zach@screamer 14 | -------------------------------------------------------------------------------- /pillar/dev/backup/docs.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | python-docs: 4 | source_directory: /srv/ 5 | exclude: 6 | - /srv/docsbuild 7 | target_host: backup-server.vagrant.psf.io 8 | target_directory: /backup/python-docs 9 | target_user: python-docs 10 | frequency: daily 11 | increment_retention: 7D 12 | user: root 13 | -------------------------------------------------------------------------------- /pillar/dev/backup/downloads.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | python-downloads: 4 | source_directory: /srv/ 5 | target_host: backup-server.vagrant.psf.io 6 | target_directory: /backup/python-downloads 7 | target_user: downloads 8 | frequency: daily 9 | increment_retention: 365D 10 | user: root 11 | -------------------------------------------------------------------------------- /pillar/dev/backup/server.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | server: true 3 | -------------------------------------------------------------------------------- /pillar/dev/consul.sls: -------------------------------------------------------------------------------- 1 | consul: 2 | bootstrap: 3 | vagrant: 4 | - salt-master.vagrant.psf.io 5 | - consul.vagrant.psf.io 6 | - none.vagrant.psf.io 7 | acl: 8 | default: deny 9 | dc: vagrant 10 | down: extend-cache 11 | ttl: 30s 12 | dcs: 13 | vagrant: 
salt-master.vagrant.psf.io 14 | external: 15 | - datacenter: vagrant 16 | node: pythonanywhere 17 | address: www.pythonanywhere.com 18 | service: console 19 | port: 443 20 | - datacenter: vagrant 21 | node: jobspyfound 22 | address: pythonsoftwarefoundation.applytojob.com 23 | service: jobs 24 | port: 443 25 | -------------------------------------------------------------------------------- /pillar/dev/networking.sls: -------------------------------------------------------------------------------- 1 | psf_internal_network: &psf_internal_network 192.168.50.0/24 2 | 3 | 4 | mine_functions: 5 | psf_dc: 6 | - mine_function: pillar.get 7 | - dc 8 | 9 | psf_internal: 10 | mine_function: network.ip_addrs 11 | cidr: *psf_internal_network 12 | 13 | ipv4_addrs: 14 | mine_function: network.ip_addrs 15 | 16 | ipv6_addrs: 17 | mine_function: network.ip_addrs6 18 | 19 | osfinger: 20 | - mine_function: grains.get 21 | - osfinger 22 | -------------------------------------------------------------------------------- /pillar/dev/pebble.sls: -------------------------------------------------------------------------------- 1 | pebble: 2 | enabled: True 3 | -------------------------------------------------------------------------------- /pillar/dev/postgres/clusters.sls: -------------------------------------------------------------------------------- 1 | postgresql-clusters: 2 | pg-vagrant-psf-io: 3 | host: salt-master.vagrant.psf.io 4 | port: 5432 5 | sslmode: verify-full 6 | ca_cert_pillar: 'tls:ca:PSF_CA' 7 | -------------------------------------------------------------------------------- /pillar/dev/postgres/databases.sls: -------------------------------------------------------------------------------- 1 | postgresql-databases: 2 | codespeed-cpython: 3 | owner: "codespeed-cpython" 4 | cluster: "pg-vagrant-psf-io" 5 | codespeed-pypy: 6 | owner: "codespeed-pypy" 7 | cluster: "pg-vagrant-psf-io" 8 | roundup-cpython: 9 | owner: "roundup-cpython" 10 | cluster: "pg-vagrant-psf-io" 11 | roundup-jython: 12 | owner: "roundup-jython" 13 | cluster: "pg-vagrant-psf-io" 14 | roundup-roundup: 15 | owner: "roundup-roundup" 16 | cluster: "pg-vagrant-psf-io" 17 | roundup-cpython_test: 18 | owner: "roundup-cpython_test" 19 | cluster: "pg-vagrant-psf-io" 20 | buildbot-master: 21 | owner: "buildbot-master" 22 | cluster: "pg-vagrant-psf-io" 23 | buildbot-master_test: 24 | owner: "buildbot-master_test" 25 | cluster: "pg-vagrant-psf-io" 26 | -------------------------------------------------------------------------------- /pillar/dev/secrets/bugs.sls: -------------------------------------------------------------------------------- 1 | roundup_secrets: 2 | mail: 3 | roundup: 4 | domain: roundup-tracker.org 5 | trackers: 6 | cpython: 7 | django: 8 | secret_key: foobar 9 | github: 10 | secret: deadbeef 11 | client_id: beefdead 12 | client_secret: cafebeef 13 | -------------------------------------------------------------------------------- /pillar/dev/secrets/codespeed.sls: -------------------------------------------------------------------------------- 1 | codespeed-secrets: 2 | cpython: 3 | secret_key: deadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeef 4 | pypy: 5 | secret_key: beefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdeadbeefdead 6 | -------------------------------------------------------------------------------- /pillar/dev/secrets/docs.sls: -------------------------------------------------------------------------------- 1 | docs: 2 | sentry: 3 | dsn: 
https://deadbeefdeadbeefdeadbeefdeadbeef@sentry.io/6666666 4 | fastly: 5 | service_id: deadbeefdeadbeefdead 6 | token: deadbeefdeadbeefdeadbeefdeadbeef 7 | -------------------------------------------------------------------------------- /pillar/dev/secrets/fastly.sls: -------------------------------------------------------------------------------- 1 | fastly: 2 | tokens: | 3 | 1 4 | 2 5 | api_key: fakekey 6 | -------------------------------------------------------------------------------- /pillar/dev/secrets/monitoring/server.sls: -------------------------------------------------------------------------------- 1 | graphite-web: 2 | secret_key: areallybadandinsecuresecretkey 3 | -------------------------------------------------------------------------------- /pillar/dev/secrets/postgresql-admin.sls: -------------------------------------------------------------------------------- 1 | postgres-admin: 2 | pg-vagrant-psf-io: 3 | user: salt-master 4 | password: insecurepasswordlol 5 | database: defaultdb 6 | -------------------------------------------------------------------------------- /pillar/dev/secrets/postgresql-users/all.sls: -------------------------------------------------------------------------------- 1 | postgresql-users: 2 | codespeed-cpython: 3 | cluster: pg-vagrant-psf-io 4 | dbname: codespeed-cpython 5 | password: insecurepasswordlol 6 | codespeed-pypy: 7 | cluster: pg-vagrant-psf-io 8 | dbname: codespeed-pypy 9 | password: insecurepasswordlol 10 | roundup-cpython: 11 | cluster: pg-vagrant-psf-io 12 | dbname: roundup-cpython 13 | password: insecurepasswordlol 14 | roundup-jython: 15 | cluster: pg-vagrant-psf-io 16 | dbname: roundup-jython 17 | password: insecurepasswordlol 18 | roundup-roundup: 19 | cluster: pg-vagrant-psf-io 20 | dbname: roundup-roundup 21 | password: insecurepasswordlol 22 | roundup-cpython_test: 23 | cluster: pg-vagrant-psf-io 24 | dbname: roundup-cpython_test 25 | password: insecurepasswordlol 26 | buildbot-master: 27 | cluster: pg-vagrant-psf-io 28 | dbname: buildbot-master 29 | password: insecurepasswordlol 30 | buildbot-master_test: 31 | cluster: pg-vagrant-psf-io 32 | dbname: buildbot-master_test 33 | password: insecurepasswordlol 34 | 35 | postgresql-superusers: 36 | salt-master: 37 | password: insecurepasswordlol 38 | 39 | postgresql-replicator: insecurereplicatorpasswordlol 40 | -------------------------------------------------------------------------------- /pillar/dev/secrets/postgresql-users/codespeed.sls: -------------------------------------------------------------------------------- 1 | postgresql-users: 2 | codespeed-cpython: 3 | cluster: pg-vagrant-psf-io 4 | dbname: codespeed-cpython 5 | password: insecurepasswordlol 6 | codespeed-pypy: 7 | cluster: pg-vagrant-psf-io 8 | dbname: codespeed-pypy 9 | password: insecurepasswordlol 10 | -------------------------------------------------------------------------------- /pillar/dev/secrets/postgresql-users/replica.sls: -------------------------------------------------------------------------------- 1 | postgresql-users: 2 | replicator: insecurereplicatorpasswordlol 3 | -------------------------------------------------------------------------------- /pillar/dev/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | '*': 3 | - consul 4 | - firewall.consul 5 | - networking 6 | - roles 7 | - sudoers 8 | - tls 9 | - users.* 10 | - postgres.clusters 11 | 12 | 'backup-server': 13 | - match: nodegroup 14 | - backup.* 15 | 16 | 'bugs': 17 | - match: nodegroup 18 
| - secrets.bugs 19 | - bugs 20 | - firewall.bugs 21 | 22 | 'cdn-logs': 23 | - match: nodegroup 24 | - fastly-logging 25 | - firewall.fastly-logging 26 | 27 | 'codespeed': 28 | - match: nodegroup 29 | - firewall.codespeed 30 | - secrets.codespeed 31 | - secrets.postgresql-users.codespeed 32 | - codespeed 33 | 34 | 'docs': 35 | - match: nodegroup 36 | - firewall.rs-lb-backend 37 | - groups.docs 38 | - secrets.docs 39 | - backup.docs 40 | 41 | 'downloads': 42 | - match: nodegroup 43 | - firewall.rs-lb-backend 44 | - groups.downloads 45 | - backup.downloads 46 | 47 | 'gnumailman': 48 | - match: nodegroup 49 | - firewall.mail 50 | - mail-opt-out 51 | 52 | 'hg': 53 | - match: nodegroup 54 | - firewall.rs-lb-backend 55 | 56 | 'loadbalancer': 57 | - match: nodegroup 58 | - haproxy 59 | - firewall.loadbalancer 60 | - secrets.fastly 61 | - secrets.tls.certs.loadbalancer 62 | - bugs 63 | 64 | 'mail': 65 | - match: nodegroup 66 | - firewall.mail 67 | - groups.mail 68 | - mail-opt-out 69 | 70 | 'planet': 71 | - match: nodegroup 72 | - planet 73 | - firewall.planet 74 | 75 | 'salt-master': 76 | - match: nodegroup 77 | - firewall.salt 78 | - pebble 79 | - firewall.postgresql 80 | - postgresql.server 81 | - postgres.databases 82 | - secrets.postgresql-admin 83 | - secrets.postgresql-users.all 84 | 85 | 'tracker': 86 | - match: nodegroup 87 | - secrets.postgresql-users.tracker 88 | 89 | 'wiki': 90 | - match: nodegroup 91 | - moin 92 | - firewall.rs-lb-backend 93 | -------------------------------------------------------------------------------- /pillar/prod/backup/bugs.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | python-bugs-data: 4 | source_directory: /backup/ 5 | target_host: backup.sfo1.psf.io 6 | target_directory: /backup/python-bugs 7 | target_user: python-bugs 8 | frequency: hourly 9 | increment_retention: 30D 10 | user: root 11 | -------------------------------------------------------------------------------- /pillar/prod/backup/buildbot.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | python-buildbot-config: 4 | source_directory: /etc/buildbot/ 5 | target_host: backup.sfo1.psf.io 6 | target_directory: /backup/buildbot 7 | target_user: buildbot 8 | frequency: hourly 9 | increment_retention: 90D 10 | user: root 11 | -------------------------------------------------------------------------------- /pillar/prod/backup/docs.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | python-docs: 4 | source_directory: /srv/ 5 | exclude: 6 | - /srv/docsbuild 7 | target_host: backup.sfo1.psf.io 8 | target_directory: /backup/python-docs 9 | target_user: python-docs 10 | frequency: daily 11 | increment_retention: 7D 12 | user: root 13 | -------------------------------------------------------------------------------- /pillar/prod/backup/downloads.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | python-downloads: 4 | source_directory: /srv/ 5 | target_host: backup.sfo1.psf.io 6 | target_directory: /backup/python-downloads 7 | target_user: downloads 8 | frequency: daily 9 | increment_retention: 365D 10 | user: root 11 | -------------------------------------------------------------------------------- /pillar/prod/backup/gnumailman.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 
| gnumailman-data: 4 | source_directory: /backup/ 5 | target_host: backup.sfo1.psf.io 6 | target_directory: /backup/gnumailman-data 7 | target_user: gnumailman 8 | frequency: hourly 9 | increment_retention: 90D 10 | user: root 11 | -------------------------------------------------------------------------------- /pillar/prod/backup/hg.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | python-hg: 4 | source_directory: /srv/ 5 | target_host: backup.sfo1.psf.io 6 | target_directory: /backup/python-hg 7 | target_user: hg 8 | frequency: daily 9 | increment_retention: 90D 10 | user: root 11 | hg-mercurial-static: 12 | source_directory: /usr/share/mercurial/templates/static/ 13 | target_host: backup.sfo1.psf.io 14 | target_directory: /backup/hg-mercurial-static 15 | target_user: hg 16 | frequency: daily 17 | increment_retention: 90D 18 | user: root 19 | hg-svn-config: 20 | source_directory: /etc/apache2/svn_config/ 21 | target_host: backup.sfo1.psf.io 22 | target_directory: /backup/hg-svn-config 23 | target_user: hg 24 | frequency: daily 25 | increment_retention: 90D 26 | user: root 27 | -------------------------------------------------------------------------------- /pillar/prod/backup/mail.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | mail-python-org: 4 | source_directory: / 5 | exclude: 6 | - /boot 7 | - /dev 8 | - /media 9 | - /mnt 10 | - /proc 11 | - /sys 12 | - /tmp 13 | - /var/spool/postfix 14 | target_host: backup.sfo1.psf.io 15 | target_directory: /backup/mail-python-org 16 | target_user: mail-python-org 17 | frequency: daily 18 | increment_retention: 15D 19 | user: root 20 | -------------------------------------------------------------------------------- /pillar/prod/backup/moin.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | directories: 3 | moin: 4 | source_directory: /data/ 5 | target_host: backup.sfo1.psf.io 6 | target_directory: /backup/moin 7 | target_user: moin 8 | frequency: daily 9 | increment_retention: 90D 10 | user: root 11 | -------------------------------------------------------------------------------- /pillar/prod/backup/server.sls: -------------------------------------------------------------------------------- 1 | backup: 2 | server: true 3 | backup-server: 4 | volumes: 5 | /dev/sda: /backup 6 | -------------------------------------------------------------------------------- /pillar/prod/consul.sls: -------------------------------------------------------------------------------- 1 | consul: 2 | bootstrap: 3 | nyc1: 4 | - consul-1.nyc1.psf.io 5 | - consul-2.nyc1.psf.io 6 | - consul-3.nyc1.psf.io 7 | acl: 8 | default: deny 9 | dc: nyc1 10 | down: extend-cache 11 | ttl: 30s 12 | dcs: 13 | nyc1: consul*.nyc1.psf.io 14 | # Currently, there is something wrong with consul 0.5.0 and we cannot register 15 | # external services without setting the default acl to allow. So if you add 16 | # something here you need to set the default acl to allow, and uncomment the 17 | # code at the bottom of salt/consul/init.sls. 
18 | external: 19 | - datacenter: nyc1 20 | node: pythonanywhere 21 | address: www.pythonanywhere.com 22 | service: console 23 | port: 443 24 | - datacenter: nyc1 25 | node: jobspyfound 26 | address: pythonsoftwarefoundation.applytojob.com 27 | service: jobs 28 | port: 443 29 | -------------------------------------------------------------------------------- /pillar/prod/networking.sls: -------------------------------------------------------------------------------- 1 | psf_internal_network: &psf_internal_network 10.132.0.0/16 2 | 3 | 4 | mine_functions: 5 | psf_dc: 6 | - mine_function: pillar.get 7 | - dc 8 | 9 | psf_internal: 10 | mine_function: network.ip_addrs 11 | cidr: *psf_internal_network 12 | 13 | ipv4_addrs: 14 | mine_function: network.ip_addrs 15 | 16 | ipv6_addrs: 17 | mine_function: network.ip_addrs6 18 | 19 | public_ipv4: 20 | mine_function: network.ip_addrs 21 | type: 'public' 22 | 23 | osfinger: 24 | - mine_function: grains.get 25 | - osfinger 26 | -------------------------------------------------------------------------------- /pillar/prod/ocsp.sls: -------------------------------------------------------------------------------- 1 | tls: 2 | ocsp: 3 | - ev.python.org 4 | - hg.python.org 5 | - star.python.org 6 | -------------------------------------------------------------------------------- /pillar/prod/postgres/databases.sls: -------------------------------------------------------------------------------- 1 | postgresql-databases: 2 | codespeed-cpython: 3 | owner: "codespeed-cpython" 4 | cluster: "pg-nyc1-psf-io" 5 | codespeed-pypy: 6 | owner: "codespeed-pypy" 7 | cluster: "pg-nyc1-psf-io" 8 | roundup-cpython: 9 | owner: "roundup-cpython" 10 | cluster: pg-nyc1-psf-io 11 | roundup-jython: 12 | owner: "roundup-jython" 13 | cluster: pg-nyc1-psf-io 14 | roundup-roundup: 15 | owner: "roundup-roundup" 16 | cluster: pg-nyc1-psf-io 17 | roundup-cpython_test: 18 | owner: "roundup-cpython_test" 19 | cluster: pg-nyc1-psf-io 20 | buildbot-master: 21 | owner: "buildbot-master" 22 | cluster: "pg-nyc1-psf-io" 23 | buildbot-master_test: 24 | owner: "buildbot-master_test" 25 | cluster: "pg-nyc1-psf-io" 26 | -------------------------------------------------------------------------------- /pillar/prod/swapfile.sls: -------------------------------------------------------------------------------- 1 | swap_file: 2 | swap_size: 1024 3 | swap_path: /swapfile 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | invoke 2 | fabric 3 | jinja2 4 | -------------------------------------------------------------------------------- /salt/_extensions/modules/consul.py: -------------------------------------------------------------------------------- 1 | ../../_modules/consul.py -------------------------------------------------------------------------------- /salt/_extensions/pillar/backup_ssh.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import subprocess 3 | 4 | 5 | def ext_pillar(minion_id, pillar, base_path="/etc/backup_keys/"): 6 | base_path = pathlib.Path(base_path) 7 | base_path.mkdir(parents=True, exist_ok=True) 8 | 9 | is_server = pillar.get("backup", {}).get("server", False) 10 | 11 | user_list = set() 12 | for directory, directory_config in ( 13 | pillar.get("backup", {}).get("directories", {}).items() 14 | ): 15 | user_list.add(directory_config.get("target_user")) 16 | 17 | user_keys = {} 18 | 19 | for 
user in user_list: 20 | user_private_key_path = base_path / f"{user}" 21 | user_public_key_path = base_path / f"{user}.pub" 22 | 23 | if not user_private_key_path.exists(): 24 | subprocess.run( 25 | [ 26 | "ssh-keygen", 27 | "-t", 28 | "ed25519", 29 | "-C", 30 | f"{user}@backup", 31 | "-f", 32 | user_private_key_path, 33 | ] 34 | ) 35 | if not user_public_key_path.exists(): 36 | with open(user_public_key_path, "w") as out_file: 37 | subprocess.run( 38 | ["ssh-keygen", "-y", "-f", user_private_key_path], stdout=out_file 39 | ) 40 | 41 | key_data = {"public": user_public_key_path.read_text()} 42 | if not is_server: 43 | key_data["private"] = user_private_key_path.read_text() 44 | 45 | user_keys[user] = key_data 46 | 47 | if is_server: 48 | pillar["backup_directories"] = pillar.get("backup", {}).pop("directories") 49 | 50 | return {"backup_keys": user_keys} 51 | -------------------------------------------------------------------------------- /salt/_extensions/pillar/dc.py: -------------------------------------------------------------------------------- 1 | import salt.loader 2 | 3 | def compound(tgt, minion_id=None): 4 | opts = {'grains': __grains__} 5 | opts['id'] = minion_id 6 | matcher = salt.loader.matchers(dict(__opts__, **opts))['compound_match.match'] 7 | try: 8 | return matcher(tgt) 9 | except Exception: 10 | pass 11 | return False 12 | 13 | 14 | def ext_pillar(minion_id, pillar, **mapping): 15 | for pat, datacenter in mapping.items(): 16 | if compound(pat, minion_id): 17 | return {"dc": datacenter} 18 | 19 | return {} 20 | -------------------------------------------------------------------------------- /salt/_extensions/pillar/dms.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | 3 | try: 4 | import requests 5 | from requests.auth import HTTPBasicAuth 6 | 7 | HAS_REQUESTS = True 8 | except ImportError: 9 | HAS_REQUESTS = False 10 | 11 | 12 | def ext_pillar(minion_id, pillar, api_key=None, base_path="/etc/deadmanssnitch/"): 13 | base_path = pathlib.Path(base_path) 14 | # Ensure base path exists 15 | base_path.mkdir(parents=True, exist_ok=True) 16 | 17 | minion_path = base_path / minion_id 18 | 19 | if minion_path.exists(): 20 | token = minion_path.read_text() 21 | if token: 22 | return {"deadmanssnitch": {"token": token}} 23 | 24 | snitches = requests.get( 25 | "https://api.deadmanssnitch.com/v1/snitches", 26 | params={"tags": "salt-master"}, 27 | auth=HTTPBasicAuth(api_key, ""), 28 | ) 29 | 30 | for snitch in snitches.json(): 31 | if snitch["name"] == f"salt-highstate {minion_id}": 32 | token = snitch["token"] 33 | minion_path.write_text(token) 34 | return {"deadmanssnitch": {"token": token}} 35 | 36 | snitch = requests.post( 37 | "https://api.deadmanssnitch.com/v1/snitches", 38 | auth=HTTPBasicAuth(api_key, ""), 39 | json={ 40 | "name": f"salt-highstate {minion_id}", 41 | "interval": "hourly", 42 | "alert_type": "basic", 43 | "tags": ["salt-master"], 44 | }, 45 | ) 46 | token = snitch.json()["token"] 47 | minion_path.write_text(token) 48 | return {"deadmanssnitch": {"token": token}} 49 | -------------------------------------------------------------------------------- /salt/_grains/detect_virt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import subprocess 4 | 5 | 6 | def main(): 7 | try: 8 | result = subprocess.run( 9 | ["/usr/bin/systemd-detect-virt"], stdout=subprocess.PIPE, check=True 10 | ).stdout.rstrip() 11 | except FileNotFoundError: 12 | result = 
"unknown" 13 | return {"detect_virt": result} 14 | -------------------------------------------------------------------------------- /salt/_states/consul.py: -------------------------------------------------------------------------------- 1 | def external_service(name, datacenter, node, address, port, token=None): 2 | ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} 3 | 4 | if token is None: 5 | token = __pillar__['consul']['acl']['tokens']['default'] 6 | 7 | # Determine if the cluster is ready 8 | if not __salt__["consul.cluster_ready"](): 9 | ret["result"] = True 10 | ret["comment"] = "Consul cluster is not ready." 11 | return ret 12 | 13 | # Determine if the node we're attempting to register exists 14 | if __salt__["consul.node_exists"](node, address, dc=datacenter): 15 | # Determine if the service we're attempting to register exists 16 | if __salt__["consul.node_service_exists"]( 17 | node, name, port, dc=datacenter): 18 | ret["result"] = True 19 | ret["comment"] = ( 20 | "External Service {} already in the desired state.".format( 21 | name, 22 | ) 23 | ) 24 | return ret 25 | 26 | if __opts__['test'] == True: 27 | ret['comment'] = 'The state of "{0}" will be changed.'.format(name) 28 | ret['changes'] = { 29 | 'old': None, 30 | 'new': 'External Service {}'.format(name), 31 | } 32 | ret["result"] = None 33 | return ret 34 | 35 | __salt__["consul.register_external_service"]( 36 | node, address, datacenter, name, port, token, 37 | ) 38 | 39 | ret["result"] = True 40 | ret["comment"] = "Registered external service: '{}'.".format(name) 41 | ret["changes"] = { 42 | "old": None, 43 | "new": 'External Service {}'.format(name), 44 | } 45 | 46 | return ret 47 | -------------------------------------------------------------------------------- /salt/_states/postgres_replica.py: -------------------------------------------------------------------------------- 1 | def slot(name): 2 | ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} 3 | 4 | sql = "SELECT * FROM pg_replication_slots WHERE slot_name = '%s'" % name 5 | if __salt__["postgres.psql_query"](sql): 6 | ret["result"] = True 7 | ret["comment"] = "Replication slot '{}' already exists.".format( 8 | name, 9 | ) 10 | return ret 11 | 12 | if __opts__['test']: 13 | ret['comment'] = 'Replication slot "{0}" will be created.'.format(name) 14 | ret['changes'] = { 15 | 'old': None, 16 | 'new': name, 17 | } 18 | ret["result"] = None 19 | return ret 20 | 21 | __salt__["postgres.psql_query"]( 22 | """ SELECT * FROM 23 | pg_create_physical_replication_slot('%s'); 24 | """ % name 25 | ) 26 | 27 | ret["result"] = True 28 | ret["comment"] = "Created replication slot: '{}'.".format(name) 29 | ret["changes"] = { 30 | "old": None, 31 | "new": name, 32 | } 33 | 34 | return ret 35 | -------------------------------------------------------------------------------- /salt/backup/base.sls: -------------------------------------------------------------------------------- 1 | rdiff-backup: 2 | pkg.installed 3 | 4 | rsync: 5 | pkg.installed 6 | -------------------------------------------------------------------------------- /salt/backup/client/README.md: -------------------------------------------------------------------------------- 1 | 2 | Format for pillar data: 3 | 4 | # Root Key, enables the state 5 | backup: 6 | # Dictionary of directories to backup 7 | directories: 8 | # A backup configuration 9 | postgres-archives: 10 | # Frequency of backup, currently {hourly, daily} are supported 11 | frequency: hourly 12 | # Duration that 
increments are retained, in days 13 | increment_retention: 365D 14 | # User to run backup as 15 | user: devpypi 16 | # Source Directory to backup 17 | source_directory: /var/lib/pgsql/9.3/backup/archives 18 | # Target backup server 19 | target_host: 172.16.57.201 20 | # Target directory on backup server 21 | target_directory: /backup/postgres/archives 22 | # Target user on backup server 23 | target_user: devpypi 24 | # Backup example with pre/post/cleanup scripts 25 | postgres-base: 26 | frequency: daily 27 | increment_retention: 30D 28 | user: postgres 29 | source_directory: /var/lib/pgsql/9.3/backups/base 30 | target_host: 172.16.57.201 31 | target_directory: /backup/postgres/base 32 | target_user: postgres 33 | # Script to run before rdiff-backup command 34 | pre_script: 'pg_basebackup -D /var/lib/pgsql/9.3/backups/base/$(date --iso-8601=seconds)' 35 | # Script to run after rdiff-backup command 36 | post_script: '/usr/local/backup/postgres-archives/scripts/backup.bash' 37 | # Cleanup script to remove old backups 38 | cleanup_script: 'find /var/lib/pgsql/9.3/backups/base -maxdepth 1 -type d -mtime +7 -execdir rm -rf {} \;' 39 | 40 | -------------------------------------------------------------------------------- /salt/backup/client/templates/backup.bash.jinja: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | exec 200<$0 4 | flock -n 200 || exit 1 5 | 6 | {{ pre_script }} 7 | {{ remote_command }} 8 | {{ post_script }} 9 | 10 | {{ cleanup_script }} 11 | -------------------------------------------------------------------------------- /salt/backup/client/templates/cron.jinja: -------------------------------------------------------------------------------- 1 | {% if job_frequency == 'hourly' %} 2 | {% set cron = '0 * * * *' %} 3 | {% elif job_frequency == 'daily' %} 4 | {% set cron = '0 0 * * *' %} 5 | {% else %} 6 | {% set cron = '0 * * * *' %} 7 | {% endif %} 8 | {{ cron }} {{ job_user }} {{ job_command }} 9 | -------------------------------------------------------------------------------- /salt/backup/server/README.md: -------------------------------------------------------------------------------- 1 | Format for pillar data: 2 | 3 | # Root Key, enables the state 4 | backup-server: 5 | # Volumes to format and mount 6 | volumes: 7 | # device mount point 8 | /dev/sdb: /backup 9 | # Dictionary of directories for backup clients 10 | directories: 11 | # Directory for backup client 12 | /backup/postgres/archives: 13 | # Retention Period for backup increments (see rdiff-backup --remove-older-than) 14 | increment_retention: 10d 15 | # User the client can access the backup server as 16 | user: postgres 17 | # Authorized Key for client access 18 | authorized_key: ssh-rsa AAAAB3NzaC1y.. 
...CXVxa6LHKJB6RDT3eYyMQSugFSCrHxQ8j/F 19 | 20 | -------------------------------------------------------------------------------- /salt/backup/server/init.sls: -------------------------------------------------------------------------------- 1 | 2 | include: 3 | - backup.base 4 | 5 | {# TODO: When we have retired distros older than 20.04, remove this #} 6 | /etc/ssh/sshd_config.d/pubkey.conf: 7 | file.managed: 8 | - contents: | 9 | PubkeyAcceptedAlgorithms +ssh-rsa 10 | - user: root 11 | - group: root 12 | - mode: "0644" 13 | 14 | {% for backup, config in salt['pillar.get']('backup_directories', {}).items() %} 15 | 16 | {{ backup }}-user: 17 | user.present: 18 | - name: {{ config['target_user'] }} 19 | 20 | {{ backup }}-ssh: 21 | ssh_auth: 22 | - present 23 | - user: {{ config['target_user'] }} 24 | - names: 25 | - {{ salt['pillar.get']("backup_keys", {}).get(config['target_user'], {}).get('public') }} 26 | - options: 27 | - command="rdiff-backup server" 28 | - no-pty 29 | - no-port-forwarding 30 | - no-agent-forwarding 31 | - no-X11-forwarding 32 | - require: 33 | - user: {{ config['target_user'] }} 34 | 35 | {{ backup }}: 36 | file.directory: 37 | - name: {{ config['target_directory'] }} 38 | - user: {{ config['target_user'] }} 39 | - makedirs: True 40 | - require: 41 | - user: {{ config['target_user'] }} 42 | 43 | {{ backup }}-increment-cleanup: 44 | file.managed: 45 | - name: /etc/cron.d/{{ backup }}-backup-cleanup 46 | - user: root 47 | - group: root 48 | - template: jinja 49 | - source: salt://backup/server/templates/cron.jinja 50 | - context: 51 | cron: '0 3 * * *' 52 | job_user: root 53 | job_command: 'rdiff-backup --terminal-verbosity 1 --force remove increments --older-than {{ config['increment_retention'] }} {{ config['target_directory'] }}' 54 | 55 | {% endfor %} 56 | -------------------------------------------------------------------------------- /salt/backup/server/templates/cron.jinja: -------------------------------------------------------------------------------- 1 | {{ cron }} {{ job_user }} {{ job_command }} 2 | -------------------------------------------------------------------------------- /salt/base/auto-highstate.sls: -------------------------------------------------------------------------------- 1 | {% set dms_token = salt["pillar.get"]("deadmanssnitch:token") %} 2 | 3 | {% if dms_token %} 4 | 15m-interval-highstate: 5 | cron.present: 6 | - identifier: 15m-interval-highstate 7 | - name: "timeout 5m salt-call state.highstate >> /var/log/salt/cron-highstate.log 2>&1; curl https://nosnch.in/{{ dms_token }} &> /dev/null" 8 | - minute: '*/15' 9 | {% else %} 10 | 15m-interval-highstate: 11 | cron.present: 12 | - identifier: 15m-interval-highstate 13 | - name: "timeout 5m salt-call state.highstate >> /var/log/salt/cron-highstate.log 2>&1" 14 | - minute: '*/15' 15 | {% endif %} 16 | 17 | /etc/logrotate.d/salt: 18 | {% if grains["oscodename"] == "xenial" %} 19 | file.absent: [] 20 | {% else %} 21 | file.managed: 22 | - source: salt://base/config/salt-logrotate.conf 23 | {% endif %} 24 | -------------------------------------------------------------------------------- /salt/base/config/known_hosts.jinja: -------------------------------------------------------------------------------- 1 | {% for server in salt['minion.list']()['minions']|sort -%} 2 | {% set keys = salt['ssh.recv_known_host_entries'](server, hash_known_hosts=False) -%} 3 | {% if keys -%} 4 | {% for key in keys|sort(attribute='enc') -%} 5 | {{ key['hostname'] }} {{ key['enc'] }} {{ key['key'] }} 6 | {% endfor 
%}{% endif %}{%- endfor %} 7 | -------------------------------------------------------------------------------- /salt/base/config/letsencrypt-well-known-nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 9000 ssl default_server; 3 | 4 | ssl_certificate /etc/ssl/private/salt.psf.io.pem; 5 | ssl_certificate_key /etc/ssl/private/salt.psf.io.pem; 6 | 7 | server_name _; 8 | 9 | location /.well-known/acme-challenge/ { 10 | alias /etc/lego/.well-known/acme-challenge/; 11 | try_files $uri =404; 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /salt/base/config/publish-files-nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 9001 ssl; 3 | 4 | ssl_certificate /etc/ssl/private/salt.psf.io.pem; 5 | ssl_certificate_key /etc/ssl/private/salt.psf.io.pem; 6 | 7 | server_name salt-public.psf.io; 8 | 9 | location / { 10 | root /srv/public; 11 | try_files $uri =404; 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /salt/base/config/salt-logrotate.conf: -------------------------------------------------------------------------------- 1 | /var/log/salt/cron-highstate.log { 2 | daily 3 | rotate 7 4 | missingok 5 | notifempty 6 | compress 7 | delaycompress 8 | } 9 | -------------------------------------------------------------------------------- /salt/base/config/salt-roles.conf.jinja: -------------------------------------------------------------------------------- 1 | nodegroups: 2 | {% for role, role_config in pillar["roles"].items()|sort -%} 3 | "{{ role }}": "{{ role_config["pattern"] }}" 4 | {% endfor %} 5 | -------------------------------------------------------------------------------- /salt/base/config/salt-server-list.rst.jinja: -------------------------------------------------------------------------------- 1 | .. 2 | START AUTOMATED SECTION **DO NOT DIRECTLY EDIT - Salt will blow away your changes!!!** 3 | 4 | {% set role_mapping = {} -%} 5 | {% for role, config in salt["pillar.get"]("roles", {}).items() -%} 6 | {% set _dummy = role_mapping.update( {config["pattern"]: role} ) -%} 7 | {% endfor %} 8 | 9 | .. csv-table:: 10 | :header: "Name", "Purpose", "Contact", "Distro", "Datacenter" 11 | 12 | {% for server in salt['minion.list']()['minions']|sort -%} 13 | {% set role = salt['match.filter_by'](role_mapping, minion_id=server) -%} 14 | {% set datacenter = salt['mine.get']("*", "psf_dc").get(server, "") -%} 15 | {% set distro = salt['mine.get']("*", "osfinger").get(server, "") -%} 16 | {% set roleconfig = salt["pillar.get"]("roles", {}).get(role, {}) %} 17 | "{{ server }}", "{{ roleconfig.get("purpose", "") }}", "{{ roleconfig.get("contact", "") }}", "{{ distro }}", "{{ datacenter }}" 18 | {%- endfor %} 19 | 20 | ..
21 | END AUTOMATED SECTION **DO NOT DIRECTLY EDIT - Salt will blow away your changes!!!** 22 | -------------------------------------------------------------------------------- /salt/base/config/sources.list.jinja: -------------------------------------------------------------------------------- 1 | {% if grains["osarch"] == "arm64" %} 2 | {% set base_repo = "ports.ubuntu.com/ubuntu-ports/" %} 3 | {% else %} 4 | {% set base_repo = "us.archive.ubuntu.com/ubuntu/" %} 5 | {% endif %} 6 | 7 | {# 24.04 Noble is the first to use /etc/apt/sources.list.d/ubuntu.list instead of 'sources.list' #} 8 | {% if grains["oscodename"] != "noble" %} 9 | ###### Ubuntu Main Repos 10 | deb http://{{ base_repo }} {{ grains["oscodename"] }} main restricted universe 11 | deb-src http://{{ base_repo }} {{ grains["oscodename"] }} main restricted universe 12 | 13 | ###### Ubuntu Update Repos 14 | deb http://{{ base_repo }} {{ grains["oscodename"] }}-security main restricted universe 15 | deb http://{{ base_repo }} {{ grains["oscodename"] }}-updates main restricted universe 16 | deb-src http://{{ base_repo }} {{ grains["oscodename"] }}-security main restricted universe 17 | deb-src http://{{ base_repo }} {{ grains["oscodename"] }}-updates main restricted universe 18 | {% endif %} -------------------------------------------------------------------------------- /salt/base/config/ssmtp.conf.jinja: -------------------------------------------------------------------------------- 1 | root=infrastructure-staff@python.org 2 | mailhub={{ smtp.server }} 3 | UseSTARTTLS={% if smtp.starttls %}yes{% else %}no{% endif %} 4 | UseTLS={% if smtp.tls %}yes{% else %}no{% endif %} 5 | AuthUser={{ smtp.user }} 6 | AuthPass={{ smtp.password }} 7 | FromLineOverride=yes 8 | RewriteDomain={{ grains["fqdn"] }} 9 | -------------------------------------------------------------------------------- /salt/base/harden/config/limits.conf: -------------------------------------------------------------------------------- 1 | # Prevent core dumps for all users. These are usually only needed by developers 2 | # and may contain sensitive information. 3 | * hard core 0 4 | -------------------------------------------------------------------------------- /salt/base/harden/config/pam_passwdqc: -------------------------------------------------------------------------------- 1 | Name: passwdqc password strength enforcement 2 | Default: yes 3 | Priority: 1024 4 | Conflicts: cracklib 5 | Password-Type: Primary 6 | Password: 7 | requisite pam_passwdqc.so min=disabled,disabled,16,12,8 8 | -------------------------------------------------------------------------------- /salt/base/harden/config/pam_tally2: -------------------------------------------------------------------------------- 1 | Name: tally2 lockout after failed attempts enforcement 2 | Default: yes 3 | Priority: 1024 4 | Conflicts: cracklib 5 | Auth-Type: Primary 6 | Auth-Initial: 7 | required pam_tally2.so deny=5 onerr=fail unlock_time=600 8 | Account-Type: Primary 9 | Account-Initial: 10 | required pam_tally2.so 11 | -------------------------------------------------------------------------------- /salt/base/harden/config/profile.sh: -------------------------------------------------------------------------------- 1 | # Disable core dumps via soft limits for all users. Compliance to this setting 2 | # is voluntary and can be modified by users up to a hard limit. 
3 | ulimit -S -c 0 > /dev/null 2>&1 4 | -------------------------------------------------------------------------------- /salt/base/harden/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - .limits 3 | - .login_defs 4 | - .minimize_access 5 | - .pam 6 | - .profile 7 | -------------------------------------------------------------------------------- /salt/base/harden/limits.sls: -------------------------------------------------------------------------------- 1 | /etc/security/limits.d/10.hardcore.conf: 2 | file.managed: 3 | - source: salt://base/harden/config/limits.conf 4 | - user: root 5 | - group: root 6 | - mode: "0440" 7 | -------------------------------------------------------------------------------- /salt/base/harden/login_defs.sls: -------------------------------------------------------------------------------- 1 | /etc/login.defs: 2 | file.managed: 3 | - source: salt://base/harden/config/login.defs.jinja 4 | - template: jinja 5 | - user: root 6 | - group: root 7 | - mode: "0444" 8 | -------------------------------------------------------------------------------- /salt/base/harden/minimize_access.sls: -------------------------------------------------------------------------------- 1 | # Remove write permissions from path folders ($PATH) for all regular users 2 | # this prevents changing any system-wide command from normal users 3 | {% for folder in ["/usr/local/sbin", "/usr/local/bin", "/usr/sbin", "/usr/bin", "/sbin", "/bin"] %} 4 | "remove write permission from {{ folder }}": 5 | cmd.run: 6 | - name: chmod go-w -R {{ folder }} 7 | - unless: find {{ folder }} -perm -go+w -type f | wc -l | egrep '^0$' 8 | {% endfor %} 9 | 10 | 11 | # Shadow must only be accessible to user root 12 | /etc/shadow: 13 | file.managed: 14 | - user: root 15 | - group: root 16 | - mode: "0600" 17 | - replace: False 18 | 19 | 20 | # su must only be accessible to user and group root 21 | /bin/su: 22 | file.managed: 23 | - user: root 24 | - group: root 25 | - mode: "0750" 26 | - replace: False 27 | -------------------------------------------------------------------------------- /salt/base/harden/pam.sls: -------------------------------------------------------------------------------- 1 | libpam-ccreds: 2 | pkg.purged 3 | 4 | 5 | # Remove pam_cracklib, because it does not play nice with passwdqc 6 | libpam-cracklib: 7 | pkg.purged 8 | 9 | 10 | libpam-passwdqc: 11 | pkg.installed: 12 | - require: 13 | - pkg: libpam-cracklib 14 | 15 | 16 | # See NSA 2.3.3.1.2 17 | /usr/share/pam-configs/passwdqc: 18 | file.managed: 19 | - source: salt://base/harden/config/pam_passwdqc 20 | - user: root 21 | - group: root 22 | - mode: "0640" 23 | - require: 24 | - pkg: libpam-passwdqc 25 | 26 | 27 | libpam-modules: 28 | pkg.installed 29 | 30 | 31 | /usr/share/pam-configs/tally2: 32 | file.managed: 33 | - source: salt://base/harden/config/pam_tally2 34 | -------------------------------------------------------------------------------- /salt/base/harden/profile.sls: -------------------------------------------------------------------------------- 1 | /etc/profile.d/softcore.sh: 2 | file.managed: 3 | - source: salt://base/harden/config/profile.sh 4 | - user: root 5 | - group: root 6 | - mode: "0755" 7 | -------------------------------------------------------------------------------- /salt/base/mail.sls: -------------------------------------------------------------------------------- 1 | {% set smtp = salt["pillar.get"]("system-mail") %} 2 | 3 | {% if smtp %} 4 | mail-pkgs: 5 | 
pkg.installed: 6 | - pkgs: 7 | - ssmtp 8 | - bsd-mailx 9 | 10 | 11 | /etc/ssmtp/ssmtp.conf: 12 | file.managed: 13 | - source: salt://base/config/ssmtp.conf.jinja 14 | - template: jinja 15 | - context: 16 | smtp: {{ smtp }} 17 | - user: root 18 | - group: root 19 | - mode: "0640" 20 | - show_diff: False 21 | - require: 22 | - pkg: mail-pkgs 23 | {% endif %} 24 | -------------------------------------------------------------------------------- /salt/base/motd.sls: -------------------------------------------------------------------------------- 1 | /etc/update-motd.d/10-help-text: 2 | file.managed: 3 | - mode: "0644" 4 | - replace: False 5 | 6 | /etc/update-motd.d/60-unminimize: 7 | file.managed: 8 | - mode: "0644" 9 | - replace: False 10 | 11 | /etc/update-motd.d/91-contract-ua-esm-status: 12 | file.managed: 13 | - mode: "0644" 14 | - replace: False 15 | 16 | /etc/update-motd.d/99-psf: 17 | file.managed: 18 | - mode: "0755" 19 | - contents: | 20 | #!/bin/bash 21 | 22 | cat << 'EOF' 23 | 24 | ============================================================ 25 | __ _ _ _ 26 | / _\ __ _| | |_ /\/\ __ _ _ __ __ _ __ _ ___ __| | 27 | \ \ / _` | | __| / \ / _` | '_ \ / _` |/ _` |/ _ \/ _` | 28 | _\ \ (_| | | |_ / /\/\ \ (_| | | | | (_| | (_| | __/ (_| | 29 | \__/\__,_|_|\__| \/ \/\__,_|_| |_|\__,_|\__, |\___|\__,_| 30 | |___/ 31 | ============================================================ 32 | 33 | !! WARNING !!: This host is managed by the PSF Salt infrastructure. 34 | Any changes made to this host may be reverted. 35 | 36 | Repository: https://github.com/python/psf-salt 37 | 38 | EOF 39 | 40 | echo "" 41 | -------------------------------------------------------------------------------- /salt/base/repo.sls: -------------------------------------------------------------------------------- 1 | {% if grains["oscodename"] in ["jammy", "noble"] %} 2 | psfkey: 3 | file.managed: 4 | - name: /etc/apt/keyrings/packagecloud.asc 5 | - mode: "0644" 6 | - source: salt://base/config/APT-GPG-KEY-PSF 7 | 8 | psf: 9 | pkgrepo.managed: 10 | - name: "deb [signed-by=/etc/apt/keyrings/packagecloud.asc arch={{ grains["osarch"] }}] https://packagecloud.io/psf/infra/ubuntu {{ grains['oscodename'] }} main" 11 | - aptkey: False 12 | - file: /etc/apt/sources.list.d/psf.list 13 | - require: 14 | - file: psfkey 15 | {% else %} 16 | psf: 17 | pkgrepo.managed: 18 | - name: "deb https://packagecloud.io/psf/infra/ubuntu {{ grains['oscodename'] }} main" 19 | - file: /etc/apt/sources.list.d/psf.list 20 | - key_url: salt://base/config/APT-GPG-KEY-PSF 21 | {% endif %} 22 | 23 | # Make source list globally readable. 
24 | /etc/apt/sources.list.d/psf.list: 25 | file.managed: 26 | - mode: "0644" 27 | - replace: False 28 | - require: 29 | - pkgrepo: psf 30 | -------------------------------------------------------------------------------- /salt/base/sanity.sls: -------------------------------------------------------------------------------- 1 | niceties: 2 | pkg.installed: 3 | - pkgs: 4 | - atop 5 | - htop 6 | - traceroute 7 | 8 | {% if grains["oscodename"] in ["noble"] %} 9 | systemd-timesyncd: 10 | pkg: 11 | - installed 12 | service: 13 | {% if grains["detect_virt"] in ["docker"] %} 14 | - enabled 15 | {% else %} 16 | - running 17 | - enable: True 18 | {% endif %} 19 | {% else %} 20 | ntp-packages: 21 | pkg.installed: 22 | - pkgs: 23 | - ntp 24 | - ntpdate 25 | 26 | ntp: 27 | service: 28 | - running 29 | - enable: True 30 | {% endif %} 31 | 32 | # Cron has a default $PATH of only /usr/bin:/bin, however the root user's 33 | # default $PATH in the shell includes various sbin directories. This can cause 34 | # scripts to succeed in the shell but fail when run from cron. This bit of 35 | # sanity will ensure consistent default $PATH for the root user. 36 | root-cron-path: 37 | cron.env_present: 38 | - name: PATH 39 | - user: root 40 | - value: "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" 41 | 42 | 43 | # Get rid of the Rackspace Mirrors and use the real mirrors, the Rackspace 44 | # mirrors are often way behind. 45 | /etc/apt/sources.list: 46 | file.managed: 47 | - source: salt://base/config/sources.list.jinja 48 | - template: jinja 49 | - user: root 50 | - group: root 51 | - mode: "0644" 52 | - order: 2 53 | 54 | module.wait: 55 | - name: pkg.refresh_db 56 | - order: 2 57 | - watch: 58 | - file: /etc/apt/sources.list 59 | -------------------------------------------------------------------------------- /salt/base/swap.sls: -------------------------------------------------------------------------------- 1 | {% set swap_file = salt["pillar.get"]("swap_file", {}) %} 2 | {% set swap_size = swap_file.get("swap_size", "1024") %} 3 | {% set swap_path = swap_file.get("swap_path") %} 4 | 5 | {% if swap_path %} 6 | {{ swap_path }}: 7 | cmd.run: 8 | - name: | 9 | fallocate -l {{ swap_size }}M {{ swap_path }} 10 | chmod 0600 {{ swap_path }} 11 | mkswap {{ swap_path }} 12 | swapon {{ swap_path }} 13 | - unless: test -f {{ swap_path }} 14 | 15 | mount.swap: 16 | - persist: true 17 | {% endif %} -------------------------------------------------------------------------------- /salt/bugs/config/bpo-suggest.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=bugs.python.org suggestion server 3 | 4 | [Service] 5 | User=roundup 6 | WorkingDirectory=/srv/roundup/trackers/cpython/scripts 7 | 8 | SyslogIdentifier=bpo-suggest-server 9 | 10 | ExecStart=/srv/roundup/env/bin/gunicorn -b unix:///var/run/cpython-extras/suggest.sock -w 4 suggest 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /salt/bugs/config/cpython/tracker-extras.conf: -------------------------------------------------------------------------------- 1 | location /review/ { 2 | 3 | location /review/static/ { 4 | alias /srv/roundup/trackers/cpython/rietveld/static/; 5 | } 6 | 7 | proxy_pass http://cpython-rietveld/review/; 8 | proxy_set_header Host $host; 9 | proxy_set_header X-Real-IP $remote_addr; 10 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 11 | } 12 | 13 | location /suggest/ { 
14 | proxy_pass http://cpython-suggest/; 15 | proxy_set_header Host $host; 16 | proxy_set_header X-Real-IP $remote_addr; 17 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 18 | } 19 | -------------------------------------------------------------------------------- /salt/bugs/config/cpython/tracker-upstreams.conf: -------------------------------------------------------------------------------- 1 | upstream cpython-rietveld { 2 | server unix:/var/run/cpython-extras/rietveld.sock fail_timeout=0; 3 | } 4 | 5 | upstream cpython-suggest { 6 | server unix:/var/run/cpython-extras/suggest.sock fail_timeout=0; 7 | } 8 | -------------------------------------------------------------------------------- /salt/bugs/config/detector-config.ini.jinja: -------------------------------------------------------------------------------- 1 | {% for key, configs in detector_config.items() %} 2 | [{{ key }}] 3 | {% for k, v in configs.items() %} 4 | {{ k }} = {{ v }} 5 | {% endfor %} 6 | {% endfor %} 7 | -------------------------------------------------------------------------------- /salt/bugs/config/instance-forward.jinja: -------------------------------------------------------------------------------- 1 | |"/srv/roundup/env/bin/roundup-mailgw /srv/roundup/trackers/{{ tracker }}" 2 | -------------------------------------------------------------------------------- /salt/bugs/config/instance.service.jinja: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Roundup Server - {{ tracker }} 3 | 4 | [Service] 5 | User=roundup 6 | WorkingDirectory=/srv/roundup/trackers/{{ tracker }} 7 | SyslogIdentifier=roundup-server-{{ tracker }} 8 | ExecStart=/srv/roundup/env/bin/gunicorn -b unix:///var/run/roundup/{{ tracker }}.sock wsgi -w {{ workers }} --preload --max-requests 128 --max-requests-jitter 32 --timeout 60 --graceful-timeout 60 --access-logfile - --error-logfile - --statsd-host 127.0.0.1:8125 --statsd-prefix roundup.{{ tracker }} 9 | ExecReload=/bin/kill -HUP $MAINPID 10 | ExecStop = /bin/kill -s TERM $MAINPID 11 | KillMode=mixed 12 | Restart=on-failure 13 | RestartSec=15s 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | -------------------------------------------------------------------------------- /salt/bugs/config/instance_wsgi.py.jinja: -------------------------------------------------------------------------------- 1 | from roundup.cgi.wsgi_handler import RequestDispatcher 2 | 3 | TRACKER_HOME = '/srv/roundup/trackers/{{ tracker }}' 4 | application = RequestDispatcher(TRACKER_HOME) 5 | -------------------------------------------------------------------------------- /salt/bugs/config/nginx.conf.jinja: -------------------------------------------------------------------------------- 1 | log_format timed_combined_{{ tracker }} '$remote_addr - $remote_user [$time_local] ' 2 | '"$request" $status $body_bytes_sent ' 3 | '"$http_referer" "$http_user_agent" ' 4 | '$request_time $upstream_response_time $pipe'; 5 | 6 | limit_req_zone $binary_remote_addr zone=limit-{{ tracker }}:10m rate=5r/s; 7 | 8 | 9 | upstream tracker-{{ tracker }} { 10 | server unix:/var/run/roundup/{{ tracker }}.sock fail_timeout=0; 11 | } 12 | 13 | include conf.d/tracker-extras/upstreams-{{ tracker }}*.conf; 14 | 15 | server { 16 | listen {{ port }} ssl; 17 | server_name {{ server_name }}; 18 | include mime.types; 19 | 20 | set_real_ip_from {{ pillar["psf_internal_network"] }}; 21 | real_ip_header X-Client-IP; 22 | 23 | ssl_certificate /etc/ssl/private/bugs.psf.io.pem; 24 | 
ssl_certificate_key /etc/ssl/private/bugs.psf.io.pem; 25 | 26 | add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload"; 27 | add_header X-Frame-Options "sameorigin"; 28 | add_header X-Xss-Protection "1; mode=block"; 29 | add_header X-Content-Type-Options "nosniff"; 30 | add_header X-Permitted-Cross-Domain-Policies "none"; 31 | 32 | error_log /var/log/nginx/roundup-{{ tracker }}.error.log; 33 | access_log /var/log/nginx/roundup-{{ tracker }}.access.log timed_combined_{{ tracker }}; 34 | 35 | root /srv/roundup/trackers/{{ tracker }}/; 36 | 37 | include conf.d/tracker-extras/{{ tracker }}*.conf; 38 | 39 | gzip on; 40 | gzip_http_version 1.1; 41 | gzip_proxied any; 42 | gzip_min_length 500; 43 | gzip_comp_level 6; # default comp_level is 1 44 | gzip_disable msie6; 45 | gzip_types text/plain text/css 46 | text/xml application/xml 47 | text/javascript application/javascript 48 | text/json application/json; 49 | 50 | location /@@file/ { 51 | rewrite ^/@@file/(.*) /html/$1 break; 52 | expires 1h; 53 | } 54 | 55 | location / { 56 | limit_req zone=limit-{{ tracker }} burst=10 nodelay; 57 | proxy_pass http://tracker-{{ tracker }}/; 58 | proxy_set_header Host $host; 59 | proxy_set_header X-Real-IP $remote_addr; 60 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /salt/bugs/config/postfix/main.cf: -------------------------------------------------------------------------------- 1 | # See /usr/share/postfix/main.cf.dist for a commented, more complete version 2 | 3 | 4 | # Debian specific: Specifying a file name will cause the first 5 | # line of that file to be used as the name. The Debian default 6 | # is /etc/mailname. 7 | #myorigin = /etc/mailname 8 | 9 | smtpd_banner = $myhostname ESMTP $mail_name (Ubuntu) 10 | biff = no 11 | 12 | # appending .domain is the MUA's job. 13 | append_dot_mydomain = no 14 | 15 | # Uncomment the next line to generate "delayed mail" warnings 16 | #delay_warning_time = 4h 17 | 18 | readme_directory = no 19 | 20 | # See http://www.postfix.org/COMPATIBILITY_README.html -- default to 3.6 on 21 | # fresh installs. 
22 | compatibility_level = 3.6 23 | 24 | 25 | 26 | # TLS parameters 27 | smtpd_tls_cert_file=/etc/ssl/private/bugs.psf.io.pem 28 | smtpd_tls_key_file=/etc/ssl/private/bugs.psf.io.pem 29 | smtpd_tls_security_level=may 30 | 31 | smtp_tls_CApath=/etc/ssl/certs 32 | smtp_tls_security_level=may 33 | smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache 34 | 35 | 36 | smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination 37 | myhostname = {{ grains['fqdn'] }} 38 | alias_maps = hash:/etc/aliases 39 | alias_database = hash:/etc/aliases 40 | myorigin = /etc/mailname 41 | mydestination = $myhostname, localhost 42 | relayhost = 43 | mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128 44 | mailbox_size_limit = 0 45 | recipient_delimiter = + 46 | inet_interfaces = all 47 | inet_protocols = all 48 | 49 | virtual_alias_domains = {% for tracker, config in pillar["bugs"]["trackers"].items() %}{% set tracker_email_list = config["config"]["tracker__email"].split('@') %}{{ tracker_email_list[-1] }} {% endfor %} 50 | virtual_alias_maps = hash:/etc/postfix/virtual 51 | 52 | smtpd_recipient_restrictions = check_recipient_access hash:/etc/postfix/reject_recipients 53 | 54 | postscreen_upstream_proxy_protocol = haproxy 55 | postscreen_upstream_proxy_timeout = 5s 56 | -------------------------------------------------------------------------------- /salt/bugs/config/postfix/reject_recipients: -------------------------------------------------------------------------------- 1 | {% for tracker, config in pillar["bugs"]["trackers"].items() %}{% if not config.get("accept_email", False) %} 2 | {{ config["config"]["tracker__email"] }} REJECT {{ config.get("email_reject_message", "This tracker does not accept email submissions.") }} 3 | {%- endif %}{%- endfor %} 4 | -------------------------------------------------------------------------------- /salt/bugs/config/postfix/virtual: -------------------------------------------------------------------------------- 1 | {% for tracker, config in pillar["bugs"]["trackers"].items() %}{% if config.get("accept_email", False) %} 2 | {{ config["config"]["tracker__email"] }} roundup+{{ tracker }} 3 | {%- endif %}{%- endfor %} 4 | -------------------------------------------------------------------------------- /salt/bugs/config/postgresql.conf: -------------------------------------------------------------------------------- 1 | archive_mode = on 2 | archive_command = 'test !
-f /backup/postgresql/wal_logs/%f && cp %p /backup/postgresql/wal_logs/%f' 3 | archive_timeout = 1800 4 | -------------------------------------------------------------------------------- /salt/bugs/config/rietveld.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Rietveld Server 3 | 4 | [Service] 5 | User=roundup 6 | WorkingDirectory=/srv/roundup/trackers/cpython 7 | 8 | SyslogIdentifier=rietveld-server 9 | 10 | ExecStart=/srv/roundup/trackers/cpython/rietveld/env/bin/gunicorn -b unix:///var/run/cpython-extras/rietveld.sock -w4 rietveld.wsgi 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /salt/bugs/config/roundup.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Roundup Server 3 | 4 | [Service] 5 | User=roundup 6 | WorkingDirectory=/srv/roundup 7 | 8 | SyslogIdentifier=roundup-server 9 | PIDFile=/srv/roundup/pidfile 10 | 11 | ExecStart=/srv/roundup/env/bin/roundup-server -p 8000 -n 127.0.0.1 {%- for tracker in trackers %} {{ tracker }}=/srv/roundup/trackers/{{ tracker }} {%- endfor %} 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /salt/bugs/files/postgres-backup.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | LABEL=$(/bin/date -Iminutes) 5 | 6 | if [ -x /backup/postgresql/base_backups/current ]; then 7 | mv /backup/postgresql/base_backups/current /backup/postgresql/base_backups/prev 8 | fi; 9 | 10 | /usr/bin/pg_basebackup -D /backup/postgresql/base_backups/current -l nightly-backup-$LABEL 11 | 12 | if [ -x /backup/postgresql/base_backups/prev ]; then 13 | rm -rf /backup/postgresql/base_backups/prev 14 | fi 15 | 16 | LATEST_BACKUP_FILE=$(grep -l "LABEL: nightly-backup-$LABEL" /backup/postgresql/wal_logs/*.backup) 17 | 18 | /usr/bin/pg_archivecleanup /backup/postgresql/wal_logs $(/usr/bin/basename $LATEST_BACKUP_FILE) 19 | 20 | /usr/bin/find /backup/postgresql/wal_logs/*.backup -type f -not -path $LATEST_BACKUP_FILE -delete 21 | -------------------------------------------------------------------------------- /salt/bugs/jython.sls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/psf-salt/fe2896e7d5b0a651db89a037b1c3aa7c9b5ff6df/salt/bugs/jython.sls -------------------------------------------------------------------------------- /salt/bugs/postgresql.sls: -------------------------------------------------------------------------------- 1 | 2 | pgdg-repo: 3 | pkgrepo.managed: 4 | - humanname: PostgresSQL Global Development Group 5 | - name: deb http://apt.postgresql.org/pub/repos/apt {{ grains['oscodename'] }}-pgdg main 6 | - file: /etc/apt/sources.list.d/pgdg.list 7 | - gpgcheck: 1 8 | - key_url: https://www.postgresql.org/media/keys/ACCC4CF8.asc 9 | - refresh_db: true 10 | 11 | postgresql-server: 12 | pkg.installed: 13 | - pkgs: 14 | - postgresql-16 15 | 16 | clear_default_cluster: 17 | postgres_cluster.absent: 18 | - name: 'main' 19 | - version: '16' 20 | - require: 21 | - pkg: postgresql-server 22 | 23 | roundup_postgres_backup_dir: 24 | file.directory: 25 | - name: /backup/postgresql/base_backups 26 | - user: postgres 27 | - group: postgres 28 | - mode: "0750" 29 | - makedirs: True 30 | 31 | roundup_postgres_wal_archives: 32 | 
file.directory: 33 | - name: /backup/postgresql/wal_logs 34 | - user: postgres 35 | - group: postgres 36 | - mode: "0750" 37 | 38 | roundup_cluster: 39 | postgres_cluster.present: 40 | - name: 'roundup' 41 | - version: '16' 42 | - locale: 'en_US.UTF-8' 43 | - encoding: 'UTF8' 44 | - datadir: '/srv/postgresql/16/roundup' 45 | - require: 46 | - pkg: postgresql-server 47 | 48 | roundup_postgres_config: 49 | file.managed: 50 | - name: /etc/postgresql/16/roundup/conf.d/roundup.conf 51 | - source: salt://bugs/config/postgresql.conf 52 | - user: postgres 53 | - group: postgres 54 | 55 | postgresql@16-roundup: 56 | service.running: 57 | - restart: True 58 | - enable: True 59 | - require: 60 | - postgres_cluster: clear_default_cluster 61 | - postgres_cluster: roundup_cluster 62 | - watch: 63 | - file: roundup_postgres_config 64 | 65 | roundup_user: 66 | postgres_user.present: 67 | - name: roundup 68 | - password: roundup 69 | - createdb: True 70 | 71 | roundup_postgres_backup_script: 72 | file.managed: 73 | - name: /var/lib/postgresql/backup.bash 74 | - source: salt://bugs/files/postgres-backup.bash 75 | - user: postgres 76 | - group: postgres 77 | - mode: "0750" 78 | 79 | roundup_postgres_nightly_backup: 80 | cron.present: 81 | - name: /var/lib/postgresql/backup.bash 82 | - identifier: roundup_postgres_nightly_backup 83 | - user: postgres 84 | - hour: 23 85 | - minute: 30 86 | -------------------------------------------------------------------------------- /salt/bugs/requirements.txt: -------------------------------------------------------------------------------- 1 | oic==0.14.0 2 | psycopg2<2.8 3 | gunicorn<20.0.0 4 | pytz 5 | sqlitedict==1.6.0 6 | -------------------------------------------------------------------------------- /salt/bugs/rietveld-requirements.txt: -------------------------------------------------------------------------------- 1 | Django==1.2.3 2 | psycopg2 3 | gunicorn 4 | -------------------------------------------------------------------------------- /salt/bugs/roundup.sls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/psf-salt/fe2896e7d5b0a651db89a037b1c3aa7c9b5ff6df/salt/bugs/roundup.sls -------------------------------------------------------------------------------- /salt/buildbot/config/nginx.conf.jinja: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | 4 | server_name buildbot.python.org buildbot-master.psf.io buildbot.nyc1.psf.io; 5 | 6 | if ($scheme = "http") { 7 | return 301 https://$http_host$request_uri; 8 | } 9 | } 10 | 11 | server { 12 | listen {{ port }} ssl; 13 | 14 | server_name buildbot.python.org buildbot-master.psf.io buildbot.nyc1.psf.io; 15 | 16 | ssl_certificate /etc/ssl/private/buildbot-master.psf.io.pem; 17 | ssl_certificate_key /etc/ssl/private/buildbot-master.psf.io.pem; 18 | 19 | include fastly_params; 20 | 21 | error_log /var/log/nginx/buildbot-master.error.log; 22 | access_log /var/log/nginx/buildbot-master.access.log main; 23 | 24 | rewrite ^/3.(\d+)(/?)$ /#/grid?branch=3.$1 redirect; 25 | rewrite ^/3.x(/?)$ /#/grid?branch=main redirect; 26 | rewrite ^/3.(\d+).stable(/?)$ /#/grid?branch=3.$1&tag=stable redirect; 27 | rewrite ^/3.x.stable(/?)$ /#/grid?branch=main&tag=stable redirect; 28 | rewrite ^/stable(/?)$ /#/grid?tag=stable redirect; 29 | 30 | rewrite ^/all/(.*) /$1 break; 31 | 32 | location / { 33 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 34 | proxy_set_header Host $http_host; 35
| proxy_redirect off; 36 | proxy_pass http://127.0.0.1:9010; 37 | } 38 | 39 | location /robots.txt { 40 | autoindex on; 41 | root /data/www/buildbot/; 42 | } 43 | 44 | location /ws { 45 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 46 | proxy_set_header Host $http_host; 47 | proxy_redirect off; 48 | proxy_http_version 1.1; 49 | proxy_set_header Upgrade $http_upgrade; 50 | proxy_set_header Connection "Upgrade"; 51 | proxy_pass http://localhost:9010/ws; 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /salt/cdn-logs/config/fastly.logrotate.conf: -------------------------------------------------------------------------------- 1 | /var/log/fastly/*.log { 2 | hourly 3 | rotate 72 4 | missingok 5 | notifempty 6 | compress 7 | delaycompress 8 | sharedscripts 9 | postrotate 10 | /usr/lib/rsyslog/rsyslog-rotate 11 | endscript 12 | } 13 | -------------------------------------------------------------------------------- /salt/cdn-logs/config/fastly.rsyslog.conf: -------------------------------------------------------------------------------- 1 | module(load="imptcp" threads="4") 2 | input(type="imptcp" port="514" ruleset="writeFastlyLogs") 3 | 4 | $template FastlyLog,"%msg:2:$:drop-last-lf%\n" 5 | 6 | ruleset(name="writeFastlyLogs") { 7 | {% for app_name in fastly_logging_names %} 8 | :app-name, isequal, "{{ app_name }}" /var/log/fastly/{{ app_name }}.log;FastlyLog 9 | {% endfor %} 10 | } 11 | -------------------------------------------------------------------------------- /salt/cdn-logs/init.sls: -------------------------------------------------------------------------------- 1 | adm: 2 | group.present: 3 | - name: adm 4 | 5 | /var/log/fastly/: 6 | file.directory: 7 | - user: syslog 8 | - group: adm 9 | 10 | /etc/rsyslog.d/25-fastly-logs.conf: 11 | file.managed: 12 | - source: salt://cdn-logs/config/fastly.rsyslog.conf 13 | - template: jinja 14 | - context: 15 | fastly_logging_names: {{ pillar["fastly-logging-names"] }} 16 | 17 | /etc/logrotate.d/fastly-logs: 18 | file.managed: 19 | - source: salt://cdn-logs/config/fastly.logrotate.conf 20 | 21 | /etc/systemd/system/timers.target.wants/logrotate.timer: 22 | ini.options_present: 23 | - name: /etc/systemd/system/timers.target.wants/logrotate.timer 24 | - separator: '=' 25 | - sections: 26 | Unit: 27 | Description: 'Hourly rotation of log files' 28 | Timer: 29 | OnCalendar: hourly 30 | cmd.run: 31 | - name: systemctl daemon-reload 32 | - onchanges: 33 | - ini: /etc/systemd/system/timers.target.wants/logrotate.timer 34 | -------------------------------------------------------------------------------- /salt/codespeed/config/codespeed.logrotate: -------------------------------------------------------------------------------- 1 | /var/log/codespeed/*.log { 2 | daily 3 | rotate 14 4 | missingok 5 | notifempty 6 | create 644 codespeed codespeed 7 | } 8 | -------------------------------------------------------------------------------- /salt/codespeed/config/codespeed.service.jinja: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=codespeed instance {{ instance }} 3 | After=network.target 4 | 5 | [Service] 6 | Environment=LC_ALL=en_US.UTF-8 7 | Environment=LANG=en_US.UTF-8 8 | WorkingDirectory=/srv/codespeed/{{ instance }}/src 9 | ExecStart=/srv/codespeed/{{ instance }}/env/bin/gunicorn {{ wsgi_app }} -w 4 --bind unix:///var/run/codespeed/{{ instance }}.sock 10 | ExecReload=/bin/kill -HUP $MAINPID 11 | ExecStop = /bin/kill -s TERM 
$MAINPID 12 | Restart=on-failure 13 | User=codespeed 14 | Group=codespeed 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /salt/codespeed/config/nginx.conf.jinja: -------------------------------------------------------------------------------- 1 | upstream codespeed-{{ instance }} { 2 | server unix:/var/run/codespeed/{{ instance }}.sock; 3 | } 4 | 5 | server { 6 | listen 80; 7 | 8 | server_name {{ server_names }}; 9 | 10 | if ($scheme = "http") { 11 | return 301 https://$http_host$request_uri; 12 | } 13 | } 14 | 15 | server { 16 | listen {{ port }} ssl; 17 | 18 | ssl_certificate /etc/ssl/private/codespeed.psf.io.pem; 19 | ssl_certificate_key /etc/ssl/private/codespeed.psf.io.pem; 20 | 21 | include fastly_params; 22 | 23 | error_log /var/log/nginx/codespeed-{{ instance }}.error.log; 24 | access_log /var/log/nginx/codespeed-{{ instance }}.access.log main; 25 | 26 | # path for static files 27 | location ~ ^/static/(.*)$ { 28 | alias /srv/codespeed/{{ instance }}/data/site_media/static/$1; 29 | } 30 | 31 | location / { 32 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 33 | proxy_set_header Host $http_host; 34 | proxy_redirect off; 35 | 36 | if (!-f $request_filename) { 37 | proxy_pass http://codespeed-{{ instance }}; 38 | break; 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /salt/consul/etc/_address_list.jinja: -------------------------------------------------------------------------------- 1 | {% set join_addresses = [] -%} 2 | {% for server in pillar["consul"]["bootstrap"][pillar["dc"]] -%} 3 | {% for name, addresses in salt["mine.get"](server, "psf_internal").items()|sort() -%} 4 | {% for address in addresses -%} 5 | {% do join_addresses.append(address) -%} 6 | {% endfor -%} 7 | {% endfor -%} 8 | {% endfor -%} 9 | -------------------------------------------------------------------------------- /salt/consul/etc/acl-master.json.jinja: -------------------------------------------------------------------------------- 1 | {"acl_master_token": "{{ salt['pillar.get']('consul:acl:tokens:__master__') }}"} 2 | -------------------------------------------------------------------------------- /salt/consul/etc/acl.json.jinja: -------------------------------------------------------------------------------- 1 | { 2 | {% if "default" in salt['pillar.get']("consul:acl:tokens", []) %} 3 | "acl_token": "{{ pillar['consul']['acl']['tokens']['default'] }}" 4 | {% endif %} 5 | } 6 | -------------------------------------------------------------------------------- /salt/consul/etc/base.json.jinja: -------------------------------------------------------------------------------- 1 | {% set internal = salt["pillar.get"]("psf_internal_network") %} 2 | 3 | { 4 | "acl_datacenter": "{{ pillar.consul.acl.dc }}", 5 | "acl_default_policy": "{{ pillar.consul.acl.default }}", 6 | "acl_down_policy": "{{ pillar.consul.acl.down }}", 7 | "acl_ttl": "{{ pillar.consul.acl.ttl }}", 8 | "bind_addr": "{{ salt["network.ip_addrs"](cidr=internal)[0] }}", 9 | "ca_file": "/etc/ssl/certs/PSF_CA.pem", 10 | "datacenter": "{{ pillar.dc }}", 11 | "disable_remote_exec": true, 12 | "enable_syslog": true, 13 | "server_name": "consul.psf.io", 14 | "verify_incoming": false, 15 | "verify_outgoing": true 16 | } 17 | -------------------------------------------------------------------------------- /salt/consul/etc/consul-template.conf.jinja: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/psf-salt/fe2896e7d5b0a651db89a037b1c3aa7c9b5ff6df/salt/consul/etc/consul-template.conf.jinja -------------------------------------------------------------------------------- /salt/consul/etc/consul-template/base.json: -------------------------------------------------------------------------------- 1 | wait = "10s" 2 | -------------------------------------------------------------------------------- /salt/consul/etc/consul-template/template.json.jinja: -------------------------------------------------------------------------------- 1 | template { 2 | source = "{{ source }}" 3 | destination = "{{ destination }}" 4 | {% if command %}command = "{{ command }}"{% endif %} 5 | } 6 | -------------------------------------------------------------------------------- /salt/consul/etc/encrypt.json.jinja: -------------------------------------------------------------------------------- 1 | {"encrypt": "{{ salt['pillar.get']('consul:encryption:key') }}"} 2 | -------------------------------------------------------------------------------- /salt/consul/etc/join.json.jinja: -------------------------------------------------------------------------------- 1 | {% import "consul/etc/_address_list.jinja" as address_list %} 2 | { 3 | "retry_join": [ 4 | {% for address in address_list.join_addresses|sort() -%} 5 | "{{ address }}"{% if not loop.last %},{% endif %} 6 | {% endfor %} 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /salt/consul/etc/server.json.jinja: -------------------------------------------------------------------------------- 1 | {% import "consul/etc/_address_list.jinja" as address_list %} 2 | { 3 | "bootstrap_expect": {{ address_list.join_addresses|length() }}, 4 | "cert_file": "/etc/ssl/private/consul.psf.io.pem", 5 | "key_file": "/etc/ssl/private/consul.psf.io.pem", 6 | "server": true 7 | } 8 | -------------------------------------------------------------------------------- /salt/consul/etc/service.jinja: -------------------------------------------------------------------------------- 1 | { 2 | "service": { 3 | "name": "{{ name }}", 4 | "tags": [{% for tag in tags|default([]) %}"{{ tag }}"{% if not loop.last %},{% endif %}{% endfor %}], 5 | "port": {{ port }} 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /salt/consul/init/consul-template.conf.jinja: -------------------------------------------------------------------------------- 1 | description "Consul Template" 2 | 3 | start on runlevel [2345] 4 | stop on runlevel [!2345] 5 | 6 | respawn 7 | 8 | exec /usr/bin/consul-template \ 9 | -config /etc/consul-template.conf \ 10 | -wait 10s \ 11 | {% for template in templates -%} 12 | -template "{{ template }}"{% if not loop.last %} \{% endif %} 13 | {% endfor %} 14 | -------------------------------------------------------------------------------- /salt/consul/init/consul-template.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=consul-template 3 | Requires=network-online.target 4 | After=network-online.target consul.service 5 | 6 | [Service] 7 | KillSignal=SIGINT 8 | EnvironmentFile=-/etc/sysconfig/consul-template 9 | Restart=on-failure 10 | ExecStart=/usr/bin/consul-template $OPTIONS -config=/etc/consul-template.d 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | 
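Note: the consul/etc/service.jinja template shown above is what individual roles render into /etc/consul.d/ to register themselves with Consul. As a rough sketch of the consuming side, assuming a hypothetical "example" service on port 9000 (the service name, tag, and port here are placeholders; the pattern mirrors the downloads and elasticsearch states later in this tree):

/etc/consul.d/service-example.json:
  file.managed:
    - source: salt://consul/etc/service.jinja
    - template: jinja
    - context:
        name: example        # placeholder service name
        tags:
          - http             # optional; rendered into the "tags" array
        port: 9000           # port the local service listens on
    - user: root
    - group: root
    - mode: "0644"
    - require:
      - pkg: consul-pkgs     # Consul must be installed before registering

The rendered JSON is picked up by the local Consul agent from its -config-dir, so the service appears in the catalog for that datacenter.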
-------------------------------------------------------------------------------- /salt/consul/init/consul.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Consul 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | ExecStart=/usr/bin/consul agent -config-dir /etc/consul.d -data-dir /var/lib/consul -ui 8 | KillMode=process 9 | User=consul 10 | Group=consul 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /salt/consul/jinja.sls: -------------------------------------------------------------------------------- 1 | {% macro simple_service(service) %} 2 | {{ "{{" }} with service "{{ service }}@{{ pillar.dc }}" }} # noqa: 206 3 | {{ 4 | caller( 5 | addr="{{ (index . 0).Address }}", 6 | port="{{ (index . 0).Port }}", 7 | ) 8 | }} 9 | {{ "{{ end }}" }} 10 | {% endmacro %} 11 | -------------------------------------------------------------------------------- /salt/datadog/config/APT-GPG-KEY-DATADOG: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | 3 | mQINBGRBYW4BEAC/4bIFPbfBmpLzhboTl2JIAvXX2U3fdp2jMsmPMYxpC2+bDOvk 4 | F1IlxjoG1nVnUKm7bnpaml7+007Y4VYpVNsCAgXwXZPpKaNgjYncIdBIiUGt9UFy 5 | uhEUDGmywEj9P28njKQKzysrxsVrcHNTbiZwN5hdq+mgaXMV8foSs7dHg2FzUE67 6 | +6mwTB0NW1dqA/sP6diP5OQfW5xrGmDBeqfzBeq+rabDrkjFBsTkyvxNX3p1VZEF 7 | vBxV5VuOAU7oOCyczCRYPI0pRmEGVXsXU5puuzHiB4roO2Nq0oyjhJTv5bn79AKG 8 | z6OFB2pbjNdlppdsDvbIZXmG192Z4lOzANDSQVR5g5Uy+0wCrMdXQnWg5SeR60T3 9 | wqViy/oKkljfU4zjHrpbpt0y/pbyjVXqXH4PMst0VAiNXtTLG4dfcQiFGei2aPbh 10 | N2oxc0SbSrC3E7k6xR0x0rem6PFPg0o4aHVQ2r7IimS8z0V8EDVjYuwDVvmKsc/T 11 | 0yXr9fd1VNdpi/K4MZBsePDBd9ovOOFM7/EgJntGIK8FrBRTGrq6JTEVyV90ZBuP 12 | j4vCKA/M0RESZe5/sk9NhyJanrR/XeAw3lPdT1Ia1h6vynfrOzc8GnO/kknnjeUs 13 | DgQtt6ZiG+O5D5UTl5f+TXiKAlsb79l2VVFEzhN6FCyNcDBk21MryY9n2QARAQAB 14 | tEtEYXRhZG9nLCBJbmMuIEFQVCBrZXkgKDIwMjMtMDQtMjApIChBUFQga2V5KSA8 15 | cGFja2FnZSthcHRrZXlAZGF0YWRvZ2hxLmNvbT6JAlQEEwEKAD4WIQRfHiVgYdgT 16 | sSXhVujmJm1KwJYsfQUCZEFhbgIbAwUJCWYBgAULCQgHAgYVCgkICwIEFgIDAQIe 17 | AQIXgAAKCRDmJm1KwJYsfXS1EACylw4vuTGixOekprUald6knQOiR619pYfeLryD 18 | GVxkODe9cVtK4voa0JpJ77b3Y3rfOVJzW0H89xtC7vEB2OnHIdmdxDutDMSq/50D 19 | asw355OluqhDhBubsMh7MXeip7fPonMnhJUP2cPCpim1AYEMy2RRr9uQ395JRMbj 20 | tNJPIgnf9ds8nBLYlDKuEgeOScyHLPoc1gQ+VXQuAMmvvyxU1rDkK9Q71hUEHgJg 21 | EAslUm3mh12whwsf2Bsgl1vXHLI+1rQI18OwDf9qn9Mx+1xWDY4L0jsdhQOg4lkI 22 | LxenKtJMGA0Z7I3++LbXFZ+EX8KJMtI7ViwjupTKkm9fpXOH6TYorrSbbNO8JAq4 23 | SgVW7kfM4tiCLB+1NtPjtVYUVgzqFSUDIrZQCcdBEI/VGWMw4NGmKoHnArAiL2RM 24 | ASj0ocdpUee5jtK0uyAudNJE9Nd6pRMT1vzGOALewG9GQosirIGiPq9SdRrYjq1p 25 | KratnS2byrbcXTPrDb3YB7GHbnvWMNjFY2mr+1S3JBte8o3aYKbKd/qpdrgN55ku 26 | 8+qPEJib4K5sTh2rG/yuuXR/3Y3D7+6wfHt6u8DPreRC61uf81WhLHJxNqV0C06m 27 | /wrSpwus7P+wf3eesWk6Wcax2BVV6uieYn/nuvKNqk5ESNfhJc0yqQqXm83vN45F 28 | gtZ0mQ== 29 | =r19+ 30 | -----END PGP PUBLIC KEY BLOCK----- 31 | -------------------------------------------------------------------------------- /salt/datadog/config/datadog.yaml.jinja: -------------------------------------------------------------------------------- 1 | dd_url: https://app.datadoghq.com 2 | api_key: {{ api_key }} 3 | 4 | use_dogstatsd: yes 5 | dogstatsd_port: 8125 6 | 7 | hostname_fqdn: yes 8 | 9 | {% if tags is defined %} 10 | tags: 11 | {%- for tag in tags %} 12 | - {{ tag }} 13 | {%- endfor -%} 14 | {% endif %} 15 | 
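Note: datadog.yaml.jinja above only needs an api_key and an optional tags list in its render context (normally supplied from pillar/secrets data). As a rough sketch with placeholder values, a context such as:

api_key: 0123456789abcdef0123456789abcdef   # placeholder, not a real key
tags:
  - role:loadbalancer                        # illustrative tag values
  - dc:nyc1

would render roughly to:

dd_url: https://app.datadoghq.com
api_key: 0123456789abcdef0123456789abcdef

use_dogstatsd: yes
dogstatsd_port: 8125

hostname_fqdn: yes

tags:
  - role:loadbalancer
  - dc:nyc1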
-------------------------------------------------------------------------------- /salt/datadog/files/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/psf-salt/fe2896e7d5b0a651db89a037b1c3aa7c9b5ff6df/salt/datadog/files/.gitkeep -------------------------------------------------------------------------------- /salt/dns/init.sls: -------------------------------------------------------------------------------- 1 | boto: 2 | pip.installed: 3 | - pip_bin: /usr/bin/salt-pip 4 | 5 | boto3: 6 | pip.installed: 7 | - pip_bin: /usr/bin/salt-pip 8 | 9 | {% set public_ipv4 = salt["mine.get"]("*", "public_ipv4") %} 10 | 11 | # We assume that a server will always have an IPv4 address. 12 | 13 | # TODO(@JacobCoffee): Update back to boto3_route53 14 | {% for server in public_ipv4 %} 15 | {{ server }}-route53: 16 | boto_route53.rr_present: 17 | - zone: psf.io. 18 | - name: {{ server }}. 19 | - ttl: 3600 20 | - record_type: A 21 | - value: {{ public_ipv4.get(server, []) }} 22 | {% endfor %} 23 | -------------------------------------------------------------------------------- /salt/docker/init.sls: -------------------------------------------------------------------------------- 1 | docker: 2 | pkgrepo.managed: 3 | - humanname: Docker 4 | - name: deb https://apt.dockerproject.org/repo ubuntu-{{ grains["oscodename"] }} main 5 | - file: /etc/apt/sources.list.d/docker.list 6 | - keyid: 58118E89F3A912897C070ADBF76221572C52609D 7 | - keyserver: keyserver.ubuntu.com 8 | - require_in: 9 | - pkg: docker 10 | 11 | pkg.latest: 12 | - pkgs: 13 | - linux-image-extra-virtual 14 | - docker-engine 15 | - refresh: True 16 | 17 | service.running: 18 | - enable: True 19 | -------------------------------------------------------------------------------- /salt/docs/config/docsbuild-scripts: -------------------------------------------------------------------------------- 1 | [env] 2 | SENTRY_DSN = "{{ sentry_dsn }}" 3 | FASTLY_SERVICE_ID = "{{ fastly_service_id }}" 4 | FASTLY_TOKEN = "{{ fastly_token }}" 5 | PYTHON_DOCS_ENABLE_ANALYTICS = "1" 6 | -------------------------------------------------------------------------------- /salt/docs/config/docsbuild.logrotate: -------------------------------------------------------------------------------- 1 | /var/log/docsbuild/*.log { 2 | daily 3 | rotate 14 4 | missingok 5 | notifempty 6 | compress 7 | create 644 docsbuild docsbuild 8 | } 9 | -------------------------------------------------------------------------------- /salt/docs/config/nginx.docs-backend.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 9000 ssl; 3 | server_name docs.python.org; 4 | 5 | ssl_certificate /etc/ssl/private/docs.psf.io.pem; 6 | ssl_certificate_key /etc/ssl/private/docs.psf.io.pem; 7 | 8 | include fastly_params; 9 | 10 | error_log /var/log/nginx/docs-backend.error.log; 11 | access_log /var/log/nginx/docs-backend.access.log main; 12 | 13 | root /srv/docs.python.org; 14 | 15 | add_header Surrogate-control max-age=604800; 16 | 17 | autoindex on; 18 | 19 | # The redirect config for docs.python.org is in a separate file 20 | # to allow automatic testing via Hurl (https://hurl.dev/) 21 | include sites.d/docs/redirects.conf; 22 | } 23 | 24 | server { 25 | listen 9000 ssl; 26 | server_name doc.python.org; 27 | 28 | ssl_certificate /etc/ssl/private/docs.psf.io.pem; 29 | ssl_certificate_key /etc/ssl/private/docs.psf.io.pem; 30 | 31 | include fastly_params; 32 | 33 | error_log 
/var/log/nginx/doc-backend.error.log; 34 | access_log /var/log/nginx/doc-backend.access.log main; 35 | 36 | expires max; 37 | 38 | return 301 http://docs.python.org$request_uri; 39 | } 40 | -------------------------------------------------------------------------------- /salt/downloads/config/nginx.downloads-backend.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 9000 ssl; 3 | server_name www.python.org; 4 | 5 | ssl_certificate /etc/ssl/private/downloads.psf.io.pem; 6 | ssl_certificate_key /etc/ssl/private/downloads.psf.io.pem; 7 | 8 | include fastly_params; 9 | 10 | error_log /var/log/nginx/downloads-backend.error.log; 11 | access_log /var/log/nginx/downloads-backend.access.log; 12 | 13 | root /srv/www.python.org; 14 | 15 | location / { 16 | add_header Surrogate-Control max-age=157680000; 17 | } 18 | 19 | location ~ /$ { 20 | add_header Surrogate-Control max-age=604800; 21 | add_header Surrogate-Key dirlisting; 22 | autoindex on; 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /salt/downloads/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - nginx 3 | 4 | 5 | /etc/nginx/sites.d/downloads-backend.conf: 6 | file.managed: 7 | - source: salt://downloads/config/nginx.downloads-backend.conf 8 | - user: root 9 | - group: root 10 | - mode: "0644" 11 | - require: 12 | - file: /etc/nginx/sites.d/ 13 | - file: /etc/nginx/fastly_params 14 | 15 | 16 | /etc/consul.d/service-downloads.json: 17 | file.managed: 18 | - source: salt://consul/etc/service.jinja 19 | - template: jinja 20 | - context: 21 | name: downloads 22 | port: 9000 23 | - user: root 24 | - group: root 25 | - mode: "0644" 26 | - require: 27 | - pkg: consul-pkgs 28 | -------------------------------------------------------------------------------- /salt/elasticsearch/config/elasticsearch.yml.jinja: -------------------------------------------------------------------------------- 1 | {% set psf_internal = salt["pillar.get"]("psf_internal_network") %} 2 | network.bind_host: {{ salt["network.ip_addrs"](cidr=psf_internal)[0] }} 3 | http.port: 9200 4 | -------------------------------------------------------------------------------- /salt/elasticsearch/config/logging.yml: -------------------------------------------------------------------------------- 1 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG 2 | es.logger.level: INFO 3 | rootLogger: ${es.logger.level}, console, file 4 | logger: 5 | # log action execution errors for easier debugging 6 | action: DEBUG 7 | # reduce the logging for aws, too much is logged under the default INFO 8 | com.amazonaws: WARN 9 | 10 | # gateway 11 | #gateway: DEBUG 12 | #index.gateway: DEBUG 13 | 14 | # peer shard recovery 15 | #indices.recovery: DEBUG 16 | 17 | # discovery 18 | #discovery: TRACE 19 | 20 | index.search.slowlog: TRACE, index_search_slow_log_file 21 | index.indexing.slowlog: TRACE, index_indexing_slow_log_file 22 | 23 | additivity: 24 | index.search.slowlog: false 25 | index.indexing.slowlog: false 26 | 27 | appender: 28 | console: 29 | type: console 30 | layout: 31 | type: consolePattern 32 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 33 | 34 | file: 35 | type: rollingFile 36 | file: ${path.logs}/${cluster.name}.log 37 | datePattern: "'.'yyyy-MM-dd" 38 | layout: 39 | type: pattern 40 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 41 | defaultRolloverStrategy: 42 | 
max: 14 43 | 44 | index_search_slow_log_file: 45 | type: rollingFile 46 | file: ${path.logs}/${cluster.name}_index_search_slowlog.log 47 | datePattern: "'.'yyyy-MM-dd" 48 | layout: 49 | type: pattern 50 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 51 | defaultRolloverStrategy: 52 | max: 14 53 | 54 | index_indexing_slow_log_file: 55 | type: rollingFile 56 | file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log 57 | datePattern: "'.'yyyy-MM-dd" 58 | layout: 59 | type: pattern 60 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 61 | defaultRolloverStrategy: 62 | max: 14 63 | 64 | -------------------------------------------------------------------------------- /salt/elasticsearch/init.sls: -------------------------------------------------------------------------------- 1 | default-jre-headless: 2 | pkg.installed 3 | 4 | elasticsearch-repo: 5 | pkgrepo.managed: 6 | - name: deb http://packages.elasticsearch.org/elasticsearch/1.4/debian stable main 7 | - key_url: http://packages.elasticsearch.org/GPG-KEY-elasticsearch 8 | - file: /etc/apt/sources.list.d/elasticsearch.list 9 | - require_in: 10 | - pkg: elasticsearch 11 | 12 | elasticsearch: 13 | pkg.installed: 14 | - require: 15 | - pkg: default-jre-headless 16 | service.running: 17 | - watch: 18 | - file: /etc/elasticsearch/*.yml 19 | 20 | /etc/elasticsearch/elasticsearch.yml: 21 | file.managed: 22 | - source: salt://elasticsearch/config/elasticsearch.yml.jinja 23 | - template: jinja 24 | - require: 25 | - pkg: elasticsearch 26 | 27 | /etc/elasticsearch/logging.yml: 28 | file.managed: 29 | - source: salt://elasticsearch/config/logging.yml 30 | - require: 31 | - pkg: elasticsearch 32 | 33 | /etc/consul.d/service-elasticsearch.json: 34 | file.managed: 35 | - source: salt://consul/etc/service.jinja 36 | - template: jinja 37 | - context: 38 | name: elasticsearch 39 | port: 9200 40 | - user: root 41 | - group: root 42 | - mode: "0644" 43 | - require: 44 | - service: elasticsearch 45 | - pkg: consul-pkgs 46 | -------------------------------------------------------------------------------- /salt/firewall/config/ip6tables.jinja: -------------------------------------------------------------------------------- 1 | # Firewall configuration written by salt 2 | # Manual customization of this file is not cool. 
3 | 4 | {% set rules = salt['pillar.get']('firewall', {}) %} 5 | 6 | *filter 7 | :INPUT ACCEPT [0:0] 8 | :FORWARD ACCEPT [0:0] 9 | :OUTPUT ACCEPT [0:0] 10 | 11 | # Default rules 12 | -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 13 | -A INPUT -p ipv6-icmp -j ACCEPT 14 | -A INPUT -i lo -j ACCEPT 15 | 16 | # SSH 17 | -A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT 18 | 19 | {% for rule, config in rules.items() -%} 20 | {% if not config.get("raw") and not config.get("raw_v6") -%} 21 | {% set port = config.get('port', 22) -%} 22 | {% set proto = config.get('protocol', 'tcp') -%} 23 | {% set source = config.get('source') -%} 24 | {% set source6 = config.get('source6') -%} 25 | {% if (not source and not source6) or source6 -%} 26 | # {{ rule }} 27 | -A INPUT -m state --state NEW -m {{ proto }} -p {{ proto }} -s {{ source6|default('::/0', true) }} --dport {{ port }} -j ACCEPT 28 | {% endif -%} 29 | {% elif config.get("raw_v6") -%} 30 | # {{ rule }} 31 | {{ config.raw_v6 }} 32 | {% endif %} 33 | {% endfor -%} 34 | 35 | # Rejections 36 | -A INPUT -j REJECT --reject-with icmp6-adm-prohibited 37 | -A FORWARD -j REJECT --reject-with icmp6-adm-prohibited 38 | 39 | COMMIT 40 | -------------------------------------------------------------------------------- /salt/firewall/config/iptables.jinja: -------------------------------------------------------------------------------- 1 | # Firewall configuration written by salt 2 | # Manual customization of this file is not cool. 3 | 4 | {% set rules = salt['pillar.get']('fwmangle', {}) %} 5 | {% if rules %} 6 | *mangle 7 | :PREROUTING ACCEPT [0:0] 8 | :INPUT ACCEPT [0:0] 9 | :FORWARD ACCEPT [0:0] 10 | :OUTPUT ACCEPT [0:0] 11 | :POSTROUTING ACCEPT [0:0] 12 | {% for name, rule in rules.items() -%} 13 | # {{ name }} 14 | {{ rule }} 15 | {% endfor %} 16 | COMMIT 17 | {% endif %} 18 | 19 | {% set rules = salt['pillar.get']('firewall', {}) %} 20 | 21 | *filter 22 | :INPUT ACCEPT [0:0] 23 | :FORWARD ACCEPT [0:0] 24 | :OUTPUT ACCEPT [0:0] 25 | 26 | # Default rules 27 | -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 28 | -A INPUT -p icmp -j ACCEPT 29 | -A INPUT -i lo -j ACCEPT 30 | 31 | # SSH 32 | -A INPUT -m state --state NEW -m tcp -p tcp --dport 22 -j ACCEPT 33 | 34 | {% for rule, config in rules.items() -%} 35 | {% if not config.get("raw") and not config.get("raw_v6") -%} 36 | {% set port = config.get('port', 22) -%} 37 | {% set proto = config.get('protocol', 'tcp') -%} 38 | {% set source = config.get('source') -%} 39 | {% set source6 = config.get('source6') -%} 40 | {% if (not source and not source6) or source -%} 41 | # {{ rule }} 42 | -A INPUT -m state --state NEW -m {{ proto }} -p {{ proto }} -s {{ source|default('0.0.0.0/0', true) }} --dport {{ port }} -j ACCEPT 43 | {% endif -%} 44 | {% elif config.get("raw") -%} 45 | # {{ rule }} 46 | {{ config.raw }} 47 | {% endif %} 48 | {% endfor -%} 49 | 50 | # Rejections 51 | -A INPUT -j REJECT --reject-with icmp-host-prohibited 52 | -A FORWARD -j REJECT --reject-with icmp-host-prohibited 53 | 54 | COMMIT 55 | -------------------------------------------------------------------------------- /salt/firewall/init.sls: -------------------------------------------------------------------------------- 1 | /etc/iptables: 2 | file.directory: 3 | - user: root 4 | - group: root 5 | - mode: "0755" 6 | 7 | /etc/iptables/rules.v4: 8 | file.managed: 9 | - source: salt://firewall/config/iptables.jinja 10 | - user: root 11 | - group: root 12 | - mode: "0600" 13 | - template: jinja 14 | - require: 15 | - pkg:
iptables-persistent 16 | 17 | 18 | /etc/iptables/rules.v6: 19 | file.managed: 20 | - source: salt://firewall/config/ip6tables.jinja 21 | - template: jinja 22 | - user: root 23 | - group: root 24 | - mode: "0600" 25 | - require: 26 | - pkg: iptables-persistent 27 | 28 | 29 | iptables-persistent: 30 | pkg.installed: 31 | - pkgs: 32 | {% if grains["oscodename"] == "trusty" %} 33 | - iptables-persistent 34 | {% else %} 35 | - iptables-persistent 36 | - netfilter-persistent 37 | {% endif %} 38 | 39 | service.enabled: 40 | {% if grains["oscodename"] == "trusty" %} 41 | - name: iptables-persistent 42 | {% else %} 43 | - name: netfilter-persistent 44 | {% endif %} 45 | - require: 46 | - file: /etc/iptables/rules.v4 47 | - file: /etc/iptables/rules.v6 48 | 49 | module.watch: 50 | - name: service.restart 51 | {% if grains["oscodename"] == "trusty" %} 52 | - m_name: iptables-persistent 53 | {% else %} 54 | - m_name: netfilter-persistent 55 | {% endif %} 56 | - watch: 57 | - file: /etc/iptables/rules.v4 58 | - file: /etc/iptables/rules.v6 59 | -------------------------------------------------------------------------------- /salt/groups/init.sls: -------------------------------------------------------------------------------- 1 | {% for group in pillar.get("groups", []) %} 2 | {{ group }}-group: 3 | group.present: 4 | - name: {{ group }} 5 | {% endfor %} 6 | -------------------------------------------------------------------------------- /salt/haproxy/config/APT-GPG-KEY-HAPROXY: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: SKS 1.1.4 3 | Comment: Hostname: keyserver.ubuntu.com 4 | 5 | mI0EUa70wAEEAMtI29s01PCX0JleVmh1QQr3rfPkfGo/GFKfcXRGE40nQHq+rWUh9slUN+kX 6 | BckSE0DDrnQH08Uvf12TJiHHFlbXnH5Ep+hgYPZGlVSpvBGO+c/CopU7RHMx9bl+pVOhrVeD 7 | WqLl2KqJI2wjJBLXA0dbRbCzmXPvrg3mBQ0hZ533ABEBAAG0IExhdW5jaHBhZCBQUEEgZm9y 8 | IFZpbmNlbnQgQmVybmF0iLgEEwECACIFAlGu9MACGwMGCwkIBwMCBhUIAgkKCwQWAgMBAh4B 9 | AheAAAoJEFBdl6QcYbnN8aMD/RM3InMubxTF9hzToCPF2EP37Q9WUQNF15f90jTOl8VqqpnU 10 | fGd2qlxUW31soCpDVxqX6lXfqB0bI9EDz2r7w+goxBH+cRArJ2APdC7wE/U9eIxY49mzNsqj 11 | sl7zY+eoX4v4fjqk33hFyMMJDUtPxSRHWFqP5QNwCN+fbPh5GiyL 12 | =x3rU 13 | -----END PGP PUBLIC KEY BLOCK----- 14 | -------------------------------------------------------------------------------- /salt/haproxy/config/consul-recursors.json: -------------------------------------------------------------------------------- 1 | { 2 | "recursors": [{% for nameserver in salt['grains.get']('dns:nameservers') %}"{{ nameserver }}"{% if not loop.last %}, {% endif %}{% endfor %}] 3 | } 4 | -------------------------------------------------------------------------------- /salt/haproxy/config/haproxy-ocsp-logrotate.conf: -------------------------------------------------------------------------------- 1 | /var/log/haproxy-ocsp.log { 2 | daily 3 | rotate 7 4 | missingok 5 | notifempty 6 | compress 7 | delaycompress 8 | } 9 | -------------------------------------------------------------------------------- /salt/haproxy/config/nginx-redirect.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set haproxy = salt["pillar.get"]("haproxy", {}) -%} 2 | 3 | {% for domain, config in haproxy.redirects.items() -%} 4 | server { 5 | listen 127.0.0.1:19002; 6 | server_name {{ domain }}; 7 | add_header Strict-Transport-Security "max-age={{ config.get("hsts_seconds", 315360000) }}{% if config.get("hsts_subdomains", True) %}; includeSubDomains{% endif %}{% if 
config.get("hsts_preload", True) %}; preload{% endif %}"; 8 | 9 | return 301 http{% if config.get("tls", True) %}s{% endif %}://{{ config.target }}{% if config.get("request_uri", True) %}$request_uri{% endif %}; 10 | } 11 | {% endfor %} 12 | -------------------------------------------------------------------------------- /salt/haproxy/config/our_domains.jinja: -------------------------------------------------------------------------------- 1 | {%- set haproxy = salt["pillar.get"]("haproxy", {}) -%} 2 | {%- set our_domains = [] -%} 3 | {%- for service, config in haproxy.services.items() -%}{% for domain in config.domains -%}{% do our_domains.append(domain) %}{% endfor -%}{% endfor -%} 4 | {%- for domain in haproxy.redirects -%}{% do our_domains.append(domain) -%}{% endfor -%} 5 | {%- for domain in our_domains %} 6 | {{ domain }}{% endfor %} 7 | -------------------------------------------------------------------------------- /salt/hg/config/apache.logrotate: -------------------------------------------------------------------------------- 1 | /var/log/apache2/*.log { 2 | daily 3 | rotate 30 4 | missingok 5 | notifempty 6 | create 640 root adm 7 | compress 8 | delaycompress 9 | sharedscripts 10 | postrotate 11 | if /etc/init.d/apache2 status > /dev/null ; then \ 12 | /etc/init.d/apache2 reload > /dev/null; \ 13 | fi; 14 | endscript 15 | } 16 | -------------------------------------------------------------------------------- /salt/hg/config/hg.apache.conf.jinja: -------------------------------------------------------------------------------- 1 | 2 | ServerName hg.python.org 3 | 4 | SSLEngine on 5 | SSLCertificateFile /etc/ssl/private/hg.psf.io.pem 6 | SSLCertificateKeyFile /etc/ssl/private/hg.psf.io.pem 7 | SSLCipherSuite {{ pillar["tls"]["ciphers"].get("backend", pillar["tls"]["ciphers"]["default"]) }} 8 | SSLHonorCipherOrder on 9 | SSLProtocol TLSv1.2 10 | 11 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %{ms}T" combined 12 | 13 | CustomLog /var/log/apache2/hg.access.log combined 14 | ErrorLog /var/log/apache2/hg.error.log 15 | 16 | # Mercurial can send long headers for repositories with many heads. See 17 | # http://barahilia.github.io/blog/computers/2014/10/09/hg-pull-via-http-fails-400.html 18 | LimitRequestFieldSize 16384 19 | 20 | Alias /committers.txt /srv/hg/web/committers 21 | Alias /robots.txt /srv/hg/web/robots.txt 22 | Alias /410.html /srv/hg/web/410.html 23 | 24 | Require all granted 25 | 26 | 27 | Alias /icons/ "/usr/share/apache2/icons/" 28 | 29 | 30 | Options FollowSymlinks 31 | AllowOverride None 32 | Require all granted 33 | 34 | 35 | 36 | ProxyPass http://localhost:8000/lookup 37 | 38 | 39 | 40 | ProxyPass http://localhost:8000 41 | 42 | 43 | # Staticly serve hg repos over HTTP 44 | DocumentRoot /srv/hg/hg-static/ 45 | 46 | Options Indexes FollowSymlinks 47 | IndexOptions FancyIndexing SuppressColumnSorting 48 | Require all granted 49 | 50 | 51 | ErrorDocument 410 /410.html 52 | RedirectMatch gone "/cpython/annotate/.*/NEWS$" 53 | RedirectMatch gone "/cpython-fullhistory/annotate/.*/NEWS$" 54 | 55 | # Static files: logo, CSS, favicon... 
(wired to .../static/) 56 | # This is optional but a bit faster than letting Mercurial serve the files 57 | # NOTE: needs to be changed if hg gets wired to another dir or python 58 | # version 59 | 60 | 61 | SetHandler server-status 62 | Require ip 127.0.0.1 63 | 64 | 65 | -------------------------------------------------------------------------------- /salt/hg/config/hgmin.service.jinja: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Minimal HG service 3 | After=network.target 4 | 5 | [Service] 6 | Environment=LC_ALL=en_US.UTF-8 7 | Environment=LANG=en_US.UTF-8 8 | WorkingDirectory=/srv/hg/src 9 | ExecStart=/srv/hg/env/bin/gunicorn app:app -w 4 --access-logfile - --error-logfile - 10 | ExecReload=/bin/kill -HUP $MAINPID 11 | ExecStop = /bin/kill -s TERM $MAINPID 12 | Restart=on-failure 13 | User=hg 14 | Group=hg 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | -------------------------------------------------------------------------------- /salt/hg/config/ports.apache.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set internal = salt["pillar.get"]("psf_internal_network") %} 2 | Listen {{ salt["network.ip_addrs"](cidr=internal)[0] }}:9000 3 | Listen {{ salt["network.ip_addrs"](cidr=internal)[0] }}:9001 4 | Listen {{ salt["network.ip_addrs"](cidr=internal)[0] }}:9002 5 | Listen 127.0.0.1:9000 6 | Listen 127.0.0.1:9001 7 | Listen 127.0.0.1:9002 8 | -------------------------------------------------------------------------------- /salt/hg/config/remoteip.apache.conf.jinja: -------------------------------------------------------------------------------- 1 | # Load the mod_remoteip module 2 | LoadModule remoteip_module modules/mod_remoteip.so 3 | 4 | # Set an environment variable 'forwarded' if the XFF header is present 5 | SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded 6 | 7 | # Define 'combined' and 'forwarded' log formats 8 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" |%h|%a|%{X-Forwarded-For}i| -combined" combined 9 | LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" |%h|%a|%{X-Forwarded-For}i| -forwarded " forwarded 10 | 11 | # Specify location of error log file 12 | ErrorLog ${APACHE_LOG_DIR}/error.log 13 | 14 | # Configure access logs with conditional logging 15 | CustomLog ${APACHE_LOG_DIR}/access.log combined env=!forwarded 16 | CustomLog ${APACHE_LOG_DIR}/access.log forwarded env=forwarded 17 | 18 | # Configure mod_remoteip to update client IP using XFF header 19 | # Specify internal proxy ips to be trusted 20 | RemoteIPHeader X-Forwarded-For 21 | RemoteIPInternalProxy 127.0.0.1 22 | RemoteIpInternalProxy {{ pillar["psf_internal_network"] }} 23 | -------------------------------------------------------------------------------- /salt/hg/config/svn.apache.conf.jinja: -------------------------------------------------------------------------------- 1 | 2 | ServerName svn.python.org 3 | 4 | SSLEngine on 5 | SSLCertificateFile /etc/ssl/private/hg.psf.io.pem 6 | SSLCertificateKeyFile /etc/ssl/private/hg.psf.io.pem 7 | SSLCipherSuite ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:!AES256:!aNULL:!eNULL:!MD5:!DSS:!PSK:!SRP 8 | SSLHonorCipherOrder on 9 | SSLProtocol TLSv1.2 10 | 11 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %{ms}T" combined 12 | 13 | CustomLog /var/log/apache2/svn.access.log combined 14 | ErrorLog /var/log/apache2/svn.error.log 15 | 16 | 
DocumentRoot /srv/svnweb 17 | 18 | # Redirection of source code links in already-generated 19 | # documentation 20 | RewriteEngine on 21 | RewriteRule ^/view/python/branches/py3k/(.*)$ \ 22 | https://hg.python.org/cpython/file/default/$1? [R=301] 23 | 24 | 25 | DAV svn 26 | SVNParentPath /srv/svn/repos/ 27 | SVNListParentPath On 28 | 29 | 30 | deny from all 31 | 32 | 33 | # our access control policy 34 | AuthzSVNAccessFile svn_config/svn.access 35 | 36 | # how to authenticate a user 37 | AuthType Basic 38 | AuthName "Subversion repository" 39 | AuthUserFile svn_config/svn.users 40 | 41 | # try anonymous access first, resort to real 42 | # authentication if necessary. 43 | Satisfy Any 44 | Require valid-user 45 | 46 | 47 | Alias /robots.txt /srv/svnweb/robots.txt 48 | 49 | Options Indexes FollowSymLinks MultiViews 50 | AllowOverride None 51 | Order allow,deny 52 | Allow from all 53 | 54 | 55 | 56 | -------------------------------------------------------------------------------- /salt/hg/files/hg/src/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask 2 | gunicorn 3 | -------------------------------------------------------------------------------- /salt/hg/files/hg/web/410.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | Page Gone - 410 Error 4 | 5 | 6 |
7 |

Error 410 - Page disabled

8 |

The resources necessary to render annotate on Misc/NEWS for cpython and cpython-fullhistory lead to poor performance for all repositories hosted at hg.python.org.

9 |

If you are reliant on this feature, please contact The Infrastructure Staff to help us understand and support your use case.

10 |
11 | 12 | 13 | -------------------------------------------------------------------------------- /salt/hg/files/hg/web/robots.txt: -------------------------------------------------------------------------------- 1 | # File referred to in the Apache conf, don't remove 2 | User-agent: * 3 | Disallow: / 4 | -------------------------------------------------------------------------------- /salt/moin/configs/logrotate.conf: -------------------------------------------------------------------------------- 1 | /data/moin/instances/jython/data/event-log 2 | /data/moin/instances/psf/data/event-log 3 | /data/moin/instances/python/data/event-log 4 | { 5 | rotate 4 6 | weekly 7 | missingok 8 | notifempty 9 | compress 10 | sharedscripts 11 | postrotate 12 | /etc/init.d/apache2 reload > /dev/null 13 | endscript 14 | } 15 | -------------------------------------------------------------------------------- /salt/moin/configs/moin.wsgi: -------------------------------------------------------------------------------- 1 | # -*- coding: iso-8859-1 -*- 2 | """ 3 | MoinMoin - mod_wsgi driver script 4 | 5 | To use this, add those statements to your Apache's VirtualHost definition: 6 | 7 | # you will invoke your moin wiki at the root url, like http://servername/FrontPage: 8 | WSGIScriptAlias / /some/path/moin.wsgi 9 | 10 | # create some wsgi daemons - use someuser.somegroup same as your data_dir: 11 | WSGIDaemonProcess daemonname user=someuser group=somegroup processes=5 threads=10 maximum-requests=1000 umask=0007 12 | 13 | # use the daemons we defined above to process requests! 14 | WSGIProcessGroup daemonname 15 | 16 | @copyright: 2008 by MoinMoin:ThomasWaldmann 17 | @license: GNU GPL, see COPYING for details. 18 | """ 19 | 20 | import sys, os 21 | 22 | # a) Configuration of Python's code search path 23 | # If you already have set up the PYTHONPATH environment variable for the 24 | # stuff you see below, you don't need to do a1) and a2). 25 | 26 | # a1) Path of the directory where the MoinMoin code package is located. 27 | # Needed if you installed with --prefix=PREFIX or you didn't use setup.py. 28 | #sys.path.insert(0, 'PREFIX/lib/python2.3/site-packages') 29 | 30 | # a2) Path of the directory where wikiconfig.py / farmconfig.py is located. 31 | # See wiki/config/... for some sample config files. 32 | sys.path.insert(0, '/etc/moin') 33 | 34 | # b) Configuration of moin's logging 35 | # If you have set up MOINLOGGINGCONF environment variable, you don't need this! 36 | # You also don't need this if you are happy with the builtin defaults. 37 | # See wiki/config/logging/... for some sample config files. 
38 | #from MoinMoin import log 39 | #log.load_config('/path/to/logging_configuration_file') 40 | 41 | from MoinMoin.web.serving import make_application 42 | 43 | # Creating the WSGI application 44 | # use shared=True to have moin serve the builtin static docs 45 | # use shared=False to not have moin serve static docs 46 | # use shared='/my/path/to/htdocs' to serve static docs from that path 47 | application = make_application(shared=True) 48 | #application = make_application(shared=True, trusted_proxies=['140.211.10.66', '127.0.0.1']) 49 | 50 | # Apply proxy fixes to get moin to use HTTPS links 51 | from werkzeug.middleware.proxy_fix import ProxyFix 52 | application = ProxyFix(application) 53 | -------------------------------------------------------------------------------- /salt/moin/configs/moin_wsgi.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import urlparse 4 | 5 | from MoinMoin.web.serving import make_application 6 | 7 | sys.path.insert(0, "/etc/moin") 8 | os.environ['PYTHONIOENCODING'] = 'utf-8' 9 | 10 | 11 | class ScriptFixerMiddleware(object): 12 | 13 | def __init__(self, application): 14 | self.application = application 15 | 16 | def __call__(self, environ, start_response): 17 | if not environ.get("SCRIPT_NAME"): 18 | path = environ.get("PATH_INFO", "") 19 | if path.startswith("/"): 20 | path = path[1:] 21 | 22 | parsed = urlparse.urlparse(path) 23 | 24 | script_name = "/".join(parsed.path.split("/")[:1]) 25 | if not script_name.startswith("/"): 26 | script_name = "/" + script_name 27 | 28 | environ["SCRIPT_NAME"] = script_name 29 | environ["PATH_INFO"] = "/".join(parsed.path.split("/")[1:]) 30 | 31 | return self.application(environ, start_response) 32 | 33 | 34 | application = ScriptFixerMiddleware(make_application(shared=False)) 35 | -------------------------------------------------------------------------------- /salt/moin/configs/ports.apache.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set internal = salt["pillar.get"]("psf_internal_network") %} 2 | Listen {{ salt["network.ip_addrs"](cidr=internal)[0] }}:9000 3 | Listen 127.0.0.1:9000 4 | -------------------------------------------------------------------------------- /salt/moin/configs/shared_intermap.txt: -------------------------------------------------------------------------------- 1 | PEP http://www.python.org/dev/peps/pep-$PAGE/ 2 | SF http://bugs.python.org/issue$PAGE 3 | PythonMac http://pythonmac.org/wiki/ 4 | WxPython http://wiki.wxpython.org/index.cgi/ 5 | ISBN http://www.amazon.com/exec/obidos/ASIN/$PAGE/pythonsoftwar-20 6 | -------------------------------------------------------------------------------- /salt/moin/scripts/moin_maint_cleanpage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script to clean up unused and deleted wiki pages. 4 | # 5 | # This script has to be run using a cronjob once a day to 6 | # prevent the number of unused pages from blowing up. 7 | # 8 | # WARNING: This script may only be run as moin user. 9 | # 10 | 11 | # Make sure only moin can run our script 12 | if [ "$(id -nu)" != "moin" ]; then 13 | echo "This script must be run as moin user" 1>&2 14 | exit 1 15 | fi 16 | 17 | # Globals 18 | DATE=`date +'%Y-%m-%d-%H%M%S'` 19 | 20 | # Clean sessions 21 | cd /srv/moin/ 22 | . 
venv/bin/activate 23 | 24 | # Clean up Python wiki 25 | cd /data/moin/instances/python/data 26 | echo "Clean up Python wiki..." 27 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/moin maint cleanpage > cleanpage.sh 28 | # create maintenance dirs 29 | mkdir trash deleted 30 | # move the pages to trash/ and deleted/ 31 | bash ./cleanpage.sh 32 | # archive cleanup 33 | tar cfz ../maintenance/$DATE.tgz cleanpage.sh deleted/ trash/ 34 | rm -rf cleanpage.sh deleted trash 35 | 36 | # Clean up PSF wiki 37 | cd /data/moin/instances/psf/data 38 | echo "Clean up PSF wiki..." 39 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/psf maint cleanpage > cleanpage.sh 40 | # create maintenance dirs 41 | mkdir trash deleted 42 | # move the pages to trash/ and deleted/ 43 | bash ./cleanpage.sh 44 | # archive cleanup 45 | tar cfz ../maintenance/$DATE.tgz cleanpage.sh deleted/ trash/ 46 | rm -rf cleanpage.sh deleted trash 47 | 48 | # Clean up Jython wiki 49 | cd /data/moin/instances/jython/data 50 | echo "Clean up Jython wiki..." 51 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/jython maint cleanpage > cleanpage.sh 52 | # create maintenance dirs 53 | mkdir trash deleted 54 | # move the pages to trash/ and deleted/ 55 | bash ./cleanpage.sh 56 | # archive cleanup 57 | tar cfz ../maintenance/$DATE.tgz cleanpage.sh deleted/ trash/ 58 | rm -rf cleanpage.sh deleted trash 59 | -------------------------------------------------------------------------------- /salt/moin/scripts/moin_maint_cleansessions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script to clean up the outdated MoinMoin sessions. 4 | # 5 | # This script has to be run using a cronjob at least once a day 6 | # to prevent the number of used inodes from blowing up. 7 | # 8 | # WARNING: This script may only be run as moin user. 9 | # 10 | 11 | # Make sure only moin can run our script 12 | if [ "$(id -nu)" != "moin" ]; then 13 | echo "This script must be run as moin user" 1>&2 14 | exit 1 15 | fi 16 | 17 | # Clean sessions 18 | cd /srv/moin/ 19 | . venv/bin/activate 20 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/moin maint cleansessions 21 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/jython maint cleansessions 22 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/psf maint cleansessions 23 | -------------------------------------------------------------------------------- /salt/moin/scripts/moin_maint_cleansessions_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script to clean out all MoinMoin sessions (including ones which 4 | # are marked to not expire). 5 | # 6 | # This script should be run every week or month to prevent the 7 | # number of used inodes from blowing up. 8 | # 9 | # WARNING: This script may only be run as moin user. 10 | # 11 | 12 | # Make sure only moin can run our script 13 | if [ "$(id -nu)" != "moin" ]; then 14 | echo "This script must be run as moin user" 1>&2 15 | exit 1 16 | fi 17 | 18 | # Clean sessions 19 | cd /srv/moin/ 20 | . 
venv/bin/activate 21 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/moin maint cleansessions --all 22 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/jython maint cleansessions --all 23 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/psf maint cleansessions --all 24 | -------------------------------------------------------------------------------- /salt/moin/scripts/moin_maint_index_rebuild.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Script to rebuild the MoinMoin Xapian indexes. 4 | # 5 | # WARNING: This script may only be run as moin user. 6 | # 7 | 8 | # Make sure only moin can run our script 9 | if [ "$(id -nu)" != "moin" ]; then 10 | echo "This script must be run as moin user" 1>&2 11 | exit 1 12 | fi 13 | 14 | # Rebuild indexes 15 | cd /srv/moin/ 16 | . venv/bin/activate 17 | 18 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/moin index build --mode=rebuild 19 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/jython index build --mode=rebuild 20 | moin --config-dir=/etc/moin --wiki-url=http://wiki.python.org/psf index build --mode=rebuild 21 | -------------------------------------------------------------------------------- /salt/nginx/config/fastly_params.jinja: -------------------------------------------------------------------------------- 1 | port_in_redirect off; 2 | 3 | ssl_ciphers {{ pillar["tls"]["ciphers"].get("backend", pillar["tls"]["ciphers"]["default"]) }}; 4 | ssl_protocols TLSv1.2; 5 | 6 | set_real_ip_from {{ pillar["psf_internal_network"] }}; 7 | real_ip_header X-Forwarded-For; 8 | -------------------------------------------------------------------------------- /salt/nginx/config/nginx.conf.jinja: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes auto; 3 | worker_rlimit_nofile 10000; 4 | 5 | error_log /var/log/nginx/global-error.log; 6 | pid /var/run/nginx.pid; 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | http { 13 | include /etc/nginx/mime.types; 14 | default_type application/octet-stream; 15 | 16 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 17 | '$status $body_bytes_sent "$http_referer" ' 18 | '"$http_user_agent" "$http_x_forwarded_for"'; 19 | 20 | access_log /var/log/nginx/global-access.log main; 21 | sendfile on; 22 | tcp_nopush on; 23 | tcp_nodelay on; 24 | server_tokens off; 25 | 26 | include /etc/nginx/conf.d/*.conf; 27 | include /etc/nginx/sites.d/*.conf; 28 | } 29 | -------------------------------------------------------------------------------- /salt/nginx/config/nginx.logrotate: -------------------------------------------------------------------------------- 1 | /var/log/nginx/*.log { 2 | daily 3 | rotate 30 4 | missingok 5 | notifempty 6 | compress 7 | sharedscripts 8 | postrotate 9 | /bin/kill -USR1 $(cat /var/run/nginx.pid 2>/dev/null) 2>/dev/null || : 10 | endscript 11 | } 12 | -------------------------------------------------------------------------------- /salt/nodejs/init.sls: -------------------------------------------------------------------------------- 1 | nodejs: 2 | pkgrepo.managed: 3 | - name: deb https://deb.nodesource.com/node_6.x {{ grains["oscodename"] }} main 4 | - file: /etc/apt/sources.list.d/nodesource.list 5 | - key_url: salt://nodejs/APT-GPG-KEY 6 | - require_in: 7 | - pkg: nodejs 8 | 9 | pkg.latest: 10 | - pkgs: 11 | - nodejs 12 | - refresh: True 13 | 
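The nginx.conf.jinja above deliberately contains no server blocks of its own: it only sets global options and then pulls in /etc/nginx/conf.d/*.conf and /etc/nginx/sites.d/*.conf, leaving each role to drop its site configuration into sites.d (the planet state later in this tree does exactly this). A minimal sketch of that pattern follows; the "example" role name and paths are made up for illustration and are not files in this repository:

/etc/nginx/sites.d/example.conf:
  file.managed:
    - source: salt://example/config/nginx.example.conf.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: "0644"
    - require:
      - file: /etc/nginx/sites.d/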
-------------------------------------------------------------------------------- /salt/pgbouncer/init.sls: -------------------------------------------------------------------------------- 1 | 2 | pgbouncer-pkg: 3 | pkg.installed: 4 | - pkgs: 5 | - pgbouncer 6 | 7 | /etc/pgbouncer/pgbouncer.ini: 8 | file.managed: 9 | - source: salt://pgbouncer/templates/pgbouncer.ini 10 | - template: jinja 11 | - user: postgres 12 | - group: postgres 13 | - mode: "0600" 14 | - show_changes: False 15 | - require: 16 | - pkg: pgbouncer-pkg 17 | 18 | /etc/pgbouncer/userlist.txt: 19 | file.managed: 20 | - source: salt://pgbouncer/templates/userlist.txt 21 | - template: jinja 22 | - user: postgres 23 | - group: postgres 24 | - mode: "0600" 25 | - show_changes: False 26 | - require: 27 | - pkg: pgbouncer-pkg 28 | 29 | pgbouncer-service: 30 | service.running: 31 | - name: pgbouncer 32 | - enable: True 33 | - reload: True 34 | - require: 35 | - pkg: pgbouncer-pkg 36 | - file: /etc/pgbouncer/pgbouncer.ini 37 | - file: /etc/pgbouncer/userlist.txt 38 | - watch: 39 | - file: /etc/pgbouncer/pgbouncer.ini 40 | - file: /etc/pgbouncer/userlist.txt 41 | -------------------------------------------------------------------------------- /salt/pgbouncer/templates/pgbouncer.ini: -------------------------------------------------------------------------------- 1 | [databases] 2 | {% for user, config in pillar.get('postgresql-users', {}).items() %} 3 | {{ user }} = host={{ pillar.get('postgresql-clusters', {}).get(config['cluster']).get('host') }} port={{ pillar.get('postgresql-clusters', {}).get(config['cluster']).get('port') }} dbname={{ config.get('dbname') }} user={{ user }} password={{ config.get('password', '') }} 4 | {% endfor %} 5 | 6 | [users] 7 | 8 | [pgbouncer] 9 | logfile = /var/log/postgresql/pgbouncer.log 10 | pidfile = /var/run/postgresql/pgbouncer.pid 11 | 12 | listen_addr = 127.0.0.1 13 | listen_port = 6432 14 | 15 | unix_socket_dir = /var/run/postgresql 16 | 17 | server_tls_sslmode = require 18 | server_tls_ca_file = /etc/ssl/postgres/root-certs.crt 19 | 20 | auth_type = trust 21 | auth_file = /etc/pgbouncer/userlist.txt 22 | -------------------------------------------------------------------------------- /salt/pgbouncer/templates/userlist.txt: -------------------------------------------------------------------------------- 1 | {%- for user, config in pillar.get('postgresql-users', {}).items() -%} 2 | "{{ user }}" "" 3 | {% endfor -%} 4 | -------------------------------------------------------------------------------- /salt/planet/config/nginx.planet.conf.jinja: -------------------------------------------------------------------------------- 1 | {% for site, info in salt["pillar.get"]("planet", {}).get("sites").items() %} 2 | 3 | server { 4 | listen 9000 ssl; 5 | server_name {{ info["domain"] }}; 6 | error_log /var/log/nginx/{{ site }}.error.log; 7 | access_log /var/log/nginx/{{ site }}.access.log; 8 | ssl_certificate /etc/ssl/private/planet.psf.io.pem; 9 | ssl_certificate_key /etc/ssl/private/planet.psf.io.pem; 10 | 11 | root {{ info["output"] }}; 12 | } 13 | 14 | {% for domain in info.get("subject_alternative_names", []) %} 15 | server { 16 | server_name {{ domain }}; 17 | error_log /var/log/nginx/redir-{{ domain }}.error.log; 18 | access_log /var/log/nginx/redir-{{ domain }}.error.log; 19 | 20 | return 302 $scheme://{{ site }}$request_uri; 21 | } 22 | {% endfor %} 23 | 24 | {% endfor %} 25 | -------------------------------------------------------------------------------- /salt/planet/config/run-planet.sh.jinja: 
-------------------------------------------------------------------------------- 1 | cd /srv/planet/ 2 | git pull 3 | {% for site, site_config in salt["pillar.get"]("planet", {}).get("sites").items() %} 4 | docker run --pull=always --rm \ 5 | -v {{ site_config["cache"] }}:/srv/cache/ \ 6 | -v {{ site_config["output"] }}:/srv/planetpython.org/ \ 7 | -v /srv/planet/config/{{ site_config["config"] }}:/planet/config/config.ini \ 8 | {{ site_config["image"] }} \ 9 | python /planet/code/planet.py /planet/config/config.ini 10 | {% endfor %} 11 | -------------------------------------------------------------------------------- /salt/planet/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - nginx 3 | 4 | git: 5 | pkg.installed 6 | 7 | docker.io: 8 | pkg.installed 9 | docker: 10 | service.running: 11 | - enable: True 12 | 13 | planet-user: 14 | user.present: 15 | - name: planet 16 | - createhome: False 17 | - groups: 18 | - docker 19 | - require: 20 | - pkg: docker.io 21 | 22 | /etc/nginx/sites.d/planet.conf: 23 | file.managed: 24 | - source: salt://planet/config/nginx.planet.conf.jinja 25 | - template: jinja 26 | - user: root 27 | - group: root 28 | - mode: "0644" 29 | - require: 30 | - file: /etc/nginx/sites.d/ 31 | 32 | /etc/consul.d/service-planet.json: 33 | file.managed: 34 | - source: salt://consul/etc/service.jinja 35 | - template: jinja 36 | - context: 37 | name: planet 38 | port: 9000 39 | - user: root 40 | - group: root 41 | - mode: "0644" 42 | - require: 43 | - pkg: consul-pkgs 44 | 45 | /srv/planet/: 46 | file.directory: 47 | - user: planet 48 | - group: planet 49 | - mode: "0755" 50 | 51 | https://github.com/python/planet: 52 | git.latest: 53 | - branch: main 54 | - target: /srv/planet/ 55 | - user: planet 56 | - require: 57 | - user: planet-user 58 | - pkg: git 59 | - file: /srv/planet/ 60 | 61 | /srv/run-planet.sh: 62 | file.managed: 63 | - source: salt://planet/config/run-planet.sh.jinja 64 | - template: jinja 65 | - user: planet 66 | - group: planet 67 | - mode: "0544" 68 | cron.present: 69 | - identifier: run-planet 70 | - user: planet 71 | - minute: 37 72 | - hour: 1,4,7,10,13,16,19,21 73 | 74 | {% for site, site_config in salt["pillar.get"]("planet", {}).get("sites", {}).items() %} 75 | {{ site_config["cache"] }}: 76 | file.directory: 77 | - user: planet 78 | - group: planet 79 | - mode: "0755" 80 | {{ site_config["output"] }}: 81 | file.directory: 82 | - user: planet 83 | - group: planet 84 | - mode: "0755" 85 | {{ site_config["output"] }}/static: 86 | file.symlink: 87 | - target: /srv/planet/static 88 | - user: planet 89 | - group: planet 90 | - mode: "0644" 91 | - require: 92 | - file: {{ site_config["output"] }} 93 | {% endfor %} 94 | -------------------------------------------------------------------------------- /salt/postgresql/admin.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - postgresql.client 3 | 4 | {% if 'postgres-admin' in pillar %} 5 | {% for user, settings in salt["pillar.get"]("postgresql-users", {}).items() %} 6 | {{ user }}-user: 7 | postgres_user.present: 8 | - name: {{ user }} 9 | - password: {{ settings['password'] }} 10 | - refresh_password: True 11 | - db_host: {{ pillar['postgresql-clusters'][settings['cluster']]['host'] }} 12 | - db_port: {{ pillar['postgresql-clusters'][settings['cluster']]['port'] }} 13 | - db_user: {{ pillar['postgres-admin'][settings['cluster']]['user'] }} 14 | - db_password: {{ 
pillar['postgres-admin'][settings['cluster']]['password'] }} 15 | - require: 16 | - pkg: postgresql-client 17 | {% endfor %} 18 | 19 | {% for database, settings in pillar.get("postgresql-databases", {}).items() %} 20 | {{ database }}-database: 21 | postgres_database.present: 22 | - name: {{ database }} 23 | - owner: {{ settings['owner'] }} 24 | - db_host: {{ pillar['postgresql-clusters'][settings['cluster']]['host'] }} 25 | - db_port: {{ pillar['postgresql-clusters'][settings['cluster']]['port'] }} 26 | - db_user: {{ pillar['postgres-admin'][settings['cluster']]['user'] }} 27 | - db_password: {{ pillar['postgres-admin'][settings['cluster']]['password'] }} 28 | - require: 29 | - pkg: postgresql-client 30 | - postgres_user: {{ settings['owner'] }}-user 31 | {% endfor %} 32 | {% endif %} 33 | -------------------------------------------------------------------------------- /salt/postgresql/base/init.sls: -------------------------------------------------------------------------------- 1 | {% if grains["oscodename"] in ["jammy", "noble"] %} 2 | postgresql-key: 3 | file.managed: 4 | - name: /etc/apt/keyrings/postgresql.asc 5 | - mode: "0644" 6 | - source: salt://postgresql/base/APT-GPG-KEY-POSTGRESQL 7 | 8 | postgresql-repo: 9 | pkgrepo.managed: 10 | - name: "deb [signed-by=/etc/apt/keyrings/postgresql.asc arch={{ grains["osarch"] }}] https://apt.postgresql.org/pub/repos/apt {{ grains['oscodename'] }}-pgdg main" 11 | - aptkey: False 12 | - require: 13 | - file: postgresql-key 14 | - file: /etc/apt/sources.list.d/postgresql.list 15 | {% else %} 16 | postgresql-repo: 17 | pkgrepo.managed: 18 | - name: "deb http://apt.postgresql.org/pub/repos/apt {{ grains['oscodename'] }}-pgdg main" 19 | - key_url: salt://postgresql/base/APT-GPG-KEY-POSTGRESQL 20 | - file: /etc/apt/sources.list.d/postgresql.list 21 | {% endif %} 22 | -------------------------------------------------------------------------------- /salt/postgresql/client/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - postgresql.base 3 | 4 | postgresql-client: 5 | pkg.installed: 6 | - pkgs: 7 | - postgresql-client-11 8 | - python3-psycopg2 9 | 10 | /etc/ssl/postgres: 11 | file.directory: 12 | - user: root 13 | - group: root 14 | - mode: "0755" 15 | 16 | {% for postgres_cluster, config in pillar.get('postgresql-clusters', {}).items() %} 17 | {% if 'ca_cert' in config %} 18 | /etc/ssl/postgres/{{ postgres_cluster }}.crt: 19 | file.managed: 20 | - contents_pillar: postgresql-clusters:{{ postgres_cluster }}:ca_cert 21 | - user: root 22 | - group: root 23 | - mode: "0644" 24 | {% endif %} 25 | {% if 'ca_cert_pillar' in config %} 26 | /etc/ssl/postgres/{{ postgres_cluster }}.crt: 27 | file.managed: 28 | - contents_pillar: {{ config['ca_cert_pillar'] }} 29 | - user: root 30 | - group: root 31 | - mode: "0644" 32 | {% endif %} 33 | {% endfor %} 34 | 35 | /etc/ssl/postgres/root-certs.crt: 36 | file.managed: 37 | - source: salt://postgresql/client/root-certs.crt.jinja 38 | - template: jinja 39 | - user: root 40 | - group: root 41 | - mode: "0644" 42 | -------------------------------------------------------------------------------- /salt/postgresql/client/root-certs.crt.jinja: -------------------------------------------------------------------------------- 1 | {%- for postgresql_cluster, config in pillar.get('postgresql-clusters', {}).items() %}{% if 'ca_cert' in config -%} 2 | {{ config['ca_cert'] }} 3 | {%- endif %}{% endfor -%} 4 | 
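The postgresql.admin and postgresql.client states above, together with the pgbouncer templates earlier, all read the same pillar structures. A hypothetical sketch of the pillar shape they expect is shown below; every key is one the templates actually read, but the names and values are placeholders rather than real infrastructure data:

postgresql-clusters:
  main:
    host: 10.0.0.10
    port: 5432
    ca_cert: |
      -----BEGIN CERTIFICATE-----
      ...

postgres-admin:
  main:
    user: admin
    password: not-a-real-password

postgresql-users:
  exampleapp:
    cluster: main
    dbname: exampleapp
    password: also-not-a-real-password

postgresql-databases:
  exampleapp:
    cluster: main
    owner: exampleapp

With data of this shape, pgbouncer exposes each user/database pair locally on 127.0.0.1:6432, while postgresql.admin creates the matching role and database on the cluster itself using the postgres-admin credentials.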
-------------------------------------------------------------------------------- /salt/postgresql/server/configs/gpg.conf.jinja: -------------------------------------------------------------------------------- 1 | trusted-key {{ pillar["wal-e"]["gpg-key-id"] }} 2 | -------------------------------------------------------------------------------- /salt/postgresql/server/configs/pg_hba.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set psf_internal = salt["pillar.get"]("psf_internal_network") %} 2 | # PostgreSQL Client Authentication Configuration File 3 | # =================================================== 4 | # 5 | 6 | # TYPE DATABASE USER ADDRESS METHOD 7 | 8 | # Database administrative login by Unix domain socket 9 | local all postgres peer 10 | 11 | # Administration 12 | hostssl all salt-master {{ psf_internal }} md5 13 | 14 | # Replication 15 | hostssl replication replicator {{ psf_internal }} md5 16 | 17 | # Application Databases 18 | {% for database, settings in pillar.get("postgresql-databases", {}).items() %} 19 | hostssl {{ database }} {{ settings.owner }} 0.0.0.0/0 md5 20 | {% endfor %} 21 | -------------------------------------------------------------------------------- /salt/postgresql/server/configs/pg_ident.conf.jinja: -------------------------------------------------------------------------------- 1 | # PostgreSQL User Name Maps 2 | # ========================= 3 | # 4 | # Refer to the PostgreSQL documentation, chapter "Client 5 | # Authentication" for a complete description. A short synopsis 6 | # follows. 7 | # 8 | # This file controls PostgreSQL user name mapping. It maps external 9 | # user names to their corresponding PostgreSQL user names. Records 10 | # are of the form: 11 | # 12 | # MAPNAME SYSTEM-USERNAME PG-USERNAME 13 | # 14 | # (The uppercase quantities must be replaced by actual values.) 15 | # 16 | # MAPNAME is the (otherwise freely chosen) map name that was used in 17 | # pg_hba.conf. SYSTEM-USERNAME is the detected user name of the 18 | # client. PG-USERNAME is the requested PostgreSQL user name. The 19 | # existence of a record specifies that SYSTEM-USERNAME may connect as 20 | # PG-USERNAME. 21 | # 22 | # If SYSTEM-USERNAME starts with a slash (/), it will be treated as a 23 | # regular expression. Optionally this can contain a capture (a 24 | # parenthesized subexpression). The substring matching the capture 25 | # will be substituted for \1 (backslash-one) if present in 26 | # PG-USERNAME. 27 | # 28 | # Multiple maps may be specified in this file and used by pg_hba.conf. 29 | # 30 | # No map names are defined in the default configuration. If all 31 | # system user names and PostgreSQL user names are the same, you don't 32 | # need anything in this file. 33 | # 34 | # This file is read on server startup and when the postmaster receives 35 | # a SIGHUP signal. If you edit the file on a running system, you have 36 | # to SIGHUP the postmaster for the changes to take effect. You can 37 | # use "pg_ctl reload" to do that. 
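# For illustration only (no maps are actually defined in this file): a map
# named "salt" that allowed the system user "psf-salt" to connect as the
# PostgreSQL user "salt-master" would be a single line of the form:
#
# salt            psf-salt                salt-master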
38 | 39 | # Put your actual configuration here 40 | # ---------------------------------- 41 | 42 | # MAPNAME SYSTEM-USERNAME PG-USERNAME 43 | -------------------------------------------------------------------------------- /salt/postgresql/server/configs/recovery.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set postgresql = salt["pillar.get"]("postgresql") %} 2 | {% set postgresql_password = salt["pillar.get"]("postgresql-users:replicator") %} 3 | 4 | standby_mode = on 5 | 6 | primary_slot_name = '{{ grains["host"] }}' 7 | 8 | {{ "{{" }} with service "primary.postgresql@{{ pillar.dc }}" }} 9 | primary_conninfo = 'application_name={{ grains["fqdn"] }} host=postgresql.psf.io hostaddr={{ "{{(index . 0).Address}}" }} port={{ "{{(index . 0).Port}}" }} sslmode=verify-full sslrootcert=/etc/ssl/certs/PSF_CA.pem user=replicator password={{ postgresql_password }}' 10 | {{ "{{ end }}" }} 11 | -------------------------------------------------------------------------------- /salt/postgresql/server/configs/wal-e.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set swift_tenant = salt["pillar.get"]("wal-e:swift-tenant") %} 2 | 3 | #------------------------------------------------------------------------------ 4 | # WRITE AHEAD LOG 5 | #------------------------------------------------------------------------------ 6 | 7 | # - Archiving - 8 | 9 | archive_mode = on 10 | archive_command = 'SWIFT_TENANT="{{ swift_tenant }}" envdir /etc/wal-e.d /var/lib/postgresql/wal-e/bin/wal-e wal-push %p' 11 | archive_timeout = 60 12 | -------------------------------------------------------------------------------- /salt/pythontest/config/nginx.pythontest.conf.jinja: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name www.pythontest.net pythontest.net; 4 | root /srv/python-testdata/www/; 5 | 6 | location = /redir/ { 7 | return 302 $scheme://$host/elsewhere/; 8 | } 9 | 10 | location = /redir/with_frag/ { 11 | return 302 $scheme://$host/elsewhere/#frag; 12 | } 13 | 14 | location /unicode/ { 15 | gzip on; 16 | gzip_min_length 1000; 17 | gzip_types text/plain application/xml; 18 | } 19 | } 20 | 21 | server { 22 | listen 443 ssl; 23 | server_name self-signed.pythontest.net; 24 | root /srv/python-testdata/www/; 25 | 26 | ssl_certificate /srv/python-testdata/tls/self-signed-cert.pem; 27 | ssl_certificate_key /srv/python-testdata/tls/self-signed-key.pem; 28 | } 29 | -------------------------------------------------------------------------------- /salt/pythontest/config/vsftpd.conf: -------------------------------------------------------------------------------- 1 | ftpd_banner=Welcome to the pythontest FTP server 2 | 3 | # Run vsftp as a standalone server 4 | listen=YES 5 | 6 | # Allow anonymous FTP. (This is what the test suite uses) 7 | anonymous_enable=YES 8 | 9 | # Use nobody as the FTP user for least privelege 10 | ftp_username=nobody 11 | 12 | # Use the ftp folder from the git repo as the root 13 | anon_root=/srv/python-testdata/ftp 14 | 15 | # The only local users are the python infra team and they 16 | # do not use ftp for administration 17 | local_enable=NO 18 | 19 | # This option prevents any writing ftp commands from being issued 20 | write_enable=NO 21 | 22 | # Activate directory messages - messages given to remote users when they 23 | # go into a certain directory. 
24 | dirmessage_enable=YES 25 | 26 | # If enabled, vsftpd will display directory listings with the time 27 | # in your local time zone. The default is to display GMT. The 28 | # times returned by the MDTM FTP command are also affected by this 29 | # option. 30 | use_localtime=YES 31 | 32 | # Activate logging of uploads/downloads. 33 | xferlog_enable=YES 34 | 35 | # Make sure PORT transfer connections originate from port 20 (ftp-data). 36 | connect_from_port_20=YES 37 | # These ports must be opened in the firewall to enable data transfer 38 | pasv_enable=YES 39 | pasv_addr_resolve=YES 40 | pasv_address=www.pythontest.net 41 | pasv_max_port=10190 42 | pasv_min_port=10090 43 | 44 | # This option should be the name of a directory which is empty. Also, the 45 | # directory should not be writable by the ftp user. This directory is used 46 | # as a secure chroot() jail at times vsftpd does not require filesystem 47 | # access. 48 | secure_chroot_dir=/var/run/vsftpd/empty 49 | 50 | # Disable PAM support 51 | session_support=NO 52 | -------------------------------------------------------------------------------- /salt/redis/init.sls: -------------------------------------------------------------------------------- 1 | redis: 2 | pkg.installed: 3 | - name: redis-server 4 | -------------------------------------------------------------------------------- /salt/rsyslog/init.sls: -------------------------------------------------------------------------------- 1 | rsyslog: 2 | 3 | pkg: 4 | - installed 5 | 6 | service.running: 7 | - enable: True 8 | - restart: True 9 | - watch: 10 | - file: /etc/rsyslog.d/*.conf 11 | 12 | 13 | /etc/rsyslog.d/PLACEHOLDER.conf: 14 | file.managed: 15 | - replace: False 16 | -------------------------------------------------------------------------------- /salt/ssh/init.sls: -------------------------------------------------------------------------------- 1 | {% set host_keys = salt["pillar.get"]("ssh_host_keys") %} 2 | 3 | 4 | ssh: 5 | service.running: 6 | - enable: True 7 | - restart: True 8 | - watch: 9 | - file: /etc/ssh/sshd_config 10 | {% for fn in host_keys %} 11 | - file: /etc/ssh/{{ fn }} 12 | {% endfor %} 13 | 14 | 15 | /etc/ssh/sshd_config: 16 | file.managed: 17 | - source: salt://ssh/configs/sshd_config.jinja 18 | - template: jinja 19 | - user: root 20 | - group: root 21 | - mode: "0644" 22 | 23 | 24 | /usr/lib/tmpfiles.d/sshd-priv-sep.conf: 25 | file.managed: 26 | - contents: | 27 | d /run/sshd 0755 root root 28 | - user: root 29 | - group: root 30 | - mode: "0644" 31 | 32 | 33 | # If we have defined host keys for this server, then we want to drop them here 34 | # instead of whatever is here by default. 
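# Purely illustrative sketch of the pillar shape this loop expects (real key
# material would come from pillar data not shown here): each entry maps a
# filename under /etc/ssh/ to that file's contents, and the loop below applies
# stricter permissions to anything that does not end in .pub, e.g.
#
# ssh_host_keys:
#   ssh_host_ed25519_key: |
#     -----BEGIN OPENSSH PRIVATE KEY-----
#     ...
#   ssh_host_ed25519_key.pub: ssh-ed25519 AAAA... root@example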
35 | {% for fn in host_keys %} 36 | /etc/ssh/{{ fn }}: 37 | file.managed: 38 | - contents_pillar: ssh_host_keys:{{ fn }} 39 | - owner: root 40 | - group: root 41 | {% if fn.endswith('.pub') %} 42 | - mode: "0644" 43 | {% else %} 44 | - mode: "0600" 45 | - show_diff: False 46 | {% endif %} 47 | {% endfor %} 48 | -------------------------------------------------------------------------------- /salt/sudoers/config/salt.jinja: -------------------------------------------------------------------------------- 1 | {% for sudoer_group in sudoers.split(',') %} 2 | {% for command in pillar['sudoer_groups'][sudoer_group]['commands'] %} 3 | %{{sudoer_group}} {{command}} 4 | {% endfor %} 5 | {% endfor %} 6 | -------------------------------------------------------------------------------- /salt/sudoers/init.sls: -------------------------------------------------------------------------------- 1 | {% if 'sudoer_groups' in pillar %} 2 | {% for group in pillar.get('sudoer_groups', {}) %} 3 | {{ group }}-sudoer_group: 4 | group.present: 5 | - name: {{ group }} 6 | {% endfor %} 7 | /etc/sudoers.d/salt: 8 | file.managed: 9 | - source: salt://sudoers/config/salt.jinja 10 | - template: jinja 11 | - context: 12 | sudoers: {{ pillar.get('sudoer_groups', {}).keys()|join(',') }} 13 | - user: root 14 | - group: root 15 | - mode: "0640" 16 | {% endif %} 17 | -------------------------------------------------------------------------------- /salt/tls/config/lego.conf.jinja: -------------------------------------------------------------------------------- 1 | ssl_certificate /etc/lego/certificates/{{ grains['fqdn'] }}.crt; 2 | ssl_certificate_key /etc/lego/certificates/{{ grains['fqdn'] }}.key; 3 | 4 | 5 | ssl_protocols TLSv1.2; 6 | ssl_dhparam /etc/ssl/private/dhparams.pem; 7 | ssl_ciphers EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA512:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:ECDH+AESG; 8 | ssl_prefer_server_ciphers on; 9 | 10 | ssl_session_timeout 60m; 11 | ssl_session_cache shared:SSL:32m; 12 | ssl_buffer_size 8k; 13 | 14 | ssl_stapling on; 15 | ssl_stapling_verify on; 16 | resolver 8.8.8.8 8.8.4.4 valid=300s; 17 | resolver_timeout 1s; 18 | -------------------------------------------------------------------------------- /salt/tls/config/pebble-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "pebble": { 3 | "listenAddress": "0.0.0.0:14000", 4 | "managementListenAddress": "0.0.0.0:15000", 5 | "certificate": "/etc/ssl/private/salt-master.vagrant.psf.io.pem", 6 | "privateKey": "/etc/ssl/private/salt-master.vagrant.psf.io.pem", 7 | "httpPort": 80, 8 | "tlsPort": 443, 9 | "ocspResponderURL": "", 10 | "externalAccountBindingRequired": false, 11 | "retryAfter": { 12 | "authz": 3, 13 | "order": 5 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /salt/tls/config/pebble.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Pebble 3 | After=network.target 4 | 5 | [Service] 6 | Type=simple 7 | Environment=PEBBLE_VA_ALWAYS_VALID=1 8 | ExecStart=/usr/local/bin/pebble -config /etc/pebble-config.json 9 | KillMode=process 10 | User=root 11 | Group=root 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /salt/tls/init.sls: -------------------------------------------------------------------------------- 1 | ssl-cert: 2 | pkg.installed 3 | 4 | 5 | {% for name in 
salt["pillar.get"]("tls:ca", {}) %} # " Syntax Hack 6 | /etc/ssl/certs/{{ name }}.pem: 7 | file.managed: 8 | - contents_pillar: tls:ca:{{ name }} 9 | - user: root 10 | - group: ssl-cert 11 | - mode: "0644" 12 | - require: 13 | - pkg: ssl-cert 14 | {% endfor %} 15 | 16 | 17 | {% for name in salt["pillar.get"]("tls:certs", {}) %} # " Syntax Hack 18 | /etc/ssl/private/{{ name }}.pem: 19 | file.managed: 20 | - contents_pillar: tls:certs:{{ name }} 21 | - user: root 22 | - group: ssl-cert 23 | - mode: "0640" 24 | - show_diff: False 25 | - require: 26 | - pkg: ssl-cert 27 | {% endfor %} 28 | -------------------------------------------------------------------------------- /salt/tls/lego.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - nginx 3 | 4 | {% if pillar["dc"] == "vagrant" %} 5 | salt-master-vagrant-host-entry: 6 | host.present: 7 | - ip: 192.168.50.2 8 | - names: 9 | - salt-master.vagrant.psf.io 10 | - salt-master 11 | {% endif %} 12 | 13 | crypto_packages: 14 | pkg.installed: 15 | - pkgs: 16 | - openssl 17 | 18 | generate_dhparams: 19 | cmd.run: 20 | - name: openssl dhparam -out /etc/ssl/private/dhparams.pem 4096 21 | - creates: /etc/ssl/private/dhparams.pem 22 | - require: 23 | - pkg: crypto_packages 24 | 25 | lego_extract: 26 | archive.extracted: 27 | - name: /usr/local/bin/ 28 | - if_missing: /usr/local/bin/lego 29 | {% if grains.osarch == 'amd64' %} 30 | - source: https://github.com/go-acme/lego/releases/download/v4.8.0/lego_v4.8.0_linux_amd64.tar.gz 31 | - source_hash: sha256=e8a0d808721af53f64977d4c4811e596cb273e1b950fadd5bf39b6781d2c311c 32 | {% elif grains.osarch == 'arm64' %} 33 | - source: https://github.com/go-acme/lego/releases/download/v4.8.0/lego_v4.8.0_linux_arm64.tar.gz 34 | - source_hash: sha256=b2f43fdccdd434e00547750f40e4203f1a5fdcd5186764d3c52a05635600a220 35 | {% endif %} 36 | - archive_format: tar 37 | - tar_options: -J --strip-components=1 lego 38 | - enforce_toplevel: False 39 | 40 | /etc/lego: 41 | file.directory: 42 | - user: root 43 | - group: root 44 | - mode: "0755" 45 | 46 | /etc/lego/.well-known/acme-challenge: 47 | file.directory: 48 | - user: nginx 49 | - group: root 50 | - mode: "0750" 51 | - makedirs: True 52 | - require: 53 | - file: /etc/lego 54 | -------------------------------------------------------------------------------- /salt/tls/pebble.sls: -------------------------------------------------------------------------------- 1 | {% if pillar.get('pebble', {'enabled': False}).enabled %} 2 | pebble-build-deps: 3 | pkg.installed: 4 | - pkgs: 5 | - golang 6 | - git 7 | 8 | pebble-golang-workspace: 9 | file.directory: 10 | - name: /usr/local/golang/pebble 11 | - makedirs: True 12 | 13 | pebble-source: 14 | git.latest: 15 | - name: https://github.com/letsencrypt/pebble.git 16 | - rev: v2.4.0 17 | - force_reset: remote-changes 18 | - target: /usr/local/src/pebble 19 | - require: 20 | - pkg: pebble-build-deps 21 | 22 | pebble-build: 23 | cmd.run: 24 | - creates: /usr/local/golang/pebble/bin/pebble 25 | - name: go install ./cmd/pebble 26 | - cwd: /usr/local/src/pebble 27 | - env: 28 | - GOPATH: /usr/local/golang/pebble 29 | - require: 30 | - git: pebble-source 31 | - file: pebble-golang-workspace 32 | 33 | pebble-install: 34 | file.copy: 35 | - name: /usr/local/bin/pebble 36 | - source: /usr/local/golang/pebble/bin/pebble 37 | - mode: "0755" 38 | 39 | pebble-config: 40 | file.managed: 41 | - name: /etc/pebble-config.json 42 | - source: salt://tls/config/pebble-config.json 43 | 44 | pebble-service: 45 | 
file.managed: 46 | - name: /lib/systemd/system/pebble.service 47 | - source: salt://tls/config/pebble.service 48 | - mode: "0644" 49 | 50 | service.running: 51 | - name: pebble 52 | - enable: True 53 | - restart: True 54 | - require: 55 | - file: pebble-install 56 | - file: /etc/pebble-config.json 57 | - file: /etc/ssl/private/salt-master.vagrant.psf.io.pem 58 | - watch: 59 | - file: /etc/pebble-config.json 60 | - file: /etc/ssl/certs/PSF_CA.pem 61 | - file: /etc/ssl/private/salt-master.vagrant.psf.io.pem 62 | {% endif %} 63 | -------------------------------------------------------------------------------- /salt/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | '*': 3 | - base.auto-highstate 4 | - base.harden 5 | - base.mail 6 | - base.repo 7 | - base.salt 8 | - base.sanity 9 | - consul 10 | - groups 11 | - users 12 | - ssh 13 | - firewall 14 | - sudoers 15 | - backup.client 16 | - unattended-upgrades 17 | - tls 18 | - rsyslog 19 | - datadog 20 | - base.motd 21 | - base.swap 22 | 23 | 'backup-server': 24 | - match: nodegroup 25 | - backup.server 26 | 27 | 'bugs': 28 | - match: nodegroup 29 | - bugs 30 | - bugs.cpython 31 | - bugs.jython 32 | - bugs.roundup 33 | 34 | 'buildbot': 35 | - match: nodegroup 36 | - pgbouncer 37 | - buildbot 38 | 39 | 'cdn-logs': 40 | - match: nodegroup 41 | - cdn-logs 42 | 43 | 'codespeed': 44 | - match: nodegroup 45 | - pgbouncer 46 | - codespeed 47 | 48 | 'docs': 49 | - match: nodegroup 50 | - docs 51 | 52 | 'downloads': 53 | - match: nodegroup 54 | - downloads 55 | 56 | 'elasticsearch': 57 | - match: nodegroup 58 | - elasticsearch 59 | 60 | 'hg': 61 | - match: nodegroup 62 | - hg 63 | 64 | 'loadbalancer': 65 | - match: nodegroup 66 | - haproxy 67 | 68 | 'moin': 69 | - match: nodegroup 70 | - moin 71 | 72 | 'planet': 73 | - match: nodegroup 74 | - planet 75 | 76 | 'postgresql': 77 | - match: nodegroup 78 | - postgresql.server 79 | - postgresql.admin 80 | 81 | 'pythontest': 82 | - match: nodegroup 83 | - pythontest 84 | 85 | 'salt-master': 86 | - match: nodegroup 87 | - postgresql.admin 88 | - dns 89 | - tls.pebble 90 | -------------------------------------------------------------------------------- /salt/unattended-upgrades/config/10periodic: -------------------------------------------------------------------------------- 1 | APT::Periodic::Update-Package-Lists "1"; 2 | APT::Periodic::Download-Upgradeable-Packages "1"; 3 | APT::Periodic::AutocleanInterval "7"; 4 | APT::Periodic::Unattended-Upgrade "1"; 5 | -------------------------------------------------------------------------------- /salt/unattended-upgrades/init.sls: -------------------------------------------------------------------------------- 1 | unattended-upgrades: 2 | pkg.installed 3 | 4 | 5 | # Originally this file was used, however it was moved to 10periodic to make it 6 | # more clear that it is configuring the APT::Periodic and not the actual 7 | # automated upgrades. 
8 | /etc/apt/apt.conf.d/20auto-upgrades: 9 | file.absent 10 | 11 | 12 | /etc/apt/apt.conf.d/10periodic: 13 | file.managed: 14 | - source: salt://unattended-upgrades/config/10periodic 15 | - user: root 16 | - group: root 17 | - mode: "0644" 18 | 19 | 20 | /etc/apt/apt.conf.d/50unattended-upgrades: 21 | file.managed: 22 | - source: salt://unattended-upgrades/config/50unattended-upgrades 23 | - user: root 24 | - group: root 25 | - mode: "0644" 26 | - require: 27 | - pkg: unattended-upgrades 28 | -------------------------------------------------------------------------------- /salt/users/config/authorized_keys.jinja: -------------------------------------------------------------------------------- 1 | {% for key in ssh_keys %} 2 | {{key}}{% endfor %} 3 | -------------------------------------------------------------------------------- /salt/users/dotfiles/dstufft.sls: -------------------------------------------------------------------------------- 1 | dstufft-git: 2 | pkg.installed: 3 | - name: git 4 | 5 | https://github.com/dstufft/dotfiles.git: 6 | git.latest: 7 | - target: /home/psf-users/dstufft/.dotfiles 8 | - user: dstufft 9 | - force_clone: True 10 | - force_checkout: True 11 | - force_reset: True 12 | - require: 13 | - pkg: dstufft-git 14 | - user: dstufft 15 | 16 | /home/psf-users/dstufft/.zshenv: 17 | file.symlink: 18 | - target: /home/psf-users/dstufft/.dotfiles/zsh/.zshenv 19 | - user: dstufft 20 | - group: dstufft 21 | - require: 22 | - git: https://github.com/dstufft/dotfiles.git 23 | -------------------------------------------------------------------------------- /salt/users/dotfiles/init.sls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/python/psf-salt/fe2896e7d5b0a651db89a037b1c3aa7c9b5ff6df/salt/users/dotfiles/init.sls -------------------------------------------------------------------------------- /tasks/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import invoke 4 | 5 | from . import salt 6 | 7 | ns = invoke.Collection(salt) 8 | -------------------------------------------------------------------------------- /tasks/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import, division, print_function 2 | 3 | import contextlib 4 | import os 5 | 6 | import fabric.api 7 | 8 | 9 | @contextlib.contextmanager 10 | def cd(path): 11 | current_path = os.path.abspath(os.curdir) 12 | os.chdir(path) 13 | try: 14 | yield 15 | finally: 16 | os.chdir(current_path) 17 | 18 | 19 | @contextlib.contextmanager 20 | def ssh_host(host): 21 | current_value = fabric.api.env.host_string 22 | fabric.api.env.host_string = host 23 | try: 24 | yield 25 | finally: 26 | fabric.api.env.host_string = current_value 27 | -------------------------------------------------------------------------------- /tests/docs-redirects/nginx.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 10000; 3 | include docs-redirects.conf; 4 | } 5 | -------------------------------------------------------------------------------- /tests/docs-redirects/specs/default-root.hurl: -------------------------------------------------------------------------------- 1 | # Assert that Python 3 is the default at the root. 
2 | 3 | GET {{host}}/ 4 | HTTP 302 5 | [Asserts] 6 | header "Location" == "http://localhost/3/" 7 | 8 | 9 | # Assert that Python 3 is the default at the root of each translation. 10 | 11 | GET {{host}}/es/ 12 | HTTP 302 13 | [Asserts] 14 | header "Location" == "http://localhost/es/3/" 15 | 16 | GET {{host}}/fr/ 17 | HTTP 302 18 | [Asserts] 19 | header "Location" == "http://localhost/fr/3/" 20 | 21 | GET {{host}}/id/ 22 | HTTP 302 23 | [Asserts] 24 | header "Location" == "http://localhost/id/3/" 25 | 26 | GET {{host}}/it/ 27 | HTTP 302 28 | [Asserts] 29 | header "Location" == "http://localhost/it/3/" 30 | 31 | GET {{host}}/ja/ 32 | HTTP 302 33 | [Asserts] 34 | header "Location" == "http://localhost/ja/3/" 35 | 36 | GET {{host}}/ko/ 37 | HTTP 302 38 | [Asserts] 39 | header "Location" == "http://localhost/ko/3/" 40 | 41 | GET {{host}}/pl/ 42 | HTTP 302 43 | [Asserts] 44 | header "Location" == "http://localhost/pl/3/" 45 | 46 | GET {{host}}/pt-br/ 47 | HTTP 302 48 | [Asserts] 49 | header "Location" == "http://localhost/pt-br/3/" 50 | 51 | GET {{host}}/tr/ 52 | HTTP 302 53 | [Asserts] 54 | header "Location" == "http://localhost/tr/3/" 55 | 56 | GET {{host}}/uk/ 57 | HTTP 302 58 | [Asserts] 59 | header "Location" == "http://localhost/uk/3/" 60 | 61 | GET {{host}}/zh-cn/ 62 | HTTP 302 63 | [Asserts] 64 | header "Location" == "http://localhost/zh-cn/3/" 65 | 66 | GET {{host}}/zh-tw/ 67 | HTTP 302 68 | [Asserts] 69 | header "Location" == "http://localhost/zh-tw/3/" 70 | -------------------------------------------------------------------------------- /tests/docs-redirects/specs/devguide.hurl: -------------------------------------------------------------------------------- 1 | # Assert that /devguide/ redirects to the Developer's Guide. 2 | 3 | GET {{host}}/devguide/ 4 | HTTP 301 5 | [Asserts] 6 | header "Location" == "https://devguide.python.org/" 7 | 8 | GET {{host}}/devguide/index.html 9 | HTTP 301 10 | [Asserts] 11 | header "Location" == "https://devguide.python.org/index.html" 12 | 13 | 14 | # Assert that /documenting/ redirects to the Developer's Guide. 
15 | 16 | GET {{host}}/documenting/ 17 | HTTP 301 18 | [Asserts] 19 | header "Location" == "https://devguide.python.org/documentation/start-documenting/" 20 | 21 | GET {{host}}/documenting/index.html 22 | HTTP 301 23 | [Asserts] 24 | header "Location" == "https://devguide.python.org/documentation/start-documenting/" 25 | 26 | GET {{host}}/documenting/intro.html 27 | HTTP 301 28 | [Asserts] 29 | header "Location" == "https://devguide.python.org/documentation/start-documenting/#introduction" 30 | 31 | GET {{host}}/documenting/style.html 32 | HTTP 301 33 | [Asserts] 34 | header "Location" == "https://devguide.python.org/documentation/style-guide/" 35 | 36 | GET {{host}}/documenting/rest.html 37 | HTTP 301 38 | [Asserts] 39 | header "Location" == "https://devguide.python.org/documentation/markup/" 40 | 41 | GET {{host}}/documenting/markup.html 42 | HTTP 301 43 | [Asserts] 44 | header "Location" == "https://devguide.python.org/documentation/markup/" 45 | 46 | GET {{host}}/documenting/fromlatex.html 47 | HTTP 301 48 | [Asserts] 49 | header "Location" == "https://devguide.python.org/documentation/markup/" 50 | 51 | GET {{host}}/documenting/building.html 52 | HTTP 301 53 | [Asserts] 54 | header "Location" == "https://devguide.python.org/documentation/start-documenting/#building-the-documentation" 55 | -------------------------------------------------------------------------------- /tests/docs-redirects/specs/ftp-download.hurl: -------------------------------------------------------------------------------- 1 | # Assert that /ftp/ redirects to www.python.org/ftp 2 | 3 | GET {{host}}/ftp/python/doc 4 | HTTP 301 5 | [Asserts] 6 | header "Location" == "https://www.python.org/ftp/python/doc" 7 | 8 | GET {{host}}/ftp/python/doc/3.4.5/python-3.4.5-docs-text.zip 9 | HTTP 301 10 | [Asserts] 11 | header "Location" == "https://www.python.org/ftp/python/doc/3.4.5/python-3.4.5-docs-text.zip" 12 | -------------------------------------------------------------------------------- /tests/docs-redirects/specs/py2.5.hurl: -------------------------------------------------------------------------------- 1 | # Assert that pre-Python 2.5 URIs are redirected to their 2 | # Python 2.6-and-later locations. 
3 | 4 | GET {{host}}/lib/ 5 | HTTP 301 6 | [Asserts] 7 | header "Location" == "https://localhost/3/library/" 8 | 9 | GET {{host}}/lib/module-base64.html 10 | HTTP 301 11 | [Asserts] 12 | header "Location" == "https://localhost/3/library/base64.html" 13 | 14 | GET {{host}}/lib/module-sys.html 15 | HTTP 301 16 | [Asserts] 17 | header "Location" == "https://localhost/3/library/sys.html" 18 | 19 | GET {{host}}/tut/ 20 | HTTP 301 21 | [Asserts] 22 | header "Location" == "https://localhost/3/tutorial/" 23 | 24 | GET {{host}}/tut/tut.html 25 | HTTP 301 26 | [Asserts] 27 | header "Location" == "https://localhost/3/tutorial/" 28 | 29 | GET {{host}}/api/ 30 | HTTP 301 31 | [Asserts] 32 | header "Location" == "https://localhost/3/c-api/" 33 | 34 | GET {{host}}/ext/ 35 | HTTP 301 36 | [Asserts] 37 | header "Location" == "https://localhost/3/extending/" 38 | 39 | GET {{host}}/dist/ 40 | HTTP 301 41 | [Asserts] 42 | header "Location" == "https://localhost/3/" 43 | 44 | GET {{host}}/inst/ 45 | HTTP 301 46 | [Asserts] 47 | header "Location" == "https://localhost/3/" 48 | 49 | GET {{host}}/doc/ 50 | HTTP 301 51 | [Asserts] 52 | header "Location" == "https://devguide.python.org/documentation/start-documenting/" 53 | 54 | GET {{host}}/ref/ 55 | HTTP 301 56 | [Asserts] 57 | header "Location" == "https://localhost/3/reference/" 58 | -------------------------------------------------------------------------------- /tests/docs-redirects/specs/py3k.hurl: -------------------------------------------------------------------------------- 1 | # Assert that Py3k is Python 3. 2 | 3 | GET {{host}}/py3k 4 | HTTP 301 5 | [Asserts] 6 | header "Location" == "https://localhost/3" 7 | 8 | GET {{host}}/py3k/whatsnew/3.0.html 9 | HTTP 301 10 | [Asserts] 11 | header "Location" == "https://localhost/3/whatsnew/3.0.html" 12 | -------------------------------------------------------------------------------- /tests/docs-redirects/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | 4 | # Test the docs redirects. This script must be run from the repository root. 5 | 6 | docker stop docs-redirects-nginx || true 7 | 8 | docker run --name docs-redirects-nginx --detach --quiet --rm --tty \ 9 | --mount type=bind,source=./tests/docs-redirects/nginx.conf,target=/etc/nginx/conf.d/docs.conf,readonly \ 10 | --mount type=bind,source=./salt/docs/config/nginx.docs-redirects.conf,target=/etc/nginx/docs-redirects.conf,readonly \ 11 | -p 10000:10000 \ 12 | nginx:1.26.1-alpine 13 | 14 | # Wait for the nginx container to start… 15 | sleep 1 16 | 17 | hurl --color --continue-on-error --variable host=http://localhost:10000 --test ./tests/docs-redirects/specs/*.hurl 18 | 19 | docker stop docs-redirects-nginx 20 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = docs, lint 3 | skipsdist = true 4 | 5 | [testenv:docs] 6 | deps = 7 | -rdocs/requirements.txt 8 | basepython = python3 9 | commands = 10 | sphinx-build -W -b html -d {envtmpdir}/doctrees docs docs/_build/html 11 | 12 | [testenv:lint] 13 | deps = 14 | salt-lint 15 | allowlist_externals = 16 | /bin/bash 17 | /usr/bin/bash 18 | basepython = python3 19 | commands = 20 | bash -exc "salt-lint $(find . -type f -name *.sls | xargs)" 21 | --------------------------------------------------------------------------------