├── .flake8 ├── .gitignore ├── .travis.yml ├── LICENSE ├── README.rst ├── ansible.cfg ├── bootstrap ├── bootstrap.yml ├── buildbot.asc ├── common.yml ├── docs ├── backups.txt ├── fingerprints.txt ├── hardware.txt ├── network.txt ├── trunas.txt └── upgrading-freebsd.rst ├── files ├── collectevents.py └── docs-redirects ├── gcloud_config ├── README.md └── kube-system-pdb.yaml ├── group_vars └── all ├── host-p12tic-local.yml ├── host-service1.yml ├── host-service2.yml ├── host-service3.yml ├── host-vm1.yml ├── jail-events.yml ├── jail-ftp.yml ├── jail-lists.yml ├── jail-mx.yml ├── jail-mysql.yml ├── jail-ns1.yml ├── jail-syslog.yml ├── library └── bb_filters.py ├── load-secrets.yml ├── local.yml ├── localhost ├── roles ├── admin-users │ ├── files │ │ └── authorized_keys │ │ │ ├── amar.keys │ │ │ ├── dustin.keys │ │ │ ├── mss.keys │ │ │ ├── p12tic.keys │ │ │ ├── sean.keys │ │ │ └── tardyp.keys │ └── tasks │ │ └── main.yml ├── base-jailhost │ ├── files │ │ └── flavours │ │ │ └── base │ │ │ └── etc │ │ │ ├── make.conf │ │ │ ├── periodic.conf │ │ │ └── rc.conf │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── ezjail.conf │ │ └── pf.conf │ └── vars │ │ └── main.yml ├── base-servicehost │ └── meta │ │ └── main.yml ├── base-vm │ └── meta │ │ └── main.yml ├── base │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── sudoers-wheel │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── ansible-pull.yml │ │ ├── hosts.yml │ │ ├── main.yml │ │ ├── periodic.yml │ │ ├── python-cert-symlink.yml │ │ ├── rootpw.yml │ │ ├── sendmail.yml │ │ ├── sudo.yml │ │ └── syslog.yml │ ├── templates │ │ ├── hosts.j2 │ │ └── run-ansible-pull.sh.j2 │ └── vars │ │ └── main.yml ├── bb-master-docker │ ├── files │ │ ├── .dockerignore │ │ ├── Dockerfile │ │ ├── Dockerfile.deps │ │ ├── docker-compose.yml │ │ └── run_buildbot.sh │ └── tasks │ │ └── main.yml ├── bb-slave │ ├── README.rst │ └── tasks │ │ └── main.yml ├── dns │ ├── files │ │ ├── empty.db │ │ ├── localhost-forward.db │ │ └── localhost-reverse.db │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── 224-255.128-255.10.211.140.in-addr.arpa │ │ ├── buildbot.net │ │ ├── named.conf │ │ ├── named.conf.options │ │ ├── named.conf.rfcs │ │ ├── named.conf.zones │ │ └── rndc.conf │ └── vars │ │ └── main.yml ├── docker-worker │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── worker-startup.sh ├── docs │ ├── files │ │ ├── Dockerfile.build │ │ ├── Dockerfile.run │ │ ├── build_docs.sh │ │ ├── docker-compose.yml │ │ ├── nginx.conf │ │ └── refresh_data.sh │ └── tasks │ │ └── main.yml ├── elk │ ├── README.rst │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── elasticsearch.yml │ │ ├── kibana.yml │ │ ├── logstash_events.conf │ │ ├── logstash_syslog.conf │ │ └── oauth2_proxy.conf │ └── vars │ │ └── main.yml ├── ftp │ ├── files │ │ ├── buildbot-favicon.ico │ │ ├── ftp-nginx.conf │ │ └── robots.txt │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── jail │ ├── README.rst │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── create_jail.py │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── nginx-proxy-letsencrypt │ ├── files │ │ ├── docker-compose.yml │ │ └── refresh_certs.sh │ └── tasks │ │ └── main.yml ├── nginx-proxy │ ├── files │ │ └── docker-compose.yml │ └── tasks │ │ └── main.yml ├── nginx │ ├── README.rst │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ 
├── domain-redirect │ │ ├── multiproxy │ │ ├── nginx.conf │ │ ├── proxy │ │ ├── ssl.conf │ │ ├── static │ │ └── uwsgi │ └── vars │ │ └── main.yml ├── packages │ ├── README.rst │ ├── meta │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── postfix │ ├── files │ │ ├── local-host-names │ │ ├── main.cf │ │ ├── master.cf │ │ ├── transport │ │ └── virtusertable │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── ssh-client │ ├── key │ │ └── tasks │ │ │ └── main.yml │ └── known_hosts │ │ ├── defaults │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ └── templates │ │ └── known_hosts.j2 ├── ssh │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── sshd_config.j2 ├── supervisor-service │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── service.conf.j2 ├── supervisor │ ├── files │ │ └── supervisor-wrapper │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── vars │ │ └── main.yml ├── syslog-aggregator │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── syslog-ng-buildbot.conf.j2 │ │ └── syslog-ng.conf.j2 │ └── vars │ │ └── main.yml ├── user │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml ├── uwsgi │ ├── files │ │ └── uwsgi.ini │ ├── handlers │ │ └── main.yaml │ └── tasks │ │ └── main.yaml ├── vsftp │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ ├── templates │ │ └── vsftpd.conf.j2 │ └── vars │ │ └── main.yml └── www │ ├── files │ ├── Dockerfile.build │ ├── Dockerfile.run │ ├── build_www.sh │ ├── docker-compose.yml │ ├── nginx.conf │ └── refresh_data.sh │ └── tasks │ └── main.yml ├── secrets.yml ├── templates ├── run-once └── track-config.sh ├── tox.ini ├── track-config.yml └── vault-merge.sh /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | 3 | show-source = yes 4 | statistics = yes 5 | count = yes 6 | max-line-length = 100 7 | 8 | # List of currently ignored PEP8 issues. Some of them definitely should be 9 | # enabled in the future.
10 | # 11 | # E122 continuation line missing indentation or outdented 12 | # E123 closing bracket does not match indentation of opening bracket's line 13 | # (pep8 seems to misdiagnose this) 14 | # E126 continuation line over-indented for hanging indent 15 | # E128 continuation line under-indented for visual indent 16 | # E211 whitespace before '(' 17 | # E501 line too long (102 > 100 characters) 18 | # E711 comparison to None should be 'if cond is None:' 19 | # E712 comparison to False should be 'if cond is False:' or 'if not cond:' 20 | # E721 do not compare types, use 'isinstance()' 21 | 22 | ignore = E122,E123,E126,E128,E211,E501,E711,E712,E721 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *devel.yml 2 | *.retry 3 | .*.swp 4 | dev-hosts 5 | dev-secrets.yml 6 | *.pyc 7 | .tox 8 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - 2.7 5 | 6 | env: 7 | - TOXENV=lint 8 | - TOXENV=ansible-syntax 9 | 10 | install: 11 | - pip install tox 12 | 13 | script: 14 | - tox 15 | 16 | notifications: 17 | email: false 18 | 19 | sudo: false 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright Buildbot Team Members 2 | 3 | Permission is hereby granted, free of charge, to any person 4 | obtaining a copy of this software and associated documentation 5 | files (the "Software"), to deal in the Software without 6 | restriction, including without limitation the rights to use, 7 | copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | copies of the Software, and to permit persons to whom the 9 | Software is furnished to do so, subject to the following 10 | conditions: 11 | 12 | The above copyright notice and this permission notice shall be 13 | included in all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 17 | OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 19 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 20 | WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Ansible 2 | ======= 3 | 4 | Production Runs 5 | --------------- 6 | 7 | Production runs of Ansible take place on the host or jail to be configured, as the ``{{service_user}}``, with a command line such as :: 8 | 9 | ansible-playbook local.yml --vault-password-file=~/.vault-password 10 | 11 | 12 | This playbook automatically determines which host it's running on based on the hostname and configures it accordingly. 13 | Supply host-specific variables in ``group_vars/$hostname``. 14 | 15 | Bootstrapping 16 | ------------- 17 | 18 | To bootstrap a newly-installed system, use ``./bootstrap HOSTNAME``. 19 | The default is to log in to the remote system using your current username.
20 | To change the remote login, use ``./bootstrap HOSTNAME USERNAME``. 21 | Before running the script, ensure: 22 | 23 | * The basic system is installed (FreeBSD 10.0+) on the host 24 | * Networking for the host is fully configured 25 | * The hostname (``uname -n``) of the host is set correctly 26 | * Sudo is configured for a non-root user to become root 27 | * You know the vault password 28 | 29 | Secrets 30 | ------- 31 | 32 | Secrets are stored in ``secrets.yml`` in the top-level directory, which is encrypted with `ansible-vault `__. 33 | To run Ansible with these production secrets, you will need to supply a shared vault password. 34 | 35 | All secrets are loaded into Ansible variables. 36 | By convention, these variables should be named with the prefix ``secret_``. 37 | 38 | You can edit the secrets with ``ansible-vault --vault-password-file=~/.vault-password edit secrets.yml``. 39 | 40 | Other files 41 | =========== 42 | 43 | This repository contains a few files unrelated to Ansible: 44 | 45 | - ``buildbot.asc`` - Buildbot Release Team Keyring 46 | - ``scripts/`` - some scripts not under configuration management yet 47 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = localhost 3 | ansible_managed = Managed by Ansible 4 | 5 | deprecation_warnings = True 6 | error_on_undefined_vars = True 7 | legacy_playbook_variables = no 8 | allow_world_readable_tmpfiles = True 9 | 10 | display_skipped_hosts = no 11 | 12 | filter_plugins = library/ 13 | 14 | pipelining = True 15 | -------------------------------------------------------------------------------- /bootstrap: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | # 3 | # This script sets up a brand-new FreeBSD host as the proper piece of Buildbot 4 | # infrastructure. See README.rst 5 | # 6 | # Usage: ./bootstrap HOST [REMOTE_USER] 7 | 8 | set -e 9 | 10 | HOSTNAME="${1}" 11 | REMOTE_USER=${2:-$USER} 12 | if [ -z "${HOSTNAME}" ]; then 13 | echo "USAGE: ./bootstrap hostname [remote-user]" 14 | exit 1 15 | fi 16 | 17 | # don't prompt to add the SSH key 18 | SSH_ARGS="$SSH_ARGS -o StrictHostKeyChecking=no" 19 | # use a control connection to send multiple SSH commands through a single connection 20 | SSH_ARGS="$SSH_ARGS -o ControlPersist=10m" 21 | SSH_ARGS="$SSH_ARGS -o ControlMaster=auto" 22 | 23 | export ANSIBLE_SSH_ARGS="$SSH_ARGS" 24 | 25 | ansible-playbook --ask-vault-pass \ 26 | -e target_user=$REMOTE_USER \ 27 | -e target_host=${HOSTNAME} \ 28 | bootstrap.yml 29 | -------------------------------------------------------------------------------- /bootstrap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: prepare temporary group 3 | hosts: all 4 | gather_facts: no 5 | connection: local 6 | 7 | tasks: 8 | - name: create temporary group for the target host 9 | add_host: 10 | hostname: "{{ target_host }}" 11 | groupname: new_host 12 | 13 | # Needs to be performed here, so the secrets are available for the new group 14 | - import_playbook: "load-secrets.yml" 15 | 16 | - name: bootstrap remote server 17 | hosts: new_host 18 | connection: ssh 19 | remote_user: "{{ target_user }}" 20 | become: yes 21 | become_user: root 22 | gather_facts: no 23 | vars: 24 | # Fortunately, '/root' always exists. This variable is used to prevent 25 | # typing errors.
26 | bootstrap_script: "/root/run-once" 27 | 28 | tasks: 29 | # There's an overlap here with run-once script. I (sa2ajj) could not think 30 | # of any "nicer" way though. 31 | - name: install ansible 32 | raw: "pkg install --yes python3 {{ pkg_ansible_version }}" 33 | 34 | - name: prepare bootstrap script 35 | template: 36 | src: "templates/run-once" 37 | dest: "{{ bootstrap_script }}" 38 | mode: "0700" 39 | 40 | - name: execute bootstrap script 41 | command: "{{ bootstrap_script }}" 42 | 43 | - name: remove bootstrap script 44 | file: 45 | path: "{{ bootstrap_script }}" 46 | state: absent 47 | -------------------------------------------------------------------------------- /common.yml: -------------------------------------------------------------------------------- 1 | # This is the common playbook for local and ssh connection 2 | --- 3 | - import_playbook: "load-secrets.yml" 4 | - import_playbook: "track-config.yml" 5 | vars: 6 | commit_message: "pre-ansible checkin" 7 | 8 | # Service hosts and their jails 9 | - import_playbook: "host-service1.yml" 10 | - import_playbook: "jail-ns1.yml" 11 | - import_playbook: "jail-mx.yml" 12 | - import_playbook: "jail-syslog.yml" 13 | 14 | - import_playbook: "host-service2.yml" 15 | - import_playbook: "jail-ftp.yml" 16 | - import_playbook: "jail-lists.yml" 17 | 18 | - import_playbook: "host-service3.yml" 19 | - import_playbook: "jail-mysql.yml" 20 | - import_playbook: "jail-events.yml" 21 | 22 | - import_playbook: "host-vm1.yml" 23 | - import_playbook: "host-p12tic-local.yml" 24 | 25 | - import_playbook: "track-config.yml" 26 | vars: 27 | commit_message: "post-ansible checkin" 28 | -------------------------------------------------------------------------------- /docs/backups.txt: -------------------------------------------------------------------------------- 1 | For backups, service1 has a Bluray burner that supports dual-write. There is 2 | also a consumer grade (160gb) intel SSD on it to accept rapid backups from all 3 | our machines. 
4 | 5 | Ticket about backups: 6 | http://trac.buildbot.net/ticket/2859 7 | -------------------------------------------------------------------------------- /docs/fingerprints.txt: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP SIGNED MESSAGE----- 2 | Hash: SHA1 3 | 4 | Host fingerprints 5 | ================= 6 | 7 | service1.buildbot.net 8 | 1024 28:a0:0a:f0:04:0f:05:09:87:fc:fc:ee:67:c3:34:ea (DSA) 9 | 256 42:6f:4a:b6:d5:4f:35:0b:0f:ec:30:95:3d:21:76:44 (ECDSA) 10 | 2048 ea:17:03:e4:c6:d1:74:a8:07:07:70:da:83:6a:16:e9 (RSA1) 11 | 2048 7a:bd:9a:f0:68:e4:25:57:32:5a:0d:0d:b2:ef:d8:60 (RSA) 12 | 13 | service2.buildbot.net 14 | 1024 c2:4b:42:27:09:0c:9d:90:bf:3f:cc:9b:01:9a:38:77 (DSA) 15 | 256 db:0f:6d:b1:e4:be:5a:f8:ef:e9:25:29:29:2b:a8:47 (ECDSA) 16 | 2048 f0:55:53:39:e9:04:76:26:a1:b7:ff:a7:47:a6:d9:bb (RSA1) 17 | 2048 dd:94:4b:54:ed:73:37:78:8d:b5:2d:0f:02:70:a7:3a (RSA) 18 | 19 | service3.buildbot.net 20 | 1024 d8:36:30:c7:9a:a9:ea:95:77:53:06:ab:20:75:17:b1 (DSA) 21 | 256 84:b1:f0:12:ec:b7:21:d5:70:02:4a:4c:00:6c:5a:3d (ECDSA) 22 | 2048 93:ef:06:e7:1b:0c:80:85:a4:db:d7:85:5b:a8:7a:19 (RSA1) 23 | 2048 a1:37:92:c0:a2:1f:91:e6:05:19:63:99:c2:01:08:d9 (RSA) 24 | 25 | vm1.buildbot.net 26 | 1024 51:15:8a:4e:93:48:ee:ca:01:a6:80:b5:f2:12:a4:9a (DSA) 27 | 256 9f:56:f9:0c:3d:84:d6:00:18:e7:b9:e0:63:e6:91:58 (ECDSA) 28 | 2048 71:0c:16:96:99:b9:b8:54:8d:23:8c:1b:32:81:46:d6 (RSA1) 29 | 2048 db:87:62:9d:41:42:d3:53:b9:97:cb:b9:77:01:1e:21 (RSA) 30 | 31 | Records suitable for known_hosts (as returned by ssh-keyscan) 32 | ============================================================= 33 | 34 | service1.buildbot.net ssh-dss AAAAB3NzaC1kc3MAAACBAPs7Mq/OG1XDFfZmlm7h9xjGY7M662wKrkrTyEVzaGLFLWhHIM/Uqd+9s7/enXdOG8J1wVgJGSnWl0J6XIDEi4CL0GXYgqderlU/4mzwmEUoSf20qCM1rLlQPCdnoy60AkkddeVAdo5JO+2kj2iXJVZBVWDsDVO227mBqytYnvshAAAAFQDcJK+BNeVl79qu2nFOIkm2Oj+DYQAAAIEAylePZmJEcDtpZvW3qLCxp/U+rJngG7qo5wj4s2XnIm0niggosG3KDBUPV2pf+pnJXDuLVXbNTGHfNuMVV+f5N2X54KAg2O+fzaZofDCN8h0I8UyXWmR7wfNHjMtOQBDt6ysAErlWw3pvrQHoT6SQEaKk785Fed5gDoeGPdqRqYsAAACBAIHclnUL1DSc62uMiEhRAW9zHO/JsJP0pN2YgXk7CdiSy8pTpeFXYX+pN87zX/eFoT8jV3UQVUse8PJdoovpAOijbwJc7w/WF8G8LpdGBKHMXXpIAtvK2LVdiJBajj4h+Zud9wsFAXbyGihpvCDXwV91BXWosFfH3JvMRJMeuDfL 35 | service1.buildbot.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEZxeMUWFzSAlSfx1Sn1fmXsNSLvqn/lvO0vmvya6GPGZXfKQZWbAgiZztZDRhbjhCa+y9ecNjRi/pbzRaWJUogqE0lBmep2wbkGuBzuLirAhmhwV0CzCvKcbqgQY6xH0IYDv5mfdTeNiOJhmtLtoz9qkh6O/Ws8yulJzFAleGlIGGt5AmnHGum0zvo1yvLIZkWBl0TZBN1RGKnO7lNZsGnm8KhH6oWIVOi1OpSp5L8RG+SBzKT91mLt1LCCxjSZCSNzbM6QnO73nhHJ68oLrcVR4sRw+ib5AIJWSfMdvd3pq13FWB1jnWPdbK66bPAzN6YC3X6Fn5RM4J5oKv7FnX 36 | service1.buildbot.net ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBBwrScifB2G0sPkwQW4uuomMqbnMWmpO1w1fpq5Z5yPyIYSaf2nQfcKqaKPog1Fbd9ADt3DkYHiVuleyh16XG5U= 37 | 38 | service2.buildbot.net ssh-dss AAAAB3NzaC1kc3MAAACBALO39NhrBguBsdR6uVLPXms0FNPSLetCWFrCVHgMXstkmCKcf4xfHXBj3QVF5sggzTCLwNinwsP5sFE1hPqhKbrORAkjWUQQ5PQyJaBhweN7lEIey0zxaD89ieZ1//CNcJ3Ep6hl98xgJAFuTOy8ZkT/UDPGYyshw4W7qtXZG6N1AAAAFQCfQ+qS7ojTs2xNnJnnWFnPG6GurwAAAIEAhsismJ433VG73Dn1xkyaWhfbmekRz45q25Yn0fJMMQy9jyk0BAUS0+jZvpDHpRVGqnw580ogjwi3NfkyuOIzbWgvLefeEa5LCMU8MbBMaGKiw5glaaH+TZJVLbgES41k0zMrSIof5CsyqPSMmfUehLJJKnETzonup7AxKPNcHj8AAACBAJZBA2wBxbc//84mpHy822OZYqU4vvqUg2ltrAWKiga6fadi/C9gd4MbA8H1ZCMKDPMt9d74X2b4zuOe4z6f0OU64mKnssKgYvhf4lYI5AOKSDtOh9Px6a3bsZjPtFyOJE+Gzxo8/v9r8lNUvvORhiebanjUMPFVqsciNRRkW3WL 39 | service2.buildbot.net ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQC7HIfNZcaQIBSgHTClZeun57AR4mbSAFi277XiCWS65u6LuA2Bm3DIZoFJI8Jk6LLxTyBo7H/wQxyLEbIsEvl7+k7rNSi8OWjRcwN4QhERvc71XadCZdiLsQgZmSijrlR+e/sMGIbLFDlFzEq2XETqjTG1NUgJphhA7Q1bRV+uIjeuKIIY0tYMJClJPlcErB5NYbifr2OME5TbM3wyGw07PZTrF2UIVHY3EqcEEZd99U7/OGnA1sWF/AL7xygWuN4+/lIV3fmy/5sM6X2R52W+uVrS+y8c4pXtsSfoEdvUuNANzY9eOtc7UTcZAncy48UAkLPkceOFZqNqLXvXa8qv 40 | service2.buildbot.net ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPn0SWc6BZBzpLaJhjIX1t7eSbDpGQqokTiG/JFYmrwfZFL3FTkTprJr6NhvDS2wIsOw7724S44+JquOIYI/mm4= 41 | 42 | service3.buildbot.net ssh-dss AAAAB3NzaC1kc3MAAACBAJFHlx7VM7squwR6wv4mVaCk484KWHGd1FzCIyaA0hHEmhmv+X8eTSf3B4Bu3OMUczof3AVZhtQmdBZolT6Bp29KHyxUwkGBLamkM7ZvHhumLJErQRBoNL8ln1NzME6bn/yMX9rTH7y+CHMulsquo6eYL8HvfAsKyvtA9YuLKVrLAAAAFQDU+fLE1XrueF/FYIHk+RcSwO+W6wAAAIEAg4PIm5Cs2Kv+rC1eRip3+UfgL0vb9t9DzupCWOFSBZCKp1bEVbLZBDejSiM9nNu8pSA6DDtl2PgQl8SjxVw2hiJrP6sRSrjSrP17XBzFgZeLJQU0ExXWv1wnsjJuzs3Q8Y06T42ImCvkFP6mgVajikp9XI02bmaJgwPJvqWDmHMAAACAJCW6HNeBdcfBaLXIy1CNz3aUnTC7+LLZacivT4hAo/7KB/QrgzzpFrML8w2vG8M16fH+yVbMCF++shUlxJT501oilHEbZyeGqgHM5TgV4adIVplJ0hJhdmL5bHFacX8Y59PutgAJgtlD9lsM3UpbkAv3EQnwWQo6jcI2pYjqS7A= 43 | service3.buildbot.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNL7gwd75jxKb/UupMXk4qyr4UdiWB90ZG9MxFoXmjZqRVJneY7PqSJLORPnYA+N9szhyZcekaY/S67cpljvdza+YJC/MloLkjXCBRYbvqX14rEufcG8xYULB+SAKmq1srCffuDBhjnHQhhsAYSi6yozBzO69Cl4gf6/0gB4+Shct7FguHMMNiTFlGc57cA52MR8K/WsDE/pJCjuRTp1wWmSLPzCmWwfhadglAwFvD0dz8UcIWEFfnWfFgjqySBWJlyI6EeVEYrh9y4ckUKiPtwULqfyPO9Q3/au+cPFKPfEi627zvunN1PjAgF0frTa6PmaJ40nKTzjZB2VrjYsqP 44 | service3.buildbot.net ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBPcKrjr7+6GkOYecrskrUv0r0V9ofY9f8MsEIjzfnqFyJgE3qvPKhTiuwIbVYenFDgcPXTB2uUb76cGNG7CUrMs= 45 | 46 | vm1.buildbot.net ssh-dss AAAAB3NzaC1kc3MAAACBAMIZHJOm91VTEf2ZJp/CSG5k5pFqTKxdIFg9vUiM1z4oiTliptg31KSWUrQ7jkjhBhToeaqcjaAe4Za4EOU5fNLeRvYpOE8M/gZofCZgJcwlN5FJRaFwnyIqerTF+lZclUvxgrQgVNbLoa9tHQy6ZikwWPtnf2Vm71mr2OcEwfs/AAAAFQCPUYPdUqIQ9Dlm1zzfv/j/PCeSLwAAAIBG3Gg0u52Uns+sqobNotx63AHFHQyJmYXIs76NBU4CuLDzrFGCiAzwf0FKHKX0bGe1bBWC09y3ziRPr8uhr1Mh1GaTxAKW74pwJMchWXg6UIZQOX5vO1PFsvMSLOtxxV3hhQU9ktPIWlYEH+lail3lBTElrP1QhZtjbC3KXsmxhwAAAIA7YwYUDF3VEeFfR4ab3D4vTxtQ6vckwkYDV3G3Oz63q6iOit2ftRbCil5IhCW/MrUfmSW22dt+29k/669zoos5KAWRQ2H74Aj6M+mx8+CSeQEr7VseMjz0qZqzZ7lCZzOQ0aAzIx0UgBq14dzJO+XmfLrmW/JvCPaR8WPXJCX0pQ== 47 | vm1.buildbot.net ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDLLdXY9gx3/QcpYiuzoSBwoPHwgeTOIxAG+duINucp1QV1gm5Pl4WfzVzmQvKiDodOm6ww9NPMv86dss1B6MstkAgvP47y2+6JLXqyFCxScpFf2REiMjmOiQgt9TOt1eM+wu08NzZU336jXweeNYIc14XKk6Vn+kRd7Jzr+HWAgGBDWbcKhnWqAcX6H5bd+z8bmY89x47/lrLGgJjrfxopYl+n3Qfai93c3Dw4ulEePuNTda+Tz01uhg5kM62TydVQAw0Nzv8Q8I9oMWBev2GtOG7nfKbPu8+mUmDJ08zk0X7pg6PUOeYg79mVKv8fCseiKVSmmAcyI9gZuINbaRuV 48 | vm1.buildbot.net ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDRg6bvK+2FN67NiInzIwYMwmyBlsVZt4PYv/LfYzqyFUTdc6iIP2pSZ7K3rmxC27AnfeUw5RNJxgo6Tdqz6pZg= 49 | -----BEGIN PGP SIGNATURE----- 50 | Version: GnuPG v1 51 | 52 | iQEcBAEBAgAGBQJUeKndAAoJEA9WV6Ji41AjSDoH/RBe/Fo+kcMKlWMwPF7OnuMm 53 | Bzpi3TCbC+cW3LTDns/LxAXCsNuo3u+VQ93uhBbgabNzNZf60ao5trxAI9ToMytB 54 | W4ehT7aSxK2uH9iidYoVbdW01uBSKJ9poSZTIoeNGm+cAVQQ+0NL+SCKYCuU8MDv 55 | UMQ97bcXFz7cgAYqosevDx/vS3IPVs5hf1BpQcaumWsyaTPCN/Lm/dXCpRafMQp5 56 | nPyTCOQN8FBSy6Had5C4TN3hhfaSRa7zlrxHqdf8OIITPu0O5JVzluEOKIks8zac 57 | +FhwQ7moQc0Qcv3r71HlrtkWOOBWaT5JAIhLT6x70fr0MZ8204Lv6aAFOs1IAcU= 58 | =5b4a 59 | -----END PGP SIGNATURE----- 60 | 
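(Editor's aside — not a file in the repository.) The heading above notes that these records are exactly what ``ssh-keyscan`` returns, so they can be re-generated and compared against this signed file whenever a host key is in doubt. A minimal shell sketch, assuming only ``ssh-keyscan`` and standard POSIX tools are available; the host names come from the file above, everything else here is illustrative:

```sh
# Editorial sketch, not part of the repository: re-scan the hosts listed above
# and diff the live keys against the committed, PGP-signed records.
for h in service1 service2 service3 vm1; do
    ssh-keyscan -t rsa,ecdsa "${h}.buildbot.net"
done 2>/dev/null | sort > /tmp/live-keys.txt

# Keep only the comparable records from fingerprints.txt (the PGP armour and
# the obsolete ssh-dss/RSA1 entries are skipped), then compare.
grep -E '^(service[0-9]|vm1)\.buildbot\.net (ssh-rsa|ecdsa)' docs/fingerprints.txt \
    | sort | diff -u - /tmp/live-keys.txt
```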
-------------------------------------------------------------------------------- /docs/hardware.txt: -------------------------------------------------------------------------------- 1 | At OSUOSL 2 | ========= 3 | 4 | This is Buildbot's own hardware 5 | 6 | service1 7 | -------- 8 | ix1104 with one quad-core Xeon E3 3.3GHz CPU, 16GB RAM, 100GB SSD 9 | 10 | Hosting: 11 | * mx.buildbot.net (MTA) 12 | * ns1.buildbot.net 13 | * syslog 14 | * backup 15 | * ldap 16 | 17 | 'syslog' is for all syslog information from every server, jail and service to 18 | be forwarded to. 19 | 20 | 'backup' is meant for an Amanda installation. There is an extra 160GB SSD 21 | installed in that machine for fast backups and a DL Bluray (50GB/disc) burner 22 | in it. 23 | 24 | LDAP isn't something I was planning to set up for certain. I'm doing it for 25 | RTEMS because we have so many services it's a PITA to maintain different 26 | accounts. I'm going to be putting simple SSO software on register.rtems.org. 27 | 28 | service2 29 | -------- 30 | ix1104 with one quad-core Xeon E3 3.3GHz CPU, 16GB RAM, 100GB SSD 31 | 32 | Hosting: 33 | * lists.buildbot.net 34 | 35 | the configs are all in /usr/local/mailman/configs (git repo) 36 | a sh script (reconfig.sh) to reconfig the lists when changes are made 37 | 38 | * bot.buildbot.net 39 | * www.buildbot.net 40 | * ftp.buildbot.net 41 | 42 | service3 43 | -------- 44 | ix1104 with one quad-core Xeon E3 3.3GHz CPU, 32GB RAM, 100GB SSD 45 | 46 | Hosting: 47 | * mysql.int.buildbot.net 48 | 49 | vm1 50 | --- 51 | ixR1204 with two six-core Xeon 2.1GHz, 32GB RAM, and four 100GB SSDs 52 | 53 | Hosting: 54 | * For VMs (dual six core machine) 55 | -------------------------------------------------------------------------------- /docs/network.txt: -------------------------------------------------------------------------------- 1 | IP Space 2 | ======== 3 | 4 | Public IP space is 140.211.10.128/25, in a single VLAN shared with RTEMS. 5 | 6 | Buildbot is allocated 140.211.10.224/27 out of that /25: 7 | 140.211.10.225 -- 140.211.10.254 8 | 9 | (Misha: it looks like we are given the whole /27, so 140.211.10.224 can also be used.) 10 | 11 | Allocations 12 | ----------- 13 | 14 | (Based on Amar's email of Nov 25, 2014 and updated based on IRC/tickets.) 15 | 16 | Jails aren't bound to an NFS IP as the restriction is per-server.
17 | 18 | The format for service hosts is ServerBuildbot(<external IP>, <192.168.103.x IP>, <192.168.80.x IP>, <192.168.90.x IP>): 19 | 20 | mac1 = ServerBuildbot("140.211.10.234", "192.168.103.234", "192.168.80.234", "192.168.90.234") 21 | service1 = ServerBuildbot("140.211.10.230", "192.168.103.230", "192.168.80.230", "192.168.90.230") 22 | service2 = ServerBuildbot("140.211.10.231", "192.168.103.231", "192.168.80.231", "192.168.90.231") 23 | service3 = ServerBuildbot("140.211.10.232", "192.168.103.232", "192.168.80.232", "192.168.90.232") 24 | vm1 = ServerBuildbot("140.211.10.233", "192.168.103.233", "192.168.80.233", "192.168.90.233") 25 | 26 | The format for jails is JailBuildbot(<host>, <external IP or None>, <internal IP>): 27 | 28 | backup = JailBuildbot(service1, None, "192.168.80.212") 29 | ldap = JailBuildbot(service1, None, "192.168.80.214") 30 | mx = JailBuildbot(service2, "140.211.10.235", "192.168.80.235") 31 | ns1 = JailBuildbot(service2, "140.211.10.236", "192.168.80.236") 32 | syslog = JailBuildbot(service2, None, "192.168.80.211") 33 | 34 | bot = JailBuildbot(service2, "140.211.10.242", "192.168.80.242") 35 | ftp = JailBuildbot(service2, "140.211.10.243", "192.168.80.243") 36 | lists = JailBuildbot(service2, "140.211.10.241", "192.168.80.241") 37 | nine = JailBuildbot(service2, "140.211.10.244", "192.168.80.244") 38 | 39 | mysql = JailBuildbot(service3, None, "192.168.80.215") 40 | pgsql = JailBuildbot(service3, None, "192.168.80.216") 41 | bslave1 = JailBuildbot(service3, None, "192.168.80.226") 42 | events = JailBuildbot(service3, "140.211.10.227", "192.168.80.227") 43 | 44 | DNS 45 | === 46 | 47 | Forward: 48 | -------- 49 | 50 | hosted by dns.he.net under dustin's account 51 | 52 | Reverse: 53 | -------- 54 | 55 | 128-255.10.211.140.in-addr.arpa. 1799 IN SOA ns1.rtems.org. abuse.rtems.org. 7 10800 3600 604800 600 56 | 57 | Topology 58 | ======== 59 | 60 | The other item about our setup is that we currently live within the RTEMS protected 61 | infrastructure. 62 | 63 | That setup has dual redundant Cisco switches that are clustered and two 64 | redundant firewalls running FreeBSD and PF. They are using failover with 65 | RSTP. 66 | 67 | Each server in our network has one cable to each switch. The firewalls have 4 68 | ports to the OSL then two cables to each switch -- cross connected. This way 69 | we can lose one firewall and one switch (random!) and still have connectivity. 70 | It also allows us to upgrade both without losing anything. 71 | 72 | The network has been set up so Buildbot can move outside of the infrastructure 73 | without losing anything. We just move our cables and we're done. 74 | -------------------------------------------------------------------------------- /docs/trunas.txt: -------------------------------------------------------------------------------- 1 | TruNAS 2 | ====== 3 | 4 | We have access to RTEMS TruNAS storage which is unlimited data-wise. We 5 | can store as much as we want on there without any worry should we want it. 6 | -------------------------------------------------------------------------------- /docs/upgrading-freebsd.rst: -------------------------------------------------------------------------------- 1 | Notes For Upgrading FreeBSD 2 | =========================== 3 | 4 | This is based on the FreeBSD Handbook, sections 23.2 FreeBSD Update and 14.6.4 Updating Jails. 5 | 6 | Process for upgrading a host: 7 | 8 | 1. Upgrade host. 9 | 2. Upgrade host packages. 10 | 3. Upgrade basejail userland. 11 | 4. Upgrade each jail's etc. 12 | 5. Upgrade each jail's packages.
13 | 14 | 15 | Upgrade Using freebsd-update 16 | ---------------------------- 17 | Run ``freebsd-update`` as many times as necessary. 18 | At the end of each install will be a note on whether it's necessary to run ``freebsd-update`` again after a reboot. 19 | 20 | Minor Upgrade 21 | ------------- 22 | For a minor upgrade, such as 11.2-RELEASE-p1 to 11.2-RELEASE-p2, use fetch and install:: 23 | 24 | freebsd-update fetch 25 | freebsd-update install 26 | 27 | Major Upgrade 28 | ------------- 29 | For a major upgrade, such as 11.2-RELEASE to 11.3-RELEASE or 12.0-RELEASE, use upgrade:: 30 | 31 | freebsd-update -r DESTINATION_VERSION upgrade 32 | 33 | When the upgrade finishes, you will be instructed to reboot and run ``freebsd-update install``. 34 | Each install may require additional installs after another reboot. 35 | 36 | Upgrade Host Packages 37 | --------------------- 38 | 39 | :: 40 | 41 | pkg upgrade 42 | 43 | Upgrade basejail's Userland 44 | --------------------------- 45 | 46 | Use ``ezjail-admin update`` to upgrade the basejail. 47 | The value for the ``-s`` option is the current FreeBSD version of the basejail. 48 | 49 | :: 50 | 51 | ezjail-admin update -U -s BASEJAIL_CURRENT_RELEASE 52 | 53 | Upgrade Jail's etc 54 | ------------------ 55 | 56 | For each jail, run mergemaster. 57 | 58 | :: 59 | 60 | mergemaster -U -D /path/to/jail 61 | 62 | The ``-i`` option cannot be used because of the symlinks used for some directories such as ``/boot``. 63 | The first time running this on a jail could take a while because no database exists on which files have been modified. 64 | 65 | Notes On Merging Files 66 | ~~~~~~~~~~~~~~~~~~~~~~ 67 | 68 | * Delete anything that tries to install new files in ``/boot``. 69 | The jails already have a symlink for ``/boot`` into the basejail so they will get the right files when running. 70 | * Install files that are missing. 71 | The majority of missing files are new for the release. 72 | Using the ``-i`` option would automatically install them but it can't be used because of some symlinks. 73 | * Delete files that are managed by Ansible. 74 | If you see anything that starts with ``# Managed by Ansible`` then don't bother trying to merge the changes now. 75 | Any changes should be handled by a pull-request to the Ansible repository. 76 | 77 | 78 | Upgrade Jail's Packages 79 | ----------------------- 80 | This can use ``pkg`` if you ``jexec`` to the jail. 81 | I use this script to upgrade all of the jails. 82 | 83 | :: 84 | 85 | for jid in $(jls | grep -v JID | awk '{print $1}'); do 86 | jail_hostname=$(jls -j $jid | grep -v JID | awk '{print $3}') 87 | echo "===== $jid: $jail_hostname" 88 | sudo jexec $jid pkg update && 89 | sudo jexec $jid pkg upgrade pkg && 90 | sudo jexec $jid pkg upgrade 91 | done 92 | 93 | 94 | From Source 95 | ----------- 96 | 97 | This part of the document is for historical purposes. 98 | It will be deleted once all hosts and jails use ``freebsd-update``. 99 | 100 | Much of this document is based on the Handbook, section 24.6 Rebuilding World. 101 | That should be the primary source to use when upgrading. 102 | 103 | :: 104 | 105 | # Update /usr/src. It is a git clone of a mirror on github. 106 | 107 | # Start a script session to log everything for review. 
108 | script /var/tmp/mw.out 109 | chflags -R noschg /usr/obj/* 110 | rm -rf /usr/obj 111 | 112 | cd /usr/src 113 | make -j8 buildworld 114 | make -j8 buildkernel KERNCONF=$kernel 115 | make installkernel KERNCONF=$kernel 116 | mergemaster -Fp 117 | make installworld 118 | mergemaster -iFU 119 | yes | make delete-old 120 | 121 | ezjail-admin update -i 122 | 123 | # For each jail directory: 124 | mergemaster -FU -D /path/to/jail 125 | 126 | reboot 127 | 128 | # Wait for machine to come back. 129 | 130 | # Resume script output from before. 131 | script -a /var/tmp/mw.out 132 | cd /usr/src 133 | make delete-old-libs 134 | 135 | pkg upgrade 136 | 137 | # For each jail directory: 138 | # CAVEAT: mailman (in the lists jail) MUST be upgraded via ports. 139 | jexec $jid pkg upgrade 140 | -------------------------------------------------------------------------------- /files/collectevents.py: -------------------------------------------------------------------------------- 1 | import elasticsearch 2 | import argparse 3 | import datetime 4 | from time import mktime 5 | 6 | 7 | def ts(d): 8 | return int(mktime((d).timetuple())) * 1000 9 | 10 | 11 | parser = argparse.ArgumentParser() 12 | parser.add_argument('hosts', nargs='*', default=[]) 13 | args = parser.parse_args() 14 | 15 | e = elasticsearch.Elasticsearch(args.hosts) 16 | 17 | 18 | def installIdPerVersion(range1, range2, version): 19 | body = { 20 | "query": { 21 | "range": { 22 | "@timestamp": { 23 | "format": "epoch_millis", 24 | "gte": range1, 25 | "lte": range2 26 | } 27 | } 28 | }, 29 | "aggs": { 30 | "2": { 31 | "aggs": { 32 | "1": { 33 | "cardinality": { 34 | "field": "installid.raw" 35 | } 36 | } 37 | }, 38 | "terms": { 39 | "order": { 40 | "1": "desc" 41 | }, 42 | "field": "versions." + version + ".raw", 43 | "size": 20 44 | } 45 | } 46 | }, 47 | "size": 0 48 | } 49 | result = e.search(index='logstash-*', body=body) 50 | ret = {} 51 | for r in result['aggregations']['2']['buckets']: 52 | ret[r['key'].replace(".", "_")] = r['1']['value'] 53 | return ret 54 | 55 | 56 | def installId(range1, range2): 57 | body = { 58 | "query": { 59 | "range": { 60 | "@timestamp": { 61 | "format": "epoch_millis", 62 | "gte": range1, 63 | "lte": range2 64 | } 65 | } 66 | }, 67 | "aggs": { 68 | "1": { 69 | "cardinality": { 70 | "field": "installid.raw" 71 | } 72 | } 73 | }, 74 | "size": 0 75 | } 76 | result = e.search(index='logstash-*', body=body) 77 | return result['aggregations']['1']['value'] 78 | 79 | 80 | now = datetime.date.today() 81 | for i in xrange(10): 82 | then = now - datetime.timedelta(days=i) 83 | data = dict( 84 | timestamp=then, 85 | total=installId( 86 | ts(then - datetime.timedelta(days=1)), ts(then)), 87 | total_cumul=installId(0, ts(then))) 88 | for version in ['Buildbot', 'Python']: 89 | data['per' + version] = installIdPerVersion( 90 | ts(then - datetime.timedelta(days=1)), ts(then), version) 91 | data['per' + version + '_cumul'] = installIdPerVersion(0, ts(then), 92 | version) 93 | e.index( 94 | index="postprocess-index", 95 | doc_type='aggregations', 96 | id=ts(then), 97 | body=data) 98 | -------------------------------------------------------------------------------- /files/docs-redirects: -------------------------------------------------------------------------------- 1 | # Renamed documentation pages. 
2 | rewrite ^/(latest|current)/developer/master-slave\.html /$1/developer/master-worker.html permanent; 3 | rewrite ^/(latest|current)/developer/cls-bslavemanager\.html /$1/developer/cls-workermanager.html permanent; 4 | rewrite ^/(latest|current)/developer/cls-buildslave\.html /$1/developer/cls-worker.html permanent; 5 | rewrite ^/(latest|current)/manual/cfg-buildslaves-libvirt\.html /$1/manual/cfg-workers-libvirt.html permanent; 6 | rewrite ^/(latest|current)/manual/cfg-buildslaves-docker\.html /$1/manual/cfg-workers-docker.html permanent; 7 | rewrite ^/(latest|current)/manual/cfg-buildslaves-openstack\.html /$1/manual/cfg-workers-openstack.html permanent; 8 | rewrite ^/(latest|current)/manual/cfg-buildslaves-ec2\.html /$1/manual/cfg-workers-ec2.html permanent; 9 | rewrite ^/(latest|current)/manual/cfg-buildslaves\.html /$1/manual/cfg-workers.html permanent; 10 | rewrite ^/(latest|current)/manual/installation/buildslave\.html /$1/manual/installation/worker.html permanent; 11 | -------------------------------------------------------------------------------- /gcloud_config/README.md: -------------------------------------------------------------------------------- 1 | This directory contains custom Kubernetes configuration for the GKE cluster. 2 | 3 | To apply, run the following in the Google Cloud Shell: 4 | 5 | ``` 6 | kubectl apply -f file.yaml 7 | ``` 8 | -------------------------------------------------------------------------------- /gcloud_config/kube-system-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: kube-system-pdb 5 | namespace: kube-system 6 | spec: 7 | selector: 8 | matchExpressions: 9 | - key: k8s-app 10 | operator: In 11 | values: 12 | - kube-dns 13 | - kube-dns-autoscaler 14 | - metrics-server 15 | - event-exporter 16 | - glbc 17 | maxUnavailable: 1 18 | -------------------------------------------------------------------------------- /group_vars/all: -------------------------------------------------------------------------------- 1 | # This file defines all kinds of global parameters and defaults 2 | --- 3 | # Use the non-fully-qualified name of python3 4 | # On FreeBSD machines it's available as /usr/local/bin/python3, on most Linux 5 | # machines it's available as /usr/bin/python3 6 | ansible_python_interpreter: python3 7 | pkg_ansible_version: py38-ansible28 8 | 9 | # admin users have administrative access to all systems (but they know better 10 | # than to change things by hand). They will be added to the 'wheel' group. 11 | # 12 | # Note that SSH keys are in roles/admin-users/files/authorized_keys 13 | # 14 | # Keep this in sync with the `buildbot-infra` team on GitHub. 15 | # 16 | # WARNING: do not remove entries from this list; just mark them 'state: absent' 17 | 18 | admin_users: 19 | - username: jim # kept as an example.. 20 | state: absent 21 | - username: dustin 22 | state: present 23 | - username: amar 24 | state: present 25 | - username: mss 26 | state: present 27 | - username: sean 28 | state: present 29 | - username: tardyp 30 | state: present 31 | - username: p12tic 32 | state: present 33 | 34 | 35 | # proxy configuration is found automatically given the host's environment 36 | # via bb_filters.py 37 | proxy_env: "{{ {} | proxies_from_env }}" 38 | 39 | # This account is used to run `ansible-pull` and has passwordless sudo rights 40 | # on the host.
41 | service_account: bbinfra 42 | 43 | # This account is intended to be used for running non-privileged 44 | # services/tasks. 45 | worker_account: bbuser 46 | 47 | # Mandatory packages are the ones that must be installed on every host. 48 | # Ansible installation is taken care of depending on what kind of host is that 49 | # (service host, jail, vm) 50 | mandatory_packages: 51 | FreeBSD: 52 | - sudo 53 | - git # this is a mandatory package since we rely on ansible-pull 54 | - timelimit # this contains timelimit command which is used to avoid ansible-pull to be too long 55 | - nano # this helps ad-hoc debugging 56 | - gmake # buildbot uses GNU-specific makefile features 57 | Debian: 58 | - sudo 59 | - git # this is a mandatory package since we rely on ansible-pull 60 | - coreutils # this contains timeout command which is used to avoid ansible-pull to be too long 61 | - nano # this helps ad-hoc debugging 62 | - docker.io 63 | - docker-compose 64 | 65 | # Utility packages are the ones that must be installed on service host only (at 66 | # least, at the moment). 67 | # The original list is based on http://trac.buildbot.net/ticket/3036 68 | utility_packages: 69 | FreeBSD: 70 | - bash 71 | - rsync 72 | - screen 73 | - vim-console 74 | Debian: 75 | - bash 76 | - rsync 77 | - screen 78 | - vim-tiny # we probably do not want to bring all the stuff 79 | 80 | # The upstream repository from which the service host crontask should pull 81 | ansible_git_repository: https://github.com/buildbot/buildbot-infra 82 | 83 | # syslog server to which all syslog messages should be forwarded 84 | global_syslog_server: 192.168.80.211 85 | global_syslog_server_json_port: 5555 86 | 87 | # Alias for email to root (cronspam, etc.) 88 | root_email_destination: "sysadmin@buildbot.net" 89 | 90 | # This structure refers to `build_slaves` in `secrets.yml` 91 | slave_master_allocations: 92 | master: 93 | - p12-pd-any 94 | - p12-ep2-any 95 | obsolete: 96 | - cm-bbot-leopard-001 97 | - cm-bbot-leopard-002 98 | - cm-bbot-leopard-003 99 | - cm-bbot-linux-001 100 | - cm-bbot-linux-002 101 | - cm-bbot-linux-003 102 | - cm-bbot-xp-001 103 | - cm-bbot-xp-002 104 | - cm-bbot-xp-003 105 | - automan 106 | - bghimi4-2 107 | - buildbot-linux4 108 | - centos_5_python2_4 109 | - debian 110 | - freebsd_7 111 | - koobs-freebsd10 112 | - koobs-freebsd9 113 | - linux 114 | - metrohm-win81 115 | - minimata 116 | - minime 117 | - reed.tx.reedmedia.net 118 | - tomprince-hermes-gentoo-1 119 | - tomprince-socrates-winxp-1 120 | - ubuntu810-64 121 | - w7.kvm.r.igoro.us 122 | - win7-cygwin 123 | - xp-msysgit 124 | - bdbaddog-eight 125 | 126 | # configuration for tracking configuration directories 127 | track_config: 128 | # Directories to track. It's OK if these don't exist 129 | # everywhere -- they will only be tracked if they exist. 130 | dirs: 131 | - "/etc" 132 | - "/usr/local/etc" 133 | - "/usr/local/mailman/configs" 134 | author_name: "Ansible" 135 | author_email: "sysadmin@buildbot.net" 136 | default_author_name: "Unknown" 137 | default_author_email: "sysadmin@buildbot.net" 138 | 139 | # SSH host keys for each host that needs something to connect to it. 
140 | ssh_host_keys: 141 | "[ftp.int.buildbot.net]:2200": 142 | - "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBMYtZrTzjDDtQa5DUGkuad+oi3N75cdb3iw0rLohuHBBL+iOxDBR6gdmOARNNqB2WKyN7inN3j965p335cy+BRc=" 143 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMvwDVNIL/7bA6rqCZZlgckn2ws1+r7PtLSDo3cDeBUr" 144 | - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEemfRZouB2MSDecG6SnHxLPcYvqmfDx+poIhnH5bJ1SrGJSK45fRdqDlJCgUeXxwTnz3qUwRSU6muXEziVbFZMLg8nRw8JauLZHwq/4oIKioXqtohLZ4dG70cs5rlZVvyimChsJ5F+15myBIlq9HusM4Ov0l023r53OaB8ZkdN2glSCqdHt6k6KyJulVsJjk51ug0e9y+yGWkmFG0WJpoOVlL3aRw7hs9CISZF4W3VouuAQ4tS9DsM3PuquH6wF8NEkzKez2v3zcykpng+1nUOimsAuSCvL3opX4ncDkaB8zMEZOqtCtJoBT28mZfmp9zbuSqMwHO8L2L8MPbga0b" 145 | 146 | # network subnets 147 | internal_network: 192.168.80 148 | external_network: 140.211.10 149 | 150 | nat_if: lagg0 151 | internal_if: vlan0 152 | external_if: lagg0 153 | 154 | # needs to be changed each time the dns config is updated 155 | dns_serial: 2024042901 # YYYYMMDD01 156 | 157 | # IP addresses for all hosts in the network 158 | hosts_ips: 159 | mysql : 215 160 | events : 227 161 | service1: 230 162 | service2: 231 163 | service3: 232 164 | vm1 : 233 165 | mac1 : 234 166 | mx : 235 167 | ns1 : 236 168 | lists : 241 169 | ftp : 243 170 | syslog : 211 171 | 172 | backup : 212 173 | 174 | # vars are lazily evaluated so ansible_hostname will be valid at the time of evaluation (via gatherfacts) 175 | internal_ip: "{{ internal_network }}.{{ hosts_ips[ansible_hostname] }}" 176 | -------------------------------------------------------------------------------- /host-p12tic-local.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure p12tic-local 3 | hosts: s-bw2 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | roles: 8 | - role: base 9 | - role: nginx-proxy 10 | vars: 11 | nginx_proxy_root: "/home/{{ service_account }}/nginx-docker" 12 | - role: bb-master-docker 13 | vars: 14 | bb_root: "/home/{{ service_account }}/bb-docker" 15 | web_host_name: "buildbot.buildbot.net" 16 | -------------------------------------------------------------------------------- /host-service1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure service1 3 | hosts: service1 4 | gather_facts: no 5 | connection: "{{ ansible_connection }}" 6 | become: yes 7 | roles: [] 8 | -------------------------------------------------------------------------------- /host-service2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure service2 3 | hosts: service2 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | roles: 8 | - role: jail 9 | vars: 10 | name: mx 11 | hostname: mx.buildbot.net 12 | internet_visible: true 13 | - role: jail 14 | vars: 15 | name: ns1 16 | hostname: ns1.buildbot.net 17 | internet_visible: true 18 | - role: jail 19 | vars: 20 | name: syslog 21 | hostname: syslog.buildbot.net 22 | internet_visible: true 23 | # note that the remainder here are specified the "old way", with 24 | # ip_address 25 | - role: jail 26 | vars: 27 | name: ftp 28 | hostname: ftp.buildbot.net 29 | ip_address: 30 | - 'lagg0|140.211.10.243' 31 | - 'vlan0|192.168.80.243' 32 | - role: jail 33 | vars: 34 | name: lists 35 | hostname: lists.buildbot.net 36 | ip_address: 37 | - 'lagg0|140.211.10.241' 38 | - 'vlan0|192.168.80.241' 39 | - role: jail 40 | vars: 41 | name: www 42 | hostname: www.buildbot.net 43 | ip_address: 44 | - 
'lagg0|140.211.10.238' 45 | - 'vlan0|192.168.80.238' 46 | -------------------------------------------------------------------------------- /host-service3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure service3 3 | hosts: service3 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | roles: 8 | - role: jail 9 | vars: 10 | name: mysql 11 | hostname: mysql.int.buildbot.net 12 | ip_address: 13 | - 'vlan0|192.168.80.215' 14 | - role: jail 15 | vars: 16 | name: events 17 | hostname: events.buildbot.net 18 | ip_address: 19 | - 'lagg0|140.211.10.227' 20 | - 'vlan0|192.168.80.227' 21 | -------------------------------------------------------------------------------- /host-vm1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure vm1 3 | hosts: vm1 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | roles: 8 | - role: base 9 | - role: nginx-proxy-letsencrypt 10 | vars: 11 | nginx_proxy_root: "/home/{{ service_account }}/nginx-docker" 12 | - role: docs 13 | vars: 14 | compose_root: "/home/{{ service_account }}/docs" 15 | - role: www 16 | vars: 17 | compose_root: "/home/{{ service_account }}/www" 18 | -------------------------------------------------------------------------------- /jail-events.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure events 3 | hosts: events 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | vars: 8 | web_host_name: "events.buildbot.net" 9 | 10 | roles: 11 | - base 12 | - role: elk 13 | server_name: "{{ web_host_name }}" 14 | logstash_variant: events 15 | github_team: "buildbot-commit-access" # this restricts access to only people in the github 'buildbot-commit-access' team 16 | -------------------------------------------------------------------------------- /jail-ftp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure ftp 3 | hosts: ftp 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | vars: 8 | ftp_root: /data/ftp 9 | ftp_hostname: ftp.buildbot.net 10 | roles: 11 | - base 12 | - ftp 13 | # run a specialized SSH daemon to receive uploads from other internal hosts 14 | - role: ssh 15 | sshd_port: 2200 16 | listen_addresses: 17 | - "{{ internal_ip }}" 18 | extra_config: | 19 | Match User buildbot 20 | ChrootDirectory {{ ftp_root }} 21 | ForceCommand internal-sftp 22 | - role: user 23 | user_id: buildbot 24 | user_name: Buildbot Uploader 25 | authorized_key: | 26 | ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNZ3JHrBueI3OvmDTIJXMcUxyjadRYb2+44n8kBgxgU9Da6mQv2j02OH3MbOhLZoMhxqRC5GVeqmDYjcEw+vcIQ= bbinfra@bslave1.int.buildbot.net 27 | -------------------------------------------------------------------------------- /jail-lists.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # TODO: actually manage configuration (#3040) 3 | - name: configure lists 4 | hosts: lists 5 | gather_facts: no 6 | connection: local 7 | become: yes 8 | roles: 9 | - role: base 10 | configure_sendmail: False 11 | -------------------------------------------------------------------------------- /jail-mx.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure mx 3 | hosts: mx 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | roles: 8 | - role: base 9 | 
configure_sendmail: False 10 | - role: postfix 11 | -------------------------------------------------------------------------------- /jail-mysql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # TODO: actually manage configuration (#3047) 3 | - name: configure mysql 4 | hosts: mysql 5 | gather_facts: no 6 | connection: local 7 | become: yes 8 | roles: 9 | - base 10 | -------------------------------------------------------------------------------- /jail-ns1.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure ns1 3 | hosts: ns1 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | roles: 8 | - base 9 | - dns 10 | -------------------------------------------------------------------------------- /jail-syslog.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure syslog 3 | hosts: syslog 4 | gather_facts: no 5 | connection: local 6 | become: yes 7 | vars: 8 | web_host_name: "syslog.buildbot.net" 9 | roles: 10 | - role: elk 11 | logstash_variant: syslog 12 | server_name: "{{ web_host_name }}" 13 | # this restricts access to only people in the github 'buildbot-infra' team 14 | github_team: "buildbot-infra" 15 | -------------------------------------------------------------------------------- /library/bb_filters.py: -------------------------------------------------------------------------------- 1 | # Copyright Buildbot Team Members 2 | # 3 | # Permission is hereby granted, free of charge, to any person 4 | # obtaining a copy of this software and associated documentation 5 | # files (the "Software"), to deal in the Software without 6 | # restriction, including without limitation the rights to use, 7 | # copy, modify, merge, publish, distribute, sublicense, and/or sell 8 | # copies of the Software, and to permit persons to whom the 9 | # Software is furnished to do so, subject to the following 10 | # conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be 13 | # included in all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 16 | # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES 17 | # OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 18 | # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 19 | # HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, 20 | # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 21 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 | # OTHER DEALINGS IN THE SOFTWARE. 
23 | 24 | import os 25 | 26 | 27 | def proxies_from_env(ret): 28 | for env in ['http_proxy', 'https_proxy', 'no_proxy']: 29 | if env in os.environ: 30 | ret[env] = os.environ[env] 31 | return ret 32 | 33 | 34 | class FilterModule(object): 35 | """ 36 | Buildbot Infra specific filters 37 | """ 38 | 39 | def filters(self): 40 | return { 41 | 'proxies_from_env': proxies_from_env 42 | } 43 | -------------------------------------------------------------------------------- /load-secrets.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: no 4 | tasks: 5 | - name: load secrets from "{{ secrets_file|default('secrets.yml') }}" 6 | include_vars: "{{ secrets_file|default('secrets.yml') }}" 7 | tags: always 8 | -------------------------------------------------------------------------------- /local.yml: -------------------------------------------------------------------------------- 1 | # This is a playbook to be run with ansible-pull (the usage will be described 2 | # when a particular cron task is installed). 3 | # To deploy a particular service manually: 4 | # * log into the target host 5 | # * run 'ansible-playbook -i localhost local.yml -K' 6 | --- 7 | 8 | - name: determine local host name 9 | hosts: all 10 | gather_facts: yes 11 | connection: local 12 | 13 | tasks: 14 | # Use this to create "special" groups that will be used to setup a particular host. 15 | # We use non-fqdn here so a staging/test setup could work as well. 16 | - name: Group hosts by name 17 | group_by: 18 | key: "{{ansible_hostname}}" 19 | changed_when: False 20 | 21 | - debug: 22 | var: groups 23 | changed_when: False 24 | - import_playbook: "common.yml" 25 | vars: 26 | ansible_connection: "local" 27 | -------------------------------------------------------------------------------- /localhost: -------------------------------------------------------------------------------- 1 | localhost 2 | -------------------------------------------------------------------------------- /roles/admin-users/files/authorized_keys/amar.keys: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDT1SCOksifjvePs20Y2sjPxS23oZZZsVip4Xqd43vZ7SBz6lq1aawWE+6q4Cyml0MwoJ11pD3XC1CGHNYq8AnpqO6hxzLx+uR+P1Tc2RsHITgggNfetod8z1zDQLsw7HM0HW7jCm+s5Dz4AMgjney0VhTiEbEAGFMl8WTanxVFhwVl+huEDgfHdtWUP5wekvgv5KCn0KKLGcg8yeGed82ToOqwzyyMHCuV8gwAL1N0dggMpgyk2/2fiHWEzbYKM1XZ5f3kwEjO51xw6+h5Rky+YBmafeBZtIllCkb57I4da+lhabPZ5ntFn9AT+YqQPhSk6ATRnHUKaKXZoUgGOWvjhpNWVoVjwZjpnAOfmSxkrhOjOZhQKXMnt0u6avuibJj/DCuyOyLyeRmbpzj0Uw2JzB9r+xt7W5g6mM4qjW3lYIf5zgp3wIRag10tORtB+Nj8nN2GeKBr4O2Aih0o/CbxJ1bVTga+MSpYsKKLNWwUiTJRmgxtr5CJN8c3oAUlMOE= verm@peach.darkbeer.org 2 | -------------------------------------------------------------------------------- /roles/admin-users/files/authorized_keys/dustin.keys: -------------------------------------------------------------------------------- 1 | ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQDPHpYH/lEQ+44TebHTz+wdQX+DEKHwYQtSMBBCdBTDfey/q79dPlHFjqd7MTk3DqmZXmVXoi6ORHs954euiGB4FZJSBifacdLHZydmxRH1/E/EQCIEtuI2fKqI4FEmR3hYsds4ARBnQMFqwshWYFczWTp/s6NOCFJ3sYH9G4ozjeqCsG64jTD6qzjx0nOC2P9zH8mlx7ETXtMQ6g7z5ZECAyhFViYX1kRonkkEGRlCa8YzVg92FVuk4BBRb4mOPsHO4jcDfbHoEIzFxFxDPQMmA88JCXUv/GIFYzxeTv4xU6klLd6HGgBtCicRfRfa8Zj3PO9ajnRFoyK/4aYnbqqwfapUVSJ5sLcil70iJfGC8oNS1c00X7FLCT9wX9k+JTuZvhMeGwPETtqdp3SFW+NqfkAnjghmNUy6AZL1FtjUFcaM97XjJnhaW8FJbkbyyZ9gjqDwg/s2exzGdnIrzt1PVPKqXyATYyRcb65SR4NKj+z4We6pOiPybMRrTng4/vEb0zBwvvT1G08r+TPIRDzWiDJ07GoLK5Nhq3N4BCeGb98vo33BPMbxkpAHpWwfJQtY7skpB9hsmcUxZ9f96LejV0HcIHMoMKOI0WnMx0XMXop0UM6U2MQEvRXRzGQXGP3XiMFTqILn7SEDjUl9OnjLeym2dMU9+0+/lQ4n2oQIow== dustin_personal_2019-04-20 2 | -------------------------------------------------------------------------------- /roles/admin-users/files/authorized_keys/mss.keys: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDY5gru1j3hrwgixlqowfYW+s/sQFM88MGsv4WFr17UWmrq5D949586jbhkOwzg2gzmvnkSYKpQw3ztMrbQYT4VZ9dE4w7qtSQvwxcWD8ynUap5cJ1W/Pq5ihY2OU1QxguoV/o9fmzzpSqqbgJciDhCqSTHKfIDggdLR/ScZ2mJ9AXDHmN5jaXNch0BddqIbAF3Yt251ddG30MJ+SDj+B5e9lkHF66T5dGV4AeorMKgYiWZp/v9enFG4iXIlha15vcx77yf7JuREQfthm+B7oItMAk/v8jsF6n0DKoJe0WAxCeBON+tw1p8ciMdGDfJozDabm7a7WP84LMnoaulOGtX /home/mss/.ssh/id_rsa 2 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBmCCRwuUB1Tu+LfO+SnCbjFgPRcffQR2R5ispwG9VyD mss@workhorse-x1 3 | -------------------------------------------------------------------------------- /roles/admin-users/files/authorized_keys/p12tic.keys: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC4fF5ZwI+XzwhlOPfOwM/RqzsfJ+T1ENZkr0uq2ajjLPS2RRKPnrjrjYfg0uRqTHEdmzygCHxdHcAOSVyw2yue1isIX8Fh39UzC5P63u+A2Qbz77IE3KoxRGNAzh3Qa4bTz6rY29d5BhXsuTHlXqBYNDtpwxT1T3l+ghi1YT+Ken2XWy58Sy/ljS5CBTiujbRQbVw+7a49rQKSlP64o6GGV4QAg7W9QuHhGNS+Ex2VknqbALPo2VQt7Uu0swjc4xfLpGtSFmEI6//cq0hqRML6x+S2YVuXgb4RmPS4F1fpDnYfxGkhWrAU/u/tzZDavU/cSUWHWx8p5ZS7Do8vHSNH povilas@radix.lt/github 2 | -------------------------------------------------------------------------------- /roles/admin-users/files/authorized_keys/sean.keys: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC35o7aYicHyYwBR3pM1E00UB+aS1gu/MN/O7EbAGppP/2O3upjHSCuU2t2Y8rd9yuGqkTHZTIGQb40OAimbRgflz6wtS4Xx0W29zHJAsRPTJMsIDqXErkRilUdiXn6k9DaxPLgp0wCLh++9p6xRBpxH3+DKNiAKTgaULMwJ8yFl5hrDf2/mzjfRZr1veculIiS6FDPXmEseSLoeNqhYdAoqcxf7EeV+cMOwirGY9pGDytGbidLcmdu/mt6vG+JvrwiqZQSLUMa6A6UqUur7fEAbXpCEttiWSV+hF9C1eHD16MgIZUZBIYSLk+niECgjxDivp+aD1SGcwu8KdTov7In 2 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINkJUZ43CP9bWkMz4ogwKTkdifhJMVkL/KMarlOYcAk/ 3 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIA2Yum4BOzzF0IdHTAsjElM1ojGT2a6D1azRLgDQnyTg 4 | -------------------------------------------------------------------------------- /roles/admin-users/files/authorized_keys/tardyp.keys: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAsgpW9h6Iekd45/0onbirhAEOikOMwi9viDn5XHlQG9Sae0Fk4oGwZejHaYQzjm+js4GDOGqcb2zrimqmwzvMkcyXKFVk6JekCnyflMETnllN3gw58klXzv2uT20Lil97JGi9gRTE9mgQNP4421xSlFeRCJ2cMFod3RHdQ3P7lCk6kSKzDMJ3BfGpb5Giodx2+4UoUQmAsdL4OGeLMnjQ5If6rxT42C6g/CXeJuBtoDviF9b9kXiRKtOn8UHxK0mG53MbAEtNDLhJYLb0gBuR8VuT+EpbsJgliQOUm01FPIQ80hLrA49HaCi5RO0gpv0KZwpSav3daYpuFq4d//S1 tardyp 2 | 
-------------------------------------------------------------------------------- /roles/admin-users/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add/remove administrative users 3 | user: 4 | name: "{{ item.username }}" 5 | groups: wheel 6 | remove: true 7 | password: x # unlock, but not a valid password 8 | state: "{{ item.state }}" 9 | with_items: "{{ admin_users }}" 10 | 11 | - name: add administrative users' SSH keys 12 | authorized_key: 13 | user: "{{ item.username }}" 14 | key: "{{ lookup('file', 'authorized_keys/' + item.username + '.keys') }}" 15 | with_items: "{{ admin_users }}" 16 | # only operate on present users; absent users don't have a homedir in /etc/passwd anymore 17 | when: item.state == 'present' 18 | -------------------------------------------------------------------------------- /roles/base-jailhost/files/flavours/base/etc/make.conf: -------------------------------------------------------------------------------- 1 | WRKDIRPREFIX=/var/ports 2 | DISTDIR=/var/ports/distfiles 3 | PACKAGES=/var/ports/packages 4 | INDEXDIR=/var/ports 5 | 6 | OPTIONS_UNSET=LIB32 DOCS EXAMPLES 7 | -------------------------------------------------------------------------------- /roles/base-jailhost/files/flavours/base/etc/periodic.conf: -------------------------------------------------------------------------------- 1 | daily_output="/var/log/daily.log" 2 | weekly_output="/var/log/weekly.log" 3 | monthly_output="/var/log/monthly.log" 4 | daily_status_security_output="/var/log/daily_status_security.log" 5 | daily_status_network_enable="NO" 6 | daily_status_security_ipfwlimit_enable="NO" 7 | daily_status_security_ipfwdenied_enable="NO" 8 | weekly_whatis_enable="NO" # our jails are read-only /usr 9 | -------------------------------------------------------------------------------- /roles/base-jailhost/files/flavours/base/etc/rc.conf: -------------------------------------------------------------------------------- 1 | network_interfaces="" 2 | rpcbind_enable="NO" 3 | cron_flags="$cron_flags -J 15" 4 | syslogd_flags="-ss" 5 | sendmail_enable="NO" 6 | sendmail_submit_enable="NO" 7 | sendmail_outbound_enable="NO" 8 | sendmail_msp_queue_enable="NO" 9 | sshd_enable="NO" 10 | -------------------------------------------------------------------------------- /roles/base-jailhost/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Be explicit about this 3 | allow_duplicates: no 4 | dependencies: 5 | - base-servicehost 6 | -------------------------------------------------------------------------------- /roles/base-jailhost/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install ezjail 3 | tags: jail 4 | pkgng: 5 | name: ezjail 6 | state: present 7 | environment: "{{ proxy_env }}" 8 | 9 | - name: enable jail networking 10 | lineinfile: 11 | dest: "/etc/rc.conf" 12 | line: "{{item.option}}=\"{{item.value}}\"" 13 | regexp: "^{{item.option}}=.*" 14 | state: present 15 | with_items: 16 | - option: "cloned_interfaces" 17 | value: "{{ nat_if }} {{ internal_if }} {{ external_if }}" 18 | - option: "pf_rules" 19 | value: "{{ pf_conf }}" 20 | - option: "pf_enable" 21 | value: "YES" 22 | when: "nat_if is defined" 23 | 24 | - name: pf config 25 | tags: jail 26 | template: 27 | src: pf.conf 28 | dest: "{{ pf_conf }}" 29 | validate: pfctl -vnf %s 30 | when: "nat_if is defined" 31 | 32 | - name: Restart pf 33 | tags: jail 34 | 
service: 35 | name: pf 36 | state: restarted 37 | 38 | - name: ezjail config 39 | tags: jail 40 | template: 41 | src: ezjail.conf 42 | dest: "{{ ezjail_conf_file }}" 43 | mode: "0644" 44 | 45 | - name: Populate basejail 46 | tags: jail 47 | command: ezjail-admin install 48 | args: 49 | creates: "{{ ezjail_jaildir }}/{{ ezjail_base_jail }}" 50 | environment: "{{ proxy_env }}" 51 | 52 | - name: Create base flavour directories 53 | tags: jail 54 | file: 55 | path: "{{ ezjail_jaildir }}/flavours/{{ ezjail_default_flavour }}/{{ item }}" 56 | state: directory 57 | with_items: 58 | - etc 59 | 60 | - name: Set base flavour config 61 | tags: jail 62 | copy: 63 | dest: "{{ ezjail_jaildir }}/flavours/{{ item }}" 64 | src: "flavours/{{ item }}" 65 | with_items: 66 | - "{{ ezjail_default_flavour }}/etc/make.conf" 67 | - "{{ ezjail_default_flavour }}/etc/periodic.conf" 68 | - "{{ ezjail_default_flavour }}/etc/rc.conf" 69 | 70 | - name: Enable jails 71 | tags: jail 72 | service: 73 | name: ezjail 74 | enabled: true 75 | -------------------------------------------------------------------------------- /roles/base-jailhost/templates/ezjail.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | # Path options 3 | ezjail_jaildir='{{ ezjail_jaildir }}' 4 | ezjail_jailtemplate="${ezjail_jaildir}/newjail" 5 | ezjail_jailbase="${ezjail_jaildir}/{{ ezjail_base_jail }}" 6 | ezjail_sourcetree=/usr/src 7 | ezjail_ftphost=ftp.freebsd.org 8 | ezjail_archivedir="{{ ezjail_jaildir }}/archived" 9 | 10 | # Jail admin options 11 | ezjail_default_execute="/usr/bin/login -f root" 12 | 13 | # Jail creation options 14 | ezjail_default_flavour="{{ ezjail_default_flavour }}" 15 | # ezjail_uglyperlhack="YES" 16 | ezjail_mount_enable="NO" 17 | ezjail_devfs_enable="YES" 18 | ezjail_devfs_ruleset="devfsrules_jail" 19 | ezjail_procfs_enable="NO" 20 | ezjail_fdescfs_enable="NO" 21 | -------------------------------------------------------------------------------- /roles/base-jailhost/templates/pf.conf: -------------------------------------------------------------------------------- 1 | # Interfaces 2 | ext_if = "{{ nat_if }}" 3 | int_if = "{{ internal_if }}" 4 | 5 | jail_net = $int_if:network 6 | 7 | # NAT 8 | set skip on "{{ internal_if }}" 9 | nat pass on $ext_if from $jail_net to any -> $ext_if 10 | -------------------------------------------------------------------------------- /roles/base-jailhost/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # pf configuration file 3 | pf_conf: /etc/pf.conf 4 | # Directory where jails are created 5 | ezjail_jaildir: /usr/local/jail 6 | # Directory where the ezjail-admin keeps jail configuration files 7 | ezjail_conf_dir: /usr/local/etc/ezjail 8 | # Main ezjail configuration file 9 | ezjail_conf_file: /usr/local/etc/ezjail.conf 10 | # NOTE: these two are different 11 | ezjail_base_jail: base 12 | ezjail_default_flavour: base 13 | -------------------------------------------------------------------------------- /roles/base-servicehost/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - base 4 | - ssh 5 | - admin-users 6 | -------------------------------------------------------------------------------- /roles/base-vm/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - base 4 | - ssh 5 | - admin-users 6 | 
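A minimal illustration of how the pf.conf template above renders (the interface names are hypothetical, not values from this repository): with nat_if set to em0 and internal_if set to lo1 in host vars, the "pf config" task would install roughly the following /etc/pf.conf, after the pfctl -vnf validation shown in that task:

    # Interfaces
    ext_if = "em0"
    int_if = "lo1"

    jail_net = $int_if:network

    # NAT
    set skip on "lo1"
    nat pass on $ext_if from $jail_net to any -> $ext_if

This is simply the template with the two variables substituted: traffic on the internal interface is exempted from filtering, and traffic from the jail network is NATed out through the nat interface.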
-------------------------------------------------------------------------------- /roles/base/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # set this to False to skip configuring syslog (on a host with a special syslog 3 | # configuration) 4 | configure_syslog: True 5 | 6 | # set this to False to skip configuring sendmail (in cases where it is manually 7 | # configured or another MTA is in use) 8 | configure_sendmail: True 9 | -------------------------------------------------------------------------------- /roles/base/files/sudoers-wheel: -------------------------------------------------------------------------------- 1 | %wheel ALL=(ALL) NOPASSWD: ALL 2 | -------------------------------------------------------------------------------- /roles/base/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload syslogd 3 | service: 4 | name: syslogd 5 | state: reloaded 6 | 7 | - name: reload sendmail 8 | service: 9 | name: sendmail 10 | state: restarted 11 | 12 | - name: newaliases 13 | command: /usr/bin/newaliases 14 | -------------------------------------------------------------------------------- /roles/base/tasks/ansible-pull.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install ansible (FreeBSD) 3 | pkgng: 4 | name: "{{ pkg_ansible_version }}" 5 | state: present 6 | when: ansible_distribution == "FreeBSD" 7 | environment: "{{ proxy_env }}" 8 | 9 | - name: install ansible (Debian) 10 | apt: 11 | name: ansible 12 | state: present 13 | when: ansible_distribution == "Debian" 14 | 15 | - name: create wheel group 16 | group: 17 | name: "wheel" 18 | state: present 19 | 20 | - name: create service account 21 | user: 22 | name: "{{ service_account }}" 23 | groups: wheel 24 | home: "/home/{{ service_account }}" 25 | state: present 26 | when: ansible_distribution == "FreeBSD" 27 | 28 | - name: create service account 29 | user: 30 | name: "{{ service_account }}" 31 | groups: 32 | - wheel 33 | - sudo 34 | - docker 35 | home: "/home/{{ service_account }}" 36 | state: present 37 | when: ansible_distribution == "Debian" 38 | 39 | - name: install vault password 40 | copy: 41 | content: "{{ vault_password }}" 42 | dest: "/home/{{ service_account }}/.vault-password" 43 | mode: 0600 44 | owner: "{{ service_account }}" 45 | group: wheel 46 | 47 | # ansible-pull expects the inventory file to exist already, even before it 48 | # clones the repo, so we clone the repo explicitly.
49 | - name: clone ansible git repository 50 | tags: ansible-pull 51 | become: yes 52 | become_user: "{{ service_account }}" 53 | git: 54 | repo: "{{ ansible_git_repository }}" 55 | dest: "/home/{{ service_account }}/repo" 56 | version: master 57 | accept_hostkey: yes 58 | environment: "{{ proxy_env }}" 59 | 60 | - name: install ansible-pull cronscript 61 | tags: ansible-pull 62 | template: 63 | src: "run-ansible-pull.sh.j2" 64 | dest: "/home/{{ service_account }}/run-ansible-pull.sh" 65 | mode: 0755 66 | owner: "{{ service_account }}" 67 | group: wheel 68 | 69 | - name: install ansible-pull crontask 70 | tags: ansible-pull 71 | cron: 72 | name: ansible-pull 73 | job: "flock /home/{{ service_account }}/ansible-pull.lock /home/{{ service_account }}/run-ansible-pull.sh" 74 | user: "{{ service_account }}" 75 | minute: "{{ 60 | random(seed=ansible_fqdn) }}" 76 | state: "{{ 'absent' if no_ansible_pull|default('false')|bool else 'present' }}" 77 | -------------------------------------------------------------------------------- /roles/base/tasks/hosts.yml: -------------------------------------------------------------------------------- 1 | - name: "/etc/hosts - add configuration for system to know its friends on the subnet" 2 | template: 3 | src: hosts.j2 4 | dest: "/etc/hosts" 5 | -------------------------------------------------------------------------------- /roles/base/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: sudo.yml 3 | - import_tasks: hosts.yml 4 | when: ansible_hostname != "s-bw2" 5 | - include_tasks: syslog.yml 6 | when: ansible_distribution == "FreeBSD" 7 | - include_tasks: sendmail.yml 8 | when: ansible_distribution == "FreeBSD" 9 | - import_tasks: rootpw.yml 10 | when: ansible_hostname != "s-bw2" 11 | - import_tasks: ansible-pull.yml 12 | - include_tasks: periodic.yml 13 | when: ansible_distribution == "FreeBSD" 14 | - include_tasks: python-cert-symlink.yml 15 | when: ansible_distribution == "FreeBSD" 16 | -------------------------------------------------------------------------------- /roles/base/tasks/periodic.yml: -------------------------------------------------------------------------------- 1 | # configure periodic to not mail the admins too often 2 | - name: set periodic options 3 | lineinfile: 4 | dest: "/etc/periodic.conf" 5 | line: "{{item.option}}=\"{{item.value}}\"" 6 | regexp: "^{{item.option}}=.*" 7 | state: present 8 | create: yes 9 | with_items: 10 | # send logs to local disk, not email 11 | - option: daily_output 12 | value: /var/log/daily.log 13 | - option: weekly_output 14 | value: /var/log/weekly.log 15 | - option: monthly_output 16 | value: /var/log/monthly.log 17 | # include the security log with the daily log, rather 18 | # than mailing separately 19 | - option: daily_status_security_inline 20 | value: "YES" 21 | # and *try* not to log much (this doesn't completely 22 | # eliminate the logging, though) 23 | - option: daily_show_success 24 | value: "NO" 25 | - option: daily_show_info 26 | value: "NO" 27 | - option: daily_show_badconfig 28 | value: "YES" 29 | - option: weekly_show_success 30 | value: "NO" 31 | - option: weekly_show_info 32 | value: "NO" 33 | - option: weekly_show_badconfig 34 | value: "YES" 35 | - option: monthly_show_success 36 | value: "NO" 37 | - option: monthly_show_info 38 | value: "NO" 39 | - option: monthly_show_badconfig 40 | value: "YES" 41 | -------------------------------------------------------------------------------- 
/roles/base/tasks/python-cert-symlink.yml: -------------------------------------------------------------------------------- 1 | - name: add /etc/ssl/cert.pem for Python-2.7.9's benefit 2 | file: 3 | path: /etc/ssl/cert.pem 4 | state: link 5 | src: /usr/local/etc/ssl/cert.pem 6 | -------------------------------------------------------------------------------- /roles/base/tasks/rootpw.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set root password 3 | user: 4 | name: root 5 | password: "{{crypted_root_pw}}" 6 | -------------------------------------------------------------------------------- /roles/base/tasks/sendmail.yml: -------------------------------------------------------------------------------- 1 | # these sendmail options allow local mail submission; see #3131 2 | - name: set sendmail options 3 | lineinfile: 4 | dest: "/etc/rc.conf" 5 | line: "{{item.option}}=\"{{item.value}}\"" 6 | regexp: "^{{item.option}}=.*" 7 | state: present 8 | notify: reload sendmail 9 | with_items: 10 | - option: "sendmail_submit_enable" 11 | value: "NO" 12 | - option: "sendmail_outbound_enable" 13 | value: "YES" 14 | - option: "sendmail_msp_queue_enable" 15 | value: "YES" 16 | when: configure_sendmail 17 | 18 | - name: update root mail alias 19 | lineinfile: 20 | dest: "/etc/mail/aliases" 21 | line: "root: {{root_email_destination}}" 22 | regexp: "^#?root:" 23 | state: present 24 | notify: newaliases 25 | when: configure_sendmail 26 | 27 | - name: start sendmail 28 | service: 29 | name: sendmail 30 | enabled: true 31 | state: started 32 | when: configure_sendmail 33 | 34 | -------------------------------------------------------------------------------- /roles/base/tasks/sudo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: enable sudo modular configuration 3 | lineinfile: 4 | dest: "{{ sudoers_root }}/sudoers" 5 | line: "#includedir {{ sudoers_root }}/sudoers.d" 6 | state: present 7 | validate: "visudo -cf %s" 8 | 9 | - name: enable passwordless sudo for members of the wheel group 10 | copy: 11 | src: "sudoers-wheel" 12 | dest: "{{ sudoers_root }}/sudoers.d/sudoers-wheel" 13 | -------------------------------------------------------------------------------- /roles/base/tasks/syslog.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "start syslog" 3 | service: 4 | name: syslogd 5 | enabled: yes 6 | state: started 7 | when: configure_syslog 8 | 9 | - name: "syslog.conf - add configuration to send to global_syslog_server" 10 | tags: syslog 11 | lineinfile: 12 | line: "*.* @{{ global_syslog_server }}" 13 | regexp: "^\\*.\\* @" 14 | # insert after the header comment so we're not filtered 15 | # by any preceding programs (!) or hosts (+). 16 | insertafter: "Consult the syslog.conf.5. manpage." 
17 | dest: /etc/syslog.conf 18 | notify: reload syslogd 19 | when: configure_syslog 20 | 21 | - name: "syslog.conf - remove old configuration" 22 | tags: syslog 23 | lineinfile: 24 | line: "*.* @192.168.80.51" 25 | regexp: "^\\*.\\* *@192.168.80.51" 26 | state: "absent" 27 | dest: /etc/syslog.conf 28 | notify: reload syslogd 29 | when: configure_syslog 30 | -------------------------------------------------------------------------------- /roles/base/templates/hosts.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | ::1 localhost localhost.my.domain 4 | 127.0.0.1 localhost localhost.my.domain 5 | 6 | # generated according to group_vars/all.hosts_ips 7 | 8 | {% for name, ip in hosts_ips | dictsort(false, 'value') %} 9 | {{internal_network}}.{{ip}} {{name}}.int.buildbot.net {{name}} 10 | {% endfor %} 11 | -------------------------------------------------------------------------------- /roles/base/templates/run-ansible-pull.sh.j2: -------------------------------------------------------------------------------- 1 | #! {{ '/usr/local/bin/bash' if ansible_distribution == 'FreeBSD' else '/bin/bash' }} 2 | 3 | set -e 4 | 5 | # set some environment variables that ansible-pull needs 6 | export HOME=/home/{{ service_account }} 7 | export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/games:/usr/local/sbin:/usr/local/bin:/home/{{ service_account }}/bin 8 | 9 | # set up to gather output 10 | OUTPUT_DIR=/tmp/ansible-pull.$(date --iso-8601=seconds) 11 | mkdir $OUTPUT_DIR || exit 1 12 | RETRY_LOG=$OUTPUT_DIR/retries.$$.log 13 | ANSIBLE_LOG=$OUTPUT_DIR/ansible-pull.log 14 | 15 | # retry controls 16 | RETRIES=5 17 | RETRY=0 18 | 19 | while [[ $RETRY -lt $RETRIES ]]; do 20 | RETRY=$((RETRY + 1)) 21 | 22 | # Run ansible-pull from the directory containing the repository (and, thus, the inventory file) 23 | # When running manually from custom branch, add "--skip-tags ansible-pull" to not revert to 24 | # master branch mid-execution. 25 | cd /home/{{ service_account}}/repo 26 | set +e 27 | timeout -k 2m 40m ansible-pull \ 28 | -C master \ 29 | -d /home/{{ service_account }}/repo/ \ 30 | -m git \ 31 | -U {{ ansible_git_repository }} \ 32 | -i localhost \ 33 | --vault-password-file=/home/{{ service_account }}/.vault-password \ 34 | -l "localhost,$(hostname),127.0.0.1" \ 35 | local.yml 2>&1 | tee -a $ANSIBLE_LOG 36 | RESULT=$? 37 | set -e 38 | 39 | if [[ $RESULT -eq 0 ]]; then 40 | break 41 | else 42 | echo '==== RETRYING ====' >> $RETRY_LOG 43 | sleep 30 44 | fi 45 | done 46 | 47 | if [[ $RESULT -eq 0 ]]; then 48 | # If everything was okay, check for deprecations but only at the 00:* run. 49 | # These deprecation warnings will happen on every run so only notify once a 50 | # day. The -A3 seems to be enough to get all/most of the deprecation 51 | # warning to make sense of what to fix. 52 | HOUR=$(date +%H) 53 | if [[ $HOUR == 00 ]]; then 54 | grep -A3 '\[DEPRECATION WARNING\]' $ANSIBLE_LOG 55 | fi 56 | else 57 | cat $RETRY_LOG 58 | echo "ansible-pull run on "$(uname -n)" failed."
59 | echo 60 | cat $ANSIBLE_LOG 61 | fi 62 | 63 | rm -r "$OUTPUT_DIR" 64 | 65 | # clean up temp files 66 | find /home/{{ service_account }}/.ansible/tmp -type d -mtime +1 -delete 2>&1 | grep -v 'Permission denied' 67 | -------------------------------------------------------------------------------- /roles/base/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sudoers_root: "{{ '/usr/local/etc' if ansible_distribution == 'FreeBSD' else '/etc' }}" 3 | -------------------------------------------------------------------------------- /roles/bb-master-docker/files/.dockerignore: -------------------------------------------------------------------------------- 1 | volumes 2 | -------------------------------------------------------------------------------- /roles/bb-master-docker/files/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nopush/buildbot-master-deps 2 | 3 | RUN useradd --uid 1000 --create-home --shell /bin/bash bbuser 4 | 5 | USER bbuser 6 | RUN mkdir /home/bbuser/workdir 7 | WORKDIR /home/bbuser/workdir 8 | 9 | ARG BUILDBOT_REVISION=unknown 10 | RUN git clone https://github.com/buildbot/buildbot.git /home/bbuser/workdir/buildbot 11 | RUN cd /home/bbuser/workdir/buildbot && git checkout metabuildbot || git checkout master 12 | 13 | RUN virtualenv --python=python3 /home/bbuser/buildbot_venv \ 14 | && /home/bbuser/buildbot_venv/bin/pip install -U pip setuptools \ 15 | && make -C /home/bbuser/workdir/buildbot frontend_deps \ 16 | && cd /home/bbuser/workdir/buildbot \ 17 | && /home/bbuser/buildbot_venv/bin/pip install \ 18 | -r /home/bbuser/workdir/buildbot/requirements-ci.txt \ 19 | psycopg2-binary==2.9.7 \ 20 | && /home/bbuser/buildbot_venv/bin/pip install -e pkg \ 21 | && for name in \ 22 | master \ 23 | www/base \ 24 | www/grid_view \ 25 | www/console_view \ 26 | www/waterfall_view \ 27 | ; \ 28 | do \ 29 | /home/bbuser/buildbot_venv/bin/pip install -e $name ; \ 30 | done 31 | 32 | RUN /home/bbuser/buildbot_venv/bin/buildbot create-master /home/bbuser/workdir 33 | 34 | ARG METABBOTCFG_REVISION=unknown 35 | RUN git clone https://github.com/buildbot/metabbotcfg.git /home/bbuser/workdir/metabbotcfg 36 | 37 | RUN ln -s /home/bbuser/workdir/metabbotcfg/master.cfg /home/bbuser/workdir/master.cfg 38 | 39 | ADD run_buildbot.sh /run_buildbot.sh 40 | 41 | CMD ["/run_buildbot.sh"] 42 | -------------------------------------------------------------------------------- /roles/bb-master-docker/files/Dockerfile.deps: -------------------------------------------------------------------------------- 1 | FROM debian:12-slim 2 | 3 | RUN apt-get update \ 4 | && DEBIAN_FRONTEND=noninteractive apt-get upgrade -y \ 5 | && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ 6 | ca-certificates \ 7 | git \ 8 | wget \ 9 | gpg \ 10 | python3 \ 11 | && rm -rf /var/lib/apt/lists/* 12 | 13 | # Required when using nodejs from nodesource and yarn from Debian. 14 | # Remove when migrating to bookworm. 
See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=933229 15 | ENV NODE_PATH /usr/lib/nodejs:/usr/share/nodejs 16 | 17 | RUN \ 18 | KEYRING=/usr/share/keyrings/nodesource.gpg \ 19 | && wget --quiet -O - https://deb.nodesource.com/gpgkey/nodesource.gpg.key | gpg --dearmor > "$KEYRING" \ 20 | && gpg --no-default-keyring --keyring "$KEYRING" --list-keys \ 21 | && chmod a+r "$KEYRING" \ 22 | && VERSION=node_18.x \ 23 | && DISTRO=bookworm \ 24 | && echo "deb [signed-by=$KEYRING] https://deb.nodesource.com/$VERSION $DISTRO main" >> /etc/apt/sources.list.d/nodesource.list \ 25 | && echo "deb-src [signed-by=$KEYRING] https://deb.nodesource.com/$VERSION $DISTRO main" >> /etc/apt/sources.list.d/nodesource.list \ 26 | && cat /etc/apt/sources.list.d/nodesource.list 27 | 28 | RUN \ 29 | KEYRING=/usr/share/keyrings/cloud.google.gpg \ 30 | && wget --quiet -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor > "$KEYRING" \ 31 | && gpg --no-default-keyring --keyring "$KEYRING" --list-keys \ 32 | && chmod a+r "$KEYRING" \ 33 | && echo "deb [signed-by=$KEYRING] https://packages.cloud.google.com/apt cloud-sdk main" >> /etc/apt/sources.list.d/google-cloud-sdk.list \ 34 | && cat /etc/apt/sources.list.d/google-cloud-sdk.list 35 | 36 | RUN apt-get update \ 37 | && DEBIAN_FRONTEND=noninteractive apt-get -y install -q \ 38 | curl \ 39 | git \ 40 | google-cloud-cli \ 41 | google-cloud-sdk-gke-gcloud-auth-plugin \ 42 | kubernetes-client \ 43 | libcairo-gobject2 \ 44 | libcairo2-dev \ 45 | libgirepository1.0-dev \ 46 | libglib2.0-dev \ 47 | libffi-dev \ 48 | libpq-dev \ 49 | libssl-dev \ 50 | nodejs \ 51 | pkg-config \ 52 | python3 \ 53 | python3-dev \ 54 | python3-pip \ 55 | yarnpkg \ 56 | tar \ 57 | tzdata \ 58 | virtualenv \ 59 | && \ 60 | rm -rf /var/lib/apt/lists/* 61 | -------------------------------------------------------------------------------- /roles/bb-master-docker/files/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.5" 2 | 3 | services: 4 | buildbot: 5 | build: 6 | context: . 7 | dockerfile: Dockerfile 8 | image: nopush/buildbot-master 9 | ports: 10 | - 9989:9989 11 | restart: unless-stopped 12 | stop_grace_period: 5m 13 | volumes: 14 | - ../secrets:/home/bbuser/secrets 15 | environment: 16 | VIRTUAL_HOST_MULTIPORTS: |- 17 | { "buildbot.buildbot.net": { 18 | "/": { "port": 8010, "dest": "" }, 19 | "/workerws/": { "port": 9988, "dest": "/"} } } 20 | BB_LISTEN_PORT: tcp:8010 21 | BB_URL: https://buildbot.buildbot.net/ 22 | hostname: buildbot.buildbot.net 23 | networks: 24 | - buildbot 25 | - httpproxy 26 | depends_on: 27 | - postgres_buildbot 28 | 29 | postgres_buildbot: 30 | image: postgres:15.3 31 | env_file: 32 | - ../secrets/docker/postgres_db.env 33 | restart: unless-stopped 34 | stop_grace_period: 1m 35 | volumes: 36 | - ./volumes/postgres:/var/lib/postgresql/data 37 | networks: 38 | - buildbot 39 | 40 | networks: 41 | buildbot: 42 | httpproxy: 43 | external: true 44 | -------------------------------------------------------------------------------- /roles/bb-master-docker/files/run_buildbot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | BB_SECRETS_ROOT=/home/bbuser/secrets/buildbot 4 | OTHER_SECRETS_ROOT=/home/bbuser/secrets/other 5 | cd /home/bbuser/workdir 6 | 7 | cp -t metabbotcfg $BB_SECRETS_ROOT/* 8 | 9 | until /home/bbuser/buildbot_venv/bin/buildbot upgrade-master . 
; do 10 | echo "Could not upgrade Buildbot" 11 | sleep 1 12 | done 13 | 14 | gcloud auth activate-service-account \ 15 | $(cat "$OTHER_SECRETS_ROOT/gke_buildbot_master_key_client_email.pass") \ 16 | --key-file "$OTHER_SECRETS_ROOT/buildbot-master.json" 17 | 18 | gcloud container clusters get-credentials metabuildbot-fr \ 19 | --zone europe-west9-b \ 20 | --project $(cat "$OTHER_SECRETS_ROOT/gke_project.pass") 21 | 22 | exec /home/bbuser/buildbot_venv/bin/twistd --pidfile= -ny buildbot.tac 23 | -------------------------------------------------------------------------------- /roles/bb-master-docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Create and configure a Buildbot master to watch changes in Buildbot's git repo. 2 | --- 3 | - name: 'Check for busy master' 4 | uri: 5 | url: "https://{{ web_host_name }}/api/v2/builds?complete=false" 6 | headers: 7 | Accept: application/json 8 | ignore_errors: True 9 | register: master_building 10 | 11 | # only upgrade master when not busy 12 | - when: master_building.status != 200 or master_building.json is not defined or master_building.json.meta.total == 0 13 | block: 14 | - name: Make sure we have an updated copy of Buildbot repository 15 | # Note that buildbot container has its own copy of the repository 16 | become: yes 17 | become_user: "{{ service_account }}" 18 | git: 19 | repo: "https://github.com/buildbot/buildbot.git" 20 | dest: "{{ bb_root }}/buildbot" 21 | update: yes 22 | version: "master" 23 | accept_hostkey: True 24 | register: bb_repo_state 25 | tags: bb-master 26 | 27 | - name: Make sure we have an updated copy of metabbotcfg repository 28 | # Note that buildbot container has its own copy of the repository 29 | become: yes 30 | become_user: "{{ service_account }}" 31 | git: 32 | repo: "https://github.com/buildbot/metabbotcfg.git" 33 | dest: "{{ bb_root }}/metabbotcfg" 34 | update: yes 35 | version: "master" 36 | accept_hostkey: True 37 | register: mbb_repo_state 38 | environment: "{{ proxy_env }}" 39 | tags: bb-master 40 | 41 | - name: Print current state 42 | ansible.builtin.debug: 43 | msg: "Master building status: {{ master_building.status }} bb_repo_state: {{ bb_repo_state }} mbb_repo_state: {{ mbb_repo_state }}" 44 | 45 | - when: master_building.status != 200 or bb_repo_state is changed or mbb_repo_state is changed 46 | block: 47 | 48 | - name: Create directories 49 | become: yes 50 | become_user: "{{ service_account }}" 51 | file: 52 | path: "{{ item }}" 53 | state: directory 54 | loop: 55 | - "{{ bb_root }}/docker" 56 | - "{{ bb_root }}/docker/volumes" 57 | - "{{ bb_root }}/docker/volumes/postgres" 58 | - "{{ bb_root }}/secrets" 59 | - "{{ bb_root }}/secrets/buildbot" 60 | - "{{ bb_root }}/secrets/docker" 61 | - "{{ bb_root }}/secrets/other" 62 | tags: bb-master 63 | 64 | # Secrets have their owner explicitly set to the same numeric ID as the bbuser account in the 65 | # docker container. The secrets will be mounted to the docker container and must be readable 66 | - name: Make sure we have latest build worker passwords 67 | become: yes 68 | copy: 69 | dest: "{{ bb_root }}/secrets/buildbot/{{ item }}.pass" 70 | content: "{{ build_slaves[item].password }}" 71 | mode: "0600" 72 | owner: 1000 73 | with_items: "{{ slave_master_allocations[\"master\"] }}" 74 | # NOTE(sa2ajj): in case a password changes, a restart is also required, but 75 | # this is for future improvements. 
76 | no_log: true 77 | tags: bb-master 78 | 79 | - name: Make sure we have latest JSON creds 80 | become: yes 81 | copy: 82 | dest: "{{ bb_root }}/secrets/{{item.filename}}" 83 | content: "{{ item.content | to_json}}" 84 | mode: "0600" 85 | owner: 1000 86 | with_items: 87 | - filename: "other/buildbot-master.json" 88 | content: "{{ gke_buildbot_master_key }}" 89 | - filename: "buildbot/github_oauth.pass" 90 | content: "{{ github_oauth_keys[\"buildbot\"] | default({}) }}" 91 | no_log: true 92 | tags: bb-master 93 | 94 | - name: Make sure we have latest raw creds 95 | become: yes 96 | copy: 97 | dest: "{{ bb_root }}/secrets/{{item.filename}}" 98 | content: "{{ item.content }}" 99 | mode: "0600" 100 | owner: 1000 101 | with_items: 102 | - filename: "buildbot/db_url" 103 | content: "postgresql+psycopg2://bb:{{ database_password }}@postgres_buildbot/bb" 104 | - filename: "buildbot/github_token" 105 | content: "{{ github_api_token }}" 106 | - filename: "other/gke_buildbot_master_key_client_email.pass" 107 | content: "{{ gke_buildbot_master_key.client_email }}" 108 | - filename: "other/gke_project.pass" 109 | content: "{{ gke_project }}" 110 | no_log: true 111 | tags: bb-master 112 | 113 | - name: Add database environment file 114 | become: yes 115 | become_user: "{{ service_account }}" 116 | ansible.builtin.copy: 117 | dest: "{{ bb_root }}/secrets/docker/postgres_db.env" 118 | content: | 119 | POSTGRES_PASSWORD={{ database_password }} 120 | POSTGRES_USER=bb 121 | POSTGRES_DB=bb 122 | # This will be read by docker, thus owner is not set 123 | mode: "0600" 124 | no_log: true 125 | tags: bb-master 126 | 127 | - name: Remove passwords for obsolete slaves 128 | become: yes 129 | become_user: "{{ service_account }}" 130 | file: 131 | dest: "{{ bb_root }}/secrets/buildbot/{{ item }}.pass" 132 | state: absent 133 | with_items: "{{ slave_master_allocations.obsolete }}" 134 | tags: bb-master 135 | 136 | - name: Copy docker files 137 | become: yes 138 | become_user: "{{ service_account }}" 139 | copy: 140 | dest: "{{ bb_root }}/docker/{{ item }}" 141 | src: "{{ role_path }}/files/{{ item }}" 142 | mode: preserve 143 | loop: 144 | - docker-compose.yml 145 | - Dockerfile 146 | - Dockerfile.deps 147 | - run_buildbot.sh 148 | - .dockerignore 149 | tags: bb-master 150 | 151 | - name: Rebuild buildbot deps container 152 | become: yes 153 | become_user: "{{ service_account }}" 154 | ansible.builtin.command: "docker build -f Dockerfile.deps -t nopush/buildbot-master-deps ." 
155 | args: 156 | chdir: "{{ bb_root }}/docker" 157 | tags: bb-master 158 | 159 | - name: Rebuild buildbot container 160 | become: yes 161 | become_user: "{{ service_account }}" 162 | ansible.builtin.command: "docker-compose build --build-arg BUILDBOT_REVISION={{ bb_repo_state.after }} --build-arg METABBOTCFG_REVISION={{ mbb_repo_state.after }} buildbot" 163 | args: 164 | chdir: "{{ bb_root }}/docker" 165 | tags: bb-master 166 | 167 | - name: 'Check for busy master again' 168 | uri: 169 | url: "https://{{ web_host_name }}/api/v2/builds?complete=false" 170 | headers: 171 | Accept: application/json 172 | ignore_errors: True 173 | register: master_building2 174 | 175 | - name: Restart buildbot container 176 | become: yes 177 | become_user: "{{ service_account }}" 178 | ansible.builtin.shell: "docker-compose down && docker-compose up -d" 179 | args: 180 | chdir: "{{ bb_root }}/docker" 181 | when: "master_building2.status != 200 or master_building2.json is not defined or master_building2.json.meta.total == 0" 182 | tags: bb-master 183 | 184 | - name: Schedule restart for next iteration 185 | become: yes 186 | become_user: "{{ service_account }}" 187 | git: 188 | repo: "https://github.com/buildbot/metabbotcfg.git" 189 | dest: "{{ bb_root }}/metabbotcfg" 190 | update: yes 191 | version: "master^" 192 | accept_hostkey: True 193 | when: "not (master_building2.status != 200 or master_building2.json is not defined or master_building2.json.meta.total == 0)" 194 | tags: bb-master 195 | -------------------------------------------------------------------------------- /roles/bb-slave/README.rst: -------------------------------------------------------------------------------- 1 | bb-slave Role 2 | ============= 3 | 4 | This role creates a Buildbot slave running under supervisor. 5 | 6 | Parameters: 7 | 8 | ``bb_user`` 9 | User to run Buildbot slave under 10 | 11 | ``bb_slave_name`` 12 | Build slave name. 13 | It serves several purposes: 14 | 15 | * name to use while authenticating to the build master 16 | * key in `build_slaves` structure (defined in `secrets.yml`) to get the 17 | master name and the password to use 18 | 19 | ``bb_env_dir`` 20 | Virtual environment directory where buildbot-slave is installed. 21 | The `bin/buildslave` executable is looked for relative to this path. 22 | 23 | ``bb_slave_dir`` 24 | Base directory of the build slave (location of `buildbot.tac` file and work 25 | directories for builds). 26 | -------------------------------------------------------------------------------- /roles/bb-slave/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: make sure build slave config exists 3 | become: yes 4 | become_user: "{{ bb_user }}" 5 | command: "{{ bb_env_dir }}/bin/buildslave create-slave {{ bb_slave_dir }} {{ build_slaves[bb_slave_name].master }} {{ bb_slave_name }} {{ build_slaves[bb_slave_name].password }}" 6 | args: 7 | creates: "{{ bb_slave_dir }}/buildbot.tac" 8 | -------------------------------------------------------------------------------- /roles/dns/files/empty.db: -------------------------------------------------------------------------------- 1 | 2 | ; $FreeBSD: head/dns/bind910/files/empty.db 340872 2014-01-24 00:14:07Z mat $ 3 | 4 | $TTL 3h 5 | @ SOA @ nobody.localhost. 42 1d 12h 1w 3h 6 | ; Serial, Refresh, Retry, Expire, Neg.
cache TTL 7 | 8 | @ NS @ 9 | 10 | ; Silence a BIND warning 11 | @ A 127.0.0.1 12 | -------------------------------------------------------------------------------- /roles/dns/files/localhost-forward.db: -------------------------------------------------------------------------------- 1 | 2 | ; $FreeBSD: head/dns/bind910/files/localhost-forward.db 340872 2014-01-24 00:14:07Z mat $ 3 | 4 | $TTL 3h 5 | localhost. SOA localhost. nobody.localhost. 42 1d 12h 1w 3h 6 | ; Serial, Refresh, Retry, Expire, Neg. cache TTL 7 | 8 | NS localhost. 9 | 10 | A 127.0.0.1 11 | AAAA ::1 12 | -------------------------------------------------------------------------------- /roles/dns/files/localhost-reverse.db: -------------------------------------------------------------------------------- 1 | 2 | ; $FreeBSD: head/dns/bind910/files/localhost-reverse.db 340872 2014-01-24 00:14:07Z mat $ 3 | 4 | $TTL 3h 5 | @ SOA localhost. nobody.localhost. 42 1d 12h 1w 3h 6 | ; Serial, Refresh, Retry, Expire, Neg. cache TTL 7 | 8 | NS localhost. 9 | 10 | 1.0.0 PTR localhost. 11 | 12 | 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0 PTR localhost. 13 | 14 | -------------------------------------------------------------------------------- /roles/dns/handlers/main.yml: -------------------------------------------------------------------------------- 1 | # BIND related handlers 2 | --- 3 | - name: reload named 4 | service: 5 | name: named 6 | state: reloaded 7 | -------------------------------------------------------------------------------- /roles/dns/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Configure Buildbot DNS service 2 | --- 3 | - name: install BIND v9.16 4 | pkgng: 5 | name: bind916 6 | state: present 7 | environment: "{{ proxy_env }}" 8 | 9 | - name: create rndc key 10 | command: "rndc-confgen -a -A hmac-sha256 -u bind -c {{ namedb_dir }}/{{ rndc_key }}" 11 | args: 12 | creates: "{{ namedb_dir }}/{{ rndc_key }}" 13 | 14 | - name: make sure necessary directories exist 15 | file: 16 | path: "{{ namedb_dir }}/{{item}}" 17 | mode: "0755" 18 | owner: bind 19 | group: bind 20 | state: directory 21 | with_items: 22 | - dynamic 23 | - master 24 | - slave 25 | - working 26 | 27 | - name: make sure necessary master zones exist 28 | copy: 29 | src: "{{item}}" 30 | dest: "{{ namedb_dir }}/master/{{ item }}" 31 | with_items: 32 | - empty.db 33 | - localhost-forward.db 34 | - localhost-reverse.db 35 | notify: reload named 36 | 37 | - name: install master zone files 38 | template: 39 | src: "{{ item }}" 40 | dest: "{{ namedb_dir }}/master/{{ item }}" 41 | with_items: 42 | - buildbot.net 43 | - 224-255.128-255.10.211.140.in-addr.arpa 44 | notify: reload named 45 | 46 | - name: install BIND configuration files 47 | template: 48 | src: "{{ item }}" 49 | dest: "{{ namedb_dir }}/{{ item }}" 50 | with_items: 51 | - rndc.conf 52 | - named.conf 53 | - named.conf.options 54 | - named.conf.zones 55 | - named.conf.rfcs 56 | notify: reload named 57 | 58 | - name: enable and start named service 59 | service: 60 | name: named 61 | enabled: true 62 | state: started 63 | -------------------------------------------------------------------------------- /roles/dns/templates/224-255.128-255.10.211.140.in-addr.arpa: -------------------------------------------------------------------------------- 1 | @ 86400 IN SOA ns1.buildbot.net. hostmaster.buildbot.net. 
( 2 | {{ dns_serial }} ;serial 3 | 10800 ;refresh 4 | 1800 ;retry 5 | 604800 ;expire 6 | 86400 ) ;minimum 7 | @ 7200 IN NS c.ns.buddyns.com. ; germany 8 | @ 7200 IN NS f.ns.buddyns.com. ; india 9 | @ 7200 IN NS ns1.rtems.org. ; usa 10 | @ 7200 IN NS ns1.darkbeer.org. ; usa 11 | 12 | {% for name, ip in hosts_ips.items() %} 13 | {% if name == "www" -%} 14 | {{ip}} 86400 IN PTR buildbot.net. 15 | {% else -%} 16 | {{ip}} 86400 IN PTR {{name}}.buildbot.net. 17 | {% endif -%} 18 | {% endfor %} 19 | -------------------------------------------------------------------------------- /roles/dns/templates/buildbot.net: -------------------------------------------------------------------------------- 1 | $TTL 86400 2 | @ 86400 IN SOA ns1.buildbot.net. hostmaster.buildbot.net. ( 3 | {{ dns_serial }} ;serial 4 | 10800 ;refresh 5 | 1800 ;retry 6 | 604800 ;expire 7 | 86400 ) ;minimum 8 | 9 | 10 | @ IN NS ns1.he.net. ; usa 11 | @ IN NS ns5.he.net. ; london 12 | @ IN NS ns1.rtems.org. ; usa 13 | @ IN NS ns1.darkbeer.org. ; usa 14 | @ IN MX 10 mx.buildbot.net. 15 | @ IN A 140.211.10.233 16 | 17 | www IN CNAME buildbot.net. 18 | docs 3600 IN A {{external_network}}.233 19 | buildbot 3600 IN A 87.247.79.15 20 | 21 | {% for name, ip in hosts_ips.items() %} 22 | {{name}} IN A {{external_network}}.{{ip}} 23 | {% endfor %} 24 | 25 | status IN CNAME status.buildbot.net.s3-website-us-east-1.amazonaws.com. 26 | 27 | ; don't forget to update the serial in 3rd line when adding new records! 28 | 29 | ; Our "internal" addresses. Note that we do not use split DNS. Also note that 30 | ; there is no reverse resolution for these addresses. 31 | 32 | $ORIGIN int.buildbot.net. 33 | 34 | {% for name, ip in hosts_ips.items() %} 35 | {{name}} IN A {{internal_network}}.{{ip}} 36 | {% endfor %} 37 | -------------------------------------------------------------------------------- /roles/dns/templates/named.conf: -------------------------------------------------------------------------------- 1 | // $FreeBSD: head/dns/bind910/files/named.conf.in 369042 2014-09-23 11:22:40Z mat $ 2 | // 3 | // Refer to the named.conf(5) and named(8) man pages, and the documentation 4 | // in /usr/local/share/doc/bind for more details. 5 | // 6 | // The named.conf as it came with installation of BIND 9.10 is split so each 7 | // part could be easily read and understood: 8 | // * named.conf.options -- all options 9 | // * named.conf.rfcs -- all RFC related zones (these are more or less static) 10 | // * named.conf.zones -- all zone definitions (except for RFC related ones) 11 | 12 | options { 13 | recursion no; 14 | include "{{ namedb_dir }}/named.conf.options"; 15 | }; 16 | 17 | // If you enable a local name server, don't forget to enter 127.0.0.1 18 | // first in your /etc/resolv.conf so this server will be queried. 19 | // Also, make sure to enable it in /etc/rc.conf. 20 | 21 | include "{{ namedb_dir }}/named.conf.rfcs"; 22 | 23 | include "{{ namedb_dir }}/{{ rndc_key }}"; 24 | 25 | controls { 26 | inet {{ internal_ip }} port 953 allow { {{ internal_ip }}; } keys { "rndc-key"; }; 27 | }; 28 | 29 | include "{{ namedb_dir }}/named.conf.zones"; 30 | -------------------------------------------------------------------------------- /roles/dns/templates/named.conf.options: -------------------------------------------------------------------------------- 1 | // All file and path names are relative to the chroot directory, 2 | // if any, and should be fully qualified.
3 | directory "{{ namedb_dir }}/working"; 4 | pid-file "/var/run/named/pid"; 5 | dump-file "/var/dump/named_dump.db"; 6 | statistics-file "/var/stats/named.stats"; 7 | 8 | listen-on { 9 | any; 10 | }; 11 | 12 | // When we have IPv6 enabled on this system, uncomment this option. 13 | // listen-on-v6 { any }; 14 | 15 | // These zones are already covered by the empty zones listed below. If you 16 | // remove the related empty zones in named.conf.rfcs, comment these lines out. 17 | disable-empty-zone "255.255.255.255.IN-ADDR.ARPA"; 18 | disable-empty-zone "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.IP6.ARPA"; 19 | disable-empty-zone "1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.IP6.ARPA"; 20 | -------------------------------------------------------------------------------- /roles/dns/templates/named.conf.rfcs: -------------------------------------------------------------------------------- 1 | // The traditional root hints mechanism. Use this, OR the slave zones below. 2 | zone "." { type hint; file "{{ namedb_dir }}/named.root"; }; 3 | 4 | /* Slaving the following zones from the root name servers has some 5 | significant advantages: 6 | 1. Faster local resolution for your users 7 | 2. No spurious traffic will be sent from your network to the roots 8 | 3. Greater resilience to any potential root server failure/DDoS 9 | 10 | On the other hand, this method requires more monitoring than the 11 | hints file to be sure that an unexpected failure mode has not 12 | incapacitated your server. Name servers that are serving a lot 13 | of clients will benefit more from this approach than individual 14 | hosts. Use with caution. 15 | 16 | To use this mechanism, uncomment the entries below, and comment 17 | the hint zone above. 18 | 19 | As documented at http://dns.icann.org/services/axfr/ these zones: 20 | "." (the root), ARPA, IN-ADDR.ARPA, IP6.ARPA, and ROOT-SERVERS.NET 21 | are available for AXFR from these servers on IPv4 and IPv6: 22 | xfr.lax.dns.icann.org, xfr.cjr.dns.icann.org 23 | */ 24 | /* 25 | zone "." { 26 | type slave; 27 | file "/usr/local/etc/namedb/slave/root.slave"; 28 | masters { 29 | 192.5.5.241; // F.ROOT-SERVERS.NET. 30 | }; 31 | notify no; 32 | }; 33 | zone "arpa" { 34 | type slave; 35 | file "/usr/local/etc/namedb/slave/arpa.slave"; 36 | masters { 37 | 192.5.5.241; // F.ROOT-SERVERS.NET. 38 | }; 39 | notify no; 40 | }; 41 | */ 42 | 43 | /* Serving the following zones locally will prevent any queries 44 | for these zones leaving your network and going to the root 45 | name servers. This has two significant advantages: 46 | 1. Faster local resolution for your users 47 | 2. 
No spurious traffic will be sent from your network to the roots 48 | */ 49 | // RFCs 1912, 5735 and 6303 (and BCP 32 for localhost) 50 | zone "localhost" { type master; file "{{ namedb_dir }}/master/localhost-forward.db"; }; 51 | zone "127.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/localhost-reverse.db"; }; 52 | zone "255.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 53 | 54 | // RFC 1912-style zone for IPv6 localhost address (RFC 6303) 55 | zone "0.ip6.arpa" { type master; file "{{ namedb_dir }}/master/localhost-reverse.db"; }; 56 | 57 | // "This" Network (RFCs 1912, 5735 and 6303) 58 | zone "0.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 59 | 60 | // Private Use Networks (RFCs 1918, 5735 and 6303) 61 | zone "10.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 62 | zone "16.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 63 | zone "17.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 64 | zone "18.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 65 | zone "19.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 66 | zone "20.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 67 | zone "21.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 68 | zone "22.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 69 | zone "23.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 70 | zone "24.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 71 | zone "25.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 72 | zone "26.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 73 | zone "27.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 74 | zone "28.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 75 | zone "29.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 76 | zone "30.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 77 | zone "31.172.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 78 | zone "168.192.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 79 | 80 | // Shared Address Space (RFC 6598) 81 | zone "64.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 82 | zone "65.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 83 | zone "66.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 84 | zone "67.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 85 | zone "68.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 86 | zone "69.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 87 | zone "70.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 88 | zone "71.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 89 | zone "72.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 90 | zone "73.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 91 | zone "74.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 92 | zone "75.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 93 
| zone "76.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 94 | zone "77.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 95 | zone "78.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 96 | zone "79.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 97 | zone "80.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 98 | zone "81.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 99 | zone "82.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 100 | zone "83.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 101 | zone "84.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 102 | zone "85.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 103 | zone "86.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 104 | zone "87.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 105 | zone "88.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 106 | zone "89.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 107 | zone "90.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 108 | zone "91.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 109 | zone "92.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 110 | zone "93.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 111 | zone "94.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 112 | zone "95.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 113 | zone "96.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 114 | zone "97.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 115 | zone "98.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 116 | zone "99.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 117 | zone "100.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 118 | zone "101.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 119 | zone "102.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 120 | zone "103.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 121 | zone "104.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 122 | zone "105.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 123 | zone "106.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 124 | zone "107.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 125 | zone "108.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 126 | zone "109.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 127 | zone "110.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 128 | zone "111.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 129 | zone "112.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 130 | zone "113.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 131 | zone "114.100.in-addr.arpa" { type master; file 
"{{ namedb_dir }}/master/empty.db"; }; 132 | zone "115.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 133 | zone "116.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 134 | zone "117.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 135 | zone "118.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 136 | zone "119.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 137 | zone "120.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 138 | zone "121.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 139 | zone "122.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 140 | zone "123.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 141 | zone "124.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 142 | zone "125.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 143 | zone "126.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 144 | zone "127.100.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 145 | 146 | // Link-local/APIPA (RFCs 3927, 5735 and 6303) 147 | zone "254.169.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 148 | 149 | // IETF protocol assignments (RFCs 5735 and 5736) 150 | zone "0.0.192.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 151 | 152 | // TEST-NET-[1-3] for Documentation (RFCs 5735, 5737 and 6303) 153 | zone "2.0.192.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 154 | zone "100.51.198.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 155 | zone "113.0.203.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 156 | 157 | // IPv6 Example Range for Documentation (RFCs 3849 and 6303) 158 | zone "8.b.d.0.1.0.0.2.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 159 | 160 | // Domain Names for Documentation and Testing (BCP 32) 161 | zone "test" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 162 | zone "example" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 163 | zone "invalid" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 164 | zone "example.com" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 165 | zone "example.net" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 166 | zone "example.org" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 167 | 168 | // Router Benchmark Testing (RFCs 2544 and 5735) 169 | zone "18.198.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 170 | zone "19.198.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 171 | 172 | // IANA Reserved - Old Class E Space (RFC 5735) 173 | zone "240.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 174 | zone "241.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 175 | zone "242.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 176 | zone "243.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 177 | zone "244.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 178 | zone "245.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 179 | zone "246.in-addr.arpa" { type master; file "{{ namedb_dir 
}}/master/empty.db"; }; 180 | zone "247.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 181 | zone "248.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 182 | zone "249.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 183 | zone "250.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 184 | zone "251.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 185 | zone "252.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 186 | zone "253.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 187 | zone "254.in-addr.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 188 | 189 | // IPv6 Unassigned Addresses (RFC 4291) 190 | zone "1.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 191 | zone "3.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 192 | zone "4.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 193 | zone "5.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 194 | zone "6.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 195 | zone "7.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 196 | zone "8.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 197 | zone "9.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 198 | zone "a.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 199 | zone "b.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 200 | zone "c.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 201 | zone "d.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 202 | zone "e.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 203 | zone "0.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 204 | zone "1.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 205 | zone "2.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 206 | zone "3.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 207 | zone "4.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 208 | zone "5.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 209 | zone "6.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 210 | zone "7.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 211 | zone "8.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 212 | zone "9.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 213 | zone "a.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 214 | zone "b.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 215 | zone "0.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 216 | zone "1.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 217 | zone "2.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 218 | zone "3.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 219 | zone "4.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 220 | zone "5.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 221 | zone "6.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 222 | zone "7.e.f.ip6.arpa" { type master; file "{{ 
namedb_dir }}/master/empty.db"; }; 223 | 224 | // IPv6 ULA (RFCs 4193 and 6303) 225 | zone "c.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 226 | zone "d.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 227 | 228 | // IPv6 Link Local (RFCs 4291 and 6303) 229 | zone "8.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 230 | zone "9.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 231 | zone "a.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 232 | zone "b.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 233 | 234 | // IPv6 Deprecated Site-Local Addresses (RFCs 3879 and 6303) 235 | zone "c.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 236 | zone "d.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 237 | zone "e.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 238 | zone "f.e.f.ip6.arpa" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 239 | 240 | // IP6.INT is Deprecated (RFC 4159) 241 | zone "ip6.int" { type master; file "{{ namedb_dir }}/master/empty.db"; }; 242 | -------------------------------------------------------------------------------- /roles/dns/templates/named.conf.zones: -------------------------------------------------------------------------------- 1 | acl "buildbot.net-upstream" { 2 | 216.218.133.2; /* slave.dns.he.net */ 3 | 184.75.211.19; /* ns1.darkbeer.org */ 4 | 140.211.10.139; /* ns1.rtems.org */ 5 | }; 6 | 7 | zone "buildbot.net" { 8 | type master; 9 | file "{{ namedb_dir }}/master/buildbot.net"; 10 | allow-transfer { 11 | "buildbot.net-upstream"; 12 | }; 13 | notify explicit; 14 | also-notify { 15 | 216.218.130.2; /* ns1.he.net */ 16 | 184.75.211.19; /* ns1.darkbeer.org */ 17 | 140.211.10.139; /* ns1.rtems.org */ 18 | }; 19 | }; 20 | 21 | zone "224-255.128-255.10.211.140.in-addr.arpa" { 22 | type master; 23 | file "{{ namedb_dir }}/master/224-255.128-255.10.211.140.in-addr.arpa"; 24 | allow-transfer { 25 | "buildbot.net-upstream"; 26 | }; 27 | notify explicit; 28 | also-notify { 29 | 216.218.130.2; /* ns1.he.net */ 30 | 184.75.211.19; /* ns1.darkbeer.org */ 31 | 140.211.10.139; /* ns1.rtems.org */ 32 | }; 33 | }; 34 | -------------------------------------------------------------------------------- /roles/dns/templates/rndc.conf: -------------------------------------------------------------------------------- 1 | include "{{ namedb_dir }}/{{ rndc_key }}"; 2 | 3 | options { 4 | default-key "rndc-key"; 5 | default-server {{ internal_ip }}; 6 | default-port 953; 7 | }; 8 | -------------------------------------------------------------------------------- /roles/dns/vars/main.yml: -------------------------------------------------------------------------------- 1 | # DNS specific variables 2 | # TODO: consider putting them in global group_vars/all 3 | --- 4 | namedb_dir: "/usr/local/etc/namedb" 5 | # We do not want it to match the default one since 'rndc' produces annoying 6 | # warnings. 
7 | rndc_key: "rndc-key" 8 | -------------------------------------------------------------------------------- /roles/docker-worker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - "AerisCloud.docker" 4 | -------------------------------------------------------------------------------- /roles/docker-worker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Install worker-startup.sh" 3 | template: 4 | src: "worker-startup.sh" 5 | dest: "/root/{{bb_slave_name}}-worker-startup.sh" 6 | 7 | # because this just starts containers, it doesn't act like a service, so we just 8 | # start it on boot 9 | - name: "Run worker-startup on reboot" 10 | cron: 11 | name: "{{bb_slave_name}}-worker" 12 | job: "/bin/bash /root/{{bb_slave_name}}-worker-startup.sh" 13 | special_time: reboot 14 | -------------------------------------------------------------------------------- /roles/docker-worker/templates/worker-startup.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | # See https://github.com/buildbot/metabbotcfg/blob/master/docker/linux-startup.sh 4 | 5 | # startup script for the `linux` worker, which is composed of two DB containers 6 | # and a (customized) worker container. All DB names, users, and passwords are 'bbtest'. 7 | # That's OK because access to the DBs is limited by docker linking. 8 | 9 | set -e 10 | 11 | stop() { 12 | docker stop $1 || true 13 | docker rm $1 || true 14 | } 15 | 16 | stop bbtest-postgres 17 | stop bbtest-mysql 18 | stop bbtest 19 | 20 | docker run -d --name bbtest-postgres \ 21 | -e POSTGRES_USER=bbtest \ 22 | -e POSTGRES_PASSWORD=bbtest \ 23 | postgres:9.5 24 | 25 | docker run -d --name bbtest-mysql \ 26 | -e MYSQL_RANDOM_ROOT_PASSWORD=1 \ 27 | -e MYSQL_DATABASE=bbtest \ 28 | -e MYSQL_USER=bbtest \ 29 | -e MYSQL_PASSWORD=bbtest \ 30 | buildbot/metamysql:5.6 --character-set-server=utf8 --collation-server=utf8_general_ci 31 | 32 | docker run -d --name bbtest \ 33 | -e BUILDMASTER={{ build_slaves[bb_slave_name].master }} \ 34 | -e BUILDMASTER_PORT=9989 \ 35 | -e WORKERNAME={{ bb_slave_name }} \ 36 | -e WORKERPASS={{ build_slaves[bb_slave_name].password }} \ 37 | --link bbtest-mysql:mysql \ 38 | --link bbtest-postgres:postgresql \ 39 | -d buildbot/metaworker:latest 40 | -------------------------------------------------------------------------------- /roles/docs/files/Dockerfile.build: -------------------------------------------------------------------------------- 1 | FROM docker.io/python:3.9-bookworm 2 | 3 | RUN apt-get update \ 4 | && apt-get install -y \ 5 | curl \ 6 | gcc \ 7 | git \ 8 | make \ 9 | libbz2-dev \ 10 | libdb-dev \ 11 | libexpat1-dev \ 12 | libffi-dev \ 13 | liblzma-dev \ 14 | libncursesw5-dev \ 15 | libreadline-dev \ 16 | libsqlite3-dev \ 17 | libssl-dev \ 18 | locales-all \ 19 | zlib1g-dev \ 20 | && rm -rf /var/lib/apt/lists/* 21 | 22 | RUN adduser --home /home/user --disabled-password --gecos "" user 23 | 24 | USER user 25 | 26 | ADD build_docs.sh /build_docs.sh 27 | -------------------------------------------------------------------------------- /roles/docs/files/Dockerfile.run: -------------------------------------------------------------------------------- 1 | FROM docker.io/nginx:1.25-bookworm 2 | 3 | ADD nginx.conf /etc/nginx/nginx.conf 4 | 5 | VOLUME /data 6 | --------------------------------------------------------------------------------
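The two Dockerfiles above are only halves of the docs pipeline: Dockerfile.build produces the throw-away builder image that runs build_docs.sh, while Dockerfile.run produces the nginx image that serves the generated HTML out of the mounted content directory; the glue lives in refresh_data.sh and docker-compose.yml below. A minimal hand-run sketch of the same flow, assuming docker and docker-compose are available on the host; COMPOSE_ROOT is an illustrative stand-in for the role's {{ compose_root }} and is not defined anywhere in this repository:

    #!/bin/bash
    # Hand-run equivalent of refresh_data.sh plus the "Restart docs container" task;
    # COMPOSE_ROOT is a hypothetical stand-in for {{ compose_root }}.
    COMPOSE_ROOT=/srv/docs

    # Build the builder image and run a single documentation build pass.
    cd "$COMPOSE_ROOT/build_current"
    docker build -f Dockerfile.build -t nopush/bbdocs .
    docker run --rm \
        -v "$(pwd)/scripts:/home/user/scripts" \
        -v "$(pwd)/results:/home/user/results" \
        -v "$(pwd)/work:/home/user/work" \
        nopush/bbdocs /build_docs.sh

    # Build the serving image from Dockerfile.run and publish ./content
    # on the external httpproxy network via docker-compose.
    cd "$COMPOSE_ROOT"
    docker-compose up -d --build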
/roles/docs/files/build_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ROOTDIR=/home/user 6 | cd "$ROOTDIR/work" 7 | 8 | NEED_UPDATE=0 9 | 10 | if [ -d buildbot/.git ]; then 11 | pushd buildbot 12 | OLD_HEAD=$(git rev-parse HEAD) 13 | git fetch origin 14 | git reset --hard origin/master 15 | NEW_HEAD=$(git rev-parse HEAD) 16 | 17 | if [ "$OLD_HEAD" != "$NEW_HEAD" ]; then 18 | NEED_UPDATE=1 19 | fi 20 | 21 | popd 22 | else 23 | rm -rf buildbot 24 | git clone https://github.com/buildbot/buildbot buildbot 25 | NEED_UPDATE=1 26 | fi 27 | 28 | if [ "$NEED_UPDATE" = "0" ]; then 29 | echo "Skipped" 30 | exit 0 31 | fi 32 | 33 | pushd buildbot 34 | rm -rf ../venv 35 | python3 -m venv ../venv 36 | source ../venv/bin/activate 37 | pip install -r requirements-cidocs.txt -e master 38 | 39 | LC_ALL=en_US.UTF-8 LANG=en_US.UTF-8 make -C master/docs VERSION=latest 40 | 41 | find master/docs/_build/html -name '*.html' -exec python3 "$ROOTDIR/scripts/add-tracking.py" '{}' \; 42 | 43 | rm -rf "$ROOTDIR/results/html" 44 | rm -rf "$ROOTDIR/results/date" 45 | cp -ar master/docs/_build/html "$ROOTDIR/results/html" 46 | date --iso-8601=ns > "$ROOTDIR/results/date" 47 | 48 | echo "Done" 49 | -------------------------------------------------------------------------------- /roles/docs/files/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | docs: 5 | image: nopush/nginx-bbdocs 6 | build: 7 | context: build_run 8 | dockerfile: Dockerfile.run 9 | restart: always 10 | # exposes port 80 11 | environment: 12 | - VIRTUAL_HOST=docs.buildbot.net 13 | - CERT_NAME=buildbot.net 14 | networks: 15 | - httpproxy 16 | volumes: 17 | - ./content:/data:ro 18 | 19 | networks: 20 | httpproxy: 21 | external: true 22 | -------------------------------------------------------------------------------- /roles/docs/files/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes auto; 3 | 4 | error_log /var/log/nginx/error.log notice; 5 | pid /var/run/nginx.pid; 6 | 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | 13 | http { 14 | include /etc/nginx/mime.types; 15 | default_type application/octet-stream; 16 | 17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 18 | '$status $body_bytes_sent "$http_referer" ' 19 | '"$http_user_agent" "$http_x_forwarded_for"'; 20 | 21 | access_log /var/log/nginx/access.log main; 22 | 23 | sendfile on; 24 | 25 | keepalive_timeout 65; 26 | 27 | server { 28 | listen 80; 29 | server_name localhost; 30 | 31 | # Renamed documentation pages. 
32 | rewrite ^/(latest|current)/developer/master-slave\.html /$1/developer/master-worker.html permanent; 33 | rewrite ^/(latest|current)/developer/cls-bslavemanager\.html /$1/developer/cls-workermanager.html permanent; 34 | rewrite ^/(latest|current)/developer/cls-buildslave\.html /$1/developer/cls-worker.html permanent; 35 | rewrite ^/(latest|current)/manual/cfg-buildslaves-libvirt\.html /$1/manual/cfg-workers-libvirt.html permanent; 36 | rewrite ^/(latest|current)/manual/cfg-buildslaves-docker\.html /$1/manual/cfg-workers-docker.html permanent; 37 | rewrite ^/(latest|current)/manual/cfg-buildslaves-openstack\.html /$1/manual/cfg-workers-openstack.html permanent; 38 | rewrite ^/(latest|current)/manual/cfg-buildslaves-ec2\.html /$1/manual/cfg-workers-ec2.html permanent; 39 | rewrite ^/(latest|current)/manual/cfg-buildslaves\.html /$1/manual/cfg-workers.html permanent; 40 | rewrite ^/(latest|current)/manual/installation/buildslave\.html /$1/manual/installation/worker.html permanent; 41 | 42 | location / { 43 | root /data/html; 44 | index index.html index.htm; 45 | } 46 | 47 | error_page 500 502 503 504 /50x.html; 48 | location = /50x.html { 49 | root /data/html; 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /roles/docs/files/refresh_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | NEED_UPDATE=0 8 | 9 | if [ -d bbdocs/.git ]; then 10 | pushd bbdocs 11 | OLD_HEAD=$(git rev-parse HEAD) 12 | git fetch origin 13 | git reset --hard origin/master 14 | NEW_HEAD=$(git rev-parse HEAD) 15 | 16 | if [ "$OLD_HEAD" != "$NEW_HEAD" ]; then 17 | NEED_UPDATE=1 18 | fi 19 | 20 | popd 21 | else 22 | rm -rf bbdocs 23 | git clone https://github.com/buildbot/bbdocs bbdocs 24 | NEED_UPDATE=1 25 | fi 26 | 27 | mkdir -p build_current 28 | mkdir -p build_current/scripts 29 | mkdir -p build_current/results 30 | mkdir -p build_current/work 31 | pushd build_current 32 | 33 | # container may have different user IDs inside 34 | chmod 777 scripts 35 | chmod 777 results 36 | chmod 777 work 37 | 38 | OLD_BUILDBOT_CONTENT_DATE=$(cat results/date 2> /dev/null || echo "no such file") 39 | 40 | cp ../bbdocs/add-tracking.py scripts/ 41 | sudo docker build -f Dockerfile.build -t nopush/bbdocs . 42 | sudo docker run --rm \ 43 | -v "$(pwd)/scripts:/home/user/scripts" \ 44 | -v "$(pwd)/results:/home/user/results" \ 45 | -v "$(pwd)/work:/home/user/work" \ 46 | nopush/bbdocs /build_docs.sh 47 | 48 | NEW_BUILDBOT_CONTENT_DATE=$(cat results/date 2> /dev/null || echo "no such file") 49 | 50 | if [ "$OLD_BUILDBOT_CONTENT_DATE" != "$NEW_BUILDBOT_CONTENT_DATE" ]; then 51 | rm -rf last_content 52 | cp -ar results/html last_content 53 | NEED_UPDATE=1 54 | fi 55 | 56 | popd 57 | 58 | if [ "$NEED_UPDATE" != "0" ]; then 59 | # Note that content is mounted to container thus it is not removed. 
60 | rm -rf content/new_html 61 | cp -ar bbdocs/docs content/new_html 62 | cp -ar build_current/last_content content/new_html/latest 63 | 64 | # Use mv to swap data quickly 65 | if [ -d "content/html" ]; then 66 | mv content/html content/old_html 67 | fi 68 | mv content/new_html content/html 69 | rm -rf content/old_html 70 | fi 71 | -------------------------------------------------------------------------------- /roles/docs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create directories 3 | become: yes 4 | become_user: "{{ service_account }}" 5 | file: 6 | path: "{{ compose_root }}/{{ item }}" 7 | state: directory 8 | with_items: 9 | - content 10 | - build_current 11 | - build_run 12 | 13 | - name: Copy docker files 14 | become: yes 15 | become_user: "{{ service_account }}" 16 | copy: 17 | dest: "{{ compose_root }}/{{ item }}" 18 | src: "{{ role_path }}/files/{{ item }}" 19 | mode: preserve 20 | loop: 21 | - docker-compose.yml 22 | - refresh_data.sh 23 | 24 | - name: Copy docker files for current version building 25 | become: yes 26 | become_user: "{{ service_account }}" 27 | copy: 28 | dest: "{{ compose_root }}/build_current/{{ item }}" 29 | src: "{{ role_path }}/files/{{ item }}" 30 | mode: preserve 31 | loop: 32 | - Dockerfile.build 33 | - build_docs.sh 34 | 35 | - name: Copy docker files for run container building 36 | become: yes 37 | become_user: "{{ service_account }}" 38 | copy: 39 | dest: "{{ compose_root }}/build_run/{{ item }}" 40 | src: "{{ role_path }}/files/{{ item }}" 41 | mode: preserve 42 | loop: 43 | - Dockerfile.run 44 | - nginx.conf 45 | 46 | - name: Restart docs container 47 | become: yes 48 | become_user: "{{ service_account }}" 49 | ansible.builtin.command: "docker-compose up -d --build" 50 | args: 51 | chdir: "{{ compose_root }}" 52 | 53 | - name: Install data refresh crontask 54 | ansible.builtin.cron: 55 | name: refresh-docs-data 56 | job: "{{ compose_root }}/refresh_data.sh 2>&1 | systemd-cat -t crontab -p info" 57 | user: "{{ service_account }}" 58 | minute: "*/5" 59 | -------------------------------------------------------------------------------- /roles/elk/README.rst: -------------------------------------------------------------------------------- 1 | ELK 2 | === 3 | This is the Ansible setup for the ELK stack, loosely based on 4 | https://blog.gufi.org/2016/02/15/elk-first-part/ 5 | -------------------------------------------------------------------------------- /roles/elk/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart elasticsearch 3 | service: 4 | name: elasticsearch 5 | state: restarted 6 | 7 | - name: restart logstash 8 | service: 9 | name: logstash 10 | state: restarted 11 | 12 | - name: restart kibana 13 | service: 14 | name: kibana 15 | state: restarted 16 | -------------------------------------------------------------------------------- /roles/elk/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - base 4 | 5 | - role: packages 6 | packages: 7 | - elasticsearch6 8 | - logstash6 9 | - kibana6 10 | - go # for oauth2-proxy 11 | 12 | - role: user 13 | user_id: "{{ worker_account }}" 14 | user_name: Buildbot Worker Account 15 | 16 | - role: supervisor-service 17 | service_name: "oauth2_proxy" 18 | service_dir: "{{ getent_passwd[worker_account].4 }}" 19 | service_command: /usr/local/bin/oauth2_proxy -config {{ oauth2_proxy_configuration }}
20 | service_user: "{{ worker_account }}" 21 | service_environment: "{{ proxy_env }}" 22 | 23 | - role: nginx 24 | nginx_template: multiproxy 25 | upstream_urls: 26 | - endpoint: /events 27 | url: "{{ internal_ip }}:{{ logstash_port }}" 28 | # elastic_search_direct_url is in the vault. Allow admins to access the data directly from internet 29 | - endpoint: /{{ elastic_search_direct_url }}/ 30 | url: "{{ internal_ip }}:{{ elastic_port }}/" 31 | - endpoint: / 32 | url: "{{ internal_ip }}:{{ oauth2_proxy_port }}" 33 | 34 | ssl: true 35 | -------------------------------------------------------------------------------- /roles/elk/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install/update oauth2_proxy 3 | shell: 'export GOROOT=/usr/local/go GOPATH=/usr/local; go get github.com/bitly/oauth2_proxy' 4 | args: 5 | executable: /usr/local/bin/bash 6 | environment: "{{ proxy_env }}" 7 | 8 | - name: configure elasticsearch 9 | template: 10 | src: elasticsearch.yml 11 | dest: "{{ elastic_configuration }}" 12 | notify: restart elasticsearch 13 | 14 | - name: configure logstash 15 | template: 16 | src: logstash_{{logstash_variant}}.conf 17 | dest: "{{ logstash_configuration }}" 18 | notify: restart logstash 19 | 20 | - name: configure kibana 21 | template: 22 | src: kibana.yml 23 | dest: "{{ kibana_configuration }}" 24 | notify: restart kibana 25 | 26 | - name: configure oauth2_proxy 27 | template: 28 | src: oauth2_proxy.conf 29 | dest: "{{ oauth2_proxy_configuration }}" 30 | mode: 0660 31 | owner: "{{ worker_account }}" 32 | group: "wheel" 33 | notify: restart supervisor 34 | 35 | - name: remove 36 | shell: 'rm -rf "{{ logstash_tmp_dir }}"' 37 | 38 | - name: enable elk 39 | lineinfile: 40 | dest: "/etc/rc.conf" 41 | line: "{{item.option}}=\"{{item.value}}\"" 42 | regexp: "^{{item.option}}=.*" 43 | state: present 44 | notify: restart logstash 45 | with_items: 46 | - option: "elasticsearch_enable" 47 | value: "YES" 48 | - option: "logstash_enable" 49 | value: "YES" 50 | - option: "logstash_log" 51 | value: "YES" 52 | - option: "logstash_log_file" 53 | value: "/var/log/logstash.log" 54 | # workaround logstash6 apparent incompatibility with FreeBSD. Note that the directory needs to be 55 | # removed before each startup. Unfortunately this does not work, but apparently logstash ignores 56 | # the JAVA_OPTS environment variable set by the service startup file and requires LS_JAVA_OPTS. 
57 | # So the service startup script was modified by hand to force 58 | # LS_JAVA_OPTS=-Djava.io.tmpdir=/tmp/logstashdir 59 | - option: "logstash_java_opts" 60 | value: "-Djava.io.tmpdir={{ logstash_tmp_dir }}" 61 | - option: "kibana_enable" 62 | value: "YES" 63 | 64 | 65 | - name: start elasticsearch 66 | service: 67 | name: elasticsearch 68 | enabled: true 69 | state: started 70 | 71 | - name: start logstash 72 | service: 73 | name: logstash 74 | enabled: true 75 | state: started 76 | 77 | - name: start kibana 78 | service: 79 | name: kibana 80 | enabled: true 81 | state: started 82 | -------------------------------------------------------------------------------- /roles/elk/templates/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | # we replace default elasticsearch.yml with only the options we care about 2 | # see elasticsearch doc for configuration options 3 | 4 | network.host: {{ internal_ip }} 5 | http.port: {{ elastic_port }} 6 | path.data: /var/db/elasticsearch 7 | path.logs: /var/log/elasticsearch 8 | -------------------------------------------------------------------------------- /roles/elk/templates/kibana.yml: -------------------------------------------------------------------------------- 1 | server.port: {{ kibana_port }} 2 | server.host: "{{ internal_ip }}" 3 | elasticsearch.hosts: ["http://{{ internal_ip }}:{{ elastic_port }}"] 4 | logging.quiet: true 5 | -------------------------------------------------------------------------------- /roles/elk/templates/logstash_events.conf: -------------------------------------------------------------------------------- 1 | input { 2 | http { 3 | host => "{{ internal_ip }}" 4 | port => {{ logstash_port }} 5 | } 6 | } 7 | 8 | output { 9 | elasticsearch { 10 | hosts => ["{{ internal_ip }}:{{ elastic_port }}"] 11 | index => "logstash-%{+YYYY}" 12 | document_type => "logs" 13 | } 14 | stdout { codec => rubydebug } 15 | } 16 | -------------------------------------------------------------------------------- /roles/elk/templates/logstash_syslog.conf: -------------------------------------------------------------------------------- 1 | input { 2 | syslog { 3 | type => "syslog" 4 | host => "{{ internal_ip }}" 5 | } 6 | tcp { 7 | 8 | type => "logstash" 9 | host => "{{ internal_ip }}" 10 | port => {{ global_syslog_server_json_port }} 11 | codec => json_lines 12 | } 13 | } 14 | filter { 15 | if [type] == "syslog" { 16 | grok { 17 | match => { 18 | "message" => [ 19 | # for each message kind we have a rule 20 | # you can create rules with https://grokdebug.herokuapp.com/ 21 | '<%{NONNEGINT:syslog5424_pri}>%{SYSLOGTIMESTAMP:timestamp} sudo: %{DATA:sudoer_userid} : TTY=%{DATA:tty} ; PWD=%{DATA:pwd} ; USER=%{DATA:sudo_userid} ; COMMAND=%{GREEDYDATA:command}', 22 | '<%{NONNEGINT:syslog5424_pri}>%{SYSLOGTIMESTAMP:timestamp} /usr/sbin/cron\[%{NONNEGINT:pid}]: (%{DATA:cron_userid}) CMD (%{DATA:command})', 23 | 24 | # last rule is generic arbitrary message with arbitrary key=value in the end 25 | '<%{NONNEGINT:syslog5424_pri}>%{SYSLOGTIMESTAMP:timestamp} %{DATA:sender}: %{GREEDYDATA:syslog_msg}(?([^=]+=[^ ,]+,? 
+)*)' 26 | ] 27 | } 28 | } 29 | # this allows splitting each key/value pair into its own field inside the event (done in conjunction with the last grok rule) 30 | kv { 31 | source => "kvpairs" 32 | trim => "<>\[\]," 33 | 34 | remove_field => [ "kvpairs" ] # Delete the field afterwards 35 | } 36 | } 37 | 38 | mutate { 39 | add_field => { "hostname" => "%{host}" } 40 | } 41 | dns { 42 | action => "replace" 43 | reverse => [ "hostname" ] 44 | } 45 | } 46 | 47 | output { 48 | elasticsearch { hosts => ["{{ internal_ip }}:{{ elastic_port }}"] } 49 | stdout { codec => rubydebug } 50 | } 51 | -------------------------------------------------------------------------------- /roles/elk/templates/oauth2_proxy.conf: -------------------------------------------------------------------------------- 1 | # The OAuth Client ID, Secret 2 | client_id = "{{ github_oauth_keys[ansible_hostname]['clientid'] }}" 3 | client_secret = "{{ github_oauth_keys[ansible_hostname]['clientsecret'] }}" 4 | 5 | provider = "github" 6 | github_org = "buildbot" 7 | github_team = "{{ github_team }}" 8 | 9 | http_address = "http://{{ internal_ip }}:{{ oauth2_proxy_port }}" 10 | upstreams = [ 11 | "http://{{ internal_ip }}:{{ kibana_port }}/", 12 | "http://{{ internal_ip }}:{{ logstash_port }}/logstash-*/" 13 | ] 14 | email_domains = ['*'] 15 | 16 | cookie_secret = "{{ github_oauth_keys[ansible_hostname]['clientsecret'] }}" 17 | redirect_url = "https://{{ web_host_name }}/oauth2/callback" 18 | cookie_domain = "{{ web_host_name }}" 19 | -------------------------------------------------------------------------------- /roles/elk/vars/main.yml: -------------------------------------------------------------------------------- 1 | # elk networking configuration 2 | --- 3 | kibana_port: 5602 4 | elastic_port: 9100 5 | logstash_port: 8080 6 | oauth2_proxy_port: 8081 7 | elastic_configuration: /usr/local/etc/elasticsearch/elasticsearch.yml 8 | logstash_configuration: /usr/local/etc/logstash/logstash.conf 9 | kibana_configuration: /usr/local/etc/kibana/kibana.yml 10 | oauth2_proxy_configuration: /usr/local/etc/oauth2_proxy.conf 11 | logstash_tmp_dir: /tmp/logstashdir 12 | -------------------------------------------------------------------------------- /roles/ftp/files/buildbot-favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/buildbot/buildbot-infra/6fe9301f92151072e5ba546cc10feca890cf885a/roles/ftp/files/buildbot-favicon.ico -------------------------------------------------------------------------------- /roles/ftp/files/ftp-nginx.conf: -------------------------------------------------------------------------------- 1 | autoindex on; 2 | autoindex_exact_size off; 3 | -------------------------------------------------------------------------------- /roles/ftp/files/robots.txt: -------------------------------------------------------------------------------- 1 | Allow: / 2 | -------------------------------------------------------------------------------- /roles/ftp/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - base 4 | - role: nginx 5 | nginx_template: static 6 | server_name: "{{ ftp_hostname }}" 7 | server_root: "{{ ftp_root }}" 8 | extra_config_from: "roles/ftp/files/ftp-nginx.conf" 9 | ssl: True 10 | - role: vsftp 11 | server_name: "{{ ftp_hostname }}" 12 | server_root: "{{ ftp_root }}" 13 | ssl: True 14 | --------------------------------------------------------------------------------
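The meta file above points nginx (static template plus the autoindex snippet from ftp-nginx.conf) and vsftpd at the same {{ ftp_root }} tree, so the same files are reachable over both HTTPS and FTP. A rough smoke-test sketch for a host where the role has been applied; FTP_HOST is an illustrative stand-in for {{ ftp_hostname }}, and the exact FTP access policy comes from the vsftp role's vsftpd.conf.j2, which is not shown here:

    #!/bin/bash
    # Hypothetical post-deployment check; FTP_HOST stands in for {{ ftp_hostname }}.
    FTP_HOST=ftp.example.net

    # The static nginx site (ssl: True) should serve the robots.txt installed by
    # tasks/main.yml below, and autoindex should list the directory root.
    curl -fsSI "https://${FTP_HOST}/robots.txt"
    curl -fsS "https://${FTP_HOST}/" | head

    # vsftpd should expose the same tree over FTP.
    curl -fsS "ftp://${FTP_HOST}/" | head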
/roles/ftp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create data directory 3 | file: 4 | path: "{{ ftp_root }}" 5 | state: directory 6 | owner: root 7 | mode: 0755 8 | 9 | # Set up some web-only files 10 | - name: add favicon 11 | copy: 12 | src: "buildbot-favicon.ico" 13 | dest: "{{ ftp_root }}/favicon.ico" 14 | 15 | - name: add robots.txt 16 | copy: 17 | src: "robots.txt" 18 | dest: "{{ ftp_root }}/robots.txt" 19 | -------------------------------------------------------------------------------- /roles/jail/README.rst: -------------------------------------------------------------------------------- 1 | Jail Role 2 | ========= 3 | 4 | The jail role takes four arguments, two of which are optional. 5 | 6 | ``name`` 7 | The name to use for the jail. This must be the jail's hostname without the domain part (not the FQDN). 8 | 9 | ``hostname`` 10 | The hostname of the jail. 11 | 12 | ``jail_debug`` (optional) 13 | set to true to dump the output of the bootstrap script run in the jail 14 | 15 | ``internet_visible`` (optional) 16 | set to true to configure an externally visible network interface 17 | 18 | .. note:: 19 | 20 | JIDs are assigned automatically. To address the jail, use the jail's name, for example:: 21 | 22 | $ sudo jexec jailtest sh 23 | 24 | Example playbook 25 | ---------------- 26 | 27 | :: 28 | 29 | --- 30 | - name: Jail test 31 | hosts: servicehosts 32 | roles: 33 | - role: jail 34 | name: jailtest 35 | hostname: jailtest.buildbot.net 36 | internet_visible: false 37 | -------------------------------------------------------------------------------- /roles/jail/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | jail_debug: False 3 | internet_visible: False 4 | -------------------------------------------------------------------------------- /roles/jail/files/create_jail.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import sys 3 | 4 | (ezjail_default_flavour, name, ip, 5 | internal_network, external_network, internet_visible, 6 | internal_if, external_if) = sys.argv[1:] 7 | 8 | ip_addresses = [] 9 | 10 | if external_if and internet_visible == "True": 11 | ip_addresses.append(external_if + "|" + external_network + "." + ip) 12 | 13 | if internal_if: 14 | ip_addresses.append(internal_if + "|" + internal_network + "."
+ ip) 15 | 16 | subprocess.check_call(["ezjail-admin", "create", "-f", ezjail_default_flavour, 17 | name, ",".join(ip_addresses)]) 18 | -------------------------------------------------------------------------------- /roles/jail/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - base-jailhost 4 | allow_duplicates: yes 5 | -------------------------------------------------------------------------------- /roles/jail/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create jail script 3 | tags: jail 4 | copy: 5 | src: create_jail.py 6 | dest: "/usr/local/bin/create_jail.py" 7 | 8 | - name: create jail 9 | tags: jail 10 | command: "python3 /usr/local/bin/create_jail.py {{ ezjail_default_flavour }} {{ name }} {{ hosts_ips[name] }} {{ internal_network }} {{ external_network}} {{ internet_visible }} '{{internal_if}}' '{{ external_if }}'" 11 | args: 12 | creates: "{{ ezjail_conf_dir }}/{{ name }}" 13 | register: jail_created 14 | 15 | - name: set jail's hostname 16 | tags: jail 17 | lineinfile: 18 | dest: "{{ ezjail_conf_dir }}/{{ name }}" 19 | regexp: "^export jail_{{ name }}_hostname=" 20 | line: 'export jail_{{ name }}_hostname="{{ hostname }}"' 21 | when: jail_created is changed 22 | 23 | - name: start jail for bootstrapping 24 | tags: jail 25 | command: "ezjail-admin start {{ name }}" 26 | when: jail_created is changed 27 | 28 | # pf needs to account for that new jail 29 | - name: Restart pf 30 | tags: jail 31 | service: 32 | name: pf 33 | state: restarted 34 | when: jail_created is changed 35 | 36 | - name: Copy some host configuration files 37 | tags: jail 38 | command: "cp '/etc/{{ item }}' '{{ ezjail_jaildir }}/{{ name }}/etc/{{ item }}'" 39 | with_items: 40 | - resolv.conf 41 | 42 | - name: prepare bootstrap script 43 | tags: jail 44 | template: 45 | src: "templates/run-once" 46 | dest: "{{ ezjail_jaildir }}/{{ name }}/root/run-once" 47 | mode: "0700" 48 | when: jail_created is changed 49 | 50 | - name: execute bootstrap script {{ name }} 51 | tags: jail 52 | command: "jexec {{ name }} root/run-once" 53 | args: 54 | creates: "{{ ezjail_conf_dir }}/{{ name }}/root/.run-once" 55 | register: debug 56 | when: jail_created is changed 57 | 58 | - name: print output of bootstrap script 59 | tags: jail 60 | debug: 61 | var: debug.stdout_lines 62 | when: jail_debug 63 | 64 | - name: remove bootstrap script 65 | tags: jail 66 | file: 67 | path: "{{ ezjail_jaildir }}/{{ name }}/root/run-once" 68 | state: absent 69 | when: jail_created is changed 70 | 71 | - name: check if the jail is running 72 | tags: jail 73 | shell: "jls -j {{ name }} > /dev/null 2>&1" 74 | ignore_errors: True 75 | register: jail_running 76 | 77 | - name: start the jail 78 | tags: jail 79 | command: "ezjail-admin start {{ name }}" 80 | when: jail_running is failed 81 | -------------------------------------------------------------------------------- /roles/nginx-proxy-letsencrypt/files/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | nginx-proxy: 4 | restart: always 5 | image: docker.io/nginxproxy/nginx-proxy:1.5.1 6 | container_name: nginx-proxy 7 | ports: 8 | - "80:80" 9 | - "443:443" 10 | volumes: 11 | - /var/run/docker.sock:/tmp/docker.sock:ro 12 | - ./vhost:/etc/nginx/vhost.d 13 | # The only things that ./certs contains are links to certificates in ./letsencrypt 14 | - ./certs:/etc/nginx/certs:ro 15 
| - ./letsencrypt:/etc/nginx/letsencrypt:ro 16 | - ./acme_root:/usr/share/nginx/html:ro 17 | networks: 18 | - httpproxy 19 | 20 | certbot: 21 | image: docker.io/certbot/certbot:v2.6.0 22 | # Useful arguments: 23 | # --force-renew 24 | # --test-cert 25 | # --break-my-certs 26 | command: > 27 | certonly 28 | --keep-until-expiring 29 | --agree-tos 30 | --preferred-challenges=http 31 | --verbose 32 | --webroot 33 | --webroot-path /acme_webroot/ 34 | -m povilas@radix.lt 35 | -d buildbot.net 36 | -d docs.buildbot.net 37 | -d www.buildbot.net 38 | restart: "no" 39 | volumes: 40 | - "./letsencrypt:/etc/letsencrypt" 41 | - "./acme_root:/acme_webroot" 42 | environment: 43 | - TERM=xterm 44 | 45 | networks: 46 | httpproxy: 47 | name: httpproxy 48 | -------------------------------------------------------------------------------- /roles/nginx-proxy-letsencrypt/files/refresh_certs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd "$(dirname "${BASH_SOURCE[0]}")" 4 | 5 | docker-compose up certbot 6 | docker-compose exec nginx-proxy nginx -s reload 7 | -------------------------------------------------------------------------------- /roles/nginx-proxy-letsencrypt/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create volume vhost directory 3 | become: yes 4 | become_user: "{{ service_account }}" 5 | file: 6 | path: "{{ nginx_proxy_root }}/vhost" 7 | state: directory 8 | tags: bb-master 9 | 10 | - name: Create volume certs directory 11 | become: yes 12 | become_user: "{{ service_account }}" 13 | file: 14 | path: "{{ nginx_proxy_root }}/certs" 15 | state: directory 16 | tags: bb-master 17 | 18 | - name: Copy docker files 19 | become: yes 20 | become_user: "{{ service_account }}" 21 | copy: 22 | dest: "{{ nginx_proxy_root }}/{{ item }}" 23 | src: "{{ role_path }}/files/{{ item }}" 24 | mode: preserve 25 | loop: 26 | - docker-compose.yml 27 | - refresh_certs.sh 28 | tags: bb-master 29 | 30 | - name: Restart nginx-proxy container 31 | become: yes 32 | become_user: "{{ service_account }}" 33 | ansible.builtin.command: "docker-compose up -d" 34 | args: 35 | chdir: "{{ nginx_proxy_root }}" 36 | 37 | - name: install cert refresh crontask 38 | ansible.builtin.cron: 39 | name: refresh-nginx-proxy-certs 40 | job: "{{ nginx_proxy_root }}/refresh_certs.sh 2>&1 | systemd-cat -t crontab -p info" 41 | user: "{{ service_account }}" 42 | minute: "0" 43 | hour: "12" 44 | -------------------------------------------------------------------------------- /roles/nginx-proxy/files/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | nginx-proxy: 4 | restart: always 5 | image: docker.io/nginxproxy/nginx-proxy:1.6.4 6 | container_name: nginx-proxy 7 | ports: 8 | - "80:80" 9 | - "443:443" 10 | volumes: 11 | - /var/run/docker.sock:/tmp/docker.sock:ro 12 | - ./vhost:/etc/nginx/vhost.d 13 | - ./certs:/etc/nginx/certs:ro 14 | networks: 15 | - httpproxy 16 | 17 | networks: 18 | httpproxy: 19 | name: httpproxy 20 | -------------------------------------------------------------------------------- /roles/nginx-proxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create volume vhost directory 3 | become: yes 4 | become_user: "{{ service_account }}" 5 | file: 6 | path: "{{ nginx_proxy_root }}/vhost" 7 | state: directory 8 | tags: bb-master 9 | 10 | - name: Create 
volume certs directory 11 | become: yes 12 | become_user: "{{ service_account }}" 13 | file: 14 | path: "{{ nginx_proxy_root }}/certs" 15 | state: directory 16 | tags: bb-master 17 | 18 | - name: Copy docker files 19 | become: yes 20 | become_user: "{{ service_account }}" 21 | copy: 22 | dest: "{{ nginx_proxy_root }}/{{ item }}" 23 | src: "{{ role_path }}/files/{{ item }}" 24 | mode: preserve 25 | loop: 26 | - docker-compose.yml 27 | tags: bb-master 28 | 29 | - name: Restart nginx-proxy container 30 | become: yes 31 | become_user: "{{ service_account }}" 32 | ansible.builtin.command: "docker-compose up -d" 33 | args: 34 | chdir: "{{ nginx_proxy_root }}" 35 | -------------------------------------------------------------------------------- /roles/nginx/README.rst: -------------------------------------------------------------------------------- 1 | Nginx role 2 | ========== 3 | 4 | The nginx role requires the following arguments: 5 | 6 | ``server_name`` 7 | the FQDN of the server. 8 | 9 | This will be used to name the configuration file as well as to set the server_name parameter in the nginx server section. 10 | 11 | ``nginx_template`` 12 | The template to use to generate the configuration file. 13 | Each template has its own parameters. 14 | The following templates are available (and no attempt to validate the value is made): 15 | 16 | ``static`` 17 | A template for static web site configuration. 18 | Parameters: 19 | 20 | ``server_root`` 21 | directory where the static content resides 22 | 23 | ``extra_config_from`` 24 | the name of the file to include in the config (currently only used for www site redirects) 25 | 26 | ``ssl`` 27 | if True, serve the same site via HTTPS (using the key and certificate in secrets) 28 | 29 | ``proxy`` 30 | A template for a simple reverse-proxy setup. 31 | Parameters: 32 | 33 | ``upstream_url`` 34 | host:port of the upstream 35 | 36 | ``uwsgi`` 37 | A template for a uwsgi site. 38 | Parameters: 39 | 40 | ``uwsgi_sock`` 41 | Either a unix domain socket (include the ``unix:`` prefix) or a host:port pair for uwsgi. 42 | 43 | ``domain-redirect`` 44 | A template for redirecting traffic for the whole domain to another one. 45 | Parameters: 46 | 47 | ``server_names`` 48 | list of server names that should be redirected to another domain 49 | 50 | ``target_url`` 51 | URL of the target server including scheme (e.g. http://buildbot.net) 52 | 53 | .. note:: 54 | 55 | For this template, ``server_names`` gives the hostnames that nginx will redirect; the common ``server_name`` parameter is only used to name the configuration file.
56 | 57 | Examples 58 | -------- 59 | 60 | Static:: 61 | 62 | - role: nginx 63 | nginx_template: static 64 | server_name: test.buildbot.net 65 | server_root: / 66 | 67 | Proxy:: 68 | 69 | - role: nginx 70 | nginx_template: proxy 71 | server_name: test.buildbot.net 72 | upstream_url: 192.168.1.0:8010 73 | 74 | Domain Redirect:: 75 | 76 | - role: nginx 77 | nginx_template: domain-redirect 78 | server_name: redirects 79 | server_names: 80 | - www.buildbot.net 81 | - www.buildbot.org 82 | - buildbot.org 83 | target_url: http://buildbot.net 84 | -------------------------------------------------------------------------------- /roles/nginx/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ssl: False 3 | -------------------------------------------------------------------------------- /roles/nginx/handlers/main.yml: -------------------------------------------------------------------------------- 1 | # Nginx related handlers 2 | --- 3 | - name: reload nginx 4 | service: 5 | name: nginx 6 | state: reloaded 7 | -------------------------------------------------------------------------------- /roles/nginx/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install nginx package 3 | pkgng: 4 | name: nginx 5 | state: present 6 | environment: "{{ proxy_env }}" 7 | 8 | # this seems to be needed, but not formally required, for nginx 9 | - name: install pcre package 10 | pkgng: 11 | name: pcre 12 | state: present 13 | environment: "{{ proxy_env }}" 14 | 15 | - name: Download acme.sh 16 | git: repo=https://github.com/Neilpang/acme.sh dest=/usr/local/src/acme.sh 17 | when: ssl 18 | 19 | 20 | - name: Install acme.sh 21 | command: /usr/local/src/acme.sh/acme.sh --install 22 | args: 23 | creates: /root/.acme.sh/acme.sh 24 | chdir: /usr/local/src/acme.sh 25 | when: ssl 26 | 27 | 28 | - name: make sure necessary configuration directories exist 29 | file: 30 | path: "{{ nginx_conf_dir }}/{{item}}" 31 | mode: "0755" 32 | state: directory 33 | with_items: 34 | - conf.d 35 | - sites 36 | 37 | - name: make sure log directory exists 38 | file: 39 | path: "{{ item }}" 40 | mode: "0755" 41 | state: directory 42 | with_items: 43 | - "{{ nginx_log_dir }}" 44 | - "{{ nginx_log_dir }}/{{ server_name }}" 45 | 46 | - name: make sure spool exists 47 | file: 48 | path: "{{ nginx_spool_dir }}" 49 | mode: "0755" 50 | state: directory 51 | owner: "www" 52 | group: "www" 53 | 54 | - name: install server configuration file 55 | template: 56 | src: "{{ nginx_template }}" 57 | dest: "{{ nginx_conf_dir }}/sites/{{ server_name }}" 58 | notify: reload nginx 59 | 60 | 61 | - name: create DHE parameter file 62 | command: "/usr/bin/openssl dhparam -out {{ nginx_conf_dir }}/dhparam.pem 2048" 63 | args: 64 | creates: "{{ nginx_conf_dir }}/dhparam.pem" 65 | 66 | - name: set permissions on DHE parameter file 67 | file: 68 | path: "{{ nginx_conf_dir }}/dhparam.pem" 69 | mode: 0600 70 | owner: root 71 | group: wheel 72 | 73 | - name: install nginx.conf 74 | template: 75 | src: "nginx.conf" 76 | dest: "{{ nginx_conf_dir }}/nginx.conf" 77 | notify: reload nginx 78 | 79 | - name: install ssl configuration file 80 | template: 81 | src: "ssl.conf" 82 | dest: "{{ nginx_conf_dir }}/conf.d/ssl.conf" 83 | notify: reload nginx 84 | 85 | - name: enable and start nginx service 86 | service: 87 | name: nginx 88 | enabled: true 89 | state: started 90 | 91 | - name: Issue the certificate 92 | command: "/root/.acme.sh/acme.sh --nginx --issue -d {{server_name}}" 93 | args: 94 | chdir: /root/.acme.sh 95 | creates:
"/root/.acme.sh/{{server_name}}" 96 | when: ssl 97 | 98 | - name: Install the certificate 99 | command: | 100 | /root/.acme.sh/acme.sh --install-cert -d {{server_name}} \ 101 | --key-file {{ nginx_cert_dir }}/{{server_name}}.key \ 102 | --fullchain-file {{ nginx_cert_dir }}/{{server_name}}.crt \ 103 | --reloadcmd "service nginx reload" 104 | when: ssl 105 | -------------------------------------------------------------------------------- /roles/nginx/templates/domain-redirect: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name {{ server_names|join(' ') }}; 4 | 5 | return 301 {{ target_url }}$request_uri; 6 | } 7 | -------------------------------------------------------------------------------- /roles/nginx/templates/multiproxy: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name {{server_name}}; 4 | 5 | access_log {{nginx_log_dir}}/{{server_name}}/access.log; 6 | error_log {{nginx_log_dir}}/{{server_name}}/error.log; 7 | 8 | {% for item in upstream_urls %} 9 | location {{ item.endpoint }} { 10 | proxy_pass http://{{ item.url }}; 11 | 12 | {% if 'use_websocket' in item %} 13 | proxy_buffering off; 14 | 15 | proxy_read_timeout 600; 16 | chunked_transfer_encoding off; 17 | proxy_cache off; 18 | # --- 19 | 20 | # These three lines would be required for web socket proxying 21 | proxy_http_version 1.1; 22 | proxy_set_header Upgrade $http_upgrade; 23 | proxy_set_header Connection "upgrade"; 24 | {% endif %} 25 | } 26 | {% endfor %} 27 | 28 | #error_page 404 /404.html; 29 | 30 | # redirect server error pages to the static page /50x.html 31 | # 32 | error_page 500 502 503 504 /50x.html; 33 | location = /50x.html { 34 | root /usr/local/www/nginx-dist; 35 | } 36 | } 37 | {% if ssl %} 38 | server { 39 | listen 443 ssl http2; 40 | server_name {{server_name}}; 41 | 42 | # certs sent to the client in SERVER HELLO are concatenated in ssl_certificate 43 | ssl_certificate {{ nginx_cert_dir }}/{{ server_name }}.crt; 44 | ssl_certificate_key {{ nginx_cert_dir }}/{{ server_name }}.key; 45 | 46 | # put a one day session timeout for websockets to stay longer 47 | ssl_session_cache shared:SSL:1440m; 48 | ssl_session_timeout 1440m; 49 | 50 | include conf.d/ssl.conf; 51 | 52 | access_log {{nginx_log_dir}}/{{server_name}}/ssl-access.log; 53 | error_log {{nginx_log_dir}}/{{server_name}}/ssl-error.log; 54 | 55 | {% for item in upstream_urls %} 56 | location {{ item.endpoint }} { 57 | 58 | proxy_pass http://{{ item.url }}; 59 | 60 | {% if 'use_websocket' in item %} 61 | 62 | proxy_buffering off; 63 | 64 | proxy_read_timeout 600; 65 | chunked_transfer_encoding off; 66 | proxy_cache off; 67 | # --- 68 | 69 | # These three lines would be required for web socket proxying 70 | proxy_http_version 1.1; 71 | proxy_set_header Upgrade $http_upgrade; 72 | proxy_set_header Connection "upgrade"; 73 | {% endif %} 74 | } 75 | {% endfor %} 76 | 77 | #error_page 404 /404.html; 78 | 79 | # redirect server error pages to the static page /50x.html 80 | # 81 | error_page 500 502 503 504 /50x.html; 82 | location = /50x.html { 83 | root /usr/local/www/nginx-dist; 84 | } 85 | } 86 | {% endif %} 87 | -------------------------------------------------------------------------------- /roles/nginx/templates/nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes auto; 2 | 3 | events { 4 | worker_connections 1024; 5 | } 6 | 7 | http { 8 | include 
mime.types; 9 | default_type application/octet-stream; 10 | sendfile on; 11 | keepalive_timeout 65; 12 | 13 | # Put default logs in the same place where other logs can be found 14 | access_log {{nginx_log_dir}}/access.log; 15 | error_log {{nginx_log_dir}}/error.log; 16 | 17 | # To avoid temporary files/directories being removed by clean-tmp periodic 18 | # task. 19 | proxy_temp_path {{ nginx_spool_dir }}/proxy_temp 1 2; 20 | 21 | include {{ nginx_conf_dir }}/conf.d/*.conf; 22 | # TODO(sa2ajj): it might be good idea to use a particular .suffix 23 | include {{ nginx_conf_dir }}/sites/*; 24 | } 25 | -------------------------------------------------------------------------------- /roles/nginx/templates/proxy: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name {{server_name}}; 4 | 5 | access_log {{nginx_log_dir}}/{{server_name}}/access.log; 6 | error_log {{nginx_log_dir}}/{{server_name}}/error.log; 7 | 8 | location / { 9 | proxy_pass http://{{upstream_url}}; 10 | 11 | # +++ NOTE(sa2ajj): these need to be reviewed (trying to fix the errors 12 | # shown in error.log) 13 | proxy_buffering off; 14 | 15 | proxy_read_timeout 600; 16 | chunked_transfer_encoding off; 17 | proxy_cache off; 18 | # --- 19 | 20 | # These three lines would be required for web socket proxying 21 | proxy_http_version 1.1; 22 | proxy_set_header Upgrade $http_upgrade; 23 | proxy_set_header Connection "upgrade"; 24 | } 25 | 26 | #error_page 404 /404.html; 27 | 28 | # redirect server error pages to the static page /50x.html 29 | # 30 | error_page 500 502 503 504 /50x.html; 31 | location = /50x.html { 32 | root /usr/local/www/nginx-dist; 33 | } 34 | } 35 | {% if ssl %} 36 | server { 37 | listen 443 ssl http2; 38 | server_name {{server_name}}; 39 | 40 | # certs sent to the client in SERVER HELLO are concatenated in ssl_certificate 41 | ssl_certificate {{ nginx_cert_dir }}/{{ server_name }}.crt; 42 | ssl_certificate_key {{ nginx_cert_dir }}/{{ server_name }}.key; 43 | ssl_session_timeout 5m; 44 | ssl_session_cache shared:SSL:5m; 45 | add_header Strict-Transport-Security "max-age=31536000;" always; 46 | 47 | include conf.d/ssl.conf; 48 | 49 | access_log {{nginx_log_dir}}/{{server_name}}/ssl-access.log; 50 | error_log {{nginx_log_dir}}/{{server_name}}/ssl-error.log; 51 | 52 | location / { 53 | proxy_pass http://{{upstream_url}}; 54 | 55 | # +++ NOTE(sa2ajj): these need to be reviewed (trying to fix the errors 56 | # shown in error.log) 57 | proxy_buffering off; 58 | 59 | proxy_read_timeout 600; 60 | chunked_transfer_encoding off; 61 | proxy_cache off; 62 | # --- 63 | 64 | # These three lines would be required for web socket proxying 65 | proxy_http_version 1.1; 66 | proxy_set_header Upgrade $http_upgrade; 67 | proxy_set_header Connection "upgrade"; 68 | } 69 | 70 | #error_page 404 /404.html; 71 | 72 | # redirect server error pages to the static page /50x.html 73 | # 74 | error_page 500 502 503 504 /50x.html; 75 | location = /50x.html { 76 | root /usr/local/www/nginx-dist; 77 | } 78 | } 79 | {% endif %} 80 | -------------------------------------------------------------------------------- /roles/nginx/templates/ssl.conf: -------------------------------------------------------------------------------- 1 | # Based on https://wiki.mozilla.org/Security/Server_Side_TLS#Nginx. 2 | 3 | # Diffie-Hellman parameter for DHE ciphersuites, recommended 2048 bits 4 | ssl_dhparam {{ nginx_conf_dir }}/dhparam.pem; 5 | 6 | # Modern configuration. 
7 | ssl_protocols TLSv1.1 TLSv1.2; 8 | ssl_ciphers 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK'; 9 | ssl_prefer_server_ciphers on; 10 | 11 | # TODO: support HSTS, OCSP stapling 12 | 13 | 14 | # Enable this if you want HSTS (recommended) 15 | # add_header Strict-Transport-Security max-age=15768000; 16 | 17 | # OCSP Stapling --- 18 | # fetch OCSP records from URL in ssl_certificate and cache them 19 | ssl_stapling off; 20 | ssl_stapling_verify off; 21 | ## verify chain of trust of OCSP response using Root CA and Intermediate certs 22 | #ssl_trusted_certificate /path/to/root_CA_cert_plus_intermediates; 23 | #resolver ; 24 | -------------------------------------------------------------------------------- /roles/nginx/templates/static: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name {{server_name}}; 4 | 5 | access_log {{nginx_log_dir}}/{{server_name}}/access.log; 6 | error_log {{nginx_log_dir}}/{{server_name}}/error.log; 7 | {% if extra_config_from is defined -%} 8 | {{ lookup('file', extra_config_from)|indent }} 9 | {%- endif %} 10 | 11 | location / { 12 | root {{server_root}}; 13 | index index.html index.htm; 14 | } 15 | 16 | #error_page 404 /404.html; 17 | 18 | # redirect server error pages to the static page /50x.html 19 | # 20 | error_page 500 502 503 504 /50x.html; 21 | location = /50x.html { 22 | root /usr/local/www/nginx-dist; 23 | } 24 | } 25 | {% if ssl %} 26 | server { 27 | listen 443 ssl; 28 | server_name {{server_name}}; 29 | 30 | 31 | # certs sent to the client in SERVER HELLO are concatenated in ssl_certificate 32 | ssl_certificate {{ nginx_cert_dir }}/{{ server_name }}.crt; 33 | ssl_certificate_key {{ nginx_cert_dir }}/{{ server_name }}.key; 34 | ssl_session_timeout 5m; 35 | ssl_session_cache shared:SSL:5m; 36 | 37 | include conf.d/ssl.conf; 38 | 39 | access_log {{nginx_log_dir}}/{{server_name}}/ssl-access.log; 40 | error_log {{nginx_log_dir}}/{{server_name}}/ssl-error.log; 41 | {% if extra_config_from is defined -%} 42 | {{ lookup('file', extra_config_from)|indent }} 43 | {%- endif %} 44 | 45 | location / { 46 | root {{server_root}}; 47 | index index.html index.htm; 48 | } 49 | 50 | #error_page 404 /404.html; 51 | 52 | # redirect server error pages to the static page /50x.html 53 | # 54 | error_page 500 502 503 504 /50x.html; 55 | location = /50x.html { 56 | root /usr/local/www/nginx-dist; 57 | } 58 | } 59 | {% endif %} 60 | -------------------------------------------------------------------------------- /roles/nginx/templates/uwsgi: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name {{server_name}}; 4 | 5 | access_log {{nginx_log_dir}}/{{server_name}}/access.log; 6 | error_log {{nginx_log_dir}}/{{server_name}}/error.log; 7 | {% if extra_config_from is defined -%} 8 | {{ lookup('file', extra_config_from)|indent }} 9 | {%- endif %} 10 | 11 | location / { 12 | uwsgi_pass {{ uwsgi_sock }}; 13 | include uwsgi_params; 14 | } 15 | 16 | location
/chrome { 17 | alias /usr/www/{{ server_name }}/chrome; 18 | } 19 | location /favicon.ico { 20 | alias /usr/www/{{ server_name }}/favicon.ico; 21 | } 22 | location /robots.txt { 23 | alias /usr/www/{{ server_name }}/robots.txt; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /roles/nginx/vars/main.yml: -------------------------------------------------------------------------------- 1 | # Nginx role specific variables 2 | --- 3 | nginx_conf_dir: "/usr/local/etc/nginx" 4 | nginx_cert_dir: "/usr/local/etc/nginx/certs" 5 | nginx_log_dir: "/var/log/nginx" 6 | nginx_spool_dir: "/var/spool/nginx" 7 | -------------------------------------------------------------------------------- /roles/packages/README.rst: -------------------------------------------------------------------------------- 1 | This role makes sure that the specified packages are installed. 2 | 3 | Parameter: 4 | 5 | ``packages`` 6 | list of packages that is expected to be installed in the system 7 | -------------------------------------------------------------------------------- /roles/packages/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | allow_duplicates: yes 3 | -------------------------------------------------------------------------------- /roles/packages/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install packages 3 | pkgng: 4 | name: "{{ packages }}" 5 | state: present 6 | environment: "{{ proxy_env }}" 7 | -------------------------------------------------------------------------------- /roles/postfix/files/local-host-names: -------------------------------------------------------------------------------- 1 | buildbot.net OK 2 | -------------------------------------------------------------------------------- /roles/postfix/files/master.cf: -------------------------------------------------------------------------------- 1 | # 2 | # Postfix master process configuration file. For details on the format 3 | # of the file, see the master(5) manual page (command: "man 5 master"). 4 | # 5 | # Do not forget to execute "postfix reload" after editing this file. 6 | # 7 | # ========================================================================== 8 | # service type private unpriv chroot wakeup maxproc command + args 9 | # (yes) (yes) (yes) (never) (100) 10 | # ========================================================================== 11 | smtp inet n - n - - smtpd 12 | #submission inet n - n - - smtpd 13 | # -o smtpd_tls_security_level=encrypt 14 | # -o smtpd_sasl_auth_enable=yes 15 | # -o smtpd_client_restrictions=permit_sasl_authenticated,reject 16 | # -o milter_macro_daemon_name=ORIGINATING 17 | smtps inet n - n - - smtpd 18 | -o smtpd_tls_wrappermode=yes 19 | -o smtpd_sasl_auth_enable=yes 20 | -o smtpd_client_restrictions=permit_sasl_authenticated,reject 21 | -o milter_macro_daemon_name=ORIGINATING 22 | #628 inet n - n - - qmqpd 23 | pickup fifo n - n 60 1 pickup 24 | cleanup unix n - n - 0 cleanup 25 | qmgr fifo n - n 300 1 qmgr 26 | #qmgr fifo n - n 300 1 oqmgr 27 | tlsmgr unix - - n 1000? 1 tlsmgr 28 | rewrite unix - - n - - trivial-rewrite 29 | bounce unix - - n - 0 bounce 30 | defer unix - - n - 0 bounce 31 | trace unix - - n - 0 bounce 32 | verify unix - - n - 1 verify 33 | flush unix n - n 1000? 
0 flush 34 | proxymap unix - - n - - proxymap 35 | proxywrite unix - - n - 1 proxymap 36 | smtp unix - - n - - smtp 37 | # When relaying mail as backup MX, disable fallback_relay to avoid MX loops 38 | relay unix - - n - - smtp 39 | -o smtp_fallback_relay= 40 | # -o smtp_helo_timeout=5 -o smtp_connect_timeout=5 41 | showq unix n - n - - showq 42 | error unix - - n - - error 43 | retry unix - - n - - error 44 | discard unix - - n - - discard 45 | local unix - n n - - local 46 | virtual unix - n n - - virtual 47 | lmtp unix - - n - - lmtp 48 | anvil unix - - n - 1 anvil 49 | scache unix - - n - 1 scache 50 | # 51 | # ==================================================================== 52 | # Interfaces to non-Postfix software. Be sure to examine the manual 53 | # pages of the non-Postfix software to find out what options it wants. 54 | # 55 | # Many of the following services use the Postfix pipe(8) delivery 56 | # agent. See the pipe(8) man page for information about ${recipient} 57 | # and other message envelope options. 58 | # ==================================================================== 59 | # 60 | # maildrop. See the Postfix MAILDROP_README file for details. 61 | # Also specify in main.cf: maildrop_destination_recipient_limit=1 62 | # 63 | #maildrop unix - n n - - pipe 64 | # flags=DRhu user=vmail argv=/usr/local/bin/maildrop -d ${recipient} 65 | # 66 | # ==================================================================== 67 | # 68 | # The Cyrus deliver program has changed incompatibly, multiple times. 69 | # 70 | #old-cyrus unix - n n - - pipe 71 | # flags=R user=cyrus argv=/cyrus/bin/deliver -e -m ${extension} ${user} 72 | # 73 | # ==================================================================== 74 | # 75 | # Cyrus 2.1.5 (Amos Gouaux) 76 | # Also specify in main.cf: cyrus_destination_recipient_limit=1 77 | # 78 | #cyrus unix - n n - - pipe 79 | # user=cyrus argv=/cyrus/bin/deliver -e -r ${sender} -m ${extension} ${user} 80 | # 81 | # ==================================================================== 82 | # 83 | # See the Postfix UUCP_README file for configuration details. 84 | # 85 | #uucp unix - n n - - pipe 86 | # flags=Fqhu user=uucp argv=uux -r -n -z -a$sender - $nexthop!rmail ($recipient) 87 | # 88 | # ==================================================================== 89 | # 90 | # Other external delivery methods. 91 | # 92 | #ifmail unix - n n - - pipe 93 | # flags=F user=ftn argv=/usr/lib/ifmail/ifmail -r $nexthop ($recipient) 94 | # 95 | #bsmtp unix - n n - - pipe 96 | # flags=Fq. 
user=bsmtp argv=/usr/local/sbin/bsmtp -f $sender $nexthop $recipient 97 | # 98 | #scalemail-backend unix - n n - 2 pipe 99 | # flags=R user=scalemail argv=/usr/lib/scalemail/bin/scalemail-store 100 | # ${nexthop} ${user} ${extension} 101 | # 102 | #mailman unix - n n - - pipe 103 | # flags=FR user=list argv=/usr/lib/mailman/bin/postfix-to-mailman.py 104 | # ${nexthop} ${user} 105 | 106 | #dovecot unix - n n - - pipe 107 | # flags=DRhu user=vmail:vmail argv=/usr/local/libexec/dovecot/deliver -f ${sender} -d ${recipient} 108 | 109 | #dovecot unix - n n - - pipe 110 | # flags=DRhu user=vmail:vmail argv=/usr/local/libexec/dovecot/deliver -f ${sender} -d ${user}@${nexthop} -n -m ${extension} 111 | 112 | #procmail unix - n n - - pipe 113 | # flags=DRX user=nobody argv=/usr/local/bin/procmail -t -p -f ${sender} -d ${user} 114 | # ${sender} ${recipient} 115 | # flags=DRX user=nobody argv=/usr/local/bin/procmail -p -t -f ${sender} -d ${user} ${sender} ${recipient} 116 | 117 | #smtp inet n - n - 1 postscreen 118 | #smtpd pass - - n - - smtpd 119 | #dnsblog unix - - n - 0 dnsblog 120 | #tlsproxy unix - - n - 0 tlsproxy 121 | -------------------------------------------------------------------------------- /roles/postfix/files/transport: -------------------------------------------------------------------------------- 1 | mailman-loop@buildbot.net smtp:192.168.80.241 2 | 3 | 4 | mailman@buildbot.net smtp:192.168.80.241 5 | mailman-admin@buildbot.net smtp:192.168.80.241 6 | mailman-bounces@buildbot.net smtp:192.168.80.241 7 | mailman-confirm@buildbot.net smtp:192.168.80.241 8 | mailman-join@buildbot.net smtp:192.168.80.241 9 | mailman-leave@buildbot.net smtp:192.168.80.241 10 | mailman-owner@buildbot.net smtp:192.168.80.241 11 | mailman-request@buildbot.net smtp:192.168.80.241 12 | mailman-subscribe@buildbot.net smtp:192.168.80.241 13 | mailman-unsubscribe@buildbot.net smtp:192.168.80.241 14 | 15 | botherders@buildbot.net smtp:192.168.80.241 16 | botherders-admin@buildbot.net smtp:192.168.80.241 17 | botherders-bounces@buildbot.net smtp:192.168.80.241 18 | botherders-confirm@buildbot.net smtp:192.168.80.241 19 | botherders-join@buildbot.net smtp:192.168.80.241 20 | botherders-leave@buildbot.net smtp:192.168.80.241 21 | botherders-owner@buildbot.net smtp:192.168.80.241 22 | botherders-request@buildbot.net smtp:192.168.80.241 23 | botherders-subscribe@buildbot.net smtp:192.168.80.241 24 | botherders-unsubscribe@buildbot.net smtp:192.168.80.241 25 | 26 | gsoc@buildbot.net smtp:192.168.80.241 27 | gsoc-admin@buildbot.net smtp:192.168.80.241 28 | gsoc-bounces@buildbot.net smtp:192.168.80.241 29 | gsoc-confirm@buildbot.net smtp:192.168.80.241 30 | gsoc-join@buildbot.net smtp:192.168.80.241 31 | gsoc-leave@buildbot.net smtp:192.168.80.241 32 | gsoc-owner@buildbot.net smtp:192.168.80.241 33 | gsoc-request@buildbot.net smtp:192.168.80.241 34 | gsoc-subscribe@buildbot.net smtp:192.168.80.241 35 | gsoc-unsubscribe@buildbot.net smtp:192.168.80.241 36 | 37 | bugs@buildbot.net smtp:192.168.80.241 38 | bugs-admin@buildbot.net smtp:192.168.80.241 39 | bugs-bounces@buildbot.net smtp:192.168.80.241 40 | bugs-confirm@buildbot.net smtp:192.168.80.241 41 | bugs-join@buildbot.net smtp:192.168.80.241 42 | bugs-leave@buildbot.net smtp:192.168.80.241 43 | bugs-owner@buildbot.net smtp:192.168.80.241 44 | bugs-request@buildbot.net smtp:192.168.80.241 45 | bugs-subscribe@buildbot.net smtp:192.168.80.241 46 | bugs-unsubscribe@buildbot.net smtp:192.168.80.241 47 | 48 | metabuildbot@buildbot.net smtp:192.168.80.241 49 | 
metabuildbot-admin@buildbot.net smtp:192.168.80.241 50 | metabuildbot-bounces@buildbot.net smtp:192.168.80.241 51 | metabuildbot-confirm@buildbot.net smtp:192.168.80.241 52 | metabuildbot-join@buildbot.net smtp:192.168.80.241 53 | metabuildbot-leave@buildbot.net smtp:192.168.80.241 54 | metabuildbot-owner@buildbot.net smtp:192.168.80.241 55 | metabuildbot-request@buildbot.net smtp:192.168.80.241 56 | metabuildbot-subscribe@buildbot.net smtp:192.168.80.241 57 | metabuildbot-unsubscribe@buildbot.net smtp:192.168.80.241 58 | 59 | administration@buildbot.net smtp:192.168.80.241 60 | administration-admin@buildbot.net smtp:192.168.80.241 61 | administration-bounces@buildbot.net smtp:192.168.80.241 62 | administration-confirm@buildbot.net smtp:192.168.80.241 63 | administration-join@buildbot.net smtp:192.168.80.241 64 | administration-leave@buildbot.net smtp:192.168.80.241 65 | administration-owner@buildbot.net smtp:192.168.80.241 66 | administration-request@buildbot.net smtp:192.168.80.241 67 | administration-subscribe@buildbot.net smtp:192.168.80.241 68 | administration-unsubscribe@buildbot.net smtp:192.168.80.241 69 | 70 | announce@buildbot.net smtp:192.168.80.241 71 | announce-admin@buildbot.net smtp:192.168.80.241 72 | announce-bounces@buildbot.net smtp:192.168.80.241 73 | announce-confirm@buildbot.net smtp:192.168.80.241 74 | announce-join@buildbot.net smtp:192.168.80.241 75 | announce-leave@buildbot.net smtp:192.168.80.241 76 | announce-owner@buildbot.net smtp:192.168.80.241 77 | announce-request@buildbot.net smtp:192.168.80.241 78 | announce-subscribe@buildbot.net smtp:192.168.80.241 79 | announce-unsubscribe@buildbot.net smtp:192.168.80.241 80 | 81 | sysadmin@buildbot.net smtp:192.168.80.241 82 | sysadmin-admin@buildbot.net smtp:192.168.80.241 83 | sysadmin-bounces@buildbot.net smtp:192.168.80.241 84 | sysadmin-confirm@buildbot.net smtp:192.168.80.241 85 | sysadmin-join@buildbot.net smtp:192.168.80.241 86 | sysadmin-leave@buildbot.net smtp:192.168.80.241 87 | sysadmin-owner@buildbot.net smtp:192.168.80.241 88 | sysadmin-request@buildbot.net smtp:192.168.80.241 89 | sysadmin-subscribe@buildbot.net smtp:192.168.80.241 90 | sysadmin-unsubscribe@buildbot.net smtp:192.168.80.241 91 | 92 | users@buildbot.net smtp:192.168.80.241 93 | users-admin@buildbot.net smtp:192.168.80.241 94 | users-bounces@buildbot.net smtp:192.168.80.241 95 | users-confirm@buildbot.net smtp:192.168.80.241 96 | users-join@buildbot.net smtp:192.168.80.241 97 | users-leave@buildbot.net smtp:192.168.80.241 98 | users-owner@buildbot.net smtp:192.168.80.241 99 | users-request@buildbot.net smtp:192.168.80.241 100 | users-subscribe@buildbot.net smtp:192.168.80.241 101 | users-unsubscribe@buildbot.net smtp:192.168.80.241 102 | 103 | devel@buildbot.net smtp:192.168.80.241 104 | devel-admin@buildbot.net smtp:192.168.80.241 105 | devel-bounces@buildbot.net smtp:192.168.80.241 106 | devel-confirm@buildbot.net smtp:192.168.80.241 107 | devel-join@buildbot.net smtp:192.168.80.241 108 | devel-leave@buildbot.net smtp:192.168.80.241 109 | devel-owner@buildbot.net smtp:192.168.80.241 110 | devel-request@buildbot.net smtp:192.168.80.241 111 | devel-subscribe@buildbot.net smtp:192.168.80.241 112 | devel-unsubscribe@buildbot.net smtp:192.168.80.241 113 | 114 | commits@buildbot.net smtp:192.168.80.241 115 | commits-admin@buildbot.net smtp:192.168.80.241 116 | commits-bounces@buildbot.net smtp:192.168.80.241 117 | commits-confirm@buildbot.net smtp:192.168.80.241 118 | commits-join@buildbot.net smtp:192.168.80.241 119 | 
commits-leave@buildbot.net smtp:192.168.80.241 120 | commits-owner@buildbot.net smtp:192.168.80.241 121 | commits-request@buildbot.net smtp:192.168.80.241 122 | commits-subscribe@buildbot.net smtp:192.168.80.241 123 | commits-unsubscribe@buildbot.net smtp:192.168.80.241 124 | -------------------------------------------------------------------------------- /roles/postfix/files/virtusertable: -------------------------------------------------------------------------------- 1 | postmaster admin 2 | hostmaster admin 3 | usenet admin 4 | news admin 5 | webmaster admin 6 | www admin 7 | uucp admin 8 | ftp admin 9 | mailadmin admin 10 | abuse sysadmin@buildbot.net 11 | bbinfra sysadmin@buildbot.net 12 | 13 | admin amar 14 | #amar verm@darkbeer.org 15 | #dustin dustin@v.igoro.us 16 | tracnotify verm@darkbeer.org, djmitche@gmail.com, rutsky.vladimir@gmail.com 17 | trac verm@darkbeer.org, djmitche@gmail.com, rutsky.vladimir@gmail.com 18 | 19 | bill@buildbot.net bill@baddogconsulting.com 20 | dustin@buildbot.net dustin@v.igoro.us 21 | jared@buildbot.net jared.grubb@gmail.com 22 | pierre@buildbot.net tardyp@gmail.com 23 | tom@buildbot.net tom.prince@ualberta.net 24 | amar@buildbot.net verm@darkbeer.org 25 | sa2ajj@buildbot.net me@sa2ajj.net 26 | 27 | bot@buildbot.net me+bb@sa2ajj.net 28 | trachelp@buildbot.net verm@darkbeer.org 29 | -------------------------------------------------------------------------------- /roles/postfix/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Reload Postfix 2 | service: 3 | name: postfix 4 | state: reloaded 5 | -------------------------------------------------------------------------------- /roles/postfix/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: install postfix (FreeBSD) 2 | pkgng: 3 | name: postfix 4 | state: present 5 | when: 'ansible_distribution == "FreeBSD"' 6 | environment: "{{ proxy_env }}" 7 | 8 | - name: add postfix config 9 | notify: Reload Postfix 10 | copy: 11 | src: "{{item}}" 12 | dest: "/usr/local/etc/postfix/{{item}}" 13 | with_items: 14 | - main.cf 15 | - master.cf 16 | - local-host-names 17 | - virtusertable 18 | - transport 19 | -------------------------------------------------------------------------------- /roles/ssh-client/key/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set ssh keys 3 | copy: 4 | content: "{{ ssh_private_key }}" 5 | dest: "{{ getent_passwd[worker_account].4 }}/.ssh/{{ ssh_private_key_file }}" 6 | -------------------------------------------------------------------------------- /roles/ssh-client/known_hosts/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ssh_client_hosts: [] 3 | -------------------------------------------------------------------------------- /roles/ssh-client/known_hosts/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Uses the ssh_client_hosts role variable and ssh_host_keys group variable. 
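# Editor's sketch (not part of the original file): the two variables are
# assumed to look roughly like this; each host listed in ssh_client_hosts that
# also appears in the ssh_host_keys map ends up as one "<host> <key>" line in
# the generated known_hosts file (see known_hosts.j2 below).
#
#   ssh_client_hosts:
#     - service1.example.net            # hypothetical hostname
#   ssh_host_keys:
#     service1.example.net:
#       - "ssh-ed25519 AAAAC3...hypothetical-public-key"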
3 | - name: Set host keys 4 | template: 5 | src: known_hosts.j2 6 | dest: "{{ getent_passwd[worker_account].4 }}/.ssh/known_hosts" 7 | -------------------------------------------------------------------------------- /roles/ssh-client/known_hosts/templates/known_hosts.j2: -------------------------------------------------------------------------------- 1 | {% for ssh_host in ssh_client_hosts %} 2 | {% if ssh_host in ssh_host_keys %} 3 | {% for host_key in ssh_host_keys[ssh_host] %} 4 | {{ ssh_host }} {{ host_key }} 5 | {% endfor %} 6 | {% endif %} 7 | {% endfor %} 8 | -------------------------------------------------------------------------------- /roles/ssh/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # parameters that can be tweaked for running ssh in jails, for example 3 | sshd_port: 22 4 | listen_addresses: 5 | - "{{ ansible_default_ipv4.address }}" 6 | - "{{ internal_ip }}" 7 | extra_config: 8 | -------------------------------------------------------------------------------- /roles/ssh/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart sshd 3 | service: 4 | name: sshd 5 | state: restarted 6 | -------------------------------------------------------------------------------- /roles/ssh/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - base 4 | -------------------------------------------------------------------------------- /roles/ssh/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: configure sshd 3 | template: 4 | src: "sshd_config.j2" 5 | dest: "/etc/ssh/sshd_config" 6 | notify: restart sshd 7 | 8 | - name: enable sshd 9 | service: 10 | name: sshd 11 | enabled: true 12 | state: started 13 | -------------------------------------------------------------------------------- /roles/ssh/templates/sshd_config.j2: -------------------------------------------------------------------------------- 1 | Port {{ sshd_port }} 2 | {% for listen_address in listen_addresses %} 3 | ListenAddress {{ listen_address }} 4 | {% endfor %} 5 | # these are always disabled 6 | PermitRootLogin no 7 | PasswordAuthentication no 8 | ChallengeResponseAuthentication no 9 | 10 | Subsystem sftp /usr/libexec/sftp-server 11 | 12 | {% if extra_config %} 13 | {{ extra_config }} 14 | {% endif %} 15 | -------------------------------------------------------------------------------- /roles/supervisor-service/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart supervisor 3 | service: 4 | name: supervisord 5 | state: restarted 6 | -------------------------------------------------------------------------------- /roles/supervisor-service/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - supervisor 4 | -------------------------------------------------------------------------------- /roles/supervisor-service/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install supervisor services 3 | template: 4 | src: "service.conf.j2" 5 | dest: "{{ supervisor_services_dir }}/{{ service_name }}.conf" 6 | notify: restart supervisor 7 | -------------------------------------------------------------------------------- 
/roles/supervisor-service/templates/service.conf.j2: -------------------------------------------------------------------------------- 1 | {#- 2 | Parameters: 3 | 4 | service_name 5 | service name (to be used with the supervisorctl command) 6 | service_dir 7 | directory where the service resides 8 | service_command 9 | command to pass to the wrapper to actually run the service 10 | service_user 11 | user to run the command under 12 | #} 13 | [program:{{service_name}}] 14 | command=/usr/local/bin/supervisor-wrapper {{service_command}} 15 | directory={{service_dir}} 16 | user={{service_user}} 17 | {% if service_environment is defined %} 18 | environment={% for k, v in service_environment.items() %}{{k}}="{{v}}",{% endfor %} 19 | {% endif %} 20 | 21 | # [re]start behaviour 22 | autostart=true 23 | autorestart=true 24 | stopwaitsecs={{service_stopwaitsecs | default(10)}} 25 | stopsignal={{service_stopsignal | default('TERM')}} 26 | # output handling 27 | redirect_stderr=true 28 | stdout_logfile={{supervisor_log_dir}}/{{service_name}}.log 29 | -------------------------------------------------------------------------------- /roles/supervisor/files/supervisor-wrapper: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | # Note: USER and HOME are not set by supervisor, so they are set here if they are 4 | # not provided in the `environment` parameter for the program. 5 | # See: http://supervisord.org/subprocess.html#subprocess-environment 6 | # 7 | # Override USER and HOME even if they are already set. 8 | USER=`id -u -n` 9 | export USER 10 | 11 | # Override HOME as it's set to '/' by supervisor(?) 12 | HOME="$(getent passwd ${USER} | cut -d: -f6)" 13 | export HOME 14 | 15 | # Override PATH as its value is not really clear 16 | PATH="/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin" 17 | export PATH 18 | 19 | exec "$@" 20 | -------------------------------------------------------------------------------- /roles/supervisor/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart supervisor 3 | service: 4 | name: supervisord 5 | state: restarted 6 | -------------------------------------------------------------------------------- /roles/supervisor/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install supervisor package 3 | pkgng: 4 | name: py38-supervisor 5 | state: present 6 | environment: "{{ proxy_env }}" 7 | 8 | - name: make directories for supervisor 9 | file: 10 | path: "{{ item }}" 11 | mode: "0755" 12 | state: directory 13 | with_items: 14 | - "{{ supervisor_services_dir }}" 15 | - "{{ supervisor_log_dir }}" 16 | 17 | - name: update supervisor configuration file 18 | ini_file: 19 | dest: "/usr/local/etc/supervisord.conf" 20 | section: "{{ item.section }}" 21 | option: "{{ item.option }}" 22 | value: "{{ item.value }}" 23 | with_items: 24 | - section: include 25 | option: files 26 | value: "{{ supervisor_services_dir }}/*.conf" 27 | - section: unix_http_server 28 | option: chown 29 | value: "root:wheel" 30 | - section: unix_http_server 31 | option: chmod 32 | value: "0770" 33 | - section: supervisord 34 | option: logfile 35 | value: "{{ supervisor_log_dir }}/supervisord.log" 36 | notify: restart supervisor 37 | 38 | - name: install the wrapper script 39 | copy: 40 | src: "supervisor-wrapper" 41 | dest: "/usr/local/bin/supervisor-wrapper" 42 | mode: "0755" 43 | 44 | - name: enable and start supervisor 45 |
service: 46 | name: supervisord 47 | enabled: true 48 | state: started 49 | -------------------------------------------------------------------------------- /roles/supervisor/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | supervisor_services_dir: "/usr/local/etc/supervisord.conf.d" 3 | supervisor_log_dir: "/var/log/supervisor" 4 | -------------------------------------------------------------------------------- /roles/syslog-aggregator/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload syslog-ng 3 | service: 4 | name: syslog-ng 5 | state: restarted 6 | 7 | -------------------------------------------------------------------------------- /roles/syslog-aggregator/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Configure Buildbot syslog aggregator 2 | --- 3 | - name: disable and stop the syslogd service 4 | service: 5 | name: syslogd 6 | enabled: false 7 | state: stopped 8 | 9 | - name: install syslog-ng 10 | pkgng: 11 | name: syslog-ng 12 | state: present 13 | environment: "{{ proxy_env }}" 14 | 15 | - name: configure syslog-ng 16 | template: 17 | src: "{{ item }}.j2" 18 | dest: "/usr/local/etc/{{ item }}" 19 | with_items: 20 | - syslog-ng-buildbot.conf 21 | - syslog-ng.conf 22 | notify: reload syslog-ng 23 | 24 | - name: install log cleanup crontask 25 | cron: 26 | name: log-cleanup 27 | user: root 28 | job: "find {{ log_root }} -mtime +28 -type f -delete" 29 | minute: 0 30 | hour: 0 31 | state: present 32 | 33 | - name: enable and start syslog-ng service 34 | service: 35 | name: syslog-ng 36 | enabled: true 37 | state: started 38 | -------------------------------------------------------------------------------- /roles/syslog-aggregator/templates/syslog-ng-buildbot.conf.j2: -------------------------------------------------------------------------------- 1 | # Aggregate inputs from hosts on the network, putting them in named files 2 | 3 | options { 4 | create_dirs(yes); 5 | # keep hostnames from log messages (since reverse DNS for private IPs 6 | # doesn't work) 7 | keep_hostname(yes); 8 | }; 9 | 10 | source src_net { 11 | udp(); 12 | tcp(); 13 | }; 14 | 15 | destination per_host { 16 | file("{{ log_root }}/$HOST/$YEAR-$MONTH-$DAY.log" 17 | owner(root) 18 | group(root) 19 | perm(0644) 20 | dir_perm(0755) 21 | create_dirs(yes)); 22 | }; 23 | 24 | log { 25 | source(src_net); 26 | destination(per_host); 27 | }; 28 | 29 | log { 30 | source(src_local); 31 | destination(per_host); 32 | }; 33 | -------------------------------------------------------------------------------- /roles/syslog-aggregator/templates/syslog-ng.conf.j2: -------------------------------------------------------------------------------- 1 | @version:3.7 2 | @include "scl.conf" 3 | @include "syslog-ng-buildbot.conf" 4 | 5 | # Modified from the sample: 6 | # rename 'src' to 'src_local' and omit udp() in that source 7 | # Otherwise, this behaves equivalently to the stock FreeBSD syslog.conf 8 | 9 | # 10 | # options 11 | # 12 | options { chain_hostnames(off); flush_lines(0); threaded(yes); }; 13 | 14 | # 15 | # sources 16 | # 17 | source src_local { system(); internal(); }; 18 | 19 | # 20 | # destinations 21 | # 22 | destination messages { file("/var/log/messages"); }; 23 | destination security { file("/var/log/security"); }; 24 | destination authlog { file("/var/log/auth.log"); }; 25 | destination maillog { file("/var/log/maillog"); }; 26 | 
destination lpd-errs { file("/var/log/lpd-errs"); }; 27 | destination xferlog { file("/var/log/xferlog"); }; 28 | destination cron { file("/var/log/cron"); }; 29 | destination debuglog { file("/var/log/debug.log"); }; 30 | destination consolelog { file("/var/log/console.log"); }; 31 | destination all { file("/var/log/all.log"); }; 32 | destination newscrit { file("/var/log/news/news.crit"); }; 33 | destination newserr { file("/var/log/news/news.err"); }; 34 | destination newsnotice { file("/var/log/news/news.notice"); }; 35 | destination slip { file("/var/log/slip.log"); }; 36 | destination ppp { file("/var/log/ppp.log"); }; 37 | destination console { file("/dev/console"); }; 38 | destination allusers { usertty("*"); }; 39 | #destination loghost { udp("loghost" port(514)); }; 40 | 41 | # 42 | # log facility filters 43 | # 44 | filter f_auth { facility(auth); }; 45 | filter f_authpriv { facility(authpriv); }; 46 | filter f_not_authpriv { not facility(authpriv); }; 47 | #filter f_console { facility(console); }; 48 | filter f_cron { facility(cron); }; 49 | filter f_daemon { facility(daemon); }; 50 | filter f_ftp { facility(ftp); }; 51 | filter f_kern { facility(kern); }; 52 | filter f_lpr { facility(lpr); }; 53 | filter f_mail { facility(mail); }; 54 | filter f_news { facility(news); }; 55 | filter f_security { facility(security); }; 56 | filter f_user { facility(user); }; 57 | filter f_uucp { facility(uucp); }; 58 | filter f_local0 { facility(local0); }; 59 | filter f_local1 { facility(local1); }; 60 | filter f_local2 { facility(local2); }; 61 | filter f_local3 { facility(local3); }; 62 | filter f_local4 { facility(local4); }; 63 | filter f_local5 { facility(local5); }; 64 | filter f_local6 { facility(local6); }; 65 | filter f_local7 { facility(local7); }; 66 | 67 | # 68 | # log level filters 69 | # 70 | filter f_emerg { level(emerg); }; 71 | filter f_alert { level(alert..emerg); }; 72 | filter f_crit { level(crit..emerg); }; 73 | filter f_err { level(err..emerg); }; 74 | filter f_warning { level(warning..emerg); }; 75 | filter f_notice { level(notice..emerg); }; 76 | filter f_info { level(info..emerg); }; 77 | filter f_debug { level(debug..emerg); }; 78 | filter f_is_debug { level(debug); }; 79 | 80 | # 81 | # program filters 82 | # 83 | filter f_ppp { program("ppp"); }; 84 | filter f_slip { program("startslip"); }; 85 | 86 | # 87 | # *.err;kern.warning;auth.notice;mail.crit /dev/console 88 | # 89 | log { source(src_local); filter(f_err); destination(console); }; 90 | log { source(src_local); filter(f_kern); filter(f_warning); destination(console); }; 91 | log { source(src_local); filter(f_auth); filter(f_notice); destination(console); }; 92 | log { source(src_local); filter(f_mail); filter(f_crit); destination(console); }; 93 | 94 | # 95 | # *.notice;authpriv.none;kern.debug;lpr.info;mail.crit;news.err /var/log/messages 96 | # 97 | log { source(src_local); filter(f_notice); filter(f_not_authpriv); destination(messages); }; 98 | log { source(src_local); filter(f_kern); filter(f_debug); destination(messages); }; 99 | log { source(src_local); filter(f_lpr); filter(f_info); destination(messages); }; 100 | log { source(src_local); filter(f_mail); filter(f_crit); destination(messages); }; 101 | log { source(src_local); filter(f_news); filter(f_err); destination(messages); }; 102 | 103 | # 104 | # security.* /var/log/security 105 | # 106 | log { source(src_local); filter(f_security); destination(security); }; 107 | 108 | # 109 | # auth.info;authpriv.info /var/log/auth.log 110 | log { 
source(src_local); filter(f_auth); filter(f_info); destination(authlog); }; 111 | log { source(src_local); filter(f_authpriv); filter(f_info); destination(authlog); }; 112 | 113 | # 114 | # mail.info /var/log/maillog 115 | # 116 | log { source(src_local); filter(f_mail); filter(f_info); destination(maillog); }; 117 | 118 | # 119 | # lpr.info /var/log/lpd-errs 120 | # 121 | log { source(src_local); filter(f_lpr); filter(f_info); destination(lpd-errs); }; 122 | 123 | # 124 | # ftp.info /var/log/xferlog 125 | # 126 | log { source(src_local); filter(f_ftp); filter(f_info); destination(xferlog); }; 127 | 128 | # 129 | # cron.* /var/log/cron 130 | # 131 | log { source(src_local); filter(f_cron); destination(cron); }; 132 | 133 | # 134 | # *.=debug /var/log/debug.log 135 | # 136 | log { source(src_local); filter(f_is_debug); destination(debuglog); }; 137 | 138 | # 139 | # *.emerg * 140 | # 141 | log { source(src_local); filter(f_emerg); destination(allusers); }; 142 | 143 | # 144 | # uncomment this to log all writes to /dev/console to /var/log/console.log 145 | # console.info /var/log/console.log 146 | # 147 | #log { source(src_local); filter(f_console); filter(f_info); destination(consolelog); }; 148 | 149 | # 150 | # uncomment this to enable logging of all log messages to /var/log/all.log 151 | # touch /var/log/all.log and chmod it to mode 600 before it will work 152 | # *.* /var/log/all.log 153 | # 154 | #log { source(src_local); destination(all); }; 155 | 156 | # 157 | # uncomment this to enable logging to a remote loghost named loghost 158 | # *.* @loghost 159 | # 160 | #log { source(src_local); destination(loghost); }; 161 | 162 | # 163 | # uncomment these if you're running inn 164 | # news.crit /var/log/news/news.crit 165 | # news.err /var/log/news/news.err 166 | # news.notice /var/log/news/news.notice 167 | # 168 | #log { source(src_local); filter(f_news); filter(f_crit); destination(newscrit); }; 169 | #log { source(src_local); filter(f_news); filter(f_err); destination(newserr); }; 170 | #log { source(src_local); filter(f_news); filter(f_notice); destination(newsnotice); }; 171 | 172 | # 173 | # !startslip 174 | # *.* /var/log/slip.log 175 | # 176 | log { source(src_local); filter(f_slip); destination(slip); }; 177 | 178 | # 179 | # !ppp 180 | # *.* /var/log/ppp.log 181 | # 182 | log { source(src_local); filter(f_ppp); destination(ppp); }; 183 | -------------------------------------------------------------------------------- /roles/syslog-aggregator/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | log_root: /data/log 3 | -------------------------------------------------------------------------------- /roles/user/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # set this to the text of a public key to allow that key to authenticate 3 | # as this user 4 | authorized_key: '' 5 | -------------------------------------------------------------------------------- /roles/user/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create the user 3 | user: 4 | name: "{{ user_id }}" 5 | comment: "{{ user_name }}" 6 | state: present 7 | 8 | - name: add authorized keys 9 | authorized_key: 10 | user: "{{ user_id }}" 11 | key: "{{ authorized_key }}" 12 | when: authorized_key != '' 13 | 14 | # NOTE: the information will only be available until the next use of `getent` 15 | - name: get user information 16 | getent: 17 | 
database: passwd 18 | key: "{{ user_id }}" 19 | tags: always 20 | -------------------------------------------------------------------------------- /roles/uwsgi/files/uwsgi.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | socket = /tmp/uwsgi.sock 3 | chdir = /usr/www/trac.buildbot.net 4 | wsgi-file = /usr/www/wsgi/trac.buildbot.net.wsgi 5 | processes = 4 6 | threads = 2 7 | stats = 127.0.0.1:9191 8 | -------------------------------------------------------------------------------- /roles/uwsgi/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | - name: reload uwsgi 2 | service: 3 | name: uwsgi 4 | state: reloaded 5 | -------------------------------------------------------------------------------- /roles/uwsgi/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Install uwsgi 2 | pkgng: 3 | name: uwsgi 4 | state: present 5 | 6 | - name: Make uwsgi etc directory 7 | file: 8 | path: /usr/local/etc/uwsgi 9 | state: directory 10 | 11 | - name: Configure uwsgi for trac 12 | copy: 13 | src: uwsgi.ini 14 | dest: /usr/local/etc/uwsgi/uwsgi.ini 15 | notify: reload uwsgi 16 | 17 | - name: Set uwsgi process permissions 18 | lineinfile: 19 | dest: "/etc/rc.conf" 20 | line: "{{item.option}}=\"{{item.value}}\"" 21 | regexp: "^{{item.option}}=.*" 22 | state: present 23 | with_items: 24 | # This maps to www:www. 25 | - option: uwsgi_uid 26 | value: 80 27 | - option: uwsgi_gid 28 | value: 80 29 | 30 | - name: Enable uwsgi 31 | service: 32 | name: uwsgi 33 | state: started 34 | enabled: yes 35 | -------------------------------------------------------------------------------- /roles/vsftp/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # don't configure SSL unless requested 3 | ssl: False 4 | -------------------------------------------------------------------------------- /roles/vsftp/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart vsftpd 3 | service: 4 | name: vsftpd 5 | state: restarted 6 | -------------------------------------------------------------------------------- /roles/vsftp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install vsftpd 3 | pkgng: 4 | name: vsftpd-ssl 5 | state: present 6 | notify: restart vsftpd 7 | environment: "{{ proxy_env }}" 8 | 9 | - name: create ssl dir 10 | file: 11 | path: "{{ ssl_dir }}" 12 | state: directory 13 | when: ssl 14 | 15 | - name: configure vsftpd 16 | template: 17 | src: vsftpd.conf.j2 18 | dest: "/usr/local/etc/vsftpd.conf" 19 | notify: restart vsftpd 20 | 21 | - name: start vsftpd 22 | service: 23 | name: vsftpd 24 | enabled: true 25 | state: started 26 | -------------------------------------------------------------------------------- /roles/vsftp/templates/vsftpd.conf.j2: -------------------------------------------------------------------------------- 1 | listen=YES 2 | listen_port=21 3 | background=YES 4 | 5 | anonymous_enable=YES 6 | anon_root={{ server_root }} 7 | local_enable=NO 8 | write_enable=NO 9 | anon_upload_enable=NO 10 | anon_mkdir_write_enable=NO 11 | anon_other_write_enable=NO 12 | 13 | anon_world_readable_only=YES 14 | connect_from_port_20=YES 15 | hide_ids=YES 16 | pasv_min_port=10000 17 | pasv_max_port=60000 18 | 19 | xferlog_enable=YES 20 | ls_recurse_enable=YES 21 | 
ascii_download_enable=YES 22 | async_abor_enable=YES 23 | 24 | idle_session_timeout=120 25 | data_connection_timeout=300 26 | accept_timeout=60 27 | connect_timeout=60 28 | 29 | local_umask=022 30 | dirmessage_enable=YES 31 | 32 | vsftpd_log_file=/var/log/vsftpd.log 33 | xferlog_file=/var/log/xferlog 34 | dual_log_enable=YES 35 | #xferlog_std_format=NO 36 | nopriv_user=nobody 37 | 38 | setproctitle_enable=YES 39 | 40 | ftpd_banner=Buildbot FTP 41 | 42 | secure_chroot_dir=/var/empty 43 | 44 | ftp_username=ftp 45 | 46 | {% if ssl %} 47 | ssl_enable=YES 48 | allow_anon_ssl=YES 49 | ssl_tlsv1=YES 50 | ssl_sslv2=NO 51 | ssl_sslv3=NO 52 | require_ssl_reuse=NO 53 | ssl_ciphers=HIGH 54 | 55 | rsa_cert_file=/usr/local/etc/nginx/certs/ftp.buildbot.net.crt 56 | rsa_private_key_file=/usr/local/etc/nginx/certs/ftp.buildbot.net.key 57 | {% else %} 58 | ssl_enable=NO 59 | {% endif %} 60 | -------------------------------------------------------------------------------- /roles/vsftp/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ssl_dir: /usr/local/etc/cert 3 | ssl_cert_file: /usr/local/etc/cert/vsftp.crt 4 | ssl_key_file: /usr/local/etc/cert/vsftp.key 5 | -------------------------------------------------------------------------------- /roles/www/files/Dockerfile.build: -------------------------------------------------------------------------------- 1 | FROM docker.io/python:3.9-bookworm 2 | 3 | RUN apt-get update \ 4 | && apt-get install -y \ 5 | git \ 6 | yarnpkg \ 7 | && rm -rf /var/lib/apt/lists/* 8 | 9 | RUN adduser --home /home/user --disabled-password --gecos "" user 10 | 11 | USER user 12 | 13 | ADD build_www.sh /build_www.sh 14 | -------------------------------------------------------------------------------- /roles/www/files/Dockerfile.run: -------------------------------------------------------------------------------- 1 | FROM docker.io/nginx:1.25-bookworm 2 | 3 | ADD nginx.conf /etc/nginx/nginx.conf 4 | 5 | VOLUME /data 6 | -------------------------------------------------------------------------------- /roles/www/files/build_www.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ROOTDIR=/home/user 6 | cd "$ROOTDIR/work" 7 | 8 | NEED_UPDATE=0 9 | 10 | if [ -d buildbot-website/.git ]; then 11 | pushd buildbot-website 12 | git fetch origin 13 | git reset --hard origin/master 14 | 15 | OLD_HEAD=$(cat "$ROOTDIR/results/commit" || echo "no commit") 16 | NEW_HEAD=$(git rev-parse HEAD) 17 | 18 | if [ "$OLD_HEAD" != "$NEW_HEAD" ]; then 19 | NEED_UPDATE=1 20 | fi 21 | 22 | popd 23 | else 24 | rm -rf buildbot-website 25 | git clone https://github.com/buildbot/buildbot-website buildbot-website 26 | NEW_HEAD=$(cd buildbot-website && git rev-parse HEAD) 27 | NEED_UPDATE=1 28 | fi 29 | 30 | if [ "$NEED_UPDATE" = "0" ]; then 31 | echo "Skipped" 32 | exit 0 33 | fi 34 | 35 | pushd buildbot-website 36 | yarnpkg install --pure-lockfile 37 | yarnpkg run compile 38 | 39 | rm -rf "$ROOTDIR/results/html" 40 | rm -rf "$ROOTDIR/results/date" 41 | rm -rf "$ROOTDIR/results/commit" 42 | cp -ar dist "$ROOTDIR/results/html" 43 | date --iso-8601=ns > "$ROOTDIR/results/date" 44 | echo "$NEW_HEAD" > "$ROOTDIR/results/commit" 45 | 46 | echo "Done" 47 | -------------------------------------------------------------------------------- /roles/www/files/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | docs: 5 
| image: nopush/nginx-buildbot-www 6 | build: 7 | context: build_run 8 | dockerfile: Dockerfile.run 9 | restart: always 10 | # exposes port 80 11 | environment: 12 | - VIRTUAL_HOST=buildbot.net,www.buildbot.net 13 | - CERT_NAME=buildbot.net 14 | networks: 15 | - httpproxy 16 | volumes: 17 | - ./content:/data:ro 18 | 19 | networks: 20 | httpproxy: 21 | external: true 22 | -------------------------------------------------------------------------------- /roles/www/files/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes auto; 3 | 4 | error_log /var/log/nginx/error.log notice; 5 | pid /var/run/nginx.pid; 6 | 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | 13 | http { 14 | include /etc/nginx/mime.types; 15 | default_type application/octet-stream; 16 | 17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 18 | '$status $body_bytes_sent "$http_referer" ' 19 | '"$http_user_agent" "$http_x_forwarded_for"'; 20 | 21 | access_log /var/log/nginx/access.log main; 22 | 23 | sendfile on; 24 | 25 | keepalive_timeout 65; 26 | 27 | server { 28 | listen 80; 29 | server_name localhost; 30 | 31 | location / { 32 | root /data/html; 33 | index index.html index.htm; 34 | } 35 | 36 | error_page 500 502 503 504 /50x.html; 37 | location = /50x.html { 38 | root /data/html; 39 | } 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /roles/www/files/refresh_data.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | cd "$(dirname "${BASH_SOURCE[0]}")" 6 | 7 | mkdir -p build_current 8 | pushd build_current 9 | 10 | # container may have different user IDs inside 11 | mkdir -p results 12 | mkdir -p work 13 | chmod 777 results 14 | chmod 777 work 15 | 16 | OLD_BUILDBOT_CONTENT_DATE=$(cat results/date 2> /dev/null || echo "no such file") 17 | 18 | sudo docker build -f Dockerfile.build -t nopush/nginx-buildbot-www-build . 19 | sudo docker run --rm \ 20 | -v "$(pwd)/results:/home/user/results" \ 21 | -v "$(pwd)/work:/home/user/work" \ 22 | nopush/nginx-buildbot-www-build /build_www.sh 23 | 24 | NEW_BUILDBOT_CONTENT_DATE=$(cat results/date 2> /dev/null || echo "no such file") 25 | 26 | if [ "$OLD_BUILDBOT_CONTENT_DATE" != "$NEW_BUILDBOT_CONTENT_DATE" ]; then 27 | rm -rf last_content 28 | cp -ar results/html last_content 29 | NEED_UPDATE=1 30 | fi 31 | 32 | popd 33 | 34 | if [ "$NEED_UPDATE" != "0" ]; then 35 | # Note that content is mounted to container thus it is not removed. 
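# Editor's note on the swap below: the freshly built tree is staged as
# content/new_html, the live content/html is renamed aside, the staged tree is
# renamed into place, and only then is the old copy deleted. content/ is bind
# mounted read-only into the www container (see docker-compose.yml above), so
# the served directory is only missing for the instant between the two mv
# calls. Also note that NEED_UPDATE is only set (to 1) when the content date
# changed; when it is left unset, the test above still fires because an empty
# string is not "0".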
36 | rm -rf content/new_html 37 | cp -ar build_current/last_content content/new_html 38 | 39 | # Use mv to swap data quickly 40 | if [ -d "content/html" ]; then 41 | mv content/html content/old_html 42 | fi 43 | mv content/new_html content/html 44 | rm -rf content/old_html 45 | fi 46 | -------------------------------------------------------------------------------- /roles/www/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create directories 3 | become: yes 4 | become_user: "{{ service_account }}" 5 | file: 6 | path: "{{ compose_root }}/{{ item }}" 7 | state: directory 8 | with_items: 9 | - content 10 | - build_current 11 | - build_current/scripts 12 | - build_current/results 13 | - build_current/work 14 | - build_run 15 | 16 | - name: Copy docker files 17 | become: yes 18 | become_user: "{{ service_account }}" 19 | copy: 20 | dest: "{{ compose_root }}/{{ item }}" 21 | src: "{{ role_path }}/files/{{ item }}" 22 | mode: preserve 23 | loop: 24 | - docker-compose.yml 25 | - refresh_data.sh 26 | 27 | - name: Copy docker files for current version building 28 | become: yes 29 | become_user: "{{ service_account }}" 30 | copy: 31 | dest: "{{ compose_root }}/build_current/{{ item }}" 32 | src: "{{ role_path }}/files/{{ item }}" 33 | mode: preserve 34 | loop: 35 | - Dockerfile.build 36 | - build_www.sh 37 | 38 | - name: Copy docker files for run container building 39 | become: yes 40 | become_user: "{{ service_account }}" 41 | copy: 42 | dest: "{{ compose_root }}/build_run/{{ item }}" 43 | src: "{{ role_path }}/files/{{ item }}" 44 | mode: preserve 45 | loop: 46 | - Dockerfile.run 47 | - nginx.conf 48 | 49 | - name: Restart www container 50 | become: yes 51 | become_user: "{{ service_account }}" 52 | ansible.builtin.command: "docker-compose up -d --build" 53 | args: 54 | chdir: "{{ compose_root }}" 55 | 56 | - name: Install data refresh crontask 57 | ansible.builtin.cron: 58 | name: refresh-www-data 59 | job: "{{ compose_root }}/refresh_data.sh" 60 | user: "{{ service_account }}" 61 | minute: "*/5" 62 | -------------------------------------------------------------------------------- /templates/run-once: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | set -e 4 | {% if proxy_env is defined %} 5 | {% for k, v in proxy_env.items() %} 6 | export {{k}}={{v}} 7 | {% endfor %} 8 | {% endif %} 9 | # Some local names to prevent typos 10 | vault_password_file=~/.vault-password 11 | repo_dir=repo 12 | 13 | # Go to the home directory 14 | cd ~ 15 | 16 | # Install prerequsite packages 17 | pkg install --yes {{ pkg_ansible_version }} git sudo 18 | 19 | # Ansible only runs locally, so clone the repository there 20 | git clone --depth 1 --branch master {{ ansible_git_repository }} ${repo_dir} 21 | 22 | # Prepare the vault password file (we do not want it to show in the 'ps' 23 | # output); make sure it's only readable by root. 24 | old_umask=$(umask) 25 | umask 077 26 | cat > ${vault_password_file} <<-'EOF' 27 | {{ vault_password }} 28 | EOF 29 | umask ${old_umask} 30 | 31 | # Run local.yml. 
This will take care of the real setup 32 | (cd ${repo_dir}; ansible-playbook --vault-password-file=${vault_password_file} local.yml) 33 | 34 | # Undo the damage we caused 35 | rm -rf ${repo_dir} ${vault_password_file} 36 | 37 | touch /root/.run-once 38 | -------------------------------------------------------------------------------- /templates/track-config.sh: -------------------------------------------------------------------------------- 1 | #! /bin/sh 2 | 3 | set -e 4 | 5 | dir="${1}" 6 | commit_message="${2}" 7 | 8 | if ! [ -d "${dir}" ]; then 9 | # nothing to track 10 | exit 0 11 | fi 12 | cd "${dir}" 13 | 14 | if ! [ -d ".git" ]; then 15 | git init 16 | fi 17 | 18 | # Prevent "detected dubious ownership" error in case the repository is modified 19 | # by other users. 20 | git config --global --add safe.directory "${dir}" || true 21 | 22 | # unconditionally update the config as necessary 23 | git config user.email "{{ track_config['default_author_email'] }}" 24 | git config user.name "{{ track_config['default_author_name'] }}" 25 | 26 | # check for changes (porcelain is meant for scripting. Will return empty stdout if clean) 27 | if [ -z "$(git status --porcelain)" ]; then 28 | exit 0 29 | fi 30 | 31 | # use git add --all to capture deletes, too 32 | git add --all . 33 | 34 | # commit the changes 35 | git commit --author='{{ track_config['author_name'] }} <{{ track_config['author_email'] }}>' \ 36 | -m "${commit_message}" 37 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist=lint,ansible-syntax 3 | skipsdist=true 4 | basepython = python3.8 5 | 6 | [testenv:lint] 7 | deps= 8 | flake8 9 | pep8<1.6 10 | commands=flake8 . 11 | 12 | [testenv:ansible-syntax] 13 | deps= 14 | ansible 15 | commands= 16 | ansible-playbook -i localhost local.yml --syntax-check 17 | -------------------------------------------------------------------------------- /track-config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # see #3162 for the background of this technique 3 | - name: track configuration locally 4 | hosts: all 5 | gather_facts: no 6 | connection: local 7 | become: yes 8 | tasks: 9 | - name: install track-config.sh 10 | template: 11 | src: "templates/track-config.sh" 12 | dest: "/root/track-config.sh" 13 | mode: 0755 14 | 15 | - name: track configuration 16 | # when run manually if this fails with weird permission error, it is because you did "su bbinfra" and not "su - bbinfra". Dont ask why :/ 17 | command: "/root/track-config.sh {{ item }} '{{ commit_message }}'" 18 | with_items: "{{ track_config['dirs'] }}" 19 | changed_when: False 20 | -------------------------------------------------------------------------------- /vault-merge.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # vault-merge 4 | # Benjamin Ragheb 5 | 6 | # This shell script handles conflicts generated by attempts to merge encrypted 7 | # Ansible Vault files. Run `git merge` as usual; when git warns of a merge 8 | # conflict, run this command to attempt a merge on the unencrypted versions of 9 | # the file. If there are conflicts, you will be given a chance to correct them 10 | # in $EDITOR. 11 | 12 | # First, we ensure we are inside the working directory of a git repo. 13 | 14 | GIT_ROOT=`git rev-parse --show-toplevel` 15 | if [ $? != 0 ]; then 16 | exit $? 
17 | fi 18 | 19 | # Next, we set a default location for a vault password file, and allow the user 20 | # to override it if desired. 21 | 22 | VAULT_PASSWORD_FILE="$HOME/.vault-password" 23 | 24 | while getopts "p:" opt; do 25 | case $opt in 26 | p) 27 | VAULT_PASSWORD_FILE=$OPTARG 28 | ;; 29 | \?) 30 | # Invalid option (e.g., -p without an argument) 31 | exit 1 32 | ;; 33 | esac 34 | done 35 | shift $(($OPTIND - 1)) 36 | 37 | VAULT_OPT="--vault-password-file=$VAULT_PASSWORD_FILE" 38 | VAULT_FILE=secret.yml 39 | 40 | # If no vault has been provided, abort! 41 | 42 | if [ -z $VAULT_FILE ]; then 43 | echo "Usage: $0 [-p PASSWORD_FILE] VAULT_FILE" 44 | exit 1 45 | fi 46 | 47 | # If the password file doesn't exist, we prompt for the password and save it. 48 | 49 | if [ ! -e $VAULT_PASSWORD_FILE ]; then 50 | read -s -p "Vault Password: " VAULT_PASSWORD 51 | echo 52 | echo "Remembering password in $VAULT_PASSWORD_FILE" 53 | echo $VAULT_PASSWORD > $VAULT_PASSWORD_FILE 54 | else 55 | echo "Using password saved in $VAULT_PASSWORD_FILE" 56 | fi 57 | 58 | # Fetch the base (common ancestor) version of the encrypted vault file, save 59 | # it to a temporary location, and decrypt it. (Hat Tip to the git-merge manual 60 | # page for tipping me off to the `git show :1:path` notation.) 61 | 62 | BASE=`mktemp ${VAULT_FILE}.base.XXXX` 63 | git show :1:${VAULT_FILE} > $BASE 2> /dev/null 64 | if [ $? != 0 ]; then 65 | echo "Path '${VAULT_FILE}' does not have any conflicts." 66 | rm $BASE 67 | exit 1 68 | fi 69 | ansible-vault decrypt $VAULT_OPT $BASE || exit $? 70 | 71 | # Do the same with the current (branch we are merging INTO) version of the vault 72 | # file. 73 | 74 | CURRENT=`mktemp ${VAULT_FILE}.current.XXXX` 75 | git show :2:${VAULT_FILE} > $CURRENT 2> /dev/null 76 | ansible-vault decrypt $VAULT_OPT $CURRENT || exit $? 77 | 78 | # And finally, with the other (branch we are merging FROM) version of the vault. 79 | 80 | OTHER=`mktemp ${VAULT_FILE}.other.XXXX` 81 | git show :3:${VAULT_FILE} > $OTHER 2> /dev/null 82 | ansible-vault decrypt $VAULT_OPT $OTHER || exit $? 83 | 84 | # Now that we have all three versions decrypted, ask git to attempt the merge 85 | # again. If it fails again due to a conflict, open $EDITOR and let the user 86 | # perform a manual merge. 87 | 88 | git merge-file $CURRENT $BASE $OTHER 89 | if [ $? = 0 ]; then 90 | echo "Merge OK" 91 | else 92 | echo "Merge conflict; opening editor to resolve." 93 | $EDITOR $CURRENT 94 | fi 95 | 96 | # Now that we're done, encrypt the file and move it into the repo, and clean up 97 | # the temporary files (they contain secrets!). 98 | 99 | ansible-vault encrypt $VAULT_OPT $CURRENT 100 | cp $CURRENT $VAULT_FILE 101 | rm $BASE $CURRENT $OTHER 102 | 103 | echo "$VAULT_FILE has been updated." 104 | echo " (use \"git add $VAULT_FILE\" to mark as resolved)" 105 | echo " (or re-run this command to retry the merge)" 106 | exit 0 107 | --------------------------------------------------------------------------------
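Editor's note: an illustrative vault-merge.sh session, pieced together from the
script's own comments. The command invocations and the branch name are
assumptions; the script operates on the VAULT_FILE path hard-coded above
(secret.yml -- adjust it if the vault lives elsewhere, for example the
repository's secrets.yml), and the password file defaults to ~/.vault-password
unless overridden with -p.

    git merge some-branch        # git reports a merge conflict in the vault file
    ./vault-merge.sh             # or: ./vault-merge.sh -p /path/to/password-file
    git add secret.yml           # mark the conflict as resolved, as the script suggests
    git commit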