├── .gitignore ├── .rspec ├── .travis.yml ├── Gemfile ├── Gemfile.lock ├── LICENSE.md ├── README.md ├── ansible.cfg ├── ansible ├── dedicated-server.yml ├── docker-web-do-leaf.yml ├── docker-web-vpnclient.yml ├── docker-web.yml ├── freebsd-vpn.yml ├── openbsd-mail.yml ├── roles │ ├── docker │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── clientcrt.yml │ │ │ ├── freebsd.yml │ │ │ ├── main.yml │ │ │ ├── servertls.yml │ │ │ └── ubuntu.yml │ │ ├── templates │ │ │ ├── daemon.json.j2 │ │ │ ├── docker.j2 │ │ │ └── extfile.cnf.j2 │ │ └── vars │ │ │ └── main.yml │ ├── firewall │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── freebsd.yml │ │ │ ├── main.yml │ │ │ ├── openbsd.yml │ │ │ └── ubuntu.yml │ │ ├── templates │ │ │ ├── pf.conf-freebsd.j2 │ │ │ └── pf.conf-openbsd.j2 │ │ └── vars │ │ │ └── main.yml │ ├── ipv6 │ │ ├── handler │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ ├── linux-es-maxmap │ │ └── tasks │ │ │ └── main.yml │ ├── monitoring │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── freebsd.yml │ │ │ ├── main.yml │ │ │ └── ubuntu.yml │ │ └── templates │ │ │ ├── netdata.conf-freebsd.j2 │ │ │ └── netdata.conf-ubuntu.j2 │ ├── ndppd │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ └── ndppd.conf.j2 │ │ └── vars │ │ │ └── main.yml │ ├── openbsd-basic │ │ └── tasks │ │ │ └── main.yml │ ├── openbsd-email │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── clamav.yml │ │ │ ├── dkim.yml │ │ │ ├── dovecot.yml │ │ │ ├── letsencrypt.yml │ │ │ ├── main.yml │ │ │ ├── opensmtpd.yml │ │ │ └── spam.yml │ │ ├── templates │ │ │ ├── certbot-deploy-hook.sh │ │ │ ├── clamd.conf.j2 │ │ │ ├── clamsmtpd.conf.j2 │ │ │ ├── clamsmtpd.rc │ │ │ ├── freshclam.conf.j2 │ │ │ ├── get_certs.sh.j2 │ │ │ ├── proxy_out.conf.j2 │ │ │ ├── rc.conf.local.j2 │ │ │ ├── smtpd.conf.j2 │ │ │ ├── spamassassin-local.cf.j2 │ │ │ ├── user.procmailrc.j2 │ │ │ ├── vdomains.j2 │ │ │ └── vusers.j2 │ │ └── vars │ │ │ └── main.yml │ 
├── private-net │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── freebsd.yml │ │ │ ├── main.yml │ │ │ ├── openbsd.yml │ │ │ └── ubuntu.yml │ │ └── vars │ │ │ └── main.yml │ ├── root-password │ │ ├── tasks │ │ │ ├── enable-password.yml │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ ├── sysdig │ │ └── tasks │ │ │ └── main.yml │ ├── vpn-client │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ └── openvpn.conf.j2 │ │ └── vars │ │ │ └── main.yml │ └── vpn │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ ├── templates │ │ └── server.conf.j2 │ │ └── vars │ │ └── main.yml └── ssh-hostkey-check.yml ├── bee2 ├── dockerfiles ├── AWStatsCGI │ ├── Dockerfile │ ├── httpd.conf │ └── index.shell ├── AWStatsGenerator │ ├── Dockerfile │ └── generate.py ├── CertBot │ ├── Dockerfile │ ├── certbot-domains.py │ └── reload-haproxy.sh ├── DBSetup │ ├── Dockerfile │ ├── dbsetup.py │ └── requirements.txt ├── DuplicityBackup │ ├── Dockerfile │ ├── backup.sh │ └── db_backup.py ├── HAProxySetup │ ├── Dockerfile │ ├── dummy.pem │ └── haproxy-config.py ├── HTTPRedirects │ ├── Dockerfile │ └── redirect-server.py ├── JobScheduler │ ├── Dockerfile │ └── scheduler.py ├── LogRotate │ ├── Dockerfile │ └── rotate ├── Mail │ ├── Dockerfile │ ├── crontab │ ├── lineinfile │ ├── spamassassin-local.cf │ ├── startup │ └── supervisor.conf ├── MatomoGenerator │ ├── Dockerfile │ └── process_logs.py ├── NginxMastodon │ ├── Dockerfile │ ├── launcher.sh │ ├── maintenance.html │ ├── nginx-maintenance.conf │ └── nginx.conf ├── NginxPleroma │ ├── Dockerfile │ └── nginx.conf ├── NginxStatic │ ├── Dockerfile │ └── nginx.conf ├── Radicale │ ├── Dockerfile │ └── config ├── SimpleID │ ├── 000-default.conf │ ├── Dockerfile │ ├── config.php │ └── ports.conf ├── SoapBox │ ├── Dockerfile │ └── startup ├── TinyProxy │ ├── Dockerfile │ └── run.sh ├── TraefikCerts │ ├── Dockerfile │ └── extractor ├── VPNProxy │ ├── Dockerfile │ └── service └── WhoAmI 
│ ├── Dockerfile │ └── index.php ├── examples ├── freebsd-playbook.yml ├── openvpn.conf ├── settings.yml └── ubuntu-playbook.yml ├── lib ├── digitalocean.rb ├── dockerhandler.rb ├── exoscale.rb ├── name.rb ├── passstore.rb ├── provisioner.rb ├── synchandler.rb ├── util.rb └── vultr.rb └── spec ├── docker_addhost_spec.rb ├── docker_capapp_spec.rb ├── docker_multinet_spec.rb ├── docker_spec.rb ├── docker_static_ip_addhost_spec.rb ├── docker_traefik_spec.rb ├── mocks.rb ├── provisioner_spec.rb ├── spec_helper.rb ├── sync_spec.rb ├── test-state.yml └── util_spec.rb /.gitignore: -------------------------------------------------------------------------------- 1 | bin 2 | conf/* 3 | ansible/*.retry 4 | .bundle 5 | .idea 6 | conf 7 | -------------------------------------------------------------------------------- /.rspec: -------------------------------------------------------------------------------- 1 | --color 2 | --require spec_helper 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: ruby 2 | rvm: 3 | - 2.4 4 | script: bundle exec rspec -fd 5 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | gem 'docker-api', '1.34.2' 3 | gem 'git', '1.11.0' 4 | gem 'rspec', '~> 3.0' 5 | gem 'gpgme', '2.0.16' 6 | gem 'public_suffix', '3.0.3' 7 | -------------------------------------------------------------------------------- /Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | diff-lcs (1.5.0) 5 | docker-api (1.34.2) 6 | excon (>= 0.47.0) 7 | multi_json 8 | excon (0.92.4) 9 | git (1.11.0) 10 | rchardet (~> 1.8) 11 | gpgme (2.0.16) 12 | mini_portile2 (~> 2.3) 13 | mini_portile2 (2.8.0) 14 
| multi_json (1.15.0) 15 | public_suffix (3.0.3) 16 | rchardet (1.8.0) 17 | rspec (3.11.0) 18 | rspec-core (~> 3.11.0) 19 | rspec-expectations (~> 3.11.0) 20 | rspec-mocks (~> 3.11.0) 21 | rspec-core (3.11.0) 22 | rspec-support (~> 3.11.0) 23 | rspec-expectations (3.11.0) 24 | diff-lcs (>= 1.2.0, < 2.0) 25 | rspec-support (~> 3.11.0) 26 | rspec-mocks (3.11.1) 27 | diff-lcs (>= 1.2.0, < 2.0) 28 | rspec-support (~> 3.11.0) 29 | rspec-support (3.11.0) 30 | 31 | PLATFORMS 32 | ruby 33 | 34 | DEPENDENCIES 35 | docker-api (= 1.34.2) 36 | git (= 1.11.0) 37 | gpgme (= 2.0.16) 38 | public_suffix (= 3.0.3) 39 | rspec (~> 3.0) 40 | 41 | BUNDLED WITH 42 | 2.1.4 43 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [ssh_connection] 2 | 3 | ssh_args = -o ServerAliveInterval=10 4 | -------------------------------------------------------------------------------- /ansible/dedicated-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: yes 4 | become_user: root 5 | become_method: sudo 6 | vars_files: 7 | - ../{{ config_file }} 8 | roles: 9 | - docker 10 | -------------------------------------------------------------------------------- /ansible/docker-web-do-leaf.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | ansible_python_interpreter: /usr/bin/python3 5 | private_eth: openvpn 6 | public_eth: ens3 7 | vars_files: 8 | - ../{{ config_file }} 9 | roles: 10 | - root-password 11 | - vpn-client 12 | - ndppd 13 | - docker 14 | - firewall 15 | - sysdig 16 | - monitoring 17 | -------------------------------------------------------------------------------- /ansible/docker-web-vpnclient.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | 
ansible_python_interpreter: /usr/bin/python3 5 | private_eth: openvpn 6 | public_eth: ens3 7 | vars_files: 8 | - ../{{ config_file }} 9 | roles: 10 | - root-password 11 | - vpn-client 12 | - ndppd 13 | - docker 14 | - firewall 15 | - sysdig 16 | - monitoring 17 | - linux-es-maxmap 18 | -------------------------------------------------------------------------------- /ansible/docker-web.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | ansible_python_interpreter: /usr/bin/python3 5 | public_eth: ens3 6 | vars_files: 7 | - ../{{ config_file }} 8 | roles: 9 | - root-password 10 | - private-net 11 | - ndppd 12 | - docker 13 | - firewall 14 | - sysdig 15 | - monitoring 16 | -------------------------------------------------------------------------------- /ansible/freebsd-vpn.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars_files: 4 | - ../{{ config_file }} 5 | vars: 6 | - ansible_python_interpreter: /usr/bin/env python 7 | roles: 8 | - root-password 9 | - private-net 10 | - vpn 11 | - firewall 12 | - monitoring 13 | -------------------------------------------------------------------------------- /ansible/openbsd-mail.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars_files: 4 | - ../{{ config_file }} 5 | vars: 6 | - ansible_python_interpreter: /usr/bin/env python 7 | roles: 8 | - root-password 9 | - private-net 10 | - ipv6 11 | - openbsd-basic 12 | - openbsd-email 13 | - firewall 14 | -------------------------------------------------------------------------------- /ansible/roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart docker 2 | service: name=docker state=restarted 3 | -------------------------------------------------------------------------------- 
/ansible/roles/docker/tasks/clientcrt.yml: -------------------------------------------------------------------------------- 1 | - name: Create Docker Client Certificate Signing Request 2 | command: openssl req -subj '/CN=client' -new -key {{ client_key }} -out {{ client_csr }} 3 | args: 4 | creates: "{{ client_csr }}" 5 | 6 | - name: Create Client Configuration 7 | shell: echo extendedKeyUsage = clientAuth > {{ client_ext_file }} 8 | args: 9 | creates: "{{ client_ext_file }}" 10 | 11 | - name: Sign Client Key 12 | command: openssl x509 -req -days {{ cert_expire }} -sha256 -in {{ client_csr }} -CA {{ docker_ca }} -CAkey {{ docker_ca_key }} -CAcreateserial -out {{ client_crt }} -extfile {{ client_ext_file }} 13 | args: 14 | creates: "{{ client_crt }}" 15 | 16 | - name: Copy Client Cert Locally 17 | fetch: 18 | src: "{{ client_crt }}" 19 | dest: "{{ local_client_crt }}" 20 | flat: yes 21 | - file: path={{ client_key }} owner=root group=root mode=0600 22 | 23 | - name: Copy Client Key Locally 24 | fetch: 25 | src: "{{ client_key }}" 26 | dest: "{{ local_client_key }}" 27 | flat: yes 28 | 29 | - name: Set Client Key Permissions 30 | file: path={{ client_key }} owner=root group=root mode=0600 31 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/freebsd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - pkgng: name=docker-freebsd,ca_root_nss state=present 3 | - command: dd if=/dev/zero of=/usr/local/dockerfs bs=1024K count=4000 4 | args: 5 | creates: /usr/local/dockerfs 6 | - command: zpool create -f dockerfs /usr/local/dockerfs 7 | run_once: true 8 | - command: zfs create -o mountpoint=/usr/docker dockerfs/docker 9 | run_once: true 10 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setting 
Private IP Fact 3 | set_fact: private_ip="{{ servers[ansible_hostname].private_ip }}" 4 | 5 | - name: Load State for IPv6 Subnet 6 | include_vars: 7 | file: "{{ provisioner_state_file }}" 8 | name: state 9 | 10 | - name: Ubuntu Docker Tasks 11 | include_tasks: ubuntu.yml 12 | when: ansible_distribution in [ 'Debian', 'Ubuntu' ] 13 | 14 | - name: FreeBSD Docker Tasks 15 | include_tasks: freebsd.yml 16 | when: ansible_distribution == 'FreeBSD' 17 | 18 | - name: Enable/Start Docker Service 19 | service: name=docker enabled=yes state=started 20 | 21 | - name: Create Docker Client Key 22 | command: openssl genrsa -out {{ client_key }} 4096 23 | args: 24 | creates: "{{ client_key }}" 25 | - import_tasks: servertls.yml 26 | - import_tasks: clientcrt.yml 27 | 28 | # Docker needs to be setup before Firewall handlers run 29 | - meta: flush_handlers 30 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/servertls.yml: -------------------------------------------------------------------------------- 1 | - name: Create Docker CA Private Key 2 | command: openssl genrsa -out {{ docker_ca_key }} 4096 3 | args: 4 | creates: "{{ docker_ca_key }}" 5 | 6 | - name: Create Docker CA Public Key 7 | command: openssl req -new -x509 -days {{ cert_expire }} -key {{ docker_ca_key }} -sha256 -out {{ docker_ca }} -subj "/CN=docker-ca" 8 | args: 9 | creates: "{{ docker_ca }}" 10 | 11 | - name: Copy Docker CA Locally 12 | fetch: 13 | src: "{{ docker_ca }}" 14 | dest: "{{ local_ca }}" 15 | flat: yes 16 | 17 | - name: Create Docker Server Certificate 18 | command: openssl genrsa -out {{ server_key }} 4096 19 | args: 20 | creates: "{{ server_key }}" 21 | 22 | - name: Generate Docker Certificate Signing Request 23 | command: openssl req -subj "/CN={{ docker_host }}" -sha256 -new -key {{ server_key }} -out {{ server_csr }} 24 | args: 25 | creates: "{{ server_csr }}" 26 | 27 | - name: Alternative DNS/IP Addresses Configuration 28 | template: 
src=extfile.cnf.j2 dest={{ ext_file }} owner=root group=root mode=0600 29 | 30 | - name: Sign Docker Server Key 31 | command: openssl x509 -req -days {{ cert_expire }} -sha256 -in {{ server_csr }} -CA {{ docker_ca }} -CAkey {{ docker_ca_key }} -CAcreateserial -out {{ server_crt }} -extfile {{ ext_file }} 32 | args: 33 | creates: "{{ server_crt }}" 34 | 35 | - name: Configure Docker to use json Configuration 36 | template: src=docker.j2 dest=/etc/default/docker 37 | 38 | - name: Configure Docker Daemon json 39 | template: src=daemon.json.j2 dest={{ docker_daemon_conf }} 40 | notify: restart docker 41 | - file: path={{ docker_ca_key }} owner=root group=root mode=0600 42 | - file: path={{ server_key }} owner=root group=root mode=0600 43 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Docker 3 | action: apt name=docker.io 4 | 5 | - name: Disable Local Socket Access for Docker 6 | lineinfile: 7 | dest: /lib/systemd/system/docker.service 8 | regexp: '^ExecStart=\/usr\/bin\/dockerd.*' 9 | line: "ExecStart=/usr/bin/dockerd" 10 | register: systemd_target_update 11 | 12 | - name: Reload Systemd targets 13 | command: systemctl daemon-reload 14 | when: systemd_target_update.changed 15 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/daemon.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "tls": true, 3 | "tlsverify": true, 4 | "tlscacert": "{{ docker_ca }}", 5 | "tlscert": "{{ server_crt }}", 6 | "tlskey": "{{ server_key }}", 7 | "ipv6": true, 8 | "fixed-cidr-v6": "{{ docker_ipv6_cidr }}", 9 | "hosts": ["127.0.0.1:2376", "{{ private_ip }}:2376", "unix://"], 10 | "storage-driver": "overlay2" 11 | } 12 | -------------------------------------------------------------------------------- 
/ansible/roles/docker/templates/docker.j2: -------------------------------------------------------------------------------- 1 | DOCKER_OPTS="--config-file {{ docker_daemon_conf }}" 2 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/extfile.cnf.j2: -------------------------------------------------------------------------------- 1 | subjectAltName = DNS:{{ docker_host }},IP:{{ private_ip }},IP:127.0.0.1 2 | -------------------------------------------------------------------------------- /ansible/roles/docker/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_ca_key: /etc/docker/ca-key.pem 3 | docker_ca: /etc/docker/ca.pem 4 | server_key: /etc/docker/server-key.pem 5 | server_csr: /etc/docker/server.csr 6 | server_crt: /etc/docker/server.crt 7 | docker_host: "{{ servers[ansible_hostname].dns.private[0] }}" 8 | docker_daemon_conf: /etc/docker/daemon.json 9 | ext_file: /etc/docker/extfile.cnf 10 | client_key: /etc/docker/client-key.pem 11 | client_csr: /etc/docker/client.csr 12 | client_crt: /etc/docker/client.crt 13 | client_ext_file: /etc/docker/client-extfile.cnf 14 | local_client_crt: "../conf/docker/{{ ansible_hostname }}/docker-client.crt" 15 | local_client_key: "../conf/docker/{{ ansible_hostname }}/docker-client.pem" 16 | local_ca: "../conf/docker/{{ ansible_hostname }}/ca.crt" 17 | provisioner_state_file: "../{{ provisioner.state_file }}" 18 | docker_ipv6_cidr: "{{ state.servers[ansible_hostname].ipv6.subnet }}{{ servers[ansible_hostname].ipv6.docker.suffix_bridge }}" 19 | cert_expire: 6000 20 | -------------------------------------------------------------------------------- /ansible/roles/firewall/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: reload pf 2 | service: name=pf state=reloaded enabled=yes 3 | # perform this without waiting for the response because PF will 
drop the 4 | # SSH connection if its service is not running 5 | async: 0 6 | poll: 10 7 | ignore_errors: true 8 | - name: load openbsd-pf 9 | command: pfctl -f /etc/pf.conf 10 | -------------------------------------------------------------------------------- /ansible/roles/firewall/tasks/freebsd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy Firewall Configuration 3 | template: 4 | src: pf.conf-freebsd.j2 5 | dest: /etc/pf.conf 6 | validate: 'pfctl -n -f %s' 7 | notify: reload pf 8 | - name: Enable Firewall Service 9 | service: name=pf state=started enabled=yes 10 | # perform this without waiting for the response because PF will drop the 11 | # SSH connection if its service is not running 12 | async: 0 13 | poll: 10 14 | ignore_errors: true 15 | -------------------------------------------------------------------------------- /ansible/roles/firewall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: ubuntu.yml 3 | when: ansible_distribution in [ 'Debian', 'Ubuntu' ] 4 | - include_tasks: freebsd.yml 5 | when: ansible_distribution == 'FreeBSD' 6 | - meta: flush_handlers 7 | - include_tasks: openbsd.yml 8 | when: ansible_distribution == 'OpenBSD' 9 | -------------------------------------------------------------------------------- /ansible/roles/firewall/tasks/openbsd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy Firewall Configuration 3 | template: 4 | src: pf.conf-openbsd.j2 5 | dest: /etc/pf.conf 6 | validate: 'pfctl -n -f %s' 7 | notify: load openbsd-pf 8 | -------------------------------------------------------------------------------- /ansible/roles/firewall/tasks/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set UFW default forward policy to ACCEPT 3 | lineinfile: 4 | dest: /etc/default/ufw 5 | 
line: DEFAULT_FORWARD_POLICY="ACCEPT" 6 | regexp: "^DEFAULT_FORWARD_POLICY\\=" 7 | 8 | - name: Enable ufw 9 | ufw: state=enabled policy=allow 10 | 11 | - name: Allow ssh internally 12 | ufw: rule=allow port=22 direction=in proto=tcp interface={{ private_eth }} 13 | 14 | - name: Allow Docker internally 15 | ufw: rule=allow port=2376 direction=in proto=tcp interface={{ private_eth }} 16 | 17 | - name: Allow Monitoring internally 18 | ufw: rule=allow port=19999 direction=in proto=tcp interface={{ private_eth }} 19 | 20 | - name: 80 is open 21 | ufw: rule=allow port=80 proto=tcp 22 | 23 | - name: 443 is open 24 | ufw: rule=allow port=443 proto=tcp 25 | 26 | - name: 80 for Docker IPv6 HAProxy is open 27 | ufw: rule=allow proto=tcp dest={{ docker_ipv6_web }} port=80 28 | 29 | - name: 443 for Docker IPv6 HAProxy is open 30 | ufw: rule=allow proto=tcp dest={{ docker_ipv6_web }} port=443 31 | 32 | - name: Disable default in 33 | ufw: direction=incoming policy=deny 34 | async: 0 35 | poll: 10 36 | ignore_errors: true 37 | -------------------------------------------------------------------------------- /ansible/roles/firewall/templates/pf.conf-freebsd.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | set skip on lo 4 | 5 | block all 6 | 7 | pass in quick on vtnet0 inet6 proto ipv6-icmp 8 | 9 | # allow all from host itself 10 | pass out inet all keep state 11 | pass out inet6 all keep state 12 | 13 | # allow all from private 14 | pass in quick on {{ private_eth }} inet from any to any keep state 15 | 16 | # openvpn 17 | pass in quick proto udp to vtnet0 port openvpn keep state 18 | pass in quick on tun0 inet from any to any keep state 19 | 20 | # ssh 21 | pass in quick proto tcp to vtnet0 port ssh flags S/SA keep state 22 | -------------------------------------------------------------------------------- /ansible/roles/firewall/templates/pf.conf-openbsd.j2: 
-------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | set skip on lo 4 | 5 | block all 6 | 7 | pass in quick on egress inet6 proto ipv6-icmp 8 | 9 | # allow all from host itself 10 | pass out inet all keep state 11 | pass out inet6 all keep state 12 | 13 | pass in on {{ private_eth }} from any to any 14 | pass in on egress proto tcp from any to any port http 15 | pass in on egress proto tcp from any to any port smtp 16 | pass in on egress proto tcp from any to any port smtps 17 | pass in on egress proto tcp from any to any port submission 18 | pass in on egress proto tcp from any to any port imap 19 | pass in on egress proto tcp from any to any port imaps 20 | 21 | # Port build user does not need network 22 | block return out log proto {tcp udp} user _pbuild 23 | -------------------------------------------------------------------------------- /ansible/roles/firewall/vars/main.yml: -------------------------------------------------------------------------------- 1 | docker_ipv6_web: "{{ state.servers[ansible_hostname].ipv6.subnet }}{{ servers[ansible_hostname].ipv6.docker.static_web }}" 2 | openbsd_mirror: https://mirror.vdms.io/pub/OpenBSD/ 3 | -------------------------------------------------------------------------------- /ansible/roles/ipv6/handler/main.yml: -------------------------------------------------------------------------------- 1 | - name: run netstart 2 | command: sh /etc/netstart 3 | -------------------------------------------------------------------------------- /ansible/roles/ipv6/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # - name: FreeBSD IPv6 3 | # block: 4 | # - set_fact: private_eth=vtnet1 5 | # - include_tasks: freebsd.yml 6 | # when: ansible_distribution == 'FreeBSD' 7 | - name: Load State for IPv6 8 | include_vars: 9 | file: "{{ provisioner_state_file }}" 10 | name: state 11 | 12 | - name: OpenBSD IPv6 13 | 
lineinfile: 14 | dest: /etc/hostname.vio0 15 | regexp: ^inet6 alias" 16 | line: "inet6 alias {{ ipv6_alias }} 64" 17 | notify: run netstart 18 | -------------------------------------------------------------------------------- /ansible/roles/ipv6/vars/main.yml: -------------------------------------------------------------------------------- 1 | provisioner_state_file: "../{{ provisioner.state_file }}" 2 | ipv6_alias: "{{ state.servers[ansible_hostname].ipv6.addr }}" 3 | -------------------------------------------------------------------------------- /ansible/roles/linux-es-maxmap/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set max map count for Elastic Search 3 | sysctl: name=vm.max_map_count value=262144 state=present 4 | -------------------------------------------------------------------------------- /ansible/roles/monitoring/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart netdata 2 | service: name=netdata state=restarted 3 | -------------------------------------------------------------------------------- /ansible/roles/monitoring/tasks/freebsd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install netdata (FreeBSD) 3 | pkgng: name=netdata state=present 4 | 5 | - name: Configure netdata for Private Adapter (Ubuntu) 6 | template: src=netdata.conf-freebsd.j2 dest=/usr/local/etc/netdata/netdata.conf 7 | notify: restart netdata 8 | -------------------------------------------------------------------------------- /ansible/roles/monitoring/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ubuntu Monitoring 3 | block: 4 | - include_tasks: ubuntu.yml 5 | when: ansible_distribution in [ 'Debian', 'Ubuntu' ] 6 | 7 | - name: FreeBSD Monitoring 8 | block: 9 | - include_tasks: freebsd.yml 10 | when: 
ansible_distribution == 'FreeBSD' 11 | 12 | - name: Start and Enable netdata Service 13 | service: name=netdata enabled=yes state=started 14 | -------------------------------------------------------------------------------- /ansible/roles/monitoring/tasks/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install netdata (Ubuntu) 3 | action: apt name=netdata 4 | 5 | - name: Configure netdata for Private Adapter (Ubuntu) 6 | template: src=netdata.conf-ubuntu.j2 dest=/etc/netdata/netdata.conf 7 | notify: restart netdata 8 | -------------------------------------------------------------------------------- /ansible/roles/monitoring/templates/netdata.conf-freebsd.j2: -------------------------------------------------------------------------------- 1 | [global] 2 | history = 86400 3 | bind to = {{ servers[ansible_hostname].private_ip }} 4 | disconnect idle web clients after seconds = 3600 5 | run as user = netdata 6 | web files owner = netdata 7 | web files group = netdata 8 | default port = 19999 9 | respect web browser do not track policy = yes 10 | 11 | [plugins] 12 | freebsd = yes 13 | 14 | [plugin:freebsd] 15 | netdata server resources = yes 16 | sysctl = yes 17 | -------------------------------------------------------------------------------- /ansible/roles/monitoring/templates/netdata.conf-ubuntu.j2: -------------------------------------------------------------------------------- 1 | [global] 2 | run as user = netdata 3 | web files owner = root 4 | web files group = root 5 | # Netdata is not designed to be exposed to potentially hostile 6 | # networks.See https://github.com/firehol/netdata/issues/164 7 | bind socket to IP = {{ servers[ansible_hostname].private_ip }} 8 | -------------------------------------------------------------------------------- /ansible/roles/ndppd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Load State for IPv6 
Subnet 3 | include_vars: 4 | file: "{{ provisioner_state_file }}" 5 | name: state 6 | 7 | - name: Install ndppd 8 | action: apt name=ndppd 9 | 10 | - name: Configure ndppd 11 | template: src=ndppd.conf.j2 dest={{ ndppd_conf }} owner=root group=root mode=0644 12 | 13 | - name: Enable proxy_ndp for public adapter 14 | sysctl: name="net.ipv6.conf.{{ public_eth }}.proxy_ndp" value=1 state=present 15 | 16 | - name: Restart ndppd Service 17 | service: name=ndppd state=restarted 18 | -------------------------------------------------------------------------------- /ansible/roles/ndppd/templates/ndppd.conf.j2: -------------------------------------------------------------------------------- 1 | proxy {{ public_eth }} { 2 | timeout 500 3 | ttl 30000 4 | rule {{ state.servers[ansible_hostname].ipv6.subnet }}/80 { 5 | static 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /ansible/roles/ndppd/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | provisioner_state_file: "../{{ provisioner.state_file }}" 3 | ndppd_conf: /etc/ndppd.conf 4 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-basic/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup Mirror 3 | raw: "echo {{ openbsd_mirror }} > /etc/installurl" 4 | 5 | - name: Install colorls 6 | openbsd_pkg: 7 | name: colorls 8 | state: present 9 | 10 | - name: Install nice to have packages 11 | openbsd_pkg: 12 | name: "{{ item }}" 13 | state: present 14 | with_items: ['colorls', 'vim--no_x11', 'fish'] 15 | 16 | - name: Set Kernel open file limits 17 | command: sysctl kern.maxfiles=20000 18 | 19 | - name: Kernel open file limits on reboots 20 | copy: 21 | content: "kern.maxfiles=20000" 22 | dest: /etc/sysctl.conf 23 | force: no 24 | 25 | - name: Setup colorls alias 26 | lineinfile: 27 | dest: "{{ item }}" 28 | 
line: "alias ls='colorls -FGa'" 29 | regexp: "^alias ls='colorls -FGa'$" 30 | state: present 31 | insertafter: EOF 32 | create: True 33 | with_items: ['/root/.profile', '/root/.config/fish/config.fish'] 34 | 35 | - name: Set Root to Fish Shell 36 | user: 37 | name: root 38 | shell: /usr/local/bin/fish 39 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: Reload OpenSMTPD 2 | command: rcctl restart smtpd 3 | - name: Reload clamsmtpd 4 | command: rcctl restart clamsmtpd 5 | - name: Reload clamd 6 | command: rcctl restart clamd 7 | - name: Reload freshclam 8 | command: rcctl restart freshclam 9 | - name: Reload dkimproxy 10 | command: rcctl restart dkimproxy_out 11 | - name: Reload spampd 12 | command: rcctl restart spampd 13 | - name: Reload smtpd 14 | command: rcctl restart smtpd 15 | - name: Reload dovecot 16 | command: rcctl restart dovecot 17 | - name: rebuild login.conf 18 | command: cap_mkdb /etc/login.conf 19 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/tasks/clamav.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup Freshcalm 3 | template: src=freshclam.conf.j2 dest=/etc/freshclam.conf 4 | notify: Reload freshclam 5 | 6 | - name: Run Freshcalm if there's no AV database 7 | command: /usr/local/bin/freshclam 8 | args: 9 | creates: /var/db/clamav/main.cvd 10 | 11 | - name: Create Clamsmtpd RC script 12 | template: src=clamsmtpd.rc dest=/etc/rc.d/clamsmtpd mode=0555 13 | 14 | - name: Setup Clamsmtpd 15 | template: src=clamsmtpd.conf.j2 dest=/etc/clamsmtpd.conf 16 | notify: Reload clamsmtpd 17 | 18 | - name: Fix Clamd var/lib permissions 19 | file: 20 | path: /var/spampd/.spamassassin 21 | state: directory 22 | group: _spampd 23 | mode: 0770 24 | 25 | - name: Setup 
Clamd 26 | template: src=clamd.conf.j2 dest=/etc/clamd.conf 27 | notify: Reload clamd 28 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/tasks/dkim.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup DKIM Proxy 3 | template: src=proxy_out.conf.j2 dest=/etc/dkimproxy_out.conf 4 | notify: Reload dkimproxy 5 | 6 | - name: Copy DKIM Key 7 | copy: src={{ local_dkim_key }} dest=/etc/dkim.key 8 | notify: Reload dkimproxy 9 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/tasks/dovecot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure Dovecot for SSL 3 | lineinfile: dest=/etc/dovecot/conf.d/10-ssl.conf regexp="{{ item.regexp }}" line="{{ item.line }}" 4 | with_items: 5 | - { regexp: "^#?ssl =", line: "ssl = required" } 6 | - { regexp: "^#?ssl_cert =", line: "ssl_cert = <{{ tls_cert }}" } 7 | - { regexp: "^#?ssl_key =", line: "ssl_key = <{{ tls_key }}" } 8 | notify: Reload dovecot 9 | 10 | - name: Increase file limits for dovecot 11 | blockinfile: 12 | path: /etc/login.conf 13 | block: | 14 | dovecot:\ 15 | :openfiles-cur=20000:\ 16 | :openfiles-max=20000:\ 17 | :tc=daemon: 18 | notify: rebuild login.conf 19 | 20 | - name: Dovecot Mail storage configuration 21 | lineinfile: dest=/etc/dovecot/conf.d/10-mail.conf regexp="^#?mail_location" line="mail_location = maildir:~/Mail" 22 | notify: Reload dovecot 23 | 24 | - name: Configure Dovecot IMAP 25 | lineinfile: dest=/etc/dovecot/dovecot.conf regexp="^#?protocols =" line="protocols = imap" 26 | notify: Reload dovecot 27 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/tasks/letsencrypt.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Certbot deploy hook 
3 | template: src=certbot-deploy-hook.sh dest=/usr/local/bin/certbot-deploy-hook.sh mode=0700 4 | 5 | - name: Install Certbot Renew Script 6 | template: src=get_certs.sh.j2 dest={{ cert_script }} mode=0700 7 | 8 | - name: Certbot Cron 9 | cron: 10 | cron_file: /etc/crontab 11 | user: root 12 | name: "certbot renew" 13 | minute: "0" 14 | hour: "2" 15 | job: "{{ cert_script }}" 16 | 17 | - name: Ensure LetsEncrypt Certs 18 | command: "{{ cert_script }}" 19 | args: 20 | creates: "{{ tls_cert }}" 21 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install required packages 3 | openbsd_pkg: 4 | name: "{{ item }}" 5 | state: present 6 | with_items: ['dovecot', 'dkimproxy', 'certbot', 'procmail', 'spampd', 'clamsmtp'] 7 | 8 | - name: ClamAV Tasks 9 | include_tasks: clamav.yml 10 | 11 | - name: Dovecot Tasks 12 | include_tasks: dovecot.yml 13 | 14 | - name: Lets Encrypt Tasks 15 | include_tasks: letsencrypt.yml 16 | 17 | - name: Spam Prevention Tasks 18 | include_tasks: spam.yml 19 | 20 | - name: DKIM Tasks 21 | include_tasks: dkim.yml 22 | 23 | - name: OpenSMTPD Tasks 24 | include_tasks: opensmtpd.yml 25 | 26 | - name: Added e-mail users to system 27 | user: 28 | name: "{{ item }}" 29 | shell: /usr/local/bin/fish 30 | with_items: "{{ users.keys() | list }}" 31 | 32 | - name: Setup Procmail for users 33 | template: 34 | src: user.procmailrc.j2 35 | dest: "/home/{{ item }}/.procmailrc" 36 | owner: "{{ item }}" 37 | group: "{{ item }}" 38 | mode: 0644 39 | force: no 40 | with_items: "{{ users.keys() | list }}" 41 | 42 | - name: Add users Maildir 43 | file: 44 | path: /home/{{ item }}/Mail 45 | state: directory 46 | owner: "{{ item }}" 47 | group: "{{ item }}" 48 | mode: 0700 49 | with_items: "{{ users.keys() | list }}" 50 | 51 | - name: Enable Services 52 | command: "rcctl enable {{ item }}" 53 | 
with_items: ['freshclam', 'clamd', 'cron', 'clamsmtpd', 'dkimproxy_out', 'smtpd', 'dovecot'] 54 | 55 | - name: Start Services 56 | command: "/etc/rc.d/{{ item }} start" 57 | with_items: ['freshclam', 'clamd', 'cron', 'clamsmtpd', 'dkimproxy_out', 'spampd', 'smtpd', 'dovecot'] 58 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/tasks/opensmtpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create domains file 3 | template: src=vdomains.j2 dest={{ vdoms_file }} 4 | notify: Reload OpenSMTPD 5 | 6 | - name: Create users file 7 | template: src=vusers.j2 dest={{ vusers_file }} 8 | notify: Reload OpenSMTPD 9 | 10 | - name: Setup OpenSMTPD 11 | template: src=smtpd.conf.j2 dest=/etc/mail/smtpd.conf 12 | notify: Reload OpenSMTPD 13 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/tasks/spam.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup SpamAssassin 3 | template: src=spamassassin-local.cf.j2 dest=/etc/mail/spamassassin/local.cf 4 | 5 | - name: Setup RC Flags for spampd 6 | lineinfile: 7 | dest: /etc/rc.conf.local 8 | regexp: ^spampd_flags 9 | line: "spampd_flags=\"--port={{ ports.spampd_in }} --relayhost=127.0.0.1:{{ ports.spampd_return }} --tagall -pid=/var/spampd/spampd.pid -aw\"" 10 | create: yes 11 | notify: Reload spampd 12 | 13 | - name: Spampd Boot Cron 14 | cron: 15 | cron_file: /etc/crontab 16 | user: root 17 | name: "start spampd" 18 | special_time: "reboot" 19 | job: "/etc/rc.d/spampd start" 20 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/certbot-deploy-hook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | chmod 0600 $RENEWED_LINEAGE/privkey.pem 4 | 
-------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/clamd.conf.j2: -------------------------------------------------------------------------------- 1 | LogFile /tmp/clamd.log 2 | LogFileMaxSize 2M 3 | LogTime yes 4 | LogRotate yes 5 | LocalSocket /tmp/clamd.socket 6 | User _clamav 7 | MaxRecursion 12 8 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/clamsmtpd.conf.j2: -------------------------------------------------------------------------------- 1 | OutAddress: {{ ports.clam_return }} 2 | Listen: 0.0.0.0:{{ ports.clam_in }} 3 | ClamAddress: /tmp/clamd.socket 4 | Header: X-Virus-Scanned: ClamAV using ClamSMTP 5 | TempDirectory: /tmp 6 | Action: drop 7 | Quarantine: on 8 | TransparentProxy: off 9 | User: _clamav 10 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/clamsmtpd.rc: -------------------------------------------------------------------------------- 1 | #!/bin/ksh 2 | daemon="/usr/local/sbin/clamsmtpd" 3 | daemon_flags="-f /etc/clamsmtpd.conf" 4 | 5 | . 
/etc/rc.d/rc.subr 6 | 7 | rc_reload=NO 8 | 9 | rc_cmd $1 10 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/freshclam.conf.j2: -------------------------------------------------------------------------------- 1 | UpdateLogFile /tmp/freshclam.log 2 | LogFileMaxSize 2M 3 | LogTime yes 4 | DatabaseMirror db.us.ipv6.clamav.net 5 | DatabaseMirror database.clamav.net 6 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/get_certs.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /usr/local/bin/certbot certonly --standalone --preferred-challenges http \ 4 | --http-01-port 80 --agree-tos --renew-by-default --non-interactive \ 5 | --email {{ cert_email }} -d {{ mx }} \ 6 | --deploy-hook /usr/local/bin/certbot-deploy-hook.sh 7 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/proxy_out.conf.j2: -------------------------------------------------------------------------------- 1 | listen 127.0.0.1:{{ ports.dkim_in }} 2 | relay 127.0.0.1:{{ ports.dkim_return }} 3 | domain {{ domains_csv }} 4 | signature dkim(c=relaxed) 5 | signature domainkeys(c=nofws) 6 | keyfile /etc/dkim.key 7 | selector dkim1 8 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/rc.conf.local.j2: -------------------------------------------------------------------------------- 1 | spampd_flags="--port={{ ports.spampd_in }} --relayhost=127.0.0.1:{{ ports.spampd_return }} --tagall -aw" 2 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/smtpd.conf.j2: -------------------------------------------------------------------------------- 1 | pki {{ mx }} key "{{ tls_key }}" 2 | pki {{ mx }} certificate "{{ 
tls_cert }}" 3 | 4 | table vdoms "{{ vdoms_file }}" 5 | table vusers "{{ vusers_file }}" 6 | 7 | listen on lo0 port {{ ports.spampd_return}} tag SPAMD 8 | listen on lo0 port {{ ports.clam_return }} tag CLAM 9 | listen on lo0 port {{ ports.dkim_return }} tag DKIM 10 | 11 | listen on {{ private_eth }} port smtp 12 | 13 | listen on egress port smtp tls pki {{ mx }} 14 | listen on egress port 465 smtps pki {{ mx }} 15 | listen on egress port submission tls-require pki {{ mx }} auth 16 | 17 | accept tagged CLAM for domain virtual deliver to mda "procmail -f -" 18 | accept tagged SPAMD for any relay via "smtp://127.0.0.1:{{ ports.clam_in }}" 19 | accept from any for domain relay via "smtp://127.0.0.1:{{ ports.spampd_in }}" 20 | 21 | accept tagged DKIM for any relay hostname {{ mx }} 22 | accept from local for any relay via smtp://127.0.0.1:{{ ports.dkim_in }} 23 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/spamassassin-local.cf.j2: -------------------------------------------------------------------------------- 1 | rewrite_header Subject ***Spam*** 2 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/user.procmailrc.j2: -------------------------------------------------------------------------------- 1 | LOGFILE="$HOME/procmail.log" 2 | MAILDIR="$HOME/Mail/" 3 | DEFAULT="$HOME/Mail/" 4 | DELIVER="/usr/local/libexec/dovecot/deliver" 5 | 6 | :0 w 7 | * ^X-Spam-Status: Yes 8 | | $DELIVER -m Spam 9 | 10 | :0 w 11 | | $DELIVER 12 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/vdomains.j2: -------------------------------------------------------------------------------- 1 | {% for d in domains %} 2 | {{d}} 3 | {% endfor %} -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/templates/vusers.j2: 
-------------------------------------------------------------------------------- 1 | {% for user,emails in users.items() %} 2 | {% for email in emails %} 3 | {{email}} {{user}} 4 | {% endfor %} 5 | {% endfor %} 6 | -------------------------------------------------------------------------------- /ansible/roles/openbsd-email/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | security: 3 | dkim: "{{ servers[ansible_hostname].mail.security.dkim }}" 4 | spf: "{{ servers[ansible_hostname].mail.security.spf }}" 5 | dmarc: "{{ servers[ansible_hostname].mail.security.dmarc }}" 6 | domains: "{{ servers[ansible_hostname].mail.domains }}" 7 | users: "{{ servers[ansible_hostname].mail.users }}" 8 | mx: "{{ servers[ansible_hostname].mail.mx }}" 9 | cert_email: "{{ servers[ansible_hostname].mail.cert_email }}" 10 | vusers_file: /etc/mail/vusers 11 | vdoms_file: /etc/mail/vdomains 12 | domains_csv: "{{ domains | join(',') }}" 13 | local_dkim_key: "../{{ servers[ansible_hostname].mail.dkim_private }}" 14 | cert_script: /usr/local/bin/get_certs 15 | tls_key: /etc/letsencrypt/live/{{ mx }}/privkey.pem 16 | tls_cert: /etc/letsencrypt/live/{{ mx }}/fullchain.pem 17 | ports: 18 | spampd_in: 10025 19 | clam_in: 10026 20 | spampd_return: 10027 21 | clam_return: 10028 22 | dkim_in: 10029 23 | dkim_return: 10030 24 | -------------------------------------------------------------------------------- /ansible/roles/private-net/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart netif 2 | service: name=netif enabled=yes state=restarted args={{ private_eth }} 3 | - name: restart networking 4 | service: name=networking enabled=yes state=restarted 5 | - name: run netstart 6 | command: sh /etc/netstart 7 | -------------------------------------------------------------------------------- /ansible/roles/private-net/tasks/freebsd.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup Private Network 3 | lineinfile: > 4 | dest=/etc/rc.conf state=present regexp='^ifconfig_{{ private_eth }}.*' 5 | line='ifconfig_{{ private_eth }}="inet {{ private_ip }} netmask 255.255.255.0"' 6 | notify: restart netif 7 | -------------------------------------------------------------------------------- /ansible/roles/private-net/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setting Private IP Fact 3 | set_fact: private_ip="{{ servers[ansible_hostname].private_ip }}" 4 | 5 | - name: Ubuntu Private Networking 6 | block: 7 | - set_fact: private_eth=ens7 8 | - include_tasks: ubuntu.yml 9 | when: ansible_distribution in [ 'Debian', 'Ubuntu' ] 10 | 11 | - name: FreeBSD Private Networking 12 | block: 13 | - set_fact: private_eth=vtnet1 14 | - include_tasks: freebsd.yml 15 | when: ansible_distribution == 'FreeBSD' 16 | 17 | - name: OpenBSD Private Networking 18 | block: 19 | - set_fact: private_eth=vio1 20 | - include_tasks: openbsd.yml 21 | when: ansible_distribution == 'OpenBSD' 22 | 23 | # Private network needs to be setup before Firewall handlers run 24 | - meta: flush_handlers 25 | -------------------------------------------------------------------------------- /ansible/roles/private-net/tasks/openbsd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup Private Network 3 | copy: 4 | content: "inet {{ private_ip }} 255.255.255.0\n!route add -net {{ openvpn.server.subnet }}/24 {{ openvpn.hosts.gateway }}\n" 5 | dest: "/etc/hostname.{{ private_eth }}" 6 | force: no 7 | notify: run netstart -------------------------------------------------------------------------------- /ansible/roles/private-net/tasks/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install ifupdown 3 | 
action: apt name=ifupdown 4 | - blockinfile: 5 | path: /etc/network/interfaces 6 | block: | 7 | auto {{ private_eth }} 8 | iface {{ private_eth }} inet static 9 | address {{ private_ip }} 10 | netmask 255.255.255.0 11 | up route add -net {{ openvpn.server.subnet }}/24 gw {{ openvpn.hosts.gateway }} dev {{ private_eth }} 12 | notify: restart networking 13 | -------------------------------------------------------------------------------- /ansible/roles/private-net/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | private_eth: 'etx_unknown' 3 | -------------------------------------------------------------------------------- /ansible/roles/root-password/tasks/enable-password.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check if password has been generated 4 | local_action: stat path={{ pgp_password_file }} 5 | register: local_pgp_password_file 6 | become: False 7 | 8 | - name : Generate password 9 | local_action: command pwgen -s 25 1 #TODO: settings for length 10 | register: password 11 | when: local_pgp_password_file.stat.exists == False 12 | become: False 13 | 14 | - name: Ensure password directory exists 15 | local_action: file path={{ pgp_password_path }} state=directory 16 | when: local_pgp_password_file.stat.exists == False 17 | become: False 18 | 19 | - name: Encrypt and save password 20 | local_action: shell echo {{ password.stdout }} | gpg --encrypt --armor -r {{ security.pgp_id }} > {{ pgp_password_file }} creates={{ pgp_password_file }} 21 | when: local_pgp_password_file.stat.exists == False 22 | become: False 23 | 24 | - name: Decrypt password 25 | local_action: command gpg --decrypt {{ pgp_password_file }} 26 | register: password 27 | become: False 28 | 29 | # Set it in the VM 30 | 31 | - local_action: shell echo {{ password.stdout }} | mkpasswd --method=SHA-512 --stdin 32 | register: crypt_password 33 | become: False 34 | 35 | - 
user: name=root password={{ crypt_password.stdout }} 36 | -------------------------------------------------------------------------------- /ansible/roles/root-password/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: enable-password.yml 3 | when: security.pgp_id is defined 4 | -------------------------------------------------------------------------------- /ansible/roles/root-password/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | pgp_password_path: "{{ lookup('env','HOME') }}/.password-store/bee2/servers" 3 | pgp_password_file: "{{ pgp_password_path }}/{{ server_name }}.gpg" 4 | -------------------------------------------------------------------------------- /ansible/roles/sysdig/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install Sysdig Repo Key 3 | apt_key: 4 | url: https://s3.amazonaws.com/download.draios.com/DRAIOS-GPG-KEY.public 5 | state: present 6 | 7 | - name: Setup Sysdig Respository 8 | apt_repository: 9 | repo: 'deb https://s3.amazonaws.com/download.draios.com/stable/deb stable-$(ARCH)/' 10 | state: present 11 | filename: 'draios' 12 | update_cache: 'yes' 13 | 14 | - name: Install linux linux-headers 15 | action: shell apt-get install -y linux-headers-$(uname -r) warn=no 16 | 17 | - name: Install Sysdig 18 | apt: name=sysdig state=present 19 | -------------------------------------------------------------------------------- /ansible/roles/vpn-client/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: reload openvpn-client 2 | service: name=openvpn@client state=reloaded 3 | -------------------------------------------------------------------------------- /ansible/roles/vpn-client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - 
name: Install OpenVPN 3 | action: apt name=openvpn 4 | 5 | - name: Copy CA 6 | copy: 7 | src: "{{ local_openvpn_ca }}" 8 | dest: "{{ openvpn_client_dir }}/ca.crt" 9 | mode: 0600 10 | 11 | - name: Copy Client Cert 12 | copy: 13 | src: "{{ local_openvpn_crt }}" 14 | dest: "{{ openvpn_client_dir }}/{{ ansible_hostname }}.crt" 15 | mode: 0600 16 | 17 | - name: Copy Client Key 18 | copy: 19 | src: "{{ local_openvpn_key }}" 20 | dest: "{{ openvpn_client_dir }}/{{ ansible_hostname }}.key" 21 | mode: 0600 22 | 23 | - name: Configure OpenVPN 24 | template: 25 | src: openvpn.conf.j2 26 | dest: "/etc/openvpn/client.conf" 27 | notify: reload openvpn-client 28 | 29 | - name: Enable OpenVPN Service 30 | service: name=openvpn@client enabled=yes state=started 31 | 32 | - meta: flush_handlers 33 | -------------------------------------------------------------------------------- /ansible/roles/vpn-client/templates/openvpn.conf.j2: -------------------------------------------------------------------------------- 1 | client 2 | dev-type tun 3 | dev openvpn 4 | proto udp 5 | remote {{ openvpn_host }} 1194 6 | resolv-retry infinite 7 | nobind 8 | ca /etc/openvpn/client/ca.crt 9 | cert /etc/openvpn/client/{{ ansible_hostname }}.crt 10 | key /etc/openvpn/client/{{ ansible_hostname }}.key 11 | cipher AES-256-CBC 12 | compress lz4-v2 13 | persist-key 14 | persist-tun 15 | status /var/log/openvpn/{{ ansible_hostname }}-status.log 16 | log-append /var/log/openvpn/{{ ansible_hostname }}.log 17 | -------------------------------------------------------------------------------- /ansible/roles/vpn-client/vars/main.yml: -------------------------------------------------------------------------------- 1 | openvpn_host: "{{ servers[ansible_hostname].vpn }}" 2 | local_openvpn_client_keys: ../conf/opvn-clients 3 | local_openvpn_crt: "{{ local_openvpn_client_keys }}/{{ ansible_hostname }}.crt" 4 | local_openvpn_key: "{{ local_openvpn_client_keys }}/{{ ansible_hostname }}.key" 5 | local_openvpn_ca: "{{ 
local_openvpn_client_keys }}/ca.crt" 6 | openvpn_client_dir: /etc/openvpn/client 7 | -------------------------------------------------------------------------------- /ansible/roles/vpn/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: reload openvpn 2 | service: name=openvpn state=reloaded 3 | -------------------------------------------------------------------------------- /ansible/roles/vpn/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - set_fact: private_ip="{{ servers[ansible_hostname].private_ip }}" 3 | 4 | - name: Install OpenVPN (FreeBSD) 5 | pkgng: name=openvpn state=present 6 | 7 | - name: Install easy-rsa 8 | pkgng: name=easy-rsa state=present 9 | 10 | - name: Create Configuration Directory 11 | file: path={{ openvpn_etc }} state=directory 12 | 13 | - name: Copy Default OpenSSL Configuration 14 | copy: remote_src=True src=/usr/local/share/easy-rsa/openssl-easyrsa.cnf.example dest={{ openvpn_etc }}/openssl-easyrsa.cnf 15 | 16 | - name: Symbolic Link x509 Types 17 | file: 18 | src: /usr/local/share/easy-rsa/x509-types 19 | dest: "{{ openvpn_etc }}/x509-types" 20 | state: link 21 | 22 | - name: Initalize PKI 23 | command: easyrsa init-pki 24 | environment: 25 | EASYRSA: "{{ openvpn_etc }}" 26 | args: 27 | creates: "{{ openvpn_pki }}/private" 28 | 29 | - name: Create CA 30 | command: easyrsa build-ca nopass 31 | environment: 32 | EASYRSA_BATCH: true 33 | EASYRSA_REQ_CN: "bee2" 34 | EASYRSA: "{{ openvpn_etc }}" 35 | args: 36 | creates: "{{ openvpn_pki }}/private/ca.key" 37 | 38 | - name: Create DH 39 | command: easyrsa gen-dh 40 | environment: 41 | EASYRSA: "{{ openvpn_etc }}" 42 | args: 43 | creates: "{{ openvpn_pki }}/dh.pem" 44 | 45 | - name: Create Server Key 46 | command: easyrsa build-server-full {{ openvpn_host }} nopass 47 | environment: 48 | EASYRSA: "{{ openvpn_etc }}" 49 | EASYRSA_BATCH: true 50 | args: 51 | creates: "{{ 
openvpn_pki }}/private/{{ openvpn_host }}.key" 52 | 53 | - name: Create Client Keys 54 | command: easyrsa build-client-full {{ item.key }} nopass 55 | environment: 56 | EASYRSA: "{{ openvpn_etc }}" 57 | EASYRSA_PKI: "{{ openvpn_pki }}" 58 | EASYRSA_BATCH: true 59 | with_dict: "{{ openvpn.clients }}" 60 | args: 61 | creates: "{{ openvpn_pki }}/private/{{ item.key }}.key" 62 | 63 | - name: Ensure Client Config Directory Exists 64 | file: 65 | path: "{{ openvpn_client_config }}" 66 | state: directory 67 | 68 | - name: Assign Client IP 69 | copy: 70 | content: "ifconfig-push {{ item.value['ip'] }} 255.255.255.0" 71 | dest: "{{ openvpn_client_config }}/{{ item.key }}" 72 | force: yes 73 | with_dict: "{{ openvpn.clients }}" 74 | 75 | - name: Copy Client Keys 76 | fetch: 77 | src: "{{ openvpn_pki }}/private/{{ item.key }}.key" 78 | dest: "{{ local_openvpn_client_keys }}" 79 | flat: yes 80 | with_dict: "{{ openvpn.clients }}" 81 | 82 | - name: Copy Client Certs 83 | fetch: 84 | src: "{{ openvpn_pki }}/issued/{{ item.key }}.crt" 85 | dest: "{{ local_openvpn_client_keys }}" 86 | flat: yes 87 | with_dict: "{{ openvpn.clients }}" 88 | 89 | - name: Copy CA Cert 90 | fetch: 91 | src: "{{ openvpn_pki }}/ca.crt" 92 | dest: "{{ local_openvpn_client_keys }}" 93 | flat: yes 94 | 95 | - name: Configure OpenVPN Service 96 | template: src=server.conf.j2 dest={{ openvpn_server_conf }} 97 | notify: reload openvpn 98 | 99 | - name: Enable IP Forwarding on Reboots 100 | lineinfile: 101 | path: /etc/rc.conf 102 | line: gateway_enable="YES" 103 | 104 | - name: Enable IP Forwarding Now 105 | command: sysctl net.inet.ip.forwarding=1 106 | 107 | - name: Enable OpenVPN Service 108 | service: name=openvpn enabled=yes state=started 109 | 110 | - meta: flush_handlers 111 | -------------------------------------------------------------------------------- /ansible/roles/vpn/templates/server.conf.j2: -------------------------------------------------------------------------------- 1 | port 1194 2 | proto 
udp 3 | dev tun 4 | topology subnet 5 | ca {{ openvpn_pki }}/ca.crt 6 | cert {{ openvpn_pki }}/issued/{{ openvpn_host }}.crt 7 | key {{ openvpn_pki }}/private/{{ openvpn_host }}.key 8 | dh {{ openvpn_pki }}/dh.pem 9 | server {{ openvpn.server.subnet }} {{ openvpn.server.netmask }} 10 | ifconfig-pool-persist {{ openvpn_etc}}/ipp.txt 11 | keepalive 10 120 12 | cipher {{ openvpn.server.cipher }} 13 | {% for route in openvpn_routes %} 14 | route {{ route }} 15 | push "route {{ route }}" 16 | {% endfor %} 17 | compress lz4-v2 18 | push "compress lz4-v2" 19 | client-to-client 20 | client-config-dir {{ openvpn_client_config }} 21 | persist-key 22 | persist-tun 23 | status /tmp/openvpn-status.log 24 | log-append /var/log/openvpn.log 25 | -------------------------------------------------------------------------------- /ansible/roles/vpn/vars/main.yml: -------------------------------------------------------------------------------- 1 | openvpn_etc: /usr/local/etc/openvpn 2 | openvpn_pki: "{{ openvpn_etc }}/pki" 3 | openvpn_host: "{{ servers[ansible_hostname].dns.public[0] }}" 4 | local_openvpn_client_keys: ../conf/opvn-clients/ 5 | openvpn_server_conf: "{{ openvpn_etc }}/openvpn.conf" 6 | openvpn_routes: "{{ openvpn.server.routes }}" 7 | openvpn_client_config: "{{ openvpn_etc }}/clientconfig" 8 | -------------------------------------------------------------------------------- /ansible/ssh-hostkey-check.yml: -------------------------------------------------------------------------------- 1 | # Taken from https://serverfault.com/a/816621/92872 2 | --- 3 | - name: accept ssh fingerprint automatically for the first time 4 | hosts: all 5 | connection: local 6 | gather_facts: False 7 | 8 | tasks: 9 | - name: Check if known_hosts contains server's fingerprint 10 | command: ssh-keygen -F {{ inventory_hostname }} 11 | register: keygen 12 | failed_when: keygen.stderr != '' 13 | changed_when: False 14 | 15 | - name: Fetch remote SSH key 16 | command: ssh-keyscan -T5 {{ 
inventory_hostname }} 17 | register: keyscan 18 | failed_when: keyscan.rc != 0 or keyscan.stdout == '' 19 | changed_when: False 20 | when: keygen.rc == 1 21 | 22 | - name: Add ssh-key to local known_hosts 23 | lineinfile: 24 | name: ~/.ssh/known_hosts 25 | create: yes 26 | line: "{{ item }}" 27 | when: keygen.rc == 1 28 | with_items: '{{ keyscan.stdout_lines|default([]) }}' 29 | -------------------------------------------------------------------------------- /bee2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | require 'yaml' 4 | require 'optparse' 5 | require 'logger' 6 | require_relative 'lib/vultr' 7 | require_relative 'lib/exoscale' 8 | require_relative 'lib/name' 9 | require_relative 'lib/digitalocean' 10 | require_relative 'lib/dockerhandler' 11 | require_relative 'lib/synchandler' 12 | 13 | log = Logger.new(STDOUT) 14 | 15 | options = {} 16 | opts = OptionParser.new do |opts| 17 | 18 | opts.banner = 'Usage: bee2 [-v] [-h|--help] [-c ] [-p [-r]] [-d COMMAND]' 19 | 20 | opts.on('-c','--config CONFIG','Configuration File') do |config| 21 | options[:config] = config 22 | end 23 | 24 | opts.on('-p', '--provision', 'Provision Servers') do |provision| 25 | options[:provision] = provision 26 | end 27 | 28 | opts.on('-d', '--docker COMMAND', 'Docker Command (use command \'help\' for more information)') do |docker| 29 | options[:docker] = docker 30 | end 31 | 32 | opts.on('-v', '--verbose', 'Debug Logging Output Enabled') do |verbose| 33 | if verbose 34 | log.level = Logger::DEBUG 35 | end 36 | end 37 | 38 | opts.on('-r','--rebuild','Destroy and Rebuild Servers During Provisioning') do |rebuild| 39 | options[:rebuild] = rebuild 40 | end 41 | 42 | opts.on('-a', '--ansible INVENTORY', 'Run Ansible on (public|private)[:server].') do |inventory| 43 | cmd = inventory.split(':') 44 | options[:inventory] = cmd[0] 45 | if not cmd[1].nil? 
46 | options[:server] = cmd[1] 47 | end 48 | end 49 | 50 | opts.on('-s', '--sync SERVERS', 'Sync data between host and server') do |srv| 51 | options[:sync] = srv 52 | end 53 | 54 | opts.on_tail("-h", "--help", "Show this message") do 55 | STDERR.puts opts 56 | exit 57 | end 58 | end 59 | 60 | begin opts.parse! ARGV 61 | rescue *[OptionParser::InvalidOption,OptionParser::InvalidArgument,OptionParser::MissingArgument] => e 62 | STDERR.puts e 63 | STDERR.puts opts 64 | exit 1 65 | end 66 | 67 | if options[:config].nil? 68 | log.fatal('You must specify a config file (-c). Use -h for usage.') 69 | exit 1 70 | end 71 | 72 | config = YAML.load_file(options[:config]) 73 | case config['provisioner']['type'] 74 | when 'vultr' 75 | p = VultrProvisioner.new(config, log) 76 | when 'exoscale' 77 | p = ExoscaleProvisioner.new(config, log) 78 | when 'digitalocean' 79 | p = DigitalOceanProvisioner.new(config, log) 80 | when 'name' 81 | p = NameProvisioner.new(config, log) 82 | end 83 | 84 | if options[:provision] 85 | if options[:rebuild] 86 | puts 'WARNING: Rebuilding will destroy existing servers. Do you wish to contine? [Type: YES]' 87 | if gets.chomp != 'YES' 88 | puts 'Rebuild aborted' 89 | exit 5 90 | end 91 | end 92 | p.provision(rebuild = options[:rebuild]) 93 | end 94 | 95 | 96 | if options[:inventory] 97 | inv_file = config['inventory'][options[:inventory].to_s] 98 | 99 | playbooks = config['servers'].flat_map { |server, info| {server => info['playbook']} }.inject(:update) 100 | log.info("Running Ansible (Inventory: #{options[:inventory]} :: #{inv_file})") 101 | playbooks.each { |server, playbook| 102 | # if not hosts.empty? and not hosts.split(',').include?(server) 103 | # next 104 | # end 105 | next if not options[:server].nil? 
and options[:server] != server 106 | 107 | log.info("Applying #{playbook} to #{server}") 108 | cmd = ['ansible-playbook', '--limit', server, '--key-file', config['provisioner']['ssh_key']['private'], 109 | '-u', 'root', '-e', "config_file=#{options[:config].to_s}", '-i', inv_file, 110 | "ansible/ssh-hostkey-check.yml", "ansible/#{playbook}"] 111 | log.debug(cmd) 112 | Process.fork do 113 | exec(*cmd) 114 | end 115 | Process.wait 116 | } 117 | end 118 | 119 | if options[:sync] 120 | SyncHandler.new(config, log, options[:sync]) 121 | end 122 | 123 | if options[:docker] 124 | docker = DockerHandler.new(config, log, options[:docker], PassStore.new(config)) 125 | end 126 | -------------------------------------------------------------------------------- /dockerfiles/AWStatsCGI/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM httpd 2 | 3 | RUN apt-get update -y && \ 4 | apt-get install -y awstats 5 | 6 | COPY httpd.conf /usr/local/apache2/conf/httpd.conf 7 | 8 | RUN mkdir -p /usr/local/apache2/htdocs/stats 9 | 10 | COPY index.shell /usr/local/apache2/htdocs/index.shell 11 | RUN chmod 755 /usr/local/apache2/htdocs/index.shell 12 | 13 | RUN ln -s /usr/lib/cgi-bin/awstats.pl /usr/local/apache2/htdocs/stats 14 | RUN ln -s ln -s /usr/share/awstats/icon /usr/local/apache2/htdocs/stats 15 | RUN sed -i "s/\/etc\/opt\/awstats/\/awstats\/config/g" /usr/lib/cgi-bin/awstats.pl 16 | RUN rm -f /etc/awstats/awstats.conf 17 | 18 | EXPOSE 8080 19 | -------------------------------------------------------------------------------- /dockerfiles/AWStatsCGI/httpd.conf: -------------------------------------------------------------------------------- 1 | ServerRoot "/usr/local/apache2" 2 | 3 | Listen 8080 4 | 5 | LoadModule mpm_event_module modules/mod_mpm_event.so 6 | LoadModule authn_file_module modules/mod_authn_file.so 7 | LoadModule authn_core_module modules/mod_authn_core.so 8 | LoadModule authz_host_module modules/mod_authz_host.so 9 | 
LoadModule authz_groupfile_module modules/mod_authz_groupfile.so 10 | LoadModule authz_user_module modules/mod_authz_user.so 11 | LoadModule authz_core_module modules/mod_authz_core.so 12 | LoadModule access_compat_module modules/mod_access_compat.so 13 | LoadModule auth_basic_module modules/mod_auth_basic.so 14 | LoadModule reqtimeout_module modules/mod_reqtimeout.so 15 | LoadModule filter_module modules/mod_filter.so 16 | LoadModule mime_module modules/mod_mime.so 17 | LoadModule log_config_module modules/mod_log_config.so 18 | LoadModule env_module modules/mod_env.so 19 | LoadModule cgi_module modules/mod_cgi.so 20 | LoadModule headers_module modules/mod_headers.so 21 | LoadModule setenvif_module modules/mod_setenvif.so 22 | LoadModule version_module modules/mod_version.so 23 | LoadModule unixd_module modules/mod_unixd.so 24 | LoadModule status_module modules/mod_status.so 25 | LoadModule autoindex_module modules/mod_autoindex.so 26 | 27 | 28 | 29 | 30 | LoadModule dir_module modules/mod_dir.so 31 | LoadModule alias_module modules/mod_alias.so 32 | 33 | 34 | User daemon 35 | Group daemon 36 | 37 | 38 | ServerAdmin nobody@example.com 39 | 40 | 41 | AllowOverride none 42 | Require all denied 43 | 44 | 45 | 46 | DocumentRoot "/usr/local/apache2/htdocs" 47 | 48 | AddHandler cgi-script .pl .shell 49 | Options +ExecCGI -Indexes +FollowSymLinks 50 | AuthName "Stats" 51 | AuthType Basic 52 | AuthBasicProvider file 53 | AuthUserFile /awstats/auth/htpasswd 54 | Require valid-user 55 | 56 | 57 | 58 | DirectoryIndex index.shell 59 | 60 | 61 | 62 | Require all denied 63 | 64 | 65 | ErrorLog /proc/self/fd/2 66 | 67 | LogLevel warn 68 | 69 | 70 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined 71 | LogFormat "%h %l %u %t \"%r\" %>s %b" common 72 | 73 | 74 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio 75 | 76 | 77 | CustomLog /proc/self/fd/1 common 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 
RequestHeader unset Proxy early 86 | 87 | 88 | 89 | TypesConfig conf/mime.types 90 | AddType application/x-compress .Z 91 | AddType application/x-gzip .gz .tgz 92 | 93 | 94 | 95 | Include conf/extra/proxy-html.conf 96 | 97 | -------------------------------------------------------------------------------- /dockerfiles/AWStatsCGI/index.shell: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Content-type: text/html" 3 | echo "" 4 | echo "
    " 5 | cd /awstats/config 6 | for f in *; do 7 | site=$(echo $f | sed -e "s/awstats\.//" -e "s/\.conf$//") 8 | echo "
  • $site
  • " 9 | done 10 | echo "
" 11 | -------------------------------------------------------------------------------- /dockerfiles/AWStatsGenerator/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | 3 | RUN apt-get update -y && \ 4 | apt-get install -y awstats python3 \ 5 | libnet-ip-perl libnet-dns-perl xz-utils 6 | 7 | RUN rm -f /etc/awstats/awstats.conf 8 | RUN sed -i "s/\/etc\/opt\/awstats/\/awstats\/config/g" /usr/lib/cgi-bin/awstats.pl 9 | 10 | COPY generate.py / 11 | RUN chmod 700 /generate.py 12 | 13 | ENTRYPOINT ["/generate.py"] 14 | -------------------------------------------------------------------------------- /dockerfiles/AWStatsGenerator/generate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from os import environ, makedirs, path 4 | from subprocess import call 5 | import crypt 6 | import random 7 | 8 | template = """ 9 | LogFile="/usr/share/awstats/tools/logresolvemerge.pl {}/{}.log* |" 10 | LogType=W 11 | LogFormat=1 12 | LogSeparator=" " 13 | SiteDomain="{}" 14 | HostAliases="www.{} {}" 15 | DNSLookup=1 16 | DirData="{}" 17 | DirCgi="/cgi-bin" 18 | DirIcons="/stats/icon" 19 | AllowToUpdateStatsFromBrowser=0 20 | AllowFullYearView=2 21 | EnableLockForUpdate=0 22 | DNSStaticCacheFile="dnscache.txt" 23 | DNSLastUpdateCacheFile="dnscachelastupdate.txt" 24 | SkipDNSLookupFor="" 25 | AllowAccessFromWebToAuthenticatedUsersOnly=0 26 | AllowAccessFromWebToFollowingAuthenticatedUsers="" 27 | AllowAccessFromWebToFollowingIPAddresses="" 28 | CreateDirDataIfNotExists=0 29 | BuildHistoryFormat=text 30 | BuildReportFormat=html 31 | SaveDatabaseFilesWithPermissionsForEveryone=0 32 | PurgeLogFile=0 33 | ArchiveLogRecords=0 34 | KeepBackupOfHistoricFiles=0 35 | DefaultFile="index.php index.html" 36 | SkipHosts="" 37 | SkipUserAgents="" 38 | SkipFiles="" 39 | SkipReferrersBlackList="" 40 | OnlyHosts="" 41 | OnlyUserAgents="" 42 | OnlyUsers="" 43 | OnlyFiles="" 44 
| NotPageList="css js class gif jpg jpeg png bmp ico rss xml swf" 45 | ValidHTTPCodes="200 304" 46 | ValidSMTPCodes="1 250" 47 | AuthenticatedUsersNotCaseSensitive=0 48 | URLNotCaseSensitive=0 49 | URLWithAnchor=0 50 | URLQuerySeparators="?;" 51 | URLWithQuery=0 52 | URLWithQueryWithOnlyFollowingParameters="" 53 | URLWithQueryWithoutFollowingParameters="" 54 | URLReferrerWithQuery=0 55 | WarningMessages=1 56 | ErrorMessages="" 57 | DebugMessages=0 58 | NbOfLinesForCorruptedLog=50 59 | WrapperScript="" 60 | DecodeUA=0 61 | MiscTrackerUrl="/js/awstats_misc_tracker.js" 62 | UseFramesWhenCGI=1 63 | DetailedReportsOnNewWindows=1 64 | Expires=0 65 | MaxRowsInHTMLOutput=1000 66 | Lang="auto" 67 | DirLang="./lang" 68 | ShowMenu=1 69 | ShowSummary=UVPHB 70 | ShowMonthStats=UVPHB 71 | ShowDaysOfMonthStats=VPHB 72 | ShowDaysOfWeekStats=PHB 73 | ShowHoursStats=PHB 74 | ShowDomainsStats=PHB 75 | ShowHostsStats=PHBL 76 | ShowAuthenticatedUsers=0 77 | ShowRobotsStats=HBL 78 | ShowWormsStats=0 79 | ShowEMailSenders=0 80 | ShowEMailReceivers=0 81 | ShowSessionsStats=1 82 | ShowPagesStats=PBEX 83 | ShowFileTypesStats=HB 84 | ShowFileSizesStats=0 85 | ShowDownloadsStats=HB 86 | ShowOSStats=1 87 | ShowBrowsersStats=1 88 | ShowScreenSizeStats=0 89 | ShowOriginStats=PH 90 | ShowKeyphrasesStats=1 91 | ShowKeywordsStats=1 92 | ShowMiscStats=a 93 | ShowHTTPErrorsStats=1 94 | ShowSMTPErrorsStats=0 95 | ShowClusterStats=0 96 | AddDataArrayMonthStats=1 97 | AddDataArrayShowDaysOfMonthStats=1 98 | AddDataArrayShowDaysOfWeekStats=1 99 | AddDataArrayShowHoursStats=1 100 | IncludeInternalLinksInOriginSection=0 101 | MaxNbOfDomain = 10 102 | MinHitDomain = 1 103 | MaxNbOfHostsShown = 10 104 | MinHitHost = 1 105 | MaxNbOfLoginShown = 10 106 | MinHitLogin = 1 107 | MaxNbOfRobotShown = 10 108 | MinHitRobot = 1 109 | MaxNbOfDownloadsShown = 10 110 | MinHitDownloads = 1 111 | MaxNbOfPageShown = 10 112 | MinHitFile = 1 113 | MaxNbOfOsShown = 10 114 | MinHitOs = 1 115 | MaxNbOfBrowsersShown = 10 116 | 
# Taken from https://gist.github.com/eculver/1420227
def salt():
    """Return a string of 2 random characters from the crypt(3) salt alphabet."""
    alphabet = ('abcdefghijklmnopqrstuvwxyz'
                'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                '0123456789/.')
    return ''.join(random.choice(alphabet) for _ in range(2))


def domain_list():
    """Convert the comma-separated DOMAINS env variable to a list of stripped names."""
    return [name.strip() for name in environ['DOMAINS'].split(',')]
'{}.log'.format(d))): 190 | conf = template.format(logs, d, d, d, d, awdata) 191 | cfile = path.join(awconfig, 'awstats.{}.conf'.format(d)) 192 | with open(cfile, 'w') as cf: 193 | print('Configuring {}'.format(d)) 194 | cf.write(conf) 195 | print('Generating Stats for {}'.format(d)) 196 | call(['/usr/bin/awstats', '-config={}'.format(d)]) 197 | -------------------------------------------------------------------------------- /dockerfiles/CertBot/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM certbot/certbot:latest 2 | 3 | ENV DOMAINS example.com example.net 4 | ENV EMAIL noreply@example.com 5 | ENV TEST false 6 | ENV PORT 8080 7 | # 1 Week = 10080 min 8 | ENV RENEW_INTERVAL 10080 9 | ENV HAPROXY_CONTAINER bee2-app-haproxy 10 | 11 | EXPOSE 8080 12 | 13 | RUN apk update 14 | RUN apk add python3 15 | RUN apk add netcat-openbsd 16 | RUN pip3 install check_docker 17 | 18 | COPY certbot-domains.py /opt 19 | RUN chmod 700 /opt/certbot-domains.py 20 | 21 | COPY reload-haproxy.sh /opt 22 | RUN chmod 700 /opt/reload-haproxy.sh 23 | 24 | ENTRYPOINT /opt/certbot-domains.py 25 | -------------------------------------------------------------------------------- /dockerfiles/CertBot/certbot-domains.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from os import path, environ, unlink 4 | from time import sleep 5 | from subprocess import call 6 | from sys import stderr 7 | 8 | CERT_BASE = '/etc/letsencrypt/live' 9 | 10 | def create_pem(domain): 11 | combined = path.join(CERT_BASE, '{}-combined.pem'.format(domain)) 12 | with open(combined, 'w') as f: 13 | files = [path.join(CERT_BASE, domain, 'fullchain.pem'), 14 | path.join(CERT_BASE, domain, 'privkey.pem')] 15 | for file in files: 16 | with open(file) as infile: 17 | f.write(infile.read()) 18 | print('Created {}'.format(combined)) 19 | 20 | def convert_domains(): 21 | domains = [] 22 | """convert 
if __name__ == '__main__':

    domains = convert_domains()
    email = environ['EMAIL']
    dryrun = environ['TEST'].lower() == 'true'
    port = environ['PORT']
    renew = int(environ['RENEW_INTERVAL'])
    haproxy_container = environ['HAPROXY_CONTAINER']

    # HAProxy must be serving before certbot's standalone challenge is reachable.
    while call(['check_docker', '--containers', haproxy_container, '--status', 'running']) != 0:
        print('Waiting on HAProxy to become active')
        sleep(2)

    # Renewal loop: request/renew every domain, then sleep RENEW_INTERVAL minutes.
    while True:
        for d in domains:
            # strip an optional ":port"-style suffix written as "domain/port"
            if '/' in d:
                domain = d.split('/')[0]
            else:
                domain = d
            print('Processing {}'.format(domain))
            cmd = ['/usr/local/bin/certbot', 'certonly', '--standalone',
                   '--preferred-challenges', 'http',
                   '--http-01-port', port, '--agree-tos', '--renew-by-default',
                   '--non-interactive', '--email', email, '-d', domain, '-d',
                   'www.{}'.format(domain)]
            if dryrun:
                cmd.append('--test-cert')
            if call(cmd) == 0:
                create_pem(domain)
                print('Sending reload to HAProxy Docker Container')
                call(['/opt/reload-haproxy.sh'])
            else:
                stderr.write('Error running certbot. Skipping {}\n'.format(domain))

        # HAProxy loads every file in the live directory as a certificate, so the
        # README certbot drops there must go. Fixed: unconditionally unlinking
        # raised FileNotFoundError on every pass after the first.
        readme = path.join(CERT_BASE, 'README')
        if path.exists(readme):
            print('Removing README from live directory')
            unlink(readme)

        print('Sleeping {} minutes...'.format(renew))
        sleep(renew * 60)
db_type] 28 | 29 | # Mysql 30 | 31 | if not 'mysql' in db_list['admin']: 32 | print('No containers configured for mysql') 33 | else: 34 | cnx = mysql.connector.connect(user = 'root', 35 | password = db_list['admin']['mysql'], 36 | host = env['MYSQL_HOST'], 37 | database = 'mysql') 38 | cur = cnx.cursor() 39 | 40 | for my in dbs('mysql'): 41 | (app,password) = normalize(my['container']), my['password'] 42 | print('MySQL DB Setup: {}'.format(app)) 43 | cur.execute("CREATE DATABASE IF NOT EXISTS {}".format(app)) 44 | cur.execute("GRANT ALL ON {}.* TO '{}'@'%' IDENTIFIED BY '{}'".format( 45 | app,app,password)) 46 | 47 | cur.close() 48 | cnx.close() 49 | 50 | # Postgres 51 | 52 | if not 'postgres' in db_list['admin']: 53 | print('No containers configured for postgres') 54 | else: 55 | conn = psycopg2.connect(dbname = 'postgres', user = 'postgres', 56 | password = db_list['admin']['postgres'], 57 | host = env['POSTGRES_HOST']) 58 | conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT) 59 | cur = conn.cursor() 60 | 61 | for pg in dbs('postgres'): 62 | (app,password) = normalize(pg['container']), pg['password'] 63 | sql = "SELECT COUNT(*) = 0 FROM pg_catalog.pg_database WHERE datname = '{}'" 64 | cur.execute(sql.format(app)) 65 | not_exists_row = cur.fetchone() 66 | not_exists = not_exists_row[0] 67 | if not_exists: 68 | print('Postgres DB Setup: {}'.format(app)) 69 | cur.execute('CREATE DATABASE {}'.format(app)) 70 | sql = "CREATE ROLE {} LOGIN PASSWORD '{}'".format(app, password) 71 | cur.execute(sql) 72 | sql = 'GRANT ALL ON DATABASE {} to {}'.format(app, app) 73 | cur.execute(sql) 74 | else: 75 | print('Postgres DB {} Exists'.format(app)) 76 | -------------------------------------------------------------------------------- /dockerfiles/DBSetup/requirements.txt: -------------------------------------------------------------------------------- 1 | mysql-connector-python==8.0.20 2 | psycopg2-binary==2.8.5 3 | 
#!/bin/sh
# Back up Docker volumes with duplicity and SQL dumps with b2, driven by env vars.

# Quote expansions so paths with spaces (or unset vars) don't mangle the commands.
mkdir -p "$BACKUP_SQL_DIR"
mkdir -p "$BACKUP_VOL_DIR"


if [ "$VOLUME_ENABLED" = "enabled" ]; then
    echo "Backing up Docker Volumes in $BACKUP_VOL_DIR"
    # Fixed: source directory was hard-coded to /backup, ignoring BACKUP_VOL_DIR
    # (the Dockerfile defaults it to /backup, so default behavior is unchanged).
    duplicity --no-encryption --allow-source-mismatch --full-if-older-than 1M "$BACKUP_VOL_DIR" "b2://$BB_APP_ID:$BB_APP_KEY@$BB_VOL_BUCKET"
    echo "Removing backups older than 2 months"
    duplicity remove-older-than 2M --force "b2://$BB_APP_ID:$BB_APP_KEY@$BB_VOL_BUCKET"
else
    echo "Volume backups disabled"
fi

if [ "$DATABASE_ENABLED" = "enabled" ]; then
    echo "Backing up SQL Files"
    b2 authorize-account "$BB_APP_ID" "$BB_APP_KEY"
    /db_backup.py
else
    echo "Database backups disabled"
fi
environ as env, unlink, system 3 | import sys 4 | from os.path import join, basename 5 | import json 6 | from datetime import datetime 7 | import subprocess 8 | 9 | # Format: 10 | # { "admin": 11 | # {"mysql": "***", "postgres": "***", "redis": "***"}, 12 | # "containers": 13 | # [ 14 | # {"container": "somename", "db": "postgres", "password": "****"}, 15 | # {"container": "somename", "db": "mysql", "password": "****"} 16 | # ] 17 | # } 18 | db_list = json.loads(env['DATABASE_JSON']) 19 | 20 | def normalize(name): 21 | normalized = name.replace('-', '_') 22 | if name != normalized: 23 | print('{} normalized to {}'.format(name, normalized)) 24 | return normalized 25 | 26 | def run(cmd): 27 | p = subprocess.Popen(cmd, shell=True, stdout=sys.stdout, stderr=sys.stderr) 28 | p.wait() 29 | 30 | # db_type: postgres or mysql 31 | def dbs(db_type): 32 | return [i for i in db_list['containers'] if i['db'] == db_type] 33 | 34 | # Multiple containers that share the same database 35 | # Taken from http://stackoverflow.com/questions/9427163/ddg#9427216 36 | def without_dups(db_type): 37 | return [dict(t) for t in {tuple(d.items()) for d in dbs(db_type)}] 38 | 39 | def timestamp_sql_file(db, db_type): 40 | sqlfile = '{}-{}.{}.sql'.format(db, datetime.utcnow().strftime('%Y-%m-%d-%H:%m:%S'), db_type) 41 | path = env['BACKUP_SQL_DIR'] 42 | return join(path, sqlfile) 43 | 44 | def b2_upload(sqlfile): 45 | cmd = 'b2 upload-file {} {} {}'.format(env['BB_SQL_BUCKET'], sqlfile, basename(sqlfile)) 46 | run(cmd) 47 | unlink(sqlfile) 48 | 49 | # Mysql 50 | 51 | if not 'mysql' in db_list['admin']: 52 | print('No containers configured for mysql') 53 | else: 54 | for my in without_dups('mysql'): 55 | (app,password) = normalize(my['container']), my['password'] 56 | mysql_dump_file = timestamp_sql_file(app, 'my') 57 | print('Dumping {} to {}'.format(app, mysql_dump_file)) 58 | cmd = 'mysqldump -h {} -P 3306 -u root --password={} --result-file={} {}'.format( 59 | env['MYSQL_HOST'], 
db_list['admin']['mysql'], mysql_dump_file, app) 60 | run(cmd) 61 | b2_upload(mysql_dump_file) 62 | 63 | # Postgres 64 | 65 | if not 'postgres' in db_list['admin']: 66 | print('No containers configured for postgres') 67 | else: 68 | for pg in without_dups('postgres'): 69 | (app,password) = normalize(pg['container']), pg['password'] 70 | pg_dump_file = timestamp_sql_file(app, 'pg') 71 | 72 | print('Dumping {} to {}'.format(app, pg_dump_file)) 73 | cmd = 'env PGPASSWORD="{}" pg_dump -Fc -d {} -h {} -f {} -U postgres'.format( 74 | db_list['admin']['postgres'], app, env['POSTGRES_HOST'], pg_dump_file 75 | ) 76 | run(cmd) 77 | b2_upload(pg_dump_file) 78 | -------------------------------------------------------------------------------- /dockerfiles/HAProxySetup/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3 2 | 3 | ENV CERTBOT_CONTAINER bee2-app-certbot 4 | ENV AWSTATS_CONTAINER disabled 5 | ENV DOMAINS example.com 6 | 7 | VOLUME ["/etc/haproxy", "/etc/letsencrypt"] 8 | 9 | WORKDIR /usr/src/app 10 | 11 | COPY dummy.pem . 12 | COPY haproxy-config.py . 
13 | RUN chmod 700 haproxy-config.py 14 | CMD [ "python", "./haproxy-config.py" ] 15 | -------------------------------------------------------------------------------- /dockerfiles/HAProxySetup/dummy.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDXTCCAkWgAwIBAgIJAPuTl12XrU78MA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV 3 | BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX 4 | aWRnaXRzIFB0eSBMdGQwHhcNMTcxMTAxMTk1NTE0WhcNMTcxMjAxMTk1NTE0WjBF 5 | MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50 6 | ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB 7 | CgKCAQEAufi0w6cvHY1izihDafe3PXNCKusLFb3AmUMiRe2XfM/FJDt3RLBeCqlX 8 | zed8pzyPb2f+r8gT3XtUxyE5bE1H2Vs0Mu9RPpSONQCCeiK9NDdkXN+VgCLKsfOA 9 | xqW9UeYPA4/x32cQJvqKw3EmZ53hujea9vn4J/KpPemJpBSjTMdsHFSuVTl5JhRJ 10 | 82N53jeAWCzrnODujVWsCT/c/np37UGOUFYcM3APihqRIc/KcUK/BTX/eK5SmjQS 11 | UM3R3a/mTpBee2mzB0OOXUEde4qJsCAq6ZkH/Wf8qIcxEogYrXpF/fdL/unUwLzC 12 | dYBftxwNal7iiPZ54yvoBDLkI2bhvQIDAQABo1AwTjAdBgNVHQ4EFgQUZV6mxXR2 13 | zG/CG3Bpmh6jqik0OxwwHwYDVR0jBBgwFoAUZV6mxXR2zG/CG3Bpmh6jqik0Oxww 14 | DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAj1tt6M2crPrMw0Db+SVC 15 | hndyyRvOna3k/n0m+450D334rikmH8m/Zuz8r9RsmdCa9RBAVuHdTo3wbI9xnw/U 16 | /XnNPB6IZZtwHHGE4afe0OLD2Cd3I6UoWbZ0NcWswrN56+1nKuxcMpJR4jT/TPSm 17 | 2s7T4MFWj2aV4cM62DdAphRPVZJVdBXRNv5xOxdLXB827pSMsgy02KR2zPQ8O/Yg 18 | AQTh97pu7ysgMrKbcz95toxiZNy2CN46gpa8v3S2sP+gCR2yxaF2/ZGbHcHT5mSC 19 | zSSZkvlsf2fxAQrlifVVSE+z3hLSdnFP5nVK/02KuPrjTTKSjm3AErPeKw1pfpF8 20 | Tg== 21 | -----END CERTIFICATE----- 22 | -----BEGIN PRIVATE KEY----- 23 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC5+LTDpy8djWLO 24 | KENp97c9c0Iq6wsVvcCZQyJF7Zd8z8UkO3dEsF4KqVfN53ynPI9vZ/6vyBPde1TH 25 | ITlsTUfZWzQy71E+lI41AIJ6Ir00N2Rc35WAIsqx84DGpb1R5g8Dj/HfZxAm+orD 26 | cSZnneG6N5r2+fgn8qk96YmkFKNMx2wcVK5VOXkmFEnzY3neN4BYLOuc4O6NVawJ 27 | P9z+enftQY5QVhwzcA+KGpEhz8pxQr8FNf94rlKaNBJQzdHdr+ZOkF57abMHQ45d 28 | 
QR17iomwICrpmQf9Z/yohzESiBitekX990v+6dTAvMJ1gF+3HA1qXuKI9nnjK+gE 29 | MuQjZuG9AgMBAAECggEBAKOOjjw8vv04qZsyYgfCaw/KXHkuenNGjSq9llj12nNU 30 | r8dr3Ts1+Z9ay/Q1ELZsVqHeTmn+hyoVG7x+BeiwFu6cfc4GF+f38xG+PwJ3pvtT 31 | dBsSMIOfFbFnvOv4PAbZVH8Hi+u00snVM1EsJGCgxDNoUAvCUHOhwSLIT9tUo0IT 32 | 5tqgHATYdKWmch9maMXMYCrVy4vE8Z2p7vv1MwKwURLtW1e6Hs2bWm6ZuDpud5lY 33 | kZ//rZlPsrK9/T7sdGugFpNsQAm0cQ//AoRwUQXDXOfwRdRehDpMxeZi8ThkWrii 34 | TkpXO8m3OgoR4FH7RTzMWehB0VwdcQfve8ncT8EGDGkCgYEA9qw2vk4qflkCqw2b 35 | r9xAKAg5VZx+Zi2cjjCkFhE5Zqo2MQ3eSKMzBG0B9vF0RfColn2OHJJUTFyPoYt1 36 | 9lrZFWAejmu+YDbWYgnDp3+xki5cNGMY4mWFwO1IzqPiK8PUmZJhv8DIFvUCd3+9 37 | lRE2PvzlNGYF4hqstYfRJXKYzMMCgYEAwQDn8ZY012bWjLAkcDVOFDo3A+wKY59D 38 | 8lHcbGZ73YD73Lnt7szNX+uOBfvYyTIVl7LQChZb0ZbQ5m5xHWVqkRCK+MyJvgM6 39 | acpORtCF9fkl7RIs/8HA/f09qGgjhsn7xcmNC9NPEpQch0tOaN0EyYfHeHlytNej 40 | 4F/Cn/ngr38CgYAbbG3xnlMPFOo9BhhG5t+jE7b8E5fh2DRNgo7bfTK6fTXlIjuS 41 | K8KiPVQ4lgNtOyIjCPZhqssAalH1jcGhj9wOik8bWlMh8zbGU3uTBZlCPTE3CcVc 42 | dhj2p+quPn4Ii475O9LuNIqKMtcBXgw/pFg5jHgW6TjCOGD3FZBk72aZbQKBgCT0 43 | zHT5AxM/0NNNoxyNSG8dGo24s+CRjYwn2qIzGYK3gQKp44J3WtgEHcTNjZww5+uN 44 | RMK5t0kxrLyC50vusHIojcPIPpPQwG2jXEu4//2ygG2drlomupKn3BeUIUFdCcW8 45 | qzJZeumezLunUtNHhvQj+LD76FfydqGvFHBXg4t3AoGBAKNbbsybAp+v/6ktbaO/ 46 | EKrDHFjEekaGwvjPPLu3clCfDZrZiHe7cfozWgVuaejFG8pC7McgpPxZwqB86Sr6 47 | Pm1zwiTu699XZhO2iSKnt3NKG1RH20d0V8LNuC0/CEqZZL4MDhWFkxUPA8ZEyLTM 48 | A2HyEcnO0//+l3TnZf1eq2QQ 49 | -----END PRIVATE KEY----- 50 | -------------------------------------------------------------------------------- /dockerfiles/HAProxySetup/haproxy-config.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from os import environ, makedirs, path 4 | from shutil import copyfile 5 | import socket 6 | import sys 7 | 8 | template = """ 9 | global 10 | #log /dev/stdout local0 11 | #log /dev/stdout local1 notice 12 | #stats socket /run/haproxy/admin.sock mode 660 level admin 13 | stats timeout 30s 14 | 
ssl-default-bind-options no-sslv3 no-tls-tickets force-tlsv12 15 | ssl-default-bind-ciphers AES128+EECDH:AES128+EDH 16 | tune.ssl.default-dh-param 2048 17 | defaults 18 | log global 19 | option dontlognull 20 | timeout connect 5000 21 | timeout client 50000 22 | timeout server 50000 23 | timeout tunnel 3600s 24 | #errorfile 400 /usr/local/etc/haproxy/errors/400.http 25 | #errorfile 403 /usr/local/etc/haproxy/errors/403.http 26 | #errorfile 408 /usr/local/etc/haproxy/errors/408.http 27 | #errorfile 500 /usr/local/etc/haproxy/errors/500.http 28 | #errorfile 502 /usr/local/etc/haproxy/errors/502.http 29 | #errorfile 503 /usr/local/etc/haproxy/errors/503.http 30 | #errorfile 504 /usr/local/etc/haproxy/errors/504.http 31 | #maxconn 4096 32 | mode http 33 | option http-keep-alive 34 | 35 | frontend http 36 | bind :::80 v4v6 37 | mode http 38 | 39 | # Let's Encrypt 40 | acl is_letsencrypt path_beg -i /.well-known/acme-challenge/ 41 | 42 | # SSL Redirect 43 | redirect scheme https if !{{ ssl_fc }} !is_letsencrypt 44 | 45 | use_backend bk_letsencrypt if is_letsencrypt 46 | 47 | frontend https 48 | # TLS/SNI 49 | bind :::443 v4v6 ssl crt /etc/letsencrypt/live 50 | mode http 51 | 52 | http-request redirect prefix http://%[hdr(host),regsub(^www\.,,i)] code 301 if {{ hdr_beg(host) -i www. 
def map_domains():
    """Parse the DOMAINS env variable into a container -> domain-list mapping.

    'bee2-app-nginx-static:dyject.com,samathia.com bee2-app-php:example.org'
    becomes {'bee2-app-nginx-static': ['dyject.com', 'samathia.com'],
             'bee2-app-php': ['example.org']}.
    """
    return {entry.split(':')[0]: entry.split(':')[1].split(',')
            for entry in environ['DOMAINS'].split(' ')}
certbot_container = environ['CERTBOT_CONTAINER'] 129 | domian_map = map_domains() 130 | config = template.format(awstats_config(), 131 | ssl_vhosts(domian_map), 132 | ssl_backends(domian_map), 133 | certbot_container, 134 | awstats_backend()) 135 | 136 | live_crt = '/etc/letsencrypt/live' 137 | if not path.exists(live_crt): 138 | print('Creating Letsencrypt Live Directory') 139 | makedirs(live_crt) 140 | 141 | copyfile('dummy.pem', path.join(live_crt, 'dummy.pem')) 142 | print('Writing HAProxy Configuration') 143 | with open('/etc/haproxy/haproxy.cfg', 'w') as fd: 144 | fd.write(config) 145 | 146 | # reload HAProxy 147 | print('Reloading HAProxy') 148 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 149 | sock.connect('/var/run/docker.sock') 150 | sock.sendall(str.encode('POST /containers/{}/kill?signal=HUP HTTP/1.0\r\n\n'.format(environ['HAPROXY_CONTAINER']))) 151 | 152 | print('Done') 153 | -------------------------------------------------------------------------------- /dockerfiles/HTTPRedirects/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3 2 | 3 | WORKDIR /usr/src/app 4 | 5 | #COPY requirements.txt ./ 6 | #RUN pip install --no-cache-dir -r requirements.txt 7 | 8 | COPY redirect-server.py . 
class RedirectHandler(http.server.SimpleHTTPRequestHandler):
    """Answers every request with a 301 to the mapped https:// target, or 404."""

    def redirect_to(self, domain):
        """Look up `domain` in the REDIRECTS env var and return its target.

        Returns the target hostname, or None when no mapping exists.
        NOTE(review): REDIRECTS is assumed to be a Python list literal of
        'from:to' strings (it was previously eval()'d) — confirm against the
        container configuration.
        """
        import ast
        # Fixed: eval() on an environment variable executes arbitrary code;
        # ast.literal_eval only accepts Python literals.
        for r in ast.literal_eval(env['REDIRECTS']):
            (d_from, d_to) = r.split(':')
            if d_from == domain:
                return d_to
        return None

    def do_GET(self):
        self.redirect()

    def do_POST(self):
        self.redirect()

    def do_PUT(self):
        self.redirect()

    def do_DELETE(self):
        self.redirect()

    def redirect(self):
        """Send 301 to https://<target> when the Host header is mapped, else 404."""
        to = self.redirect_to(self.headers['Host'])
        if to is not None:
            self.send_response(301)
            self.send_header('Location', 'https://{}'.format(to))
            self.end_headers()
        else:
            self.send_response(404)
            self.end_headers()
            self.wfile.write('Not Found'.encode('utf-8'))
#!/usr/bin/env python3
"""Write a root crontab from RUN_<NAME>/WHEN_<NAME> env pairs and run crond."""

from os import environ
import subprocess


def build_tasks(env):
    """Collect RUN_<NAME>/WHEN_<NAME> env pairs into {name: {'container', 'schedule'}}.

    RUN_<NAME> holds the container to start; WHEN_<NAME> holds its cron schedule.
    """
    tasks = {}
    for k, v in env.items():
        if k.startswith('RUN_'):
            # Fixed: split('_')[1] truncated names containing underscores, so
            # RUN_MY_JOB and RUN_MY_TASK both collapsed to task 'MY'.
            name = k.split('_', 1)[1]
            tasks.setdefault(name, {})['container'] = v
        elif k.startswith('WHEN_'):
            name = k.split('_', 1)[1]
            tasks.setdefault(name, {})['schedule'] = v
    return tasks


if __name__ == '__main__':

    tasks = build_tasks(environ)

    with open('/etc/crontabs/root', 'w') as ctab:
        for name, cmds in tasks.items():
            ctab.write('{}\tdocker start {}\n'.format(cmds['schedule'], cmds['container']))

    # Run cron in the foreground so the container stays alive.
    subprocess.call(['crond', '-f'])
log.endswith('.log'): 23 | print('Adding logrotate config for {}'.format(log)) 24 | config.write(template.format(log=log)) 25 | config.close() 26 | 27 | print('Running Logrotate') 28 | system('/usr/sbin/logrotate /etc/logrotate.conf') 29 | 30 | # reload HAProxy 31 | print('Reloading Nginx') 32 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 33 | sock.connect('/var/run/docker.sock') 34 | sock.sendall(str.encode('POST /containers/{}/kill?signal=USR1 HTTP/1.0\r\n\n'.format(environ['NGINX_CONTAINER']))) 35 | 36 | print('Done') 37 | -------------------------------------------------------------------------------- /dockerfiles/Mail/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.13.6 2 | 3 | RUN apk update && \ 4 | apk add opensmtpd supervisor certbot dkimproxy clamav-libunrar spamassassin \ 5 | dovecot-pgsql dovecot perl-mail-spamassassin freshclam clamsmtp clamav-daemon opensmtpd-table-passwd && \ 6 | apk add fdm --repository=http://dl-cdn.alpinelinux.org/alpine/edge/testing 7 | 8 | 9 | # SpamPD in alpine repo is out of date 10 | RUN wget https://github.com/mpaperno/spampd/archive/refs/tags/2.61.tar.gz 11 | RUN echo "91e60f10745ea4f9c27b9e57619a1bf246ab9a88ea1b88c4f39f8af607e2dbae 2.61.tar.gz" | sha256sum -c 12 | RUN tar xvfz 2.61.tar.gz 13 | RUN rm 2.61.tar.gz 14 | 15 | COPY spamassassin-local.cf /etc/mail/spamassassin/local.cf 16 | 17 | COPY supervisor.conf /etc/supervisord.conf 18 | COPY crontab /var/spool/cron/crontabs/root 19 | 20 | COPY lineinfile /usr/share/misc/lineinfile 21 | 22 | RUN adduser -h /mail/spool -s /bin/false -D -u 2000 -g 2000 vmail 23 | 24 | VOLUME ["/mail"] 25 | 26 | STOPSIGNAL SIGTERM 27 | 28 | COPY startup / 29 | RUN chmod 755 /startup 30 | 31 | CMD ["/startup"] 32 | -------------------------------------------------------------------------------- /dockerfiles/Mail/crontab: -------------------------------------------------------------------------------- 1 | 1 1 * * 1 
/mail/bin/get_certs 2 | 1 */12 * * * /usr/bin/freshclam --foreground --config=/mail/config/freshclam.conf --daemon-notify=/mail/config/clamd.conf 3 | 30 */12 * * * sa-update -v --updatedir /mail/db/spamassassin -------------------------------------------------------------------------------- /dockerfiles/Mail/lineinfile: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Taken from: https://gist.github.com/kokumura/a6d819ddcb4efe54c5541fc15e1d0347 3 | 4 | # Ansible 'lineinfile' like function in Shell Script. 5 | # Works on both Bash and Zsh. 6 | 7 | function lineinfile(){ 8 | if [[ $# != 3 ]];then 9 | local THIS_FUNC_NAME="${funcstack[1]-}${FUNCNAME[0]-}" 10 | echo "$THIS_FUNC_NAME - 3 arguments are expected. given $#. args=[$@]" >&2 11 | echo "usage: $THIS_FUNC_NAME PATTERN LINE FILE" >&2 12 | return 1 13 | fi 14 | local PATTERN="$1" 15 | local LINE="$2" 16 | local FILE="$3" 17 | if grep -E -q "${PATTERN}" "${FILE}" ;then 18 | 19 | ## solution 1: works with GNU sed well, but not works with BSD sed. 20 | # sed -E -i '' "/${PATTERN//\//\\/}/c${LINE}" "${FILE}" 21 | 22 | ## solution 2: works with both (GNU|BSD) sed, but get useless *.bak file generated. 23 | # sed -E -i.bak "/${PATTERN//\//\\/}/c\\"$'\n'"${LINE}" "${FILE}" 24 | 25 | ## solution 3: give up to use sed, using perl instead. 26 | PATTERN="${PATTERN}" LINE="${LINE}" perl -i -nle 'if(/$ENV{"PATTERN"}/){print $ENV{"LINE"}}else{print}' "${FILE}" 27 | 28 | else 29 | echo "$LINE" >> "$FILE" 30 | fi 31 | } 32 | 33 | ###################### 34 | # example 35 | ###################### 36 | 37 | # write some lines to 'test.txt' 38 | #cat < test.txt 39 | #foo = FOO1 # first occurence 40 | #bar = BAR 41 | #foo = FOO2 # second occurence 42 | #EOF 43 | 44 | # usage: lineinfile PATTERN LINE FILE 45 | 46 | # if some lines in FILE matches PATTEN, all of them are replaced with LINE. 47 | #lineinfile '^foo\s*=\s*' "foo = POO # changed!" 
test.txt 48 | 49 | # if no lines in FILE matches PATTERN, LINE is appended to end of FILE. 50 | #lineinfile '^baz\s*=' "baz = BAZ" test.txt 51 | 52 | #cat test.txt 53 | # now 'test.txt' will contain: 54 | # 55 | # foo = POO # changed! 56 | # bar = BAR 57 | # foo = POO # changed! 58 | # baz = BAZ 59 | -------------------------------------------------------------------------------- /dockerfiles/Mail/spamassassin-local.cf: -------------------------------------------------------------------------------- 1 | rewrite_header Subject ***Spam*** -------------------------------------------------------------------------------- /dockerfiles/Mail/startup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -x /mail/bin/startup ]; then 4 | echo "Running /mail/bin/startup" 5 | /mail/bin/startup 6 | fi 7 | 8 | echo "Setting up spampd" 9 | mkdir -p /mail/db/spampd || true 10 | chown vmail:vmail /mail/db/spampd 11 | 12 | echo "Setting up Dovecot" 13 | source /usr/share/misc/lineinfile 14 | lineinfile '^#?mail_location' "mail_location = maildir:/mail/spool/%n/Mail" /etc/dovecot/conf.d/10-mail.conf 15 | lineinfile "^#?protocols =" "protocols = imap" /etc/dovecot/dovecot.conf 16 | lineinfile "^#log_path =" "log_path = /dev/stderr" /etc/dovecot/conf.d/10-logging.conf 17 | 18 | echo "DKIM Proxy Permissions" 19 | chown dkimproxy:dkimproxy /mail/config/dkim.key /mail/config/dkimproxy_out.conf 20 | 21 | exec /usr/bin/supervisord -n -c /etc/supervisord.conf -------------------------------------------------------------------------------- /dockerfiles/Mail/supervisor.conf: -------------------------------------------------------------------------------- 1 | [unix_http_server] 2 | file=/run/supervisor.sock ; (the path to the socket file) 3 | 4 | [supervisord] 5 | logfile=/mail/log/supervisord.log ; (main log file;default $CWD/supervisord.log) 6 | logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB) 7 | 
logfile_backups=10 ; (num of main logfile rotation backups;default 10) 8 | loglevel=info ; (log level;default info; others: debug,warn,trace) 9 | pidfile=/tmp/supervisord.pid ; (supervisord pidfile;default supervisord.pid) 10 | nodaemon=false ; (start in foreground if true;default false) 11 | minfds=1024 ; (min. avail startup file descriptors;default 1024) 12 | minprocs=200 ; (min. avail process descriptors;default 200) 13 | user=root ; 14 | 15 | ; the below section must remain in the config file for RPC 16 | ; (supervisorctl/web interface) to work, additional interfaces may be 17 | ; added by defining them in separate rpcinterface: sections 18 | [rpcinterface:supervisor] 19 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 20 | 21 | [supervisorctl] 22 | serverurl=unix:///run/supervisor.sock ; use a unix:// URL for a unix socket 23 | 24 | [program:opensmtpd] 25 | command = /usr/sbin/smtpd -d -f /mail/config/smtpd.conf 26 | autostart=true 27 | autorestart=true 28 | priority=5 29 | stdout_logfile=/mail/log/smtpd.log 30 | stdout_logfile_maxbytes=10MB 31 | stdout_logfile_backups=10 32 | redirect_stderr=true 33 | 34 | 35 | [program:cron] 36 | command = crond -f -d 8 37 | autostart=true 38 | autorestart=true 39 | priority=5 40 | stdout_logfile=/mail/log/cron.log 41 | stdout_logfile_maxbytes=10MB 42 | stdout_logfile_backups=10 43 | redirect_stderr=true 44 | 45 | [program:dovecot] 46 | command = dovecot -F -c /etc/dovecot/dovecot.conf 47 | autostart=true 48 | autorestart=true 49 | priority=5 50 | stdout_logfile=/mail/log/dovecot.log 51 | stdout_logfile_maxbytes=10MB 52 | stdout_logfile_backups=10 53 | redirect_stderr=true 54 | 55 | [program:dkimproxy] 56 | command =/usr/sbin/dkimproxy.out --conf_file=/mail/config/dkimproxy_out.conf --user=dkimproxy 57 | autostart=true 58 | autorestart=true 59 | priority=5 60 | stdout_logfile=/mail/log/dkimproxy.log 61 | stdout_logfile_maxbytes=10MB 62 | stdout_logfile_backups=10 63 | redirect_stderr=true 64 
| 65 | [program:clamsmtpd] 66 | command = /usr/sbin/clamsmtpd -f /mail/config/clamsmtpd.conf -d 1 67 | autostart=true 68 | autorestart=true 69 | priority=5 70 | stdout_logfile=/mail/log/clamsmtpd.log 71 | stdout_logfile_maxbytes=10MB 72 | stdout_logfile_backups=10 73 | redirect_stderr=true 74 | 75 | [program:clamd] 76 | command = /usr/sbin/clamd --config-file=/mail/config/clamd.conf --foreground 77 | autostart=true 78 | autorestart=true 79 | priority=5 80 | 81 | [program:spampd] 82 | command = /spampd-2.61/spampd.pl --port=10025 --relayhost=127.0.0.1:10027 --tagall --nodetach --homedir=/mail/db/spampd --logfile /mail/log/spampd.log --saconfig=/mail/db/spamassassin/updates_spamassassin_org.cf -u vmail -g vmail 83 | autostart=true 84 | autorestart=true 85 | priority=5 86 | 87 | [include] 88 | files = /etc/supervisor/conf.d/*.conf 89 | -------------------------------------------------------------------------------- /dockerfiles/MatomoGenerator/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.10.3 2 | 3 | 4 | RUN apk update && \ 5 | apk add python python3 xz 6 | 7 | ARG MATOMO_VER=3.12.0 8 | 9 | RUN wget https://builds.matomo.org/matomo-$MATOMO_VER.tar.gz 10 | RUN tar xvfz matomo-$MATOMO_VER.tar.gz \ 11 | --strip-components=3 \ 12 | -C /usr/local/bin \ 13 | matomo/misc/log-analytics/import_logs.py 14 | 15 | ENV LOG_PATH /weblogs 16 | ENV ROTATE_PATH /weblogs/processed 17 | ENV NGINX_CONTAINER unknown-container 18 | ENV LOG_CONFIG 1:example.org.log,2:example.com.log 19 | 20 | ENV MATOMO_URL http://localhost 21 | ENV MATOMO_TOKEN tokennotset 22 | 23 | COPY process_logs.py /usr/local/bin/process_logs 24 | RUN chmod 755 /usr/local/bin/process_logs 25 | 26 | CMD ["process_logs"] 27 | -------------------------------------------------------------------------------- /dockerfiles/MatomoGenerator/process_logs.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env 
python3 2 | import os 3 | import time 4 | import logging 5 | import subprocess 6 | import socket 7 | 8 | 9 | log = logging.getLogger('matomogen') 10 | log.setLevel(logging.DEBUG) 11 | console = logging.StreamHandler() 12 | console.setFormatter(logging.Formatter('%(asctime)s: %(message)s')) 13 | log.addHandler(console) 14 | 15 | 16 | def reload_web_container(container): 17 | """Sends the HUP signal to a container""" 18 | log.info(f'Reloading Container {container}') 19 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 20 | sock.connect('/var/run/docker.sock') 21 | sock.sendall(str.encode('POST /containers/{}/kill?signal=HUP HTTP/1.0\r\n\n'.format(container))) 22 | log.info(f'HUP Signal to {container} sent') 23 | 24 | 25 | if __name__ == '__main__': 26 | 27 | nginx_container = os.environ['NGINX_CONTAINER'] 28 | log_path = os.environ['LOG_PATH'] 29 | rotate_path = os.environ['ROTATE_PATH'] 30 | log_config = os.environ['LOG_CONFIG'] 31 | matomo_token = os.environ['MATOMO_TOKEN'] 32 | matomo_url = os.environ['MATOMO_URL'] 33 | process_time = time.strftime('%Y-%m-%d-%H:%M:%S') 34 | 35 | log.info(f'NGINX Container: {nginx_container}') 36 | log.info(f'Log Path {log_path}') 37 | log.info(f'Processed Path {rotate_path}') 38 | log.info(f'Log Config {log_config}') 39 | log.info(f'Timestamp {process_time}') 40 | 41 | sites = [] 42 | for l in log_config.split(','): 43 | parts = l.split(':') 44 | sites.append({'id': parts[0], 'host': parts[1]}) 45 | 46 | for site in sites: 47 | log_file = os.path.join(log_path, f"{site['host']}.log") 48 | if not os.path.isfile(log_file): 49 | log.error(f"Could not find log {log_file}. 
Skipping...") 50 | else: 51 | process_logfile = os.path.join(rotate_path, f"{site['host']}.{process_time}.log") 52 | log.info(f'Moving {log_file} to {process_logfile}') 53 | os.rename(log_file, process_logfile) 54 | reload_web_container(nginx_container) 55 | 56 | cmd = ['import_logs.py', f'--token-auth={matomo_token}', 57 | f'--url={matomo_url}', 58 | f"--idsite={site['id']}", 59 | '--enable-bots', 60 | '--enable-http-errors', 61 | '--enable-http-redirects', 62 | ' --enable-reverse-dns', 63 | '--exclude-path=*.json', 64 | #'--dry-run' 65 | process_logfile] 66 | 67 | log.info(f"Importing logs for {site['host']}") 68 | log.debug(f'Command :: {cmd}') 69 | result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) 70 | log.debug(result.stdout) 71 | 72 | if result.returncode != 0: 73 | log.error("Error Processing Logs") 74 | log.error(result.stderr) 75 | else: 76 | log.info(f"Log Import for {site['host']} complete") 77 | 78 | log.info("Marking log as processed") 79 | processed_name = f'{process_logfile}.processed' 80 | os.rename(process_logfile, processed_name) 81 | 82 | log.info("Compressing Log") 83 | cmd = ['xz', processed_name] 84 | result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) 85 | if result.returncode != 0: 86 | log.error("Error Compressing Processed Logs") 87 | log.error(result.stderr) 88 | else: 89 | log.info("Process Logs Compressed") 90 | log.debug(result.stdout) 91 | -------------------------------------------------------------------------------- /dockerfiles/NginxMastodon/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.8 2 | 3 | RUN apk add --no-cache nginx-mod-http-lua 4 | 5 | env MASTODON_WEB_CONTAINER bee2-unknown-host 6 | env MASTODON_STREAMING_CONTAINER bee2-unknown-host 7 | env MAINTENANCE_MODE false 8 | 9 | # Create folder for PID file 10 | RUN mkdir -p /run/nginx 11 | 12 | # Add our nginx conf 13 | COPY ./nginx.conf 
/etc/nginx/nginx-standard.conf 14 | 15 | # Maintenance files 16 | RUN mkdir -p /www 17 | COPY ./maintenance.html /www/maintenance.html 18 | COPY ./nginx-maintenance.conf /etc/nginx/nginx-maintenance.conf 19 | 20 | VOLUME ["/var/log/nginx"] 21 | 22 | COPY launcher.sh /launcher.sh 23 | RUN chmod 700 /launcher.sh 24 | CMD ["/launcher.sh"] 25 | -------------------------------------------------------------------------------- /dockerfiles/NginxMastodon/launcher.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "$MAINTENANCE_MODE" = "true" ]; then 4 | echo "Maintenance Mode Enabled" 5 | exec nginx -c /etc/nginx/nginx-maintenance.conf 6 | else 7 | echo "Standard Mode Enabled" 8 | exec nginx -c /etc/nginx/nginx-standard.conf 9 | fi 10 | -------------------------------------------------------------------------------- /dockerfiles/NginxMastodon/maintenance.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |

Down for Scheduled Maintenance

4 | 5 | 6 | -------------------------------------------------------------------------------- /dockerfiles/NginxMastodon/nginx-maintenance.conf: -------------------------------------------------------------------------------- 1 | pcre_jit on; 2 | daemon off; 3 | error_log /dev/stdout info; 4 | 5 | events { 6 | worker_connections 1024; 7 | } 8 | 9 | http{ 10 | 11 | access_log /dev/stdout; 12 | 13 | server { 14 | listen 8080; 15 | listen [::]:8080; 16 | 17 | location / { 18 | return 503; 19 | } 20 | # Error pages. 21 | error_page 503 /maintenance.html; 22 | location = /maintenance.html { 23 | root /www; 24 | } 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /dockerfiles/NginxMastodon/nginx.conf: -------------------------------------------------------------------------------- 1 | include /etc/nginx/modules/*.conf; 2 | env MASTODON_WEB_CONTAINER; 3 | env MASTODON_STREAMING_CONTAINER; 4 | 5 | pcre_jit on; 6 | daemon off; 7 | error_log /dev/stdout info; 8 | 9 | events { 10 | worker_connections 1024; 11 | } 12 | 13 | http{ 14 | 15 | map $http_upgrade $connection_upgrade { 16 | default upgrade; 17 | '' close; 18 | } 19 | 20 | log_format main '$http_x_forwarded_for - $remote_user [$time_local] ' 21 | '"$request" $status $body_bytes_sent "$http_referer" ' 22 | '"$http_user_agent"'; 23 | 24 | access_log "/var/log/nginx/$http_host.log" main; 25 | 26 | server { 27 | listen 8080; 28 | listen [::]:8080; 29 | 30 | set_by_lua $mastodon_web_container 'return os.getenv("MASTODON_WEB_CONTAINER")'; 31 | set_by_lua $mastodon_streaming_container 'return os.getenv("MASTODON_STREAMING_CONTAINER")'; 32 | 33 | resolver 127.0.0.11 valid=600s; 34 | 35 | keepalive_timeout 70; 36 | sendfile on; 37 | client_max_body_size 8m; 38 | gzip on; 39 | gzip_disable "msie6"; 40 | gzip_vary on; 41 | gzip_proxied any; 42 | gzip_comp_level 6; 43 | gzip_buffers 16 8k; 44 | gzip_http_version 1.1; 45 | gzip_types text/plain text/css application/json 
application/javascript text/xml application/xml application/xml+rss text/javascript; 46 | 47 | add_header Strict-Transport-Security "max-age=31536000"; 48 | 49 | location / { 50 | try_files $uri @proxy; 51 | } 52 | 53 | location ~ ^/(emoji|packs|system/accounts/avatars|system/media_attachments/files) { 54 | add_header Cache-Control "public, max-age=31536000, immutable"; 55 | try_files $uri @proxy; 56 | } 57 | 58 | location /sw.js { 59 | add_header Cache-Control "public, max-age=0"; 60 | try_files $uri @proxy; 61 | } 62 | 63 | location @proxy { 64 | proxy_set_header Host $host; 65 | proxy_set_header X-Real-IP $http_x_forwarded_for; 66 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 67 | proxy_set_header X-Forwarded-Proto https; 68 | proxy_set_header Proxy ""; 69 | proxy_pass_header Server; 70 | 71 | proxy_pass http://$mastodon_web_container:3000; 72 | proxy_buffering off; 73 | proxy_redirect off; 74 | proxy_http_version 1.1; 75 | proxy_set_header Upgrade $http_upgrade; 76 | proxy_set_header Connection $connection_upgrade; 77 | 78 | tcp_nodelay on; 79 | } 80 | 81 | location /api/v1/streaming { 82 | proxy_set_header Host $host; 83 | proxy_set_header X-Real-IP $http_x_forwarded_for; 84 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 85 | proxy_set_header X-Forwarded-Proto https; 86 | proxy_set_header Proxy ""; 87 | 88 | proxy_pass http://$mastodon_streaming_container:4000; 89 | proxy_buffering off; 90 | proxy_redirect off; 91 | proxy_http_version 1.1; 92 | proxy_set_header Upgrade $http_upgrade; 93 | proxy_set_header Connection $connection_upgrade; 94 | tcp_nodelay on; 95 | } 96 | 97 | error_page 500 501 502 503 504 /500.html; 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /dockerfiles/NginxPleroma/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.8 2 | 3 | RUN apk add --no-cache nginx-mod-http-lua 4 | 5 | env 
PLEROMA_CONTAINER bee2-unknown-host 6 | 7 | # Create folder for PID file 8 | RUN mkdir -p /run/nginx 9 | 10 | # Add our nginx conf 11 | COPY ./nginx.conf /etc/nginx/nginx.conf 12 | 13 | VOLUME ["/var/log/nginx", "/tmp/pleroma-media-cache"] 14 | 15 | CMD ["nginx"] 16 | -------------------------------------------------------------------------------- /dockerfiles/NginxPleroma/nginx.conf: -------------------------------------------------------------------------------- 1 | include /etc/nginx/modules/*.conf; 2 | pcre_jit on; 3 | daemon off; 4 | error_log /dev/stdout info; 5 | 6 | events { 7 | worker_connections 1024; 8 | } 9 | env PLEROMA_CONTAINER; 10 | 11 | http { 12 | proxy_cache_path /tmp/pleroma-media-cache levels=1:2 keys_zone=pleroma_media_cache:10m max_size=10g 13 | inactive=720m use_temp_path=off; 14 | resolver 127.0.0.11; 15 | server { 16 | listen 8080; 17 | listen [::]:8080; 18 | set_by_lua $pleroma_container 'return os.getenv("PLEROMA_CONTAINER")'; 19 | 20 | gzip_vary on; 21 | gzip_proxied any; 22 | gzip_comp_level 6; 23 | gzip_buffers 16 8k; 24 | gzip_http_version 1.1; 25 | gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript application/activity+json application/atom+xml; 26 | 27 | # the nginx default is 1m, not enough for large media uploads 28 | client_max_body_size 16m; 29 | 30 | location / { 31 | proxy_http_version 1.1; 32 | proxy_set_header Upgrade $http_upgrade; 33 | proxy_set_header Connection "upgrade"; 34 | proxy_set_header Host $http_host; 35 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 36 | 37 | # this is explicitly IPv4 since Pleroma.Web.Endpoint binds on IPv4 only 38 | # and `localhost.` resolves to [::0] on some systems: see issue #930 39 | proxy_pass http://$pleroma_container:4000; 40 | 41 | client_max_body_size 16m; 42 | } 43 | 44 | location ~ ^/(media|proxy) { 45 | proxy_cache pleroma_media_cache; 46 | slice 1m; 47 | proxy_cache_key 
$host$uri$is_args$args$slice_range; 48 | proxy_set_header Range $slice_range; 49 | proxy_http_version 1.1; 50 | proxy_cache_valid 200 206 301 304 1h; 51 | proxy_cache_lock on; 52 | proxy_ignore_client_abort on; 53 | proxy_buffering on; 54 | chunked_transfer_encoding on; 55 | proxy_pass http://$pleroma_container:4000; 56 | } 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /dockerfiles/NginxStatic/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.15.5 2 | 3 | COPY nginx.conf /etc/nginx/nginx.conf 4 | RUN chown -R nginx:nginx /var/log/nginx 5 | 6 | EXPOSE 8080 7 | 8 | VOLUME ["/var/log/nginx", "/var/www"] 9 | -------------------------------------------------------------------------------- /dockerfiles/NginxStatic/nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes 1; 2 | 3 | error_log /var/log/nginx/error.log info; 4 | 5 | events { 6 | worker_connections 1024; 7 | use epoll; 8 | } 9 | 10 | http { 11 | include /etc/nginx/mime.types; 12 | default_type application/octet-stream; 13 | 14 | client_header_timeout 10m; 15 | client_body_timeout 10m; 16 | send_timeout 10m; 17 | 18 | connection_pool_size 256; 19 | client_header_buffer_size 1k; 20 | large_client_header_buffers 4 2k; 21 | request_pool_size 4k; 22 | 23 | gzip on; 24 | gzip_min_length 1100; 25 | gzip_buffers 4 8k; 26 | gzip_types text/plain; 27 | 28 | output_buffers 1 32k; 29 | postpone_output 1460; 30 | 31 | sendfile on; 32 | tcp_nopush on; 33 | tcp_nodelay on; 34 | 35 | keepalive_timeout 75 20; 36 | 37 | ignore_invalid_headers on; 38 | 39 | index index.html; 40 | 41 | log_format main '$http_x_forwarded_for - $remote_user [$time_local] ' 42 | '"$request" $status $body_bytes_sent "$http_referer" ' 43 | '"$http_user_agent"'; 44 | 45 | server { 46 | listen 8080; 47 | listen [::]:8080; 48 | server_name ~^(www\.)(?.*)$; 49 | return 301 
https://$domain$request_uri; 50 | } 51 | 52 | server { 53 | listen 8080 default_server; 54 | listen [::]:8080 default_server; 55 | server_name _default_; 56 | root "/www/$http_host"; 57 | port_in_redirect off; 58 | 59 | access_log "/var/log/nginx/$http_host.log" main; 60 | 61 | error_page 404 /404/index.html; 62 | rewrite ^/feed[/]?$ /feed.xml permanent; 63 | 64 | location /related.json { 65 | add_header Access-Control-Allow-Origin *; 66 | } 67 | 68 | if ( $request_uri ~ "/index.htm" ) { 69 | rewrite (.*)/ /$1 permanent; 70 | } 71 | 72 | location ~ /\.ht { 73 | deny all; 74 | } 75 | } 76 | 77 | } 78 | -------------------------------------------------------------------------------- /dockerfiles/Radicale/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3 2 | 3 | ARG VERSION=3.1.8 4 | RUN pip install pytz passlib bcrypt radicale==$VERSION 5 | 6 | ENV RADICALE_CONFIG /etc/radicale/config 7 | RUN mkdir -p /etc/radicale 8 | COPY config $RADICALE_CONFIG 9 | 10 | VOLUME /radicale 11 | EXPOSE 8080 12 | 13 | CMD ["radicale"] 14 | -------------------------------------------------------------------------------- /dockerfiles/Radicale/config: -------------------------------------------------------------------------------- 1 | [auth] 2 | type = htpasswd 3 | htpasswd_filename = /radicale/users 4 | htpasswd_encryption = bcrypt 5 | 6 | [server] 7 | hosts = 0.0.0.0:8080, [::]:8080 8 | 9 | [storage] 10 | filesystem_folder = /radicale/collections 11 | 12 | [logging] 13 | level = info 14 | -------------------------------------------------------------------------------- /dockerfiles/SimpleID/000-default.conf: -------------------------------------------------------------------------------- 1 | 2 | 3 | DocumentRoot /var/www/html 4 | ErrorLog ${APACHE_LOG_DIR}/error.log 5 | CustomLog ${APACHE_LOG_DIR}/access.log combined 6 | 7 | 8 | Options -Indexes +FollowSymLinks 9 | AllowOverride All 10 | Require all granted 11 | 12 | 13 | 14 | 
-------------------------------------------------------------------------------- /dockerfiles/SimpleID/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:7.2.2-apache-stretch 2 | 3 | ENV SIMPLEID_VERSION 1.0.2 4 | ENV DOMAIN openid.example.com 5 | ENV APP_DIR /var/www/html 6 | 7 | # Apache Setup 8 | RUN a2enmod rewrite 9 | RUN sed -i "s/80/8080/g" /etc/apache2/ports.conf 10 | COPY 000-default.conf /etc/apache2/sites-available/000-default.conf 11 | COPY ports.conf /etc/apache2/ports.conf 12 | EXPOSE 8080 13 | 14 | # PHP Extensions 15 | RUN docker-php-ext-install bcmath 16 | RUN apt-get update && apt-get install -y libgmp-dev \ 17 | && docker-php-ext-install gmp \ 18 | && rm -rf /var/lib/apt/lists/* 19 | 20 | # State 21 | RUN mkdir -p /simpleid/identities 22 | RUN mkdir -p /simpleid/cache 23 | RUN mkdir -p /simpleid/store 24 | VOLUME ["/simpleid"] 25 | 26 | # SimpleID 27 | WORKDIR /opt 28 | RUN mkdir simpleid 29 | RUN curl -L https://downloads.sourceforge.net/project/simpleid/simpleid/$SIMPLEID_VERSION/simpleid-$SIMPLEID_VERSION.tar.gz -o r.tgz 30 | RUN tar xfz r.tgz 31 | RUN mv simpleid/www/.htaccess.dist $APP_DIR/.htaccess 32 | RUN mv simpleid/www/* $APP_DIR 33 | COPY config.php $APP_DIR 34 | 35 | # Cleanup 36 | RUN rm -rf /opt/simpleid 37 | RUN rm -rf /opt/r.tgz 38 | -------------------------------------------------------------------------------- /dockerfiles/SimpleID/config.php: -------------------------------------------------------------------------------- 1 | 18 | -------------------------------------------------------------------------------- /dockerfiles/SimpleID/ports.conf: -------------------------------------------------------------------------------- 1 | Listen [::]:8080 2 | Listen 8080 3 | -------------------------------------------------------------------------------- /dockerfiles/SoapBox/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 
| 3 | RUN apk update && \ 4 | apk add curl unzip ncurses postgresql-contrib tini file 5 | 6 | ARG SOAPBOX_VERSION=1.3.0 7 | 8 | RUN adduser --system --shell /bin/false --home /opt/pleroma pleroma 9 | RUN mkdir -p /etc/pleroma 10 | RUN ln -s /state/etc/config.exs /etc/pleroma/config.exs 11 | RUN mkdir -p /var/lib/pleroma 12 | RUN ln -s /state/static /var/lib/pleroma/ 13 | 14 | VOLUME ["/state"] 15 | 16 | RUN echo cache_bust 17 | 18 | USER pleroma 19 | RUN wget 'https://git.pleroma.social/api/v4/projects/2/jobs/artifacts/stable/download?job=amd64-musl' -O /tmp/pleroma.zip 20 | RUN unzip /tmp/pleroma.zip -d /tmp 21 | RUN mv /tmp/release/* /opt/pleroma 22 | RUN rmdir /tmp/release 23 | RUN rm /tmp/pleroma.zip 24 | 25 | #COPY soapbox-fe.zip /opt/pleroma/soapbox-fe.zip 26 | RUN wget https://gitlab.com/soapbox-pub/soapbox-fe/-/jobs/artifacts/v$SOAPBOX_VERSION/download?job=build-production -O /opt/pleroma/soapbox-fe.zip 27 | 28 | COPY startup /startup 29 | ENTRYPOINT ["/sbin/tini", "--"] 30 | CMD '/startup' 31 | -------------------------------------------------------------------------------- /dockerfiles/SoapBox/startup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | SOAPBOX_ZIP="/opt/pleroma/soapbox-fe.zip" 6 | 7 | mkdir -p /state/uploads /state/static /state/etc 8 | if [ -f $SOAPBOX_ZIP ]; then 9 | busybox unzip $SOAPBOX_ZIP -o -d /state 10 | rm $SOAPBOX_ZIP 11 | fi 12 | 13 | 14 | /opt/pleroma/bin/pleroma_ctl migrate 15 | exec /opt/pleroma/bin/pleroma start 16 | -------------------------------------------------------------------------------- /dockerfiles/TinyProxy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.10 2 | 3 | # Source: https://github.com/monokal/docker-tinyproxy 4 | 5 | RUN apk add --no-cache \ 6 | bash \ 7 | tinyproxy 8 | 9 | COPY run.sh /opt/docker-tinyproxy/run.sh 10 | 11 | ENTRYPOINT ["/opt/docker-tinyproxy/run.sh"] 
-------------------------------------------------------------------------------- /dockerfiles/TinyProxy/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Source: https://github.com/monokal/docker-tinyproxy 4 | 5 | # Global vars 6 | PROG_NAME='DockerTinyproxy' 7 | PROXY_CONF='/etc/tinyproxy/tinyproxy.conf' 8 | TAIL_LOG='/var/log/tinyproxy/tinyproxy.log' 9 | 10 | # Usage: screenOut STATUS message 11 | screenOut() { 12 | timestamp=$(date +"%H:%M:%S") 13 | 14 | if [ "$#" -ne 2 ]; then 15 | status='INFO' 16 | message="$1" 17 | else 18 | status="$1" 19 | message="$2" 20 | fi 21 | 22 | echo -e "[$PROG_NAME][$status][$timestamp]: $message" 23 | } 24 | 25 | # Usage: checkStatus $? "Error message" "Success message" 26 | checkStatus() { 27 | case $1 in 28 | 0) 29 | screenOut "SUCCESS" "$3" 30 | ;; 31 | 1) 32 | screenOut "ERROR" "$2 - Exiting..." 33 | exit 1 34 | ;; 35 | *) 36 | screenOut "ERROR" "Unrecognised return code." 37 | ;; 38 | esac 39 | } 40 | 41 | displayUsage() { 42 | echo 43 | echo ' Usage:' 44 | echo " docker run -d --name='tinyproxy' -p :8888 dannydirect/tinyproxy:latest " 45 | echo 46 | echo " - Set to the port you wish the proxy to be accessible from." 47 | echo " - Set to 'ANY' to allow unrestricted proxy access, or one or more spece seperated IP/CIDR addresses for tighter security." 48 | echo 49 | echo " Examples:" 50 | echo " docker run -d --name='tinyproxy' -p 6666:8888 dannydirect/tinyproxy:latest ANY" 51 | echo " docker run -d --name='tinyproxy' -p 7777:8888 dannydirect/tinyproxy:latest 87.115.60.124" 52 | echo " docker run -d --name='tinyproxy' -p 8888:8888 dannydirect/tinyproxy:latest 10.103.0.100/24 192.168.1.22/16" 53 | echo 54 | } 55 | 56 | stopService() { 57 | screenOut "Checking for running Tinyproxy service..." 58 | if [ "$(pidof tinyproxy)" ]; then 59 | screenOut "Found. Stopping Tinyproxy service for pre-configuration..." 60 | killall tinyproxy 61 | checkStatus $? 
"Could not stop Tinyproxy service." \ 62 | "Tinyproxy service stopped successfully." 63 | else 64 | screenOut "Tinyproxy service not running." 65 | fi 66 | } 67 | 68 | parseAccessRules() { 69 | list='' 70 | for ARG in $@; do 71 | line="Allow\t$ARG\n" 72 | list+=$line 73 | done 74 | echo "$list" | sed 's/.\{2\}$//' 75 | } 76 | 77 | setMiscConfig() { 78 | sed -i -e"s,^MinSpareServers ,MinSpareServers\t1 ," $PROXY_CONF 79 | checkStatus $? "Set MinSpareServers - Could not edit $PROXY_CONF" \ 80 | "Set MinSpareServers - Edited $PROXY_CONF successfully." 81 | 82 | sed -i -e"s,^MaxSpareServers ,MaxSpareServers\t1 ," $PROXY_CONF 83 | checkStatus $? "Set MinSpareServers - Could not edit $PROXY_CONF" \ 84 | "Set MinSpareServers - Edited $PROXY_CONF successfully." 85 | 86 | sed -i -e"s,^StartServers ,StartServers\t1 ," $PROXY_CONF 87 | checkStatus $? "Set MinSpareServers - Could not edit $PROXY_CONF" \ 88 | "Set MinSpareServers - Edited $PROXY_CONF successfully." 89 | } 90 | 91 | enableLogFile() { 92 | sed -i -e"s,^#LogFile,LogFile," $PROXY_CONF 93 | } 94 | 95 | setAccess() { 96 | if [[ "$1" == *ANY* ]]; then 97 | sed -i -e"s/^Allow /#Allow /" $PROXY_CONF 98 | checkStatus $? "Allowing ANY - Could not edit $PROXY_CONF" \ 99 | "Allowed ANY - Edited $PROXY_CONF successfully." 100 | else 101 | sed -i "s,^Allow 127.0.0.1,$1," $PROXY_CONF 102 | checkStatus $? "Allowing IPs - Could not edit $PROXY_CONF" \ 103 | "Allowed IPs - Edited $PROXY_CONF successfully." 104 | fi 105 | } 106 | 107 | setAuth() { 108 | if [ -n "${BASIC_AUTH_USER}" ] && [ -n "${BASIC_AUTH_PASSWORD}" ]; then 109 | screenOut "Setting up basic auth credentials." 110 | sed -i -e"s/#BasicAuth user password/BasicAuth ${BASIC_AUTH_USER} ${BASIC_AUTH_PASSWORD}/" $PROXY_CONF 111 | fi 112 | } 113 | 114 | setFilter(){ 115 | if [ -n "$FilterDefaultDeny" ] ; then 116 | screenOut "Setting up FilterDefaultDeny." 
117 | sed -i -e"s/#FilterDefaultDeny Yes/FilterDefaultDeny $FilterDefaultDeny/" $PROXY_CONF 118 | fi 119 | 120 | if [ -n "$FilterURLs" ] ; then 121 | screenOut "Setting up FilterURLs." 122 | sed -i -e"s/#FilterURLs Yes/FilterURLs $FilterURLs/" $PROXY_CONF 123 | fi 124 | 125 | if [ -n "$FilterExtended" ] ; then 126 | screenOut "Setting up FilterExtended." 127 | sed -i -e"s/#FilterExtended Yes/FilterExtended $FilterExtended/" $PROXY_CONF 128 | fi 129 | 130 | if [ -n "$FilterCaseSensitive" ] ; then 131 | screenOut "Setting up FilterCaseSensitive." 132 | sed -i -e"s/#FilterCaseSensitive Yes/FilterCaseSensitive $FilterCaseSensitive/" $PROXY_CONF 133 | fi 134 | 135 | 136 | if [ -n "$Filter" ] ; then 137 | screenOut "Setting up Filter." 138 | sed -i -e"s+#Filter \"/etc/tinyproxy/filter\"+Filter \"$Filter\"+" $PROXY_CONF 139 | fi 140 | 141 | } 142 | 143 | setTimeout() { 144 | if [ -n "${TIMEOUT}" ]; then 145 | screenOut "Setting up Timeout." 146 | sed -i -e"s/Timeout 600/Timeout ${TIMEOUT}/" $PROXY_CONF 147 | fi 148 | } 149 | 150 | startService() { 151 | screenOut "Starting Tinyproxy service..." 152 | /usr/bin/tinyproxy 153 | checkStatus $? "Could not start Tinyproxy service." \ 154 | "Tinyproxy service started successfully." 155 | } 156 | 157 | tailLog() { 158 | touch /var/log/tinyproxy/tinyproxy.log 159 | screenOut "Tailing Tinyproxy log..." 160 | tail -f $TAIL_LOG 161 | checkStatus $? "Could not tail $TAIL_LOG" \ 162 | "Stopped tailing $TAIL_LOG" 163 | } 164 | 165 | # Check args 166 | if [ "$#" -lt 1 ]; then 167 | displayUsage 168 | exit 1 169 | fi 170 | # Start script 171 | echo && screenOut "$PROG_NAME script started..." 
172 | # Stop Tinyproxy if running 173 | stopService 174 | # Parse ACL from args 175 | export rawRules="$@" && parsedRules=$(parseAccessRules $rawRules) && unset rawRules 176 | # Set ACL in Tinyproxy config 177 | setAccess $parsedRules 178 | # Enable basic auth (if any) 179 | setAuth 180 | # Enable Filtering (if any) 181 | setFilter 182 | # Set Timeout (if any) 183 | setTimeout 184 | # Enable log to file 185 | enableLogFile 186 | # Start Tinyproxy 187 | startService 188 | # Tail Tinyproxy log 189 | tailLog 190 | # End 191 | screenOut "$PROG_NAME script ended." && echo 192 | exit 0 -------------------------------------------------------------------------------- /dockerfiles/TraefikCerts/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8 2 | 3 | ENV TARGET_CONTAINER xmpp 4 | 5 | RUN pip3 install watchdog 6 | 7 | COPY extractor / 8 | RUN chmod 755 /extractor 9 | 10 | CMD ["python","-u","/extractor"] -------------------------------------------------------------------------------- /dockerfiles/TraefikCerts/extractor: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import sys 3 | import os 4 | import socket 5 | import errno 6 | import time 7 | import json 8 | import glob 9 | import traceback 10 | from base64 import b64decode 11 | from watchdog.observers import Observer 12 | from watchdog.events import FileSystemEventHandler 13 | 14 | 15 | class Handler(FileSystemEventHandler): 16 | def on_created(self, event): 17 | self.handle_event(event) 18 | 19 | def on_modified(self, event): 20 | self.handle_event(event) 21 | 22 | def handle_event(self, event): 23 | # Check if it's a JSON file 24 | if not event.is_directory and event.src_path.endswith('.json'): 25 | print('Certificate storage changed (' + os.path.basename(event.src_path) + ')') 26 | self.handle_file(event.src_path) 27 | 28 | def handle_file(self, file): 29 | # Read JSON file 30 | data = 
json.loads(open(file).read()) 31 | data = data['lec'] 32 | 33 | # Determine ACME version 34 | try: 35 | acme_version = 2 if 'acme-v02' in data['Account']['Registration']['uri'] else 1 36 | except TypeError: 37 | if 'DomainsCertificate' in data: 38 | acme_version = 1 39 | else: 40 | acme_version = 2 41 | 42 | # Find certificates 43 | if acme_version == 1: 44 | certs = data['DomainsCertificate']['Certs'] 45 | elif acme_version == 2: 46 | certs = data['Certificates'] 47 | 48 | print(f'Certificate storage contains {str(len(certs))} certificates (acme v{acme_version})') 49 | 50 | # Loop over all certificates 51 | for c in certs: 52 | if acme_version == 1: 53 | name = c['Certificate']['Domain'] 54 | privatekey = c['Certificate']['PrivateKey'] 55 | fullchain = c['Certificate']['Certificate'] 56 | elif acme_version == 2: 57 | name = c['domain']['main'] 58 | privatekey = c['key'] 59 | fullchain = c['certificate'] 60 | 61 | # Decode private key, certificate and chain 62 | privatekey = b64decode(privatekey).decode('utf-8') 63 | fullchain = b64decode(fullchain).decode('utf-8') 64 | start = fullchain.find('-----BEGIN CERTIFICATE-----', 1) 65 | cert = fullchain[0:start] 66 | chain = fullchain[start:] 67 | 68 | # Create domain directory if it doesn't exist 69 | directory = 'certs/' + name + '/' 70 | try: 71 | os.makedirs(directory) 72 | except OSError as error: 73 | if error.errno != errno.EEXIST: 74 | raise 75 | 76 | # Write private key, certificate and chain to file 77 | with open(directory + 'privkey.pem', 'w') as f: 78 | f.write(privatekey) 79 | 80 | with open(directory + 'cert.pem', 'w') as f: 81 | f.write(cert) 82 | 83 | with open(directory + 'chain.pem', 'w') as f: 84 | f.write(chain) 85 | 86 | with open(directory + 'fullchain.pem', 'w') as f: 87 | f.write(fullchain) 88 | 89 | # Write private key, certificate and chain to flat files 90 | directory = 'certs_flat/' 91 | 92 | with open(directory + name + '.key', 'w') as f: 93 | f.write(privatekey) 94 | with open(directory + 
name + '.crt', 'w') as f: 95 | f.write(fullchain) 96 | with open(directory + name + '.chain.pem', 'w') as f: 97 | f.write(chain) 98 | 99 | print(f'Extracted certificate for: {name}') 100 | 101 | container = os.environ['TARGET_CONTAINER'] 102 | print(f'Reloading Container: {container}') 103 | sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 104 | sock.connect('/var/run/docker.sock') 105 | sock.sendall(str.encode(f'POST /containers/{container}/kill?signal=HUP HTTP/1.0\r\n\n')) 106 | 107 | 108 | if __name__ == "__main__": 109 | 110 | # Determine path to watch 111 | path = sys.argv[1] if len(sys.argv) > 1 else './data' 112 | 113 | # Create output directories if it doesn't exist 114 | try: 115 | os.makedirs('certs') 116 | except OSError as error: 117 | if error.errno != errno.EEXIST: 118 | raise 119 | try: 120 | os.makedirs('certs_flat') 121 | except OSError as error: 122 | if error.errno != errno.EEXIST: 123 | raise 124 | 125 | # Create event handler and observer 126 | event_handler = Handler() 127 | observer = Observer() 128 | 129 | # Extract certificates from current file(s) before watching 130 | files = glob.glob(os.path.join(path, '*.json')) 131 | try: 132 | for file in files: 133 | print('Certificate storage found (' + os.path.basename(file) + ')') 134 | event_handler.handle_file(file) 135 | except Exception as e: 136 | print(e) 137 | traceback.print_exc() 138 | 139 | # Register the directory to watch 140 | observer.schedule(event_handler, path) 141 | 142 | # Main loop to watch the directory 143 | observer.start() 144 | try: 145 | while True: 146 | time.sleep(1) 147 | except KeyboardInterrupt: 148 | observer.stop() 149 | observer.join() -------------------------------------------------------------------------------- /dockerfiles/VPNProxy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | 3 | ENV VPN_SUBNET 10.0.0.0/16 4 | ENV GATEWAY 127.0.0.1 5 | ENV LISTEN_PORT 80 6 | ENV REMOTE_ADDR 
127.0.0.1:8080 7 | 8 | RUN apk update && \ 9 | apk add socat tini 10 | 11 | COPY service /service 12 | RUN chmod 700 /service 13 | 14 | ENTRYPOINT ["/sbin/tini", "--"] 15 | CMD '/service' -------------------------------------------------------------------------------- /dockerfiles/VPNProxy/service: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ip ro add $VPN_SUBNET via $GATEWAY 4 | 5 | exec socat -d -d \ 6 | TCP4-LISTEN:$LISTEN_PORT,reuseaddr,fork,su=nobody \ 7 | TCP4:$REMOTE_ADDR -------------------------------------------------------------------------------- /dockerfiles/WhoAmI/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:7.4.6-apache 2 | 3 | COPY index.php /var/www/html 4 | -------------------------------------------------------------------------------- /dockerfiles/WhoAmI/index.php: -------------------------------------------------------------------------------- 1 | $xf, 14 | "ipv4" => $ipv4_parts[1], 15 | "ipv6" => $ipv6_parts[1] 16 | ); 17 | 18 | header("Cotent-Type: application/json"); 19 | echo json_encode($data); 20 | ?> 21 | -------------------------------------------------------------------------------- /examples/freebsd-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars_files: 4 | - ../{{ config_file }} 5 | vars: 6 | - ansible_python_interpreter: /usr/local/bin/python 7 | roles: 8 | - private-net 9 | -------------------------------------------------------------------------------- /examples/openvpn.conf: -------------------------------------------------------------------------------- 1 | client 2 | dev tun 3 | proto udp 4 | remote vpn.example.net 1194 5 | resolv-retry infinite 6 | nobind 7 | ca /etc/openvpn/ca.crt 8 | cert /etc/openvpn/laptop.crt 9 | key /etc/openvpn/laptop.key 10 | cipher AES-256-CBC 11 | compress lz4-v2 12 | persist-key 13 | persist-tun 14 | 
status /var/log/openvpn-status.log 15 | log-append /var/log/openvpn.log 16 | 17 | -------------------------------------------------------------------------------- /examples/settings.yml: -------------------------------------------------------------------------------- 1 | provisioner: 2 | type: vultr 3 | token: InsertValidAPIKeyHere 4 | region: LAX 5 | state_file: vultr-state.yml 6 | ssh_key: 7 | public: vultr-key.pub 8 | private: vultr-key 9 | inventory: 10 | public: vultr.pub.inv 11 | private: vultr.pri.inv 12 | servers: 13 | web1: 14 | plan: 202 # 2048 MB RAM,40 GB SSD,2.00 TB BW 15 | os: 241 # Ubuntu 17.04 x64 16 | private_ip: 192.168.150.10 17 | dns: 18 | public: 19 | - web1.example.com 20 | private: 21 | - web1.example.net 22 | web: 23 | - dyject.com 24 | - rearviewmirror.cc 25 | playbook: ubuntu-playbook.yml 26 | vpn: 27 | plan: 201 # 1024 MB RAM,25 GB SSD,1.00 TB BW 28 | os: 230 # FreeBSD 11 x64 29 | private_ip: 192.168.150.20 30 | dns: 31 | public: 32 | - vpn.example.com 33 | private: 34 | - vpn.example.net 35 | playbook: freebsd-playbook.yml 36 | openvpn: 37 | hosts: 38 | gateway: 192.168.150.20 39 | server: 40 | subnet: 10.10.12.0 41 | routes: 42 | - 192.168.150.0 255.255.255.0 43 | netmask: 255.255.255.0 44 | cipher: AES-256-CBC 45 | clients: 46 | laptop: type: host 47 | security: 48 | pgp_id: 1ACBD3G 49 | docker: 50 | prefix: bee2 51 | read_timeout: 900 52 | backup: 53 | web1: 54 | storage_dir: /media/backups 55 | volumes: 56 | - letsencrypt 57 | - logs-web 58 | jobs: 59 | dyject: 60 | server: web1 61 | git: git@github.com:/sumdog/dyject_web.git 62 | volumes: 63 | - dyject-web:/dyject/build:rw 64 | applications: 65 | certbot: 66 | server: web1 67 | build_dir: CertBot 68 | volumes: 69 | - letsencrypt:/etc/letsencrypt:rw 70 | - /var/run/docker.sock:/var/run/docker.sock 71 | env: 72 | email: blackhole@example.com 73 | test: false 74 | domains: all 75 | port: 8080 76 | haproxy_container: $haproxy 77 | nginx-static: 78 | server: web1 79 | build_dir: 
NginxStatic 80 | env: 81 | domains: 82 | - dyject.com 83 | http_port: 8080 84 | volumes: 85 | - dyject-web:/www/dyject.com:ro 86 | - logs-web:/var/log/nginx:rw 87 | haproxy: 88 | server: web1 89 | build_dir: HAProxy 90 | env: 91 | domains: all 92 | certbot_container: $certbot 93 | link: 94 | - nginx-static 95 | - certbot 96 | volumes: 97 | - letsencrypt:/etc/letsencrypt:rw 98 | ports: 99 | - 80 100 | - 443 101 | -------------------------------------------------------------------------------- /examples/ubuntu-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | ansible_python_interpreter: /usr/bin/python3 5 | vars_files: 6 | - ../{{ config_file }} 7 | roles: 8 | - private-net 9 | -------------------------------------------------------------------------------- /lib/digitalocean.rb: -------------------------------------------------------------------------------- 1 | require_relative 'provisioner' 2 | require 'ipaddr' 3 | 4 | class DigitalOceanProvisioner < Provisioner 5 | 6 | DO_ENDPOINT = 'https://api.digitalocean.com/v2' 7 | 8 | SSH_KEY_ID = 'b2-provisioner' 9 | 10 | def initialize(config, log) 11 | super(config, log) 12 | @config = config 13 | end 14 | 15 | def ensure_ssh_keys 16 | ssh_key = File.read(@config['provisioner']['ssh_key']['public']) 17 | key_list = request('GET', 'account/keys')['ssh_keys'].find { |v| v['name'] == SSH_KEY_ID } 18 | if key_list.nil? or not key_list.any? 
19 | @log.info("Adding SSH Key #{SSH_KEY_ID}") 20 | @state['ssh_key_id'] = request('POST', 'account/keys', {'name' => SSH_KEY_ID, 'public_key' =>ssh_key})['ssh_key']['id'] 21 | save_state 22 | else 23 | @log.info("SSH key #{SSH_KEY_ID} exists for account") 24 | end 25 | end 26 | 27 | # Digital Ocean's default image forces an interactive root password reset 28 | # breaking automatic provisioning 29 | # see: https://www.digitalocean.com/community/questions/how-to-remove-reset-root-password 30 | private def initial_user_data() 31 | rand_password = ('a'..'z').to_a.shuffle[0,20].join 32 | user_data = <<-USER_DATA 33 | #cloud-config 34 | runcmd: 35 | - echo root:#{rand_password} | chpasswd 36 | USER_DATA 37 | end 38 | 39 | def reserve_ips() 40 | @servers.each { |s,v| 41 | if @state['servers'].nil? 42 | @state['servers'] = {} 43 | end 44 | if @state['servers'][s].nil? 45 | @state['servers'][s] = { 'ipv4' => {}} 46 | @state['servers'][s]['ipv4']['addr'] = request('POST', 'floating_ips', { 47 | 'region' => @config['provisioner']['region']})['floating_ip']['ip'] 48 | @log.info("Reserved IPs for: #{s} / #{@state['servers'][s]['ipv4']['addr']}") 49 | save_state 50 | else 51 | @log.info("IP Already Reserved for: #{s} / #{@state['servers'][s]['ipv4']['addr']}") 52 | end 53 | } 54 | end 55 | 56 | def wait_server(server, field, field_value, field_state = true) 57 | while true 58 | current_servers = request('GET', 'droplets')['droplets'].map { |d| 59 | if d['name'] == server 60 | if (field_state and d[field] != field_value) or (!field_state and d[field] == field_value) 61 | verb = field_state ? 'have' : 'change from' 62 | @log.info("Waiting on #{server} to #{verb} #{field} #{field_value}. Current state: #{d[field]}") 63 | sleep(5) 64 | else 65 | @log.info("Complete. 
Server: #{server} / #{field} => #{field_value}") 66 | return true 67 | end 68 | end 69 | } 70 | end 71 | end 72 | 73 | def ensure_servers 74 | current_servers = request('GET', 'droplets')['droplets'].map { |item| item['name'] } 75 | create_servers = @state['servers'].keys.reject { |server| current_servers.include? server } 76 | create_servers.each { |server| 77 | @log.info("Creating #{server}") 78 | server_config = { 79 | 'name' => server, 80 | 'region' => @config['provisioner']['region'], 81 | 'size' => @servers[server]['plan'], 82 | 'image' => @servers[server]['os'], 83 | 'ssh_keys' => [@state['ssh_key_id']], 84 | 'ipv6' => true, 85 | 'private_networking' => true, 86 | 'user_data' => initial_user_data 87 | } 88 | 89 | create_response = request('POST', 'droplets', server_config)['droplet'] 90 | 91 | @state['servers'][server]['id'] = create_response['id'] 92 | save_state 93 | 94 | wait_server(server, 'status', 'active') 95 | @log.info("Assinging floating IP #{@state['servers'][server]['ipv4']['addr']} to Server") 96 | request('POST', "floating_ips/#{@state['servers'][server]['ipv4']['addr']}/actions", {'type' => 'assign', 'droplet_id' => create_response['id'] }) 97 | } 98 | 99 | end 100 | 101 | def request(method, path, args = {}, ok_lambda = nil, error_code = nil, err_lambda = nil) 102 | 103 | uri = URI.parse("#{DO_ENDPOINT}/#{path}") 104 | https = Net::HTTP.new(uri.host, uri.port) 105 | https.use_ssl = true 106 | 107 | headers = {'Content-Type' => 'application/json', 108 | 'Authorization' => "Bearer #{@config['provisioner']['api_key']}" } 109 | 110 | req = case method 111 | when 'POST' 112 | r = Net::HTTP::Post.new(uri.path, initheader = headers) 113 | r.body = args.to_json 114 | r 115 | when 'PUT' 116 | r = Net::HTTP::Put.new(uri.path, initheader = headers) 117 | r.body = args.to_json 118 | r 119 | when 'GET' 120 | path = "#{uri.path}?".concat(args.collect { |k,v| "#{k}=#{CGI::escape(v.to_s)}" }.join('&')) 121 | Net::HTTP::Get.new(path, initheader = headers) 122 
| end 123 | 124 | res = https.request(req) 125 | 126 | case res.code.to_i 127 | when 200..299 128 | if not ok_lambda.nil? 129 | ok_lambda.() 130 | else 131 | if res.body == '' 132 | '' 133 | else 134 | JSON.parse(res.body) 135 | end 136 | end 137 | else 138 | if not error_code.nil? and res.code.to_i == error_code 139 | err_lambda.() 140 | else 141 | @log.fatal('Error Executing Exoscale Command. Aborting...') 142 | @log.fatal("#{res.code} :: #{res.body}") 143 | exit(2) 144 | end 145 | end 146 | end 147 | 148 | def pull_ipv6_info 149 | request('GET', 'droplets')['droplets'].map { |d| 150 | if @state['servers'].has_key?(d['name']) 151 | @log.info("Server #{d['name']} IPv6 Address #{d['networks']['v6'][0]['ip_address']}") 152 | if @state['servers'][d['name']]['ipv6'].nil? 153 | @state['servers'][d['name']]['ipv6'] = {} 154 | end 155 | ipv6 = d['networks']['v6'][0]['ip_address'] 156 | @state['servers'][d['name']]['ipv6']['addr'] = ipv6 157 | subnet = IPAddr.new(ipv6).mask(d['networks']['v6'][0]['netmask']).to_s 158 | @log.info("IPv6 Subnet #{subnet}") 159 | @state['servers'][d['name']]['ipv6']['subnet'] = subnet 160 | end 161 | } 162 | save_state 163 | end 164 | 165 | def dns_update_check(r) 166 | r['name'] = r['name'] == '' ? '@' : r['name'] 167 | current = request('GET', "domains/#{r['domain']}/records", {})['domain_records'].find{ |c| 168 | c['type'] == r['type'] and c['name'] == r['name'] and IPAddr.new(c['data']) == IPAddr.new(r['data']) 169 | } 170 | msg = "Domain: #{r['domain']}, Name: #{r['name']}, Type: #{r['type']}" 171 | if current.nil? 
172 | request('POST', "domains/#{r['domain']}/records", r) 173 | @log.info("Record Created :: #{msg}") 174 | else 175 | request('PUT', "domains/#{r['domain']}/records/#{current['id']}", r) 176 | @log.info("Record Updated :: #{msg}") 177 | end 178 | end 179 | 180 | def list_domain_records(domain, ok_func, err_func) 181 | request('GET', "domains/#{domain}", {}, ok_func, 404, err_func) 182 | end 183 | 184 | def create_domain(domain, ip) 185 | args = {'name' => domain } 186 | if not ip.nil? 187 | args['ip_address'] = ip 188 | end 189 | request('POST', 'domains', args) 190 | end 191 | 192 | def web_ipv6 193 | # Broked 194 | @servers.select { |name, s| 195 | s['dns'].has_key?('web') 196 | }.each { |name,cfg| 197 | if not @state['servers'][name].has_key?('static_web') 198 | ipv6 = @state['servers'][name]['ipv6']['subnet'] + cfg['ipv6']['docker']['static_web'] 199 | @log.info("Creating IPv6 Web IP #{ipv6} for #{name}") 200 | @state['servers'][name]['ipv6']['static_web'] = ipv6 201 | end 202 | } 203 | save_state 204 | end 205 | 206 | def provision(rebuild = false, server = nil) 207 | ensure_ssh_keys 208 | reserve_ips 209 | 210 | if rebuild 211 | @log.warn('Rebuilding not implemented for DigitalOcean Provisioner') 212 | exit 2 213 | end 214 | 215 | ensure_servers 216 | pull_ipv6_info 217 | web_ipv6 218 | update_dns 219 | # cleanup_dns 220 | # mail_dns 221 | write_inventory 222 | end 223 | 224 | 225 | end 226 | -------------------------------------------------------------------------------- /lib/exoscale.rb: -------------------------------------------------------------------------------- 1 | class ExoscaleProvisioner 2 | 3 | COMPUTE_ENDPOINT = 'https://api.exoscale.ch/compute' 4 | 5 | DNS_ENDPOINT = 'https://api.exoscale.ch/dns' 6 | 7 | def initialize(config, log) 8 | @log = log 9 | @api_key = config['provisioner']['api_key'] 10 | 11 | end 12 | 13 | def request(method, path, endpoint = COMPUTE_ENDPOINT, args = {}, ok_lambda = nil, error_code = nil, err_lambda = nil) 14 | uri 
= URI.parse("#{endpoint}/#{path}") 15 | https = Net::HTTP.new(uri.host, uri.port) 16 | https.use_ssl = true 17 | 18 | req = case method 19 | when 'POST' 20 | r = Net::HTTP::Post.new(uri.path, initheader = {'API-Key' => @api_key }) 21 | r.set_form_data(args) 22 | r 23 | when 'GET' 24 | path = "#{uri.path}?".concat(args.collect { |k,v| "#{k}=#{CGI::escape(v.to_s)}" }.join('&')) 25 | Net::HTTP::Get.new(path, initheader = {'API-Key' => @api_key }) 26 | end 27 | 28 | res = https.request(req) 29 | 30 | case res.code.to_i 31 | when 503 32 | @log.warn('Rate Limit Reached. Waiting...') 33 | sleep(2) 34 | request(method, path, args, ok_lambda, error_code, err_lambda) 35 | when 200 36 | if not ok_lambda.nil? 37 | ok_lambda.() 38 | else 39 | if res.body == '' 40 | '' 41 | else 42 | JSON.parse(res.body) 43 | end 44 | end 45 | else 46 | if not error_code.nil? and res.code.to_i == error_code 47 | err_lambda.() 48 | else 49 | @log.fatal('Error Executing Exoscale Command. Aborting...') 50 | @log.fatal("#{res.code} :: #{res.body}") 51 | exit(2) 52 | end 53 | end 54 | end 55 | 56 | def provision(rebuild = false, server = nil) 57 | puts('Too Expensive. Gave up. 
Who charges for DNS zones?!') 58 | # ensure_ssh_keys 59 | # reserve_ips 60 | # populate_ips 61 | # web_ipv6 62 | # if rebuild 63 | # @log.info('Rebuilding Servers') 64 | # delete_provisioned_servers 65 | # end 66 | # ensure_servers 67 | # update_dns 68 | # cleanup_dns 69 | # mail_dns 70 | # write_inventory 71 | end 72 | 73 | 74 | end 75 | -------------------------------------------------------------------------------- /lib/name.rb: -------------------------------------------------------------------------------- 1 | require_relative 'provisioner' 2 | require_relative 'util' 3 | 4 | class NameProvisioner < Provisioner 5 | 6 | NAME_ENDPOINT = 'https://api.name.com/v4/' 7 | 8 | def initialize(config, log) 9 | super(config, log) 10 | @config = config 11 | @domain_records = {} 12 | end 13 | 14 | def request(method, path, args = {}, ok_lambda = nil, error_code = nil, err_lambda = nil) 15 | 16 | uri = URI.parse("#{NAME_ENDPOINT}/#{path}") 17 | https = Net::HTTP.new(uri.host, uri.port) 18 | https.use_ssl = true 19 | 20 | headers = {'Content-Type' => 'application/json'} 21 | 22 | req = case method 23 | when 'POST' 24 | r = Net::HTTP::Post.new(uri.path, initheader = headers) 25 | r.body = args.to_json 26 | r 27 | when 'PUT' 28 | r = Net::HTTP::Put.new(uri.path, initheader = headers) 29 | r.body = args.to_json 30 | r 31 | when 'GET' 32 | path = "#{uri.path}?".concat(args.collect { |k,v| "#{k}=#{CGI::escape(v.to_s)}" }.join('&')) 33 | Net::HTTP::Get.new(path, initheader = headers) 34 | end 35 | 36 | req.basic_auth(@config['provisioner']['username'], @config['provisioner']['api_key']) 37 | 38 | res = https.request(req) 39 | 40 | case res.code.to_i 41 | when 200..299 42 | if not ok_lambda.nil? 43 | ok_lambda.() 44 | else 45 | if res.body == '' 46 | '' 47 | else 48 | JSON.parse(res.body) 49 | end 50 | end 51 | else 52 | if not error_code.nil? and res.code.to_i == error_code 53 | err_lambda.() 54 | else 55 | @log.fatal('Error Executing Name Command. 
Aborting...') 56 | @log.fatal("#{res.code} :: #{res.body}") 57 | exit(2) 58 | end 59 | end 60 | end 61 | 62 | def dns_upsert(domain, name, record_type, data, ttl = 3600) 63 | 64 | dns_api_call = "domains/#{domain}/records" 65 | 66 | if @domain_records[domain].nil? 67 | @domain_records[domain] = request('GET', dns_api_call) 68 | end 69 | 70 | cur = @domain_records[domain].fetch('records', {}).select { |r| 71 | (r['fqdn'] == "#{domain}." or r['fqdn'] == "#{name}.#{domain}.") and r['type'] == record_type 72 | }.first 73 | 74 | params = {'host' => name, 'type' => record_type, 'answer' => data, 'ttl'=> ttl} 75 | fqdn = "#{name}.#{domain}".sub(/^[0.]*/, "") 76 | log_msg = "#{data} :: #{fqdn}" 77 | 78 | if cur.nil? 79 | @log.info("Creating #{log_msg}") 80 | request('POST', dns_api_call, params) 81 | elsif cur['answer'] == data 82 | @log.info("DNS Correct. Skipping #{log_msg}") 83 | else 84 | @log.info("Updating #{log_msg}") 85 | request('PUT', "#{dns_api_call}/#{cur['id']}", params) 86 | end 87 | end 88 | 89 | 90 | def mail_dns(server) 91 | 92 | mail_config = @config['servers'][server]['mail'] 93 | 94 | dkim_key = OpenSSL::PKey::RSA.new(File.read(mail_config['dkim_private'])) 95 | b64_key = Base64.strict_encode64(dkim_key.public_key.to_der) 96 | dkim_dns = "k=rsa; t=s; p=#{b64_key}" 97 | 98 | mail_network = mail_config['network'] 99 | mail_ipv4 = @config['servers'][server]['ip'][mail_network]['ipv4'] 100 | mail_ipv6 = @config['servers'][server]['ip'][mail_network]['ipv6'] 101 | 102 | mail_config['domains'].each { |domain| 103 | [ 104 | { 'name' => '', 'type' => 'MX', 'data' => "#{mail_config['mx']}" }, 105 | # { 'name' => 'mail', 'type' => 'A', 'data' => mail_ipv4 }, 106 | # { 'name' => 'mail', 'type' => 'AAAA', 'data' => mail_ipv6 }, 107 | { 'name' => '_dmarc', 'type' => 'TXT', 'data' => "#{mail_config['dmarc']}" }, 108 | { 'name' => 'dkim1._domainkey', 'type' => 'TXT', 'data' => "#{dkim_dns}" }, 109 | { 'name' => '', 'type' => 'TXT', 'data' => "#{mail_config['spf']}" } 
110 | ].each { |d| 111 | @log.info("Creating/Updating Mail Record #{d['name']} for #{d['domain']} #{d['type']} #{d['data']}") 112 | dns_upsert(domain, d['name'], d['type'], d['data']) 113 | } 114 | } 115 | end 116 | 117 | 118 | def provision(rebuild = false, server = nil) 119 | 120 | @config['servers'].each { |server, cfg| 121 | 122 | mail_dns(server) 123 | 124 | cfg['dns'].each { |dns_set, domains| 125 | domains.each { |domain| 126 | {'ipv4':'A', 'ipv6': 'AAAA'}.each { |ipv, record_type| 127 | ipv = ipv.to_s 128 | if @config['servers'][server]['ip'][dns_set].include?(ipv) 129 | cur_ip = @config['servers'][server]['ip'][dns_set][ipv] 130 | dns_upsert(Util.base_domain(domain), Util.host_domain(domain), record_type, cur_ip) 131 | else 132 | @log.warn("No #{ipv} records for #{dns_set}") 133 | end 134 | } 135 | } 136 | } 137 | } 138 | 139 | end 140 | 141 | end 142 | -------------------------------------------------------------------------------- /lib/passstore.rb: -------------------------------------------------------------------------------- 1 | require 'gpgme' 2 | require 'securerandom' 3 | require 'fileutils' 4 | 5 | class PassStore 6 | 7 | def initialize(config) 8 | folder = config.fetch('security', {}).fetch('folder', 'bee2') 9 | @password_store = config.fetch('security', {}).fetch('password_store', File.join(Dir.home, '.password-store', folder)) 10 | @crypt = GPGME::Crypto.new 11 | @pgp_id = config['security']['pgp_id'] 12 | end 13 | 14 | ## rescue GPGME::Error::DecryptFailed upstream for failed passwords 15 | def get_or_generate_password(folder, name) 16 | pfile = File.join(@password_store, folder, "#{name}.gpg") 17 | if File.file?(pfile) 18 | open(pfile, 'rb') do |f| 19 | return @crypt.decrypt(f.read()).read().strip() 20 | end 21 | else 22 | new_passowrd = SecureRandom.hex 23 | FileUtils.mkdir_p File.join(@password_store, folder) 24 | File.open(pfile, 'wb') do |out| 25 | @crypt.encrypt new_passowrd, :recipients=>@pgp_id, :output=>out 26 | end 27 | return 
new_passowrd 28 | end 29 | end 30 | 31 | end 32 | -------------------------------------------------------------------------------- /lib/provisioner.rb: -------------------------------------------------------------------------------- 1 | class Provisioner 2 | 3 | def initialize(config, log) 4 | @log = log 5 | @servers = config['servers'] 6 | @inventory_files = config['inventory'] 7 | 8 | if(config['provisioner'].has_key?('state_file')) 9 | @state_file = config['provisioner']['state_file'] 10 | if File.exists? @state_file 11 | @state = YAML.load_file(@state_file) 12 | else 13 | @state = {} 14 | end 15 | end 16 | 17 | end 18 | 19 | # Convert domian array (from YAML config) to a hash from each base domain to subdomains 20 | # Input: [ 'something.example.com', 'some.other.example.com', 'example.net', 'somethingelse.example.net', 'example.org' ] 21 | # Output: { 'example.com' => [ 'something', 'some.other' ], 'example.net' => ['somethingelse'], 'example.org' => [] } 22 | # special thanks to elomatreb on #ruby/freenode IRC 23 | def domain_records(records) 24 | Hash[records.group_by {|d| d[/\w+\.\w+\z/] }.map do |suffix, domains| 25 | [suffix, domains.map {|d| d.gsub(suffix, "").gsub(/\.\z/, "") }.reject {|d| d == "" }] 26 | end] 27 | end 28 | 29 | def create_subdomains(server_name, subdomains, domain, server_config, typ_cfg) 30 | subdomains.each { |s| 31 | typ_cfg.map { |ip_type| 32 | case ip_type 33 | when 'ipv4' 34 | {'domain' => domain, 'name' => s, 'type' => 'A', 'data' => server_config['ipv4']['addr'] } 35 | when 'ipv6' 36 | {'domain' => domain, 'name' => s, 'type' => 'AAAA', 'data' => server_config['ipv6']['addr'] } 37 | when 'ipv6-web' 38 | {'domain' => domain, 'name' => s, 'type' => 'AAAA', 'data' => server_config['ipv6']['static_web'] } 39 | when 'private_ip' 40 | {'domain' => domain, 'name' => s, 'type' => 'A', 'data' => @servers[server_name]['private_ip'] } 41 | when 'web' 42 | [{'domain' => domain, 'name' => "www.#{s}", 'type' => 'A', 'data' => 
server_config['ipv4']['addr'] }, 43 | {'domain' => domain, 'name' => "www.#{s}", 'type' => 'AAAA', 'data' => server_config['ipv6']['static_web'] }] 44 | end 45 | }.flatten.each { |d| 46 | @log.info("Creating/Updating #{d['name']}.#{d['domain']} #{d['type']} #{d['data']}") 47 | dns_update_check(d) 48 | } 49 | } 50 | end 51 | 52 | def list_domain_records(domain, ok_func, err_func) 53 | @log.error("Unimplemented domain_records function") 54 | exit 2 55 | end 56 | 57 | def create_domain(domain, ip) 58 | @log.error("Unimplemented create_domain function") 59 | exit 2 60 | end 61 | 62 | def dns_update_check(r) 63 | @log.error("Unimplemented dns_update_check function") 64 | exit 2 65 | end 66 | 67 | def update_dns 68 | @state['servers'].each { |server, config| 69 | dns_sets = {"public"=>["ipv4", "ipv6"], "private"=>["private_ip"], "web"=>["ipv4", "ipv6-web", "web"]} 70 | ipv4 = @state['servers'][server]['ipv4']['addr'] 71 | ipv6 = @state['servers'][server]['ipv6']['addr'] 72 | dns_sets.each { |ds_type, typ_cfg| 73 | records = @servers[server]['dns'][ds_type] 74 | 75 | if ds_type == 'web' 76 | ipv6 = @state['servers'][server]['ipv6']['static_web'] 77 | end 78 | 79 | if not records.nil? 80 | domain_records(records).each { |domain, subdomains| 81 | list_domain_records(domain, -> { 82 | @log.info("Domain #{domain} exists") 83 | if ds_type == 'web' 84 | dns_update_check({'domain' => domain, 'name' => '', 'type' => 'A', 'data' => ipv4 }) 85 | dns_update_check({'domain' => domain, 'name' => '', 'type' => 'AAAA', 'data' => ipv6 }) 86 | create_subdomains(server, ['www'], domain, config, ['ipv4', 'ipv6-web']) 87 | end 88 | create_subdomains(server, subdomains, domain, config, typ_cfg) 89 | }, -> { 90 | @log.info("No records for #{domain}. 
Creating Base Record.") 91 | if ds_type == 'web' 92 | @log.debug("IP Map: #{server} -> #{ipv4}/#{ipv6}") 93 | create_domain(domain, ipv4) 94 | dns_update_check({'domain' => domain, 'name' => '', 'type' => 'AAAA', 'data' => ipv6 }) 95 | create_subdomains(server, ['www'], domain, config, ['ipv4', 'ipv6-web']) 96 | else 97 | create_domain(domain, nil) 98 | end 99 | create_subdomains(server, subdomains, domain, config, typ_cfg) 100 | }) 101 | } 102 | end 103 | } 104 | } 105 | end 106 | 107 | def web_ipv6 108 | @servers.select { |name, s| 109 | s['dns'].has_key?('web') 110 | }.each { |name,cfg| 111 | if not @state['servers'][name].has_key?('static_web') 112 | ipv6 = @state['servers'][name]['ipv6']['subnet'] + cfg['ipv6']['docker']['static_web'] 113 | @log.info("Creating IPv6 Web IP #{ipv6} for #{name}") 114 | @state['servers'][name]['ipv6']['static_web'] = ipv6 115 | end 116 | } 117 | save_state 118 | end 119 | 120 | def write_inventory 121 | ['public', 'private'].each { |inv_type| 122 | inventory_file = @inventory_files[inv_type] 123 | File.open(inventory_file, 'w') { |pub| 124 | @log.info("Writing #{inv_type} inventory to #{inventory_file}") 125 | @servers.each { |server, settings| 126 | if not settings['dns'][inv_type].nil? 
127 | pub.write("[#{server}]\n") 128 | pub.write(settings['dns'][inv_type].first) 129 | pub.write(" server_name=#{server}") 130 | pub.write("\n\n") 131 | end 132 | } 133 | } 134 | } 135 | end 136 | 137 | def save_state() 138 | File.open(@state_file, 'w') { |f| YAML.dump(@state, f) } 139 | end 140 | 141 | end 142 | -------------------------------------------------------------------------------- /lib/synchandler.rb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | 3 | class SyncHandler 4 | 5 | private def usage() 6 | doc =<<-USAGE 7 | Usage: bee2 -c -s SERVER 8 | 9 | help - This usage message 10 | all - Sync all servers 11 | - Sync individual server 12 | 13 | Performs an rsync operation for the given server 14 | based on the settings in the sync section of the 15 | configuration file. 16 | USAGE 17 | 18 | print(doc) 19 | exit 1 20 | end 21 | 22 | def initialize(config, log, server) 23 | @log = log 24 | @config = config 25 | 26 | case server 27 | when 'all' 28 | when 'test' 29 | else 30 | cmds = rsync_cmds(server) 31 | if cmds.empty? 
32 | else 33 | run_rsync_cmds(cmds) 34 | end 35 | end 36 | end 37 | 38 | def rsync_cmds(server) 39 | server_dns = @config.fetch('servers', {}).fetch(server, {}).fetch('dns', {}).fetch('private', {}) 40 | @config['sync'].fetch(server, {}).fetch('push', {}).map { |p| 41 | (local,remote) = p.split(':') 42 | ['-av', '--delete', '-e', "\"ssh -i #{@config['provisioner']['ssh_key']['private']}\"", 43 | local, "root@#{server_dns.first}:#{remote}"] 44 | } 45 | end 46 | 47 | def run_rsync_cmds(cmds) 48 | cmds.each { |c| 49 | @log.info("Syncing #{c[-2]} to #{c[-1]}") 50 | system((['rsync'] + c).join(' ')) 51 | } 52 | end 53 | 54 | end 55 | -------------------------------------------------------------------------------- /lib/util.rb: -------------------------------------------------------------------------------- 1 | require 'public_suffix' 2 | 3 | class Util 4 | 5 | def self.lstrip_underscore(s) 6 | s.sub(/^[_:]*/,"") 7 | end 8 | 9 | def self.base_domain(domain) 10 | PublicSuffix.domain(domain) 11 | end 12 | 13 | def self.host_domain(domain) 14 | if Util.base_domain(domain) == domain 15 | "" 16 | else 17 | domain.chomp(".#{Util.base_domain(domain)}") 18 | end 19 | end 20 | 21 | end 22 | -------------------------------------------------------------------------------- /lib/vultr.rb: -------------------------------------------------------------------------------- 1 | require 'yaml' 2 | require 'logger' 3 | require 'net/http' 4 | require 'net/https' 5 | require 'uri' 6 | require 'cgi' 7 | require 'json' 8 | require 'openssl' 9 | require 'base64' 10 | require 'ipaddr' 11 | require_relative 'provisioner' 12 | 13 | class VultrProvisioner < Provisioner 14 | 15 | SSH_KEY_ID = 'b2-provisioner' 16 | 17 | def initialize(config, log) 18 | super(config, log) 19 | @api_key = config['provisioner']['token'] 20 | @vpn = config['openvpn'] 21 | @ssh_key = File.read(config['provisioner']['ssh_key']['public']) 22 | @default_region = config['provisioner']['region'] 23 | end 24 | 25 | def 
request(method, path, args = {}, ok_lambda = nil, error_code = nil, err_lambda = nil) 26 | uri = URI.parse("https://api.vultr.com/v1/#{path}") 27 | https = Net::HTTP.new(uri.host, uri.port) 28 | https.use_ssl = true 29 | 30 | req = case method 31 | when 'POST' 32 | r = Net::HTTP::Post.new(uri.path, initheader = {'API-Key' => @api_key }) 33 | r.set_form_data(args) 34 | r 35 | when 'GET' 36 | path = "#{uri.path}?".concat(args.collect { |k,v| "#{k}=#{CGI::escape(v.to_s)}" }.join('&')) 37 | Net::HTTP::Get.new(path, initheader = {'API-Key' => @api_key }) 38 | end 39 | 40 | res = https.request(req) 41 | 42 | case res.code.to_i 43 | when 503 44 | @log.warn('Rate Limit Reached. Waiting...') 45 | sleep(2) 46 | request(method, path, args, ok_lambda, error_code, err_lambda) 47 | when 200 48 | if not ok_lambda.nil? 49 | ok_lambda.() 50 | else 51 | if res.body == '' 52 | '' 53 | else 54 | JSON.parse(res.body) 55 | end 56 | end 57 | else 58 | if not error_code.nil? and res.code.to_i == error_code 59 | err_lambda.() 60 | else 61 | @log.fatal('Error Executing Vultr Command. Aborting...') 62 | @log.fatal("#{res.code} :: #{res.body}") 63 | exit(2) 64 | end 65 | end 66 | end 67 | 68 | def provision(rebuild = false) 69 | ensure_ssh_keys 70 | reserve_ips 71 | populate_ips 72 | web_ipv6 73 | if rebuild 74 | @log.info('Rebuilding Servers') 75 | delete_provisioned_servers 76 | end 77 | ensure_servers 78 | update_dns 79 | vpn_dns 80 | cleanup_dns 81 | mail_dns 82 | write_inventory 83 | 84 | # Per ticket TTD-04IGO, removing auto assigned IPv6 addresses is impossible via the API 85 | # remove_unwanted_ips 86 | end 87 | 88 | def dns_update_check(r) 89 | current = request('GET', 'dns/records', {'domain' => r['domain']}).find{ |c| 90 | c['type'] == r['type'] and c['name'] == r['name'] and c['data'] == r['data'] 91 | } 92 | msg = "Domain: #{r['domain']}, Name: #{r['name']}, Type: #{r['type']}" 93 | if current.nil? 
94 | request('POST', 'dns/create_record', r) 95 | @log.info("Record Created :: #{msg}") 96 | else 97 | r['RECORDID'] = current['RECORDID'] 98 | request('POST', 'dns/update_record', r) 99 | @log.info("Record Updated :: #{msg}") 100 | end 101 | end 102 | 103 | # Remove anything set to 127.0.0.1 and MX records 104 | def cleanup_dns() 105 | request('GET', 'dns/list').each {|domain| 106 | request('GET', 'dns/records', {'domain' => domain['domain']}).each { |record| 107 | if(record['data'] == '127.0.0.1' or 108 | (record['type'] == 'MX' and record['data'] == domain['domain']) or 109 | (record['type'] == 'CNAME' and record['data'] == domain['domain']) 110 | ) 111 | @log.info("Removing #{record['type']} #{record['name']}.#{domain['domain']}") 112 | request('POST', 'dns/delete_record', { 'RECORDID' => record['RECORDID'], 'domain' => domain['domain']}) 113 | end 114 | } 115 | } 116 | end 117 | 118 | def vpn_dns() 119 | if not @vpn.nil? 120 | @log.info("Updating VPN DNS for #{@vpn['dnsdomain']}") 121 | 122 | # TODO: domain creation if it doesn't exist? 123 | 124 | @vpn['clients'].each { |client, cfg| 125 | dns_update_check({'domain' => @vpn['dnsdomain'], 'name' => client, 'type' => 'A', 'data' => cfg['ip'] }) 126 | } 127 | end 128 | end 129 | 130 | def mail_dns() 131 | @servers.select { |srv, cfg| cfg.key?('mail') }.each { |server, config| 132 | 133 | # reverse DNS 134 | subid = @state['servers'][server]['SUBID'] 135 | ipv4 = @state['servers'][server]['ipv4']['addr'] 136 | 137 | # Get Vult's auto assigned IPv6 Address, and our reserved on 138 | rec = domain_records([config['mail']['mx']]).first 139 | request('GET', 'server/list_ipv6', { 'SUBID' => subid })[subid].each { |addrs| 140 | # This is really hacky. I hate how Vultr assigns you an IPv6 Address you cannot remove. 141 | # For mail and only mail servers, we create a AAAA records for both of the IPs. 
142 | 143 | @log.info("Creating/Updating AAAA mail server record for IPv6 #{addrs['ip']} :: #{rec[1].first}.#{rec[0]} AAAA") 144 | dns_update_check({'domain' => rec[0], 'name' => rec[1].first, 'type' => 'AAAA', 'data' => addrs['ip'] }) 145 | 146 | @log.info("Creating Reverse DNS for Mail records #{addrs['ip']} to #{config['mail']['mx']}") 147 | request('POST', 'server/reverse_set_ipv6', { 'SUBID' => subid, 'ip' => addrs['ip'], 'entry' => config['mail']['mx']}) 148 | } 149 | 150 | @log.info("Creating/Updating A mail server record for IPv4 #{ipv4} :: #{rec[1].first}.#{rec[0]} ") 151 | dns_update_check({'domain' => rec[0], 'name' => rec[1].first, 'type' => 'A', 'data' => ipv4 }) 152 | 153 | @log.info("Creating Reverse DNS for Mail records #{ipv4} to #{config['mail']['mx']}") 154 | request('POST', 'server/reverse_set_ipv4', { 'SUBID' => subid, 'ip' => ipv4, 'entry' => config['mail']['mx']}) 155 | 156 | dkim_key = OpenSSL::PKey::RSA.new(File.read(config['mail']['dkim_private'])) 157 | b64_key = Base64.strict_encode64(dkim_key.public_key.to_der) 158 | dkim_dns = "k=rsa; t=s; p=#{b64_key}" 159 | 160 | config['mail']['domains'].each { |domain| 161 | [ 162 | {'domain' => domain, 'name' => '', 'type' => 'MX', 'data' => config['mail']['mx'], 'priority' => 10 }, 163 | {'domain' => domain, 'name' => '_dmarc', 'type' => 'TXT', 'data' => "\"#{config['mail']['dmarc']}\"" }, 164 | {'domain' => domain, 'name' => 'dkim1._domainkey', 'type' => 'TXT', 'data' => "\"#{dkim_dns}\"" }, 165 | {'domain' => domain, 'name' => '', 'type' => 'TXT', 'data' => "\"#{config['mail']['spf']}\"" } 166 | ].each { |d| 167 | @log.info("Creating/Updating Mail Record #{d['name']}.#{d['domain']} #{d['type']} #{d['data']}") 168 | dns_update_check(d) 169 | } 170 | } 171 | } 172 | end 173 | 174 | def list_domain_records(domain, ok_func, err_func) 175 | request('GET', 'dns/records', {'domain' => domain}, ok_func, 412, err_func) 176 | end 177 | 178 | def create_domain(domain, ip) 179 | ip = ip.nil? ? 
'127.0.0.1' : ip 180 | request('POST', 'dns/create_domain', {'domain' => domain, 'serverip' => ip }) 181 | end 182 | 183 | def region_for_server(server) 184 | regioncode = (not @servers[server]['region'].nil?) ? @servers[server]['region'] : @default_region 185 | dcid = request('GET', 'regions/list').find { |id,region| 186 | region['regioncode'] == regioncode 187 | }.last['DCID'] 188 | if dcid.nil? 189 | @log.fatal("Invalid Data Center #{regioncode}") 190 | exit 1 191 | end 192 | dcid 193 | end 194 | 195 | def ensure_servers 196 | current_servers = request('GET', 'server/list').map { |k,v| v['label'] } 197 | create_servers = @state['servers'].keys.reject { |server| current_servers.include? server } 198 | create_servers.each { |server| 199 | @log.info("Creating #{server} in Region #{region_for_server(server)}") 200 | server_config = {'DCID' => region_for_server(server), 'VPSPLANID' => @servers[server]['plan'], 'OSID' => @servers[server]['os'], 201 | 'enable_private_network' => 'yes', 202 | 'enable_ipv6' => 'yes', 203 | 'label' => server, 'SSHKEYID' => @state['ssh_key_id'], 204 | 'hostname' => server, 'reserved_ip_v4' => @state['servers'][server]['ipv4']['subnet'] } 205 | subid = request('POST', 'server/create', server_config)['SUBID'] 206 | @state['servers'][server]['SUBID'] = subid 207 | save_state 208 | 209 | wait_server(server, 'status', 'active') 210 | wait_server(server, 'server_state', 'ok') 211 | 212 | # Attach our Reserved /Public IPv6 Address 213 | ip = @state['servers'][server]['ipv6']['subnet'] 214 | @log.info("Attaching #{ip} to #{server}") 215 | request('POST', 'reservedip/attach', {'ip_address' => ip, 'attach_SUBID' => subid}, -> { 216 | @log.info('IP Attached') 217 | }, 412, ->{ 218 | @log.warn('Unable to attach IP. 
Rebooting VM') 219 | request('POST', 'server/reboot', {'SUBID' => subid}) 220 | }) 221 | 222 | # We can only get the full IPv6 address after it's attached to a server 223 | # IPv4 subnets are their IP addresses, so we'll set that here too 224 | tst = request('GET', 'server/list') 225 | srv = tst.map { |subid, s| s }.select { |ss| ss['label'] == server }.first 226 | srv_v6_net = @state['servers'][server]['ipv6']['subnet'] 227 | ipv6 = srv['v6_networks'].select { |net| net['v6_network'] == srv_v6_net }.first['v6_main_ip'] 228 | @state['servers'][server]['ipv6']['addr'] = ipv6 229 | @state['servers'][server]['ipv4']['addr'] = srv['main_ip'] 230 | @log.info("Updating IPv6 for #{server} from net/#{srv_v6_net} to IP/#{ipv6}") 231 | @log.info("Setting IPv4 for #{server} to #{srv['main_ip']}") 232 | save_state 233 | } 234 | end 235 | 236 | private def remove_ssh_key(host_or_ip) 237 | @log.info("Removing SSH Key for #{host_or_ip}") 238 | Process.fork do 239 | exec('ssh-keygen', '-R', host_or_ip) 240 | end 241 | Process.wait 242 | end 243 | 244 | private def delete_server(server) 245 | @log.info("Deleting #{server}") 246 | request('POST', 'server/destroy', {'SUBID' => @state['servers'][server]['SUBID']}, -> { 247 | @log.info("Server #{server} Deleted") 248 | 249 | # SSH Key Cleanup (DNS, private IP, public IPv4/v6) 250 | ['public', 'private'].each { |dns_type| 251 | @servers[server]['dns'][dns_type].each { |hostname| 252 | remove_ssh_key(hostname) 253 | } 254 | } 255 | remove_ssh_key(@servers[server]['private_ip']) 256 | ['ipv4', 'ipv6'].each { |ip_type| 257 | remove_ssh_key(@state['servers'][server][ip_type]['addr']) 258 | } 259 | }, 412, -> { 260 | @log.warn("Unable to destory server. 
Servers cannot be destoryed within 5 minutes of creation") 261 | @log.warn("Waiting 15 Seconds") 262 | sleep(15) 263 | delete_server(server) 264 | }) 265 | end 266 | 267 | def delete_provisioned_servers 268 | current_servers = request('GET', 'server/list').map { |k,v| v['label'] } 269 | delete_servers = @state['servers'].keys.reject { |server| not current_servers.include? server } 270 | delete_servers.each { |server| 271 | delete_server(server) 272 | while request('GET', 'reservedip/list').find { |k,v| v['label'] == server }.last['attached_SUBID'] 273 | @log.info("Waiting on Reserved IP to Detach from #{server}") 274 | sleep(5) 275 | end 276 | } 277 | end 278 | 279 | def wait_server(server, field, field_value, field_state = true) 280 | while true 281 | current_servers = request('GET', 'server/list').map { |k,v| 282 | if v['label'] == server 283 | if (field_state and v[field] != field_value) or (!field_state and v[field] == field_value) 284 | verb = field_state ? 'have' : 'change from' 285 | @log.info("Waiting on #{server} to #{verb} #{field} #{field_value}. Current state: #{v[field]}") 286 | sleep(5) 287 | else 288 | @log.info("Complete. Server: #{server} / #{field} => #{field_value}") 289 | return true 290 | end 291 | end 292 | } 293 | end 294 | end 295 | 296 | def ensure_ssh_keys 297 | key_list = request('GET', 'sshkey/list').find { |k,v| v['name'] == SSH_KEY_ID } 298 | if key_list.nil? or not key_list.any? 299 | @log.info("Adding SSH Key #{SSH_KEY_ID}") 300 | @state['ssh_key_id'] = request('POST', 'sshkey/create', {'name' => SSH_KEY_ID, 'ssh_key' =>@ssh_key})['SSHKEYID'] 301 | save_state 302 | end 303 | end 304 | 305 | def reserve_ips() 306 | @servers.each { |s,v| 307 | if @state['servers'].nil? 308 | @state['servers'] = {} 309 | end 310 | if @state['servers'][s].nil? 
311 | @state['servers'][s] = {} 312 | @state['servers'][s]['ipv4'] = request('POST', 'reservedip/create', { 313 | 'DCID' => region_for_server(s), 'ip_type'=> 'v4', 'label'=> s}) 314 | @state['servers'][s]['ipv6'] = request('POST', 'reservedip/create', { 315 | 'DCID' => region_for_server(s), 'ip_type'=> 'v6', 'label'=> s}) 316 | @log.info("Reserved IPs for: #{s} in Region #{region_for_server(s)}") 317 | save_state 318 | end 319 | } 320 | end 321 | 322 | def populate_ips() 323 | ip_list = request('GET', 'reservedip/list') 324 | ['ipv4', 'ipv6'].each { |ip_type| 325 | @servers.each { |s,v| 326 | if @state['servers'][s][ip_type]['net'].nil? 327 | ip = ip_list.find { |x,y| x == @state['servers'][s][ip_type]['SUBID'].to_s } 328 | @state['servers'][s][ip_type]['subnet'] = ip.last['subnet'] 329 | @log.info("Server #{s} Assigned Subnet #{ip.last['subnet']}/#{ip.last['subnet_size']}") 330 | end 331 | } 332 | } 333 | save_state 334 | end 335 | 336 | end 337 | -------------------------------------------------------------------------------- /spec/docker_addhost_spec.rb: -------------------------------------------------------------------------------- 1 | require 'yaml' 2 | require_relative '../lib/dockerhandler' 3 | require 'logger' 4 | require 'mocks.rb' 5 | 6 | log = Logger.new(STDOUT) 7 | 8 | RSpec.describe DockerHandler do 9 | config = <<-CONFIG 10 | provisioner: 11 | state_file: spec/test-state.yml 12 | docker: 13 | serverone: 14 | prefix: tra 15 | applications: 16 | service_with_custom_hostname: 17 | build_dir: Foo 18 | hostname: special_snowflake 19 | 20 | CONFIG 21 | cfg_yaml = YAML.load(config) 22 | config = DockerHandler.new(cfg_yaml, log, 'serverone:test', MockPassStore.new) 23 | describe 'service with custom container hostname' do 24 | it 'has special_snowflake as container hostname' do 25 | r = config.config_to_containers('apps', 'service_with_custom_hostname') 26 | expect(r['tra-app-service_with_custom_hostname']['container_args']['Hostname']).to 
eq('special_snowflake') 27 | end 28 | end 29 | 30 | # describe 'service without special permissions' do 31 | # it 'has no cap permissions' do 32 | # r = config.config_to_containers('apps', 'service_without_special') 33 | # expect(r['tra-app-service_without_special']['container_args']['HostConfig']['CapAdd']).to be_nil 34 | # end 35 | # end 36 | 37 | # describe 'service with multiple special permissions' do 38 | # it 'has list of caps' do 39 | # r = config.config_to_containers('apps', 'service_with_multi_special') 40 | # expect(r['tra-app-service_with_multi_special']['container_args']['HostConfig']['CapAdd']).to eq(['NET_ADMIN', 'SYS_ADMIN']) 41 | # end 42 | # end 43 | 44 | end 45 | -------------------------------------------------------------------------------- /spec/docker_capapp_spec.rb: -------------------------------------------------------------------------------- 1 | require 'yaml' 2 | require_relative '../lib/dockerhandler' 3 | require 'logger' 4 | require 'mocks.rb' 5 | 6 | log = Logger.new(STDOUT) 7 | 8 | RSpec.describe DockerHandler do 9 | prefix = 'foo2' 10 | prefix2 = 'bar3' 11 | config = <<-CONFIG 12 | provisioner: 13 | state_file: spec/test-state.yml 14 | docker: 15 | serverone: 16 | prefix: tra 17 | applications: 18 | service_with_special: 19 | build_dir: Foo 20 | capadd: 21 | - NET_ADMIN 22 | service_without_special: 23 | build_dir: Foo 24 | service_with_multi_special: 25 | build_dir: Foo 26 | capadd: 27 | - NET_ADMIN 28 | - SYS_ADMIN 29 | CONFIG 30 | cfg_yaml = YAML.load(config) 31 | config = DockerHandler.new(cfg_yaml, log, 'serverone:test', MockPassStore.new) 32 | 33 | describe 'service with one special permissions' do 34 | it 'added NET_ADMIN' do 35 | r = config.config_to_containers('apps', 'service_with_special') 36 | expect(r['tra-app-service_with_special']['container_args']['HostConfig']['CapAdd']).to eq(['NET_ADMIN']) 37 | end 38 | end 39 | 40 | describe 'service without special permissions' do 41 | it 'has no cap permissions' do 42 | r = 
config.config_to_containers('apps', 'service_without_special') 43 | expect(r['tra-app-service_without_special']['container_args']['HostConfig']['CapAdd']).to be_nil 44 | end 45 | end 46 | 47 | describe 'service with multiple special permissions' do 48 | it 'has list of caps' do 49 | r = config.config_to_containers('apps', 'service_with_multi_special') 50 | expect(r['tra-app-service_with_multi_special']['container_args']['HostConfig']['CapAdd']).to eq(['NET_ADMIN', 'SYS_ADMIN']) 51 | end 52 | end 53 | 54 | end 55 | -------------------------------------------------------------------------------- /spec/docker_multinet_spec.rb: -------------------------------------------------------------------------------- 1 | require 'yaml' 2 | require_relative '../lib/dockerhandler' 3 | require 'logger' 4 | require 'mocks.rb' 5 | 6 | log = Logger.new(STDOUT) 7 | 8 | RSpec.describe DockerHandler do 9 | multi_net = <<-NETCONFIG 10 | provisioner: 11 | state_file: spec/test-state.yml 12 | servers: 13 | leaderone: 14 | ip: 15 | bastion: 16 | ipv4: 24.25.26.27 17 | ipv6: a:b:c:d1b 18 | public: 19 | ipv4: 10.20.30.40 20 | ipv6: a:b:c:d102 21 | anon: 22 | ipv4: 1.2.3.4 23 | ipv6: a:b:c:ffe 24 | nosix: 25 | ipv4: 169.198.10.1 26 | private: 27 | ipv4: 10.10.100.1 28 | docker: 29 | leaderone: 30 | prefix: am 31 | networks: 32 | public: 33 | ipv4: 172.20.0.1 34 | ipv6: fd00:20:10aa::/48 35 | masquerade: off 36 | bridge: br10 37 | anon: 38 | ipv4: 172.30.0.1 39 | ipv6: fd00:30:10bb::/48 40 | masquerade: off 41 | bridge: br11 42 | nosix: 43 | ipv4: 10.9.8.1 44 | onlysix: 45 | ipv6: fd00:10:9988::/48 46 | bridge: br10 47 | jobs: 48 | goodjob: 49 | image: nginx 50 | networks: 51 | - public 52 | secretjob: 53 | image: apache 54 | networks: 55 | - anon 56 | ujob: 57 | build_dir: Foo 58 | applications: 59 | superbot: 60 | build_dir: SuperBot 61 | env: 62 | production: false 63 | volumes: 64 | - data:/someapp 65 | networks: 66 | - public 67 | hiddenbot: 68 | image: scum/hiddenbot:v1.2 69 | networks: 70 
| - anon 71 | disconbot: 72 | image: foo:lts 73 | frontendbot: 74 | image: bee 75 | networks: 76 | - public 77 | - nosix 78 | ports: 79 | - 10 80 | - 4410 81 | NETCONFIG 82 | 83 | cfg_yaml = YAML.load(multi_net) 84 | config_leo = DockerHandler.new(cfg_yaml, log, 'leaderone:test', MockPassStore.new) 85 | cfg_nets = config_leo.config_for_networks 86 | 87 | describe "networking mapping" do 88 | 89 | it "adds a network to an applications that requests it" do 90 | r = config_leo.config_to_containers('apps', 'superbot') 91 | r_config = r["am-app-superbot"]['container_args']['NetworkingConfig']['EndpointsConfig'] 92 | expect(r_config).to have_key("am-public") 93 | 94 | s = config_leo.config_to_containers('apps', 'hiddenbot') 95 | s_config = s["am-app-hiddenbot"]['container_args']['NetworkingConfig']['EndpointsConfig'] 96 | expect(s_config).to have_key("am-anon") 97 | end 98 | 99 | it "adds a network to a job that requests it" do 100 | r = config_leo.config_to_containers('jobs', 'goodjob') 101 | r_config = r["am-job-goodjob"]['container_args']['NetworkingConfig']['EndpointsConfig'] 102 | expect(r_config).to have_key("am-public") 103 | 104 | s = config_leo.config_to_containers('jobs', 'secretjob') 105 | s_config = s["am-job-secretjob"]['container_args']['NetworkingConfig']['EndpointsConfig'] 106 | expect(s_config).to have_key("am-anon") 107 | end 108 | 109 | it "adds default network for app if none is specified" do 110 | t = config_leo.config_to_containers('apps', 'disconbot') 111 | t_config = t["am-app-disconbot"]['container_args']['NetworkingConfig']['EndpointsConfig'] 112 | expect(t_config).to have_key("am-network") 113 | end 114 | 115 | it "adds default network for job if none is specified" do 116 | t = config_leo.config_to_containers('jobs', 'ujob') 117 | t_config = t["am-job-ujob"]['container_args']['NetworkingConfig']['EndpointsConfig'] 118 | expect(t_config).to have_key("am-network") 119 | end 120 | 121 | it "adds multiple networks if specified" do 122 | t = 
config_leo.config_to_containers('apps', 'frontendbot') 123 | t_config = t["am-app-frontendbot"]['container_args']['NetworkingConfig']['EndpointsConfig'] 124 | expect(t_config).to have_key("am-public") 125 | expect(t["am-app-frontendbot"]['additional_networks'].size).to be(1) 126 | expect(t["am-app-frontendbot"]['additional_networks']).to include("am-nosix") 127 | end 128 | 129 | end 130 | 131 | 132 | describe "network creations" do 133 | 134 | it "creates all user defined networks" do 135 | expect(cfg_nets.size).to be(4) 136 | end 137 | 138 | it "correctly enable IPv6" do 139 | expect(cfg_nets['public']['EnableIPv6']).to be(true) 140 | expect(cfg_nets['anon']['EnableIPv6']).to be(true) 141 | expect(cfg_nets['nosix']['EnableIPv6']).to be(false) 142 | expect(cfg_nets['onlysix']['EnableIPv6']).to be(true) 143 | end 144 | 145 | it "setups up subnets for each network" do 146 | expect(cfg_nets['public']['IPAM']['Config'].size).to be(2) 147 | expect(cfg_nets['public']['IPAM']['Config']).to include({"Subnet" => "fd00:20:10aa::/48"}) 148 | expect(cfg_nets['public']['IPAM']['Config']).to include({"Subnet" => "172.20.0.1"}) 149 | expect(cfg_nets['anon']['IPAM']['Config'].size).to be(2) 150 | expect(cfg_nets['anon']['IPAM']['Config']).to include({"Subnet" => "fd00:30:10bb::/48"}) 151 | expect(cfg_nets['anon']['IPAM']['Config']).to include({"Subnet" => "172.30.0.1"}) 152 | expect(cfg_nets['nosix']['IPAM']['Config'].size).to be(1) 153 | expect(cfg_nets['nosix']['IPAM']['Config']).to include({"Subnet" => "10.9.8.1"}) 154 | expect(cfg_nets['onlysix']['IPAM']['Config'].size).to be(1) 155 | expect(cfg_nets['onlysix']['IPAM']['Config']).to include({"Subnet" => "fd00:10:9988::/48"}) 156 | end 157 | 158 | it "binds to public ipv4 address if defined" do 159 | expect(cfg_nets['public']['Options']['com.docker.network.bridge.host_binding_ipv4']).to eq('10.20.30.40') 160 | expect(cfg_nets['anon']['Options']['com.docker.network.bridge.host_binding_ipv4']).to eq('1.2.3.4') 161 | 
expect(cfg_nets['nosix']['Options']['com.docker.network.bridge.host_binding_ipv4']).to eq('169.198.10.1') 162 | expect(cfg_nets['onlysix']['Options']['com.docker.network.bridge.host_binding_ipv4']).to be_nil 163 | end 164 | 165 | it "corrects enables masquerade" do 166 | expect(cfg_nets['public']['Options']['com.docker.network.bridge.enable_ip_masquerade']).to eq("false") 167 | expect(cfg_nets['anon']['Options']['com.docker.network.bridge.enable_ip_masquerade']).to eq("false") 168 | expect(cfg_nets['nosix']['Options']['com.docker.network.bridge.enable_ip_masquerade']).to be_nil 169 | expect(cfg_nets['onlysix']['Options']['com.docker.network.bridge.enable_ip_masquerade']).to be_nil 170 | end 171 | 172 | it "selects the correct bridge" do 173 | expect(cfg_nets['public']['Options']['com.docker.network.bridge.name']).to eq("br10") 174 | expect(cfg_nets['anon']['Options']['com.docker.network.bridge.name']).to eq("br11") 175 | expect(cfg_nets['nosix']['Options']['com.docker.network.bridge.name']).to be_nil 176 | expect(cfg_nets['onlysix']['Options']['com.docker.network.bridge.name']).to eq("br10") 177 | end 178 | 179 | it "adds the bound IPv6 Address" do 180 | s = config_leo.config_to_containers('apps', 'frontendbot') 181 | expect(s["am-app-frontendbot"]['container_args']['HostConfig']['PortBindings']).to eq( 182 | {"10/tcp"=> 183 | [{'HostPort'=>'10'},{'HostPort'=>'10', 'HostIp'=>'a:b:c:d102'}], 184 | "4410/tcp"=> 185 | [{'HostPort'=>'4410'},{'HostPort'=>'4410', 'HostIp'=>'a:b:c:d102'}] 186 | } 187 | ) 188 | end 189 | 190 | end 191 | 192 | end 193 | -------------------------------------------------------------------------------- /spec/docker_static_ip_addhost_spec.rb: -------------------------------------------------------------------------------- 1 | require 'yaml' 2 | require_relative '../lib/dockerhandler' 3 | require 'logger' 4 | require 'mocks.rb' 5 | 6 | log = Logger.new(STDOUT) 7 | 8 | RSpec.describe DockerHandler do 9 | prefix = 'foo2' 10 | prefix2 = 'bar3' 11 | 
config = <<-CONFIG 12 | provisioner: 13 | state_file: spec/test-state.yml 14 | docker: 15 | serverone: 16 | prefix: tra 17 | networks: 18 | everything: 19 | ipv4: 172.100.100.1 20 | ipv6: fd00:100:10aa::/48 21 | bridge: eve0 22 | applications: 23 | lb_dual_stack: 24 | networks: 25 | - everything 26 | build_dir: Foo 27 | labels: 28 | lb.net: everything 29 | static_ip: 30 | ipv4: 10.66.55.44.1 31 | ipv6: fd00:66:55:44:1 32 | lb_ipv4_stack: 33 | networks: 34 | - everything 35 | build_dir: Foo 36 | labels: 37 | lb.net: everything 38 | static_ip: 39 | ipv4: 10.55.55.44.1 40 | lb_ipv6_stack: 41 | networks: 42 | - everything 43 | build_dir: Foo 44 | labels: 45 | lb.net: everything 46 | static_ip: 47 | ipv6: fd00:77:33:44:1 48 | daemon_with_additional_hosts: 49 | networks: 50 | - everything 51 | build_dir: Foo 52 | labels: 53 | lb.net: everything 54 | internal_dns: 55 | example.com: lb_ipv4_stack 56 | example.su: lb_dual_stack 57 | CONFIG 58 | cfg_yaml = YAML.load(config) 59 | config = DockerHandler.new(cfg_yaml, log, 'serverone:test', MockPassStore.new) 60 | 61 | describe 'static container IP configuration' do 62 | it 'correctly maps IPv4 only addresses' do 63 | container_config = config.config_to_containers('apps', 'lb_ipv4_stack') 64 | ipam = container_config['tra-app-lb_ipv4_stack']['container_args']['NetworkingConfig']['EndpointsConfig']['tra-everything']['IPAMConfig'] 65 | expect(ipam).to eq({"IPv4Address"=>"10.55.55.44.1"}) 66 | end 67 | it 'correctly maps IPv6 only addresses' do 68 | container_config = config.config_to_containers('apps', 'lb_ipv6_stack') 69 | ipam = container_config['tra-app-lb_ipv6_stack']['container_args']['NetworkingConfig']['EndpointsConfig']['tra-everything']['IPAMConfig'] 70 | expect(ipam).to eq({"IPv6Address"=>"fd00:77:33:44:1"}) 71 | end 72 | it 'correctly maps IPv4/IPv6 dual stack configurations' do 73 | container_config = config.config_to_containers('apps', 'lb_dual_stack') 74 | ipam = 
container_config['tra-app-lb_dual_stack']['container_args']['NetworkingConfig']['EndpointsConfig']['tra-everything']['IPAMConfig'] 75 | expect(ipam).to eq({"IPv4Address"=>"10.66.55.44.1", "IPv6Address"=>"fd00:66:55:44:1"}) 76 | end 77 | end 78 | 79 | describe 'additional host file configuration' do 80 | it 'correctly adds additional hosts for IPv4 configurations' do 81 | container_config = config.config_to_containers('apps', 'daemon_with_additional_hosts') 82 | extra_hosts = container_config['tra-app-daemon_with_additional_hosts']['container_args']['HostConfig']['ExtraHosts'] 83 | expect(extra_hosts).to containing_exactly('example.com:10.55.55.44.1', 84 | 'example.su:10.66.55.44.1', 85 | 'example.su:fd00:66:55:44:1') 86 | end 87 | end 88 | 89 | end 90 | -------------------------------------------------------------------------------- /spec/docker_traefik_spec.rb: -------------------------------------------------------------------------------- 1 | require 'yaml' 2 | require_relative '../lib/dockerhandler' 3 | require 'logger' 4 | require 'mocks.rb' 5 | 6 | log = Logger.new(STDOUT) 7 | 8 | RSpec.describe DockerHandler do 9 | prefix = 'foo2' 10 | prefix2 = 'bar3' 11 | config = <<-CONFIG 12 | provisioner: 13 | state_file: spec/test-state.yml 14 | docker: 15 | serverone: 16 | prefix: tra 17 | networks: 18 | public: 19 | ipv4: 172.20.0.1 20 | ipv6: fd00:20:10aa::/48 21 | bridge: pub10 22 | other: 23 | ipv4: 182.20.0.1 24 | ipv6: fd80:20:10aa::/48 25 | bridge: oth10 26 | applications: 27 | nginx_single_host: 28 | networks: 29 | - public 30 | build_dir: NginxStatic 31 | labels: 32 | lb.net: public 33 | traefik: 34 | http: 35 | hosts: 36 | - example.com 37 | port: 8181 38 | tls: enabled 39 | nginx_multi_host: 40 | networks: 41 | - other 42 | build_dir: NginxStatic 43 | labels: 44 | lb.net: other 45 | traefik: 46 | http: 47 | hosts: 48 | - my.style 49 | - www.my.style 50 | port: 8080 51 | tls: enabled 52 | nginx_no_tls: 53 | networks: 54 | - other 55 | build_dir: NginxStatic 
56 | labels: 57 | lb.net: other 58 | traefik: 59 | http: 60 | hosts: 61 | - no.ssl 62 | - www.no.ssl 63 | port: 8282 64 | CONFIG 65 | cfg_yaml = YAML.load(config) 66 | config_nginx_single_host = DockerHandler.new(cfg_yaml, log, 'serverone:test', MockPassStore.new) 67 | 68 | 69 | describe 'traefix lables for secure http service' do 70 | it 'creates correct labels for a service with a single host with tls enabled' do 71 | t = config_nginx_single_host.config_to_containers('apps', 'nginx_single_host') 72 | labels = t['tra-app-nginx_single_host']['container_args']['Labels'] 73 | expect(labels).to eq({'lb.net' => 'public', 74 | 'traefik.http.routers.tra-app-nginx_single_host.rule' => 'Host(`example.com`)', 75 | 'traefik.http.services.tra-app-nginx_single_host.loadbalancer.server.port' => '8181', 76 | 'traefik.http.routers.tra-app-nginx_single_host.entrypoints' => 'websecure', 77 | 'traefik.http.routers.tra-app-nginx_single_host.tls.certresolver' => 'lec' 78 | }) 79 | end 80 | it 'creates correct labels for a service with multiple hosts' do 81 | t = config_nginx_single_host.config_to_containers('apps', 'nginx_multi_host') 82 | labels = t['tra-app-nginx_multi_host']['container_args']['Labels'] 83 | expect(labels).to eq({'lb.net' => 'other', 84 | 'traefik.http.routers.tra-app-nginx_multi_host.rule' => 'Host(`my.style`,`www.my.style`)', 85 | 'traefik.http.services.tra-app-nginx_multi_host.loadbalancer.server.port' => '8080', 86 | 'traefik.http.routers.tra-app-nginx_multi_host.entrypoints' => 'websecure', 87 | 'traefik.http.routers.tra-app-nginx_multi_host.tls.certresolver' => 'lec' 88 | }) 89 | end 90 | it 'creates correct labels for a service without tls' do 91 | t = config_nginx_single_host.config_to_containers('apps', 'nginx_no_tls') 92 | labels = t['tra-app-nginx_no_tls']['container_args']['Labels'] 93 | expect(labels).to eq({'lb.net' => 'other', 94 | 'traefik.http.routers.tra-app-nginx_no_tls.rule' => 'Host(`no.ssl`,`www.no.ssl`)', 95 | 
# (review) Below: the remainder of the no-TLS label expectation (no entrypoints/certresolver labels expected),
# then spec/mocks.rb — MockPassStore#get_or_generate_password returns a deterministic "passfor:<folder>:<name>"
# string — and the head of spec/provisioner_spec.rb, whose MockProvisioner double records created domains and
# treats only 'example.com' as pre-existing.
'traefik.http.services.tra-app-nginx_no_tls.loadbalancer.server.port' => '8282' 96 | }) 97 | end 98 | end 99 | end 100 | -------------------------------------------------------------------------------- /spec/mocks.rb: -------------------------------------------------------------------------------- 1 | class MockPassStore 2 | 3 | def initialize() 4 | end 5 | 6 | ## rescue GPGME::Error::DecryptFailed upstream for failed passwords 7 | def get_or_generate_password(folder, name) 8 | return "passfor:#{folder}:#{name}" 9 | end 10 | 11 | end 12 | -------------------------------------------------------------------------------- /spec/provisioner_spec.rb: -------------------------------------------------------------------------------- 1 | require 'tempfile' 2 | require 'logger' 3 | require_relative '../lib/provisioner' 4 | 5 | class MockProvisioner < Provisioner 6 | 7 | attr_accessor :state, :domains_created, :update_check 8 | 9 | def initialize(cfg_yaml, log) 10 | super(cfg_yaml, log) 11 | @domains_created = {} 12 | @existing_domains = [ 'example.com' ] 13 | @update_check = [] 14 | end 15 | 16 | def list_domain_records(domain, ok_func, err_func) 17 | if @existing_domains.include?(domain) 18 | ok_func.() 19 | else 20 | err_func.() 21 | end 22 | #{'name' => '', 'type' => 'A', 'data' => '1.1.1.1'} 23 | end 24 | 25 | def create_domain(domain, ip) 26 | @domains_created[domain] = ip 27 | @existing_domains.push(domain) 28 | end 29 | 30 | def dns_update_check(r) 31 | @update_check.push(r) 32 | end 33 | 34 | end 35 | 36 | RSpec.describe Provisioner do 37 | 38 | tmp_state = Tempfile.new('bee2_test_state') 39 | tmp_public_inv = Tempfile.new('bee2_test_public_inv') 40 | tmp_private_inv = Tempfile.new('bee2_test_private_inv') 41 | log = Logger.new(STDOUT) 42 | log.level = Logger::ERROR 43 | 44 | after(:all) do 45 | tmp_state.unlink 46 | tmp_public_inv.unlink 47 | tmp_private_inv.unlink 48 | end 49 | 50 | state = <<-STATE 51 | servers: 52 | srv1: 53 | ipv4: 54 | addr: 10.10.10.1 55 | 
ipv6: 56 | addr: fe80:aaaa:bbbb:cccc::61 57 | subnet: 'fe80:aaaa:bbbb:cccc::' 58 | srv6: 59 | ipv4: 60 | addr: 10.20.20.1 61 | ipv6: 62 | addr: fe80:aaaa:bbbb:dddd::62 63 | subnet: 'fe80:aaaa:bbbb:dddd::' 64 | STATE 65 | 66 | config = <<-CONFIG 67 | provisioner: 68 | state_file: #{tmp_state.path} 69 | inventory: 70 | public: #{tmp_public_inv.path} 71 | private: #{tmp_private_inv.path} 72 | servers: 73 | srv6: 74 | private_ip: 192.168.1.1 75 | ipv6: 76 | docker: 77 | suffix_bridge: 1:0:0/96 78 | suffix_net: 2:0:0/96 79 | static_web: 2:aaaa:a 80 | dns: 81 | public: 82 | - web.example.com 83 | private: 84 | - web.example.net 85 | web: 86 | - battlepenguin.com 87 | - penguindreams.org 88 | - social.battlepenguin.com 89 | - tech.battlepenguin.com 90 | srv1: 91 | dns: 92 | public: 93 | - srv1.example.com 94 | CONFIG 95 | 96 | cfg_yaml = YAML.load(config) 97 | pro = MockProvisioner.new(cfg_yaml, log) 98 | pro.state = YAML.load(state) 99 | 100 | describe "Domain Records" do 101 | it "convert examples in code comments/documentation" do 102 | expect(pro.domain_records([ 'something.example.com', 'some.other.example.com', 'example.net', 'somethingelse.example.net', 'example.org' ])).to( 103 | eq( { 'example.com' => [ 'something', 'some.other' ], 'example.net' => ['somethingelse'], 'example.org' => [] }) 104 | ) 105 | end 106 | it "convert domain array from YAML config to a hash from each base to subdomains" do 107 | expect(pro.domain_records(cfg_yaml['servers']['srv6']['dns']['web'])).to eq( 108 | {"battlepenguin.com"=>["social", "tech"], "penguindreams.org"=>[]} 109 | ) 110 | end 111 | end 112 | 113 | describe "List Current Domain Records" do 114 | it "runs ok function if domain is found" do 115 | acc = false 116 | pro.list_domain_records('example.com', ->{ acc = true }, ->{ fail 'domain should exist in test fixture' }) 117 | expect(acc).to eq(true) 118 | end 119 | it "runs error function if domain is not found" do 120 | acc = false 121 | 
pro.list_domain_records('example.net', ->{ fail 'domain should not exist in test fixture' }, ->{ acc = true }) 122 | expect(acc).to eq(true) 123 | end 124 | end 125 | 126 | # TODO: fix mock 127 | # describe "Update DNS" do 128 | # it "creates new domains" do 129 | # pro.update_dns 130 | # expect(pro.domains_created).to eq({"example.net"=>nil}) 131 | # pro.domains_created = {} 132 | # end 133 | # it "skip created domains" do 134 | # pro.update_dns 135 | # expect(pro.domains_created).to eq({}) 136 | # pro.domains_created = {} 137 | # end 138 | # end 139 | 140 | # describe "DNS Update Check" do 141 | # it "visits all records to update A/AAAA names" do 142 | # end 143 | # end 144 | 145 | describe "Web IPv6" do 146 | it "should not have a static_web in state for srv6 prior to running web_ipv6 function" do 147 | expect(pro.state['servers']['srv6']['ipv6']['static_web']).to be_nil 148 | end 149 | it "should add an haproxy/docker container IPv6 Address for srv6" do 150 | pro.web_ipv6 151 | expect(pro.state['servers']['srv6']['ipv6']['static_web']).to eq('fe80:aaaa:bbbb:dddd::2:aaaa:a') 152 | end 153 | it "should not add an haproxy/docker container IPv6 for srv1" do 154 | pro.web_ipv6 155 | expect(pro.state['servers']['srv1']['static_web']).to be_nil 156 | end 157 | end 158 | 159 | describe "Save State" do 160 | it "should save the current state to the state file" do 161 | pro.save_state 162 | expect(YAML.load_file(tmp_state)).to eq(pro.state) 163 | end 164 | end 165 | 166 | # TODO 167 | # describe "Save Inventory" do 168 | # it "should save the public inventory" do 169 | # end 170 | # it "should save the private inventory" do 171 | # end 172 | # end 173 | 174 | end 175 | -------------------------------------------------------------------------------- /spec/spec_helper.rb: -------------------------------------------------------------------------------- 1 | # This file was generated by the `rspec --init` command. 
# (review) Continuation of the `rspec --init` generated spec_helper.rb: standard expectations/mocks
# configuration, plus the recommended-but-disabled settings inside the =begin/=end block.
Conventionally, all 2 | # specs live under a `spec` directory, which RSpec adds to the `$LOAD_PATH`. 3 | # The generated `.rspec` file contains `--require spec_helper` which will cause 4 | # this file to always be loaded, without a need to explicitly require it in any 5 | # files. 6 | # 7 | # Given that it is always loaded, you are encouraged to keep this file as 8 | # light-weight as possible. Requiring heavyweight dependencies from this file 9 | # will add to the boot time of your test suite on EVERY test run, even for an 10 | # individual file that may not need all of that loaded. Instead, consider making 11 | # a separate helper file that requires the additional dependencies and performs 12 | # the additional setup, and require it from the spec files that actually need 13 | # it. 14 | # 15 | # The `.rspec` file also contains a few flags that are not defaults but that 16 | # users commonly want. 17 | # 18 | # See http://rubydoc.info/gems/rspec-core/RSpec/Core/Configuration 19 | RSpec.configure do |config| 20 | # rspec-expectations config goes here. You can use an alternate 21 | # assertion/expectation library such as wrong or the stdlib/minitest 22 | # assertions if you prefer. 23 | config.expect_with :rspec do |expectations| 24 | # This option will default to `true` in RSpec 4. It makes the `description` 25 | # and `failure_message` of custom matchers include text for helper methods 26 | # defined using `chain`, e.g.: 27 | # be_bigger_than(2).and_smaller_than(4).description 28 | # # => "be bigger than 2 and smaller than 4" 29 | # ...rather than: 30 | # # => "be bigger than 2" 31 | expectations.include_chain_clauses_in_custom_matcher_descriptions = true 32 | end 33 | 34 | # rspec-mocks config goes here. You can use an alternate test double 35 | # library (such as bogus or mocha) by changing the `mock_with` option here. 36 | config.mock_with :rspec do |mocks| 37 | # Prevents you from mocking or stubbing a method that does not exist on 38 | # a real object. 
This is generally recommended, and will default to 39 | # `true` in RSpec 4. 40 | mocks.verify_partial_doubles = true 41 | end 42 | 43 | # This option will default to `:apply_to_host_groups` in RSpec 4 (and will 44 | # have no way to turn it off -- the option exists only for backwards 45 | # compatibility in RSpec 3). It causes shared context metadata to be 46 | # inherited by the metadata hash of host groups and examples, rather than 47 | # triggering implicit auto-inclusion in groups with matching metadata. 48 | config.shared_context_metadata_behavior = :apply_to_host_groups 49 | 50 | # The settings below are suggested to provide a good initial experience 51 | # with RSpec, but feel free to customize to your heart's content. 52 | =begin 53 | # This allows you to limit a spec run to individual examples or groups 54 | # you care about by tagging them with `:focus` metadata. When nothing 55 | # is tagged with `:focus`, all examples get run. RSpec also provides 56 | # aliases for `it`, `describe`, and `context` that include `:focus` 57 | # metadata: `fit`, `fdescribe` and `fcontext`, respectively. 58 | config.filter_run_when_matching :focus 59 | 60 | # Allows RSpec to persist some state between runs in order to support 61 | # the `--only-failures` and `--next-failure` CLI options. We recommend 62 | # you configure your source control system to ignore this file. 63 | config.example_status_persistence_file_path = "spec/examples.txt" 64 | 65 | # Limits the available syntax to the non-monkey patched syntax that is 66 | # recommended. For more details, see: 67 | # - http://rspec.info/blog/2012/06/rspecs-new-expectation-syntax/ 68 | # - http://www.teaisaweso.me/blog/2013/05/27/rspecs-new-message-expectation-syntax/ 69 | # - http://rspec.info/blog/2014/05/notable-changes-in-rspec-3/#zero-monkey-patching-mode 70 | config.disable_monkey_patching! 71 | 72 | # This setting enables warnings. 
It's recommended, but in some cases may 73 | # be too noisy due to issues in dependencies. 74 | config.warnings = true 75 | 76 | # Many RSpec users commonly either run the entire suite or an individual 77 | # file, and it's useful to allow more verbose output when running an 78 | # individual spec file. 79 | if config.files_to_run.one? 80 | # Use the documentation formatter for detailed output, 81 | # unless a formatter has already been configured 82 | # (e.g. via a command-line flag). 83 | config.default_formatter = 'doc' 84 | end 85 | 86 | # Print the 10 slowest examples and example groups at the 87 | # end of the spec run, to help surface which specs are running 88 | # particularly slow. 89 | config.profile_examples = 10 90 | 91 | # Run specs in random order to surface order dependencies. If you find an 92 | # order dependency and want to debug it, you can fix the order by providing 93 | # the seed, which is printed after each run. 94 | # --seed 1234 95 | config.order = :random 96 | 97 | # Seed global randomization in this process using the `--seed` CLI option. 98 | # Setting this allows you to use `--seed` to deterministically reproduce 99 | # test failures related to randomization by passing the same `--seed` value 100 | # as the one that triggered the failure. 
101 | Kernel.srand config.seed 102 | =end 103 | end 104 | -------------------------------------------------------------------------------- /spec/sync_spec.rb: -------------------------------------------------------------------------------- 1 | require 'yaml' 2 | require_relative '../lib/synchandler' 3 | require 'logger' 4 | 5 | log = Logger.new(STDOUT) 6 | 7 | RSpec.describe SyncHandler do 8 | prefix = 'foo2' 9 | config = <<-CONFIG 10 | provisioner: 11 | ssh_key: 12 | private: conf/my-key 13 | servers: 14 | web1: 15 | dns: 16 | private: 17 | - web1.private 18 | web2: 19 | dns: 20 | private: 21 | - web2.private 22 | sync: 23 | web1: 24 | push: 25 | - /media/local1/photos:/media/remote1/photos 26 | web2: 27 | push: 28 | - /media/local2/data:/media/remote2/data 29 | - /media/local2/pool:/media/remote2/swimming 30 | CONFIG 31 | 32 | describe "rsync command generation" do 33 | it "empty list when given a non-existant server" do 34 | r = SyncHandler.new(YAML.load(config), log, 'test') 35 | expect(r.rsync_cmds('x1')).to be_empty 36 | end 37 | 38 | it "generates a set of parameters for web1" do 39 | r = SyncHandler.new(YAML.load(config), log, 'test') 40 | r = r.rsync_cmds('web1') 41 | expect(r.length).to eq(1) 42 | expect(r.first).to contain_exactly( 43 | '-av', '--delete', '-e', '"ssh -i conf/my-key"', 44 | '/media/local1/photos', 45 | 'root@web1.private:/media/remote1/photos') 46 | end 47 | 48 | it "generates a set of parameters for web2" do 49 | r = SyncHandler.new(YAML.load(config), log, 'test') 50 | r = r.rsync_cmds('web2') 51 | expect(r.length).to eq(2) 52 | expect(r.first).to contain_exactly( 53 | '-av', '--delete', '-e', '"ssh -i conf/my-key"', 54 | '/media/local2/data', 55 | 'root@web2.private:/media/remote2/data') 56 | expect(r.last).to contain_exactly( 57 | '-av', '--delete', '-e', '"ssh -i conf/my-key"', 58 | '/media/local2/pool', 59 | 'root@web2.private:/media/remote2/swimming') 60 | end 61 | end 62 | 63 | end 64 | 
# (review) Below: the spec/test-state.yml fixture (web1 carries an ipv6 subnet and static_web address,
# web2 is empty) and the head of util_spec.rb covering Util.lstrip_underscore, Util.base_domain and
# Util.host_domain, including co.uk-style secondary TLD bases.
-------------------------------------------------------------------------------- /spec/test-state.yml: -------------------------------------------------------------------------------- 1 | --- 2 | servers: 3 | web1: 4 | ipv6: 5 | subnet: 'a:b:c:d::' 6 | static_web: a:b:c:d::2:0:a 7 | web2: 8 | -------------------------------------------------------------------------------- /spec/util_spec.rb: -------------------------------------------------------------------------------- 1 | require_relative '../lib/util' 2 | 3 | RSpec.describe Util do 4 | 5 | describe "Strip Underscores" do 6 | it "strips leading underscores" do 7 | expect(Util.lstrip_underscore("_foo")).to eq("foo") 8 | end 9 | it "does not strip trailing underscores" do 10 | expect(Util.lstrip_underscore("foo_")).to eq("foo_") 11 | end 12 | it "does not strip non-leading underscores" do 13 | expect(Util.lstrip_underscore("_foo_bar")).to eq("foo_bar") 14 | end 15 | it "leaves strings with no underscores unaltered" do 16 | expect(Util.lstrip_underscore("bar")).to eq("bar") 17 | end 18 | end 19 | 20 | describe "Base domain" do 21 | it "removes leading subdomain" do 22 | expect(Util.base_domain("test.example.com")).to eq("example.com") 23 | end 24 | it "removes leading subdomains" do 25 | expect(Util.base_domain("a.b.c.d.test.example.com")).to eq("example.com") 26 | end 27 | it "removes leading subdomain with secondary base" do 28 | expect(Util.base_domain("test.example.co.uk")).to eq("example.co.uk") 29 | end 30 | it "removes leading subdomains with secondary base" do 31 | expect(Util.base_domain("a.b.c.d.test.example.co.uk")).to eq("example.co.uk") 32 | end 33 | end 34 | 35 | describe "Host subdomain" do 36 | it "single leading subdomain" do 37 | expect(Util.host_domain("test.example.com")).to eq("test") 38 | end 39 | it "multiple leading subdomains" do 40 | expect(Util.host_domain("a.b.c.d.test.example.com")).to eq("a.b.c.d.test") 41 | end 42 | it "single leading subdomain with secondary base" do 43 | 
# (review) Remaining Util examples: host_domain with secondary bases and www subdomains, and the
# bare primary-domain case where host_domain returns "" and base_domain returns the input unchanged.
expect(Util.host_domain("test.example.co.uk")).to eq("test") 44 | end 45 | it "multiple leading subdomains with secondary base" do 46 | expect(Util.host_domain("a.b.c.d.test.example.co.uk")).to eq("a.b.c.d.test") 47 | end 48 | end 49 | 50 | describe "WWW subdomain" do 51 | it "web subdomain for io url" do 52 | expect(Util.host_domain("www.wiki-dev.bigsense.io")).to eq("www.wiki-dev") 53 | end 54 | it "base for web subdomain for io url" do 55 | expect(Util.base_domain("www.wiki-dev.bigsense.io")).to eq("bigsense.io") 56 | end 57 | end 58 | 59 | describe "Primary Domain" do 60 | it "should have an empty host" do 61 | expect(Util.host_domain("bigsense.io")).to eq("") 62 | end 63 | it "should have a full base" do 64 | expect(Util.base_domain("bigsense.io")).to eq("bigsense.io") 65 | end 66 | end 67 | 68 | end 69 | --------------------------------------------------------------------------------