├── ffinfo ├── list.tmpl └── init.sls ├── apt ├── ffho.apt.conf ├── update_apt.cron ├── ffho.preferences ├── ffho.gpg.Debian.bookworm ├── ffho.gpg.Debian.bullseye ├── ffho.list.Debian.bookworm ├── ffho.list.Debian.bullseye ├── salt.sources ├── sources.list.Debian.bullseye ├── sources.list.Debian.bookworm └── init.sls ├── timezone └── init.sls ├── rsyslog ├── ffho.conf ├── ffho │ ├── 20-ntpd.conf │ ├── 20-bind.conf │ ├── 20-dhcp.conf │ ├── 20-slapd.conf │ ├── 20-snmpd.conf │ ├── 20-yanic.conf │ ├── 50-kern.conf │ ├── 50-user.conf │ ├── 20-mail.conf │ ├── 50-auth.conf │ ├── 20-fastd.conf │ ├── 20-influxdb.conf │ ├── 20-openvpn.conf │ ├── 20-salt-master.conf │ ├── 20-salt-minion.conf │ ├── 20-bird.conf │ ├── 30-net.conf │ ├── 90-cron.conf │ ├── 30-ap.conf │ └── 50-messages.conf ├── zz-debug.conf ├── ffho.logrotate ├── rsyslog-early.d │ └── sudo-ignores.conf └── init.sls ├── vim ├── vimrc ├── vimrc.local └── init.sls ├── pppoe ├── ff_fix_ppp.cron ├── noop.local ├── fix_ppp_vrf ├── ff_fix_ppp ├── ip-up.local ├── init.sls └── pap-secrets ├── mongodb ├── mongodb_backup.cron ├── mongodb_backup.conf ├── mongod.conf └── init.sls ├── openvpn ├── ifup_real ├── ifdown ├── ifup ├── ldap-auth │ ├── pam_ldap.conf.tmpl │ ├── openvpn.pam.d │ └── ldap.conf.tmpl ├── ccd.tmpl ├── openvpn@.service ├── ops.conf.tmpl └── openvpn.conf.tmpl ├── .gitignore ├── graylog ├── graylog-group-mapping.cron ├── graylog-system-notifications.cron ├── graylog-group-mapping.conf.tmpl ├── graylog-api-scripts.conf.tmpl └── default-graylog-server ├── Documentation └── example-pillar │ ├── nodes │ ├── init.sls │ ├── ALL.sls │ ├── mail.sls │ ├── fe01.sls │ ├── cr03.sls │ ├── gw03.sls │ └── bbr-kt.sls │ ├── ldap.sls │ ├── burp.sls │ ├── network.sls │ ├── logging.sls │ ├── top.sls │ ├── globals.sls │ ├── monitoring.sls │ ├── ssh.sls │ ├── sites.sls │ ├── te.sls │ └── dns-server.sls ├── burp ├── burp.gpg ├── burp.list.tmpl ├── client │ ├── default_burp │ └── burp.conf.tmpl ├── server │ ├── default_burp │ ├── 
client.tmpl │ └── common_incexc ├── init.sls ├── client.sls └── server.sls ├── dhcp-client ├── dont-update-resolv-conf ├── init.sls └── dhclient.conf ├── sury ├── sury.gpg ├── sury.list.tmpl └── init.sls ├── icingaweb2 ├── modules │ └── monitoring │ │ ├── backends.ini │ │ ├── config.ini │ │ └── commandtransports.ini ├── roles.ini.tmpl ├── resources.ini.tmpl ├── menu.ini.tmpl ├── authentication.ini.tmpl └── groups.ini.tmpl ├── nginx ├── service-override.conf ├── node.ffho.net ├── ffho.d │ ├── add-headers.conf │ └── proxy-headers.conf ├── www2.ffho.net ├── nginx.conf └── firmware.srv.in.ffho.net ├── snmpd ├── service-override.conf ├── default_snmpd └── init.sls ├── icinga2 ├── icinga.gpg ├── commands.d │ ├── check_salt.conf │ ├── needrestart.conf │ ├── check_systemd_units.conf │ ├── dhcp-server.conf │ ├── cpu_usage.conf │ ├── syncrepl_extended.conf │ ├── check_lv_snap.conf │ ├── nagios-plugins-contrib.conf │ ├── ssl_cert.conf │ ├── dns_sync.conf │ ├── check_gpg_expiry.conf │ └── mailq_sudo.conf ├── icinga.list.tmpl ├── secrets.conf.tmpl ├── zones.conf.H_icinga2.in.ffho.net ├── api.conf ├── services │ ├── lv.conf │ ├── salt.conf │ ├── ssl_cert.conf │ ├── dhcp-server.conf │ ├── mail.conf │ ├── gpg.conf │ ├── burp.conf │ ├── ldap.conf │ ├── common.conf │ └── ntp.conf ├── zones.conf ├── users.conf.tmpl ├── wbbl.conf.tmpl ├── plugins │ ├── check_salt │ ├── check_systemd_units │ └── check_conntrack_size ├── icinga2.sudoers ├── constants.conf ├── icinga2.conf └── icinga2.conf.H_icinga2.in.ffho.net ├── firewall ├── modules └── init.sls ├── unattended-upgrades ├── 20auto-upgrades └── init.sls ├── batman ├── batman-adv.module.conf ├── hosts.sls ├── ff_check_gateway.cron ├── bat-hosts.tmpl └── init.sls ├── dhcp-server ├── dhcpd-pool.1.gz ├── dhcpd.default ├── init.sls └── dhcpd.conf ├── bird ├── local.conf ├── icinga2 │ ├── ospf_interfaces_down_ok.txt.tmpl │ └── ibgp_sessions_down_ok.txt.tmpl ├── l3-access.conf ├── mesh_routes.conf ├── ibgp.conf ├── bogon_unreach.conf ├── 
radv.conf ├── bird.conf └── VRF_external.conf ├── dns-server ├── zone.gen.tmpl ├── zones │ └── README.md ├── named.conf.local ├── named.conf.options └── named.conf.options.recursor ├── kernel └── init.sls ├── fastd ├── peers-blacklist ├── verify-peer.sh ├── fastd@.service └── ff_fastd_conn ├── influxdb ├── influxdata-archive_compat.gpg ├── influxdb.list.tmpl ├── backup.sh └── init.sls ├── grafana ├── grafana.list.tmpl ├── dashboards │ ├── prometheus │ │ └── README.md │ └── prometheus.yaml ├── datasources │ └── prom-local.yaml.tmpl ├── prometheus.sls ├── plugins │ └── piechart-panel.sls └── init.sls ├── network ├── resolv.conf.H_mail.in.ffho.net ├── resolv.conf.H_dns01.ffho.net ├── rt_tables.conf.tmpl ├── ifupdown2 │ ├── ff_fix_default_route.cron │ ├── init.sls │ ├── reload.sls │ └── ff_fix_default_route ├── resolv.conf ├── ifupdown-ng │ ├── reload.sls │ └── init.sls ├── interfaces │ ├── init.sls │ ├── vm_interfaces.tmpl │ └── openvpn.tmpl ├── systemd-link.tmpl ├── bootstrap.sls ├── link.sls └── init.sls ├── mosh ├── mosh.ufw.conf └── init.sls ├── sysctl ├── router.conf └── init.sls ├── screen ├── init.sls └── screenrc.root ├── yanic ├── ff_merge_nodes_json.cron ├── yanic@.service └── ff_merge_nodes_json ├── anycast-healthchecker ├── check.conf.tmpl ├── bird.anycast-service.conf └── anycast-healthchecker.conf ├── needrestart ├── init.sls └── monitoring.conf ├── ntp ├── init.sls └── ntp.conf ├── utils.sls ├── postfix ├── aliases ├── main.cf └── main.cf.H_ticket.in.ffho.net ├── respondd ├── respondd@.service └── respondd-config.tmpl ├── systemd ├── 90-unfuck-mac-overwrite.link ├── wait-for-routes.service ├── init.sls └── wait-for-routes ├── prometheus-server ├── prometheus.default └── init.sls ├── install-server ├── ffho-first-boot.service └── ffho-first-boot.sh ├── sudo ├── init.sls ├── sudoers.Debian.bullseye └── sudoers.Debian.bookworm ├── users └── init.sls ├── bash ├── init.sls ├── bashrc.root └── bash_aliases.root ├── salt-minion ├── init.sls └── 
minion_conf.tmpl ├── prometheus-exporters ├── node-exporter │ └── prometheus-node-exporter.default └── init.sls ├── motd └── init.sls ├── _modules ├── ffho.py └── ffho_auth.py ├── locales └── init.sls ├── wireguard ├── wireguard.conf.tmpl └── init.sls ├── slapd ├── init.sls └── slapd.default ├── kvm ├── init.sls └── get-bridge-vids ├── docker └── init.sls ├── ssh └── authorized_keys.tmpl ├── elasticsearch └── init.sls ├── apu2 └── init.sls ├── nftables └── init.sls ├── certs └── ffho-cacert.pem ├── forgejo ├── init.sls ├── forgejo-apt.asc └── app.ini.tmpl ├── firmware └── init.sls └── README.md /ffinfo/list.tmpl: -------------------------------------------------------------------------------- 1 | {{ "\n".join( list|sort ) }} 2 | -------------------------------------------------------------------------------- /apt/ffho.apt.conf: -------------------------------------------------------------------------------- 1 | APT::Install-Recommends "false"; 2 | -------------------------------------------------------------------------------- /timezone/init.sls: -------------------------------------------------------------------------------- 1 | Europe/Berlin: 2 | timezone.system 3 | -------------------------------------------------------------------------------- /rsyslog/ffho.conf: -------------------------------------------------------------------------------- 1 | $IncludeConfig /etc/rsyslog.d/ffho/*.conf 2 | -------------------------------------------------------------------------------- /vim/vimrc: -------------------------------------------------------------------------------- 1 | set nowrap 2 | set ai 3 | syn on 4 | color delek 5 | -------------------------------------------------------------------------------- /apt/update_apt.cron: -------------------------------------------------------------------------------- 1 | 3 */6 * * * root apt-get update > /dev/null 2>&1 2 | -------------------------------------------------------------------------------- /pppoe/ff_fix_ppp.cron: 
-------------------------------------------------------------------------------- 1 | */5 * * * * root /usr/local/sbin/ff_fix_ppp 2 | -------------------------------------------------------------------------------- /mongodb/mongodb_backup.cron: -------------------------------------------------------------------------------- 1 | 0 23 * * * root /usr/local/sbin/mongodb_backup 2 | -------------------------------------------------------------------------------- /apt/ffho.preferences: -------------------------------------------------------------------------------- 1 | Package: * 2 | Pin: origin apt.ffho.net 3 | Pin-Priority: 1001 4 | -------------------------------------------------------------------------------- /openvpn/ifup_real: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ifdown --force "$1" 2>/dev/null 4 | ifup "$1" 5 | -------------------------------------------------------------------------------- /vim/vimrc.local: -------------------------------------------------------------------------------- 1 | " Do not load defaults ever at all 2 | let g:skip_defaults_vim = 1 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # 2 | # .gitignore 3 | # 4 | 5 | *.pyc 6 | _modules/__pycache__/ 7 | .*.swp 8 | -------------------------------------------------------------------------------- /graylog/graylog-group-mapping.cron: -------------------------------------------------------------------------------- 1 | */5 * * * * root /usr/local/sbin/graylog-group-mapping 2 | -------------------------------------------------------------------------------- /Documentation/example-pillar/nodes/init.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - nodes.ALL: 3 | key: nodes 4 | 
-------------------------------------------------------------------------------- /burp/burp.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreifunkHochstift/ffho-salt-public/HEAD/burp/burp.gpg -------------------------------------------------------------------------------- /dhcp-client/dont-update-resolv-conf: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | make_resolv_conf(){ 4 | : 5 | } 6 | -------------------------------------------------------------------------------- /sury/sury.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreifunkHochstift/ffho-salt-public/HEAD/sury/sury.gpg -------------------------------------------------------------------------------- /icingaweb2/modules/monitoring/backends.ini: -------------------------------------------------------------------------------- 1 | [icinga] 2 | type = "ido" 3 | resource = "icinga_ido" 4 | -------------------------------------------------------------------------------- /nginx/service-override.conf: -------------------------------------------------------------------------------- 1 | [Unit] 2 | After=network-online.target 3 | Wants=network-online.target 4 | -------------------------------------------------------------------------------- /snmpd/service-override.conf: -------------------------------------------------------------------------------- 1 | [Unit] 2 | After=network-online.target 3 | Wants=network-online.target 4 | -------------------------------------------------------------------------------- /icinga2/icinga.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreifunkHochstift/ffho-salt-public/HEAD/icinga2/icinga.gpg -------------------------------------------------------------------------------- 
/icingaweb2/modules/monitoring/config.ini: -------------------------------------------------------------------------------- 1 | [security] 2 | protected_customvars = "*pw*,*pass*,community" 3 | -------------------------------------------------------------------------------- /firewall/modules: -------------------------------------------------------------------------------- 1 | # Make sure conntrack is loaded as early as possible so sysctl can be set. 2 | nf_conntrack 3 | -------------------------------------------------------------------------------- /graylog/graylog-system-notifications.cron: -------------------------------------------------------------------------------- 1 | 0 12,20 * * * root /usr/local/sbin/graylog-system-notifications 2 | -------------------------------------------------------------------------------- /openvpn/ifdown: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /usr/bin/flock --exclusive --wait 30 /var/lock/ff_ifupdown2 /sbin/ifdown "$1" 4 | -------------------------------------------------------------------------------- /openvpn/ifup: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /usr/bin/flock --exclusive --wait 30 /var/lock/ff_ifupdown2 /etc/openvpn/ifup_real "$@" 4 | -------------------------------------------------------------------------------- /unattended-upgrades/20auto-upgrades: -------------------------------------------------------------------------------- 1 | APT::Periodic::Update-Package-Lists "1"; 2 | APT::Periodic::Unattended-Upgrade "1"; 3 | -------------------------------------------------------------------------------- /apt/ffho.gpg.Debian.bookworm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreifunkHochstift/ffho-salt-public/HEAD/apt/ffho.gpg.Debian.bookworm -------------------------------------------------------------------------------- 
/apt/ffho.gpg.Debian.bullseye: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreifunkHochstift/ffho-salt-public/HEAD/apt/ffho.gpg.Debian.bullseye -------------------------------------------------------------------------------- /batman/batman-adv.module.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Load batman-adv module on system boot (Salt managed) 3 | # 4 | batman-adv 5 | dummy 6 | -------------------------------------------------------------------------------- /dhcp-server/dhcpd-pool.1.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreifunkHochstift/ffho-salt-public/HEAD/dhcp-server/dhcpd-pool.1.gz -------------------------------------------------------------------------------- /bird/local.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Local router config knobs (NOT Salt-managed) 3 | # 4 | 5 | define DRAIN_FULL = 0; 6 | define DRAIN_FFRL = 0; 7 | -------------------------------------------------------------------------------- /dns-server/zone.gen.tmpl: -------------------------------------------------------------------------------- 1 | ;; 2 | ;; {{ zone }} (Salt managed) 3 | ;; 4 | {%- for entry in entries %} 5 | {{ entry }} 6 | {%- endfor %} 7 | -------------------------------------------------------------------------------- /icinga2/commands.d/check_salt.conf: -------------------------------------------------------------------------------- 1 | object CheckCommand "check_salt" { 2 | command = [ FFHOPluginDir + "/check_salt" ] 3 | } 4 | -------------------------------------------------------------------------------- /icingaweb2/modules/monitoring/commandtransports.ini: -------------------------------------------------------------------------------- 1 | [icinga2] 2 | transport = "local" 3 | path = 
"/var/run/icinga2/cmd/icinga2.cmd" 4 | -------------------------------------------------------------------------------- /kernel/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Linux Kernel 3 | # 4 | 5 | linux-kernel: 6 | pkg.latest: 7 | - name: linux-image-{{ grains.osarch }} 8 | -------------------------------------------------------------------------------- /Documentation/example-pillar/nodes/ALL.sls: -------------------------------------------------------------------------------- 1 | include: 2 | - nodes.bbr-kt 3 | - nodes.cr03 4 | - nodes.gw03 5 | - nodes.fe01 6 | # [...] 7 | -------------------------------------------------------------------------------- /fastd/peers-blacklist: -------------------------------------------------------------------------------- 1 | # Insert one key per row. verify.sh greps for the connecting key in this file 2 | # and exits with 1 if key is found. 3 | -------------------------------------------------------------------------------- /burp/burp.list.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # burp repository (Salt managed) 3 | # 4 | deb http://ziirish.info/repos/debian/{{ grains.oscodename }} zi-latest main 5 | -------------------------------------------------------------------------------- /influxdb/influxdata-archive_compat.gpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/FreifunkHochstift/ffho-salt-public/HEAD/influxdb/influxdata-archive_compat.gpg -------------------------------------------------------------------------------- /mongodb/mongodb_backup.conf: -------------------------------------------------------------------------------- 1 | [mongodb-backup] 2 | BAKDIR=/srv/backup/mongodb_bak 3 | MAXAGE=3 4 | QUIET=True 5 | 6 | [db_ignore] 7 | db1=local 8 | -------------------------------------------------------------------------------- 
/apt/ffho.list.Debian.bookworm: -------------------------------------------------------------------------------- 1 | deb http://apt.ffho.net bookworm contrib non-free main 2 | deb-src http://apt.ffho.net bookworm contrib non-free main 3 | -------------------------------------------------------------------------------- /apt/ffho.list.Debian.bullseye: -------------------------------------------------------------------------------- 1 | deb http://apt.ffho.net bullseye contrib non-free main 2 | deb-src http://apt.ffho.net bullseye contrib non-free main 3 | -------------------------------------------------------------------------------- /grafana/grafana.list.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # grafana deb repo 3 | # 4 | 5 | deb [signed-by=/usr/share/keyrings/grafana.key] https://apt.grafana.com stable main 6 | -------------------------------------------------------------------------------- /network/resolv.conf.H_mail.in.ffho.net: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/resolv.conf (Salt managed) 3 | # 4 | search in.ffho.net 5 | nameserver 2a03:2260:2342:f251::53 6 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-ntpd.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'ntpd' then /var/log/ntpd.log 6 | & stop 7 | -------------------------------------------------------------------------------- /icinga2/icinga.list.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # icinga2 repository (Salt managed) 3 | # 4 | 5 | deb http://packages.icinga.com/debian icinga-{{ grains.oscodename }} main 6 | -------------------------------------------------------------------------------- /mosh/mosh.ufw.conf: 
-------------------------------------------------------------------------------- 1 | [mosh] 2 | title=Mosh (mobile shell) 3 | description=Mobile shell that supports roaming and intelligent local echo 4 | ports=60000:60010/udp 5 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-bind.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'named' then /var/log/named.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-dhcp.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'dhcpd' then /var/log/dhcp.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-slapd.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'slapd' then /var/log/slapd.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-snmpd.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'snmpd' then /var/log/snmpd.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-yanic.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'yanic' then /var/log/yanic.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/50-kern.conf: 
-------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | kern.* -/var/log/kern.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/50-user.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | user.* -/var/log/user.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-mail.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $syslogfacility-text == 'mail' then /var/log/mail.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/50-auth.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | auth.*,authpriv.* /var/log/auth.log 6 | & stop 7 | -------------------------------------------------------------------------------- /Documentation/example-pillar/ldap.sls: -------------------------------------------------------------------------------- 1 | ldap: 2 | global: 3 | server_uri: "ldaps://" 4 | base_dn: "" 5 | 6 | # Special DNs used for mgmt VPN etc. 
7 | -------------------------------------------------------------------------------- /network/resolv.conf.H_dns01.ffho.net: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/resolv.conf (Salt managed) 3 | # 4 | search in.ffho.net 5 | nameserver 127.0.0.1 6 | nameserver 2a03:2260:2342:f251::53 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-fastd.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname startswith 'fastd' then /var/log/fastd.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-influxdb.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'influxd' then /var/log/influxdb.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-openvpn.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname startswith 'ovpn' then /var/log/openvpn.log 6 | & stop 7 | -------------------------------------------------------------------------------- /Documentation/example-pillar/burp.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Burp backup related configuration 3 | # 4 | 5 | burp: 6 | 7 | # Server settings 8 | server: 9 | fqdn: 10 | -------------------------------------------------------------------------------- /burp/client/default_burp: -------------------------------------------------------------------------------- 1 | # 2 | # Burp backup defaults (Salt Managed) 3 | # 4 | # This is a POSIX shell fragment 5 | # 6 | 7 | # We want to run the 
bloody client 8 | RUN=yes 9 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-salt-master.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'salt-master' then /var/log/salt-master.log 6 | & stop 7 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-salt-minion.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'salt-minion' then /var/log/salt-minion.log 6 | & stop 7 | -------------------------------------------------------------------------------- /Documentation/example-pillar/network.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Global network settings 3 | # 4 | 5 | network: 6 | # The default suite to configure network interfaces with 7 | suite: ifupdown-ng 8 | -------------------------------------------------------------------------------- /rsyslog/ffho/20-bird.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $programname == 'bird' or $programname == 'bird6' then /var/log/bird.log 6 | & stop 7 | -------------------------------------------------------------------------------- /batman/hosts.sls: -------------------------------------------------------------------------------- 1 | # Conveniance bat-hosts file for informative batctl output 2 | /etc/bat-hosts: 3 | file.managed: 4 | - source: salt://batman/bat-hosts.tmpl 5 | - template: jinja 6 | -------------------------------------------------------------------------------- /rsyslog/ffho/30-net.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | 
$FileOwner root 3 | $FileGroup adm 4 | 5 | if $fromhost-ip startswith '172.30.' and not $msg contains 'regdmn5g' then /var/log/net.log 6 | & stop 7 | -------------------------------------------------------------------------------- /sury/sury.list.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # sury php repository (Salt managed) 3 | # 4 | 5 | deb [signed-by=/etc/apt/trusted.gpg.d/deb.sury.org-php.gpg] https://packages.sury.org/php/ {{ grains.oscodename }} main 6 | -------------------------------------------------------------------------------- /sysctl/router.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Sysctls for FFHO router nodes (Salt managed) 3 | # 4 | 5 | 6 | # 7 | # Activate IP Unicast Routing 8 | net.ipv4.ip_forward=1 9 | net.ipv6.conf.all.forwarding=1 10 | -------------------------------------------------------------------------------- /influxdb/influxdb.list.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # influxdb deb repo 3 | # 4 | 5 | deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian {{ grains.oscodename }} stable 6 | -------------------------------------------------------------------------------- /screen/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Screen 3 | # 4 | 5 | screen: 6 | pkg.installed: 7 | - name: screen 8 | 9 | /root/.screenrc: 10 | file.managed: 11 | - source: salt://screen/screenrc.root 12 | -------------------------------------------------------------------------------- /icinga2/secrets.conf.tmpl: -------------------------------------------------------------------------------- 1 | /* 2 | * Secrets used for monitoring checks (Salt managed) 3 | */ 4 | 5 | const LdapSyncReplBindPassword = "{{ salt['pillar.get']('monitoring:private:ldap_syncrepl_bindpw') }}" 6 | 
-------------------------------------------------------------------------------- /mosh/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Mosh 3 | # 4 | 5 | mosh: 6 | pkg.installed: 7 | - name: 'mosh' 8 | 9 | /etc/ufw/applications.d/mosh: 10 | file.managed: 11 | - source: salt://mosh/mosh.ufw.conf 12 | -------------------------------------------------------------------------------- /yanic/ff_merge_nodes_json.cron: -------------------------------------------------------------------------------- 1 | # 2 | # Merge all nodes.json into one ubernodes.json 3 | # 4 | 5 | */5 * * * * root /usr/local/bin/ff_merge_nodes_json /srv/yanic/data/*/nodes.json > /srv/yanic/data/nodes.json 6 | -------------------------------------------------------------------------------- /icinga2/zones.conf.H_icinga2.in.ffho.net: -------------------------------------------------------------------------------- 1 | # 2 | # zones.conf (Salt managed) 3 | # 4 | 5 | object Endpoint NodeName { 6 | host = NodeName 7 | } 8 | 9 | object Zone "master" { 10 | endpoints = [ NodeName ] 11 | } 12 | -------------------------------------------------------------------------------- /network/rt_tables.conf.tmpl: -------------------------------------------------------------------------------- 1 | {%- set vrfs_by_id = salt['ffho_net.get_vrfs_by_id']() -%} 2 | # FFHO routing tables (Salt managed) 3 | {%- for tid, name in vrfs_by_id.items()|sort %} 4 | {{ tid }} {{ name }} 5 | {%- endfor %} 6 | -------------------------------------------------------------------------------- /rsyslog/ffho/90-cron.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | #crons sollten nicht auf dem logserver geloggt werden. 
Falls doch bitte mit Einzelfilter und höherer Priorität 6 | cron.* stop 7 | -------------------------------------------------------------------------------- /icingaweb2/roles.ini.tmpl: -------------------------------------------------------------------------------- 1 | [Administratoren] 2 | users = "{{ icingaweb2_config['roles']['users'] }}" 3 | permissions = "{{ icingaweb2_config['roles']['permissions'] }}" 4 | groups = "{{ icingaweb2_config['roles']['groups'] }}" 5 | -------------------------------------------------------------------------------- /yanic/yanic@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=yanic (Site %I) 3 | After=network.service 4 | 5 | [Service] 6 | ExecStart=/srv/yanic/yanic serve --config /srv/yanic/%i.conf 7 | 8 | [Install] 9 | WantedBy=multi-user.target 10 | -------------------------------------------------------------------------------- /dns-server/zones/README.md: -------------------------------------------------------------------------------- 1 | ## These are not the droids you are locking for! 2 | 3 | The zones/ directory is part of the internal salt git as the contents of the 4 | zones should not be public :-) 5 | 6 | Go there for zone changes. 
7 | -------------------------------------------------------------------------------- /batman/ff_check_gateway.cron: -------------------------------------------------------------------------------- 1 | # 2 | # Check if THE INTERNET is reachable via chosen upstream (Salt managed) 3 | # 4 | SHELL=/bin/bash 5 | PATH=/sbin:/bin:/usr/sbin:/usr/bin 6 | 7 | * * * * * root /usr/local/sbin/ff_check_gateway 8 | -------------------------------------------------------------------------------- /icingaweb2/resources.ini.tmpl: -------------------------------------------------------------------------------- 1 | {%- for title, nested_dict in icingaweb2_config['resources'].items() -%} 2 | [{{ title }}] 3 | {%- for key, value in nested_dict.items() %} 4 | {{ key }} = "{{ value }}" 5 | {%- endfor %} 6 | 7 | {% endfor %} 8 | -------------------------------------------------------------------------------- /graylog/graylog-group-mapping.conf.tmpl: -------------------------------------------------------------------------------- 1 | [DEFAULTS] 2 | default-role={{ graylog_config['default_role'] }} 3 | 4 | [GROUP-MAPPING] 5 | {%- for key, value in graylog_config['role_mapping'].items() %} 6 | {{ key }}={{ value }} 7 | {%- endfor %} 8 | 9 | -------------------------------------------------------------------------------- /pppoe/noop.local: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # FFHO ip*.local replacement script (Salt managed) 4 | # 5 | # Using this NO-OP script intentionally PREVENTS ALL OTHER ip-$flavour.d/ scripts 6 | # from being called. We don't want that stuff. 
7 | 8 | exit 0 9 | -------------------------------------------------------------------------------- /grafana/dashboards/prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Grafana dashboards for Prometheus data 2 | 3 | The `bind9-full.json` and `node-exporter-full.json` dashboards are downloaded from https://github.com/rfmoz/grafana-dashboards and are available under the Apache License 2.0. 4 | -------------------------------------------------------------------------------- /network/ifupdown2/ff_fix_default_route.cron: -------------------------------------------------------------------------------- 1 | # 2 | # Check if a default route within vrf_external is configured and if it's active 3 | # 4 | SHELL=/bin/bash 5 | PATH=/sbin:/bin:/usr/sbin:/usr/bin 6 | 7 | * * * * * root /usr/local/sbin/ff_fix_default_route 8 | -------------------------------------------------------------------------------- /network/resolv.conf: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/resolv.conf (Salt managed) 3 | # 4 | search {{ salt['pillar.get'] ('globals:dns:search') }} 5 | nameserver {{ salt['pillar.get'] ('globals:dns:resolver_v4') }} 6 | nameserver {{ salt['pillar.get'] ('globals:dns:resolver_v6') }} 7 | -------------------------------------------------------------------------------- /network/ifupdown-ng/reload.sls: -------------------------------------------------------------------------------- 1 | # 2 | # network.ifupdown-ng.reload 3 | # 4 | 5 | # Reload interface configuration if neccessary (no-op for now) 6 | ifreload: 7 | cmd.wait: 8 | - name: /bin/true 9 | - watch: 10 | - file: /etc/network/interfaces 11 | -------------------------------------------------------------------------------- /anycast-healthchecker/check.conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # Check definition for service {{ service }} 3 | # 4 | 
{% for check, cfg in service_config.items() %} 5 | [{{ check }}] 6 | check_cmd = {{ cfg['check_cmd'] }} 7 | ip_prefix = {{ cfg['ip_prefix'] }} 8 | 9 | {% endfor %} 10 | -------------------------------------------------------------------------------- /bird/icinga2/ospf_interfaces_down_ok.txt.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # Interfaces which are OK to be down in OSPF (Salt managed) 3 | # 4 | {%- set interfaces = salt['pillar.get']('node:routing:ospf:ifaces_down_ok', []) %} 5 | {%- for iface in interfaces %} 6 | {{ iface }} 7 | {%- endfor %} 8 | -------------------------------------------------------------------------------- /icingaweb2/menu.ini.tmpl: -------------------------------------------------------------------------------- 1 | {% for menu in icingaweb2_config['menu'] -%} 2 | [{{ menu['title'] }}] 3 | type = "menu-item" 4 | target = "_main" 5 | url = "{{ menu['url'] }}" 6 | owner = "{{ menu['owner'] }}" 7 | groups = "{{ menu['groups'] }}" 8 | 9 | {% endfor %} 10 | -------------------------------------------------------------------------------- /needrestart/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Needrestart 3 | # 4 | 5 | needrestart: 6 | pkg.installed 7 | 8 | /etc/needrestart/conf.d/monitoring.conf: 9 | file.managed: 10 | - source: salt://needrestart/monitoring.conf 11 | - require: 12 | - pkg: needrestart 13 | -------------------------------------------------------------------------------- /network/interfaces/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # network.interfaces 3 | # 4 | # Generate and install /etc/network/interfaces file 5 | # 6 | 7 | /etc/network/interfaces: 8 | file.managed: 9 | - template: jinja 10 | - source: salt://network/interfaces/interfaces.tmpl 11 | -------------------------------------------------------------------------------- /ntp/init.sls: 
-------------------------------------------------------------------------------- 1 | # 2 | # NTP 3 | # 4 | 5 | ntp: 6 | pkg.installed: 7 | - name: ntp 8 | 9 | 10 | /etc/ntp.conf: 11 | file.managed: 12 | - source: 13 | - salt://ntp/ntp.conf.{{ grains.os }}.{{ grains.oscodename }} 14 | - salt://ntp/ntp.conf 15 | -------------------------------------------------------------------------------- /utils.sls: -------------------------------------------------------------------------------- 1 | utils: 2 | pkg.installed: 3 | - pkgs: 4 | # System 5 | - bc 6 | - htop 7 | - sysstat 8 | - zstd 9 | # dig 10 | - dnsutils 11 | # networking 12 | - mtr-tiny 13 | - tcpdump 14 | - iperf 15 | -------------------------------------------------------------------------------- /vim/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Vim magic 3 | # 4 | 5 | vim: 6 | pkg.installed: 7 | - name: vim 8 | 9 | /etc/vim/vimrc.local: 10 | file.managed: 11 | - source: salt://vim/vimrc.local 12 | 13 | /root/.vimrc: 14 | file.managed: 15 | - source: salt://vim/vimrc 16 | -------------------------------------------------------------------------------- /icinga2/api.conf: -------------------------------------------------------------------------------- 1 | # 2 | # The API listener is used for distributed monitoring setups. 
(Salt managed) 3 | # 4 | 5 | object ApiListener "api" { 6 | bind_host = "127.0.0.1" 7 | 8 | ticket_salt = TicketSalt 9 | 10 | accept_commands = true 11 | accept_config = true 12 | } 13 | -------------------------------------------------------------------------------- /openvpn/ldap-auth/pam_ldap.conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # PAM LDAP configuration (Salt managed) 3 | # 4 | 5 | uri {{ server_uri }} 6 | base {{ base_dn }} 7 | 8 | ldap_version 3 9 | 10 | {% if bind_dn and bind_pw %} 11 | binddn {{ bind_dn }} 12 | bindpw {{ bind_pw }} 13 | {% endif %} 14 | -------------------------------------------------------------------------------- /burp/server/default_burp: -------------------------------------------------------------------------------- 1 | # 2 | # Burp backup defaults (Salt Managed) 3 | # 4 | # This is a POSIX shell fragment 5 | # 6 | 7 | # We want to run the bloody server 8 | RUN=yes 9 | 10 | # Additional options that are passed to the Daemon. 11 | DAEMON_ARGS="-c /etc/burp/burp-server.conf" 12 | -------------------------------------------------------------------------------- /fastd/verify-peer.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | timestamp() { 4 | date +"%Y-%m-%d %H:%M:%S" 5 | } 6 | 7 | if [ -n "$(cat /etc/fastd/peers-blacklist | grep "$1")" ]; then 8 | echo -e "$(timestamp)\t$1\t$2\tblocked" >> /var/log/fastd.blacklist; 9 | exit 1; 10 | else 11 | exit 0; 12 | fi 13 | -------------------------------------------------------------------------------- /pppoe/fix_ppp_vrf: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Simple and stupid wrapper around dynamically generated VRF fix script. 4 | # This wrapper will be run by at. 
5 | # 6 | # Maximilian Wilhelm 7 | # -- Tue, 28 Mar 2017 22:57:42 +0200 8 | # 9 | 10 | /usr/local/sbin/fix_ppp_vrf.gen 11 | -------------------------------------------------------------------------------- /grafana/datasources/prom-local.yaml.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: {{ grains.id }} 5 | isDefault: true 6 | type: prometheus 7 | url: http://localhost:9090 8 | jsonData: 9 | httpMethod: POST 10 | manageAlerts: false 11 | prometheusType: Prometheus 12 | -------------------------------------------------------------------------------- /openvpn/ldap-auth/openvpn.pam.d: -------------------------------------------------------------------------------- 1 | # 2 | # LDAP auth for OpenVPN (Salt managed) 3 | # 4 | auth sufficient pam_ldap.so 5 | auth required pam_deny.so 6 | 7 | account sufficient pam_ldap.so 8 | account required pam_deny.so 9 | 10 | session required pam_deny.so 11 | 12 | password required pam_deny.so 13 | -------------------------------------------------------------------------------- /pppoe/ff_fix_ppp: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # 4 | # Check for pppd 5 | if pidof pppd >/dev/null; then 6 | # Ok processes are running 7 | exit 0 8 | fi 9 | 10 | logger -t fix_inet "Restarting internet internet connection." 
11 | 12 | poff tkom >/dev/null || true 13 | 14 | sleep 1 15 | 16 | pon tkom 17 | -------------------------------------------------------------------------------- /postfix/aliases: -------------------------------------------------------------------------------- 1 | # /etc/aliases 2 | mailer-daemon: postmaster 3 | postmaster: root 4 | nobody: root 5 | hostmaster: root 6 | usenet: root 7 | news: root 8 | webmaster: root 9 | www: root 10 | ftp: root 11 | abuse: root 12 | noc: root 13 | security: root 14 | www-data: root 15 | 16 | root: {{ root_mail_address }} 17 | -------------------------------------------------------------------------------- /respondd/respondd@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=respondd (Site %I) 3 | After=syslog.target network-online.target 4 | 5 | [Service] 6 | Type=simple 7 | User=root 8 | Group=root 9 | ExecStart=/srv/ffho-respondd/ext-respondd.py -c /srv/ffho-respondd/%i.conf 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /icinga2/services/lv.conf: -------------------------------------------------------------------------------- 1 | apply Service "check_lv_snap" { 2 | import "generic-service" 3 | 4 | display_name = "LV Snap Deletion" 5 | check_command = "check_lv_snap" 6 | 7 | check_interval = 24h 8 | 9 | command_endpoint = host.name 10 | 11 | vars.lv_snap_delete = true 12 | 13 | assign where "kvm" in host.vars.roles 14 | } 15 | -------------------------------------------------------------------------------- /network/systemd-link.tmpl: -------------------------------------------------------------------------------- 1 | {%- set mac = iface_config.get ('mac') -%} 2 | {%- set driver = iface_config.get ('driver') -%} 3 | # 4 | # {{ interface }} / {{ desc }} 5 | # 6 | 7 | [Match] 8 | MACAddress={{ mac }} 9 | {%- if driver %} 10 | Driver={{ driver }} 11 | {%- endif %} 12 | 13 | 
[Link] 14 | NamePolicy= 15 | Name={{ interface }} 16 | -------------------------------------------------------------------------------- /systemd/90-unfuck-mac-overwrite.link: -------------------------------------------------------------------------------- 1 | # 2 | # Overwrite MACAddressPolicy=persistent which is set in default config file 3 | # /lib/systemd/network/99-default.link so systemd-udevd will NOT fiddle 4 | # around with interfaces it has no business in touching at all. 5 | # 6 | 7 | [Match] 8 | OriginalName=* 9 | 10 | [Link] 11 | MACAddressPolicy=none 12 | -------------------------------------------------------------------------------- /fastd/fastd@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Fast and Secure Tunnelling Daemon (connection %I) 3 | After=network-online.target 4 | 5 | [Service] 6 | Type=notify 7 | ExecStart=/usr/bin/fastd --syslog-level info --syslog-ident fastd@%i -c /etc/fastd/%i/fastd.conf 8 | ExecReload=/bin/kill -HUP $MAINPID 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | -------------------------------------------------------------------------------- /rsyslog/zz-debug.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | #*.=debug;\ 6 | # auth,authpriv.none;\ 7 | # news.none;mail.none -/var/log/debug 8 | 9 | #fängt vorerst alle Nachrichten ab, die nicht in anderen Logs landen um rauszufinden was noch interessant sein könnte 10 | 11 | *.* /var/log/debug 12 | & stop 13 | -------------------------------------------------------------------------------- /bird/icinga2/ibgp_sessions_down_ok.txt.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # BGP sessions which are OK to be down (Salt managed) 3 | # 4 | {%- for peer in salt['pillar.get']('node:routing:bgp:internal:peers:down_OK') %} 5 | {#- save peers node 
name, mangle . and - to _ to make bird happy #} 6 | {%- set peer_name = salt['ffho.re_replace']('[.-]', '_', peer) %} 7 | {{ peer_name }} 8 | {%- endfor %} 9 | -------------------------------------------------------------------------------- /rsyslog/ffho/30-ap.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | if $hostname startswith 'ap-' then /var/log/ap.log 6 | & stop 7 | if $programname startswith 'U7LT,' then /var/log/ap.log 8 | & stop 9 | if $programname startswith 'U7MP,' then /var/log/ap.log 10 | & stop 11 | if $programname startswith 'U7MSH,' then /var/log/ap.log 12 | & stop 13 | -------------------------------------------------------------------------------- /screen/screenrc.root: -------------------------------------------------------------------------------- 1 | # 2 | # /root/.screenrc (Salt managed) 3 | # 4 | 5 | nethack on 6 | autodetach on 7 | crlf off 8 | deflogin off 9 | hardcopy_append off 10 | startup_message off 11 | vbell off 12 | defscrollback 10000 13 | silencewait 15 14 | hardstatus alwayslastline " %H (%l) | %d.%m %c | %w" 15 | sorendition 02 10 16 | 17 | bind P paste . 
18 | -------------------------------------------------------------------------------- /sury/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # sury php 3 | # 4 | 5 | /etc/apt/trusted.gpg.d/deb.sury.org-php.gpg: 6 | file.managed: 7 | - source: salt://sury/sury.gpg 8 | 9 | /etc/apt/sources.list.d/sury.php.list: 10 | file.managed: 11 | - source: salt://sury/sury.list.tmpl 12 | - template: jinja 13 | - require: 14 | - file: /etc/apt/trusted.gpg.d/deb.sury.org-php.gpg 15 | -------------------------------------------------------------------------------- /burp/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Burp backup 3 | # 4 | 5 | include: 6 | - certs 7 | 8 | /etc/apt/trusted.gpg.d/burp.gpg: 9 | file.managed: 10 | - source: salt://burp/burp.gpg 11 | 12 | /etc/apt/sources.list.d/burp.list: 13 | file.managed: 14 | - source: salt://burp/burp.list.tmpl 15 | - template: jinja 16 | - require: 17 | - file: /etc/apt/trusted.gpg.d/burp.gpg 18 | -------------------------------------------------------------------------------- /openvpn/ldap-auth/ldap.conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # LDAP Defaults 3 | # 4 | 5 | # See ldap.conf(5) for details 6 | # This file should be world readable but not world writable. 
7 | 8 | URI {{ server_uri }} 9 | BASE {{ base_dn }} 10 | 11 | #SIZELIMIT 12 12 | #TIMELIMIT 15 13 | #DEREF never 14 | 15 | # TLS certificates (needed for GnuTLS) 16 | TLS_CACERT /etc/ssl/certs/ca-certificates.crt 17 | -------------------------------------------------------------------------------- /icinga2/commands.d/needrestart.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Check commands for needrestart kernel and library check 3 | # 4 | 5 | object CheckCommand "needrestart_kernel" { 6 | import "plugin-check-command" 7 | 8 | command = [ "sudo", "/usr/sbin/needrestart", "-p", "-k" ] 9 | } 10 | 11 | object CheckCommand "needrestart_libs" { 12 | command = [ "sudo", "/usr/sbin/needrestart", "-p", "-l" ] 13 | } 14 | -------------------------------------------------------------------------------- /icingaweb2/authentication.ini.tmpl: -------------------------------------------------------------------------------- 1 | [icingaweb2] 2 | user_class = "{{ icingaweb2_config['authentication']['user_class'] }}" 3 | filter = "{{ icingaweb2_config['authentication']['filter'] }}" 4 | user_name_attribute = "{{ icingaweb2_config['authentication']['user_name_attribute'] }}" 5 | backend = "ldap" 6 | base_dn = "{{ icingaweb2_config['authentication']['base_dn'] }}" 7 | resource = "ffho_ldap" 8 | -------------------------------------------------------------------------------- /batman/bat-hosts.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/bat-hosts (Salt generated) 3 | # 4 | {%- set sites = salt['pillar.get']('sites', {}) %} 5 | {%- set nodes = salt['pillar.get']('nodes', {}) %} 6 | {%- set bat_hosts = salt['ffho_net.gen_bat_hosts'](nodes, sites) %} 7 | {%- for mac in bat_hosts|sort %} 8 | {%- set entry_name = bat_hosts.get (mac) %} 9 | {{ mac }} {{ entry_name }} 10 | {%- endfor %} 11 | -------------------------------------------------------------------------------- 
/prometheus-server/prometheus.default: -------------------------------------------------------------------------------- 1 | # Set the command-line arguments to pass to the server. (Salt managed) 2 | # Due to shell escaping, to pass backslashes for regexes, you need to double 3 | # them (\\d for \d). If running under systemd, you need to double them again 4 | # (\\\\d to mean \d), and escape newlines too. 5 | ARGS="--storage.tsdb.path=/srv/prometheus/metrics2/ --storage.tsdb.retention.time=30d" 6 | -------------------------------------------------------------------------------- /bird/l3-access.conf: -------------------------------------------------------------------------------- 1 | # 2 | # L3 Access VLANs 3 | # 4 | 5 | protocol direct l3_access { 6 | {%- for iface in salt['pillar.get']('node:ifaces')|sort %} 7 | {%- set config = salt['pillar.get']('node:ifaces:' ~ iface) %} 8 | {%- if salt['ffho.re_search']('^vlan(3\d\d|29\d\d)$', iface) or 'l3-access' in config.get ('tags', []) %} 9 | interface "{{ iface }}"; 10 | {%- endif %} 11 | {%- endfor %} 12 | } 13 | -------------------------------------------------------------------------------- /icinga2/services/salt.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Check if salt has to do anything 3 | # 4 | 5 | 6 | # 7 | # Salt 8 | apply Service "salt" { 9 | import "generic-service" 10 | 11 | check_command = "check_salt" 12 | 13 | if (host.name != NodeName) { 14 | command_endpoint = host.name 15 | } 16 | 17 | vars.ok_string = "[ OK ]" 18 | 19 | assign where host.address && host.vars.os == "linux" 20 | } 21 | 22 | -------------------------------------------------------------------------------- /icinga2/services/ssl_cert.conf: -------------------------------------------------------------------------------- 1 | apply Service "SSL Host Cert" { 2 | import "generic-service" 3 | 4 | check_command = "ssl_host_cert" 5 | command_endpoint = host.name 6 | 7 | check_interval = 1d 8 | 9 
| vars.ssl_cert_file = "/etc/ssl/certs/" + host.name + ".cert.pem" 10 | vars.ssl_cert_warn = 90 11 | vars.ssl_cert_crit = 30 12 | 13 | assign where host.address && host.vars.os == "linux" 14 | } 15 | -------------------------------------------------------------------------------- /install-server/ffho-first-boot.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=FFHO first boot service 3 | After=network-online.target 4 | Wants=network-online.target 5 | ConditionPathExists=/etc/freifunk/late-command-OK 6 | 7 | [Service] 8 | Type=oneshot 9 | RemainAfterExit=yes 10 | SyslogIdentifier=ffho-first-boot 11 | ExecStart=/opt/ffho/sbin/ffho-first-boot.sh 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | 16 | -------------------------------------------------------------------------------- /sudo/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Sudo magic 3 | # 4 | 5 | sudo: 6 | pkg.installed: 7 | - name: sudo 8 | 9 | /etc/sudoers.d: 10 | file.directory: 11 | - user: root 12 | - group: root 13 | - mode: 755 14 | - makedirs: True 15 | 16 | /etc/sudoers: 17 | file.managed: 18 | - source: 19 | - salt://sudo/sudoers.{{ grains.os }}.{{ grains.oscodename }} 20 | - salt://sudo/sudoers 21 | -------------------------------------------------------------------------------- /icinga2/commands.d/check_systemd_units.conf: -------------------------------------------------------------------------------- 1 | object CheckCommand "systemd_units" { 2 | import "plugin-check-command" 3 | 4 | command = ["/usr/local/share/monitoring-plugins/check_systemd_units" ] 5 | 6 | arguments = { 7 | "-w" = { 8 | required = false 9 | value = "$whitelist$" 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /systemd/wait-for-routes.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | 
Description=Wait for routing adjacencies to come up 3 | DefaultDependencies=no 4 | Conflicts=shutdown.target 5 | Requisite=networking.service 6 | After=networking.service 7 | Before=network-online.target 8 | 9 | [Service] 10 | Type=oneshot 11 | ExecStart=/usr/local/sbin/wait-for-routes 12 | RemainAfterExit=yes 13 | 14 | [Install] 15 | WantedBy=network-online.target 16 | -------------------------------------------------------------------------------- /users/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Manage root user (password) 3 | # 4 | 5 | # This should break, when the pillar isn't present 6 | {% set root_pw_hash = pillar['globals']['root_password_hash'] %} 7 | 8 | root: 9 | user.present: 10 | - fullname: root 11 | - uid: 0 12 | - gid: 0 13 | - home: /root 14 | - password: {{ root_pw_hash }} 15 | - enforce_password: True 16 | - empty_password: False 17 | -------------------------------------------------------------------------------- /network/ifupdown2/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Use ifupdown2 to manage the interfaces of this box 3 | # 4 | 5 | ifupdown2: 6 | pkg.installed 7 | 8 | # ifupdown2 configuration 9 | /etc/network/ifupdown2/ifupdown2.conf: 10 | file.managed: 11 | - source: 12 | - salt://network/ifupdown2/ifupdown2.conf.{{ grains['oscodename'] }} 13 | - salt://network/ifupdown2/ifupdown2.conf 14 | - require: 15 | - pkg: ifupdown2 16 | -------------------------------------------------------------------------------- /graylog/graylog-api-scripts.conf.tmpl: -------------------------------------------------------------------------------- 1 | [DEFAULTS] 2 | token={{ graylog_config['api_token'] }} 3 | 4 | [LDAP] 5 | server_uri={{ graylog_config['server_uri'] }} 6 | bind_dn={{ graylog_config['bind_dn'] }} 7 | bind_passwd={{ graylog_config['bind_passwd'] }} 8 | search_base_dn={{ graylog_config['search_base_dn'] }} 9 | 
ldap_group_search={{ graylog_config['ldap_group_search'] }} 10 | search_attribute={{ graylog_config['search_attribute'] }} 11 | -------------------------------------------------------------------------------- /anycast-healthchecker/bird.anycast-service.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Anycast Services (Salt managed) 3 | # 4 | 5 | include "/var/lib/anycast-healthchecker/anycast-prefixes-{{ proto }}.conf"; 6 | 7 | protocol direct anycast_srv { 8 | interface "anycast_srv"; 9 | 10 | import filter { 11 | if net ~ ANYCAST_ADVERTISE then { 12 | bgp_community.add (ANYCAST_PREFIX); 13 | accept; 14 | } 15 | 16 | reject; 17 | }; 18 | export none; 19 | } 20 | -------------------------------------------------------------------------------- /unattended-upgrades/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Unattended Upgrades 3 | # 4 | 5 | unattended-upgrades: 6 | pkg.installed: 7 | - name: unattended-upgrades 8 | 9 | /etc/apt/apt.conf.d/20auto-upgrades: 10 | file.managed: 11 | - source: salt://unattended-upgrades/20auto-upgrades 12 | 13 | /etc/apt/apt.conf.d/50unattended-upgrades: 14 | file.managed: 15 | - source: salt://unattended-upgrades/50unattended-upgrades.{{ grains.os }}.{{ grains.oscodename }} 16 | -------------------------------------------------------------------------------- /openvpn/ccd.tmpl: -------------------------------------------------------------------------------- 1 | {%- if network_config.get ('device-type', 'tap') == 'tap' %} 2 | {%- for ip in host_stanza.get ('ip', []) %} 3 | {%- if ':' in ip %} 4 | ifconfig-ipv6-push {{ ip }}/{{ network_config['netmask_v6'] }} 5 | {%- else %} 6 | {%- set mask = network_config['netmask_v4'] if '.' 
in network_config['netmask_v4']|string else '255.255.255.254' %} 7 | ifconfig-push {{ ip }} {{ mask }} 8 | {%- endif %} 9 | {%- endfor %} 10 | {%- endif %} 11 | -------------------------------------------------------------------------------- /bash/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Bash 3 | # 4 | 5 | # 6 | # .bashrc for root 7 | /root/.bashrc: 8 | file.managed: 9 | - source: salt://bash/bashrc.root 10 | - template: jinja 11 | 12 | # 13 | # Nifty aliases for gateway 14 | {% if 'batman_gw' in salt['pillar.get']('node:roles', []) %} 15 | /root/.bash_aliases: 16 | file.managed: 17 | - source: salt://bash/bash_aliases.root 18 | {% endif %} 19 | 20 | 21 | # bashrc.user is used in state 'build' for the build user! 22 | -------------------------------------------------------------------------------- /dhcp-client/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # DHCP client w/ VRF support 3 | # 4 | 5 | /etc/dhcp/dhclient.conf: 6 | file.managed: 7 | - source: salt://dhcp-client/dhclient.conf 8 | 9 | /etc/dhcp/dhclient-enter-hooks.d/dont-update-resolv-conf: 10 | file.managed: 11 | - source: salt://dhcp-client/dont-update-resolv-conf 12 | - mode: 0755 13 | 14 | /usr/local/sbin/dhclient-script: 15 | file.managed: 16 | - source: salt://dhcp-client/dhclient-script 17 | - mode: 755 18 | -------------------------------------------------------------------------------- /network/ifupdown-ng/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Use ifupdown-ng to manage the interfaces of this box 3 | # 4 | 5 | ifupdown-ng: 6 | pkg.installed 7 | 8 | # ifupdown-ng configuration 9 | /etc/network/ifupdown-ng.conf: 10 | file.managed: 11 | - source: 12 | - salt://network/ifupdown-ng/ifupdown-ng.conf 13 | 14 | # Remove workaround for ifupdown2 15 | /usr/local/sbin/ff_fix_default_route: 16 | file.absent 17 | 18 | 
/etc/cron.d/ff_fix_default_route: 19 | file.absent 20 | -------------------------------------------------------------------------------- /icinga2/commands.d/dhcp-server.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Check DHCP pool usage (Salt managed) 3 | # 4 | object CheckCommand "dhcp_pool" { 5 | import "plugin-check-command" 6 | 7 | command = [ "/usr/bin/sudo", "/usr/local/sbin/dhcpd-pool", "--nagios" ] 8 | 9 | arguments = { 10 | "--config" = "$dhcpd_config_file$" 11 | "--leases" = "$dhcpd_leases_file$" 12 | } 13 | 14 | vars.dhcpd_config_file = "/etc/dhcp/dhcpd.conf" 15 | vars.dhcpd_leases_file = "/var/lib/dhcp/dhcpd.leases" 16 | } 17 | -------------------------------------------------------------------------------- /icinga2/services/dhcp-server.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Check DHCP server pools (Salt managed) 3 | # 4 | 5 | 6 | # 7 | # dhcp_pool 8 | apply Service "dhcp_pool" { 9 | import "generic-service" 10 | 11 | check_command = "dhcp_pool" 12 | check_interval = 10m 13 | 14 | if (host.name != NodeName) { 15 | command_endpoint = host.name 16 | } 17 | 18 | assign where host.address && host.vars.os == "linux" && ("batman_gw" in host.vars.roles || "dhcp-server" in host.vars.roles) 19 | } 20 | -------------------------------------------------------------------------------- /rsyslog/ffho/50-messages.conf: -------------------------------------------------------------------------------- 1 | $FileCreateMode 0640 2 | $FileOwner root 3 | $FileGroup adm 4 | 5 | *.=info;*.=notice;*.=warn;*.=emerg;\ 6 | auth.none,authpriv.none;\ 7 | cron.none,daemon.none;\ 8 | mail.none,news.none;\ 9 | local0.none,local1.none;\ 10 | local2.none,local3.none;\ 11 | local4.none,local5.none;\ 12 | local6.none,local7.none -/var/log/messages 13 | 14 | & stop 15 | -------------------------------------------------------------------------------- /icinga2/services/mail.conf: 
-------------------------------------------------------------------------------- 1 | # 2 | # Mail related stuff (Salt managed) 3 | # 4 | 5 | # 6 | # mailq 7 | apply Service "mailq" { 8 | import "generic-service" 9 | 10 | check_command = "sudo_mailq" 11 | 12 | if (host.name != NodeName) { 13 | command_endpoint = host.name 14 | } 15 | 16 | vars.mailq_servertype = "postfix" 17 | vars.mailq_warning = 5 18 | vars.mailq_critical = 10 19 | # vars.mailq_sudo = true 20 | 21 | assign where host.address && host.vars.os == "linux" 22 | } 23 | -------------------------------------------------------------------------------- /icingaweb2/groups.ini.tmpl: -------------------------------------------------------------------------------- 1 | [icingaweb2] 2 | resource = "ffho_ldap" 3 | user_backend = "icingaweb2" 4 | group_class = "{{ icingaweb2_config['groups']['group_class'] }}" 5 | group_filter = "{{ icingaweb2_config['groups']['group_filter'] }}" 6 | group_name_attribute = "{{ icingaweb2_config['groups']['group_name_attribute'] }}" 7 | group_member_attribute = "{{ icingaweb2_config['groups']['group_member_attribute'] }}" 8 | base_dn = "{{ icingaweb2_config['groups']['base_dn'] }}" 9 | backend = "ldap" 10 | -------------------------------------------------------------------------------- /nginx/node.ffho.net: -------------------------------------------------------------------------------- 1 | geo $url { 2 | # default 3 | default hochstift.freifunk.net; 4 | 5 | {%- set sites = salt['pillar.get']('sites') %} 6 | {% for site_name, site in sites.items()|sort %} 7 | # {{ site.name}} 8 | {{ site.prefix_v6 }} node.{{ site_name }}.ffho.net; 9 | {{ site.prefix_v4 }} node.{{ site_name }}.ffho.net; 10 | {% endfor %} 11 | } 12 | 13 | server { 14 | listen 80; 15 | listen [::]:80; 16 | 17 | server_name ~^node.(srv\.)?(in\.)?ffho.net$; 18 | return 302 http://$url; 19 | } 20 | -------------------------------------------------------------------------------- /icinga2/services/gpg.conf: 
-------------------------------------------------------------------------------- 1 | {% set dirs = salt['pillar.get']("monitoring:checks:check_gpg_expiry:paths", []) %} 2 | apply Service "check_gpg_expiry" { 3 | import "generic-service" 4 | 5 | display_name = "GPG Expiry" 6 | check_command = "check_gpg_expiry" 7 | 8 | check_interval = 24h 9 | 10 | command_endpoint = host.name 11 | 12 | vars.gpg_directory = ["{{ dirs|join('\", \"') }}"] 13 | vars.gpg_verbose = true 14 | vars.gpg_sort = true 15 | 16 | assign where "salt-master" in host.vars.roles 17 | } 18 | -------------------------------------------------------------------------------- /network/bootstrap.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Install and configure configured network suite, configure links and install 3 | # /etc/network/interfaces but do not reload the interfaces. 4 | # 5 | # To be called from preseed 6 | # 7 | 8 | # Which network suite to configure? 9 | {% set default_suite = salt['pillar.get']('network:suite', 'ifupdown2') %} 10 | {% set suite = salt['pillar.get']('node:network:suite', default_suite) %} 11 | 12 | include: 13 | - network.link 14 | - network.interfaces 15 | - network.{{ suite }} 16 | -------------------------------------------------------------------------------- /burp/client.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Burp backup - Client 3 | # 4 | 5 | include: 6 | - burp 7 | 8 | 9 | burp-client: 10 | pkg.installed 11 | 12 | /etc/default/burp-client: 13 | file.managed: 14 | - source: salt://burp/client/default_burp 15 | 16 | /etc/burp/burp.conf: 17 | file.managed: 18 | - source: salt://burp/client/burp.conf.tmpl 19 | - template: jinja 20 | burp_server_name: {{ salt['pillar.get']('burp:server:fqdn') }} 21 | burp_password: {{ salt['pillar.get']('node:burp:password') }} 22 | -------------------------------------------------------------------------------- 
/nginx/ffho.d/add-headers.conf: -------------------------------------------------------------------------------- 1 | # Include for header to be set in webserver mode (Salt managed) 2 | add_header X-Frame-Options "SAMEORIGIN; always;"; 3 | add_header X-Content-Type-Options nosniff; 4 | add_header X-XSS-Protection "1; mode=block"; 5 | add_header Strict-Transport-Security "max-age=15552000;includeSubDomains"; 6 | add_header Content-Security-Policy "default-src blob: https: data: 'unsafe-inline' 'unsafe-eval' always; upgrade-insecure-requests"; 7 | add_header Referrer-Policy "strict-origin-when-cross-origin"; 8 | -------------------------------------------------------------------------------- /Documentation/example-pillar/nodes/mail.sls: -------------------------------------------------------------------------------- 1 | mail.in.ffho.net: 2 | id: 30 3 | 4 | loopback_override: 5 | v4: 80.70.181.59 6 | v6: 2a02:450:1::25 7 | 8 | sysLocation: Vega 9 | 10 | mailname: mail.ffho.net 11 | 12 | roles: 13 | - mx 14 | 15 | ifaces: 16 | eth0: 17 | desc: "Upstream Vega" 18 | prefixes: 19 | - 80.70.181.59/32 20 | - 2a02:450:1::25/64 21 | pointopoint: 80.70.181.56 22 | gateway: 23 | - 80.70.181.56 24 | - 2a02:450:1::1 25 | -------------------------------------------------------------------------------- /icinga2/zones.conf: -------------------------------------------------------------------------------- 1 | # 2 | # zones.conf (Salt managed) 3 | # 4 | 5 | object Endpoint "{{ grains.id }}" { 6 | host = "{{ grains.id }}" 7 | } 8 | 9 | object Zone "{{ grains.id }}" { 10 | endpoints = [ "{{ grains.id }}" ] 11 | parent = "master" 12 | } 13 | 14 | object Endpoint "icinga2.in.ffho.net" { 15 | host = "icinga2.in.ffho.net" 16 | port = "5665" 17 | } 18 | 19 | object Zone "master" { 20 | endpoints = [ "icinga2.in.ffho.net" ] 21 | } 22 | 23 | object Zone "global-templates" { 24 | global = true 25 | } 26 | -------------------------------------------------------------------------------- 
/Documentation/example-pillar/logging.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Logging related config 3 | # 4 | 5 | logging: 6 | 7 | # Config for (r)syslog 8 | syslog: 9 | 10 | # Central logserver every node should send logs to 11 | logserver: "" 12 | 13 | # Config for Graylog 14 | graylog: 15 | 16 | # IP of the graylog entry point 17 | syslog_uri: "" 18 | 19 | # password secret 20 | password_secret: "" 21 | 22 | root_password_sha2: "" 23 | 24 | root_username: "" 25 | -------------------------------------------------------------------------------- /burp/server/client.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # Client specific configuration for {{ node }} 3 | # 4 | 5 | # Include common values for all clients 6 | . incexc/common 7 | 8 | {% for keyword in burp_config|sort %} 9 | {%- set argument = burp_config.get (keyword) %} 10 | {%- if argument is none %} 11 | {%- elif argument is string or argument is number %} 12 | {{ keyword }} = {{ argument }} 13 | {%- else %} 14 | {%- for item in argument %} 15 | {{ keyword }} = {{ item }} 16 | {%- endfor %} 17 | {%- endif %} 18 | {% endfor %} 19 | -------------------------------------------------------------------------------- /salt-minion/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Salt minion config 3 | # 4 | 5 | salt-minion: 6 | pkg.installed: 7 | - pkgs: 8 | - salt-minion 9 | service.running: 10 | - enable: true 11 | # - reload: true 12 | 13 | /etc/salt/minion: 14 | file.managed: 15 | - source: salt://salt-minion/minion_conf.tmpl 16 | - template: jinja 17 | - context: 18 | salt_config: {{ salt['pillar.get']('globals:salt') }} 19 | - require: 20 | - pkg: salt-minion 21 | # - watch_in: 22 | # - service: salt-minion 23 | -------------------------------------------------------------------------------- /bird/mesh_routes.conf: 
-------------------------------------------------------------------------------- 1 | # 2 | # Learn mesh prefixes (Salt managed) 3 | # 4 | 5 | {% for site in salt['pillar.get']('node:sites', []) %} 6 | protocol direct mesh_{{ salt['ffho.re_replace']('-', '_', site) }} { 7 | interface "br-{{ site }}"; 8 | check link yes; 9 | 10 | import all; 11 | # TODO Add filter to enable/disable import of prefix per site 12 | # according to gateway status (announced / not announced) to 13 | # prevent prefix from beein announced when mesh is broken or 14 | # something like this. 15 | } 16 | {% endfor %} 17 | -------------------------------------------------------------------------------- /network/interfaces/vm_interfaces.tmpl: -------------------------------------------------------------------------------- 1 | {%- for vm_fqdn, vm_cfg in vms.items ()|sort %} 2 | {%- set vm_name = vm_fqdn.split ('.')[0].split ('-')[0] %} 3 | {%- for vm_iface, iface_cfg in vm_cfg['ifaces'].items ()|sort %} 4 | {%- if iface_cfg.get ('vlan-mode') == 'tagged' and iface_cfg.get ('tagged_vlans') %} 5 | {%- set host_iface = "%s_%s" % (vm_name, vm_iface.replace ('vlan', 'v')) %} 6 | iface {{ host_iface }} 7 | bridge-vids {{ iface_cfg.get ('tagged_vlans')|join (' ') }} 8 | {%- endif %} 9 | {%- endfor %} 10 | {%- endfor %} 11 | -------------------------------------------------------------------------------- /nginx/ffho.d/proxy-headers.conf: -------------------------------------------------------------------------------- 1 | # Include for headers to be (re)set in proxy mode (Salt managed) 2 | proxy_set_header X-Frame-Options "SAMEORIGIN; always;"; 3 | proxy_set_header X-Content-Type-Options nosniff; 4 | proxy_set_header X-XSS-Protection "1; mode=block"; 5 | proxy_set_header Strict-Transport-Security "max-age=15552000;includeSubDomains"; 6 | proxy_set_header Content-Security-Policy "default-src blob: https: data: 'unsafe-inline' 'unsafe-eval' always; upgrade-insecure-requests"; 7 | proxy_set_header 
Referrer-Policy "strict-origin-when-cross-origin"; 8 | -------------------------------------------------------------------------------- /prometheus-exporters/node-exporter/prometheus-node-exporter.default: -------------------------------------------------------------------------------- 1 | # Set the command-line arguments to pass to the server. (Salt managed) 2 | # Due to shell escaping, to pass backslashes for regexes, you need to double 3 | # them (\\d for \d). If running under systemd, you need to double them again 4 | # (\\\\d to mean \d), and escape newlines too. 5 | ARGS="--collector.interrupts --collector.ntp --collector.qdisc --no-collector.infiniband --no-collector.ipvs --no-collector.nfs --no-collector.nfsd --no-collector.wifi --no-collector.zfs --no-collector.textfile" 6 | -------------------------------------------------------------------------------- /motd/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # motd 3 | # 4 | 5 | {% set name = grains.id.split('.') %} 6 | motd: 7 | pkg.installed: 8 | - pkgs: 9 | - figlet 10 | 11 | cmd.run: 12 | - name: echo > /etc/motd.ffho ; figlet {{name[0]}} >> /etc/motd.ffho; sed -i -e 's/^\(.*\)/ \1/' /etc/motd.ffho ; sed -i -e '$s/\(.*\)/\1.{{name[1:]|join('.')}}/' /etc/motd.ffho ; echo >> /etc/motd.ffho 13 | - creates: /etc/motd.ffho 14 | 15 | file.symlink: 16 | - name: /etc/motd 17 | - target: /etc/motd.ffho 18 | - force: True 19 | - backupname: /etc/motd.old 20 | -------------------------------------------------------------------------------- /_modules/ffho.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | def re_replace (pattern, replacement, string): 4 | return re.sub (pattern, replacement, string) 5 | 6 | def re_search (pattern, string, flags = 0): 7 | return re.search (pattern, string, flags) 8 | 9 | def is_bool (value): 10 | return type (value) == bool 11 | 12 | def any_item_in_list (items, list): 13 | 
return len(set(items).intersection(set(list))) != 0 14 | 15 | def cmp (x, y): 16 | """ 17 | Most generic comparator 18 | """ 19 | if x < y: 20 | return -1 21 | elif x == y: 22 | return 0 23 | else: 24 | return 1 25 | -------------------------------------------------------------------------------- /icinga2/commands.d/cpu_usage.conf: -------------------------------------------------------------------------------- 1 | # 2 | # CPU-Check von https://github.com/iamcheko/check_cpu_usage 3 | # 4 | 5 | object CheckCommand "cpu_usage" { 6 | import "plugin-check-command" 7 | command = [ FFHOPluginDir + "/check_cpu_usage" ] 8 | arguments = { 9 | "--timeout" = "$cpu_usage_timeout$" 10 | "--critical" = "$cpu_usage_critical$" 11 | "--warning" = "$cpu_usage_warning$" 12 | "--statfile" = "$cpu_usage_statfile$" 13 | "--gapfile" = "$cpu_usage_gapfile$" 14 | "--names" = "$cpu_usage_names$" 15 | "--details" = "$cpu_usage_details$" 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /locales/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Configure locales 3 | # 4 | 5 | locales: 6 | pkg.installed 7 | 8 | # Workaround missing locale.present in our salt version 9 | /etc/locale.gen: 10 | file.managed: 11 | - source: 12 | - salt://locales/locale.gen.{{ grains.os }}.{{ grains.oscodename }} 13 | - salt://locales/locale.gen 14 | - require: 15 | - pkg: locales 16 | 17 | locale-gen: 18 | cmd.wait: 19 | - watch: 20 | - file: /etc/locale.gen 21 | 22 | en_US.UTF-8: 23 | locale.system: 24 | - require: 25 | - file: /etc/locale.gen 26 | 27 | 28 | -------------------------------------------------------------------------------- /icinga2/users.conf.tmpl: -------------------------------------------------------------------------------- 1 | {% set users = salt['pillar.get']("monitoring:users", {}) %} 2 | {% for user,params in users.items()|sort %} 3 | object User "{{ user }}" { 4 | import "generic-user" 5 
| 6 | {%- if "display_name" in params %} 7 | display_name = "{{ params['display_name'] }}" 8 | {%- endif %} 9 | 10 | {%- if "email" in params %} 11 | email = "{{ params['email'] }}" 12 | {%- endif %} 13 | 14 | {%- if "telegram_chat_id" in params %} 15 | vars.telegram_chat_id = "{{ params['telegram_chat_id'] }}" 16 | {%- endif %} 17 | } 18 | {% endfor %} 19 | -------------------------------------------------------------------------------- /wireguard/wireguard.conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # Wireguard tunnel to {{ config['peer_fqdn'] }} (Salt managed) 3 | # 4 | [Interface] 5 | PrivateKey = {{ privkey }} 6 | {%- if config['mode'] == 'server' %} 7 | ListenPort = {{ config['port'] }} 8 | {%- endif %} 9 | FwMark = {{ config['fwmark'] }} 10 | 11 | [Peer] 12 | {%- if config['mode'] == 'client' %} 13 | Endpoint = {{ config['peer_ip'] }}:{{ config['port'] }} 14 | {%- endif %} 15 | PublicKey = {{ config['peer_pubkey'] }} 16 | AllowedIPs = 0.0.0.0/0, ::/0 17 | {%- if config['mode'] == 'client' %} 18 | PersistentKeepalive = 25 19 | {%- endif %} 20 | -------------------------------------------------------------------------------- /icinga2/services/burp.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Check burp backup 3 | # 4 | 5 | {%- set burp_server = salt['pillar.get']('burp:server:fqdn') %} 6 | 7 | 8 | apply Service "burp_backup" { 9 | import "generic-service" 10 | 11 | check_interval = 1h 12 | 13 | check_command = "file_age" 14 | vars.file_age_warning_time = 100800 # 28h 15 | vars.file_age_critical_time = 115200 # 32h 16 | vars.file_age_file = "/srv/burp/" + host.name + "/current" 17 | 18 | command_endpoint = "{{ burp_server }}" 19 | 20 | assign where host.address && host.vars.os == "linux" && "backup" in host.vars.tags 21 | } 22 | -------------------------------------------------------------------------------- /rsyslog/ffho.logrotate: 
-------------------------------------------------------------------------------- 1 | /var/log/ap.log 2 | /var/log/bird.log 3 | /var/log/dhcp.log 4 | /var/log/fastd.log 5 | /var/log/influxdb.log 6 | /var/log/named.log 7 | /var/log/net.log 8 | /var/log/ntpd.log 9 | /var/log/openvpn.log 10 | /var/log/salt-master.log 11 | /var/log/slapd.log 12 | /var/log/snmpd.log 13 | /var/log/wbbl.log 14 | /var/log/yanic.log 15 | { 16 | rotate 7 17 | daily 18 | missingok 19 | notifempty 20 | delaycompress 21 | compress 22 | postrotate 23 | /usr/lib/rsyslog/rsyslog-rotate 24 | endscript 25 | } 26 | 27 | -------------------------------------------------------------------------------- /icinga2/commands.d/syncrepl_extended.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Check LDAP replication 3 | # 4 | 5 | object CheckCommand "syncrepl_extended" { 6 | import "plugin-check-command" 7 | 8 | command = [ "/usr/local/share/monitoring-plugins/check_syncrepl_extended" ] 9 | 10 | arguments = { 11 | "--provider" = "$provider$" 12 | "--consumer" = "$consumer$" 13 | "--serverID" = "$serverid$" 14 | "--starttls" = { 15 | set_if = "$starttls$" 16 | } 17 | "--dn" = "$bind_dn$" 18 | "--pwd" = "$bind_password$" 19 | "--basedn" = "$base_dn$" 20 | "--nagios" = { 21 | set_if = true 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /openvpn/openvpn@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=OpenVPN connection to %i 3 | PartOf=openvpn.service 4 | ReloadPropagatedFrom=openvpn.service 5 | After=vrf_external-online.target 6 | CapabilityBoundingSet=CAP_IPC_LOCK CAP_NET_ADMIN CAP_NET_BIND_SERVICE CAP_NET_RAW CAP_SETGID CAP_SETUID CAP_SYS_CHROOT CAP_DAC_OVERRIDE CAP_AUDIT_WRITE 7 | 8 | [Service] 9 | Type=forking 10 | ExecStart=/usr/sbin/openvpn --daemon ovpn-%i --status /run/openvpn/%i.status 10 --cd /etc/openvpn --config 
/etc/openvpn/%i.conf 11 | ExecReload=/bin/kill -HUP $MAINPID 12 | WorkingDirectory=/etc/openvpn 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /needrestart/monitoring.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Icinga plugin related configuration (Salt managed) 3 | # 4 | # Nagios Plugin: configure return code use by nagios 5 | # as service status[1]. 6 | # 7 | # [1] https://nagios-plugins.org/doc/guidelines.html#AEN78 8 | # 9 | # Default: 10 | # 'nagios-status' => { 11 | # 'sessions' => 1, 12 | # 'services' => 2, 13 | # 'kernel' => 2, 14 | # 'ucode' => 2, 15 | # 'containers' => 1 16 | # }, 17 | # 18 | # Example: to ignore outdated sessions (status OK) 19 | # $nrconf{'nagios-status'}->{sessions} = 0; 20 | 21 | # Just warn about newer kernel 22 | $nrconf{'nagios-status'}->{kernel} = 1; 23 | -------------------------------------------------------------------------------- /slapd/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # LDAP server configuration 3 | # 4 | 5 | slapd: 6 | pkg.installed: 7 | - name: slapd 8 | service.running: 9 | - restart: True 10 | 11 | ldap-utils: 12 | pkg.installed 13 | 14 | # Remove slapd.d config directory 15 | /etc/ldap/slapd.d: 16 | file.absent 17 | 18 | # Install proper slapd.conf 19 | /etc/ldap/slapd.conf: 20 | file.managed: 21 | - source: salt://slapd/slapd.conf.H_{{ grains.id }} 22 | - watch_in: 23 | - service: slapd 24 | 25 | # Listen on ldaps! 
26 | /etc/default/slapd: 27 | file.managed: 28 | - source: salt://slapd/slapd.default 29 | - watch_in: 30 | - service: slapd 31 | -------------------------------------------------------------------------------- /icinga2/commands.d/check_lv_snap.conf: -------------------------------------------------------------------------------- 1 | object CheckCommand "check_lv_snap" { 2 | import "plugin-check-command" 3 | command = [ "/usr/bin/sudo", FFHOPluginDir + "/check_lv_snap" ] 4 | arguments = { 5 | "--regex" = { 6 | required = false 7 | value = "$lv_snap_regex$" 8 | repeat_key = false 9 | } 10 | "--warning" = { 11 | required = false 12 | value = "$lv_snap_warning_secs$" 13 | } 14 | "--critical" = { 15 | required = false 16 | value = "$lv_snap_critical_secs$" 17 | } 18 | "--delete" = { 19 | set_if = "$lv_snap_delete$" 20 | description = "Only show snapshots overdue for deletion in output" 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /icinga2/commands.d/nagios-plugins-contrib.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Checks aus nagios-plugins-contrib (Salt managed) 3 | # 4 | 5 | ### Memory-Check 6 | object CheckCommand "memory" { 7 | import "plugin-check-command" 8 | command = [ PluginDir + "/check_memory" ] 9 | arguments = { 10 | "--warning" = "$memory.warning$" 11 | "--critical" = "$memory.critical$" 12 | "--unit" = "$memory.unit$" 13 | "--timeout" = "$memory.timeout$" 14 | } 15 | vars.memory.warning = 10 16 | vars.memory.critical = 5 17 | vars.memory.unit = "M" 18 | } 19 | 20 | ### libs-Check 21 | object CheckCommand "libs" { 22 | import "plugin-check-command" 23 | command = [ PluginDir + "/check_libs" ] 24 | } 25 | -------------------------------------------------------------------------------- /bird/ibgp.conf: -------------------------------------------------------------------------------- 1 | # 2 | # FFHO iBGP configuration (Salt managed) 3 | # 4 | 5 
| template bgp ibgp { 6 | import filter ibgp_in; 7 | export filter ibgp_out; 8 | 9 | local as AS_OWN; 10 | 11 | source address LO_IP; 12 | 13 | enable route refresh yes; 14 | graceful restart yes; 15 | } 16 | 17 | {%- set peers = salt['pillar.get']("node:routing:bgp:internal:peers:" ~ family, []) %} 18 | {% for peer_config in peers %} 19 | protocol bgp {{ peer_config.get ('node')|replace(".", "_")|replace("-", "_") }} from ibgp { 20 | neighbor {{ peer_config.get ('ip') }} as AS_OWN; 21 | 22 | {%- if peer_config.get ('rr_client') %} 23 | rr client; 24 | {%- endif %} 25 | } 26 | 27 | {% endfor %} 28 | -------------------------------------------------------------------------------- /icinga2/wbbl.conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # {{ link_id }} 3 | # 4 | {%- set devices = link_config.get ('devices', {}) %} 5 | 6 | {% for device, device_config in devices.items () %} 7 | {%- set fqdn = device ~ '.' ~ link_id %} 8 | {%- set addresses = [] %} 9 | {%- for iface, iface_config in device_config.get ('ifaces', {}).items () %} 10 | {%- for ip in iface_config.get ('prefixes', []) %} 11 | {%- do addresses.append (ip.split ('/')[0]) %} 12 | {%- endfor %} 13 | {%- endfor %} 14 | object Host "{{ fqdn }}" { 15 | import "generic-host" 16 | 17 | display_name = "{{ fqdn }}" 18 | 19 | address = "{{ addresses[0] }}" 20 | 21 | vars.os = "AirOS" 22 | } 23 | 24 | {%- endfor %} 25 | -------------------------------------------------------------------------------- /kvm/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # KVM host 3 | # 4 | 5 | virt-pkgs: 6 | pkg.installed: 7 | - pkgs: 8 | - qemu-system-x86 9 | - libvirt-daemon-system 10 | - xmlstarlet 11 | - netcat-openbsd 12 | 13 | libvirtd: 14 | service.running: 15 | - enable: True 16 | - reload: True 17 | 18 | /etc/libvirt/hooks/qemu: 19 | file.managed: 20 | - source: salt://kvm/qemu-hook 21 | - mode: 755 22 | - require: 23 | 
- pkg: virt-pkgs 24 | - watch_in: 25 | - service: libvirtd 26 | 27 | /etc/libvirt/hooks/get-bridge-vids: 28 | file.managed: 29 | - source: salt://kvm/get-bridge-vids 30 | - mode: 755 31 | - require: 32 | - pkg: virt-pkgs 33 | -------------------------------------------------------------------------------- /icinga2/plugins/check_salt: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Check if state-file exists, otherwise exit with unknown 4 | if [ ! -f /var/cache/salt/state_apply ] ; then echo "Statefile does not exist" ; exit 3 ; fi 5 | 6 | # Check age of statefile. If it's older than 7 hours (25200 s), exit with unknown 7 | if [ $(($(date +%s) - $(date -r /var/cache/salt/state_apply +%s))) -gt 25200 ] ; then echo "Statefile too old" ; exit 3 ; fi 8 | 9 | # List all IDs and exclude ffho-repo 10 | CHANGES_IDS=$(grep "ID:" /var/cache/salt/state_apply | grep -v "ID: .*-repo$") 11 | 12 | if [ -n "$CHANGES_IDS" ] ; then 13 | echo "IDs with changes:" 14 | echo "$CHANGES_IDS" 15 | exit 1 # warning 16 | fi 17 | 18 | echo "Nothing to do" 19 | exit 0 # ok 20 | 21 | -------------------------------------------------------------------------------- /influxdb/backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | WORKDIR=/var/lib/influxdb/backup 3 | # check if we are the user influxdb 4 | if [ ! "$(whoami)" == "influxdb" ] ; then 5 | echo "This script must run as user influxdb" 6 | exit 1 7 | fi 8 | 9 | # Create workdir if it does not exist 10 | [ ! -d ${WORKDIR} ] && mkdir ${WORKDIR} 11 | 12 | pushd ${WORKDIR} > /dev/null 13 | if [ -d $(date -I) ] ; then 14 | echo "Backupdirectory for today already exists. 
I refuse to do anything" 15 | exit 1 16 | fi 17 | 18 | echo "Backup" 19 | influxd backup -portable $(date -I) 20 | echo "Backup finished" 21 | echo "--------------------------------------------" 22 | echo "Cleanup" 23 | find ${WORKDIR} -ctime +2 -delete 24 | 25 | popd > /dev/null 26 | -------------------------------------------------------------------------------- /docker/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Setup docker.io 3 | # 4 | 5 | docker-repo: 6 | pkgrepo.managed: 7 | - comments: "# Docker.io" 8 | - human_name: Docker.io repository 9 | - name: "deb https://download.docker.com/linux/debian {{ grains.oscodename }} stable" 10 | - dist: {{ grains.oscodename }} 11 | - file: /etc/apt/sources.list.d/docker.list 12 | - key_url: https://download.docker.com/linux/debian/gpg 13 | 14 | docker-pkgs: 15 | pkg.installed: 16 | - pkgs: 17 | - docker-ce 18 | - docker-ce-cli 19 | - containerd.io 20 | 21 | # Install docker-compose via pip *shrug* 22 | python-pip: 23 | pkg.installed 24 | 25 | docker-compose: 26 | pip.installed: 27 | - require: 28 | - pkg: python-pip 29 | -------------------------------------------------------------------------------- /network/link.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Networking / link 3 | # 4 | 5 | # Write a systemd link file for every interface with a MAC 6 | {% for iface, iface_config in salt['pillar.get']('node:ifaces', {}).items ()|sort %} 7 | {% if 'mac' in iface_config %} 8 | /etc/systemd/network/42-{{ iface }}.link: 9 | file.managed: 10 | - source: salt://network/systemd-link.tmpl 11 | - template: jinja 12 | interface: {{ iface }} 13 | iface_config: {{ iface_config }} 14 | desc: {{ iface_config.get ('desc', '') }} 15 | - watch_in: 16 | - cmd: update-initramfs 17 | {% endif %} 18 | {% endfor %} 19 | 20 | # Rebuild initrd files if necessary 21 | update-initramfs: 22 | cmd.wait: 23 | - name: 
/usr/sbin/update-initramfs -k all -u 24 | -------------------------------------------------------------------------------- /respondd/respondd-config.tmpl: -------------------------------------------------------------------------------- 1 | { 2 | "batman": "{{ bat_iface }}", 3 | "bridge": "{{ mcast_iface }}", 4 | {%- if mesh_vpn %} 5 | "mesh-vpn": ["{{ ('", "').join(mesh_vpn) }}"], 6 | {%- endif %} 7 | "nodeinfo": { 8 | "hostname": "{{ hostname }}", 9 | {%- if location and not location_hidden and location.get ('latitude') and location.get ('longitude') %} 10 | "location": { 11 | "latitude": {{ location.latitude }}, 12 | "longitude": {{ location.longitude }} 13 | }, 14 | {%- endif %} 15 | "software": { 16 | "firmware": { 17 | "release": "FFHO-{{ grains.osmajorrelease }}.0" 18 | } 19 | }, 20 | "system": { 21 | "site_code": "{{ site_code }}" 22 | }, 23 | "vpn": {{ fastd_peers }} 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /Documentation/example-pillar/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | '*': 3 | # Site wide options 4 | - globals 5 | - network 6 | 7 | - net 8 | - nodes 9 | - sites 10 | - regions 11 | - cert 12 | - ssh 13 | 14 | # 15 | # Role/Application specific stuff 16 | 17 | # Automatic Certificate Management 18 | - acme 19 | 20 | # Burp backup 21 | - burp 22 | 23 | # Traffic engineering 24 | - te 25 | 26 | # DNS server 27 | - dns-server 28 | 29 | # OpenVPN tunnels 30 | - ovpn 31 | 32 | # Anycast Healthchecker 33 | - anycast-healthchecker 34 | 35 | # Frontend Config 36 | - frontend 37 | 38 | # Logging 39 | - logging 40 | 41 | # LDAP 42 | - ldap 43 | 44 | # Icinga2 45 | - monitoring 46 | -------------------------------------------------------------------------------- /apt/salt.sources: -------------------------------------------------------------------------------- 1 | X-Repolib-Name: Salt Project 2 | Description: Salt has many possible uses, 
including configuration management. 3 | Built on Python, Salt is an event-driven automation tool and framework to deploy, 4 | configure, and manage complex IT systems. Use Salt to automate common 5 | infrastructure administration tasks and ensure that all the components of your 6 | infrastructure are operating in a consistent desired state. 7 | - Website: https://saltproject.io 8 | - Public key: https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public 9 | Enabled: yes 10 | Types: deb 11 | URIs: https://packages.broadcom.com/artifactory/saltproject-deb 12 | Signed-By: /etc/apt/keyrings/salt-archive-keyring.pgp 13 | Suites: stable 14 | Components: main 15 | -------------------------------------------------------------------------------- /ssh/authorized_keys.tmpl: -------------------------------------------------------------------------------- 1 | {%- set ssh_config = salt['pillar.get']('ssh') %} 2 | {%- for entry_name, entry in ssh_config.get('keys',{}).items() if entry.get('pillar', False) %} 3 | {%- set entry_split = entry_name.split('@') %} 4 | {%- if entry_split|length() == 2 %} 5 | {%- set user = entry_split[0] %} 6 | {%- set host = entry_split[1] %} 7 | {%- else %} 8 | {%- set user = 'root' %} 9 | {%- set host = entry_split[0] %} 10 | {%- endif %} 11 | {%- do entry.update({ 'pubkeys': [salt['pillar.get']('nodes:' + host + ':ssh:' + user + ':pubkey')]}) %} 12 | {%- endfor %} 13 | {%- set node_config = salt['pillar.get']('node') -%} 14 | {%- set auth_keys = salt['ffho_auth.get_ssh_authkeys'](ssh_config, node_config, grains['id'], username) -%} 15 | {{ "\n".join (auth_keys) }} 16 | -------------------------------------------------------------------------------- /firewall/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Netfiler stuff 3 | # 4 | 5 | /etc/modules-load.d/netfilter: 6 | file.managed: 7 | - source: salt://firewall/modules 8 | 9 | 10 | iptables-persistent: 11 | 
pkg.installed 12 | 13 | iptables-restore: 14 | cmd.wait: 15 | - name: /sbin/iptables-restore < /etc/iptables/rules.v4 16 | - watch: 17 | - file: /etc/iptables/rules.v4 18 | 19 | ip6tables-restore: 20 | cmd.wait: 21 | - name: /sbin/ip6tables-restore < /etc/iptables/rules.v6 22 | - watch: 23 | - file: /etc/iptables/rules.v6 24 | 25 | /etc/iptables/rules.v4: 26 | file.managed: 27 | - source: salt://firewall/rules.v4.tmpl 28 | - template: jinja 29 | 30 | /etc/iptables/rules.v6: 31 | file.managed: 32 | - source: salt://firewall/rules.v6.tmpl 33 | - template: jinja 34 | -------------------------------------------------------------------------------- /prometheus-server/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Set up prometheus server 3 | # 4 | 5 | prometheus: 6 | pkg.installed: 7 | - name: prometheus 8 | service.running: 9 | - enable: true 10 | - restart: true 11 | 12 | /srv/prometheus/metrics2: 13 | file.directory: 14 | - makedirs: true 15 | - user: prometheus 16 | - group: prometheus 17 | 18 | /etc/default/prometheus: 19 | file.managed: 20 | - source: salt://prometheus-server/prometheus.default 21 | - watch_in: 22 | - service: prometheus 23 | 24 | /etc/prometheus/prometheus.yml: 25 | file.managed: 26 | - source: salt://prometheus-server/prometheus.yml 27 | - template: jinja 28 | - require: 29 | - pkg: prometheus 30 | - file: /srv/prometheus/metrics2 31 | - watch_in: 32 | - service: prometheus 33 | -------------------------------------------------------------------------------- /sudo/sudoers.Debian.bullseye: -------------------------------------------------------------------------------- 1 | # 2 | # This file MUST be edited with the 'visudo' command as root. 3 | # 4 | # Please consider adding local content in /etc/sudoers.d/ instead of 5 | # directly modifying this file. 6 | # 7 | # See the man page for details on how to write a sudoers file. 
8 | # 9 | Defaults env_reset 10 | Defaults mail_badpass 11 | Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" 12 | 13 | # Host alias specification 14 | 15 | # User alias specification 16 | 17 | # Cmnd alias specification 18 | 19 | # User privilege specification 20 | root ALL=(ALL:ALL) ALL 21 | 22 | # Allow members of group sudo to execute any command 23 | %sudo ALL=(ALL:ALL) ALL 24 | 25 | # See sudoers(5) for more information on "@include" directives: 26 | 27 | @includedir /etc/sudoers.d 28 | -------------------------------------------------------------------------------- /bash/bashrc.root: -------------------------------------------------------------------------------- 1 | # ~/.bashrc: executed by bash(1) for non-login shells. 2 | 3 | # Note: PS1 and umask are already set in /etc/profile. You should not 4 | # need this unless you want different defaults for root. 5 | fqdn="{{ grains['id'] }}" 6 | PS1='${debian_chroot:+($debian_chroot)}${fqdn}:\w\$ ' 7 | # umask 022 8 | 9 | # You may uncomment the following lines if you want `ls' to be colorized: 10 | export LS_OPTIONS='--color=auto' 11 | eval "`dircolors`" 12 | alias ls='ls $LS_OPTIONS' 13 | alias ll='ls $LS_OPTIONS -l' 14 | alias l='ls $LS_OPTIONS -lA' 15 | 16 | # Increase history size and make sure the history is always appended 17 | HISTSIZE=1000 18 | HISTFILESIZE=2000 19 | shopt -s histappend 20 | 21 | # Load any aliases which might be present 22 | if [ -f ~/.bash_aliases ]; then 23 | . ~/.bash_aliases 24 | fi 25 | -------------------------------------------------------------------------------- /Documentation/example-pillar/globals.sls: -------------------------------------------------------------------------------- 1 | # Global configuration items 2 | 3 | globals: 4 | 5 | # Mail address of the operators of this fine backbone? 
# You might comment out this line once you have the MIBs downloaded.
8 | SNMPDRUN=yes 9 | 10 | # snmpd options (use syslog, close stdin/out/err). 11 | SNMPDOPTS='-LSwd -Lf /dev/null -u snmp -g snmp -I -smux -p /var/run/snmpd.pid' 12 | 13 | # snmptrapd control (yes means start daemon). As of net-snmp version 14 | # 5.0, master agentx support must be enabled in snmpd before snmptrapd 15 | # can be run. See snmpd.conf(5) for how to do this. 16 | TRAPDRUN=no 17 | 18 | # snmptrapd options (use syslog). 19 | TRAPDOPTS='-LS5d -p /var/run/snmptrapd.pid' 20 | 21 | # create symlink on Debian legacy location to official RFC path 22 | SNMPDCOMPAT=yes 23 | -------------------------------------------------------------------------------- /mongodb/mongod.conf: -------------------------------------------------------------------------------- 1 | # mongod.conf 2 | # salt managed 3 | 4 | # for documentation of all options, see: 5 | # http://docs.mongodb.org/manual/reference/configuration-options/ 6 | 7 | # Where and how to store data. 8 | storage: 9 | dbPath: /var/lib/mongodb 10 | journal: 11 | enabled: true 12 | # engine: 13 | # wiredTiger: 14 | 15 | # where to write logging data. 
16 | systemLog: 17 | destination: file 18 | logAppend: true 19 | path: /var/log/mongodb/mongod.log 20 | 21 | # network interfaces 22 | net: 23 | port: 27017 24 | bindIp: 127.0.0.1 25 | 26 | # how the process runs 27 | processManagement: 28 | timeZoneInfo: /usr/share/zoneinfo 29 | 30 | security: 31 | authorization: enabled 32 | 33 | #operationProfiling: 34 | 35 | #replication: 36 | 37 | #sharding: 38 | 39 | ## Enterprise-Only Options: 40 | 41 | #auditLog: 42 | 43 | #snmp: 44 | -------------------------------------------------------------------------------- /apt/sources.list.Debian.bullseye: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/apt/sources.list (Salt managed) 3 | # 4 | 5 | deb http://deb.debian.org/debian/ bullseye main non-free contrib 6 | deb-src http://deb.debian.org/debian/ bullseye main non-free contrib 7 | 8 | deb http://security.debian.org/debian-security bullseye-security main contrib non-free 9 | deb-src http://security.debian.org/debian-security bullseye-security main contrib non-free 10 | 11 | # bullseye-updates, previously known as 'volatile' 12 | deb http://deb.debian.org/debian/ bullseye-updates main contrib non-free 13 | deb-src http://deb.debian.org/debian/ bullseye-updates main contrib non-free 14 | 15 | # bullseye-backports, previously on backports.debian.org 16 | deb http://deb.debian.org/debian/ bullseye-backports main contrib non-free 17 | deb-src http://deb.debian.org/debian/ bullseye-backports main contrib non-free 18 | -------------------------------------------------------------------------------- /elasticsearch/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # elasticsearch 3 | # 4 | 5 | elasticsearch7x: 6 | pkgrepo.managed: 7 | - humanname: Elasticsearch 7.x 8 | - name: deb https://artifacts.elastic.co/packages/oss-7.x/apt stable main 9 | - file: /etc/apt/sources.list.d/elastic-7.x.list 10 | - key_url: 
# Old Icinga2 doesn't ship with a check command for the ssl_cert check
# and newer versions have different argument definitions, so we
# hack this by adding our own CheckCommand \o/
Default to 'file' 10 | type: file 11 | # disable dashboard deletion 12 | disableDeletion: false 13 | # how often Grafana will scan for changed dashboards 14 | updateIntervalSeconds: 10 15 | # allow updating provisioned dashboards from the UI 16 | allowUiUpdates: false 17 | options: 18 | # path to dashboard files on disk. Required when using the 'file' type 19 | path: /var/lib/grafana/dashboards 20 | # use folder names from filesystem to create folders in Grafana 21 | foldersFromFilesStructure: true 22 | -------------------------------------------------------------------------------- /icinga2/commands.d/dns_sync.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Check DNS sync 3 | object CheckCommand "dns_sync" { 4 | import "plugin-check-command" 5 | 6 | command = [ "/usr/local/share/monitoring-plugins/check_dns_sync" ] 7 | 8 | arguments = { 9 | "--reference-ns" = "$reference_ns$" 10 | "--replica-ns" = "$replica_ns$" 11 | "--timeout" = { 12 | set_if = "$timeout$" 13 | value = "$timeout$" 14 | } 15 | "--check_mode" = { 16 | set_if = "$check_mode$" 17 | value = "$check_mode$" 18 | } 19 | "zones" = { 20 | value = "$zones$" 21 | skip_key = true 22 | order = 99 23 | } 24 | } 25 | 26 | vars.reference_ns = "" # IP of reference NS Server 27 | vars.replica_ns = "" # IP of replica NS Server 28 | vars.timeout = "" # Timeout for DNS operations 29 | vars.check_mode = "serial" # Check mode: serial or axfr 30 | vars.zones = [] # List of zone names to be checked 31 | } 32 | -------------------------------------------------------------------------------- /apu2/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # APU2 - Firmware-Update 3 | # 4 | 5 | apu2-flashrom: 6 | pkg.latest: 7 | - name: flashrom 8 | 9 | {% if salt['pkg.version_cmp'](salt['pkg.version']('flashrom'), '0.9.9') >= 0 %} 10 | apu2-read-firmware: 11 | cmd.run: 12 | - name: flashrom --programmer internal --read 
/tmp/apu2-firmware.rom 13 | - creates: /tmp/apu2-firmware.rom 14 | - require: 15 | - pkg: apu2-flashrom 16 | 17 | apu2-copy-firmware: 18 | file.managed: 19 | - name: /tmp/apu2-firmware.rom 20 | - source: salt://apu2/apu2-firmware.rom 21 | - require: 22 | - cmd: apu2-read-firmware 23 | 24 | apu2-write-firmware: 25 | cmd.wait: 26 | - name: flashrom --programmer internal --write /tmp/apu2-firmware.rom 27 | # - name: flashrom --programmer internal:boardmismatch=force --write /tmp/apu2-firmware.rom 28 | - watch: 29 | - file: apu2-copy-firmware 30 | {% endif %} 31 | -------------------------------------------------------------------------------- /icinga2/icinga2.sudoers: -------------------------------------------------------------------------------- 1 | # 2 | # sudoers file for Icinga2 monitoring commands (Salt managed) 3 | # 4 | 5 | # No lecture for the nagios user 6 | Defaults: nagios lecture=never 7 | 8 | # Network basics (IP, conntrack, routing) 9 | nagios ALL=NOPASSWD: /usr/local/share/monitoring-plugins/check_ifupdown2 10 | nagios ALL=NOPASSWD: /usr/local/share/monitoring-plugins/check_conntrack_size 11 | nagios ALL=NOPASSWD: /usr/local/share/monitoring-plugins/check_bird_ospf 12 | nagios ALL=NOPASSWD: /usr/local/share/monitoring-plugins/check_bird_bgp 13 | 14 | # DHCP 15 | nagios ALL=NOPASSWD: /usr/local/sbin/dhcpd-pool 16 | 17 | # Mail 18 | nagios ALL=NOPASSWD: /usr/lib/nagios/plugins/check_mailq 19 | 20 | # LVM 21 | nagios ALL=NOPASSWD: /usr/local/share/monitoring-plugins/check_lv_snap 22 | 23 | # Needrestart 24 | nagios ALL=NOPASSWD: /usr/sbin/needrestart -p -k 25 | nagios ALL=NOPASSWD: /usr/sbin/needrestart -p -l 26 | -------------------------------------------------------------------------------- /nftables/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # nftables state 3 | # 4 | 5 | {% if not 'no-nftables' in salt['pillar.get']('node:tags', []) %} 6 | 7 | nftables: 8 | pkg.installed: 9 | - name: nftables 
#!/usr/bin/python3
#
# Maximilian Wilhelm
# -- Fri 28 Apr 2023 08:41:13 PM CEST
#
# Print the "bridge-vids" configured for the given interface in
# /etc/network/interfaces, if any.

import re
import sys


def get_bridge_vids(lines, ifname):
    """Return the bridge-vids value of the stanza for ifname, or None.

    lines is an iterable of interfaces(5) lines.  Only the stanza of the
    requested interface is considered: scanning stops at the next stanza
    keyword, so a later interface's bridge-vids can no longer be reported
    by accident (the previous implementation never reset its "found" flag
    and printed matches from every subsequent stanza).
    """
    # Anchor the interface name so e.g. "br1" doesn't match "br10"
    ifstanza_re = re.compile(rf"^iface {re.escape(ifname)}(\s|$)")
    # Any keyword which starts a new top-level stanza ends our search window
    stanza_start_re = re.compile(r"^(iface|auto|allow-|source)\b")
    bridge_vid_re = re.compile(r"bridge-vids (.*)$")

    in_stanza = False
    for raw_line in lines:
        line = raw_line.strip()

        if line.startswith('#'):
            continue

        if in_stanza:
            match = bridge_vid_re.search(line)
            if match:
                return match.group(1)
            if stanza_start_re.search(line):
                # Left the interface's stanza without finding bridge-vids
                return None

        if ifstanza_re.search(line):
            in_stanza = True

    return None


def main():
    """CLI entry point: get-bridge-vids IFACE"""
    if len(sys.argv) != 2:
        print("Usage: get-bridge-vids IFACE", file=sys.stderr)
        sys.exit(1)

    # Close the file deterministically (the old code leaked the handle)
    with open("/etc/network/interfaces", "r") as interfaces_fh:
        vids = get_bridge_vids(interfaces_fh, sys.argv[1])

    if vids is not None:
        print(vids)


if __name__ == "__main__":
    main()
"/check_gpg_expiry" ] 4 | arguments = { 5 | "--dirs" = { 6 | required = false 7 | value = "$gpg_directory$" 8 | repeat_key = false 9 | } 10 | "--warning" = { 11 | required = false 12 | value = "$gpg_warning_secs$" 13 | } 14 | "--critical" = { 15 | required = false 16 | value = "$gpg_critical_secs$" 17 | } 18 | "--verbose" = { 19 | set_if = "$gpg_verbose$" 20 | description = "Output all keys with their corresponding dates" 21 | } 22 | "--sort" = { 23 | set_if = "$gpg_sort$" 24 | description = "Sort keys by expiry date" 25 | } 26 | "--expiring" = { 27 | set_if = "$gpg_expiring$" 28 | description = "Only show expiring keys in verbose output" 29 | } 30 | "--ignore" = { 31 | required = false 32 | value = "$gpg_ignore$" 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /icinga2/services/ldap.conf: -------------------------------------------------------------------------------- 1 | # 2 | # LDAP related stuff (Salt managed) 3 | # 4 | 5 | apply Service "ldaps" { 6 | import "generic-service" 7 | 8 | check_command = "ldap" 9 | 10 | vars.ldap_address = host.vars.ldap_fqdn 11 | vars.ldap_base = "dc=ffho,dc=net" 12 | vars.ldap_ssl = true 13 | vars.ldap_v3 = true 14 | 15 | assign where host.vars.ldap_fqdn && "ldap-master" in host.vars.roles 16 | assign where host.vars.ldap_fqdn && "ldap-replica" in host.vars.roles 17 | } 18 | 19 | apply Service "ldap_syncrepl_extended" { 20 | import "generic-service" 21 | 22 | check_command = "syncrepl_extended" 23 | 24 | vars.provider = "ldaps://ldap-master.srv.in.ffho.net" 25 | vars.consumer = "ldaps://" + host.vars.ldap_fqdn 26 | vars.base_dn = "dc=ffho,dc=net" 27 | vars.bind_dn = "uid=sync-check,ou=accounts,dc=ffho,dc=net" 28 | vars.bind_password = LdapSyncReplBindPassword 29 | 30 | assign where host.vars.ldap_fqdn && "ldap-replica" in host.vars.roles 31 | } 32 | -------------------------------------------------------------------------------- /grafana/prometheus.sls: 
-------------------------------------------------------------------------------- 1 | # 2 | # Grafana as Prometheus front end 3 | # 4 | 5 | # 6 | # Data sources 7 | # 8 | 9 | /etc/grafana/provisioning/datasources/prom-local.yaml: 10 | file.managed: 11 | - source: salt://grafana/datasources/prom-local.yaml.tmpl 12 | - template: jinja 13 | - require: 14 | - pkg: grafana 15 | - watch_in: 16 | - service: grafana-server 17 | 18 | # 19 | # Dashboards 20 | # 21 | /etc/grafana/provisioning/dashboards/FFHO.yaml: 22 | file.managed: 23 | - source: salt://grafana/dashboards/prometheus.yaml 24 | - require: 25 | - pkg: grafana 26 | - watch_in: 27 | - service: grafana-server 28 | 29 | /var/lib/grafana/dashboards/: 30 | file.recurse: 31 | - source: salt://grafana/dashboards/prometheus/ 32 | - file_mode: 644 33 | - dir_mode: 755 34 | - user: root 35 | - group: root 36 | - clean: True 37 | - require: 38 | - pkg: grafana 39 | - watch_in: 40 | - service: grafana-server 41 | -------------------------------------------------------------------------------- /apt/sources.list.Debian.bookworm: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/apt/sources.list (Salt managed) 3 | # 4 | 5 | deb http://deb.debian.org/debian/ bookworm main contrib non-free non-free-firmware 6 | deb-src http://deb.debian.org/debian/ bookworm main contrib non-free non-free-firmware 7 | 8 | deb http://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware 9 | deb-src http://security.debian.org/debian-security bookworm-security main contrib non-free non-free-firmware 10 | 11 | # bookworm-updates, previously known as 'volatile' 12 | deb http://deb.debian.org/debian/ bookworm-updates main contrib non-free non-free-firmware 13 | deb-src http://deb.debian.org/debian/ bookworm-updates main contrib non-free non-free-firmware 14 | 15 | # bookworm-backports, previously on backports.debian.org 16 | deb http://deb.debian.org/debian/ 
bookworm-backports main contrib non-free non-free-firmware 17 | deb-src http://deb.debian.org/debian/ bookworm-backports main contrib non-free non-free-firmware 18 | -------------------------------------------------------------------------------- /sysctl/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # sysctl 3 | # 4 | {%- set roles = salt['pillar.get']('node:roles', []) %} 5 | 6 | # Define command to reload sysctl settings here without dependencies 7 | # and define inverse dependencies where useful (see sysctl.conf) 8 | reload-sysctl: 9 | cmd.wait: 10 | - watch: [] 11 | - name: /sbin/sysctl --system 12 | 13 | 14 | /etc/sysctl.conf: 15 | file.managed: 16 | - source: 17 | - salt://sysctl/sysctl.conf.{{ grains.os }}.{{ grains.oscodename }} 18 | - salt://sysctl/sysctl.conf 19 | - watch_in: 20 | - cmd: reload-sysctl 21 | 22 | 23 | /etc/sysctl.d/global.conf: 24 | file.managed: 25 | - source: salt://sysctl/global.conf 26 | - watch_in: 27 | - cmd: reload-sysctl 28 | 29 | 30 | {% if 'router' in roles %} 31 | /etc/sysctl.d/router.conf: 32 | file.managed: 33 | - source: salt://sysctl/router.conf 34 | - watch_in: 35 | - cmd: reload-sysctl 36 | {% else %} 37 | /etc/sysctl.d/router.conf: 38 | file.absent 39 | {% endif %} 40 | -------------------------------------------------------------------------------- /graylog/default-graylog-server: -------------------------------------------------------------------------------- 1 | # Path to a custom java executable. By default the java executable of the 2 | # bundled JVM is used. 3 | #JAVA=/usr/bin/java 4 | 5 | # Default Java options for heap and garbage collection. 6 | GRAYLOG_SERVER_JAVA_OPTS="-Xms1g -Xmx1g -server -XX:+UseG1GC -XX:-OmitStackTraceInFastThrow" 7 | 8 | # Avoid endless loop with some TLSv1.3 implementations. 
9 | GRAYLOG_SERVER_JAVA_OPTS="$GRAYLOG_SERVER_JAVA_OPTS -Djdk.tls.acknowledgeCloseNotify=true" 10 | 11 | # Fix for log4j CVE-2021-44228 12 | GRAYLOG_SERVER_JAVA_OPTS="$GRAYLOG_SERVER_JAVA_OPTS -Dlog4j2.formatMsgNoLookups=true" 13 | 14 | # Use custom keystore for self signed certificate 15 | GRAYLOG_SERVER_JAVA_OPTS="$GRAYLOG_SERVER_JAVA_OPTS -Djavax.net.ssl.trustStore=/etc/ssl/certs/java/cacerts" 16 | 17 | # Pass some extra args to graylog-server. (i.e. "-d" to enable debug mode) 18 | GRAYLOG_SERVER_ARGS="" 19 | 20 | # Program that will be used to wrap the graylog-server command. Useful to 21 | # support programs like authbind. 22 | GRAYLOG_COMMAND_WRAPPER="" 23 | -------------------------------------------------------------------------------- /bird/bogon_unreach.conf: -------------------------------------------------------------------------------- 1 | # Add unreachable routes for any prefix we don't want to route to 2 | # the internet. 3 | protocol static bogon_unreach { 4 | {%- if proto == 'v4' %} 5 | # RFC1918, RFC 6598, APIPA, TEST nets, and stuff 6 | route 0.0.0.0/8 unreachable; # Host-Subnet 7 | route 10.0.0.0/8 unreachable; # RFC 1918 8 | route 100.64.0.0/10 unreachable; # RFC 6598 9 | route 169.254.0.0/16 unreachable; # APIPA 10 | route 172.16.0.0/12 unreachable; # RFC 1918 11 | route 192.0.0.0/24 unreachable; # IANA RESERVED 12 | route 192.0.2.0/24 unreachable; # TEST-NET-1 13 | route 192.168.0.0/16 unreachable; # RFC 1918 14 | route 198.18.0.0/15 unreachable; # BENCHMARK 15 | route 198.51.100.0/24 unreachable; # TEST-NET-2 16 | route 203.0.113.0/24 unreachable; # TEST-NET-3 17 | route 224.0.0.0/3 unreachable; # MCast + Class E 18 | {%- else %} 19 | route ::/96 unreachable; # RFC 4291 20 | route 2001:db8::/32 unreachable; # Documentation 21 | route fec0::/10 unreachable; # Site Local 22 | route fc00::/7 unreachable; # ULA 23 | {%- endif %} 24 | } 25 | -------------------------------------------------------------------------------- 
/dns-server/named.conf.local: -------------------------------------------------------------------------------- 1 | // 2 | // Zone configuration for master name server (Salt managed) 3 | // 4 | 5 | // 6 | // ACLs 7 | // 8 | 9 | {% for acl_name, acl_config in salt['pillar.get']('dns-server:acls', {}).items ()|sort %} 10 | acl {{ acl_name }} { 11 | {%- for entry in acl_config %} 12 | // {{ entry }} 13 | {%- for IP in acl_config[entry] %} 14 | {{ IP }}; 15 | {%- endfor %} 16 | {%- endfor %} 17 | }; 18 | 19 | {% endfor %} 20 | 21 | // 22 | // Zones 23 | // 24 | 25 | {%- set defaults = salt['pillar.get']('dns-server:zone_defaults', {}) %} 26 | {% for zone, zone_config in salt['pillar.get']('dns-server:zones', {}).items ()|sort %} 27 | {%- set allow_transfer = zone_config.get ('allow-transfer', defaults.get ('allow-transfer')) %} 28 | // {{ zone_config.get ('desc', zone ) }} 29 | zone "{{ zone }}" { 30 | type {{ zone_config.get ('type', defaults.get ('type')) }}; 31 | file "{{ zone_config.get ('file') }}"; 32 | {%- if allow_transfer %} 33 | allow-transfer { {{ allow_transfer }} }; 34 | {%- endif %} 35 | }; 36 | 37 | {% endfor %} 38 | -------------------------------------------------------------------------------- /network/interfaces/openvpn.tmpl: -------------------------------------------------------------------------------- 1 | {#- 2 | # OpenVPN VPNs (if any) 3 | #} 4 | {%- set networks = [] %} 5 | {%- for netname, network in salt['pillar.get']('ovpn', {}).items () if grains['id'] in network %} 6 | {%- do networks.append (netname) %} 7 | {%- endfor %} 8 | {%- for netname in networks|sort %} 9 | {%- set network = salt['pillar.get']('ovpn:' ~ netname) %} 10 | {%- set network_config = network.get ('config') %} 11 | {%- set host_stanza = network.get (grains['id']) %} 12 | {%- set host_config = host_stanza.get ('config', {}) %} 13 | 14 | # 15 | # {{ network_config.get ('_desc') }} 16 | {%- set interface = host_config.get ('interface', network_config.get ('interface')) %} 17 
#
# /etc/nginx/sites-enabled/www2.ffho.net (Salt managed)
#

{%- set acme_thumbprint = salt['pillar.get']('acme:thumbprint', False) %}

server {
	listen 443;
	listen [::]:443;

	ssl on;
	ssl_certificate /etc/ssl/certs/www2.ffho.net.cert.pem;
	ssl_certificate_key /etc/ssl/private/www2.ffho.net.key.pem;

	include /etc/nginx/ffho.d/add-headers.conf;

	# The two directives below were missing their ';' terminators,
	# which makes nginx fail to parse this file on reload.
	root /srv/www2/;

	server_name www2.ffho.net;

	fancyindex on;
	fancyindex_exact_size off;
	fancyindex_name_length 70;
	fancyindex_header /header.html;
	fancyindex_localtime on;
	fancyindex_default_sort name;

	location / {
		try_files $uri $uri/ /index.html =404;
		fancyindex_ignore header.html favicon.ico models-short.txt models.txt robots.txt scripts;
	}

{%- if acme_thumbprint %}
	location ~ "^/\.well-known/acme-challenge/([-_a-zA-Z0-9]+)$" {
		default_type text/plain;
		return 200 "$1.{{ acme_thumbprint }}";
	}
{%- endif %}
}
currently) 11 | wireguard-tools: 12 | pkg.installed 13 | 14 | 15 | Create /etc/wireguard: 16 | file.directory: 17 | - name: /etc/wireguard 18 | - require: 19 | - pkg: wireguard-tools 20 | 21 | Cleanup /etc/wireguard: 22 | file.directory: 23 | - name: /etc/wireguard 24 | - clean: true 25 | # Add cleanup action for active tunnels 26 | 27 | {% for iface, tunnel_config in wg_cfg.get ('tunnels', {}).items () %} 28 | /etc/wireguard/{{ iface }}.conf: 29 | file.managed: 30 | - source: salt://wireguard/wireguard.conf.tmpl 31 | - template: jinja 32 | - context: 33 | config: {{ tunnel_config }} 34 | privkey: {{ wg_cfg.get ('privkey') }} 35 | - require: 36 | - file: Create /etc/wireguard 37 | - require_in: 38 | - file: Cleanup /etc/wireguard 39 | # start/reload tunnel 40 | {% endfor %} 41 | -------------------------------------------------------------------------------- /Documentation/example-pillar/nodes/fe01.sls: -------------------------------------------------------------------------------- 1 | fe01.in.ffho.net: 2 | sysLocation: Vega 3 | 4 | roles: 5 | - router 6 | - frontend 7 | 8 | ifaces: 9 | lo: 10 | prefixes: 11 | - 10.132.255.29/32 12 | - 2a03:2260:2342:ffff::29/128 13 | 14 | vlan1013: 15 | desc: "L2-Vega" 16 | prefixes: 17 | - /28 18 | - /64 19 | 20 | eth0: 21 | desc: "Ext. 
Vega" 22 | prefixes: 23 | - 80.70.181.61/32 24 | - 2a02:450:1:6::10/64 25 | pointopoint: 80.70.181.56 26 | gateway: 27 | - 80.70.181.56 28 | - 2a02:450:1:6::1 29 | vrf: vrf_external 30 | 31 | veth_int2ext: 32 | prefixes: 33 | - /31 34 | - /126 35 | 36 | veth_ext2int: 37 | prefixes: 38 | - /31 39 | - /126 40 | vrf: vrf_external 41 | 42 | 43 | nginx: 44 | websites: 45 | - ff-frontend.conf 46 | - node.ffho.net 47 | -------------------------------------------------------------------------------- /install-server/ffho-first-boot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Maximilian Wilhelm 4 | # -- Fri, 14 Apr 2023 22:05:24 +0200 5 | # 6 | 7 | while ! salt-call test.ping >/dev/null 2>&1; do 8 | echo "Please accept minion key on Salt master." 9 | sleep 10 10 | done 11 | 12 | echo "Looks like you did, cool, let's get started!" 13 | echo 14 | 15 | ################################################################################ 16 | # Set up screeen and SSH # 17 | ################################################################################ 18 | 19 | echo "Syncing modules..." 20 | salt-call saltutil.sync_all 21 | 22 | echo "Configuring screen and SSH..." 23 | salt-call state.apply screen,ssh 24 | 25 | echo "Backing up SSH keys..." 26 | cp -a /etc/ssh /opt 27 | 28 | cat << EOF 29 | SSH configured, you should now be able to SSH into this device (as root). 30 | 31 | EOF 32 | 33 | ip -br a 34 | 35 | echo 36 | echo 37 | 38 | echo "Running highstate..." 
# can simply pick loopback IP addresses from grains.
# Add dependency on network-online.target
# Install service to wait for routing adjacencies to come up (if needed)
node(s) running the server querying other nodes 17 | role: prometheus-server 18 | # list of roles where this exporter will be running and needs to be allowed 19 | node_roles: 20 | - dns-auth 21 | - dns-recursor 22 | nftables_rule_spec: "tcp dport 9119" 23 | 24 | prometheus-bird-exporter: 25 | role: prometheus-server 26 | node_roles: 27 | - router 28 | nftables_rule_spec: "tcp dport 9324" 29 | 30 | icinga2: 31 | role: icinga2server 32 | 33 | 34 | {% if grains['id'] in [""] %} 35 | users: 36 | ffho-ops: 37 | display_name: "" 38 | telegram_chat_id: "-" 39 | 40 | # ... 41 | 42 | private: 43 | telegram_bot_token: "" 44 | {% endif %} 45 | -------------------------------------------------------------------------------- /icinga2/constants.conf: -------------------------------------------------------------------------------- 1 | /** 2 | * This file defines global constants which can be used in 3 | * the other configuration files. (Salt managed) 4 | */ 5 | 6 | /* The directory which contains the plugins from the Monitoring Plugins project. */ 7 | const PluginDir = "/usr/lib/nagios/plugins" 8 | 9 | /* The directory which contains the Manubulon plugins. 10 | * Check the documentation, chapter "SNMP Manubulon Plugin Check Commands", for details. 11 | */ 12 | const ManubulonPluginDir = "/usr/lib/nagios/plugins" 13 | 14 | /* The directory which you use to store additional plugins which ITL provides user contributed command definitions for. 15 | * Check the documentation, chapter "Plugins Contribution", for details. 16 | */ 17 | const PluginContribDir = "/usr/lib/nagios/plugins" 18 | 19 | /* Our local instance name. By default this is the server's hostname as returned by `hostname --fqdn`. 20 | * This should be the common name from the API certificate. 21 | */ 22 | //const NodeName = "localhost" 23 | 24 | /* Our local zone name. 
*/ 25 | const ZoneName = NodeName 26 | 27 | /* Secret key for remote node tickets */ 28 | const TicketSalt = "" 29 | 30 | const FFHOPluginDir = "/usr/local/share/monitoring-plugins" 31 | -------------------------------------------------------------------------------- /network/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Networking 3 | # 4 | 5 | # Which network suite to configure? 6 | {% set default_suite = salt['pillar.get']('network:suite', 'ifupdown-ng') %} 7 | {% set suite = salt['pillar.get']('node:network:suite', default_suite) %} 8 | 9 | include: 10 | - network.link 11 | - network.{{ suite }} 12 | - network.interfaces 13 | - network.{{ suite }}.reload 14 | 15 | network-pkg: 16 | pkg.installed: 17 | - pkgs: 18 | - iproute2 19 | - ipv6calc 20 | - require_in: 21 | - file: /etc/network/interfaces 22 | 23 | vnstat: 24 | pkg.installed: 25 | - name: vnstat 26 | service.running: 27 | - restart: True 28 | 29 | /etc/vnstat.conf: 30 | file.managed: 31 | - source: salt://network/vnstat.conf 32 | - watch_in: 33 | - service: vnstat 34 | 35 | # /etc/resolv.conf 36 | /etc/resolv.conf: 37 | file.managed: 38 | - source: 39 | - salt://network/resolv.conf.H_{{ grains.id }} 40 | - salt://network/resolv.conf 41 | - template: jinja 42 | 43 | 44 | /etc/iproute2/rt_tables.d/ffho.conf: 45 | file.managed: 46 | - source: salt://network/rt_tables.conf.tmpl 47 | - template: jinja 48 | - require: 49 | - pkg: network-pkg 50 | -------------------------------------------------------------------------------- /ffinfo/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Basic system related information 3 | # 4 | 5 | /etc/freifunk: 6 | file.directory: 7 | - user: root 8 | - group: root 9 | - mode: 755 10 | - makedirs: True 11 | 12 | # Generate /etc/freifunk/role file with main role the node has configured in NetBox 13 | /etc/freifunk/role: 14 | file.managed: 15 | - contents: {{ 
salt['pillar.get']('node:role', "") }} 16 | 17 | # Generate /etc/freifunk/roles file with all roles configured on the node, 18 | # one on each line. 19 | /etc/freifunk/roles: 20 | file.managed: 21 | - source: salt://ffinfo/list.tmpl 22 | - template: jinja 23 | list: {{ salt['pillar.get']('node:roles', []) }} 24 | 25 | # Generate /etc/freifunk/sites file with all sites configured on the node, 26 | # one on each line. Empty if no sites configured. 27 | /etc/freifunk/sites: 28 | file.managed: 29 | - source: salt://ffinfo/list.tmpl 30 | - template: jinja 31 | list: {{ salt['pillar.get']('node:sites', []) }} 32 | 33 | # Generate /etc/freifunk/status file with the status of this node 34 | {% set status = salt['pillar.get']('node:status', 'UNKNOWN') %} 35 | /etc/freifunk/status: 36 | file.managed: 37 | - contents: {{ status }} 38 | -------------------------------------------------------------------------------- /Documentation/example-pillar/nodes/cr03.sls: -------------------------------------------------------------------------------- 1 | cr03.in.ffho.net: 2 | sysLocation: BER 3 | 4 | roles: 5 | - router 6 | - routereflector 7 | - ffrl-exit 8 | 9 | ifaces: 10 | lo: 11 | prefixes: 12 | - 10.132.255.3/32 13 | - 2a03:2260:2342:ffff::3/128 14 | 15 | eth0: 16 | prefixes: 17 | - 185.46.137.162/25 18 | - 2a00:13c8:1000:2::162/64 19 | gateway: 20 | - 185.46.137.129 21 | - 2a00:13c8:1000:2::1 22 | vrf: vrf_external 23 | 24 | vlan1015: 25 | desc: "L2-BER" 26 | prefixes: 27 | - /28 28 | - /64 29 | 30 | # DUS 31 | gre_ffrl_dus_a: 32 | type: GRE_FFRL 33 | endpoint: 185.66.193.0 34 | tunnel-physdev: eth0 35 | prefixes: 36 | - /31 37 | - /64 38 | 39 | # gre_ffrl_dus_b: 40 | # [...] 
41 | # 42 | # # FRA 43 | # gre_ffrl_fra_a: 44 | # 45 | # gre_ffrl_fra_b: 46 | # 47 | # # BER 48 | # gre_ffrl_ber_a: 49 | # 50 | # gre_ffrl_ber_b: 51 | # 52 | 53 | # NAT IP 54 | nat: 55 | link-type: dummy 56 | prefixes: 57 | - 185.66.x.y/32 58 | -------------------------------------------------------------------------------- /systemd/wait-for-routes: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Wait for routing adjacencies to come up and produce a default route 4 | # 5 | # Maximilian Wilhelm 6 | # -- Mon, 05 Apr 2021 02:31:58 +0200 7 | 8 | # Wait for this amount of seconds before giving up 9 | timeout=30 10 | 11 | # Wait for IPv4 default route to emerge 12 | ( 13 | for n in $(seq 1 $timeout); do 14 | # If there is an IPv4 default route, stop waiting 15 | if ip -4 route | grep -q "^default"; then 16 | break 17 | fi 18 | 19 | sleep 1 20 | done 21 | )& 22 | 23 | # Wait for IPv6 default route to emerge 24 | ( 25 | for n in $(seq 1 $timeout); do 26 | # If there is an IPv6 default route, stop waiting 27 | if ip -6 route | grep -q "^default"; then 28 | break 29 | fi 30 | 31 | sleep 1 32 | done 33 | )& 34 | 35 | # Wait for IPv6 IPs to leave tentative state 36 | # This will most likely be only relevant for nodes with static IPs/route 37 | ( 38 | for n in $(seq 1 $timeout); do 39 | # If there is an IPv6 in state tentative, wait on 40 | if ip -6 addr | grep -q "tentative"; then 41 | sleep 1 42 | else 43 | break 44 | fi 45 | done 46 | 47 | )& 48 | 49 | # Wait for both sub-shells to finish 50 | wait 51 | 52 | exit 0 53 | -------------------------------------------------------------------------------- /burp/server/common_incexc: -------------------------------------------------------------------------------- 1 | # 2 | # Common options for all clients (Salt managed) 3 | # 4 | 5 | # If you add at least one 'include=' line, the server will override the 6 | # rest of the client options below, which define exactly what to
backup. 7 | # Setting any of the other options here will then also take effect on the 8 | # client. 9 | 10 | include = /etc 11 | include = /root 12 | include = /usr/local 13 | include = /srv 14 | 15 | # Exclude various temporary file systems. 16 | exclude_fs = sysfs 17 | exclude_fs = tmpfs 18 | exclude_fs = proc 19 | exclude_fs = devfs 20 | exclude_fs = devpts 21 | 22 | # exclude_ext=vdi 23 | # exclude_regex=/\.cache/ 24 | 25 | # Exclude files from compression by extension. 26 | exclude_comp = bz2 27 | exclude_comp = gz 28 | exclude_comp = xz 29 | 30 | # cross_filesystem=/some/path 31 | cross_all_filesystems=0 32 | 33 | # split_vss=1 34 | # strip_vss=0 35 | 36 | # When backing up, whether to enable O_NOATIME when opening files and 37 | # directories. The default is atime=0, which enables O_NOATIME. 38 | atime = 0 39 | 40 | # When enabled, this causes problems in the phase1 scan (such as an 'include' 41 | # being missing) to be treated as fatal errors. The default is 0. 42 | scan_problem_raises_error = 0 43 | -------------------------------------------------------------------------------- /dhcp-server/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # DHCP server (for gateways) 3 | # 4 | 5 | include: 6 | - network 7 | 8 | isc-dhcp-server: 9 | pkg.installed: 10 | - name: isc-dhcp-server 11 | service.running: 12 | - enable: True 13 | - restart: True 14 | - require: 15 | - file: /etc/network/interfaces 16 | 17 | 18 | /etc/dhcp/dhcpd.conf: 19 | file.managed: 20 | - source: salt://dhcp-server/dhcpd.conf 21 | - template: jinja 22 | - watch_in: 23 | - service: isc-dhcp-server 24 | 25 | /etc/default/isc-dhcp-server: 26 | file.managed: 27 | - source: 28 | - salt://dhcp-server/dhcpd.default.{{ grains.oscodename }} 29 | - salt://dhcp-server/dhcpd.default 30 | - template: jinja 31 | - watch_in: 32 | - service: isc-dhcp-server 33 | 34 | # 35 | # Install dhcpd-pool monitoring magic from 36 | # 
http://folk.uio.no/trondham/software/dhcpd-pool.html 37 | /usr/local/sbin/dhcpd-pool: 38 | file.managed: 39 | - source: salt://dhcp-server/dhcpd-pool 40 | - mode: 755 41 | - user: root 42 | - group: root 43 | 44 | # There's a man page. Be nice, install it. 45 | /usr/local/share/man/man1/dhcpd-pool.1.gz: 46 | file.managed: 47 | - source: salt://dhcp-server/dhcpd-pool.1.gz 48 | - makedirs: true 49 | -------------------------------------------------------------------------------- /icinga2/commands.d/mailq_sudo.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Bloody fooking plugin doesn't do sudo correcty. 3 | # 4 | 5 | object CheckCommand "sudo_mailq" { 6 | command = [ "sudo", PluginDir + "/check_mailq" ] 7 | 8 | arguments = { 9 | "-w" = { 10 | value = "$mailq_warning$" 11 | description = "Min. number of messages in queue to generate warning" 12 | required = true 13 | } 14 | "-c" = { 15 | value = "$mailq_critical$" 16 | description = "Min. number of messages in queue to generate critical alert ( w < c )" 17 | required = true 18 | } 19 | "-W" = { 20 | value = "$mailq_domain_warning$" 21 | description = "Min. number of messages for same domain in queue to generate warning" 22 | } 23 | "-C" = { 24 | value = "$mailq_domain_critical$" 25 | description = "Min. 
number of messages for same domain in queue to generate critical alert ( W < C )" 26 | } 27 | "-t" = { 28 | value = "$mailq_timeout$" 29 | description = "Plugin timeout in seconds (default = 15)" 30 | } 31 | "-M" = { 32 | value = "$mailq_servertype$" 33 | description = "[ sendmail | qmail | postfix | exim | nullmailer ] (default = autodetect)" 34 | } 35 | "-s" = { 36 | set_if = "$mailq_sudo$" 37 | description = "Use sudo for mailq command" 38 | } 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /burp/client/burp.conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # Burp backup client configuration (Salt managed) 3 | # 4 | 5 | mode = client 6 | port = 4971 7 | status_port = 4972 8 | 9 | server = {{ burp_server_name }} 10 | 11 | password = {{ burp_password }} 12 | cname = {{ grains.id }} 13 | 14 | # Where to find all the SSL stuff 15 | ssl_cert_ca = /etc/ssl/certs/ffho-cacert.pem 16 | ssl_cert = /etc/ssl/certs/{{ grains.id }}.cert.pem 17 | ssl_key = /etc/ssl/private/{{ grains.id }}.key.pem 18 | ssl_dhfile = /etc/ssl/dhparam.pem 19 | 20 | ssl_peer_cn = {{ burp_server_name }} 21 | 22 | # Choose the protocol automagically 23 | protocol = 0 24 | 25 | pidfile = /var/run/burp.client.pid 26 | syslog = 0 27 | stdout = 1 28 | progress_counter = 1 29 | 30 | # Wait a random number of seconds between 0 and the given number before 31 | # contacting the server on a timed backup. 32 | randomise = 1200 33 | 34 | # Set server_can_restore to 0 if you do not want the server to be able to 35 | # initiate a restore. 
36 | server_can_restore = 0 37 | 38 | # The server should configure what we should back up 39 | server_can_override_includes = 1 40 | 41 | # Example syntax for pre/post scripts 42 | #backup_script_pre=/path/to/a/script 43 | #backup_script_post=/path/to/a/script 44 | #restore_script_pre=/path/to/a/script 45 | #restore_script_post=/path/to/a/script 46 | -------------------------------------------------------------------------------- /Documentation/example-pillar/ssh.sls: -------------------------------------------------------------------------------- 1 | ssh: 2 | keys: 3 | # 4 | # : 5 | # pubkeys: 6 | # - "" 7 | # - "" 8 | # access: 9 | # 10 | # Option 1: Access for on all nodes 11 | # 12 | # : global 13 | # 14 | # Option 2: Access for on list of given nodes: 15 | # 16 | # : 17 | # nodes: 18 | # - node1.in.ffho.net 19 | # - node2.in.ffho.net 20 | # 21 | # Option 3: Access as on all nodes matching at least one of tht 22 | # given roles: 23 | # 24 | # : 25 | # roles: 26 | # - webserver 27 | # - router 28 | # 29 | # 30 | # Examples: 31 | # 32 | max: 33 | pubkeys: 34 | - "ssh-rsa ABC max@pandora" 35 | access: 36 | root: global 37 | 38 | karsten: 39 | pubkeys: 40 | - "ssh-rsa ACBDE kb-light@leo-loewe" 41 | access: 42 | root: 43 | global: true 44 | build: 45 | nodes: 46 | - masterbuilder.in.ffho.net 47 | 48 | webmaster: 49 | pubkeys: 50 | - "ssh-rsa AAAfoo webmaster@apache" 51 | access: 52 | root: 53 | roles: 54 | - webserver 55 | nodes: 56 | - fe01.in.ffho.net 57 | -------------------------------------------------------------------------------- /Documentation/example-pillar/sites.sls: -------------------------------------------------------------------------------- 1 | sites: 2 | # Legacy 3 | legacy: 4 | site_no: 0 5 | name: paderborn.freifunk.net 6 | prefix_v4: 10.132.0.0/19 7 | prefix_v6: fdca:ffee:ff12:132::/64 8 | 9 | 10 | # Paderborn (Kernstadt) 11 | pad-cty: 12 | site_no: 1 13 | name: Paderborn (Kernstadt) 14 | prefix_v4: 10.132.32.0/20 15 | prefix_v6: 
2a03:2260:2342:100::/64 16 | next_node_v4: 10.132.32.1 17 | next_node_v6: 2a03:2260:2342:100::1 18 | domain_seed: 19 | 20 | # Paderborn (Umland) 21 | pad-uml: 22 | site_no: 2 23 | name: Paderborn (Umland) 24 | prefix_v4: 10.132.48.0/21 25 | prefix_v6: 2a03:2260:2342:200::/64 26 | next_node_v4: 10.132.48.1 27 | next_node_v6: 2a03:2260:2342:200::1 28 | domain_seed: 29 | 30 | # Bueren 31 | buq: 32 | site_no: 3 33 | name: Bueren 34 | prefix_v4: 10.132.56.0/21 35 | prefix_v6: 2a03:2260:2342:300::/64 36 | next_node_v4: 10.132.56.1 37 | next_node_v6: 2a03:2260:2342:300::1 38 | domain_seed: 39 | 40 | 41 | # Kreis Paderborn 42 | pb-nord: 43 | site_no: 4 44 | name: PB-Nord 45 | prefix_v4: 10.132.64.0/21 46 | prefix_v6: 2a03:2260:2342:400::/64 47 | next_node_v4: 10.132.64.1 48 | next_node_v6: 2a03:2260:2342:400::1 49 | domain_seed: 50 | -------------------------------------------------------------------------------- /Documentation/example-pillar/te.sls: -------------------------------------------------------------------------------- 1 | te: 2 | 3 | # Which communities should be evaluated at which nodes for which routing 4 | # decisions? 5 | community_map: 6 | 7 | # EXAMPLE 8 | # 9 | # node01.in.ffho.net: 10 | # : 11 | # - COMMUNITY_ONE 12 | # 13 | # Up to now the only predefined entity is "ffrl" which controls which 14 | # routes tagged with "EXPORT_RESTRICT" will be exported to AS20101 at 15 | # the given node.
16 | 17 | cr03.in.ffho.net: 18 | ffrl: 19 | - EXPORT_ONLY_AT_CR03 20 | 21 | 22 | # Tag prefixes with communities at given nodes 23 | prefixes: 24 | 25 | # EXAMPLE 26 | # 27 | # : 28 | # desc: "my magic prefix" 29 | # communities: 30 | # - COMMUNITY_ONE 31 | # - "(12345, 4711)" 32 | # nodes: 33 | # - node01.in.ffho.net 34 | 35 | 2a03:2260:2342::/52: 36 | desc: "Mesh Prefixes" 37 | communities: 38 | - EXPORT_RESTRICT 39 | - EXPORT_ONLY_AT_CR03 40 | nodes: 41 | - cr03.in.ffho.net 42 | 43 | 10.132.32.0/23: 44 | desc: "Gw03 Pad-Cty prefix" 45 | communities: 46 | - GATEWAY_TE_ROUTE 47 | nodes: 48 | - gw03.in.ffho.net 49 | 50 | 10.132.96.0/23: 51 | desc: "Gw03 PB-Nord prefix" 52 | communities: 53 | - GATEWAY_TE_ROUTE 54 | nodes: 55 | - gw03.in.ffho.net 56 | -------------------------------------------------------------------------------- /certs/ffho-cacert.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDlzCCAn+gAwIBAgIJAKjG4bigHRHdMA0GCSqGSIb3DQEBCwUAMGIxCzAJBgNV 3 | BAYTAkRFMQwwCgYDVQQIDANOUlcxGzAZBgNVBAoMEkZyZWlmdW5rIEhvY2hzdGlm 4 | dDELMAkGA1UEAwwCQ0ExGzAZBgkqhkiG9w0BCQEWDG9wc0BmZmhvLm5ldDAeFw0y 5 | NTA5MDQxNzM4MDVaFw0zNTA5MDIxNzM4MDVaMGIxCzAJBgNVBAYTAkRFMQwwCgYD 6 | VQQIDANOUlcxGzAZBgNVBAoMEkZyZWlmdW5rIEhvY2hzdGlmdDELMAkGA1UEAwwC 7 | Q0ExGzAZBgkqhkiG9w0BCQEWDG9wc0BmZmhvLm5ldDCCASIwDQYJKoZIhvcNAQEB 8 | BQADggEPADCCAQoCggEBAJtB0vwt/yDckFaD9FBxK9VbnD06YVYFIs0G4C3CEw6I 9 | HyZOS28I6LoawVaiYgPCcUmz5YkWCORIFK2YwY8O1vpQVva4nJ10QrJUI27G40VS 10 | t0jFSHmUWos4dyBDgEZsIgFBhrneceCwGbETHGTmB7yIIpluDtCKJT4irmRslfWM 11 | 0EO+iIMugn7CjQpnaURxIOsdmxbFiAkj732BnXLJ+Y+nz2lxcLsOWO/vbEhHPehq 12 | 9/Q6zehwYnGWJQvdbWo1XPGElrGqhxyWvz6O7cTsjZpD9aVvHEXlFhDeeln7a6fL 13 | bSE6SzMLjf8g7t3dATb4LugydbI67wwbYUDYKP8H6h8CAwEAAaNQME4wHQYDVR0O 14 | BBYEFMR56jwaUwPnPFtIROEncn33c7xFMB8GA1UdIwQYMBaAFMR56jwaUwPnPFtI 15 | ROEncn33c7xFMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAAjLulge 16 | 
XIn6Y+1IXC+0Kaoo/PIMUeUF/RoImfiF2r3dVzOOxthVxcsWbe+tKobsj9iagSYy 17 | YFOUGH6Snu2rftNVKsqbdYDmY682Qv3JkkR/OUgOccKo+jyi9zq5ng99zVJXPlPU 18 | FvNizXMEHrMq4VFGrA/D1sXWnc2GQLurF2BxzRRZmPfmAlq1fJRhcoY/8hW/xaVo 19 | GzDyiLwtIrv8ZQYOiu195EAZIDZXlcTg28LwQDeeeTmtfEEE69MF6BaO85Po9H1q 20 | HSjtcPLFIzygoH+HegGTnDc3jSiRZ7EbSz+1xXn7wlHcjQs18Ksvv+cetT/D5+Uo 21 | NFFJUQ54bY91OJM= 22 | -----END CERTIFICATE----- 23 | -------------------------------------------------------------------------------- /openvpn/ops.conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # FFHO OPS VPN 3 | # 4 | 5 | proto {{ config['proto'] }} 6 | port {{ config['port'] }} 7 | {%- if "bind-dev" in config %} 8 | bind-dev {{ config['bind-dev'] }} 9 | {% endif %} 10 | 11 | tls-server 12 | 13 | dev-type tun 14 | dev tun-ops 15 | tun-mtu 1400 16 | 17 | ca /etc/ssl/certs/ffho-cacert.pem 18 | cert /etc/ssl/certs/{{ config['fqdn'] }}.cert.pem 19 | key /etc/ssl/private/{{ config['fqdn'] }}.key.pem 20 | dh /etc/ssl/dhparam.pem 21 | 22 | # Auth via LDAP 23 | plugin /usr/lib/x86_64-linux-gnu/openvpn/plugins/openvpn-plugin-auth-pam.so openvpn 24 | verify-client-cert none 25 | username-as-common-name 26 | 27 | # Server mode and client subnets 28 | server {{ config['servers'][grains.id]['prefix_v4'] }} 29 | server-ipv6 {{ config['servers'][grains.id]['prefix_v6'] }} 30 | topology subnet 31 | 32 | # Push route for aggregates 33 | {%- for prefix in config['routes'] %} 34 | {%- set kw = 'route-ipv6' if ':' in prefix else 'route' %} 35 | push "{{ kw }} {{ prefix }}" 36 | {%- endfor %} 37 | 38 | # push "dhcp-option DNS 10.132.251.53" 39 | 40 | persist-key 41 | persist-tun 42 | 43 | keepalive 10 120 44 | 45 | data-ciphers {{ config.get ('data-ciphers', 'AES-256-GCM:AES-128-GCM') }} 46 | data-ciphers-fallback {{ config.get ('data-ciphers-fallback', 'AES-256-CBC') }} 47 | 48 | # Log a short status 49 | status /var/log/openvpn/openvpn-status-ops.log 50 | 51 | verb 1 52 | 
-------------------------------------------------------------------------------- /Documentation/example-pillar/dns-server.sls: -------------------------------------------------------------------------------- 1 | # 2 | # DNS related settings 3 | # 4 | 5 | dns-server: 6 | 7 | # Reference NS for sync checks 8 | reference_ns: 9 | 10 | 11 | # These settings are only relevant for boxes running DNS and monitoring 12 | {% if grains['id'].startswith ('dns') or grains['id'].startswith ('infra-') or grains['id'].startswith ('icinga2') %} 13 | 14 | acls: 15 | ffho-ops: 16 | FFH OPS: 17 | - 18 | 19 | replicas: 20 | ns1.acme.org: 21 | - 22 | ns1.acme.org: 23 | - 24 | 25 | # Defaults if not specified below 26 | zone_defaults: 27 | type: master 28 | # ACLs defined above 29 | allow-transfer: "replicas; localhost; ffho-ops;" 30 | 31 | zones: 32 | # public zones 33 | paderborn.freifunk.net: 34 | file: /etc/bind/zones/static/paderborn.freifunk.net.zone 35 | 36 | hochstift.freifunk.net: 37 | file: /etc/bind/zones/static/hochstift.freifunk.net.zone 38 | 39 | ffho.net: 40 | file: /etc/bind/zones/generated/ffho.net.zone 41 | 42 | # reverse zones etc. 43 | # ... 44 | 45 | 46 | # Configuration for authoritive name server 47 | auth: 48 | 49 | ips: 50 | - 51 | 52 | allow-recursion: 53 | - 54 | 55 | {% endif %} 56 | -------------------------------------------------------------------------------- /pppoe/ip-up.local: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # FFHO ip-up replacement script (Salt managed) 4 | # 5 | # Using a ip-up.local script intentionally PREVENTS ALL OTHER ip-up.d/ scripts 6 | # from being called. We don't want that stuff. 7 | # 8 | # 9 | # This script is run by the pppd after the link is established. 
10 | # 11 | # This script is called with the following arguments: 12 | # Arg Name Example 13 | # $1 Interface name ppp0 14 | # $2 The tty ttyS1 15 | # $3 The link speed 38400 16 | # $4 Local IP number 12.34.56.78 17 | # $5 Peer IP number 12.34.56.99 18 | # $6 Optional ``ipparam'' value foo 19 | 20 | # The environment is cleared before executing this script 21 | # so the path must be reset 22 | PATH=/usr/local/sbin:/usr/sbin:/sbin:/usr/local/bin:/usr/bin:/bin 23 | export PATH 24 | 25 | cat << EOF > /usr/local/sbin/fix_ppp_vrf.gen 26 | #!/bin/sh 27 | 28 | while ! ip a s dev ppp0 | grep -q "inet "; do 29 | sleep 1 30 | done 31 | 32 | ip link set ${1} master vrf_external 33 | ip link set ${1} up 34 | ip route add default via ${5} table 1023 35 | 36 | # ET... Phone... Home. 37 | ip vrf exec vrf_external wget --max-redirect=0 --header="Host: noc.ffho.net" http://80.70.180.55/et-phone-home/$(hostname -f) 38 | EOF 39 | 40 | chmod 755 /usr/local/sbin/fix_ppp_vrf.gen 41 | 42 | at -f /usr/local/sbin/fix_ppp_vrf now 43 | -------------------------------------------------------------------------------- /bird/radv.conf: -------------------------------------------------------------------------------- 1 | {%- set node_config = salt['pillar.get']('node') %} 2 | {%- set sites_config = salt['pillar.get']('sites') %} 3 | protocol radv { 4 | # ONLY advertise prefix, IF default route is available 5 | import all; 6 | export all; 7 | trigger ::/0; 8 | 9 | rdnss {{ salt['pillar.get'] ('globals:dns:resolver_v6') }}; 10 | 11 | {%- if grains.id.startswith('gw') %} 12 | {% for site in node_config.get ('sites', []) %} 13 | {%- set iface = 'br-' ~ site %} 14 | {%- set site_config = sites_config.get (site) %} 15 | # {{ site }} / {{ site_config.get ('name') }} 16 | interface "{{ iface }}" { 17 | default lifetime 600 sensitive yes; 18 | 19 | prefix {{ site_config.get ('prefix_v6') }} { 20 | preferred lifetime 3600; 21 | }; 22 | }; 23 | 24 | {% endfor %} 25 | {%- endif %} 26 | {%- for iface, 
iface_config in node_config.get ('ifaces').items ()|sort %} 27 | {%- if salt['ffho.re_search']('^vlan(3\d\d|39\d\d)$', iface) or "l3-access" in iface_config.get ('tags', []) %} 28 | {%- set v6_ip = salt['ffho_net.get_node_iface_ips'](node_config, iface, with_mask = True)['v6'][0] %} 29 | {%- set prefix = salt['ffho_net.get_network_address'](v6_ip, with_prefixlen = True) %} 30 | # {{ iface_config.get ('desc', 'L3-Access') }} 31 | interface "{{ iface }}" { 32 | default lifetime 600 sensitive yes; 33 | 34 | prefix {{ prefix }} { 35 | preferred lifetime 3600; 36 | }; 37 | }; 38 | 39 | {%- endif %} 40 | {%- endfor %} 41 | } 42 | -------------------------------------------------------------------------------- /burp/server.sls: -------------------------------------------------------------------------------- 1 | # 2 | # burp backup server 3 | # 4 | 5 | include: 6 | - burp 7 | 8 | 9 | burp-server: 10 | pkg.installed: 11 | - name: burp-server 12 | service.running: 13 | - enable: True 14 | - restart: True 15 | 16 | /etc/default/burp: 17 | file.managed: 18 | - source: salt://burp/server/default_burp 19 | - watch_in: 20 | - service: burp-server 21 | 22 | /etc/burp/burp-server.conf: 23 | file.managed: 24 | - source: salt://burp/server/burp-server.conf.tmpl 25 | - template: jinja 26 | - watch_in: 27 | - service: burp-server 28 | 29 | /etc/burp/clientconfdir: 30 | file.directory: 31 | - mode: 700 32 | 33 | /etc/burp/clientconfdir/incexc: 34 | file.directory: 35 | - require: 36 | - file: /etc/burp/clientconfdir 37 | 38 | /etc/burp/clientconfdir/incexc/common: 39 | file.managed: 40 | - source: salt://burp/server/common_incexc 41 | - require: 42 | - file: /etc/burp/clientconfdir/incexc 43 | - watch_in: 44 | - service: burp-server 45 | 46 | {% set nodes = salt['pillar.get']('nodes') %} 47 | {% for node, node_config in nodes.items()|sort if 'burp' in node_config and node_config['burp'].get ('password', False) %} 48 | /etc/burp/clientconfdir/{{ node }}: 49 | file.managed: 50 | - source: 
salt://burp/server/client.tmpl 51 | - template: jinja 52 | - context: 53 | node: {{ node }} 54 | burp_config: {{ node_config.get ('burp') }} 55 | {% endfor %} 56 | -------------------------------------------------------------------------------- /pppoe/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # PPPoE (Vectoring-Glasfaser-Technologie!) (Salt Managed) 3 | # 4 | 5 | pppoe: 6 | pkg.installed 7 | 8 | at: 9 | pkg.installed 10 | 11 | 12 | # Generate VRF fix script and make sure it's run after session start 13 | /etc/ppp/ip-up.local: 14 | file.managed: 15 | - source: salt://pppoe/ip-up.local 16 | - mode: 755 17 | - template: jinja 18 | 19 | /usr/local/sbin/fix_ppp_vrf: 20 | file.managed: 21 | - source: salt://pppoe/fix_ppp_vrf 22 | - mode: 755 23 | 24 | 25 | # Disable all other scripts alltogether 26 | /etc/ppp/ip-down.local: 27 | file.managed: 28 | - source: salt://pppoe/noop.local 29 | - mode: 755 30 | 31 | /etc/ppp/ipv6-up.local: 32 | file.managed: 33 | - source: salt://pppoe/noop.local 34 | - mode: 755 35 | 36 | /etc/ppp/ipv6-down.local: 37 | file.managed: 38 | - source: salt://pppoe/noop.local 39 | - mode: 755 40 | 41 | 42 | # Install peer config and password 43 | /etc/ppp/peers/tkom: 44 | file.managed: 45 | - source: salt://pppoe/tkom_peer.tmpl 46 | - template: jinja 47 | 48 | /etc/ppp/pap-secrets: 49 | file.managed: 50 | - source: salt://pppoe/pap-secrets 51 | - template: jinja 52 | 53 | 54 | # Install pppd restart script and cron-job 55 | /etc/cron.d/ff_fix_ppp: 56 | file.managed: 57 | - source: salt://pppoe/ff_fix_ppp.cron 58 | 59 | /usr/local/sbin/ff_fix_ppp: 60 | file.managed: 61 | - source: salt://pppoe/ff_fix_ppp 62 | - mode: 755 63 | -------------------------------------------------------------------------------- /network/ifupdown2/reload.sls: -------------------------------------------------------------------------------- 1 | # 2 | # network.ifupdown2.reload 3 | # 4 | 5 | # Reload interface 
configuration if neccessary 6 | ifreload: 7 | cmd.wait: 8 | - name: /sbin/ifreload -a 9 | - watch: 10 | - file: /etc/network/interfaces 11 | 12 | # If there is an interface in vrf_external, install a workaround script 13 | # for a bug in ifupdown2 which will sometimes drop an IPv4 default route 14 | # present in the kernel and not reinstall it. 15 | # 16 | # The fix script will be called every minute by cron and after ifreload 17 | # was called to try to minimize any downtime. 18 | {% set vrf = [False] %} 19 | {% for iface, iface_config in salt['pillar.get']('node:ifaces', {}).items() %} 20 | {% if iface_config.get ('vrf', '') == 'vrf_external' %} 21 | {% do vrf.append (True) %} 22 | {% break %} 23 | {% endif %} 24 | {% endfor %} 25 | 26 | /usr/local/sbin/ff_fix_default_route: 27 | {% if True in vrf %} 28 | file.managed: 29 | - source: salt://network/ifupdown2/ff_fix_default_route 30 | - mode: 755 31 | cmd.wait: 32 | - require: 33 | - cmd: ifreload 34 | - file: /usr/local/sbin/ff_fix_default_route 35 | - watch: 36 | - file: /etc/network/interfaces 37 | {% else %} 38 | file.absent 39 | {% endif %} 40 | 41 | /etc/cron.d/ff_fix_default_route: 42 | {% if True in vrf %} 43 | file.managed: 44 | - source: salt://network/ifupdown2/ff_fix_default_route.cron 45 | - template: jinja 46 | {% else %} 47 | file.absent 48 | {% endif %} 49 | 50 | -------------------------------------------------------------------------------- /bird/bird.conf: -------------------------------------------------------------------------------- 1 | # 2 | # IP{{ proto }} Bird configuration (Salt managed) 3 | # 4 | {%- set node_config = salt['pillar.get']('node', {}) %} 5 | 6 | define AS_OWN = 65132; 7 | define LO_IP = {{ salt['ffho_net.get_primary_ip'](node_config, proto).ip }}; 8 | 9 | router id {{ salt['ffho_net.get_router_id'](node_config, grains['id']) }}; 10 | 11 | 12 | # this pseudo-protocol watches all interface up/down events 13 | protocol device { 14 | scan time 10; 15 | }; 16 | 17 | # This 
pseudo-protocol performs synchronization between BIRD's routing 18 | # tables and the kernel. If your kernel supports multiple routing tables 19 | # (as Linux 2.2.x does), you can run multiple instances of the kernel 20 | # protocol and synchronize different kernel tables with different BIRD tables. 21 | protocol kernel { 22 | scan time 20; # Scan kernel routing table every 20 seconds 23 | 24 | import none; 25 | export filter { 26 | # Do NOT export local unreachable routes for TE purposes 27 | if proto = "ffho_te" then 28 | reject; 29 | 30 | # Export routes with source address set to loopback IP 31 | krt_prefsrc = LO_IP; 32 | accept; 33 | }; 34 | } 35 | 36 | 37 | # 38 | # Load local config knobs and additiional configuration (IGP, FFRL, 'n stuff) 39 | include "/etc/bird/local.conf"; 40 | 41 | {%- if proto == "v4" %} 42 | include "/etc/bird/ff-policy.conf"; 43 | include "/etc/bird/bird.d/*.conf"; 44 | {%- else %} 45 | include "/etc/bird/ff-policy6.conf"; 46 | include "/etc/bird/bird6.d/*.conf"; 47 | {%- endif %} 48 | -------------------------------------------------------------------------------- /anycast-healthchecker/anycast-healthchecker.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Anycast Healthchecker configuration 3 | # 4 | 5 | [daemon] 6 | pidfile = /var/run/anycast-healthchecker/anycast-healthchecker.pid 7 | ipv4 = true 8 | ipv6 = true 9 | bird_conf = /var/lib/anycast-healthchecker/anycast-prefixes-v4.conf 10 | bird6_conf = /var/lib/anycast-healthchecker/anycast-prefixes-v6.conf 11 | bird_variable = ANYCAST_ADVERTISE 12 | bird6_variable = ANYCAST_ADVERTISE 13 | bird_reconfigure_cmd = /usr/sbin/birdc configure 14 | bird6_reconfigure_cmd = /usr/sbin/birdc6 configure 15 | dummy_ip_prefix = 10.132.255.255/32 16 | dummy_ip6_prefix = 2a03:2260:2342:ffff::ffff/128 17 | bird_keep_changes = false 18 | bird6_keep_changes = false 19 | bird_changes_counter = 1 20 | bird6_changes_counter = 1 21 | purge_ip_prefixes = 
true 22 | loglevel = info 23 | log_maxbytes = 104857600 24 | log_backups = 1 25 | log_file = /var/log/anycast-healthchecker/anycast-healthchecker.log 26 | stderr_file = /var/log/anycast-healthchecker/stderr.log 27 | stdout_file = /var/log/anycast-healthchecker/stdout.log 28 | 29 | 30 | # 31 | # Default configuration values for checks 32 | [DEFAULT] 33 | interface = anycast_srv 34 | check_interval = 3 35 | check_timeout = 2 36 | check_rise = 2 37 | check_fail = 2 38 | check_disabled = false 39 | on_disabled = withdraw 40 | ip_check_disabled = false 41 | -------------------------------------------------------------------------------- /influxdb/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # influxdb 3 | # 4 | influxdb: 5 | file.managed: 6 | - names: 7 | - /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg: 8 | - source: salt://influxdb/influxdata-archive_compat.gpg 9 | - /etc/apt/sources.list.d/influxdb.list: 10 | - source: salt://influxdb/influxdb.list.tmpl 11 | - template: jinja 12 | - require: 13 | - file: /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg 14 | pkg.installed: 15 | - name: influxdb 16 | - require: 17 | - file: /etc/apt/sources.list.d/influxdb.list 18 | service.running: 19 | - name: influxdb 20 | - enable: True 21 | - require: 22 | - pkg: influxdb 23 | - file: /etc/influxdb/influxdb.conf 24 | - watch: 25 | - file: /etc/influxdb/influxdb.conf 26 | user.present: 27 | - name: influxdb 28 | - system: True 29 | - groups: 30 | - ssl-cert 31 | - require: 32 | - pkg: influxdb 33 | 34 | /etc/influxdb/influxdb.conf: 35 | file.managed: 36 | - source: salt://influxdb/influxdb.conf.tmpl 37 | - template: jinja 38 | - require: 39 | - pkg: influxdb 40 | 41 | /usr/local/sbin/backup-influx.sh: 42 | file.managed: 43 | - source: salt://influxdb/backup.sh 44 | - mode: 700 45 | - user: influxdb 46 | 47 | /etc/cron.d/backup-influx: 48 | file.managed: 49 | - contents: "0 22 * * * influxdb [ -f 
/usr/local/sbin/backup-influx.sh ] && /usr/local/sbin/backup-influx.sh" 50 | - require: 51 | - file: /usr/local/sbin/backup-influx.sh 52 | -------------------------------------------------------------------------------- /dns-server/named.conf.options: -------------------------------------------------------------------------------- 1 | // 2 | // Bind options (Salt managed) 3 | // 4 | 5 | options { 6 | directory "/var/cache/bind"; 7 | 8 | // If there is a firewall between you and nameservers you want 9 | // to talk to, you may need to fix the firewall to allow multiple 10 | // ports to talk. See http://www.kb.cert.org/vuls/id/800113 11 | 12 | // If your ISP provided one or more IP addresses for stable 13 | // nameservers, you probably want to use them as forwarders. 14 | // Uncomment the following block, and insert the addresses replacing 15 | // the all-0's placeholder. 16 | 17 | // forwarders { 18 | // 0.0.0.0; 19 | // }; 20 | 21 | //======================================================================== 22 | // If BIND logs error messages about the root key being expired, 23 | // you will need to update your keys. See https://www.isc.org/bind-keys 24 | //======================================================================== 25 | // Disable DNSSEC validation as it will FAIL for all ffXY domains which will 26 | // render them unuseable. 
As bind can only be run in all-on or all-off mode 27 | // this seems to be our only chance for now :-( 28 | dnssec-validation no; 29 | 30 | 31 | auth-nxdomain no; # conform to RFC1035 32 | listen-on-v6 { any; }; 33 | 34 | allow-recursion { 35 | 127.0.0.0/8; 36 | ::1/128; 37 | }; 38 | 39 | version "Freifunk Hochstift DNS"; 40 | use-v4-udp-ports { range 1024 65535; }; 41 | use-v6-udp-ports { range 1024 65535; }; 42 | }; 43 | 44 | // Allow scraping by bind-exporter 45 | statistics-channels { 46 | inet 127.0.0.1 port 8053 allow { 127.0.0.1; }; 47 | }; 48 | -------------------------------------------------------------------------------- /forgejo/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # forgejo 3 | # 4 | 5 | {% set config = salt['pillar.get']('node:forgejo', {}) %} 6 | 7 | forgejo-repo: 8 | file.managed: 9 | - names: 10 | - /usr/share/keyrings/forgejo-apt.asc: 11 | - source: salt://forgejo/forgejo-apt.asc 12 | 13 | pkgrepo.managed: 14 | - comments: "# forgejo repo" 15 | - human_name: forgejo repository 16 | - name: "deb [signed-by=/usr/share/keyrings/forgejo-apt.asc] https://code.forgejo.org/api/packages/apt/debian lts main" 17 | - file: /etc/apt/sources.list.d/forgejo.list 18 | 19 | postgresql: 20 | pkg.installed: 21 | - name: postgresql 22 | 23 | service.running: 24 | - name: postgresql 25 | - enable: true 26 | - require: 27 | - pkg: postgresql 28 | 29 | forgejo: 30 | pkg.installed: 31 | - pkgs: 32 | - forgejo 33 | - require: 34 | - pkgrepo: forgejo-repo 35 | 36 | postgres_database.present: 37 | - name: forgejo 38 | - require: 39 | - service: postgresql 40 | 41 | postgres_user.present: 42 | - password: {{config.password}} 43 | - require: 44 | - service: postgresql 45 | 46 | postgres_privileges.present: 47 | - object_name: forgejo 48 | - object_type: database 49 | - user: postgres 50 | - privileges: 51 | - all 52 | - require: 53 | - postgres_database: forgejo 54 | - postgres_user: forgejo 55 | 56 | 
/etc/forgejo/app.ini: 57 | file.managed: 58 | - source: salt://forgejo/app.ini.tmpl 59 | - template: jinja 60 | - context: 61 | config: {{ config }} 62 | - require: 63 | - pkg: forgejo 64 | 65 | -------------------------------------------------------------------------------- /icinga2/icinga2.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Icinga2 main configuration for nodes (Salt managed) 3 | # 4 | 5 | /** 6 | * Icinga 2 configuration file 7 | * - this is where you define settings for the Icinga application including 8 | * which hosts/services to check. 9 | * 10 | * For an overview of all available configuration options please refer 11 | * to the documentation that is distributed as part of Icinga 2. 12 | */ 13 | 14 | /** 15 | * The constants.conf defines global constants. 16 | */ 17 | include "constants.conf" 18 | 19 | /** 20 | * The zones.conf defines zones for a cluster setup. 21 | * Not required for single instance setups. 22 | */ 23 | include "zones.conf" 24 | 25 | /** 26 | * The Icinga Template Library (ITL) provides a number of useful templates 27 | * and command definitions. 28 | * Common monitoring plugin command definitions are included separately. 29 | */ 30 | include 31 | include 32 | include 33 | include 34 | 35 | /** 36 | * This includes the NSClient++ check commands. These command definitions 37 | * are required on a master node when a client is used as command endpoint. 38 | */ 39 | include 40 | 41 | /** 42 | * The features-available directory contains a number of configuration 43 | * files for features which can be enabled and disabled using the 44 | * icinga2 feature enable / icinga2 feature disable CLI commands. 45 | * These commands work by creating and removing symbolic links in 46 | * the features-enabled directory. 
47 | */ 48 | include "features-enabled/*.conf" 49 | 50 | # Include command definitions 51 | include_recursive "commands.d" 52 | -------------------------------------------------------------------------------- /icinga2/plugins/check_systemd_units: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright (C) 2016 Mohamed El Morabity 4 | # 5 | # This module is free software: you can redistribute it and/or modify it under 6 | # the terms of the GNU General Public License as published by the Free Software 7 | # Foundation, either version 3 of the License, or (at your option) any later 8 | # version. 9 | # 10 | # This software is distributed in the hope that it will be useful, but WITHOUT 11 | # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS 12 | # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 13 | # 14 | # You should have received a copy of the GNU General Public License along with 15 | # this program. If not, see . 16 | 17 | 18 | PLUGINDIR=/usr/lib/nagios/plugins/ 19 | . $PLUGINDIR/utils.sh 20 | 21 | 22 | status=$(systemctl list-units --failed --no-legend --plain | cut -f1 -d" " |xargs) 23 | r=$? 24 | 25 | while getopts "w" opt; do 26 | case $opt in 27 | w) 28 | # Whitelist einlesen 29 | readarray -t units < /etc/icinga2/service_whitelist 30 | esac 31 | status=( $(systemctl list-units --failed --no-legend --plain ${units[*]}| cut -f1 -d" " |xargs) ) 32 | r=$? 33 | done 34 | 35 | if [ $r -ne 0 ]; then 36 | echo "UNKNOWN: Check command failed." 37 | exit $STATE_UNKNOWN 38 | fi 39 | 40 | if [ -z "$status" ]; then 41 | echo "OK: No Failed Units." 42 | exit $STATE_OK 43 | else 44 | echo "CRITICAL: Some units failed. ${status[*]}." 
45 | exit $STATE_CRITICAL 46 | fi 47 | 48 | echo "OK: service $service is running" 49 | exit $STATE_OK 50 | 51 | -------------------------------------------------------------------------------- /openvpn/openvpn.conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # {{ netname }} / {{ network_config.get ('_desc', '') }} (Salt managed) 3 | # 4 | 5 | {%- set mode = config.get ('mode', 'client') %} 6 | {%- if 'server' in mode %} 7 | local {{ network_config.get ('server_ip') }} 8 | port {{ network_config.get ('port') }} 9 | 10 | tls-server 11 | {%- elif 'client' in mode %} 12 | remote {{ config.get ('remote', config.get ('server_ip')) }} {{ network_config.get ('port') }} 13 | 14 | tls-client 15 | nobind 16 | {%- endif %} 17 | 18 | {%- if 'bind_dev' in config %} 19 | bind-dev {{ config.get ('bind_dev') }} 20 | {%- endif %} 21 | 22 | proto {{ network_config.get ('proto', 'udp') }} 23 | 24 | dev-type {{ network_config.get ('dev-type', 'tap') }} 25 | dev {{ config.get ('interface') }} 26 | 27 | {%- if mode == 'server' %} 28 | mode server 29 | 30 | client-config-dir /etc/openvpn/{{ netname }} 31 | ccd-exclusive 32 | 33 | push "route remote_host 255.255.255.255 net_gateway" 34 | {%- endif %} 35 | 36 | ca /etc/ssl/certs/ffho-cacert.pem 37 | cert /etc/ssl/certs/{{ host_config.get ('cert_cn', grains['id']) }}.cert.pem 38 | key /etc/ssl/private/{{ host_config.get ('cert_cn', grains['id']) }}.key.pem 39 | dh /etc/ssl/dhparam.pem 40 | 41 | script-security 2 42 | up /etc/openvpn/ifup 43 | down /etc/openvpn/ifdown 44 | 45 | keepalive 10 30 46 | {%- if 'server' in mode %} 47 | connect-retry 1 1 48 | {%- endif %} 49 | 50 | comp-lzo 51 | 52 | persist-key 53 | persist-tun 54 | 55 | status /var/log/openvpn/openvpn-status-{{ netname }}.log 56 | 57 | verb 1 58 | tls-version-min 1.0 59 | data-ciphers {{ config.get ('data-ciphers', 'AES-256-GCM:AES-128-GCM') }} 60 | 
-------------------------------------------------------------------------------- /bash/bash_aliases.root: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Nifty shell aliases for debugging (Salt managed) 4 | # 5 | 6 | function peer2v6ffpb() 7 | { 8 | peername=$1 9 | peer2v6ll $1 fdca:ffee:ff12:132: 10 | } 11 | 12 | function peer2v6ll() 13 | { 14 | peername=$1 15 | prefix=$2 16 | if [ -z "$prefix" ]; then 17 | prefix="fe80::" 18 | fi 19 | 20 | mac=$(grep MAC /etc/freifunk/peers/$peername | cut -d ' ' -f 3) 21 | if [ -z "${mac}" ]; then 22 | echo "no peer named '${peername}' found, did you spell it correctly?" >&2 23 | return 1 24 | else 25 | euid64=$(ipv6calc -q --action geneui64 -I mac ${mac}) 26 | echo ${prefix}${euid64} 27 | return 0 28 | fi 29 | } 30 | 31 | function connect2peer() 32 | { 33 | peername=$1 34 | target=$(peer2v6ll $peername) 35 | if [ "$?" == "0" ]; then 36 | echo "trying to ssh into peer '${peername}' (${target})" 37 | ssh -l root -6 ${target}%br-ffpb 38 | fi 39 | } 40 | 41 | function pingpeer() 42 | { 43 | peername=$1 44 | target=$(peer2v6ll $peername) 45 | if [ "$?" == "0" ]; then 46 | echo "pinging peer '${peername}' (${target})" 47 | ping6 ${target}%br-ffpb 48 | fi 49 | } 50 | 51 | function peerstatus() 52 | { 53 | peername=$1 54 | target=$(peer2v6ll $peername fdca:ffee:ff12:132:) 55 | tf=`tempfile` 56 | echo -en "\e[97mFetching node status of '$peername' ...\e[39m " 57 | wget -q "http://[$target]/cgi-bin/status" -O $tf 58 | if [ $? 
-eq 0 ]; then 59 | echo -e "\e[92mOK\e[39m" 60 | cp $tf /tmp/ffpb-nodestatus-$peername.htm 61 | #less /tmp/ffpb-nodestatus-$peername.htm 62 | lynx -dump /tmp/ffpb-nodestatus-$peername.htm 63 | else 64 | echo -e "\e[91mERROR\e[39m" 65 | fi 66 | rm $tf 67 | } 68 | -------------------------------------------------------------------------------- /network/ifupdown2/ff_fix_default_route: -------------------------------------------------------------------------------- 1 | #!/usr/bin/perl -W 2 | # 3 | # Maximilian Wilhelm 4 | # -- Sat 11 Feb 2017 10:29:29 PM CET 5 | # 6 | 7 | use strict; 8 | 9 | # Search for interface entry for an external interface with an IPv4 default 10 | # route configured, like this: 11 | # 12 | #auto eth0 13 | #iface eth0 14 | # address 5.196.106.54/32 15 | # gateway 5.196.106.48 16 | # mtu 1500 17 | # pointopoint 5.196.106.48 18 | my $gateway = undef; 19 | open (ENI, "< /etc/network/interfaces") 20 | or die "Failed to open '/etc/network/interfaces': $!\n"; 21 | while (my $line = ) { 22 | chomp $line; 23 | 24 | # New interface stanza 25 | if ($line =~ /^iface (.*)/) { 26 | $gateway = undef; 27 | } 28 | 29 | # gateway set? 30 | elsif ($line =~ m/gateway\s+([0-9.]+)$/) { 31 | $gateway = $1; 32 | } 33 | 34 | # Interface part of vrf_external 35 | elsif ($line =~ m/vrf vrf_external/) { 36 | last; 37 | } 38 | } 39 | close (ENI); 40 | 41 | # If there's no gateway configured for vrf_external, nothing to do 42 | if (not defined $gateway) { 43 | exit (0); 44 | } 45 | 46 | 47 | # Check for current default route in vrf_external 48 | my $default_route_active = undef; 49 | open (ROUTE, "ip -4 route show table 1023 | grep ^default |") 50 | or die "Failed to read default route from table 1023: $!\n"; 51 | while (my $line = ) { 52 | if ($line =~ m/^default via ([0-9.]+)/) { 53 | $default_route_active = $1; 54 | } 55 | } 56 | close (ROUTE); 57 | 58 | 59 | # If we didn't find an active default route, re-add it. 
60 | if (not defined $default_route_active) { 61 | system ("ip route add default via $gateway table 1023"); 62 | } 63 | -------------------------------------------------------------------------------- /firmware/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # firmware 3 | # 4 | {% set firmware_path = salt['pillar.get']('node:path:firmware') 5 | 6 | firmware-pkgs: 7 | pkg.installed: 8 | - pkgs: 9 | - git 10 | - pandoc 11 | user.present: 12 | - name: firmware 13 | - gid: firmware 14 | - shell: /bin/bash 15 | - home: /home/firmware 16 | - createhome: True 17 | 18 | firmware-git: 19 | file.directory: 20 | - name: {{ firmware_path }} 21 | - user: firmware 22 | - group: firmware 23 | - mode: 755 24 | - require: 25 | - user: firmware 26 | git.latest: 27 | - name: gogs@git.srv.in.ffho.net:FreifunkHochstift/ffho-firmware-website.git 28 | - target: {{ firmware_path }} 29 | - user: firmware 30 | - update_head: False 31 | - require: 32 | - pkg: firmware-pkgs 33 | - user: firmware 34 | - file: firmware-git 35 | 36 | firmware-changelog: 37 | cmd.run: 38 | - name: FORCE=1 /usr/local/sbin/update-firmware 39 | - creates: {{ firmware_path }}/stable/Changelog.html 40 | - user: firmware 41 | - group: firmware 42 | - watch: 43 | - git: firmware-git 44 | - require: 45 | - user: firmware 46 | - file: /usr/local/sbin/update-firmware 47 | 48 | firmware-cron: 49 | cron.present: 50 | - name: /usr/local/sbin/update-firmware 51 | - identifier: firmware-cron 52 | - user: firmware 53 | - minute: 42 54 | - require: 55 | - user: firmware 56 | - file: /usr/local/sbin/update-firmware 57 | 58 | 59 | /usr/local/sbin/update-firmware: 60 | file.managed: 61 | - source: salt://firmware/update-firmware 62 | - template: jinja 63 | - mode: 755 64 | -------------------------------------------------------------------------------- /batman/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Set up 
B.A.T.M.A.N. module 'n stuff 3 | # 4 | 5 | # 6 | # Only set up batman and load batman_adv kernel module if the role »batman« 7 | # has been configured for this node. 8 | # 9 | {%- set roles = salt['pillar.get']('node:roles', []) %} 10 | 11 | {%- if 'batman' in roles %} 12 | batctl: 13 | pkg.latest: 14 | - name: batctl 15 | 16 | 17 | # Convenience bat-hosts file for informative batctl output 18 | /etc/bat-hosts: 19 | file.managed: 20 | - source: salt://batman/bat-hosts.tmpl 21 | - template: jinja 22 | 23 | 24 | # Make sure the batman_adv module is loaded at boot time 25 | /etc/modules-load.d/batman-adv.conf: 26 | file.managed: 27 | - source: salt://batman/batman-adv.module.conf 28 | 29 | 30 | # 31 | # Is this node a B.A.T.M.A.N. gateway? 32 | {%- if 'batman_gw' in roles %} 33 | 34 | /etc/cron.d/ff_check_gateway: 35 | file.managed: 36 | - source: salt://batman/ff_check_gateway.cron 37 | - template: jinja 38 | 39 | /usr/local/sbin/ff_check_gateway: 40 | file.managed: 41 | - source: salt://batman/ff_check_gateway 42 | - mode: 755 43 | - user: root 44 | - group: root 45 | 46 | {% endif %} 47 | 48 | # 49 | # If the role »batman» is NOT configured for this node, make sure to purge any 50 | # traces of a previous installation, if present. 
51 | # 52 | {% else %} 53 | 54 | batctl: 55 | pkg.purged 56 | 57 | batman-adv-dkms: 58 | pkg.purged 59 | 60 | /etc/bat-hosts: 61 | file.absent 62 | 63 | /etc/modules-load.d/batman-adv.conf: 64 | file.absent 65 | 66 | /etc/cron.d/ff_check_gateway: 67 | file.absent 68 | 69 | /usr/local/sbin/ff_check_gateway: 70 | file.absent 71 | {% endif %} 72 | -------------------------------------------------------------------------------- /fastd/ff_fastd_conn: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Maximilian Wilhelm 4 | # -- Mon 31 Aug 2015 08:55:27 AM CEST 5 | # 6 | 7 | if [ $# -lt 1 ]; then 8 | echo "Usage: $(basename $0) [ -c ] fastd_instance | -a" >&2 9 | exit 1 10 | fi 11 | 12 | fastd_ciphers () { 13 | socket_path=$(grep "status socket" "/etc/fastd/${1}/fastd.conf" | grep -o '/[0-9a-z/_.-]\+') 14 | 15 | echo -n "$1: " 16 | socat - "UNIX-CONNECT:${socket_path}" | jq '.peers[] | select (.connection) | .connection | .method ' | sort | uniq -c 17 | } 18 | 19 | fastd_con () { 20 | socket_path=$(grep "status socket" "/etc/fastd/${1}/fastd.conf" | grep -o '/[0-9a-z/_.-]\+') 21 | 22 | echo -n "$1: " 23 | socat - "UNIX-CONNECT:${socket_path}" | jq '.peers[] | select( .connection ) | .name' | wc -l 24 | } 25 | 26 | mode="count" 27 | if [ "${1}" = "-c" ]; then 28 | mode="ciphers" 29 | shift 30 | fi 31 | 32 | instances="${@}" 33 | if [ "${1}" = "-a" ]; then 34 | instances="all" 35 | fi 36 | 37 | 38 | # Verify existance of fastd instance 39 | if [ "$instances" != "all" -a ! -d "/etc/fastd/${instances}" ]; then 40 | echo "Invalid fastd instance \"$instances\"." >&2 41 | exit 1 42 | 43 | # Figure out all fastd instances 44 | elif [ "$instances" = "all" ]; then 45 | instances=$(find /etc/fastd -mindepth 1 -maxdepth 1 -type d -exec basename {} \; | sort) 46 | 47 | if [ ! "${instances}" ]; then 48 | echo "Did not find any configured instances in /etc/fastd." 
>&2 49 | exit 1 50 | fi 51 | fi 52 | 53 | 54 | for inst in ${instances}; do 55 | if [ "${mode}" = "count" ]; then 56 | fastd_con ${inst} 57 | 58 | elif [ "$mode" = "ciphers" ]; then 59 | fastd_ciphers ${inst} 60 | fi 61 | done 62 | -------------------------------------------------------------------------------- /icinga2/services/common.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Global checks for every Host 3 | # 4 | 5 | apply Service "ping4" { 6 | import "generic-service" 7 | check_command = "ping4" 8 | 9 | if (host.vars.service_param["ping"]["warn"]) { 10 | vars.ping_wrta = host.vars.service_param["ping"]["warn"] 11 | } 12 | 13 | if (host.vars.service_param["ping"]["crit"]) { 14 | vars.ping_crta = host.vars.service_param["ping"]["crit"] 15 | } 16 | 17 | assign where host.address 18 | ignore where host.vars.service_param["ping"]["ignore"] 19 | } 20 | 21 | apply Service "ping6" { 22 | import "generic-service" 23 | check_command = "ping6" 24 | 25 | if (host.vars.service_param["ping"]["warn"]) { 26 | vars.ping_wrta = host.vars.service_param["ping"]["warn"] 27 | } 28 | 29 | if (host.vars.service_param["ping"]["crit"]) { 30 | vars.ping_crta = host.vars.service_param["ping"]["crit"] 31 | } 32 | 33 | assign where host.address6 34 | ignore where host.vars.service_param["ping"]["ignore"] 35 | } 36 | 37 | apply Service "dns" { 38 | import "generic-service" 39 | 40 | check_command = "dns" 41 | check_interval = 5m 42 | 43 | assign where host.address 44 | } 45 | 46 | apply Service "cluster zone" { 47 | import "generic-service" 48 | 49 | check_command = "cluster-zone" 50 | 51 | assign where host.address && host.vars.os == "linux" 52 | ignore where host.vars.noagent 53 | ignore where host.name == NodeName 54 | } 55 | 56 | apply Service "icinga" { 57 | import "generic-service" 58 | 59 | check_command = "icinga" 60 | 61 | 62 | if (host.name != NodeName) { 63 | command_endpoint = host.name 64 | } 65 | 66 | assign where 
host.address && host.vars.os == "linux" 67 | ignore where host.vars.noagent 68 | } 69 | -------------------------------------------------------------------------------- /ntp/ntp.conf: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/ntp.conf (Salt managed) 3 | # 4 | # configuration for ntpd; see ntp.conf(5) for help 5 | # 6 | driftfile /var/lib/ntp/ntp.drift 7 | 8 | # Enable this if you want statistics to be logged. 9 | #statsdir /var/log/ntpstats/ 10 | 11 | statistics loopstats peerstats clockstats 12 | filegen loopstats file loopstats type day enable 13 | filegen peerstats file peerstats type day enable 14 | filegen clockstats file clockstats type day enable 15 | 16 | 17 | # You do need to talk to an NTP server or two (or three). 18 | #server ntp.your-provider.example 19 | 20 | # pool.ntp.org maps to about 1000 low-stratum NTP servers. Your server will 21 | # pick a different set every time it starts up. Please consider joining the 22 | # pool: 23 | server 0.debian.pool.ntp.org iburst 24 | server 1.debian.pool.ntp.org iburst 25 | server 2.debian.pool.ntp.org iburst 26 | 27 | # Access control configuration; see /usr/share/doc/ntp-doc/html/accopt.html for 28 | # details. The web page 29 | # might also be helpful. 30 | # 31 | # Note that "restrict" applies to both servers and clients, so a configuration 32 | # that might be intended to block requests from certain clients could also end 33 | # up blocking replies from your own upstream servers. 34 | 35 | # By default, exchange time with everybody, but don't allow configuration. 36 | restrict -4 default kod notrap nomodify nopeer noquery 37 | restrict -6 default kod notrap nomodify nopeer noquery 38 | 39 | # Local users may interrogate the ntp server more closely. 
40 | restrict 127.0.0.1 41 | restrict ::1 42 | 43 | # CVE-2013-5211 44 | disable monitor 45 | -------------------------------------------------------------------------------- /dhcp-client/dhclient.conf: -------------------------------------------------------------------------------- 1 | # Configuration file for /sbin/dhclient. 2 | # 3 | # This is a sample configuration file for dhclient. See dhclient.conf's 4 | # man page for more information about the syntax of this file 5 | # and a more comprehensive list of the parameters understood by 6 | # dhclient. 7 | # 8 | # Normally, if the DHCP server provides reasonable information and does 9 | # not leave anything out (like the domain name, for example), then 10 | # few changes must be made to this file, if any. 11 | # 12 | 13 | option rfc3442-classless-static-routes code 121 = array of unsigned integer 8; 14 | 15 | send host-name = gethostname(); 16 | request subnet-mask, broadcast-address, time-offset, routers, interface-mtu; 17 | 18 | #send dhcp-client-identifier 1:0:a0:24:ab:fb:9c; 19 | #send dhcp-lease-time 3600; 20 | #supersede domain-name "fugue.com home.vix.com"; 21 | #prepend domain-name-servers 127.0.0.1; 22 | #require subnet-mask, domain-name-servers; 23 | #timeout 60; 24 | #retry 60; 25 | #reboot 10; 26 | #select-timeout 5; 27 | #initial-interval 2; 28 | script "/usr/local/sbin/dhclient-script"; 29 | #media "-link0 -link1 -link2", "link0 link1"; 30 | #reject 192.33.137.209; 31 | 32 | #alias { 33 | # interface "eth0"; 34 | # fixed-address 192.5.5.213; 35 | # option subnet-mask 255.255.255.255; 36 | #} 37 | 38 | #lease { 39 | # interface "eth0"; 40 | # fixed-address 192.33.137.200; 41 | # medium "link0 link1"; 42 | # option host-name "andare.swiftmedia.com"; 43 | # option subnet-mask 255.255.255.0; 44 | # option broadcast-address 192.33.137.255; 45 | # option routers 192.33.137.250; 46 | # option domain-name-servers 127.0.0.1; 47 | # renew 2 2000/1/12 00:00:01; 48 | # rebind 2 2000/1/12 00:00:01; 49 | # expire 
2 2000/1/12 00:00:01; 50 | #} 51 | -------------------------------------------------------------------------------- /postfix/main.cf: -------------------------------------------------------------------------------- 1 | # See /usr/share/postfix/main.cf.dist for a commented, more complete version 2 | 3 | 4 | # Debian specific: Specifying a file name will cause the first 5 | # line of that file to be used as the name. The Debian default 6 | # is /etc/mailname. 7 | #myorigin = /etc/mailname 8 | 9 | smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU) 10 | biff = no 11 | 12 | # appending .domain is the MUA's job. 13 | append_dot_mydomain = no 14 | 15 | # Uncomment the next line to generate "delayed mail" warnings 16 | #delay_warning_time = 4h 17 | 18 | readme_directory = no 19 | 20 | # See http://www.postfix.org/COMPATIBILITY_README.html -- default to 2 on 21 | # fresh installs. 22 | compatibility_level = 2 23 | 24 | # TLS parameters 25 | smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem 26 | smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key 27 | smtpd_use_tls=yes 28 | smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache 29 | smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache 30 | smtp_tls_security_level = may 31 | smtp_tls_note_starttls_offer = yes 32 | 33 | 34 | # See /usr/share/doc/postfix/TLS_README.gz in the postfix-doc package for 35 | # information on enabling SSL in the smtp client. 
36 | 37 | smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination 38 | myhostname = {{ grains['id'] }} 39 | alias_maps = hash:/etc/aliases 40 | alias_database = hash:/etc/aliases 41 | myorigin = /etc/mailname 42 | mydestination = {{ grains['id'] }}, localhost 43 | relayhost = mail.ffho.net 44 | mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128 45 | mailbox_size_limit = 0 46 | recipient_delimiter = + 47 | inet_interfaces = all 48 | # IPv6 only 49 | inet_protocols = ipv6 50 | -------------------------------------------------------------------------------- /apt/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # APT 3 | # 4 | 5 | # OS sources.list 6 | /etc/apt/sources.list: 7 | file.managed: 8 | - source: salt://apt/sources.list.{{ grains.os }}.{{ grains.oscodename }} 9 | 10 | /etc/cron.d/apt: 11 | file.managed: 12 | - source: salt://apt/update_apt.cron 13 | 14 | # APT preferences 15 | /etc/apt/preferences.d/ffho: 16 | file.managed: 17 | - source: salt://apt/ffho.preferences 18 | 19 | 20 | /etc/apt/apt.conf.d/ffho: 21 | file.managed: 22 | - source: salt://apt/ffho.apt.conf 23 | 24 | # New place for keyrings 25 | /etc/apt/keyrings: 26 | file.directory 27 | 28 | # FFHO APT 29 | /etc/apt/trusted.gpg.d/ffho.gpg: 30 | file.managed: 31 | - source: salt://apt/ffho.gpg.{{ grains.os }}.{{ grains.oscodename }} 32 | 33 | /etc/apt/sources.list.d/ffho.list: 34 | file.managed: 35 | - source: salt://apt/ffho.list.{{ grains.os }}.{{ grains.oscodename }} 36 | - require: 37 | - file: /etc/apt/trusted.gpg.d/ffho.gpg 38 | 39 | # Salt repo 40 | /etc/apt/keyrings/salt-archive-keyring.pgp: 41 | file.managed: 42 | - source: salt://apt/salt-archive-keyring.pgp 43 | - require: 44 | - file: /etc/apt/keyrings 45 | 46 | /etc/apt/sources.list.d/salt.sources: 47 | file.managed: 48 | - source: salt://apt/salt.sources 49 | - require: 50 | - file: /etc/apt/keyrings/salt-archive-keyring.pgp 51 | 52 | 
/etc/apt/preferences.d/salt-pin-1001: 53 | file.managed: 54 | - contents: | 55 | Package: salt-* 56 | Pin: version 3006.10* 57 | Pin-Priority: 1001 58 | - require: 59 | - file: /etc/apt/sources.list.d/salt.sources 60 | 61 | /etc/apt/sources.list.d/salt.list: 62 | file.absent 63 | 64 | /usr/share/keyrings/salt-archive-keyring.gpg: 65 | file.absent 66 | 67 | 68 | -------------------------------------------------------------------------------- /postfix/main.cf.H_ticket.in.ffho.net: -------------------------------------------------------------------------------- 1 | # See /usr/share/postfix/main.cf.dist for a commented, more complete version 2 | 3 | 4 | # Debian specific: Specifying a file name will cause the first 5 | # line of that file to be used as the name. The Debian default 6 | # is /etc/mailname. 7 | #myorigin = /etc/mailname 8 | 9 | smtpd_banner = $myhostname ESMTP $mail_name (Debian/GNU) 10 | biff = no 11 | 12 | # appending .domain is the MUA's job. 13 | append_dot_mydomain = no 14 | 15 | # Uncomment the next line to generate "delayed mail" warnings 16 | #delay_warning_time = 4h 17 | 18 | readme_directory = no 19 | 20 | # TLS parameters 21 | smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem 22 | smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key 23 | smtpd_use_tls=yes 24 | smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache 25 | smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache 26 | smtp_tls_security_level = may 27 | smtp_tls_note_starttls_offer = yes 28 | 29 | # See /usr/share/doc/postfix/TLS_README.gz in the postfix-doc package for 30 | # information on enabling SSL in the smtp client. 
31 | 32 | smtpd_relay_restrictions = permit_mynetworks permit_sasl_authenticated defer_unauth_destination 33 | myhostname = ticket.in.ffho.net 34 | alias_maps = hash:/etc/aliases 35 | alias_database = hash:/etc/aliases 36 | myorigin = /etc/mailname 37 | mydestination = ticket.in.ffho.net, ops.paderborn.freifunk.net, ops.ffpb, localhost.ffpb, localhost, ops.ffho.net 38 | relayhost = mail.ffho.net 39 | mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128 40 | mailbox_command = procmail -a "$EXTENSION" 41 | mailbox_size_limit = 0 42 | recipient_delimiter = + 43 | inet_interfaces = all 44 | # IPv6 only 45 | inet_protocols = ipv6 46 | compatibility_level = 2 47 | -------------------------------------------------------------------------------- /nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | user www-data; 2 | worker_processes 4; 3 | pid /run/nginx.pid; 4 | include /etc/nginx/modules-enabled/*.conf; 5 | 6 | events { 7 | worker_connections 768; 8 | # multi_accept on; 9 | } 10 | 11 | http { 12 | 13 | ## 14 | # Basic Settings 15 | ## 16 | # increase body size that nextcloud can receive large files 17 | client_max_body_size 64m; 18 | 19 | sendfile on; 20 | tcp_nopush on; 21 | tcp_nodelay on; 22 | keepalive_timeout 65; 23 | types_hash_max_size 2048; 24 | # server_tokens off; 25 | {% if 'frontend' in salt['pillar.get']('node:roles', []) %} 26 | server_names_hash_bucket_size 64; 27 | {%- else %} 28 | # server_names_hash_bucket_size 64; 29 | {%- endif %} 30 | # server_name_in_redirect off; 31 | 32 | include /etc/nginx/mime.types; 33 | default_type application/octet-stream; 34 | 35 | ## 36 | # SSL Settings 37 | ## 38 | 39 | ssl_prefer_server_ciphers on; 40 | ssl_protocols TLSv1.2 TLSv1.3; 41 | ssl_ciphers 'EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH'; 42 | ssl_dhparam /etc/ssl/dhparam.pem; 43 | ssl_ecdh_curve secp384r1; 44 | ssl_session_cache shared:SSL:10m; 45 | ssl_session_timeout 1d; 46 | 47 | ## 48 | # Logging 
Settings 49 | ## 50 | 51 | access_log /var/log/nginx/access.log; 52 | error_log /var/log/nginx/error.log; 53 | 54 | ## 55 | # Gzip Settings 56 | ## 57 | 58 | gzip on; 59 | gzip_disable "msie6"; 60 | 61 | # gzip_vary on; 62 | # gzip_proxied any; 63 | # gzip_comp_level 6; 64 | # gzip_buffers 16 8k; 65 | # gzip_http_version 1.1; 66 | # gzip_types text/plain text/css application/json application/javascript text/xml application/xml application/xml+rss text/javascript; 67 | 68 | ## 69 | # Virtual Host Configs 70 | ## 71 | 72 | include /etc/nginx/conf.d/*.conf; 73 | include /etc/nginx/sites-enabled/*; 74 | } 75 | -------------------------------------------------------------------------------- /icinga2/services/ntp.conf: -------------------------------------------------------------------------------- 1 | # 2 | # ntp.srv.in.ffho.net 3 | # 4 | 5 | # Anycast NTP server 6 | object Host "ntp.srv.in.ffho.net" { 7 | import "generic-dummy-host" 8 | 9 | display_name = "ntp.srv.in.ffho.net" 10 | 11 | address = "10.132.251.123" 12 | address6 = "2a03:2260:2342:f251::123" 13 | 14 | vars.services = [ 15 | "ntp", 16 | ] 17 | } 18 | 19 | object Host "ntp01.srv.in.ffho.net" { 20 | import "generic-dummy-host" 21 | 22 | display_name = "ntp01.srv.in.ffho.net" 23 | 24 | address = "10.132.251.124" 25 | address6 = "2a03:2260:2342:f251::124" 26 | 27 | vars.services = [ 28 | "ntp", 29 | ] 30 | } 31 | 32 | object Host "ntp02.srv.in.ffho.net" { 33 | import "generic-dummy-host" 34 | 35 | display_name = "ntp02.srv.in.ffho.net" 36 | 37 | address = "10.132.251.125" 38 | address6 = "2a03:2260:2342:f251::125" 39 | 40 | vars.services = [ 41 | "ntp", 42 | ] 43 | } 44 | 45 | 46 | # Check NTP servers 47 | apply Service "ntp4" { 48 | import "generic-service" 49 | 50 | check_command = "ntp_time" 51 | vars.ntp_ipv4 = true 52 | 53 | assign where host.address && "ntp" in host.vars.services 54 | } 55 | 56 | apply Service "ntp6" { 57 | import "generic-service" 58 | 59 | check_command = "ntp_time" 60 | vars.ntp_ipv6 
= true 61 | 62 | assign where host.address6 && "ntp" in host.vars.services 63 | } 64 | 65 | # Check if ntpd is running on every node 66 | apply Service "ntpd" { 67 | import "generic-service" 68 | 69 | check_command = "procs" 70 | 71 | if (host.name != NodeName) { 72 | command_endpoint = host.name 73 | } 74 | 75 | vars.procs_critical = "1:" 76 | vars.procs_warning = ":1" 77 | vars.procs_command = "ntpd" 78 | check_interval = 1h 79 | max_check_attempts = 3 80 | retry_interval = 5m 81 | 82 | assign where host.address && host.vars.os == "linux" 83 | } 84 | -------------------------------------------------------------------------------- /rsyslog/rsyslog-early.d/sudo-ignores.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Don't log sudo messages generated by Icinga2 checks. 3 | # 4 | 5 | :msg, contains, "pam_unix(sudo:session): session closed for user root" ~ 6 | :msg, contains, "pam_unix(cron:session): session opened for user root by (uid=0)" ~ 7 | :msg, contains, "pam_unix(sudo:session): session opened for user root by (uid=0)" ~ 8 | 9 | # Interfaces 10 | :msg, contains, "nagios : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/local/share/monitoring-plugins/check_ifupdown2" ~ 11 | :msg, contains, "root : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/sbin/ifquery -c -a" ~ 12 | 13 | # OSPF 14 | :msg, contains, "nagios : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/local/share/monitoring-plugins/check_bird_ospf" ~ 15 | :msg, contains, "root : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/sbin/birdc show ospf neighbors" ~ 16 | :msg, contains, "root : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/sbin/birdc6 show ospf neighbors" ~ 17 | :msg, contains, "root : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/sbin/birdc show ospf interface" ~ 18 | :msg, contains, "root : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/sbin/birdc6 show ospf interface" ~ 19 | 20 | # BGP 21 | :msg, contains, "nagios : TTY=unknown ; PWD=/ ; 
USER=root ; COMMAND=/usr/local/share/monitoring-plugins/check_bird_bgp --asn 65132" ~ 22 | :msg, contains, "root : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/sbin/birdc show protocols all" ~ 23 | :msg, contains, "root : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/sbin/birdc6 show protocols all" ~ 24 | 25 | # Conntrack 26 | :msg, contains, "nagios : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/local/share/monitoring-plugins/check_conntrack_size" ~ 27 | 28 | # Mail 29 | :msg, contains, "nagios : TTY=unknown ; PWD=/ ; USER=root ; COMMAND=/usr/lib/nagios/plugins/check_mailq" ~ 30 | -------------------------------------------------------------------------------- /dns-server/named.conf.options.recursor: -------------------------------------------------------------------------------- 1 | // 2 | // Bind options (Salt managed) 3 | // 4 | 5 | options { 6 | directory "/var/cache/bind"; 7 | 8 | // If there is a firewall between you and nameservers you want 9 | // to talk to, you may need to fix the firewall to allow multiple 10 | // ports to talk. See http://www.kb.cert.org/vuls/id/800113 11 | 12 | // If your ISP provided one or more IP addresses for stable 13 | // nameservers, you probably want to use them as forwarders. 14 | // Uncomment the following block, and insert the addresses replacing 15 | // the all-0's placeholder. 16 | 17 | // forwarders { 18 | // 0.0.0.0; 19 | // }; 20 | 21 | //======================================================================== 22 | // If BIND logs error messages about the root key being expired, 23 | // you will need to update your keys. See https://www.isc.org/bind-keys 24 | //======================================================================== 25 | // Disable DNSSEC validation as it will FAIL for all ffXY domains which will 26 | // render them unuseable. 
As bind can only be run in all-on or all-off mode 27 | // this seems to be our only chance for now :-( 28 | dnssec-validation no; 29 | 30 | auth-nxdomain no; # conform to RFC1035 31 | listen-on-v6 { any; }; 32 | 33 | allow-recursion { 34 | 127.0.0.0/8; 35 | ::1/128; 36 | 37 | // Entries from pillar 38 | {%- for entry in salt['pillar.get']('dns-server:auth:allow-recursion', []) %} 39 | {{ entry }}; 40 | {%- endfor %} 41 | }; 42 | 43 | // Disable notifies on non-master DNS 44 | notify no; 45 | 46 | version "Freifunk Hochstift DNS"; 47 | use-v4-udp-ports { range 1024 65535; }; 48 | use-v6-udp-ports { range 1024 65535; }; 49 | }; 50 | 51 | // Allow scraping by bind-exporter 52 | statistics-channels { 53 | inet 127.0.0.1 port 8053 allow { 127.0.0.1; }; 54 | }; 55 | -------------------------------------------------------------------------------- /_modules/ffho_auth.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # 3 | # Maximilian Wilhelm 4 | # -- Mon 23 Jan 2017 12:21:22 AM CET 5 | # 6 | 7 | import collections 8 | 9 | def _ssh_user_allowed (access_config, node_id, node_config, entry_name): 10 | if type (node_config) not in [ dict, collections.OrderedDict ]: 11 | raise Exception ("The pillar node config of node \"%s\" seem to be broken or missing!" % node_id) 12 | 13 | roles = node_config.get ('roles', []) 14 | 15 | # Access config for the given user is the string "global" 16 | if type (access_config) == str: 17 | if access_config == "global": 18 | return True 19 | 20 | if type (access_config) not in [ dict, collections.OrderedDict ]: 21 | raise Exception ("SSH configuration for entry %s seems broken!" % (entry_name)) 22 | 23 | # String "global" found in the access config? 24 | elif "global" in access_config: 25 | return True 26 | 27 | # Is there an entry for this node_id in the 'nodes' list? 
28 | elif node_id in access_config.get ('nodes', {}): 29 | return True 30 | 31 | # Should the key be allowed for any of the roles configured for this node? 32 | for allowed_role in access_config.get ('roles', []): 33 | if allowed_role in roles: 34 | return True 35 | 36 | return False 37 | 38 | 39 | def get_ssh_authkeys (ssh_config, node_config, node_id, username): 40 | auth_keys = [] 41 | 42 | for entry_name, entry in ssh_config['keys'].items (): 43 | access = entry.get ('access', {}) 44 | add_keys = False 45 | 46 | # Skip this key if there's no entry for the given username 47 | if username not in access: 48 | continue 49 | 50 | user_access = access.get (username) 51 | if _ssh_user_allowed (user_access, node_id, node_config, entry_name): 52 | for key in entry.get ('pubkeys', []): 53 | if key not in auth_keys: 54 | auth_keys.append (key) 55 | 56 | return sorted (auth_keys) 57 | -------------------------------------------------------------------------------- /grafana/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # grafana 3 | # 4 | 5 | {% set grafana_cfg = salt['pillar.get']('grafana') %} 6 | 7 | {% set node_config = salt['pillar.get']('node') %} 8 | {% if node_config.get('role') == "prometheus-server" %} 9 | include: 10 | - .prometheus 11 | {% endif %} 12 | 13 | 14 | grafana: 15 | # add Grafana Repo 16 | file.managed: 17 | - names: 18 | - /usr/share/keyrings/grafana.key: 19 | - source: salt://grafana/grafana.key 20 | - /etc/apt/sources.list.d/grafana.list: 21 | - source: salt://grafana/grafana.list.tmpl 22 | - template: jinja 23 | - require: 24 | - file: /usr/share/keyrings/grafana.key 25 | 26 | # install grafana 27 | pkg.installed: 28 | - name: grafana 29 | - require: 30 | - file: /etc/apt/sources.list.d/grafana.list 31 | 32 | service.running: 33 | - name: grafana-server 34 | - enable: True 35 | - require: 36 | - pkg: grafana 37 | - file: /etc/grafana/grafana.ini 38 | - file: /etc/grafana/ldap.toml 39 | - user: 
grafana 40 | - watch: 41 | - file: /etc/grafana/grafana.ini 42 | - file: /etc/grafana/ldap.toml 43 | 44 | # add user 'grafana' to group 'ssl-cert' to access ssl-key file 45 | user.present: 46 | - name: grafana 47 | - system: True 48 | - groups: 49 | - ssl-cert 50 | - require: 51 | - pkg: grafana 52 | 53 | /etc/grafana/grafana.ini: 54 | file.managed: 55 | - source: salt://grafana/grafana.ini.tmpl 56 | - template: jinja 57 | config: {{ grafana_cfg }} 58 | - require: 59 | - pkg: grafana 60 | 61 | 62 | /etc/grafana/ldap.toml: 63 | {% if 'ldap' in grafana_cfg %} 64 | file.managed: 65 | - source: salt://grafana/ldap.toml.tmpl 66 | - template: jinja 67 | config: {{ grafana_cfg.ldap }} 68 | {% else %} 69 | file.absent: 70 | {% endif %} 71 | - require: 72 | - pkg: grafana 73 | -------------------------------------------------------------------------------- /mongodb/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # mongodb 3 | # 4 | 5 | mongodb-repo: 6 | pkgrepo.managed: 7 | - humanname: MongoDB Repo 8 | - file: /etc/apt/sources.list.d/mongodb-org.list 9 | - key_url: https://www.mongodb.org/static/pgp/server-{{ mongodb_version }}.asc 10 | {% if mongodb_version == '4.2' %} 11 | - name: deb http://repo.mongodb.org/apt/debian buster/mongodb-org/4.2 main 12 | {% elif mongodb_version == '4.4' %} 13 | - name: deb http://repo.mongodb.org/apt/debian buster/mongodb-org/4.4 main 14 | {% elif mongodb_version == '5.0' %} 15 | - name: deb http://repo.mongodb.org/apt/debian {{ grains.oscodename }}/mongodb-org/5.0 main 16 | {% endif %} 17 | 18 | mongodb: 19 | pkg.installed: 20 | - pkgs: 21 | - mongodb-org 22 | - python3-pymongo 23 | service.running: 24 | - name: mongod 25 | - enable: True 26 | - require: 27 | - pkg: mongodb 28 | - watch: 29 | - file: /etc/mongod.conf 30 | 31 | # Create mongodb admin user 32 | mongoadmin: 33 | mongodb_user.present: 34 | - name: {{ mongodb_admin_username }} 35 | - passwd: {{ mongodb_admin_password }} 36 | 
- database: admin 37 | - roles: {{ mongodb_admin_roles }} 38 | - user: {{ mongodb_admin_username }} 39 | - password: {{ mongodb_admin_password }} 40 | 41 | # Install mongod config, cronjob, backup script and corresponding config file 42 | /etc/mongod.conf: 43 | file.managed: 44 | - source: salt://mongodb/mongod.conf 45 | - require: 46 | - mongodb_user: mongoadmin 47 | 48 | /etc/cron.d/mongodb_backup: 49 | file.managed: 50 | - source: salt://mongodb/mongodb_backup.cron 51 | 52 | /usr/local/sbin/mongodb_backup: 53 | file.managed: 54 | - source: salt://mongodb/mongodb_backup 55 | - mode: 755 56 | 57 | /etc/mongodb_backup.conf: 58 | file.managed: 59 | - source: salt://mongodb/mongodb_backup.conf 60 | - mode: 600 61 | - user: root 62 | - group: root 63 | -------------------------------------------------------------------------------- /nginx/firmware.srv.in.ffho.net: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/nginx/sites-enabled/firmware.in.ffho.net (Salt managed) 3 | # 4 | 5 | {%- set acme_thumbprint = salt['pillar.get']('acme:thumbprint', False) %} 6 | 7 | server { 8 | listen 80; 9 | listen [::]:80; 10 | 11 | root {{ salt['pillar.get']('node:path:firmware') }}; 12 | 13 | server_name ~^firmware\.((srv\.)?in|im)\.ffho\.net$; 14 | fancyindex on; 15 | fancyindex_exact_size off; 16 | fancyindex_name_length 70; 17 | fancyindex_header /header.html; 18 | fancyindex_localtime on; 19 | fancyindex_default_sort name; 20 | 21 | location / { 22 | try_files $uri $uri/ /index.html =404; 23 | fancyindex_ignore header.html favicon.ico models-short.txt models.txt robots.txt scripts; 24 | } 25 | 26 | {%- if acme_thumbprint %} 27 | location ~ "^/\.well-known/acme-challenge/([-_a-zA-Z0-9]+)$" { 28 | default_type text/plain; 29 | return 200 "$1.{{ acme_thumbprint }}"; 30 | } 31 | {%- endif %} 32 | 33 | location ~ /\. 
{ 34 | deny all; 35 | } 36 | 37 | location /scripts { 38 | deny all; 39 | } 40 | 41 | location ~ ^/(?(ffho(_(\w\w\w))?(_(cty|uml))?))/(stable|testing|experimental)/sysupgrade/((?(stable|testing|experimental))\.manifest)$ { 42 | disable_symlinks off; 43 | try_files $uri /$branch/sysupgrade/$branch.$site_code.manifest /$branch/sysupgrade/$branch.manifest; 44 | } 45 | 46 | location ~ ^/(?(ffho(_(\w\w\w))?(_(cty|uml))?))/(?(stable|testing|experimental))/sysupgrade/(?.*) { 47 | disable_symlinks off; 48 | try_files $uri /$branch/sysupgrade/$file /stable/sysupgrade/$file /testing/sysupgrade/$file /experimental/sysupgrade/$file; 49 | } 50 | 51 | # opkg mirror 52 | location ~^/openwrt/(?.+)$ { 53 | return 302 http://downloads.openwrt.org/releases/$file; 54 | } 55 | 56 | # lede mirror 57 | location ~^/lede/(?.+)$ { 58 | return 302 http://downloads.lede-project.org/releases/$file; 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /dhcp-server/dhcpd.conf: -------------------------------------------------------------------------------- 1 | {%- set dhcp_prefixes = salt['pillar.get']("node:dhcp:server:prefixes", []) -%} 2 | {%- set dns_resolver_IP = salt["pillar.get"]("globals:dns:resolver_v4") -%} 3 | {%- set dns_search_domain = salt["pillar.get"]("globals:dns:search") -%} 4 | {%- set unifi_address = salt["pillar.get"]("globals:unifi:address") -%} 5 | # 6 | # DHCP server configuration (Salt managed) 7 | # 8 | 9 | authoritative; 10 | 11 | ddns-update-style none; 12 | log-facility local7; 13 | 14 | default-lease-time 600; 15 | max-lease-time 3600; 16 | 17 | option domain-name-servers {{ dns_resolver_IP }}; 18 | 19 | option space ubnt; 20 | option ubnt.unifi-address code 1 = ip-address; 21 | class "ubnt" { 22 | match if substring (option vendor-class-identifier, 0, 4) = "ubnt"; 23 | option vendor-class-identifier "ubnt"; 24 | vendor-option-space ubnt; 25 | } 26 | 27 | 28 | {% for prefix in dhcp_prefixes %} 29 | # {{ 
prefix["description"] }} 30 | subnet {{ prefix["network"] }} netmask {{ prefix["netmask"] }} { 31 | {#- We must only define a pool when there's at least one range for the subnet #} 32 | {%- if "ranges" in prefix %} 33 | pool { 34 | {%- endif %} 35 | {%- if not prefix.get("authoritative", True) %} 36 | not authoritative; 37 | {%- endif %} 38 | # monitor: 75% 90% Y {{ grains['nodename'] }}/{{ prefix["description"] }} 39 | 40 | # Use our own IP as gateway for our clients 41 | option routers {{ prefix["routers"] }}; 42 | {%- if prefix.get("role") == "mgmt" %} 43 | option ubnt.unifi-address 10.132.251.21; 44 | option domain-name "{{ dns_search_domain }}"; 45 | {%- endif %} 46 | {% if "ranges" in prefix %} 47 | # Range(s) of IPs to lease to clients 48 | {%- for range in prefix["ranges"] %} 49 | range {{ range }}; 50 | {%- endfor %} 51 | {%- else %} 52 | # No ranges defined, static leases only? 53 | {%- endif %} 54 | {%- if "ranges" in prefix %} 55 | } 56 | {%- endif %} 57 | } 58 | {% endfor %} 59 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Freifunk Hochstift infrastructure - SaltStack configuration 2 | 3 | This repository contains the Salt environment (states + modules) used to configure the infrastructure of the 4 | [Freifunk Hochstift](https://ffho.net) community network. 5 | 6 | It uses the [NetBox Abstraction and Caching Layer (NACL)](https://github.com/BarbarossaTM/nacl) as its interface to communicate with NetBox, which holds all node specific configuration. 7 | This includes the node name, role(s), interfaces, IP addresses, tags, config contexts, etc. 
8 | 9 | ## Principles 10 | 11 | This whole code base follows the principles of [Holistic (network) automation](https://blog.sdn.clinic/2022/01/this-is-the-way-holistic-approach-on-network-automation/), which means that as much configuration bits are derived from properties of nodes or its relationship(s) to other nodes. 12 | This includes but is not limited to, OSPF adjacencies, internal BGP sessions, B.A.T.M.A.N. adv. configuration, Nftables rules, etc. 13 | 14 | Most of these bits live inside the Python modules which are included in this repository (see the `_modules/` directory), which contains modules for authentication, netfilter, and networking related configuration. 15 | The `ffho_net` modules currently is the heart of our SDN logic, with more recent pieces (e.g. iBGP mesh calculation) living inside NACL. 16 | Eventually most logic should move over to NACL or another daemon which takes over the SDN role, so that Salt is only used to apply configuration based on a generic device configuration. 17 | 18 | ## Further Reading 19 | 20 | Our CTO, @BarbarossaTM, has started a [blog series](https://blog.sdn.clinic/2017/09/building-your-own-software-defined-network-with-linux-and-open-source-tools/) about our infrastructure, its architecture and evolution and also [blogs about NetBox related things](https://blog.sdn.clinic/category/automation/netbox/), which may or may not be related to this code base - it mostly is though :-) 21 | -------------------------------------------------------------------------------- /sudo/sudoers.Debian.bookworm: -------------------------------------------------------------------------------- 1 | # 2 | # This file MUST be edited with the 'visudo' command as root. 3 | # 4 | # Please consider adding local content in /etc/sudoers.d/ instead of 5 | # directly modifying this file. 6 | # 7 | # See the man page for details on how to write a sudoers file. 
8 | # 9 | Defaults env_reset 10 | Defaults mail_badpass 11 | Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" 12 | 13 | # This fixes CVE-2005-4890 and possibly breaks some versions of kdesu 14 | # (#1011624, https://bugs.kde.org/show_bug.cgi?id=452532) 15 | Defaults use_pty 16 | 17 | # This preserves proxy settings from user environments of root 18 | # equivalent users (group sudo) 19 | #Defaults:%sudo env_keep += "http_proxy https_proxy ftp_proxy all_proxy no_proxy" 20 | 21 | # This allows running arbitrary commands, but so does ALL, and it means 22 | # different sudoers have their choice of editor respected. 23 | #Defaults:%sudo env_keep += "EDITOR" 24 | 25 | # Completely harmless preservation of a user preference. 26 | #Defaults:%sudo env_keep += "GREP_COLOR" 27 | 28 | # While you shouldn't normally run git as root, you need to with etckeeper 29 | #Defaults:%sudo env_keep += "GIT_AUTHOR_* GIT_COMMITTER_*" 30 | 31 | # Per-user preferences; root won't have sensible values for them. 32 | #Defaults:%sudo env_keep += "EMAIL DEBEMAIL DEBFULLNAME" 33 | 34 | # "sudo scp" or "sudo rsync" should be able to use your SSH agent. 
35 | #Defaults:%sudo env_keep += "SSH_AGENT_PID SSH_AUTH_SOCK" 36 | 37 | # Ditto for GPG agent 38 | #Defaults:%sudo env_keep += "GPG_AGENT_INFO" 39 | 40 | # Host alias specification 41 | 42 | # User alias specification 43 | 44 | # Cmnd alias specification 45 | 46 | # User privilege specification 47 | root ALL=(ALL:ALL) ALL 48 | 49 | # Allow members of group sudo to execute any command 50 | %sudo ALL=(ALL:ALL) ALL 51 | 52 | # See sudoers(5) for more information on "@include" directives: 53 | 54 | @includedir /etc/sudoers.d 55 | -------------------------------------------------------------------------------- /icinga2/icinga2.conf.H_icinga2.in.ffho.net: -------------------------------------------------------------------------------- 1 | # 2 | # Icinga2 main configuration for nodes (Salt managed) 3 | # 4 | 5 | /** 6 | * Icinga 2 configuration file 7 | * - this is where you define settings for the Icinga application including 8 | * which hosts/services to check. 9 | * 10 | * For an overview of all available configuration options please refer 11 | * to the documentation that is distributed as part of Icinga 2. 12 | */ 13 | 14 | /** 15 | * The constants.conf defines global constants. 16 | */ 17 | include "constants.conf" 18 | include "secrets.conf" 19 | 20 | /** 21 | * The zones.conf defines zones for a cluster setup. 22 | * Not required for single instance setups. 23 | */ 24 | include "zones.conf" 25 | 26 | /** 27 | * The Icinga Template Library (ITL) provides a number of useful templates 28 | * and command definitions. 29 | * Common monitoring plugin command definitions are included separately. 30 | */ 31 | include 32 | include 33 | include 34 | include 35 | 36 | /** 37 | * This includes the NSClient++ check commands. These command definitions 38 | * are required on a master node when a client is used as command endpoint. 
39 | */ 40 | include 41 | 42 | /** 43 | * The features-available directory contains a number of configuration 44 | * files for features which can be enabled and disabled using the 45 | * icinga2 feature enable / icinga2 feature disable CLI commands. 46 | * These commands work by creating and removing symbolic links in 47 | * the features-enabled directory. 48 | */ 49 | include "features-enabled/*.conf" 50 | 51 | /** 52 | * Although in theory you could define all your objects in this file 53 | * the preferred way is to create separate directories and files in the conf.d 54 | * directory. Each of these files must have the file extension ".conf". 55 | */ 56 | include_recursive "conf.d" 57 | 58 | # Include command defintions 59 | include_recursive "commands.d" 60 | -------------------------------------------------------------------------------- /salt-minion/minion_conf.tmpl: -------------------------------------------------------------------------------- 1 | # 2 | # Salt minion config (Salt managed) 3 | # 4 | 5 | master: {{ salt_config['master'] }} 6 | master_port: {{ salt_config['master_port'] }} 7 | ipv6: {{ salt_config['ipv6'] }} 8 | 9 | # When waiting for a master to accept the minion's public key, salt will 10 | # continuously attempt to reconnect until successful. This is the time, in 11 | # seconds, between those reconnection attempts. 12 | acceptance_wait_time: 10 13 | 14 | # If this is nonzero, the time between reconnection attempts will increase by 15 | # acceptance_wait_time seconds per iteration, up to this maximum. If this is 16 | # set to zero, the time between reconnection attempts will stay constant. 17 | acceptance_wait_time_max: 0 18 | 19 | # Cache rendered pillar data on the minion. Default is False. 20 | # This may cause 'cachedir'/pillar to contain sensitive data that should be 21 | # protected accordingly. 
22 | minion_pillar_cache: False 23 | 24 | # Set this option to 'True' to force a 'KeyError' to be raised whenever an 25 | # attempt to retrieve a named value from pillar fails. When this option is set 26 | # to 'False', the failed attempt returns an empty string. Default is 'False'. 27 | pillar_raise_on_missing: True 28 | 29 | # The state_verbose and state_output settings can be used to change the way 30 | # state system data is printed to the display. By default all data is printed. 31 | # The state_verbose setting can be set to True or False, when set to False 32 | # all data that has a result of True and no changes will be suppressed. 33 | state_verbose: False 34 | 35 | # The state_output_diff setting changes whether or not the output from 36 | # successful states is returned. Useful when even the terse output of these 37 | # states is cluttering the logs. Set it to True to ignore them. 38 | #state_output_diff: False 39 | 40 | # The state_output_profile setting changes whether profile information 41 | # will be shown for each state run. 
42 | #state_output_profile: True 43 | -------------------------------------------------------------------------------- /Documentation/example-pillar/nodes/gw03.sls: -------------------------------------------------------------------------------- 1 | gw03.in.ffho.net: 2 | id: 11 3 | 4 | sysLocation: BER 5 | 6 | roles: 7 | - router 8 | - batman 9 | - batman_gw 10 | - batman_ext 11 | - fastd 12 | 13 | sites: 14 | - legacy 15 | - pad-cty 16 | - hx-nord 17 | 18 | ifaces: 19 | lo: 20 | prefixes: 21 | - 10.132.255.11/32 22 | - 2a03:2260:2342:ffff::11/128 23 | 24 | eth0: 25 | desc: SysEleven 26 | mac: 52:54:1f:03:01:63 27 | # 28 | prefixes: 29 | - 185.46.137.163/25 30 | - 2a00:13c8:1000:2::163/64 31 | gateway: 32 | - 185.46.137.129 33 | - 2a00:13c8:1000:2::1 34 | vrf: vrf_external 35 | 36 | vlan1015: 37 | desc: L2-BER 38 | mac: 52:54:1f:03:10:15 39 | prefixes: 40 | - /28 41 | - /64 42 | 43 | he-ipv6: 44 | method: tunnel 45 | desc: HE IPv6 Transit 46 | mode: sit 47 | ttl: 255 48 | local: 185.46.137.163 49 | endpoint: 50 | tunnel-physdev: vrf_external 51 | prefixes: 52 | - /64 53 | 54 | br-legacy: 55 | desc: "Site Legacy" 56 | bridge-ports: bat-legacy 57 | prefixes: 58 | - 2001:470:6d:860:8::3/64 59 | 60 | br-pad-cty: 61 | desc: "Site Paderborn City" 62 | bridge-ports: bat-pad-cty 63 | prefixes: 64 | - 10.132.32.3/20 65 | - 2a03:2260:2342:100::3/64 66 | 67 | br-hx-nord: 68 | desc: "Site Hoexter Nord" 69 | bridge-ports: bat-hx-nord 70 | prefixes: 71 | - 10.132.96.3/21 72 | - 2a03:2260:2342:800::3/64 73 | 74 | 75 | fastd: 76 | nodes_pubkey: 77 | intergw_pubkey: 78 | 79 | {% if grains['id'] == 'gw03.in.ffho.net' %} 80 | nodes_privkey: 81 | intergw_privkey: 82 | {% endif %} 83 | -------------------------------------------------------------------------------- /forgejo/forgejo-apt.asc: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | 3 | xsBNBGj+3tMBCAC/rQfjykFLb7tlSXv817TqWrpb7m43RuuqeRzt7IOWvOdJbAEi 4 | 
/R565sQVyzUnbAg6mJ08YdMOy+VuhmtWC/bzVJhsHzwtFmetYyQhWsusNP07s+mo 5 | Q/0v2yPdGB3rh/Erodx8/cWt0qGhyZOnGDsdupMLPtMCeuDNLfqFgU3u++hmH6WB 6 | 25dWTNFP4NNatodPz8PvmX/+sgT19miMvO9Yvo0ptJ7cQSDR2pLYrDURajfPtx+b 7 | 7Zy3dhECCq4ddZ1KNlFP5Guiuje6YYacMUAXV1auB666yEDs2qZql6qgtcik1wt8 8 | fcqIWF17h5EfXAOVBR44QLjk2bPX5s7FDeqFABEBAAHNEShEZWJpYW4gUmVnaXN0 9 | cnkpwsC7BBMBCABvBYJo/t7TAgsHCRAQBgiE7kLIMzUUAAAAAAAcABBzYWx0QG5v 10 | dGF0aW9ucy5vcGVucGdwanMub3JnH3oB1I4LRUgpPhGddmwn4wIVCAIWAAIZAQKb 11 | AwIeARYhBPy3B0wSf3KGBMu03RAGCITuQsgzAABNzgf/fT++jXc/gW18+CQIobiC 12 | DJwS8uWAO8k9USK/9noG9a18Sh8mDwifW0vsct1HDr5uk0nMnX2RX2vjqaWYLLhK 13 | NbaFob2vtkdwr15Gv6+iG3WQx5bH8mUSjTCIeq7oEHxzBRpm/gS4m0T+FM5F+bnw 14 | JLOMAXb5Yb3O91Snw/4hdLBEJ6FpXNbMynIgEXtzAFEy0HkwAufhxCfb9Bf+3EG0 15 | lpgRT5QZeyGZMh10ue+vjaCEdXl8cmKB2tVnH752S+PTbSHMY043c51JwcSCmjrk 16 | wS8bBuYtz5un3jZ8HtKn4hWq20kL8LoSjS4wig+3dK6rzfHtWuM0O3fC38r7bIXh 17 | Qs7ATQRo/t7TAQgAvD4QmBP0pvJ3++sqpUc6UtSgU9jinyCm3ZRHUJywKWsgutfE 18 | KK8XnMtZIRCkZp4wktSoB8H11uyklmdjMvS32tNaUCYt22oVoAQhAmrqls6z6wSH 19 | f3IjyOU7cicffT8iAWRtmfx4W8ehBv84lVALCHLEpQdqcdv8GNqZdcyH1UzaX868 20 | wDgNDuf95rpXeyWN6h6ujIIb/gYxEn0lqMMwJd3JFG1a9fdHNogbUlHwatuJn3bX 21 | mTc7RixnGtJzHP3lXdO6e8wKCGxbbXoD8opJgvAH53t3BNPxOAb/2Lcj+BiBa/ck 22 | 1o+x+tfUsr3Bfh6Fksnphe3YNuaV+to8r2nfhwARAQABwsCsBBgBCABgBYJo/t7T 23 | CRAQBgiE7kLIMzUUAAAAAAAcABBzYWx0QG5vdGF0aW9ucy5vcGVucGdwanMub3Jn 24 | ELfZEeK+yKLKdn8W01uyKAKbDBYhBPy3B0wSf3KGBMu03RAGCITuQsgzAABezwf/ 25 | V5HKQI3E8KYzo1Cw5T3RZcWaboNiBa+1ExJP5ZsMX90LX2q98WCXRh58LkH11fMh 26 | 2w9LsKMjeSmzH+LjIcfJLk1vL+tNK0qcKke1vKWg6vixrTRaA4blbLdlwiUlUq25 27 | zGdpfaMWdkEc/RyvTEaBLVY1wc1Xaz2Xnz3CxJub1q6PD/KthCN6MxPDhqQ3OjqM 28 | LcprL4XxkU4bCJngXEFTlrDy0bvo3t2v8XTSR7S0pDm5WlpUJyplVZPb8l7s0oh/ 29 | d59WfxKY2eSYm6kzlEaUrlASP8WF33Jqky2WohqKdy4T5sM1TpCYv1ppFOQtgaHB 30 | pGZIt8f0fOlglDJoXzBP4Q== 31 | =VAC2 32 | -----END PGP PUBLIC KEY BLOCK----- -------------------------------------------------------------------------------- /pppoe/pap-secrets: 
-------------------------------------------------------------------------------- 1 | # 2 | # /etc/ppp/pap-secrets (Salt managed) 3 | # 4 | # This is a pap-secrets file to be used with the AUTO_PPP function of 5 | # mgetty. mgetty-0.99 is preconfigured to startup pppd with the login option 6 | # which will cause pppd to consult /etc/passwd (and /etc/shadow in turn) 7 | # after a user has passed this file. Don't be disturbed therefore by the fact 8 | # that this file defines logins with any password for users. /etc/passwd 9 | # (again, /etc/shadow, too) will catch passwd mismatches. 10 | # 11 | # This file should block ALL users that should not be able to do AUTO_PPP. 12 | # AUTO_PPP bypasses the usual login program so it's necessary to list all 13 | # system userids with regular passwords here. 14 | # 15 | # ATTENTION: The definitions here can allow users to login without a 16 | # password if you don't use the login option of pppd! The mgetty Debian 17 | # package already provides this option; make sure you don't change that. 18 | 19 | # INBOUND connections 20 | 21 | # Every regular user can use PPP and has to use passwords from /etc/passwd 22 | * hostname "" * 23 | 24 | # UserIDs that cannot use PPP at all. Check your /etc/passwd and add any 25 | # other accounts that should not be able to use pppd! 26 | guest hostname "*" - 27 | master hostname "*" - 28 | root hostname "*" - 29 | support hostname "*" - 30 | stats hostname "*" - 31 | 32 | # OUTBOUND connections 33 | 34 | # Here you should add your userid password to connect to your providers via 35 | # PAP. The * means that the password is to be used for ANY host you connect 36 | # to. Thus you do not have to worry about the foreign machine name. Just 37 | # replace password with your password. 38 | # If you have different providers with different passwords then you better 39 | # remove the following line. 
40 | 41 | # * password 42 | {%- set user = salt['pillar.get']('node:pppoe:user', 'WRONG USER') %} 43 | {%- set pass = salt['pillar.get']('node:pppoe:pass', 'NO PASS') %} 44 | "{{ user }}" * "{{ pass }}" 45 | -------------------------------------------------------------------------------- /forgejo/app.ini.tmpl: -------------------------------------------------------------------------------- 1 | APP_NAME = FFHO Forgejo 2 | APP_SLOGAN = Beyond coding. We Forge. 3 | RUN_USER = forgejo 4 | WORK_PATH = /var/lib/forgejo 5 | RUN_MODE = prod 6 | 7 | [database] 8 | DB_TYPE = postgres 9 | HOST = {{ config.database_host }} 10 | NAME = forgejo 11 | USER = forgejo 12 | PASSWD = {{ config.password }} 13 | SCHEMA = 14 | SSL_MODE = {{ config.database_ssl_mode }} 15 | PATH = /var/lib/forgejo/data/forgejo.db 16 | LOG_SQL = false 17 | 18 | [repository] 19 | ROOT = /var/lib/forgejo/data/forgejo-repositories 20 | 21 | [server] 22 | SSH_DOMAIN = {{ config.url }} 23 | DOMAIN = localhost 24 | HTTP_ADDR = localhost 25 | HTTP_PORT = {{ config.http_port }} 26 | ROOT_URL = https://{{ config.url }}/ 27 | APP_DATA_PATH = /var/lib/forgejo/data 28 | DISABLE_SSH = false 29 | SSH_PORT = {{ config.ssh_port }} 30 | LFS_START_SERVER = true 31 | LFS_JWT_SECRET = {{ config.lfs_jwt_secret }} 32 | OFFLINE_MODE = true 33 | 34 | [lfs] 35 | PATH = /var/lib/forgejo/data/lfs 36 | 37 | [mailer] 38 | ENABLED = false 39 | 40 | [service] 41 | REGISTER_EMAIL_CONFIRM = false 42 | ENABLE_NOTIFY_MAIL = false 43 | DISABLE_REGISTRATION = true 44 | ALLOW_ONLY_EXTERNAL_REGISTRATION = false 45 | ENABLE_CAPTCHA = false 46 | REQUIRE_SIGNIN_VIEW = false 47 | DEFAULT_KEEP_EMAIL_PRIVATE = false 48 | DEFAULT_ALLOW_CREATE_ORGANIZATION = true 49 | DEFAULT_ENABLE_TIMETRACKING = true 50 | NO_REPLY_ADDRESS = noreply.localhost 51 | 52 | [openid] 53 | ENABLE_OPENID_SIGNIN = false 54 | ENABLE_OPENID_SIGNUP = false 55 | 56 | [cron.update_checker] 57 | ENABLED = true 58 | 59 | [session] 60 | PROVIDER = file 61 | 62 | [log] 63 | MODE = 
console 64 | LEVEL = info 65 | ROOT_PATH = /var/lib/forgejo/log 66 | 67 | [repository.pull-request] 68 | DEFAULT_MERGE_STYLE = merge 69 | 70 | [repository.signing] 71 | DEFAULT_TRUST_MODEL = committer 72 | 73 | [security] 74 | INSTALL_LOCK = true 75 | INTERNAL_TOKEN = {{ config.internal_token }} 76 | PASSWORD_HASH_ALGO = {{ config.password_hash_algo }} 77 | 78 | [oauth2] 79 | JWT_SECRET = {{ config.jwt_secret }} 80 | -------------------------------------------------------------------------------- /icinga2/plugins/check_conntrack_size: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # Nagios plugin to check netfilter conntrack size 4 | # 5 | # Maximilian Wilhelm 6 | # -- Fri 11 Mar 2016 08:56:08 PM CET 7 | # 8 | 9 | import argparse 10 | import os.path 11 | import sys 12 | 13 | code = 0 14 | msg = "" 15 | 16 | parser = argparse.ArgumentParser (description = 'check netfilter conntrack table size') 17 | 18 | parser.add_argument ('--warn', '-w', help = "Warning conntrack table usage (percent)", default = "70", type = int) 19 | parser.add_argument ('--crit', '-c', help = "Critical conntrack table usage (percent)", default = "85", type = int) 20 | parser.add_argument ('--no-conntrack', help = "Return code when no conntrack is loaded.", default = "ok", choices = [ "ok", "warn", "crit", "unkn" ]) 21 | 22 | args = parser.parse_args () 23 | 24 | ret_map = { 25 | 'ok' : 0, 26 | 'warn' : 1, 27 | 'crit' : 2, 28 | 'unkn' : 3, 29 | } 30 | 31 | def read_int (path): 32 | try: 33 | with open (path, 'r') as fh: 34 | return float (fh.read ()) 35 | except ValueError as v: 36 | return -1 37 | except IOError as i: 38 | print ("conntrack seems not to be loaded.") 39 | sys.exit (ret_map[args.no_conntrack]) 40 | 41 | num_entries = read_int ("/proc/sys/net/netfilter/nf_conntrack_count") 42 | max_entries = read_int ("/proc/sys/net/netfilter/nf_conntrack_max") 43 | 44 | 45 | usage = num_entries / max_entries * 100 46 | 47 | if 
usage >= args.crit: 48 | code = 2 49 | msg = "Conntrack pool usage over %d%%: %d (%d / %d)" % (args.crit, usage, num_entries, max_entries) 50 | 51 | elif usage >= args.warn: 52 | code = 1 53 | msg = "Conntrack pool usage over %d%%: %d (%d/ %d)" % (args.warn, usage, num_entries, max_entries) 54 | 55 | elif usage < args.warn: 56 | code = 0 57 | msg = "Conntrack pool usage at %d%% (%d / %d)" % (usage, num_entries, max_entries) 58 | 59 | else: 60 | code = 3 61 | msg = "WTF? Please examinte the situation manually and kinly do the needful!" 62 | 63 | print (msg) 64 | sys.exit (code) 65 | -------------------------------------------------------------------------------- /yanic/ff_merge_nodes_json: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | # 3 | # Maximilian Wilhelm 4 | # -- Tue 20 Jun 2017 06:40:18 PM CEST 5 | # 6 | 7 | import argparse 8 | import json 9 | import os 10 | import sys 11 | import time 12 | 13 | parser = argparse.ArgumentParser (description = 'Merge nodes.json files') 14 | parser.add_argument ('files', help = 'Path for nodes.json file(s)', nargs = '+') 15 | parser.add_argument ('--pretty-print', help = 'Pretty-print JSON output', action = 'store_true') 16 | args = parser.parse_args () 17 | 18 | all_nodes = {} 19 | uberdict = {} 20 | 21 | # Read all nodes lists into all_nodes dict, thereby dropping any duplicate nodes. 
22 | for file_path in args.files: 23 | try: 24 | with open (file_path, 'rb') as fh: 25 | nodes = json.load (fh) 26 | except IOError as e: 27 | print (f"Error while reading file '{file_path}': {str(e)}") 28 | sys.exit (1) 29 | 30 | for node in nodes['nodes']: 31 | node_id = node['nodeinfo']['node_id'] 32 | 33 | # If node_id has already been seen make sure to use the newer entry 34 | if node_id in all_nodes: 35 | try: 36 | node_lastseen = time.strptime (node['lastseen'], "%Y-%m-%dT%H:%M:%S%z") 37 | existing_node_lastseen = time.strptime (existing_node_lastseen['lastseen'], "%Y-%m-%dT%H:%M:%S%z") 38 | 39 | # If the node information already stored in all_nodes is more 40 | # recent than the node we just found, don't overwrite it. 41 | if existing_node_lastseen > node_lastseen: 42 | continue 43 | except Exception: 44 | # If parsing a timestamp fails just carry on 45 | continue 46 | 47 | all_nodes[node['nodeinfo']['node_id']] = node 48 | 49 | for key in nodes.keys (): 50 | if key != 'nodes': 51 | uberdict[key] = nodes[key] 52 | 53 | uberdict['nodes'] = list(all_nodes.values ()) 54 | 55 | # Print merged nodes.json's to stdout 56 | if args.pretty_print: 57 | print (json.dumps (uberdict, sort_keys = True, indent = 4, separators = (',', ': '))) 58 | else: 59 | print (json.dumps (uberdict)) 60 | -------------------------------------------------------------------------------- /bird/VRF_external.conf: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Internet table # 3 | ################################################################################ 4 | 5 | {%- set ifaces = salt['pillar.get']('node:ifaces', {}) %} 6 | {%- set have_vrf_external = [] %} 7 | {%- for iface, iface_config in ifaces.items () %} 8 | {%- if iface_config.get ('vrf', '') == 'vrf_external' %} 9 | {%- do have_vrf_external.append (True) %} 10 | {%- break %} 11 | {%- endif %} 12 | {%- 
endfor %} 13 | 14 | {%- if True not in have_vrf_external %} 15 | # 16 | # No vrf_external configured on this node. Nothing to do. 17 | # 18 | {%- else %} 19 | table t_external; 20 | 21 | protocol kernel k_external { 22 | scan time 20; 23 | 24 | learn; 25 | import none; 26 | export all; 27 | 28 | table t_external; 29 | kernel table 1023; 30 | } 31 | 32 | # Add unreachable routes for RFC1918, RFC 6598, APIPA so we don't route 33 | # anything private into the internet + null route some bogons. 34 | protocol static bogon_unreach_ext { 35 | table t_external; 36 | 37 | {%- if proto == 'v4' %} 38 | route 0.0.0.0/8 unreachable; # Host-Subnet 39 | route 10.0.0.0/8 unreachable; # RFC 1918 40 | route 169.254.0.0/16 unreachable; # APIPA 41 | route 172.16.0.0/12 unreachable; # RFC 1918 42 | route 192.0.0.0/24 unreachable; # IANA RESERVED 43 | route 192.0.2.0/24 unreachable; # TEST-NET-1 44 | route 192.168.0.0/16 unreachable; # RFC 1918 45 | route 198.18.0.0/15 unreachable; # BENCHMARK 46 | route 198.51.100.0/24 unreachable; # TEST-NET-2 47 | route 203.0.113.0/24 unreachable; # TEST-NET-3 48 | route 224.0.0.0/3 unreachable; # MCast + Class E 49 | {%- else %} 50 | route ::/96 unreachable; # RFC 4291 51 | route 2001:db8::/32 unreachable; # Documentation 52 | route fec0::/10 unreachable; # Site Local 53 | route fc00::/7 unreachable; # ULA 54 | {%- endif %} 55 | } 56 | {%- endif %} {#- vrf_external exists #} 57 | -------------------------------------------------------------------------------- /rsyslog/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Rsyslog configuration 3 | # 4 | 5 | {% set roles = salt['pillar.get'] ('node:roles') %} 6 | {% set logserver = salt['pillar.get'] ('logging:syslog:logserver') %} 7 | {% set graylog_uri = salt['pillar.get'] ('logging:graylog:syslog_uri') %} 8 | 9 | rsyslog: 10 | pkg.installed: 11 | - name: rsyslog 12 | service.running: 13 | - enable: True 14 | 15 | 16 | /etc/rsyslog-early.d: 17 | 
file.recurse: 18 | - source: salt://rsyslog/rsyslog-early.d 19 | - user: root 20 | - group: root 21 | - file_mode: 644 22 | - dir_mode: 755 23 | - clean: true 24 | - watch_in: 25 | - service: rsyslog 26 | 27 | 28 | /etc/rsyslog.conf: 29 | file.managed: 30 | - watch_in: 31 | - service: rsyslog 32 | {% if 'logserver' in roles %} 33 | - source: salt://rsyslog/rsyslog.conf.logserver 34 | - template: jinja 35 | graylog_uri: {{ graylog_uri }} 36 | {% else %} 37 | - source: salt://rsyslog/rsyslog.conf 38 | - template: jinja 39 | logserver: {{ logserver }} 40 | {% endif %} 41 | 42 | # 43 | # Install filter rules everywhere so we have the same log layout everywhere 44 | # and avoid logging stuff (kernel log, dhcpd, ...) multiple times (daemon.log, 45 | # message, syslog) on every node. 46 | # 47 | /etc/rsyslog.d/ffho.conf: 48 | file.managed: 49 | - source: salt://rsyslog/ffho.conf 50 | - watch_in: 51 | - service: rsyslog 52 | - require: 53 | - file: /etc/rsyslog.d/ffho 54 | 55 | /etc/rsyslog.d/ffho: 56 | file.recurse: 57 | - source: salt://rsyslog/ffho 58 | - file_mode: 644 59 | - dir_mode: 755 60 | - user: root 61 | - group: root 62 | - clean: true 63 | - watch_in: 64 | - service: rsyslog 65 | 66 | /etc/logrotate.d/ffho: 67 | file.managed: 68 | - source: salt://rsyslog/ffho.logrotate 69 | 70 | 71 | {% if 'logserver' in roles %} 72 | /etc/rsyslog.d/zz-debug.conf: 73 | file.managed: 74 | - source: salt://rsyslog/zz-debug.conf 75 | - watch_in: 76 | - service: rsyslog 77 | {% endif %} 78 | -------------------------------------------------------------------------------- /slapd/slapd.default: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/default/slapd (Salt managed) 3 | # 4 | 5 | # Default location of the slapd.conf file or slapd.d cn=config directory. If 6 | # empty, use the compiled-in default (/etc/ldap/slapd.d with a fallback to 7 | # /etc/ldap/slapd.conf). 
8 | SLAPD_CONF= 9 | 10 | # System account to run the slapd server under. If empty the server 11 | # will run as root. 12 | SLAPD_USER="openldap" 13 | 14 | # System group to run the slapd server under. If empty the server will 15 | # run in the primary group of its user. 16 | SLAPD_GROUP="openldap" 17 | 18 | # Path to the pid file of the slapd server. If not set the init.d script 19 | # will try to figure it out from $SLAPD_CONF (/etc/ldap/slapd.conf by 20 | # default) 21 | SLAPD_PIDFILE= 22 | 23 | # slapd normally serves ldap only on all TCP-ports 389. slapd can also 24 | # service requests on TCP-port 636 (ldaps) and requests via unix 25 | # sockets. 26 | # Example usage: 27 | # SLAPD_SERVICES="ldap://127.0.0.1:389/ ldaps:/// ldapi:///" 28 | #SLAPD_SERVICES="ldap:/// ldapi:///" 29 | SLAPD_SERVICES="ldap://127.0.0.1:389/ ldaps:/// ldapi:///" 30 | 31 | # If SLAPD_NO_START is set, the init script will not start or restart 32 | # slapd (but stop will still work). Uncomment this if you are 33 | # starting slapd via some other means or if you don't want slapd normally 34 | # started at boot. 35 | #SLAPD_NO_START=1 36 | 37 | # If SLAPD_SENTINEL_FILE is set to path to a file and that file exists, 38 | # the init script will not start or restart slapd (but stop will still 39 | # work). Use this for temporarily disabling startup of slapd (when doing 40 | # maintenance, for example, or through a configuration management system) 41 | # when you don't want to edit a configuration file. 42 | SLAPD_SENTINEL_FILE=/etc/ldap/noslapd 43 | 44 | # For Kerberos authentication (via SASL), slapd by default uses the system 45 | # keytab file (/etc/krb5.keytab). To use a different keytab file, 46 | # uncomment this line and change the path. 
47 | #export KRB5_KTNAME=/etc/krb5.keytab 48 | 49 | # Additional options to pass to slapd 50 | SLAPD_OPTIONS="" 51 | -------------------------------------------------------------------------------- /Documentation/example-pillar/nodes/bbr-kt.sls: -------------------------------------------------------------------------------- 1 | bbr-kt.in.ffho.net: 2 | sysLocation: KT 3 | 4 | roles: 5 | - batman 6 | - router 7 | - ffrl-exit 8 | 9 | sites: 10 | - legacy 11 | - pad-cty 12 | 13 | ifaces: 14 | lo: 15 | prefixes: 16 | - 10.132.255.197/32 17 | - 2a03:2260:2342:ffff::197/128 18 | 19 | bond0: 20 | bond-slaves: "eth0 eth1" 21 | mtu: 1600 22 | 23 | vlan2200: 24 | desc: "<-> bbr-voba" 25 | vlan-raw-device: bond0 26 | prefixes: 27 | - /31 28 | - /126 29 | batman_connect_sites: legacy 30 | 31 | vlan2201: 32 | desc: "<-> bbr-upb" 33 | vlan-raw-device: bond0 34 | prefixes: 35 | - /31 36 | - /126 37 | batman_connect_sites: legacy 38 | 39 | vlan2205: 40 | desc: "<-> bbr-dl0ps" 41 | vlan-raw-device: bond0 42 | prefixes: 43 | - /31 44 | - /126 45 | 46 | vlan3007: 47 | desc: "Mgmt KT" 48 | vlan-raw-device: bond0 49 | prefixes: 50 | - /24 51 | mtu: 1500 52 | 53 | vlan4006: 54 | desc: "T-DSL" 55 | vlan-raw-device: bond0 56 | vrf: vrf_external 57 | mtu: 1500 58 | 59 | 60 | # # DUS 61 | # gre_ffrl_dus_a: 62 | # type: GRE_FFRL 63 | # endpoint: 185.66.193.0 64 | # local: <$DSL IP> 65 | # tunnel-physdev: ppp0 66 | # prefixes: 67 | # - /31 68 | # - /126 69 | # 70 | # gre_ffrl_dus_b: 71 | # [...] 
72 | # 73 | # # FRA 74 | # gre_ffrl_fra_a: 75 | # 76 | # gre_ffrl_fra_b: 77 | # 78 | # # BER 79 | # gre_ffrl_ber_a: 80 | # 81 | # gre_ffrl_ber_b: 82 | 83 | # NAT IP 84 | nat: 85 | link-type: dummy 86 | prefixes: 87 | - 185.66.x.y/32 88 | 89 | alfred: 90 | location_lat: '51.726572935605475' 91 | location_lon: '8.798632621765135' 92 | 93 | {% if grains['id'] == 'bbr-kt.in.ffho.net' %} 94 | pppoe: 95 | user: "<081547112342>#0001@$ISP.de" 96 | pass: "<1234567890>" 97 | {% endif %} 98 | --------------------------------------------------------------------------------