├── .gitignore ├── docker ├── unifi │ ├── env.unifi │ └── docker-compose.yml ├── nextcloud │ ├── web │ │ ├── Dockerfile │ │ └── nginx.conf │ ├── env.nextcloud │ ├── README.md │ └── docker-compose.yml ├── tig │ ├── env.grafana │ └── docker-compose.yml └── portainer │ └── docker-compose.yml ├── etc ├── systemd │ └── system │ │ ├── transmission-daemon.service.d │ │ └── local.conf │ │ ├── mongod.service │ │ ├── certbot-renew-all.timer │ │ ├── glances-web.service │ │ ├── certbot-renew-all.service │ │ ├── glances-server.service │ │ └── nextcloud-backup.service ├── telegraf │ ├── telegraf.d │ │ ├── inputs.filecount.conf │ │ ├── inputs.docker_log.conf │ │ ├── inputs.exec.conf │ │ ├── inputs.disk.conf │ │ ├── inputs.cpu.conf │ │ ├── inputs.systemd_units.conf │ │ ├── inputs.no_configuration.conf │ │ ├── inputs.tail.conf │ │ ├── inputs.diskio.conf │ │ ├── inputs.docker.conf │ │ ├── outputs.influxdb.conf │ │ └── inputs.mysql.conf │ ├── scripts │ │ └── disk-usage.sh │ ├── README.md │ ├── telegraf_env.sample.sh │ └── telegraf.conf ├── motd ├── ssh │ └── sshd_config ├── nginx │ └── sites-available │ │ └── reverse.conf └── cups │ └── cupsd.conf ├── packages.txt ├── scripts ├── pip-upgrade-all.sh ├── utils │ ├── add-to-cron.sh │ ├── remove-from-cron.sh │ └── add-alias.sh ├── apt-install-from-file.sh ├── lanscan.sh ├── provisions │ ├── install-zeroconf-mdns.sh │ ├── install-hplip.sh │ ├── setup-initial.sh │ ├── create-letsencrypt-cert-apt.sh │ ├── create-letsencrypt-cert-apt-nginx.sh │ ├── install-tick-stack.sh │ ├── install-nginx-with-ssl.sh │ ├── create-letsencrypt-cert-from-source.sh │ ├── dynamic-dns-duck-dns.sh │ ├── enable-unattended-upgrades.sh │ └── nextcloud-version-manager.sh ├── create-self-signed-ssl-cert.sh └── backup_rsync_compress.sh ├── .vscode └── settings.json ├── packages ├── datadog │ ├── template.service │ └── install.sh ├── sickchill │ ├── template.service │ └── install.sh ├── homebridge │ ├── homebridge.service │ └── install.sh ├── grafana │ └── install.sh ├── plex │ └── install.sh ├── influxdb │ └── install-influxdb.sh ├── telegraf │ └── install.sh ├── node │ └── install_arm.sh ├── samba │ └── install.sh ├── miflora │ └── install.sh ├── docker │ ├── install_docker_compose.sh │ └── install_docker.sh └── motioneye │ └── install_arm.sh ├── .screenrc ├── .github └── workflows │ └── main.yml ├── .bash_prompt ├── install.template ├── .exports ├── .functions ├── .aliases ├── .profile ├── .inputrc └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | debug.sh 3 | -------------------------------------------------------------------------------- /docker/unifi/env.unifi: -------------------------------------------------------------------------------- 1 | PGID=1000 2 | PUID=1000 3 | TZ=Europe/Brussels 4 | -------------------------------------------------------------------------------- /etc/systemd/system/transmission-daemon.service.d/local.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | User=pi 3 | Group=pi 4 | -------------------------------------------------------------------------------- /docker/nextcloud/web/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:alpine 2 | 3 | COPY nginx.conf /etc/nginx/nginx.conf 4 | -------------------------------------------------------------------------------- /packages.txt: -------------------------------------------------------------------------------- 1 | nmap 2 | 
python-devel 3 | python-pip 4 | samba 5 | screen 6 | vim 7 | unattended-upgrades 8 | -------------------------------------------------------------------------------- /docker/nextcloud/env.nextcloud: -------------------------------------------------------------------------------- 1 | # Postgres 2 | POSTGRES_DB=nextcloud 3 | 4 | # Nextcloud 5 | POSTGRES_HOST=db 6 | NEXTCLOUD_TRUSTED_DOMAINS=localhost 7 | -------------------------------------------------------------------------------- /docker/tig/env.grafana: -------------------------------------------------------------------------------- 1 | GF_INSTALL_PLUGINS=grafana-clock-panel,grafana-simple-json-datasource,jdbranham-diagram-panel,snuids-trafficlights-panel,raintank-worldping-app 2 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.filecount.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/filecount 2 | [[inputs.filecount]] 3 | directories = [] 4 | size = "1MB" 5 | -------------------------------------------------------------------------------- /scripts/pip-upgrade-all.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | # ▲ Updates pip and any outdated package. 4 | 5 | pip install --upgrade pip 6 | pip list --outdated | cut -d' ' -f1 | xargs pip install --upgrade -------------------------------------------------------------------------------- /scripts/utils/add-to-cron.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Adds a new cronjob to the user's crontab. 4 | 5 | # @arg: Cronjob to add to the crontab 6 | (crontab -l 2>/dev/null; echo "$1") | crontab - 7 | -------------------------------------------------------------------------------- /etc/systemd/system/mongod.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=MongoDB Daemon 3 | 4 | [Service] 5 | ExecStart=/usr/bin/mongod 6 | ExecStop=killall /usr/bin/mongod 7 | 8 | [Install] 9 | WantedBy=multi-user.target 10 | -------------------------------------------------------------------------------- /etc/telegraf/scripts/disk-usage.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "[" 4 | du -ks "$@" | awk '{if (NR!=1) {printf ",\n"};printf " { \"directory_size_kilobytes\": "$1", \"path\": \""$2"\" }";}' 5 | echo 6 | echo "]" 7 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.docker_log.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker_log 2 | [[inputs.docker_log]] 3 | endpoint = "unix:///var/run/docker.sock" 4 | container_name_include = [] 5 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "**/etc/ssh/**": "ssh_config", 4 | "**/etc/nginx/**": "nginx", 5 | "**/etc/telegraf/**/*.conf": "toml", 6 | "**/etc/systemd/system/**": "systemd-unit-file", 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /etc/systemd/system/certbot-renew-all.timer: 
-------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Run certbot-renew-all.service twice a day on a random minute within the hour 3 | 4 | [Timer] 5 | OnCalendar=monthly 6 | Persistent=true 7 | 8 | [Install] 9 | WantedBy=multi-user.target 10 | -------------------------------------------------------------------------------- /packages/datadog/template.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Datadog Agent 3 | 4 | [Service] 5 | ExecStart=/home//.datadog-agent/bin/agent start 6 | ExecStop=/home//.datadog-agent/bin/agent stop 7 | 8 | [Install] 9 | WantedBy=multi-user.target 10 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.exec.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/exec 2 | [[inputs.exec]] 3 | commands = [] 4 | timeout = "5s" 5 | name_override = "" 6 | name_suffix = "" 7 | data_format = "json" 8 | tag_keys = [] 9 | -------------------------------------------------------------------------------- /etc/systemd/system/glances-web.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Glances Web Server 3 | 4 | [Service] 5 | # Starts a Glances webserver bound on port 61208 6 | ExecStart=/usr/local/bin/glances -w 7 | Restart=on-abort 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | -------------------------------------------------------------------------------- /etc/telegraf/README.md: -------------------------------------------------------------------------------- 1 | # Loading env vars 2 | 3 | The env file will provide InfluxDB credentials and needs to be copied and sourced. 4 | 5 | ```shell 6 | $ sudo cp telegraf_env.sample.sh /etc/telegraf/env.sh 7 | $ echo "source /etc/telegraf/env.sh" >> ~/.bash_profile 8 | ``` 9 | -------------------------------------------------------------------------------- /scripts/apt-install-from-file.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | 3 | # ▲ Installs multiple packages from a txt file 4 | # much like requirements.txt & Gemfile 5 | 6 | # @arg: Text file with listed packages. 7 | # For example: packages.txt 8 | xargs -a "$1" sudo apt install 9 | -------------------------------------------------------------------------------- /scripts/utils/remove-from-cron.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Removes a given string/pattern from the user's crontab. 4 | 5 | # @arg: Cronjob to remove from the crontab, 6 | # specifically a pattern match to search and delete 7 | crontab -l | grep -v "$1" | crontab - 8 | -------------------------------------------------------------------------------- /scripts/lanscan.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Scans connected hosts on a given subnet. 4 | 5 | if [[ -z $1 ]]; then 6 | echo "No domain given. 
Specify gateway address to scan from: e.g 192.168.1.1/24" 7 | fi 8 | 9 | nmap "$1" -n -sP | grep report | awk '{print $5}' 10 | -------------------------------------------------------------------------------- /docker/nextcloud/README.md: -------------------------------------------------------------------------------- 1 | # Setup database connection 2 | 3 | ```shell 4 | $ docker exec -it nextcloud_db_1 bash 5 | 6 | $ bash-4.4# su postgres 7 | > createuser -P nextcloud 8 | > Enter password for new role: 9 | > Enter it again: 10 | > createdb -O nextcloud nextcloud 11 | ``` 12 | -------------------------------------------------------------------------------- /etc/systemd/system/certbot-renew-all.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Renew all certs generated by letsencrypts certbot 3 | 4 | [Service] 5 | Type=oneshot 6 | ExecStartPre=/bin/systemctl stop nginx 7 | ExecStart=/home/pi/dev/certbot/certbot-auto renew --quiet 8 | ExecStartPost=/bin/systemctl start nginx 9 | -------------------------------------------------------------------------------- /etc/systemd/system/glances-server.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Glances Server 3 | 4 | [Service] 5 | # Starts a headless Glances server. 6 | # Connect with `glances -c ` 7 | ExecStart=/usr/local/bin/glances -s 8 | Restart=on-abort 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | -------------------------------------------------------------------------------- /.screenrc: -------------------------------------------------------------------------------- 1 | # Use the same shell as login shell (for .bash_profile) 2 | shell -/bin/bash 3 | 4 | # Disable the startup message 5 | startup_message off 6 | 7 | # Set a large scrollback buffer 8 | defscrollback 32000 9 | 10 | # Always start `screen` with UTF-8 enabled (`screen -U`) 11 | defutf8 on 12 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf_env.sample.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | export INFLUXDB_HOST="" 4 | export INFLUXDB_DB="telegraf" 5 | export INFLUXDB_USER="" 6 | export INFLUXDB_PASSWORD="" 7 | 8 | export MYSQL_HOST="" 9 | export MYSQL_PORT="3306" 10 | export MYSQL_USER="" 11 | export MYSQL_PASSWORD="" 12 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: CI 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | 9 | jobs: 10 | validate_shell: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v1 14 | - name: shellcheck 15 | uses: ludeeus/action-shellcheck@0.1.0 16 | -------------------------------------------------------------------------------- /.bash_prompt: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function _update_ps1() { 4 | PS1="$($HOME/.powerline-shell/powerline-shell.py $? 
2> /dev/null)\n \[${yellow}\]→ " 5 | } 6 | 7 | if [ "$TERM" != "linux" ]; then 8 | PROMPT_COMMAND="_update_ps1; $PROMPT_COMMAND" 9 | fi 10 | 11 | PS2="\[${yellow}\]→ \[${reset}\]"; 12 | export PS2 13 | -------------------------------------------------------------------------------- /packages/sickchill/template.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=SickChill Daemon 3 | 4 | [Service] 5 | User= 6 | Group= 7 | Type=forking 8 | GuessMainPID=no 9 | ExecStart=/usr/bin/python /home//.sickChill/SickBeard.py -q --daemon --nolaunch --datadir=/home//.sickChill 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /packages/homebridge/homebridge.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Node.js HomeKit Server 3 | After=syslog.target network-online.target 4 | 5 | [Service] 6 | Type=simple 7 | User=homebridge 8 | ExecStart=/usr/bin/homebridge -U /var/homebridge 9 | Restart=on-failure 10 | RestartSec=10 11 | KillMode=process 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.disk.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/disk 2 | [[inputs.disk]] 3 | ## By default stats will be gathered for all mount points. 4 | ## Set mount_points will restrict the stats to only the specified mount points. 5 | # mount_points = ["/"] 6 | ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"] 7 | -------------------------------------------------------------------------------- /docker/portainer/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | portainer: 5 | image: portainer/portainer 6 | container_name: portainer 7 | command: -H unix:///var/run/docker.sock 8 | ports: 9 | - 9000:9000 10 | volumes: 11 | - /var/run/docker.sock:/var/run/docker.sock 12 | - data:/data 13 | restart: always 14 | 15 | volumes: 16 | data: 17 | -------------------------------------------------------------------------------- /scripts/utils/add-alias.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Adds a new alias to .aliases. 4 | 5 | # @arg1: Alias name 6 | # @arg2: Aliased command 7 | if [ -z "$1" ] && [ -z "$2" ]; then 8 | echo "[WARN]: Incorrect params specified" 9 | echo "Usage: ./add-alias.sh rel \"exec \${SHELL} -l\"" 10 | echo "Output: alias rel=\"exec \${SHELL} -l\"" 11 | exit 1 12 | fi 13 | 14 | echo "alias $1=\"$2\"" >> .aliases && rel 15 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.cpu.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/cpu 2 | [[inputs.cpu]] 3 | ## Whether to report per-cpu stats or not 4 | percpu = true 5 | ## Whether to report total system cpu stats or not 6 | totalcpu = true 7 | ## If true, collect raw CPU time metrics. 8 | collect_cpu_time = false 9 | ## If true, compute and report the sum of all non-idle CPU states. 
10 | report_active = false 11 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.systemd_units.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/systemd_units 2 | [[inputs.systemd_units]] 3 | ## Set timeout for systemctl execution 4 | # timeout = "1s" 5 | 6 | ## Filter for a specific unit type, default is "service", other possible 7 | ## values are "socket", "target", "device", "mount", "automount", "swap", 8 | ## "timer", "path", "slice" and "scope ": 9 | # unittype = "service" 10 | -------------------------------------------------------------------------------- /scripts/provisions/install-zeroconf-mdns.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Installs ZeroConf (mDNS, Bonjour, Avahi) 5 | 6 | # Check if sudo 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt]" 11 | echo "" 12 | exit 13 | fi 14 | 15 | function main() { 16 | apt install avahi-daemon avahi-discover libnss-mdns && \ 17 | avahi-discover 18 | } 19 | main 20 | -------------------------------------------------------------------------------- /install.template: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Installs : https://... 5 | 6 | # Check if sudo 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, systemd]" 11 | echo "" 12 | exit 13 | fi 14 | 15 | function fn() { 16 | 17 | } 18 | 19 | function main() { 20 | fn && \ 21 | echo "" && \ 22 | echo " was successfully installed and automatically started." 
23 | } 24 | 25 | "${@:-main}" 26 | -------------------------------------------------------------------------------- /docker/unifi/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | unifi: 5 | image: linuxserver/unifi-controller 6 | container_name: unifi-controller 7 | env_file: env.unifi 8 | environment: 9 | - MEM_LIMIT=1024M 10 | volumes: 11 | - ./data:/config 12 | ports: 13 | - 3478:3478/udp 14 | - 10001:10001/udp 15 | - 8080:8080 16 | - 8081:8081 17 | - 8443:8443 18 | - 8843:8843 19 | - 8880:8880 20 | - 9876:6789 21 | restart: unless-stopped 22 | -------------------------------------------------------------------------------- /etc/systemd/system/nextcloud-backup.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Backup Nextcloud config dir and mysql db 3 | 4 | [Service] 5 | Type=oneshot 6 | 7 | # _SERVER and _DATA_DIR envs are preset in a home directory dotfile 8 | ExecStartPre=/bin/systemctl stop apache2 9 | ExecStart=/usr/bin/rsync -Aax $NEXTCLOUD_SERVER/config $NEXTCLOUD_DATA_DIR/thibmaek/files/Backups/self/owncloud/ ; \ 10 | /usr/bin/mysqldump --lock-tables -h [server] -u [username] -p [password] [db_name] > nextcloud-dbbackup_`date +"%Y%m%d"`.bak 11 | ExecStartPost=/bin/systemctl start apache2 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /docker/tig/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | services: 4 | influxdb: 5 | image: influxdb:latest 6 | container_name: influxdb 7 | ports: 8 | - 8086:8086 9 | volumes: 10 | - /opt/appdata/influxdb:/var/lib/influxdb 11 | restart: always 12 | 13 | grafana: 14 | image: grafana/grafana:latest 15 | container_name: grafana 16 | env_file: env.grafana 17 | ports: 18 | - 3000:3000 19 | volumes: 20 | - ./grafana_data:/var/lib/grafana 21 | - ./config.ini:/usr/share/grafana/conf/defaults.ini 22 | depends_on: 23 | - influxdb 24 | restart: always 25 | -------------------------------------------------------------------------------- /.exports: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # => Set Vim as default editor 4 | export EDITOR=vim 5 | 6 | # => Systemd Services 7 | export SYSTEMD_LIB="/lib/systemd/system" 8 | export SYSTEMD_ETC="/etc/systemd/system" 9 | 10 | # => Web Directories 11 | export NGINX_HOME="/etc/nginx" 12 | export APACHE_HOME="/etc/apache2" 13 | export MYSQL_HOME="/etc/mysql" 14 | export WEBROOT="/var/www" 15 | export CERTBOT_HOME="/etc/letsencrypt/live" 16 | 17 | # => Log directories 18 | export LOGS_DIR="/var/log/" 19 | export NGINX_LOGS="/var/log/nginx" 20 | 21 | # Expose a variable to get CPU architecture 22 | ARCH=$(dpkg --print-architecture) 23 | export ARCH 24 | -------------------------------------------------------------------------------- /scripts/create-self-signed-ssl-cert.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Creates a self signed SSL certificate, key and D-H Params file. 4 | 5 | # -e : generate a certificate with a certain expiriation in days (default: 9999) 6 | EXPIRY=9999 7 | 8 | while getopts "h?e:" opt; do 9 | case $opt in 10 | e) EXPIRY=$OPTARG 11 | ;; 12 | h|\?) 
13 | echo "Syntax: -e to specify expiration for certificate" >&2 14 | exit 1 15 | ;; 16 | esac 17 | done 18 | 19 | openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days "$EXPIRY" 20 | openssl rsa -in key.pem -out key.pem 21 | openssl dhparam -out dhparams.pem 2048 22 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.no_configuration.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/kernel 2 | [[inputs.kernel]] 3 | 4 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mem 5 | [[inputs.mem]] 6 | 7 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/processes 8 | [[inputs.processes]] 9 | 10 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/swap 11 | [[inputs.swap]] 12 | 13 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/system 14 | [[inputs.system]] 15 | 16 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/temp 17 | [[inputs.temp]] 18 | -------------------------------------------------------------------------------- /etc/motd: -------------------------------------------------------------------------------- 1 | 2 | 3 |  ___      4 | /\_ \  __  ___  __ __  __ _  5 | \//\ \ /\_\ /' _ `\ /\ \/\ \ /\ \/'\  6 |  \_\ \_ \/\ \ /\ \/\ \ \ \ \_\ \\/> > "$tmpfile" 13 | cat "$tmpfile" 14 | rm -f "$tmpfile" 15 | } 16 | 17 | # Display filesize for files & folders 18 | function fs() { 19 | if du -b /dev/null > /dev/null 2>&1; then 20 | local arg=-sbh; 21 | else 22 | local arg=-sh; 23 | fi 24 | if [[ -n "$@" ]]; then 25 | du $arg -- "$@"; 26 | else 27 | du $arg .[^.]* *; 28 | fi; 29 | } 30 | 31 | ## SYSTEMD 32 | function sysd() { 33 | sudo systemctl "$@" 34 | } 35 | 36 | function sysd-logs() { 37 | if [ -z "$1" ]; then 38 | echo "" 39 | echo "Usage: sysd-logs " 40 | echo " e.g: sysd-logs telegraf" 41 | echo "" 42 | return 43 | fi 44 | 45 | sudo journalctl -u "$1" -f 46 | } 47 | -------------------------------------------------------------------------------- /packages/telegraf/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Installs Telegraf: https://www.influxdata.com/time-series-platform/telegraf/ 5 | 6 | # Check if sudo 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, systemd]" 11 | echo "" 12 | exit 13 | fi 14 | 15 | function install_packages() { 16 | curl -sL https://repos.influxdata.com/influxdb.key | apt-key add - && \ 17 | echo "deb https://repos.influxdata.com/debian stretch stable" | \ 18 | tee /etc/apt/sources.list.d/influxdb.list 19 | 20 | apt update && \ 21 | apt install telegraf 22 | 23 | systemctl enable telegraf && \ 24 | systemctl start telegraf 25 | } 26 | 27 | # TODO: install scripts 28 | 29 | function main() { 30 | apt update 31 | 32 | echo "Installing required packages for apt usage over HTTPS..." 33 | apt install apt-transport-https 34 | 35 | install_packages && \ 36 | echo "" && \ 37 | echo "Telegraf was successfully installed and automatically started." 38 | } 39 | 40 | "${@:-main}" 41 | -------------------------------------------------------------------------------- /.aliases: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Navigation 4 | alias ..="cd .." 
5 | alias ...="cd ../.." 6 | alias ....="cd ../../.." 7 | alias rmrf="sudo rm -rf" 8 | alias temp="mkd temp" 9 | alias pip="sudo pip" 10 | 11 | # `ls` aliases 12 | alias l="ls -lF --color" 13 | alias lsa="ls -AF" 14 | alias lsd="ls -lF --color | grep --color=never '^d'" 15 | alias lsf="ls -lF --color | grep -v '^d'" 16 | 17 | # apt (non-sudo & shorthands) 18 | alias apt="sudo apt" 19 | alias apt-check-update="apt update && apt list --upgradable -a" 20 | alias apt-upgrade="apt update && apt list --upgradable -a && apt upgrade -y" 21 | 22 | # System Level Commands 23 | alias halts="sudo systemctl shutdown" 24 | alias reboots="sudo systemctl reboot" 25 | 26 | # Get the CPU temperature in C° 27 | alias get_cpu_temp="vcgencmd measure_temp" 28 | 29 | # Lookup an installed package 30 | alias aptls="dpkg --get-selections | grep" 31 | 32 | # Get file permissions in octal format 33 | alias octal="stat -c \"%a %n\"" 34 | 35 | # Reload the shell (i.e. invoke it as a login shell) 36 | alias reload="exec \$SHELL -l" 37 | alias rel="reload" 38 | -------------------------------------------------------------------------------- /packages/node/install_arm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # TODO: url 5 | # ▲ Installs Node.js for ARM architectures: https://... 6 | 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, bash, npm]" 11 | echo "" 12 | exit 1 13 | fi 14 | 15 | function install_repo() { 16 | # TODO: use official node mirror? 17 | echo "Installing node@$1 from deb.nodesource.com" 18 | curl -sL "https://deb.nodesource.com/setup_$1.x" | bash - && \ 19 | apt update && \ 20 | apt install nodejs 21 | 22 | echo "Updating to latest npm" 23 | npm install -g npm 24 | } 25 | 26 | function main() { 27 | local nodeVersion 28 | 29 | if [ -z "$1" ]; then 30 | nodeVersion="10" 31 | else 32 | nodeVersion="$1" 33 | fi 34 | 35 | # TODO: check cpu architecture 36 | install_repo "$nodeVersion" && \ 37 | echo "" && \ 38 | echo "Node was successfully installed." && \ 39 | echo " node version: $(node -v)" && \ 40 | echo "Running versions: $(npm -v)" 41 | 42 | unset nodeVersion 43 | } 44 | 45 | "${@:-main}" 46 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.tail.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/tail 2 | [[inputs.tail]] 3 | ## files to tail. 4 | ## These accept standard unix glob matching rules, but with the addition of 5 | ## ** as a "super asterisk". ie: 6 | ## "/var/log/**.log" -> recursively find all .log files in /var/log 7 | ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log 8 | ## "/var/log/apache.log" -> just tail the apache log file 9 | ## 10 | ## See https://github.com/gobwas/glob for more examples 11 | ## 12 | files = ["/var/log/apt/history.log", "/var/log/unattended-upgrades/unattended-upgrades-dpkg.log"] 13 | ## Read file from beginning. 14 | from_beginning = false 15 | ## Whether file is a named pipe 16 | pipe = false 17 | 18 | ## Method used to watch for file updates. Can be either "inotify" or "poll". 19 | # watch_method = "inotify" 20 | 21 | ## Data format to consume. 
22 | ## Each data format has its own unique set of configuration options, read 23 | ## more about them here: 24 | ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md 25 | data_format = "influx" 26 | -------------------------------------------------------------------------------- /scripts/provisions/create-letsencrypt-cert-apt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Creates a new LetsEncrypt certificate with certbot 4 | # and sets up auto renewal 5 | 6 | # -d : The FQDN to create a certificate for 7 | # -e : Email address to sign the certificate to. 8 | DOMAIN= 9 | EMAIL= 10 | 11 | while getopts "h?d:e:" opt; do 12 | case $opt in 13 | d) DOMAIN=$OPTARG 14 | ;; 15 | e) EMAIL=$OPTARG 16 | ;; 17 | h|\?) 18 | echo "Syntax: ./create-letsencrypt-cert-apt.sh -d <domain> -e <email>" >&2 19 | exit 1 20 | ;; 21 | esac 22 | done 23 | 24 | if [[ "$EUID" -ne 0 ]]; then 25 | echo "[WARN]: Please run this script as root" 26 | exit 27 | fi 28 | 29 | # Install certbot from apt (requires the ppa:certbot/certbot PPA on Ubuntu, Debian >=9) 30 | apt install certbot 31 | 32 | # Generate a certificate at /etc/letsencrypt/live 33 | certbot certonly --standalone --preferred-challenges http-01 --email "$EMAIL" -d "$DOMAIN" 34 | 35 | # Add auto-renewal entry to the user's crontab if certs are generated 36 | if [[ -d /etc/letsencrypt/live ]]; then 37 | (crontab -l 2>/dev/null; echo "40 11,23 * * * certbot renew --quiet --no-self-upgrade") | crontab - 38 | fi 39 | -------------------------------------------------------------------------------- /scripts/provisions/create-letsencrypt-cert-apt-nginx.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Creates a new LetsEncrypt certificate with certbot 4 | # and sets up auto renewal 5 | 6 | # -d : The FQDN to create a certificate for 7 | # -e : Email address to sign the certificate to. 8 | DOMAIN= 9 | EMAIL= 10 | 11 | while getopts "h?d:e:" opt; do 12 | case $opt in 13 | d) DOMAIN=$OPTARG 14 | ;; 15 | e) EMAIL=$OPTARG 16 | ;; 17 | h|\?) 
18 | echo "Syntax: ./create-letsencrypt-cert-apt-nginx.sh -d -e " >&2 19 | exit 1 20 | ;; 21 | esac 22 | done 23 | 24 | if [[ "$EUID" -ne 0 ]]; then 25 | echo "[WARN]: Please run this script as root" 26 | exit 27 | fi 28 | 29 | # Install certbot from apt (requires ppa ppa:certbot/certbot on Ubuntu, Debian >=9) 30 | apt install certbot python-certbot-nginx 31 | 32 | # Generate a certificate at /etc/letsencrypt/live and set up for given nginx deomain 33 | certbot --nginx --email "$EMAIL" -d "$DOMAIN" 34 | 35 | # Add auto-renewal script to the user's crontab if certs are generated 36 | if [[ -d /etc/letsencrypt/live ]]; then 37 | (crontab -l 2>/dev/null; echo "40 11,23 * * * certbot renew --quiet --no-self-upgrade) | crontab -") 38 | fi 39 | -------------------------------------------------------------------------------- /scripts/provisions/install-tick-stack.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Installs TICK stack: Telegraf, InfluxDB, Chronograf, Kapacitor 5 | 6 | # Check if sudo 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, systemd]" 11 | echo "" 12 | exit 13 | fi 14 | 15 | function install_packages() { 16 | local tickstack=(telegraf influxdb chronograf kapacitor) 17 | curl -sL https://repos.influxdata.com/influxdb.key | apt-key add - 18 | echo "deb https://repos.influxdata.com/debian stretch stable" | \ 19 | tee /etc/apt/sources.list.d/influxdb.list 20 | 21 | apt update 22 | 23 | for component in "${tickstack[@]}"; do 24 | apt install "$component" 25 | systemctl enable "$component" 26 | systemctl start "$component" 27 | done 28 | 29 | unset tickstack 30 | } 31 | 32 | function main() { 33 | install_packages && \ 34 | echo "" && \ 35 | echo "TICK stack was successfully installed and automatically started." && \ 36 | echo " - InfluxDB: Dont't forget to create a database and a user. HTTP API is available at http://$HOSTNAME:8086" && \ 37 | echo " - Chronograf: Web interface is available at http://$HOSTNAME:8888" 38 | } 39 | 40 | "${@:-main}" 41 | -------------------------------------------------------------------------------- /scripts/provisions/install-nginx-with-ssl.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Installs nginx and sets up a boilerplate reverse proxy 4 | # with self signed OpenSSL certificates. 
5 | 6 | # @protected: nginx directory 7 | _NGINX_DIR=/etc/nginx 8 | 9 | if [[ "$EUID" -ne 0 ]]; then 10 | echo "[WARN]: Please run this script as root" 11 | exit 12 | fi 13 | 14 | # Install Nginx & stop the systemd service 15 | apt install nginx -y 16 | systemctl stop nginx 17 | 18 | # Create a self-signed certificate & dhparams file 19 | # and move them to the right directory 20 | openssl req -x509 -newkey rsa:2048 -keyout $_NGINX_DIR/ssl/key.pem -out $_NGINX_DIR/ssl/cert.pem -days 9999 21 | openssl rsa -in $_NGINX_DIR/ssl/key.pem -out $_NGINX_DIR/ssl/key.pem 22 | chmod 600 $_NGINX_DIR/ssl/key.pem $_NGINX_DIR/ssl/cert.pem 23 | chown root:root $_NGINX_DIR/ssl/key.pem $_NGINX_DIR/ssl/cert.pem 24 | openssl dhparam -out $_NGINX_DIR/ssl/dhparams.pem 2048 25 | 26 | # Unlink the default symlinked site and enable 27 | # our reverse proxy file 28 | unlink $_NGINX_DIR/sites-enabled 29 | cp ../../etc/nginx/reverse $_NGINX_DIR/sites-available/reverse 30 | ln $_NGINX_DIR/sites-available/reverse $_NGINX_DIR/sites-enabled/default 31 | 32 | # Check if the nginx config is valid, then start the service 33 | nginx -t && systemctl start nginx 34 | -------------------------------------------------------------------------------- /.profile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ~/.profile: executed by the command interpreter for login shells. 3 | # This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login 4 | # exists. 5 | # see /usr/share/doc/bash/examples/startup-files for examples. 6 | # the files are located in the bash-doc package. 7 | 8 | # the default umask is set in /etc/profile; for setting the umask 9 | # for ssh logins, install and configure the libpam-umask package. 10 | #umask 022 11 | 12 | # if running bash 13 | if [ -n "$BASH_VERSION" ]; then 14 | # include .bashrc if it exists 15 | if [ -f "$HOME/.bashrc" ]; then 16 | . 
"$HOME/.bashrc" 17 | fi 18 | fi 19 | 20 | # set PATH so it includes user's private bin if it exists 21 | if [ -d "$HOME/bin" ] ; then 22 | PATH="$HOME/bin:$PATH" 23 | fi 24 | 25 | # Append to Bash history instead of overwriting 26 | shopt -s histappend 27 | 28 | # Autocorrect directory-typo's 29 | shopt -s cdspell 30 | 31 | # Load the shell dotfiles, and then some: 32 | for file in ~/.{aliases,bash_prompt,bash_sources,exports,functions,profile_local}; do 33 | [ -r "$file" ] && [ -f "$file" ] && source "$file" 34 | done 35 | unset file 36 | 37 | # Complete screen names 38 | complete -C "perl -e '@w=split(/ /,\$ENV{COMP_LINE},-1);\$w=pop(@w);for(qx(screen -ls)){print qq/\$1\n/ if 39 | (/^\s*\$w/&&/(\d+\.\w+)/||/\d+\.(\$w\w*)/)}'" screen 40 | -------------------------------------------------------------------------------- /.inputrc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Make Tab autocomplete regardless of filename case 4 | set completion-ignore-case on 5 | 6 | # List all matches in case multiple possible completions are possible 7 | set show-all-if-ambiguous on 8 | 9 | # Immediately add a trailing slash when autocompleting symlinks to directories 10 | set mark-symlinked-directories on 11 | 12 | # Do not autocomplete hidden files unless the pattern explicitly begins with a dot 13 | set match-hidden-files off 14 | 15 | # Show all autocomplete results at once 16 | set page-completions off 17 | 18 | # If there are more than 200 possible completions for a word, ask to show them all 19 | set completion-query-items 200 20 | 21 | # Show extra file information when completing, like `ls -F` does 22 | set visible-stats on 23 | 24 | # Be more intelligent when autocompleting by also looking at the text after the cursor. 25 | # For example, when the current line is "cd ~/src/mozil", and 26 | # the cursor is on the "z", pressing Tab will not autocomplete it to "cd && # ~/src/mozillail", but to "cd ~/src/mozilla". (This is supported by the 27 | # Readline used by Bash 4.) 28 | set skip-completed-text on 29 | 30 | # Allow UTF-8 input and output, instead of showing stuff like $'\0123\0456' 31 | set input-meta on 32 | set output-meta on 33 | set convert-meta off 34 | 35 | # Use Alt/Meta + Delete to delete the preceding word 36 | "\e[3;3~": kill-word 37 | -------------------------------------------------------------------------------- /packages/homebridge/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # TODO: url 5 | # ▲ Installs the homebridge: https://... 6 | 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, systemd, npm, chmod, useradd]" 11 | echo "" 12 | exit 1 13 | fi 14 | 15 | function check_requirements() { 16 | if command -v node > /dev/null; then 17 | echo "[WARN]: Node not installed and is required by this script." 
18 | exit 1 19 | fi 20 | } 21 | 22 | function install_packages() { 23 | echo "Installing required pacakges and homebridge through npm" 24 | apt install libavahi-compat-libdnssd-dev && \ 25 | npm install --unsafe-perm -g homebridge 26 | } 27 | 28 | function setup_application() { 29 | echo "Adding a new system user for homebridge and creating its home directory" 30 | useradd --system homebridge && \ 31 | mkdir /var/homebridge && \ 32 | chmod -R 0777 /var/homebridge 33 | 34 | cp homebridge.service /etc/systemd/system/ && \ 35 | systemctl daemon-reload && \ 36 | systemctl enable homebridge && \ 37 | systemctl start homebridge 38 | } 39 | 40 | function main() { 41 | check_requirements && \ 42 | install_packages && \ 43 | setup_application && \ 44 | echo "" && \ 45 | echo "homebridge was successfully installed and automatically started." 46 | } 47 | 48 | "${@:-main}" 49 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.diskio.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/diskio 2 | [[inputs.diskio]] 3 | ## By default, telegraf will gather stats for all devices including 4 | ## disk partitions. 5 | ## Setting devices will restrict the stats to the specified devices. 6 | # devices = ["sda", "sdb", "vd*"] 7 | ## Uncomment the following line if you need disk serial numbers. 8 | # skip_serial_number = false 9 | # 10 | ## On systems which support it, device metadata can be added in the form of 11 | ## tags. 12 | ## Currently only Linux is supported via udev properties. You can view 13 | ## available properties for a device by running: 14 | ## 'udevadm info -q property -n /dev/sda' 15 | # device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"] 16 | # 17 | ## Using the same metadata source as device_tags, you can also customize the 18 | ## name of the device via templates. 19 | ## The 'name_templates' parameter is a list of templates to try and apply to 20 | ## the device. The template may contain variables in the form of '$PROPERTY' or 21 | ## '${PROPERTY}'. The first template which does not contain any variables not 22 | ## present for the device is used as the device name tag. 23 | ## The typical use case is for LVM volumes, to get the VG/LV name instead of 24 | ## the near-meaningless DM-0 name. 25 | # name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"] 26 | -------------------------------------------------------------------------------- /packages/samba/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Installs Samba/CIFS: https://www.samba.org/ 5 | 6 | if [[ "$EUID" -ne 0 ]]; then 7 | echo "" 8 | echo "[WARN]: Please run this script as root" 9 | echo " Required permissions: [apt, systemd, cp, smbpasswd]" 10 | echo " Possibly required permissions: [ufw, usermod]" 11 | echo "" 12 | exit 1 13 | fi 14 | 15 | function install_samba() { 16 | apt update && \ 17 | apt install samba && \ 18 | systemctl stop smbd 19 | } 20 | 21 | function setup_samba_base() { 22 | cp /etc/samba/smbd.conf /etc/samba/smbd.orig 23 | smbpasswd -a "$USER" 24 | if ! groups "$USER" | grep -qw "users"; then 25 | usermod -aG users "$USER" 26 | fi 27 | grep -v -E "^#|^;" /etc/samba/smb.orig | grep . 
> /etc/samba/smb.conf 28 | } 29 | 30 | function allow_ufw_rule() { 31 | if command -v ufw; then 32 | if ufw status | grep -qw active; then 33 | ufw allow samba && ufw reload 34 | fi 35 | fi 36 | } 37 | 38 | function main() { 39 | install_samba && \ 40 | setup_samba_base && \ 41 | allow_ufw_rule && \ 42 | echo "" && \ 43 | echo "Samba was successfully installed and automatically started." && \ 44 | echo "You can connect to the shares defined in /etc/samba/smb.conf using:" && \ 45 | echo " smb://$HOSTNAME" && \ 46 | echo "" && \ 47 | echo "Define additional shares to /etc/samba/smb.conf" 48 | } 49 | 50 | "${@:-main}" 51 | -------------------------------------------------------------------------------- /packages/datadog/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # TODO: url 5 | # ▲ Installs the Datadog agent: https://... 6 | 7 | # Check if sudo 8 | if [[ "$EUID" -ne 0 ]]; then 9 | echo "" 10 | echo "[WARN]: Please run this script as root" 11 | echo " Required permissions: [apt, sh, cp, systemd]" 12 | echo "" 13 | exit 14 | fi 15 | 16 | function build_from_source() { 17 | echo "Installing sysstat to collect system metrics" 18 | apt install sysstat 19 | 20 | echo "Installing datadog-agent from official shell script" 21 | DD_API_KEY="$1" sh -c \ 22 | "$(curl -L https://raw.githubusercontent.com/DataDog/dd-agent/master/packaging/datadog-agent/source/setup_agent.sh)" 23 | } 24 | 25 | function start_service() { 26 | sed -e "s//$USER/" template.service > datadog-agent.service && \ 27 | cp datadog-agent.service /etc/systemd/system/ 28 | systemctl daemon-reload && \ 29 | systemctl enable datadog-agent && \ 30 | systemctl start datadog-agent 31 | } 32 | 33 | function main() { 34 | if [ -z "$1" ]; then 35 | echo "[ERROR]: No API key found!" 36 | echo "Retry and provide an API key to continue installation:" 37 | echo "e.g: ./install.sh hd53hjkdzk383nndh366hdnj1mnhda55" 38 | exit 1 39 | fi 40 | 41 | apt update 42 | 43 | build_from_source "$@" && \ 44 | start_service && \ 45 | echo "" && \ 46 | echo "Datadog agent was successfully installed and automatically started." 47 | } 48 | 49 | "${@:-main}" 50 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 🐧 Linux Confs — Collective repository for Linux OSes 2 | 3 | This repository is aimed at collecting some handy/useful/lengthy/hard scripts and configurations for Linux OSes. 4 | It's the first thing I clone to every new Raspberry Pi or Linux box and then it's basically just a pick & mix of scripts and configurations to install. No install.sh or installation method, only manual cp allowed 🙌🏻 5 | 6 | These script are grafted on Debian Stretch but might also work for Ubuntu. 7 | 8 | ## What's in the box 9 | 10 | Lots of stuff! 11 | 12 | - ~Nearly~ cross platform dotfiles for a bash env 13 | - A copy of the `/etc` folder with: 14 | - A default nginx reverse proxy conf 15 | - Systemd services for mongod, datadog, glances, certbot… 16 | - Scripts of various natures: 17 | - Provisions for LetsEncrypt, Dynamic DNS, Nginx + OpenSSL, Node for armhf archs… 18 | - Utilities for the shell like adding an alias, adding/removing a cronjob 19 | - Loose onetime scripts to generate OpenSSL certs locally, scan the local subnet, upgrade all pip modules 20 | 21 | ## Installation 22 | 23 | It's a pick & mix! 
You can manually copy over files with cp, rsync, drag and drop or choose to copy over the whole folder like `cp -R etc/**/* /etc/`. This is in no way a provisioning tool, then you're better off with Puppet or Ansible. Running headless or automated not recommended since these scripts might require interaction. 24 | 25 | ## Testing? 26 | 27 | Runs test.sh for shellcheck'ing the files on Travis CI or locally in dev with `./test.sh` 28 | -------------------------------------------------------------------------------- /scripts/provisions/create-letsencrypt-cert-from-source.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Creates a new LetsEncrypt certificate with certbot-auto 4 | # and sets up an automatic renewal in the crontab. 5 | 6 | # @protected: location to download certbot-auto from 7 | _CERTBOT_URL=https://dl.eff.org/certbot-auto 8 | 9 | # -d : The FQDN to create a certificate for 10 | # -e : Email address to sign the certificate to. 11 | DOMAIN= 12 | EMAIL= 13 | 14 | while getopts "h?d:e:" opt; do 15 | case $opt in 16 | d) DOMAIN=$OPTARG 17 | ;; 18 | e) EMAIL=$OPTARG 19 | ;; 20 | h|\?) 21 | echo "Syntax: ./create-letsencrypt-cert-from-source.sh -d -e " >&2 22 | exit 1 23 | ;; 24 | esac 25 | done 26 | 27 | # Create a new isolated directory only if it doesn't exist 28 | if [[ ! -d $HOME/.certbot && ! -f $HOME/.certbot/certbot-auto ]]; then 29 | mkdir -p "$HOME/.certbot" 30 | wget -P "$HOME/.certbot" "$_CERTBOT_URL" 31 | chmod a+x "$HOME/.certbot/certbot-auto" 32 | fi 33 | 34 | # Generate a certificate at /etc/letsencrypt/live 35 | "$HOME/.certbot/certbot-auto" certonly --standalone \ 36 | --preferred-challenges http-01 \ 37 | --email "$EMAIL" \ 38 | -d "$DOMAIN" 39 | 40 | # Add auto-renewal script to the user's crontab if certs are generated 41 | if [[ -d /etc/letsencrypt/live ]]; then 42 | (crontab -l 2>/dev/null; echo "40 11,23 * * * $HOME/.certbot/certbot-auto renew --quiet --no-self-upgrade >> $HOME/.certbot/certbot_renew.log") | crontab - 43 | fi 44 | -------------------------------------------------------------------------------- /scripts/provisions/dynamic-dns-duck-dns.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Sets up dynamic dns for the public ip through DuckDNS 4 | # and sets up refreshing it in the crontab. 5 | 6 | # -d : The Duck DNS domain to update 7 | # -t : Duck DNS token to authenticate with the service 8 | DOMAIN= 9 | TOKEN= 10 | 11 | function uninstall_duck_dns() { 12 | echo "Uninstalling…" 13 | rm -rf "$HOME/.duckdns" 14 | crontab -l | grep -v '/.duckdns/update.sh' | crontab - 15 | echo "Deleted $HOME/.duckdns and removed cron entry" 16 | } 17 | 18 | while getopts "h?d:t:" opt; do 19 | case $opt in 20 | d) DOMAIN=$OPTARG ;; 21 | t) TOKEN=$OPTARG ;; 22 | h|\?) 
23 | echo "Syntax: ./dynamic-dns-duck-dns.sh -d -t " >&2 24 | exit 1 25 | ;; 26 | esac 27 | done 28 | 29 | # Create a new isolated directory & update script 30 | mkdir -p "$HOME/.duckdns" 31 | touch "$HOME/.duckdns/update.sh" 32 | echo "echo url=\"https://www.duckdns.org/update?domains=$DOMAIN&token=$TOKEN&ip=\" | curl -k -o $HOME/.duckdns/duck.log -K -" \ 33 | > "$HOME/.duckdns/update.sh" 34 | chmod 700 "$HOME/.duckdns/update.sh" 35 | 36 | # Add the update script to the user's crontab 37 | (crontab -l 2>/dev/null; echo "*/5 * * * * $HOME/.duckdns/update.sh >/dev/null 2>&1") | crontab - 38 | 39 | # Perform a domain & token check 40 | sh "$HOME/.duckdns/update.sh" 41 | # shellcheck disable=SC2002 42 | cat "$HOME/.duckdns/duck.log" | grep "KO" > nul && \ 43 | (echo "Domain and/or token invalid. Please check $HOME/.duckdns/update.sh" && uninstall_duck_dns && exit 1) 44 | 45 | # Restart the cron service 46 | sudo systemctl restart cron 47 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.docker.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/docker 2 | [[inputs.docker]] 3 | ## Docker Endpoint 4 | # To use TCP, set endpoint = "tcp://[ip]:[port]" 5 | # To use environment variables (ie, docker-machine), set endpoint = "ENV" 6 | endpoint = "unix:///var/run/docker.sock" 7 | 8 | ## Only collect metrics for these containers, collect all if empty 9 | # container_names = [] 10 | 11 | ## Containers to include and exclude. Globs accepted. 12 | ## Note that an empty array for both will include all containers 13 | container_name_include = [] 14 | container_name_exclude = [] 15 | 16 | ## Container states to include and exclude. Globs accepted. 17 | ## When empty only containers in the "running" state will be captured. 18 | # container_state_include = [] 19 | # container_state_exclude = [] 20 | 21 | ## Timeout for docker list, info, and stats commands 22 | timeout = "5s" 23 | 24 | ## Whether to report for each container per-device blkio (8:0, 8:1...) and 25 | ## network (eth0, eth1, ...) stats or not 26 | perdevice = true 27 | 28 | ## Whether to report for each container total blkio and network stats or not 29 | total = false 30 | 31 | ## Which environment variables should we use as a tag 32 | # tag_env = ["JAVA_HOME", "HEAP_SIZE"] 33 | 34 | ## Docker labels to include and exclude as tags. Globs accepted. 
35 | ## Note that an empty array for both will include all labels as tags 36 | docker_label_include = [] 37 | docker_label_exclude = [] 38 | 39 | ## Optional TLS Config 40 | # tls_ca = "/etc/telegraf/ca.pem" 41 | # tls_cert = "/etc/telegraf/cert.pem" 42 | # tls_key = "/etc/telegraf/key.pem" 43 | ## Use TLS but skip chain & host verification 44 | # insecure_skip_verify = false 45 | -------------------------------------------------------------------------------- /packages/miflora/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Installs the MiFlora MQTT daemon: https://github.com/ThomDietrich/miflora-mqtt-daemon 5 | 6 | # Check if sudo 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, pip3, systemctl, cp]" 11 | echo "" 12 | exit 13 | fi 14 | 15 | REPO_CLONE_DIR="/opt/miflora-mqtt-daemon" 16 | 17 | function install_packages() { 18 | echo "Installing required packages..." 19 | apt install git && \ 20 | apt install python3 python3-pip && \ 21 | apt install bluetooth bluez 22 | } 23 | 24 | function install_repo() { 25 | local prevDir 26 | prevDir=$(pwd) 27 | 28 | echo "Cloning ThomDietrich/miflora-mqtt-daemon to $REPO_CLONE_DIR" 29 | git clone https://github.com/ThomDietrich/miflora-mqtt-daemon.git "$REPO_CLONE_DIR" 30 | cd "$REPO_CLONE_DIR" 31 | 32 | echo "Installing repo and configuring..." 33 | pip3 install -r requirements.txt 34 | cp "$REPO_CLONE_DIR"/config.{ini.dist,ini} 35 | cd "$prevDir" 36 | 37 | echo "" 38 | echo "Systemd service is not automatically installed because config needs to be finetuned first." 39 | echo "Edit $REPO_CLONE_DIR/config.ini first and then install the service using:" 40 | echo " $ ./install-miflora-mqtt-daemon.sh install_systemd_service" 41 | echo "" 42 | } 43 | 44 | function install_systemd_service() { 45 | cp "$REPO_CLONE_DIR"/template.service /etc/systemd/system/miflora-mqtt-daemon.service 46 | systemctl daemon-reload 47 | 48 | systemctl start miflora-mqtt-daemon.service 49 | systemctl status miflora-mqtt-daemon.service 50 | 51 | systemctl enable miflora-mqtt-daemon.service 52 | } 53 | 54 | function main() { 55 | install_packages 56 | install_repo 57 | } 58 | 59 | "${@:-main}" 60 | -------------------------------------------------------------------------------- /scripts/provisions/enable-unattended-upgrades.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Enables unattended upgrades (patches, security fixes etc.) 
5 | # See: https://wiki.debian.org/UnattendedUpgrades 6 | 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt]" 11 | echo "" 12 | exit 1 13 | fi 14 | 15 | function install_packages() { 16 | apt install unattended-upgrades apt-listchanges 17 | } 18 | 19 | function enable_upgrades() { 20 | echo "Backing up apt configuration to /etc/apt/apt.conf.d/50unattended-upgrades.orig" 21 | cp /etc/apt/apt.conf.d/50unattended-upgrades /etc/apt/apt.conf.d/50unattended-upgrades.orig 22 | 23 | echo "Writing base configuration to /etc/apt/apt.conf.d/50unattended-upgrades" 24 | # TODO: EOF 25 | { 26 | :; 27 | echo "Unattended-Upgrade::Origins-Pattern {" 28 | echo " \"origin=Debian,codename=\${distro_codename},label=Debian-Security\";" 29 | echo "};" 30 | echo "" 31 | echo "Unattended-Upgrade::Package-Blacklist {" 32 | echo "};" 33 | echo "" 34 | echo "Unattended-Upgrade::Mail \"root\";" 35 | } > /etc/apt/apt.conf.d/50unattended-upgrades 36 | 37 | if ! grep "APT::Periodic::Update-Package-Lists \"1\";" /etc/apt/apt.conf.d/20auto-upgrades; then 38 | echo "/etc/apt/apt.conf.d/20auto-upgrades is outdated." 39 | echo "Reconfiguring it now using dpkg-reconfigure in noninteractive mode" 40 | echo unattended-upgrades unattended-upgrades/enable_auto_updates boolean true | debconf-set-selections 41 | dpkg-reconfigure -f noninteractive unattended-upgrades 42 | fi 43 | } 44 | 45 | function main() { 46 | install_packages && \ 47 | enable_upgrades && \ 48 | echo "Unattended upgrades have been enabled for this system." 49 | } 50 | 51 | "${@:-main}" 52 | -------------------------------------------------------------------------------- /packages/sickchill/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # TODO: url 5 | # ▲ Installs SickChill: https://... 6 | 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, systemd, pip, cp]" 11 | echo " Possibly required permissions: [dpkg]" 12 | echo "" 13 | exit 1 14 | fi 15 | 16 | function install_packages() { 17 | echo "Installing required packages" 18 | apt install python-pip python-dev git libssl-dev libxslt1-dev libxslt1.1 \ 19 | libxml2-dev libxml2 libssl-dev libffi-dev build-essential 20 | 21 | echo "Installing pyopenssl (via pip)" 22 | pip install pyopenssl 23 | } 24 | 25 | function install_application() { 26 | # TODO: correct repo 27 | echo "Cloning official SickChill repo" 28 | git clone https://github.com/SickRage/SickRage "$1" 29 | 30 | sed -e "s//$USER/" template.service > sickchill.service && \ 31 | cp sickchill.service /etc/systemd/system/ 32 | systemctl daemon-reload && \ 33 | systemctl enable sickchill && \ 34 | systemctl start sickchill 35 | } 36 | 37 | function build_unrar_from_source() { 38 | # TODO: newer version? 39 | wget http://sourceforge.net/projects/bananapi/files/unrar_5.2.6-1_armhf.deb 40 | dpkg -i unrar_5.2.6-1_armhf.deb 41 | } 42 | 43 | function main() { 44 | 45 | 46 | 47 | local sickChillHome="$HOME/.sickChill" 48 | apt update 49 | 50 | install_packages && \ 51 | install_application "$sickChillHome" && \ 52 | echo "" && \ 53 | echo "SickChill was successfully installed and automatically started." 
&& \ 54 | echo "If you want support for RAR files you can run the following" && \ 55 | echo "to build unrar (non-free) from source:" && \ 56 | echo " sudo ./$(basename "$0") build_unrar_from_source" 57 | } 58 | 59 | "${@:-main}" 60 | -------------------------------------------------------------------------------- /packages/docker/install_docker_compose.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Installs docker-compose. Defaults to git but can also install from apt 5 | 6 | if [[ "$EUID" -ne 0 ]]; then 7 | echo "" 8 | echo "[WARN]: Please run this script as root" 9 | echo " Required permissions: [curl, apt, chmod]" 10 | echo "" 11 | exit 1 12 | fi 13 | 14 | # This function is only callable on the script and not part of the main() function 15 | function install_from_apt() { 16 | echo "Installing docker-compose from apt." 17 | echo "The version installed might be behind on the most recently available version" 18 | echo "from the official docker-compose git repository." 19 | apt update 20 | apt install docker-compose 21 | } 22 | 23 | function install_from_git() { 24 | # We always want latest/stable 25 | downloadURL=$(curl -s "https://api.github.com/repos/docker/compose/releases/latest" \ 26 | | grep "browser_download_url" \ 27 | | grep "$(uname -s)-$(uname -m)" \ 28 | | grep -v ".sha256" \ 29 | | cut -d '"' -f 4) 30 | 31 | echo "Installing docker-compose from the official git repository." 32 | curl -L "$downloadURL" -o /usr/local/bin/docker-compose && \ 33 | chmod +x /usr/local/bin/docker-compose 34 | 35 | echo "Installing bash completion." 36 | curl -L "https://raw.githubusercontent.com/docker/compose/$(docker-compose version --short)/contrib/completion/bash/docker-compose" \ 37 | -o /etc/bash_completion.d/docker-compose 38 | } 39 | 40 | function main() { 41 | if ! command -v docker > /dev/null; then 42 | echo "It seems like Docker is not installed." 43 | echo "This script can not be run until Docker CE is installed!" 44 | exit 1 45 | fi 46 | 47 | install_from_git && \ 48 | echo "" && \ 49 | echo "Docker Compose was successfully installed." 
&& \ 50 | echo "Bash completion was installed to /etc/bash_completion.d/docker-compose" && \ 51 | echo " $(docker-compose --version)" 52 | } 53 | 54 | "${@:-main}" 55 | -------------------------------------------------------------------------------- /packages/motioneye/install_arm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | 4 | # TODO: url 5 | # ▲ Installs motioneye for armhf architectures (Raspberry Pi): 6 | 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, systemd, mkdir, dpkg, pip, cp]" 11 | echo "" 12 | exit 1 13 | fi 14 | 15 | function install_packages() { 16 | echo "Installing motion dependencies" 17 | apt install ffmpeg libmariadbclient18 libpq5 libmicrohttpd12 18 | 19 | echo "Installing motion from the official git repository for most recent version" 20 | echo "This replaces the pre-bundled motion@4 that comes with Debian" 21 | wget https://github.com/Motion-Project/motion/releases/download/release-4.2.2/pi_stretch_motion_4.2.2-1_armhf.deb && \ 22 | dpkg -i pi_stretch_motion_4.2.2-1_armhf.deb 23 | 24 | echo "Installing motioneye dependencies" 25 | apt install python-pip python-dev libssl-dev libcurl4-openssl-dev libjpeg-dev libz-dev && \ 26 | pip install motioneye 27 | } 28 | 29 | function setup_application() { 30 | echo "Copying the default motioneye config" 31 | mkdir -p /etc/motioneye && \ 32 | cp /usr/local/share/motioneye/extra/motioneye.conf.sample /etc/motioneye/motioneye.conf &&\ 33 | mkdir -p /var/lib/motioneye 34 | 35 | echo "Copying the default motioneye systemd service" 36 | cp /usr/local/share/motioneye/extra/motioneye.systemd-unit-local /etc/systemd/system/motioneye.service && \ 37 | systemctl daemon-reload && \ 38 | systemctl enable motioneye && \ 39 | systemctl start motioneye 40 | } 41 | 42 | function main() { 43 | install_packages && \ 44 | setup_application && \ 45 | echo "" && \ 46 | echo "motioneye was successfully installed and automatically started" && \ 47 | echo "To upgrade to future versions of motioneye run:" && \ 48 | echo " pip install motioneye --upgrade" && \ 49 | echo " systemctl restart motioneye" && \ 50 | echo "" && \ 51 | echo "motioneye service was auto started. Service running at:" && \ 52 | echo " http://$HOSTNAME:8765" 53 | } 54 | 55 | "${@:-main}" 56 | -------------------------------------------------------------------------------- /packages/docker/install_docker.sh: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env bash 2 | set -e 3 | 4 | # ▲ Installs Docker CE for 64-bit Linux Debian 5 | 6 | # Check if sudo 7 | if [[ "$EUID" -ne 0 ]]; then 8 | echo "" 9 | echo "[WARN]: Please run this script as root" 10 | echo " Required permissions: [apt, apt-key, usermod]" 11 | echo "" 12 | exit 1 13 | fi 14 | 15 | function get_docker_repo() { 16 | echo "Getting Docker's GPG key and verifying..." 17 | curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - 18 | apt-key fingerprint 0EBFCD88 | grep -q "9DC8 5822 9FC7 DD38 854A E2D8 8D81 803C 0EBF CD88" \ 19 | && echo "Docker GPG key verified" 20 | 21 | echo "Adding Docker's 'stable' repository to apt..."
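# For reference, the repository entry composed below expands to something like
# (example values only, the real ones come from dpkg and lsb_release on the host):
#   deb [arch=armhf] https://download.docker.com/linux/debian buster stable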
22 | add-apt-repository \ 23 | "deb [arch=$(dpkg --print-architecture)] https://download.docker.com/linux/debian \ 24 | $(lsb_release -cs) \ 25 | stable" 26 | } 27 | 28 | function install_docker_ce() { 29 | apt update 30 | apt-cache policy docker-ce 31 | echo "Installing docker-ce and docker-ce-cli..." 32 | apt install docker-ce docker-ce-cli 33 | } 34 | 35 | # This function is only callable on the script and not part of the main() function 36 | function use_overlay2_storage_driver() { 37 | if docker info | grep "Storage Driver: overlay2"; then 38 | echo "Docker is already using overlay2 as its storage driver." 39 | exit 0 40 | fi 41 | 42 | systemctl stop docker 43 | cp -au /var/lib/docker /var/lib/docker.bk 44 | # TODO: EOF 45 | { 46 | :; 47 | echo "{" 48 | echo " \"storage-driver\": \"overlay2\"" 49 | echo "}" 50 | } >> /etc/docker/daemon.json 51 | systemctl start docker 52 | docker info | grep "Storage Driver" 53 | } 54 | 55 | function uninstall() { 56 | systemctl stop docker 57 | apt purge docker-ce && rm -rf /var/lib/docker 58 | } 59 | 60 | function main() { 61 | apt update 62 | 63 | echo "Installing required packages for apt usage over HTTPS..." 64 | apt install apt-transport-https ca-certificates curl gnupg2 software-properties-common 65 | 66 | get_docker_repo && \ 67 | install_docker_ce && \ 68 | usermod -aG docker "${SUDO_USER:-$USER}" && \ 69 | echo "" && \ 70 | echo "Docker was successfully installed and automatically started." && \ 71 | echo "Current user ${SUDO_USER:-$USER} has been added to the docker group" && \ 72 | echo "If you want to enable overlay2 support, rerun this script:" && \ 73 | echo " sudo ./$(basename "$0") use_overlay2_storage_driver" 74 | } 75 | 76 | "${@:-main}" 77 | -------------------------------------------------------------------------------- /etc/nginx/sites-available/reverse.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | listen [::]:80; # listen on ipv6 4 | server_name ; 5 | return 301 https://$server_name$request_uri; # enforce ssl 6 | } 7 | 8 | server { 9 | listen 80; 10 | listen [::]:80; # listen on ipv6 11 | server_name ; 12 | return 301 https://$server_name$request_uri; # enforce ssl 13 | } 14 | 15 | server { 16 | listen 443 ssl http2; 17 | listen [::]:443 ssl http2; # listen on ipv6 18 | server_name ; 19 | 20 | # Add headers to serve security related headers 21 | add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload;"; 22 | add_header X-Content-Type-Options nosniff; 23 | add_header X-Frame-Options "SAMEORIGIN"; 24 | add_header X-XSS-Protection "1; mode=block"; 25 | add_header X-Robots-Tag none; 26 | 27 | # Use self-signed openssl certs 28 | ssl_certificate /var/www/local_certs/.crt; 29 | ssl_certificate_key /var/www/local_certs/.key; 30 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; 31 | ssl_prefer_server_ciphers on; 32 | ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH"; 33 | ssl_session_cache shared:SSL:10m; 34 | 35 | location /simple-reverse-proxy-block { 36 | proxy_pass http://127.0.0.1:3000; 37 | proxy_set_header Host $host; 38 | proxy_set_header X-Real-IP $remote_addr; 39 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 40 | proxy_set_header X-Forwarded-Proto $scheme; 41 | } 42 | 43 | } 44 | 45 | server { 46 | listen 443 ssl http2; 47 | listen [::]:443 ssl http2; # listen on ipv6 48 | server_name ; 49 | 50 | # Use certbot generated certs 51 | ssl_certificate /etc/letsencrypt/live/thibmaek.strangled.net/fullchain.pem; 52 | ssl_certificate_key
/etc/letsencrypt/live/thibmaek.strangled.net/privkey.pem; 53 | 54 | # Add headers to serve security related headers 55 | add_header Strict-Transport-Security "max-age=15768000; includeSubDomains; preload;"; 56 | add_header X-Content-Type-Options nosniff; 57 | add_header X-Frame-Options "SAMEORIGIN"; 58 | add_header X-XSS-Protection "1; mode=block"; 59 | add_header X-Robots-Tag none; 60 | 61 | location /simple-reverse-proxy-block { 62 | proxy_pass http://127.0.0.1:3000; 63 | proxy_set_header Host $host; 64 | proxy_set_header X-Real-IP $remote_addr; 65 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 66 | proxy_set_header X-Forwarded-Proto $scheme; 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /scripts/provisions/nextcloud-version-manager.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # ▲ Upgrades/downgrades a Nextcloud install, also 4 | # making a backup of current installation. 5 | 6 | # @private: Nextcloud installation folder, temp backup dir 7 | _NCROOT=/var/www/nextcloud 8 | _TMPDIR=$HOME/.nextcloud 9 | 10 | # @public: base URL to download from; the version is appended after option parsing 11 | URL=https://download.nextcloud.com/server/releases 12 | 13 | # -v : Specific version to upgrade/downgrade. Default: latest 14 | # -p: Keep the temp working directory @ $_TMPDIR. 15 | VERSION=latest 16 | PERSIST=false 17 | 18 | if [[ ! -d $_NCROOT ]]; then 19 | echo "Current nextcloud installation not found at $_NCROOT! Exiting…" 20 | exit 1; 21 | fi 22 | 23 | occ="$_NCROOT/occ" && echo "occ binary is now: $occ" 24 | _SAFEPATH=$HOME/nextcloud-$(sudo -u www-data php "$occ" config:system:get version) 25 | 26 | while getopts "h?v:p" opt; do 27 | case $opt in 28 | v) VERSION=nextcloud-$OPTARG ;; 29 | p) PERSIST=true ;; 30 | h|?) 31 | echo "Syntax: ./nextcloud-version-manager.sh [-v ] [-p]" >&2 32 | exit 1 33 | ;; 34 | esac 35 | done 36 | 37 | # Create a temp directory to move files around and download 38 | if [[ ! -d $_TMPDIR ]]; then 39 | mkdir -p "$_TMPDIR" 40 | fi; 41 | 42 | # Download the latest/given version 43 | wget -P "$_TMPDIR" "$URL/$VERSION.zip" 44 | 45 | # Unzip/untar the download 46 | if find "$_TMPDIR" -type f -name '*.zip' | grep -q .; then 47 | find "$_TMPDIR" -type f -name '*.zip' -print0 | xargs --null /usr/bin/unzip -d "$_TMPDIR/" 48 | else 49 | find "$_TMPDIR" -type f -name '*.bz2' -print0 | xargs --null /bin/tar -C "$_TMPDIR" -xjf 50 | fi 51 | 52 | # Put the current installation in maintenance mode 53 | # and stop apache2 from running 54 | sudo -u www-data php $occ maintenance:mode --on 55 | sudo systemctl stop apache2 56 | 57 | # Move the current installation somewhere safe 58 | sudo mv "$_NCROOT" "$_SAFEPATH" 59 | 60 | # Copy new version directory to webroot and copy old config back 61 | sudo rsync -Aax "$_TMPDIR/nextcloud" /var/www/ 62 | sudo rsync -Aax "$_SAFEPATH/config" "$_NCROOT" 63 | 64 | # Own the directory to apache user again & restart apache service 65 | sudo chown -R www-data:www-data "$_NCROOT" 66 | sudo systemctl start apache2 67 | 68 | # Start the upgrade process and exit maintenance mode once finished 69 | sudo -u www-data php $occ upgrade && sudo -u www-data php $occ maintenance:mode --off 70 | 71 | if [[ $PERSIST != true ]]; then 72 | rm -rf "$_TMPDIR" 73 | fi 74 | 75 | # Make sure we don't export occ to the path any longer 76 | unset occ; 77 | echo "Unset the occ variable"
-------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/outputs.influxdb.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/outputs/influxdb 2 | [[outputs.influxdb]] 3 | ## The full HTTP or UDP URL for your InfluxDB instance. 4 | ## 5 | ## Multiple URLs can be specified for a single cluster, only ONE of the 6 | ## urls will be written to each interval. 7 | # urls = ["unix:///var/run/influxdb.sock"] 8 | # urls = ["udp://127.0.0.1:8089"] 9 | # urls = ["http://127.0.0.1:8086"] 10 | 11 | ## The target database for metrics; will be created as needed. 12 | database = "${INFLUXDB_DB}" 13 | 14 | ## If true, no CREATE DATABASE queries will be sent. Set to true when using 15 | ## Telegraf with a user without permissions to create databases or when the 16 | ## database already exists. 17 | # skip_database_creation = false 18 | 19 | ## Name of existing retention policy to write to. Empty string writes to 20 | ## the default retention policy. Only takes effect when using HTTP. 21 | # retention_policy = "" 22 | 23 | ## Write consistency (clusters only), can be: "any", "one", "quorum", "all". 24 | ## Only takes effect when using HTTP. 25 | # write_consistency = "any" 26 | 27 | ## Timeout for HTTP messages. 28 | # timeout = "5s" 29 | 30 | ## HTTP Basic Auth 31 | username = "${INFLUXDB_USER}" 32 | password = "${INFLUXDB_PASSWORD}" 33 | 34 | ## HTTP User-Agent 35 | # user_agent = "telegraf" 36 | 37 | ## UDP payload size is the maximum packet size to send. 38 | # udp_payload = "512B" 39 | 40 | ## Optional TLS Config for use on HTTP connections. 41 | # tls_ca = "/etc/telegraf/ca.pem" 42 | # tls_cert = "/etc/telegraf/cert.pem" 43 | # tls_key = "/etc/telegraf/key.pem" 44 | ## Use TLS but skip chain & host verification 45 | # insecure_skip_verify = false 46 | 47 | ## HTTP Proxy override, if unset values the standard proxy environment 48 | ## variables are consulted to determine which proxy, if any, should be used.
49 | # http_proxy = "http://corporate.proxy:3128" 50 | 51 | ## Additional HTTP headers 52 | # http_headers = {"X-Special-Header" = "Special-Value"} 53 | 54 | ## HTTP Content-Encoding for write request body, can be set to "gzip" to 55 | ## compress body or "identity" to apply no encoding. 56 | # content_encoding = "identity" 57 | 58 | ## When true, Telegraf will output unsigned integers as unsigned values, 59 | ## i.e.: "42u". You will need a version of InfluxDB supporting unsigned 60 | ## integer values. Enabling this option will result in field type errors if 61 | ## existing data has been written. 62 | # influx_uint_support = false 63 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.conf: -------------------------------------------------------------------------------- 1 | ## NOTE: This is a default config file generated by telegraf config > telegraf.conf 2 | ## All plugins are loaded from telegraf.d/ 3 | 4 | # Global tags can be specified here in key="value" format. 5 | [global_tags] 6 | # dc = "us-east-1" # will tag all metrics with dc=us-east-1 7 | # rack = "1a" 8 | ## Environment variables can be used as tags, and throughout the config file 9 | # user = "$USER" 10 | 11 | 12 | # Configuration for telegraf agent 13 | [agent] 14 | ## Default data collection interval for all inputs 15 | interval = "10s" 16 | ## Rounds collection interval to 'interval' 17 | ## ie, if interval="10s" then always collect on :00, :10, :20, etc. 18 | round_interval = true 19 | 20 | ## Telegraf will send metrics to outputs in batches of at most 21 | ## metric_batch_size metrics. 22 | ## This controls the size of writes that Telegraf sends to output plugins. 23 | metric_batch_size = 1000 24 | 25 | ## For failed writes, telegraf will cache metric_buffer_limit metrics for each 26 | ## output, and will flush this buffer on a successful write. Oldest metrics 27 | ## are dropped first when this buffer fills. 28 | ## This buffer only fills when writes fail to output plugin(s). 29 | metric_buffer_limit = 10000 30 | 31 | ## Collection jitter is used to jitter the collection by a random amount. 32 | ## Each plugin will sleep for a random time within jitter before collecting. 33 | ## This can be used to avoid many plugins querying things like sysfs at the 34 | ## same time, which can have a measurable effect on the system. 35 | collection_jitter = "0s" 36 | 37 | ## Default flushing interval for all outputs. Maximum flush_interval will be 38 | ## flush_interval + flush_jitter 39 | flush_interval = "10s" 40 | ## Jitter the flush interval by a random amount. This is primarily to avoid 41 | ## large write spikes for users running a large number of telegraf instances. 42 | ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s 43 | flush_jitter = "0s" 44 | 45 | ## By default or when set to "0s", precision will be set to the same 46 | ## timestamp order as the collection interval, with the maximum being 1s. 47 | ## ie, when interval = "10s", precision will be "1s" 48 | ## when interval = "250ms", precision will be "1ms" 49 | ## Precision will NOT be used for service inputs. It is up to each individual 50 | ## service input to set the timestamp at the appropriate precision. 51 | ## Valid time units are "ns", "us" (or "µs"), "ms", "s". 52 | precision = "" 53 | 54 | ## Logging configuration: 55 | ## Run telegraf with debug log messages. 56 | debug = false 57 | ## Run telegraf in quiet mode (error log messages only). 
58 | quiet = false 59 | ## Specify the log file name. The empty string means to log to stderr. 60 | logfile = "" 61 | 62 | ## Override default hostname, if empty use os.Hostname() 63 | hostname = "" 64 | ## If set to true, do no set the "host" tag in the telegraf agent. 65 | omit_hostname = false 66 | -------------------------------------------------------------------------------- /scripts/backup_rsync_compress.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | TARGET_DIR="" 4 | DEST_DIR="$PWD" 5 | PURGE_OLD=false 6 | 7 | function log_scoped() { 8 | echo "[backup_rsync_compress]: $1" 9 | } 10 | 11 | function show_help() { 12 | echo "" 13 | echo "Backup files & folders using rsync and tar. Works great as a cronjob!" 14 | echo "" 15 | echo "Usage: ./backup_rsync_compress.sh [options]" 16 | echo "" 17 | echo "Options:" 18 | echo "" 19 | echo " -h, --help show this help information" 20 | echo " -i, --input the directory or file to backup" 21 | echo " -o, --output the directory where the files to backup are copied to" 22 | echo " -p, --purge delete backups created that are older than 30 days. This will delete any file older than 30 days in the output directory!" 23 | echo "" 24 | exit 0 25 | } 26 | 27 | while [[ $# -gt 0 ]]; do 28 | arg="$1" 29 | case $arg in 30 | -i|--input) 31 | TARGET_DIR=$2 32 | shift 33 | ;; 34 | -o|--output) 35 | DEST_DIR="$2" 36 | shift 37 | ;; 38 | -h|--help) 39 | show_help 40 | exit 0 41 | ;; 42 | -p|--purge) 43 | PURGE_OLD=true 44 | shift 45 | ;; 46 | *) 47 | echo "[Error] Unrecognized option $1" 48 | exit 1 49 | ;; 50 | esac 51 | shift 52 | done 53 | 54 | function do_backup() { 55 | local cleanDirName 56 | cleanDirName=$(basename "$TARGET_DIR" | sed 's/ /_/') 57 | 58 | local backupDirWithSuffix 59 | backupDirWithSuffix="$DEST_DIR/$cleanDirName-$(date +%Y-%m-%d)" 60 | 61 | log_scoped "$(date +%D" "%r): Starting backup task with params:" 62 | echo " Target directory: $TARGET_DIR" 63 | echo " Destination directory: $backupDirWithSuffix" 64 | echo "" 65 | 66 | if [ ! -d "$backupDirWithSuffix" ]; then 67 | log_scoped "Destination directory $backupDirWithSuffix does not exist. Creating it first" 68 | mkdir -p "$backupDirWithSuffix" 69 | fi 70 | 71 | log_scoped "Performing backup..." 72 | if rsync -Ra "$TARGET_DIR" "$backupDirWithSuffix"; then 73 | log_scoped "Successfully backed up $TARGET_DIR" 74 | else 75 | log_scoped "Failed backing up $TARGET_DIR" 76 | fi 77 | 78 | log_scoped "Compressing backup directory to tgz..." 79 | if tar -czf "$backupDirWithSuffix.tgz" "$backupDirWithSuffix" &> /dev/null; then 80 | rm -rf "$backupDirWithSuffix" 81 | echo " Compressed $backupDirWithSuffix to $backupDirWithSuffix.tgz" 82 | echo "" 83 | else 84 | rm -rf "$backupDirWithSuffix" 85 | echo " Failed compressing $backupDirWithSuffix to $backupDirWithSuffix.tgz" 86 | echo "" 87 | log_scoped "$(date +%D" "%r): Backup failed" 88 | echo "" 89 | exit 1 90 | fi 91 | 92 | if [ $PURGE_OLD == true ]; then 93 | log_scoped "Removing backups older than 30 days..." 94 | find "$DEST_DIR" -name '*.tgz' -mtime +30 -delete -print || \ 95 | echo " Failed purging older backups..." 
96 | fi 97 | 98 | log_scoped "$(date +%D" "%r): Backup complete" 99 | } 100 | 101 | do_backup "$@" 102 | -------------------------------------------------------------------------------- /etc/telegraf/telegraf.d/inputs.mysql.conf: -------------------------------------------------------------------------------- 1 | # https://github.com/influxdata/telegraf/tree/master/plugins/inputs/mysql 2 | [[inputs.mysql]] 3 | ## specify servers via a url matching: 4 | ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify|custom]] 5 | ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name 6 | ## e.g. 7 | ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] 8 | ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] 9 | # 10 | ## If no servers are specified, then localhost is used as the host. 11 | servers = ["${MYSQL_USER}:${MYSQL_PASSWORD}@tcp(${MYSQL_HOST}:${MYSQL_PORT})/"] 12 | 13 | ## Selects the metric output format. 14 | ## 15 | ## This option exists to maintain backwards compatibility, if you have 16 | ## existing metrics do not set or change this value until you are ready to 17 | ## migrate to the new format. 18 | ## 19 | ## If you do not have existing metrics from this plugin set to the latest 20 | ## version. 21 | ## 22 | ## Telegraf >=1.6: metric_version = 2 23 | ## <1.6: metric_version = 1 (or unset) 24 | metric_version = 2 25 | 26 | ## if the list is empty, then metrics are gathered from all databasee tables 27 | # table_schema_databases = [] 28 | 29 | ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list 30 | gather_table_schema = true 31 | 32 | ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST 33 | # gather_process_list = false 34 | 35 | ## gather user statistics from INFORMATION_SCHEMA.USER_STATISTICS 36 | # gather_user_statistics = false 37 | 38 | ## gather auto_increment columns and max values from information schema 39 | # gather_info_schema_auto_inc = false 40 | 41 | ## gather metrics from INFORMATION_SCHEMA.INNODB_METRICS 42 | # gather_innodb_metrics = false 43 | 44 | ## gather metrics from SHOW SLAVE STATUS command output 45 | # gather_slave_status = false 46 | 47 | ## gather metrics from SHOW BINARY LOGS command output 48 | # gather_binary_logs = false 49 | 50 | ## gather metrics from PERFORMANCE_SCHEMA.GLOBAL_VARIABLES 51 | # gather_global_variables = true 52 | 53 | ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE 54 | # gather_table_io_waits = false 55 | 56 | ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS 57 | # gather_table_lock_waits = false 58 | 59 | ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE 60 | # gather_index_io_waits = false 61 | 62 | ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS 63 | # gather_event_waits = false 64 | 65 | ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME 66 | # gather_file_events_stats = false 67 | 68 | ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST 69 | # gather_perf_events_statements = false 70 | 71 | ## the limits for metrics form perf_events_statements 72 | # perf_events_statements_digest_text_limit = 120 73 | # perf_events_statements_limit = 250 74 | # perf_events_statements_time_limit = 86400 75 | 76 | ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) 77 | ## example: interval_slow = "30m" 78 | interval_slow = "30m" 79 | 80 | ## Optional TLS Config (will be used if tls=custom parameter specified in server uri) 
81 | # tls_ca = "/etc/telegraf/ca.pem" 82 | # tls_cert = "/etc/telegraf/cert.pem" 83 | # tls_key = "/etc/telegraf/key.pem" 84 | ## Use TLS but skip chain & host verification 85 | # insecure_skip_verify = false 86 | -------------------------------------------------------------------------------- /etc/cups/cupsd.conf: -------------------------------------------------------------------------------- 1 | # 2 | # Configuration file for the CUPS scheduler. See "man cupsd.conf" for a 3 | # complete description of this file. 4 | ## Log general information in error_log - change "warn" to "debug" 5 | # for troubleshooting... 6 | LogLevel warn 7 | PageLogFormat 8 | 9 | # Deactivate CUPS' internal logrotating, as we provide a better one, especially 10 | # LogLevel debug2 gets usable now 11 | MaxLogSize 0 12 | 13 | # Only listen for connections from the local machine. 14 | # Broadcast service on port 631 15 | Port 631 16 | Listen /var/run/cups/cups.sock 17 | 18 | # Show shared printers on the local network. 19 | Browsing On 20 | BrowseLocalProtocols dnssd 21 | 22 | # Default authentication type, when authentication is required... 23 | DefaultAuthType Basic 24 | 25 | # Web interface setting... 26 | WebInterface Yes 27 | 28 | # Restrict access to the server... 29 | 30 | Order allow,deny 31 | # Allow all local devices access 32 | Allow @local 33 | 34 | 35 | # Restrict access to the admin pages... 36 | 37 | Order allow,deny 38 | # Allow all local devices access 39 | Allow @local 40 | 41 | 42 | # Restrict access to configuration files... 43 | 44 | AuthType Default 45 | Require user @SYSTEM 46 | Order allow,deny 47 | # Allow all local devices access 48 | Allow @local 49 | 50 | 51 | # Restrict access to log files... 52 | 53 | AuthType Default 54 | Require user @SYSTEM 55 | Order allow,deny 56 | 57 | 58 | # Set the default printer/job policies... 59 | 60 | # Job/subscription privacy... 61 | JobPrivateAccess default 62 | JobPrivateValues default 63 | SubscriptionPrivateAccess default 64 | SubscriptionPrivateValues default 65 | 66 | # Job-related operations must be done by the owner or an administrator... 67 | 68 | Order deny,allow 69 | 70 | 71 | 72 | Require user @OWNER @SYSTEM 73 | Order deny,allow 74 | 75 | 76 | # All administration operations require an administrator to authenticate... 77 | 78 | AuthType Default 79 | Require user @SYSTEM 80 | Order deny,allow 81 | 82 | 83 | # All printer operations require a printer operator to authenticate... 84 | 85 | AuthType Default 86 | Require user @SYSTEM 87 | Order deny,allow 88 | 89 | 90 | # Only the owner or an administrator can cancel or authenticate a job... 91 | 92 | Require user @OWNER @SYSTEM 93 | Order deny,allow 94 | 95 | 96 | 97 | Order deny,allow 98 | 99 | 100 | 101 | # Set the authenticated printer/job policies... 102 | 103 | # Job/subscription privacy... 104 | JobPrivateAccess default 105 | JobPrivateValues default 106 | SubscriptionPrivateAccess default 107 | SubscriptionPrivateValues default 108 | 109 | # Job-related operations must be done by the owner or an administrator... 110 | 111 | AuthType Default 112 | Order deny,allow 113 | 114 | 115 | 116 | AuthType Default 117 | Require user @OWNER @SYSTEM 118 | Order deny,allow 119 | 120 | 121 | # All administration operations require an administrator to authenticate... 122 | 123 | AuthType Default 124 | Require user @SYSTEM 125 | Order deny,allow 126 | 127 | 128 | # All printer operations require a printer operator to authenticate... 
129 | 130 | AuthType Default 131 | Require user @SYSTEM 132 | Order deny,allow 133 | 134 | 135 | # Only the owner or an administrator can cancel or authenticate a job... 136 | 137 | AuthType Default 138 | Require user @OWNER @SYSTEM 139 | Order deny,allow 140 | 141 | 142 | 143 | Order deny,allow 144 | 145 | 146 | -------------------------------------------------------------------------------- /docker/nextcloud/web/nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes 1; 2 | 3 | error_log /var/log/nginx/error.log warn; 4 | pid /var/run/nginx.pid; 5 | 6 | 7 | events { 8 | worker_connections 1024; 9 | } 10 | 11 | 12 | http { 13 | include /etc/nginx/mime.types; 14 | default_type application/octet-stream; 15 | 16 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 17 | '$status $body_bytes_sent "$http_referer" ' 18 | '"$http_user_agent" "$http_x_forwarded_for"'; 19 | 20 | access_log /var/log/nginx/access.log main; 21 | 22 | sendfile on; 23 | #tcp_nopush on; 24 | 25 | keepalive_timeout 65; 26 | 27 | #gzip on; 28 | 29 | upstream php-handler { 30 | server app:9000; 31 | } 32 | 33 | server { 34 | listen 80; 35 | 36 | # Add headers to serve security related headers 37 | # Before enabling Strict-Transport-Security headers please read into this 38 | # topic first. 39 | # add_header Strict-Transport-Security "max-age=15768000; 40 | # includeSubDomains; preload;"; 41 | # 42 | # WARNING: Only add the preload option once you read about 43 | # the consequences in https://hstspreload.org/. This option 44 | # will add the domain to a hardcoded list that is shipped 45 | # in all major browsers and getting removed from this list 46 | # could take several months. 47 | add_header X-Content-Type-Options nosniff; 48 | add_header X-XSS-Protection "1; mode=block"; 49 | add_header X-Robots-Tag none; 50 | add_header X-Download-Options noopen; 51 | add_header X-Permitted-Cross-Domain-Policies none; 52 | add_header Referrer-Policy no-referrer; 53 | 54 | root /var/www/html; 55 | 56 | location = /robots.txt { 57 | allow all; 58 | log_not_found off; 59 | access_log off; 60 | } 61 | 62 | location = /.well-known/carddav { 63 | return 301 $scheme://$host/remote.php/dav; 64 | } 65 | location = /.well-known/caldav { 66 | return 301 $scheme://$host/remote.php/dav; 67 | } 68 | 69 | # set max upload size 70 | client_max_body_size 10G; 71 | fastcgi_buffers 64 4K; 72 | 73 | # Enable gzip but do not remove ETag headers 74 | gzip on; 75 | gzip_vary on; 76 | gzip_comp_level 4; 77 | gzip_min_length 256; 78 | gzip_proxied expired no-cache no-store private no_last_modified no_etag auth; 79 | gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy; 80 | 81 | # Uncomment if your server is build with the ngx_pagespeed module 82 | # This module is currently not supported. 
83 | #pagespeed off; 84 | 85 | location / { 86 | rewrite ^ /index.php$request_uri; 87 | } 88 | 89 | location ~ ^/(?:build|tests|config|lib|3rdparty|templates|data)/ { 90 | deny all; 91 | } 92 | location ~ ^/(?:\.|autotest|occ|issue|indie|db_|console) { 93 | deny all; 94 | } 95 | 96 | location ~ ^\/(?:index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[ms]-provider\/.+)\.php(?:$|\/) { 97 | fastcgi_split_path_info ^(.+\.php)(/.*)$; 98 | include fastcgi_params; 99 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 100 | fastcgi_param PATH_INFO $fastcgi_path_info; 101 | # fastcgi_param HTTPS on; 102 | #Avoid sending the security headers twice 103 | fastcgi_param modHeadersAvailable true; 104 | fastcgi_param front_controller_active true; 105 | fastcgi_pass php-handler; 106 | fastcgi_intercept_errors on; 107 | fastcgi_request_buffering off; 108 | } 109 | 110 | location ~ ^/(?:updater|oc[ms]-provider)(?:$|/) { 111 | try_files $uri/ =404; 112 | index index.php; 113 | } 114 | 115 | # Adding the cache control header for js and css files 116 | # Make sure it is BELOW the PHP block 117 | location ~ \.(?:css|js|woff2?|svg|gif)$ { 118 | try_files $uri /index.php$request_uri; 119 | add_header Cache-Control "public, max-age=15778463"; 120 | # Add headers to serve security related headers (It is intended to 121 | # have those duplicated to the ones above) 122 | # Before enabling Strict-Transport-Security headers please read into 123 | # this topic first. 124 | # add_header Strict-Transport-Security "max-age=15768000; 125 | # includeSubDomains; preload;"; 126 | # 127 | # WARNING: Only add the preload option once you read about 128 | # the consequences in https://hstspreload.org/. This option 129 | # will add the domain to a hardcoded list that is shipped 130 | # in all major browsers and getting removed from this list 131 | # could take several months. 132 | add_header X-Content-Type-Options nosniff; 133 | add_header X-XSS-Protection "1; mode=block"; 134 | add_header X-Robots-Tag none; 135 | add_header X-Download-Options noopen; 136 | add_header X-Permitted-Cross-Domain-Policies none; 137 | add_header Referrer-Policy no-referrer; 138 | 139 | # Optional: Don't log access to assets 140 | access_log off; 141 | } 142 | 143 | location ~ \.(?:png|html|ttf|ico|jpg|jpeg)$ { 144 | try_files $uri /index.php$request_uri; 145 | # Optional: Don't log access to other assets 146 | access_log off; 147 | } 148 | } 149 | 150 | } 151 | --------------------------------------------------------------------------------