├── .gitattributes
├── .github
│   └── workflows
│       └── shellcheck.yml
├── .gitignore
├── README.md
├── adguardhome
│   └── exportAdGuardHomeBlocked.sh
├── gitPush.sh
├── pihole
│   ├── exportPiholeQueryLog.sh
│   └── piholeBlockratePerClient.sh
├── share
│   └── docker-pihole-unbound
│       └── docker-compose.yml
└── unbound
    └── server.conf

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto eol=lf

--------------------------------------------------------------------------------
/.github/workflows/shellcheck.yml:
--------------------------------------------------------------------------------
name: Shellcheck Lint

on:
  push:
    paths:
      # Run workflow on every push
      # only if a file within the specified paths has been changed:
      - '**/*.sh'

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  build:
    name: Shellcheck Lint

    # This job runs on Linux
    runs-on: ubuntu-latest

    steps:
      # Required to access files of this repository
      - uses: actions/checkout@v4

      # Download Shellcheck and add it to the workflow path
      - name: Download Shellcheck
        run: |
          wget -qO- "https://github.com/koalaman/shellcheck/releases/download/stable/shellcheck-stable.linux.x86_64.tar.xz" | tar -xJv
          chmod +x shellcheck-stable/shellcheck

      # Verify that Shellcheck can be executed
      - name: Check Shellcheck Version
        run: |
          shellcheck-stable/shellcheck --version

      # Run Shellcheck on the repository
      # ---
      # https://github.com/koalaman/shellcheck
      # ---
      # Excluded checks:
      # https://www.shellcheck.net/wiki/SC1091 -- Not following: /etc/rc.status was...
      # https://www.shellcheck.net/wiki/SC1090 -- Can't follow non-constant source. ..
      # ---
      - name: Run Shellcheck
        run: |
          set +e
          find . -type f | grep -v "\.git" | while read -r sh; do
            if [ "$(file --brief --mime-type "$sh")" == 'text/x-shellscript' ]; then
              echo "shellcheck'ing $sh"
              if ! shellcheck-stable/shellcheck --color=always --severity=warning --exclude=SC1091,SC1090 "$sh"; then
                touch some_scripts_have_failed_shellcheck
              fi
            fi
          done
          if [ -f ./some_scripts_have_failed_shellcheck ]; then
            echo "Shellcheck failed for one or more shellscript(s)"
            exit 1
          fi
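
A local pre-push check can approximate the same lint without downloading the stable release tarball. This is a minimal sketch, assuming a distribution-packaged shellcheck (sudo apt install shellcheck); it is not part of the workflow above:

    # lint every shell script with the same severity and exclusions as the CI job
    find . -type f -name '*.sh' -not -path '*/.git/*' | while read -r sh; do
        shellcheck --severity=warning --exclude=SC1091,SC1090 "$sh"
    done
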
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/hagezi/files/9bb2233d82804f7dd0103894a5dae1789dffd67f/.gitignore

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# files
This, that and the other - things I would like to share.

--------------------------------------------------------------------------------
/adguardhome/exportAdGuardHomeBlocked.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Description : Exports the domains blocked by AdGuard Home together with the number of times each was blocked.
# Parameter $1: AdGuard Home workdir data directory in which the querylog.json is located (sudo find / -name querylog.json)
# Example: ./exportAdGuardHomeBlocked.sh docker/adguardhome/workdir/data/
# Requires: jq (sudo apt install jq)

jq -r '. | select(.Result.IsFiltered==true) | [.QH] | @csv' "$1"/querylog.json | sed 's/"//g' | sort | uniq -c | sort -nr -k1
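
The output is one line per blocked domain, prefixed by its hit count (from uniq -c) and sorted in descending order. A usage sketch, reusing the data directory from the example comment above and keeping only the 20 most frequently blocked domains (the output filename is arbitrary):

    ./exportAdGuardHomeBlocked.sh docker/adguardhome/workdir/data/ | head -n 20 > top20-blocked.txt
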
-d "$output" ] && mkdir -p "$output" 37 | 38 | blockedgrav=$output/blocked.gravity.txt 39 | blockedblack=$output/blocked.blacklist.txt 40 | blockedupst=$output/blocked.upstream.txt 41 | 42 | sqlite3 "$piholedb" "SELECT DISTINCT domain FROM queries WHERE type IN (1,2) AND status IN(1,9) ORDER BY domain;" >>"$blockedgrav" 43 | sqlite3 "$piholedb" "SELECT DISTINCT additional_info FROM queries WHERE type IN (1,2) AND status IN(9,10,11) ORDER BY additional_info;" >>"$blockedgrav" 44 | sort -u "$blockedgrav" >"$blockedgrav".tmp 45 | mv "$blockedgrav".tmp "$blockedgrav" 46 | 47 | sqlite3 "$piholedb" "SELECT DISTINCT domain FROM queries WHERE type IN (1,2) AND status IN(4,5,10,11) ORDER BY domain;" >>"$blockedblack" 48 | sqlite3 "$piholedb" "SELECT DISTINCT additional_info FROM queries WHERE type IN (1,2) AND status IN(10,11) ORDER BY additional_info;" >>"$blockedblack" 49 | sort -u "$blockedblack" >"$blockedblack".tmp 50 | mv "$blockedblack".tmp "$blockedblack" 51 | 52 | sqlite3 "$piholedb" "SELECT DISTINCT domain FROM queries WHERE type IN (1,2) AND status IN(6,7,8) ORDER BY domain;" >>"$blockedupst" 53 | sort -u "$blockedupst" >"$blockedupst".tmp 54 | mv "$blockedupst".tmp "$blockedupst" 55 | -------------------------------------------------------------------------------- /pihole/piholeBlockratePerClient.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # --- 4 | # Script to gather some informations about the blockrate per client/IP from PiHole FTL-Database. 5 | # 6 | # The script requires the following package installed: 7 | # sudo apt install sqlite3 8 | # 9 | # Usage: 10 | # ./piholeBlockratePerClient.sh DBPATH (optional! e.g. /etc/pihole/pihole-FTL.db) 11 | # --- 12 | 13 | if [ -z "$1" ]; then 14 | echo "No pi-hole FTL database specified! e.g. /etc/pihole/pihole-FTL.db" 15 | exit 1 16 | fi 17 | piholedb=$1 18 | 19 | # second parameter: for which column do we want our result be sorted? 20 | if [ -n "$2" ]; then 21 | case "$2" in 22 | clientname) 23 | orderby="client_by_id.name" ;; 24 | ip) 25 | orderby="client_by_id.$2" ;; 26 | blocked) 27 | orderby="$2" ;; 28 | allowed) 29 | orderby="$2" ;; 30 | rate) 31 | orderby="$2" ;; 32 | *) 33 | echo "Invalid column '$2' for ORDER BY specified!" 34 | echo "Valid colums are: ip, clientname, blocked, allowed, rate." 35 | exit 1 36 | esac 37 | echo "Column '$2' for ORDER BY specified." 38 | else 39 | echo "No column for ORDER BY specified (e.g. IP, CLIENTNAME, BLOCKED, ALLOWED, RATE)" 40 | echo "Using 'clientname' as default." 
--------------------------------------------------------------------------------
/pihole/piholeBlockratePerClient.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# ---
# Script to gather some information about the block rate per client/IP from the Pi-hole FTL database.
#
# The script requires the following package to be installed:
# sudo apt install sqlite3
#
# Usage:
# ./piholeBlockratePerClient.sh DBPATH (e.g. /etc/pihole/pihole-FTL.db) ORDERBY (optional, one of: ip, clientname, blocked, allowed, rate)
# ---

if [ -z "$1" ]; then
    echo "No pi-hole FTL database specified! e.g. /etc/pihole/pihole-FTL.db"
    exit 1
fi
piholedb=$1

# second parameter: by which column should the result be sorted?
if [ -n "$2" ]; then
    case "$2" in
        clientname)
            orderby="client_by_id.name" ;;
        ip)
            orderby="client_by_id.$2" ;;
        blocked)
            orderby="$2" ;;
        allowed)
            orderby="$2" ;;
        rate)
            orderby="$2" ;;
        *)
            echo "Invalid column '$2' for ORDER BY specified!"
            echo "Valid columns are: ip, clientname, blocked, allowed, rate."
            exit 1
    esac
    echo "Column '$2' for ORDER BY specified."
else
    echo "No column for ORDER BY specified (e.g. ip, clientname, blocked, allowed, rate)"
    echo "Using 'clientname' as default."
    orderby="client_by_id.name"
fi

sql="SELECT \
    client AS ip, client_by_id.name AS clientname,
    SUM(count) FILTER (WHERE flag = 'B') AS blocked,
    SUM(count) FILTER (WHERE flag = 'A') AS allowed,
    ROUND(
        (1.0 * SUM(count) FILTER (WHERE flag = 'B') * 100)
        /
        (1.0 * SUM(count) FILTER (WHERE flag = 'A') + 1.0 * SUM(count) FILTER (WHERE flag = 'B'))
    ,2) AS rate
FROM
(
    SELECT * FROM
    (
        SELECT client, count(id) as count, 'B' as flag FROM queries WHERE type IN (1,2) AND status IN(1,4,5,6,7,8,9,10,11) GROUP BY client
        HAVING COUNT(*) > 100
        UNION ALL
        SELECT client, count(id) as count, 'A' as flag FROM queries WHERE type IN (1,2) AND status IN(2,3,14) GROUP BY client
        HAVING COUNT(*) > 100
    )
)
JOIN client_by_id on client = client_by_id.ip
GROUP BY client
ORDER BY $orderby;"
#ORDER BY client_by_id.name

echo ""
sqlite3 "$piholedb" "$sql" -header -column
echo ""
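
A usage sketch, sorting the result by block rate (the second parameter must be one of the columns accepted by the case statement above):

    ./piholeBlockratePerClient.sh /etc/pihole/pihole-FTL.db rate
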
--------------------------------------------------------------------------------
/share/docker-pihole-unbound/docker-compose.yml:
--------------------------------------------------------------------------------
version: "2"

services:
  pihole:
    container_name: pihole
    image: pihole/pihole:latest
    ports:
      - "53:53/tcp"
      - "53:53/udp"
      - "67:67/udp"
      - "80:80/tcp"
    environment:
      TZ: 'Europe/Berlin'
      WEBPASSWORD: 'YOUR_PASSWORD_HERE'
      PIHOLE_DNS_: "172.28.0.3;172.28.0.3"
    volumes:
      - './etc-pihole:/etc/pihole'
      - './etc-dnsmasq.d:/etc/dnsmasq.d'
      - "/etc/hosts:/etc/hosts:ro"
    cap_add:
      - NET_ADMIN
    restart: unless-stopped
    hostname: pihole
    networks:
      default:
        ipv4_address: 172.28.0.2

  unbound:
    container_name: pihole-unbound
    image: klutchell/unbound
    environment:
      TZ: 'Europe/Berlin'
    networks:
      default:
        ipv4_address: 172.28.0.3
    volumes:
      - './unbound-conf:/etc/unbound/custom.conf.d'
    restart: unless-stopped
    hostname: pihole-unbound

networks:
  default:
    driver: bridge
    ipam:
      config:
        - subnet: 172.28.0.0/24
          gateway: 172.28.0.1
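
Once the stack is up, the Pi-hole container answers on the published port 53 of the Docker host and forwards queries to the unbound container at 172.28.0.3 (per PIHOLE_DNS_). A quick check from the host, assuming dig from the dnsutils package is available:

    docker compose up -d
    dig example.com @127.0.0.1
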
--------------------------------------------------------------------------------
/unbound/server.conf:
--------------------------------------------------------------------------------
server:
    # If no logfile is specified, syslog is used
    # logfile: "/var/log/unbound/unbound.log"
    verbosity: 0

    interface: 0.0.0.0@5335
    interface: ::0@5335
    port: 5335
    do-ip4: yes
    do-udp: yes
    do-tcp: yes

    # IPv6 ?
    # May be set to yes if you have IPv6 connectivity
    # Uncomment the IPv6 root servers in the auth-zone section at the end of this config file if you use IPv6!
    do-ip6: no

    # You want to leave this set to no unless you have *native* IPv6. With 6to4 and
    # Teredo tunnels your web browser should favor IPv4 for the same reasons
    prefer-ip6: no

    # Use this only when you downloaded the list of primary root servers!
    # Install the dns-root-data package instead: if you use the default dns-root-data package,
    # unbound will find it automatically!
    # Install: sudo apt install dns-root-data
    #root-hints: /usr/share/dns/root.hints

    # Trust glue only if it is within the server's authority
    harden-glue: yes

    # Require DNSSEC data for trust-anchored zones; if such data is absent, the zone becomes BOGUS
    harden-dnssec-stripped: yes

    # Don't use capitalization randomization as it is known to cause DNSSEC issues sometimes
    # see https://discourse.pi-hole.net/t/unbound-stubby-or-dnscrypt-proxy/9378 for further details
    use-caps-for-id: no

    # Reduce EDNS reassembly buffer size.
    # IP fragmentation is unreliable on the Internet today, and can cause
    # transmission failures when large DNS messages are sent via UDP. Even
    # when fragmentation does work, it may not be secure; it is theoretically
    # possible to spoof parts of a fragmented DNS message, without easy
    # detection at the receiving end. Recently, there was an excellent study
    # >>> Defragmenting DNS - Determining the optimal maximum UDP response size for DNS <<<
    # by Axel Koolhaas and Tjeerd Slokker (https://indico.dns-oarc.net/event/36/contributions/776/)
    # in collaboration with NLnet Labs that explored DNS using real world data from
    # the RIPE Atlas probes; the researchers suggested different values for
    # IPv4 and IPv6 and in different scenarios. They advise that servers should
    # be configured to limit DNS messages sent over UDP to a size that will not
    # trigger fragmentation on typical network links. DNS servers can switch
    # from UDP to TCP when a DNS response is too big to fit in this limited
    # buffer size. This value has also been suggested in DNS Flag Day 2020.
    edns-buffer-size: 1232

    # Perform prefetching of close to expired message cache entries
    # This only applies to domains that have been frequently queried
    prefetch: yes

    # Fetch DS records earlier (DNSSEC): more cpu usage, less latency
    prefetch-key: yes

    # One thread should be sufficient, can be increased on beefy machines
    num-threads: 1

    # Ensure kernel buffer is large enough to not lose messages in traffic spikes
    so-rcvbuf: 1m

    # Increase cache size to utilize more RAM
    msg-cache-size: 128m
    rrset-cache-size: 256m

    # This attempts to reduce latency by serving the outdated record before
    # updating it instead of the other way around
    # cache-min-ttl: 0
    # serve-expired: yes

    # Ensure privacy of local IP ranges
    private-address: 192.168.0.0/16
    private-address: 169.254.0.0/16
    private-address: 172.16.0.0/12
    private-address: 10.0.0.0/8
    private-address: fd00::/8
    private-address: fe80::/10
    private-address: ::ffff:0:0/96

    # Allow access from the entire network
    access-control: 0.0.0.0/0 allow
    access-control: ::0/0 allow

remote-control:
    control-enable: yes

auth-zone:
    # Get data for all TLDs by IXFR (or AXFR) from the root servers;
    # these are the only servers that answer an IXFR query
    name: "."
    primary: 170.247.170.2   # b.root-servers.net
    primary: 192.33.4.12     # c.root-servers.net
    primary: 199.7.91.13     # d.root-servers.net
    primary: 192.5.5.241     # f.root-servers.net
    primary: 192.112.36.4    # g.root-servers.net
    primary: 193.0.14.129    # k.root-servers.net
    primary: 192.0.47.132    # xfr.cjr.dns.icann.org
    primary: 192.0.32.132    # xfr.lax.dns.icann.org

    # IPv6 ? Uncomment if you use IPv6!
    #primary: 2801:1b8:10::b          # b.root-servers.net
    #primary: 2001:500:2::c           # c.root-servers.net
    #primary: 2001:500:2d::d          # d.root-servers.net
    #primary: 2001:500:2f::f          # f.root-servers.net
    #primary: 2001:500:12::d0d        # g.root-servers.net
    #primary: 2001:7fd::1             # k.root-servers.net
    #primary: 2620:0:2830:202::132    # xfr.cjr.dns.icann.org
    #primary: 2620:0:2d0:202::132     # xfr.lax.dns.icann.org

    fallback-enabled: yes
    for-downstream: no
    for-upstream: yes

    zonefile: /var/lib/unbound/root.zone
--------------------------------------------------------------------------------
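
After installing this config on the unbound host (on a standard Debian/Ubuntu setup it would typically live under /etc/unbound/unbound.conf.d/, an assumption not stated in this repository), the syntax and DNSSEC validation can be spot-checked; sigfail/sigok are commonly used DNSSEC test names:

    sudo unbound-checkconf
    sudo systemctl restart unbound
    dig sigfail.verteiltesysteme.net @127.0.0.1 -p 5335   # should return SERVFAIL
    dig sigok.verteiltesysteme.net @127.0.0.1 -p 5335     # should return NOERROR
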