├── .github
│   └── workflows
│       ├── ci.yml
│       └── publish.yml
├── .gitlab-ci.yml
├── CHANGELOG
├── LICENSE
├── README.md
├── backup
│   ├── backup.adoc
│   ├── configuration.adoc
│   ├── disk-preparation.adoc
│   ├── monitoring.adoc
│   ├── overview.adoc
│   ├── references.adoc
│   └── restore.adoc
├── basic-configuration
│   ├── basic-configuration.adoc
│   ├── basic-firewall-configuration.adoc
│   ├── openssh-server-configuration.adoc
│   ├── os-installation.adoc
│   ├── post-install-configuration.adoc
│   ├── ssd-optimization.adoc
│   └── ups-configuration.adoc
├── dns-server
│   ├── configuration.adoc
│   ├── dns-server.adoc
│   ├── installation.adoc
│   └── monitoring.adoc
├── docker
│   ├── docker.adoc
│   ├── installation.adoc
│   └── monitoring.adoc
├── domain-name
│   ├── domain-name.adoc
│   ├── dynamic-dns-update.adoc
│   └── monitoring.adoc
├── firefly
│   ├── firefly.adoc
│   ├── installation.adoc
│   ├── monitoring.adoc
│   └── overview.adoc
├── git-server
│   ├── configuration.adoc
│   ├── git-server.adoc
│   └── overview.adoc
├── introduction
│   ├── document-overview.adoc
│   ├── generating-custom-doc.adoc
│   ├── getting-involved.adoc
│   ├── introduction.adoc
│   └── license.adoc
├── maintenance
│   ├── keeping-system-clean.adoc
│   ├── keeping-system-up-to-date.adoc
│   ├── maintenance.adoc
│   └── monitoring.adoc
├── monitoring
│   ├── login-notification.adoc
│   ├── monit.adoc
│   ├── monitoring.adoc
│   └── summary-email.adoc
├── nextcloud
│   ├── certificate.adoc
│   ├── configuration.adoc
│   ├── installation.adoc
│   ├── monitoring.adoc
│   ├── nextcloud.adoc
│   └── overview.adoc
├── nfs-server
│   ├── domain.adoc
│   ├── kerberos.adoc
│   ├── nfs-client-configuration.adoc
│   ├── nfs-server-configuration.adoc
│   └── nfs-server.adoc
├── parameters.adoc
├── preamble.adoc
├── references.adoc
├── reverse-proxy
│   ├── certificate.adoc
│   ├── installation.adoc
│   ├── monitoring.adoc
│   ├── overview.adoc
│   └── reverse-proxy.adoc
├── server-overview
│   ├── goals.adoc
│   ├── hardware.adoc
│   ├── high-level-overview.adoc
│   ├── server-overview.adoc
│   └── software-and-services.adoc
├── silverbox-server.adoc
├── socks5-over-vpn
│   ├── client-configuration.adoc
│   ├── container.adoc
│   ├── image.adoc
│   ├── monitoring.adoc
│   └── socks5-over-vpn.adoc
└── transmission
    ├── container.adoc
    ├── image.adoc
    ├── monitoring.adoc
    ├── transmission.adoc
    └── user-interface.adoc
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 |   push:
5 |     branches: [ master ]
6 |   pull_request:
7 |     branches: [ master ]
8 |
9 | jobs:
10 |   build:
11 |
12 |     runs-on: ubuntu-latest
13 |
14 |     steps:
15 |     - name: SCM checkout
16 |       uses: actions/checkout@v2
17 |
18 |     - name: Build with Asciidoctor
19 |       uses: docker://asciidoctor/docker-asciidoctor:latest
20 |       with:
21 |         args: "asciidoctor --failure-level WARN silverbox-server.adoc"
22 |
23 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: Publish
2 |
3 | on:
4 |   release:
5 |     types: [published]
6 |
7 | jobs:
8 |   publish:
9 |
10 |     runs-on: ubuntu-latest
11 |
12 |     steps:
13 |     - name: SCM Checkout
14 |       uses: actions/checkout@v2
15 |       with:
16 |         persist-credentials: false
17 |
18 |     - name: Build with Asciidoctor
19 |       uses: docker://asciidoctor/docker-asciidoctor:latest
20 |       with:
21 |         args: "asciidoctor --failure-level WARN --destination-dir dist -o index.html silverbox-server.adoc"
22 |
23 |     - name: Publish
24 |       uses: JamesIves/github-pages-deploy-action@releases/v3
25 |       with:
26 |         GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
27 |         BRANCH: gh-pages
28 |         FOLDER: dist
29 |
30 |
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | image: asciidoctor/docker-asciidoctor:latest
2 |
3 | stages:
4 |   - build
5 |   - deploy
6 |
7 | build:
8 |   stage: build
9 |   script:
10 |     - asciidoctor silverbox-server.adoc
11 |   artifacts:
12 |     paths:
13 |       - silverbox-server.html
14 |
15 | pages:
16 |   stage: deploy
17 |   script:
18 |     - mkdir public
19 |     - cp silverbox-server.html public/index.html
20 |   artifacts:
21 |     paths:
22 |       - public
23 |   only:
24 |     - master
25 |
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
1 | ## [1.3.4] - 2022-10-29
2 |
3 | - Update to Nextcloud 24.0.6 [6ae6b02]
4 |
5 | ## [1.3.3] - 2022-09-14
6 |
7 | - Update images [0164589]
8 | - Remove build network from Docker compose files [d873305, 3bbd13e]
9 |
10 | ## [1.3.2] - 2022-05-29
11 |
12 | - Update images [88bde97]
13 |
14 | ## [1.3.1] - 2022-03-27
15 |
16 | - Update images [899175e]
17 | Note that newer versions of Docker use BuildKit by default, which may fail to build compose files that use custom networks.
18 | If you see an error message similar to this: "network mode "common" not supported by buildkit. You can define a custom network for your builder using the network driver-opt in buildx create."
19 | try disabling BuildKit by setting the environment variable DOCKER_BUILDKIT=0.
20 |
21 | ## [1.3.0] - 2022-01-09
22 |
23 | - Update Docker images [0a92490]
24 | - Update Nextcloud to 23.0.0 and PostgreSQL to 14.1
25 | Note that due to the major PostgreSQL version upgrade, a manual DB conversion is required:
26 | - Stop Nextcloud completely and back up its database by making a copy of the /srv/nextcloud/db directory.
27 | - Start nextcloud-db container and create a DB dump:
28 | $ sudo docker exec -i nextcloud-db pg_dumpall --username nextcloud --clean > nextcloud_db.sql
29 | - Stop nextcloud-db container, edit Nextcloud Docker Compose file and update the DB image to 14.1
30 | - Delete everything inside the /srv/nextcloud/db directory
31 | - Start the new nextcloud-db container using Docker Compose
32 | - Restore the DB from the dump:
33 | $ sudo docker exec -i nextcloud-db psql --username nextcloud nextcloud < nextcloud_db.sql
34 | - After the restore completes, stop the nextcloud-db container and replace host auth method back to md5 by editing /srv/nextcloud/db/pg_hba.conf file:
35 | host all all all md5
36 | - Start all Nextcloud containers
37 |
38 | - Update Firefly III to 5.5.13 and PostgreSQL to 14.1
39 | Note that due to the major PostgreSQL version upgrade, a manual DB conversion is required:
40 | - Stop firefly-app and firefly-db containers and backup the database by making a copy of /srv/firefly/db directory.
41 | - Start firefly-db container and create a DB dump:
42 | $ sudo docker exec -i firefly-db pg_dumpall --username firefly --clean > firefly_db.sql
43 | - Stop firefly-db container, edit Firefly Docker Compose file and update the DB image to 14.1
44 | - Delete everything inside the /srv/firefly/db directory
45 | - Start the new firefly-db container using Docker Compose
46 | - Restore the DB from the dump:
47 | $ sudo docker exec -i firefly-db psql --username firefly firefly < firefly_db.sql
48 | - After the restore completes, stop the firefly-db container and replace host auth method back to md5 by editing /srv/firefly/db/pg_hba.conf file:
49 | host all all all md5
50 | - Start all Firefly containers
51 |
52 | - Update to Docker Compose v2 [7323225]
53 | Note that the Docker Compose v2 command is 'docker compose' as opposed to 'docker-compose'.
54 | There are a few places in the document where 'docker-compose' needs to be manually replaced with 'docker compose'
55 | (for example, in systemd units for starting up containers).
56 | The easiest way to find these places is to search for the 'docker-compose' string.
57 |
58 | ## [1.2.5] - 2021-04-15
59 |
60 | - Update: Nextcloud 21.0.1, Debian 10.9 [f1d35a1]
61 |
62 | ## [1.2.4] - 2021-03-14
63 |
64 | - Update to Nextcloud 20.0.8, PostgreSQL 13.1 [193d520]
65 | - Remove port mapping from reverse proxy [14b21d2]
66 |
67 | ## [1.2.3] - 2021-02-11
68 |
69 | - Update to Nextcloud 20.0.7, Debian 10.8 [2519b3c]
70 | - More details on NFS client with Kerberos [6d5ef14]
71 |
72 | ## [1.2.2] - 2020-11-29
73 |
74 | - Update Nextcloud to 20.0.2 [6daecc9]
75 |
76 | ## [1.2.1] - 2020-11-22
77 |
78 | - Added missing Firefly III update section [f227717]
79 | - Update Nextcloud and Postgres [d62c45e]
80 |
81 | ## [1.2.0] - 2020-11-15
82 |
83 | - Add missing auto startup for reverse proxy [e804daf]
84 | - Added Firefly III instructions [97b07b8]
85 | - Add X-Forwarded-Proto header to reverse proxy [057e8ea]
86 |
87 | ## [1.1.0] - 2020-11-07
88 |
89 | - Added section on internal reverse proxy server setup [1d75b17]
90 | - Improve Nextcloud certificate instructions [2f7aa04]
91 | This update includes some minor changes to renewal script for Nextcloud.
92 | If you have Nextcloud deployed, the scripts should be manually updated to match the new document.
93 | - Fix spelling [8319a5f]
94 |
95 | ## [1.0.2] - 2020-10-10
96 |
97 | - Update Nextcloud to 19.0.4-fpm and Postgres to 13.0 [b53d512]
98 | Note that due to the major PostgreSQL version upgrade, a manual DB conversion is required:
99 | - First stop Nextcloud and backup its database (for example, by making a copy of /srv/nextcloud/db directory). Then start it again.
100 | - Unfortunately, there is no easy upgrade path between major PostgreSQL versions (see https://github.com/docker-library/postgres/issues/37 issue) when using Docker.
101 | One way would be to use pg_upgrade (for example, see https://github.com/tianon/docker-postgres-upgrade) but it appears to be unreliable.
102 | The easiest way seems to be to just dump and restore database:
103 | - Stop nextcloud-web and nextcloud-fpm containers
104 | - Create a full dump of the DB using currently running nextcloud-db container:
105 | $ sudo docker exec -i nextcloud-db pg_dumpall --username nextcloud --clean > db_dump.sql
106 | - Stop nextcloud-db container, edit Nextcloud docker-compose.yml file and update DB image to desired version (13.0)
107 | - Delete everything inside /srv/nextcloud/db directory (make sure you have backup of it before doing this)
108 | - Start new nextcloud-db container with the new PostgreSQL version
109 | - Restore the DB from the dump:
110 | $ sudo docker exec -i nextcloud-db psql --username nextcloud nextcloud < db_dump.sql
111 | - After restore completes the nextcloud-web and nextcloud-fpm containers can be started back up
112 |
113 | ## [1.0.1] - 2020-09-14
114 |
115 | - Update Nextcloud to 19.0.3-fpm and Postgres to 12.4 [23293c2]
116 |
117 | ## [1.0.0] - 2020-08-16
118 |
119 | - Add preamble and icon. Improve some sections. Fix typos. [af07d23]
120 | - Move ToC on the left [f7046dd]
121 | - Removed workaround for nextcloud/docker bug #345 as it is now fixed [c5e2aef]
122 | - Update Nextcloud to 19.0.1. Fix nextcloud-fpm container. [0121898]
123 | - Put Nextcloud to maintenance mode during backup [3165e48]
124 | - Switch away from Alpine to Debian Docker images [41d46b0]
125 | * All Alpine images replaced with Debian (slim) images.
126 | Note that for two Nextcloud containers (HTTPD and Postgres) the update
127 | requires manual UID/GID update, as Debian uses different UID/GID for
128 | users postgres and www-data. To do the update, stop Nextcloud, and then
129 | run the following commands before starting it again with Debian based
130 | images:
131 | sudo find /srv/nextcloud -uid 82 -exec chown 33 {} \;
132 | sudo find /srv/nextcloud -gid 82 -exec chgrp 33 {} \;
133 | sudo find /srv/nextcloud -uid 70 -exec chown 999 {} \;
134 | sudo find /srv/nextcloud -gid 70 -exec chgrp 999 {} \;
135 | * Apache HTTPD updated to 2.4.46
136 | * Use built-in Docker init (added in Docker 19) instead of Tini
137 | - Set token_auth_enforced parameter in Nextcloud [2226387]
138 | - Bump docker image versions [01ce933]
139 | Postgres: 12.3
140 |
141 | ## [0.2.0] - 2019-11-17
142 |
143 | - Added chapter on Git server configuration. [38fcc2d5]
144 | - Fixed typo in copying public SSH key for SOCKS5-VPN proxy. [28c1bc20]
145 |
146 | ## [0.1.2] - 2019-10-20
147 |
148 | - Remove paragraph about max upload size config. Max upload size setting has been removed from UI and now is not necessary, as chunked upload was implemented. [7c0468d]
149 | - Bump Nextcloud's Postgres version to `12.0`. [1b400ab]
150 | - Bump Nextcloud version to `17.0.0`. [bbdc3e2]
151 | - Bump httpd version to `2.4.41`. [1a40a09]
152 | - Bump Alpine version to `3.10.2`. [aeb4ecb]
153 | - Fix typos. Fixes #1. [26e1bad]
154 |
155 | ## [0.1.1] - 2019-07-14
156 |
157 | - Added power consumption information. [6a793e0e]
158 | - Fixed invalid Docker network names in Docker Compose files.
159 | Added attributes for all Docker images versions.
160 | Bumped Docker images versions to current versions. [0da6c15d]
161 |
162 | ## [0.1.0] - 2019-07-06
163 |
164 | Initial version published.
165 |
166 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | This document is licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License.
2 |
3 | For more details see:
4 |
5 | - https://creativecommons.org/licenses/by-nc/4.0
6 | - https://creativecommons.org/licenses/by-nc/4.0/legalcode
7 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | Guide on how to build a compact, silent and energy-efficient GNU/Linux home server that runs:
6 |
7 | - **Unbound** as a forwarding DNS server that forwards queries to the DNS server of your choice and uses DNS-over-TLS and DNSSEC for
8 | extra security and privacy.
9 | - **NFS server** secured with Kerberos (clean NFSv4-only server).
10 | - **Nextcloud** accessible over HTTP(S) with Let's Encrypt certificates (renewed automatically using Certbot with DNS challenge).
11 | - **Transmission** BitTorrent client that communicates only over a VPN connection.
12 | - **SOCKS5 proxy server** that proxies traffic securely over a VPN connection.
13 | - **Git server** for hosting Git repositories.
14 | - **Borg and Rclone** for automatic encrypted incremental backups (both on-site and off-site).
15 | - **Reverse proxy server** with HTTPS (using wildcard certificate) and basic authentication to access internal services.
16 | - **Firefly III** for personal finances management.
17 | - **Monit** for system monitoring and notifications.
18 | - Script to automatically update the DNS record pointing to the server's public IP address (in case of dynamic IP).
19 |
20 | The server also runs:
21 |
22 | - SSH server.
23 | - Docker engine (as most of the workloads are run as containers).
24 |
25 | The latest HTML version of the guide is hosted online using GitHub Pages
26 | and can be viewed here: https://ovk.github.io/silverbox
27 |
28 | # Compiling
29 | The guide is written in [AsciiDoc](https://en.wikipedia.org/wiki/AsciiDoc) format
30 | and can be compiled into different output formats, such as HTML or PDF.
31 |
32 | If you have Docker installed, you can use Asciidoctor Docker container.
33 | For example, to build HTML version:
34 |
35 | ```
36 | git clone https://github.com/ovk/silverbox.git
37 | docker run -it --rm -v $(pwd)/silverbox:/documents asciidoctor/docker-asciidoctor asciidoctor silverbox-server.adoc
38 | ```
39 |
40 | Or to build a PDF:
41 |
42 | ```
43 | docker run -it --rm -v $(pwd)/silverbox:/documents asciidoctor/docker-asciidoctor asciidoctor-pdf silverbox-server.adoc
44 | ```
45 |
46 | This should produce an output file (`silverbox-server.html` or `silverbox-server.pdf`) in the `silverbox`
47 | directory, with all the placeholders replaced with your values.
48 |
49 | See [Generating Custom Document](https://ovk.github.io/silverbox/#generating_custom_document)
50 | section for more details.
51 |
52 | ## Customizing Document
53 | Most of the configuration-specific parameters (such as IP addresses, host names, port numbers etc.)
54 | are not hardcoded, but defined using AsciiDoc attributes.
55 | This way you can redefine these attributes with your specific parameter values
56 | and build your very own version of this document.
57 |
58 | By default these parameter values contain simple placeholders,
59 | such as `{SERVER_IP_ADDR}` for the server local IP address.
60 | You can replace them with the values you want by editing `parameters.adoc` file and then compiling the document.
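
If you prefer not to edit `parameters.adoc`, individual attributes can also be overridden at build time
with Asciidoctor's `-a` option. A minimal sketch (assuming the attribute behind the `{SERVER_IP_ADDR}`
placeholder is literally named `SERVER_IP_ADDR`):

```
docker run -it --rm -v $(pwd)/silverbox:/documents asciidoctor/docker-asciidoctor \
    asciidoctor -a SERVER_IP_ADDR=192.168.1.10 silverbox-server.adoc
```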
61 |
62 | # License
63 | This document is licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0) License.
64 |
65 | For more details see:
66 |
67 | - https://creativecommons.org/licenses/by-nc/4.0
68 | - https://creativecommons.org/licenses/by-nc/4.0/legalcode
69 |
70 |
--------------------------------------------------------------------------------
/backup/backup.adoc:
--------------------------------------------------------------------------------
1 | [[backup]]
2 | == Backup
3 | This section describes how to configure secure automatic backup of valuable files
4 | from the server to external drive and to the cloud.
5 |
6 | include::overview.adoc[]
7 |
8 | include::disk-preparation.adoc[]
9 |
10 | include::configuration.adoc[]
11 |
12 | include::monitoring.adoc[]
13 |
14 | include::restore.adoc[]
15 |
16 | include::references.adoc[]
17 |
18 |
--------------------------------------------------------------------------------
/backup/configuration.adoc:
--------------------------------------------------------------------------------
1 | === Configuration
2 | This section describes how to install all the necessary software and configure backup to the external drive and to the cloud.
3 |
4 | ==== Borg Backup
5 | Borg backup will be used to backup valuable files from the server to the external drive.
6 |
7 | ===== Installation
8 | Borg backup can be installed directly from the repositories:
9 |
10 | ----
11 | sudo apt install borgbackup
12 | ----
13 |
14 | ===== Backup Repository Creation
15 | Borg backups files to what it calls repository, which is essentially a directory on disk.
16 |
17 | Initialize a new empty Borg repository on the external drive:
18 |
19 | ----
20 | sudo borg init --encryption=repokey /mnt/backup/borgrepo
21 | ----
22 |
23 | You'll be prompted for a passphrase that will be used to generate encryption key for the backups.
24 |
25 | IMPORTANT: Store this passphrase somewhere outside of the server,
26 | so that it can be used to decrypt backups in the case of total server failure.
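
To confirm that the repository was created, and to keep a copy of the repository key together with the passphrase,
something like the following can be used (a sketch; the export path below is just an example and the exported key
should be moved off the server):

----
sudo borg info /mnt/backup/borgrepo
sudo borg key export /mnt/backup/borgrepo /tmp/silverbox-borg-key.txt
----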
27 |
28 | ===== Automatic Backup Creation
29 | Create a directory where backup related scripts will be stored:
30 |
31 | ----
32 | sudo mkdir /root/silverbox/backup
33 | sudo chmod 700 /root/silverbox/backup
34 | ----
35 |
36 | Create the `/root/silverbox/backup/backup.sh` file with the following content:
37 |
38 | ./root/silverbox/backup/backup.sh
39 | [source,bash]
40 | ----
41 | #!/bin/sh
42 |
43 | if pidof -x borg >/dev/null; then
44 | echo "borg is already running"
45 | exit 1
46 | fi
47 |
48 | OCC_OUTPUT=$(docker exec --user www-data nextcloud-fpm php occ maintenance:mode)
49 | if [ "$?" -ne "0" ]; then
50 | echo "failed to check if Nextcloud is already in maintenance mode"
51 | exit 1
52 | fi
53 |
54 | if ! printf "%s" "$OCC_OUTPUT" | grep -q "Maintenance mode is currently disabled"; then
55 | echo "unexpected occ output: $OCC_OUTPUT"
56 | exit 1
57 | fi
58 |
59 | if ! docker exec --user www-data nextcloud-fpm php occ maintenance:mode --on; then
60 | echo "failed to enable Nextcloud maintenance mode"
61 | exit 1
62 | fi
63 |
64 | export BORG_PASSPHRASE='{BORG_PASSPHRASE}' # <1>
65 |
66 | # Create backup
67 | borg create -v --stats /mnt/backup/borgrepo::'{hostname}-{now:%Y-%m-%d}' \ # <2>
68 | /etc/letsencrypt/archive \ # <3>
69 | /srv/nextcloud \
70 | /srv/nfs \
71 | /srv/git \
72 | /srv/firefly \
73 | --exclude '/srv/nfs/torrents' \
74 | --exclude '/srv/nextcloud/html' \
75 | --exclude '/srv/nextcloud/data/*.log' \
76 | --exclude '/srv/nextcloud/data/*/preview' \
77 | --exclude '/srv/nextcloud/db/*.pid' \
78 | --exclude '/srv/nextcloud/db/*.opts' \
79 | --exclude '/srv/nextcloud/db/pg_stat_tmp' \
80 | --exclude '/srv/firefly/db/*.pid' \
81 | --exclude '/srv/firefly/db/*.opts' \
82 | --exclude '/srv/firefly/db/pg_stat_tmp'
83 |
84 | if [ "$?" -ne "0" ]; then
85 | echo "borg create failed"
86 | exit 2
87 | fi
88 |
89 | if ! docker exec --user www-data nextcloud-fpm php occ maintenance:mode --off; then
90 | echo "failed to disable Nextcloud maintenance mode"
91 | exit 1
92 | fi
93 |
94 | # Prune old backups
95 | borg prune -v --list /mnt/backup/borgrepo --keep-daily=3 --keep-weekly=4 --keep-monthly=6 # <4>
96 |
97 | if [ "$?" -ne "0" ]; then
98 | echo "borg prune failed"
99 | exit 3
100 | fi
101 |
102 | echo "backup completed"
103 | ----
104 | <1> Set `\{BORG_PASSPHRASE}` to your Borg passphrase.
105 | <2> Feel free to adjust the mask controlling how backups will be named.
106 | <3> This list of what to backup is just an example, adjust it according to your needs.
107 | <4> Feel free to adjust backup retention settings according to your needs.
108 |
109 | Mark this file as executable and only accessible by root:
110 |
111 | ----
112 | sudo chmod 700 /root/silverbox/backup/backup.sh
113 | ----
114 |
115 | To run the backup script automatically on a schedule, a Systemd timer is used.
116 | Create the `/etc/systemd/system/borg-backup.service` file with the following content:
117 |
118 | ./etc/systemd/system/borg-backup.service
119 | ----
120 | [Unit]
121 | Description=Create backup using Borg backup
122 |
123 | [Service]
124 | Type=oneshot
125 | ExecStart=/bin/sh -c "/root/silverbox/backup/backup.sh"
126 | ----
127 |
128 | Next, create the `/etc/systemd/system/borg-backup.timer` file with the following content:
129 |
130 | ./etc/systemd/system/borg-backup.timer
131 | ----
132 | [Unit]
133 | Description=Create backup using Borg backup
134 |
135 | [Timer]
136 | OnCalendar=*-*-* 00:00:00 # <1>
137 | AccuracySec=1h
138 | Persistent=true
139 |
140 | [Install]
141 | WantedBy=timers.target
142 | ----
143 | <1> In this configuration backup is created daily at midnight.
144 |
145 | Enable and start the timer:
146 |
147 | ----
148 | sudo systemctl daemon-reload
149 | sudo systemctl enable borg-backup.timer
150 | sudo systemctl start borg-backup.timer
151 | ----
152 |
153 | To create the first backup and verify that everything works run the service manually:
154 |
155 | ----
156 | sudo systemctl start borg-backup.service
157 | ----
158 |
159 | The first backup creation may take a very long time.
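
If you want to keep an eye on the first run, the service log and the resulting archive can be inspected
with something like this (assuming the repository path used above):

----
journalctl -u borg-backup.service -f
sudo borg list /mnt/backup/borgrepo
----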
160 |
161 | ==== Rclone
162 | Rclone is a tool that can synchronize local files with remote cloud storage.
163 | In this deployment it is used to sync backup files generated by Borg to remote cloud storage.
164 |
165 | The prerequisite for this section is to have cloud storage configured and ready for use.
166 | I chose to use OVH object storage, but you can choose any storage that is supported by Rclone
167 | (the list of supported storage providers is available on the Rclone website; see the link in the references section).
168 |
169 | ===== Installation
170 | Rclone can be installed directly from the repositories:
171 |
172 | ----
173 | sudo apt install rclone
174 | ----
175 |
176 | ===== Storage Configuration
177 | After installation, Rclone needs to be configured to work with your cloud storage.
178 | This can either be done by running `rclone config`
179 | or by putting configuration into the `/root/.config/rclone/rclone.conf` file.
180 |
181 | Since the configuration depends on what cloud provider you use, it is not described in this document.
182 | For OVH, there is a helpful article mentioned in the references to this section.
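
Purely as an illustration, a Swift-based remote (OVH object storage uses OpenStack Swift) could look
roughly like this; the remote name, credentials and region below are placeholders:

./root/.config/rclone/rclone.conf
----
[ovh-backup]
type = swift
user = your-openstack-username
key = your-openstack-password
auth = https://auth.cloud.ovh.net/v3
tenant = your-tenant-name
region = your-region
----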
183 |
184 | Once Rclone is configured, you can test that it has access to the storage by doing:
185 |
186 | ----
187 | sudo rclone ls {REMOTE_STORAGE}:{STORAGE_PATH} -v # <1>
188 | ----
189 | <1> Replace `\{REMOTE_STORAGE}` and `\{STORAGE_PATH}` with remote storage that you configured and path respectively.
190 |
191 | ===== Automatic Backup Sync
192 | Create the `/root/silverbox/backup/sync.sh` file with the following content:
193 |
194 | ./root/silverbox/backup/sync.sh
195 | [source,bash]
196 | ----
197 | #!/bin/sh
198 |
199 | if pidof -x borg >/dev/null; then
200 | echo "borg is already running"
201 | exit 1
202 | fi
203 |
204 | if pidof -x rclone >/dev/null; then
205 | echo "rclone is already running"
206 | exit 1
207 | fi
208 |
209 | export BORG_PASSPHRASE='{BORG_PASSPHRASE}' # <1>
210 |
211 | # Check backup for consistency before syncing to the cloud
212 | borg check -v /mnt/backup/borgrepo
213 |
214 | if [ "$?" -ne "0" ]; then
215 | echo "borg check failed"
216 | exit 2
217 | fi
218 |
219 | # Sync backup
220 | rclone -v sync /mnt/backup/borgrepo {REMOTE_STORAGE}:{STORAGE_PATH} # <2>
221 |
222 | if [ "$?" -ne "0" ]; then
223 | echo "rclone sync failed"
224 | exit 3
225 | fi
226 |
227 | echo "backup sync completed"
228 | ----
229 | <1> Set `\{BORG_PASSPHRASE}` to your Borg passphrase.
230 | <2> Replace `\{REMOTE_STORAGE}` and `\{STORAGE_PATH}` with the actual values.
231 |
232 | Mark this file as executable and only accessible by root:
233 |
234 | ----
235 | sudo chmod 700 /root/silverbox/backup/sync.sh
236 | ----
237 |
238 | To run the backup sync script automatically on a schedule, a Systemd timer is used.
239 | Create the `/etc/systemd/system/sync-backup.service` file with the following content:
240 |
241 | ./etc/systemd/system/sync-backup.service
242 | ----
243 | [Unit]
244 | Description=Sync backup files to the cloud
245 |
246 | [Service]
247 | Type=oneshot
248 | ExecStart=/bin/sh -c "/root/silverbox/backup/sync.sh"
249 | ----
250 |
251 | Next, create the `/etc/systemd/system/sync-backup.timer` file with the following content:
252 |
253 | ./etc/systemd/system/sync-backup.timer
254 | ----
255 | [Unit]
256 | Description=Sync backup files to the cloud
257 |
258 | [Timer]
259 | OnCalendar=Mon *-*-* 03:00:00 # <1>
260 | AccuracySec=1h
261 | Persistent=true
262 |
263 | [Install]
264 | WantedBy=timers.target
265 | ----
266 | <1> In this configuration backup is synced every Monday at 3 am.
267 | The reason sync is done only once a week is to save some bandwidth and data.
268 |
269 | Enable and start the timer:
270 |
271 | ----
272 | sudo systemctl daemon-reload
273 | sudo systemctl enable sync-backup.timer
274 | sudo systemctl start sync-backup.timer
275 | ----
276 |
277 | To run the initial sync and verify that everything works, run the service manually:
278 |
279 | ----
280 | sudo systemctl start sync-backup.service
281 | ----
282 |
283 | The first sync may take a very long time (depending on your internet bandwidth and backup size).
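
To verify that the remote copy matches the local repository, you can compare sizes or run a full check
(replace the remote and path as before):

----
sudo rclone size {REMOTE_STORAGE}:{STORAGE_PATH}
sudo rclone check /mnt/backup/borgrepo {REMOTE_STORAGE}:{STORAGE_PATH}
----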
284 |
285 |
--------------------------------------------------------------------------------
/backup/disk-preparation.adoc:
--------------------------------------------------------------------------------
1 | === Disk Preparation
2 | The first step is to partition and format the external drive in `ext4` filesystem.
3 | There are plenty of excellent guides available online, so these steps are outside the scope of this document.
4 |
5 | NOTE: Setting up `dmcrypt`, `ecryptfs` or other encryption solutions on this drive is not necessary,
6 | since backup files will be already encrypted. But you may still do it if you want.
7 |
8 | ==== Automatic Disk Mount
9 | Once partitioned and formatted, connect the disk to the server.
10 |
11 | Identify what name was assigned to the disk and partition by either looking at `dmesg | tail`
12 | output right after the disk was connected, or by looking at the output of `lsblk` command.
13 |
14 | For example, the following output indicates that the device name is `sdb` and partition name is `sdb1`:
15 |
16 | [subs="attributes+,macros+"]
17 | ----
18 | $ lsblk
19 | pass:q[_sdb 8:00 0 465.8G 0 disk_]
20 | pass:q[_└─sdb1 8:00 0 442.4G 0 part_]
21 | ----
22 |
23 | The next step is to get the UUID of the partition from the output of `blkid` command:
24 |
25 | [subs="attributes+,macros+"]
26 | ----
27 | > sudo blkid /dev/sdb1 # <1>
28 | pass:q[_/dev/sdb1: LABEL="label" UUID="XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" TYPE="ext4" PARTUUID="XXXXXXX"_]
29 | ----
30 | <1> Replace `/dev/sdb1` with the partition device name from the previous step.
31 |
32 | Create a directory where the partition will be mounted:
33 |
34 | ----
35 | sudo mkdir /mnt/backup
36 | ----
37 |
38 | Finally, add the following line to the `/etc/fstab` file:
39 |
40 | ----
41 | UUID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX /mnt/backup ext4 auto,lazytime,nodev,nofail,errors=remount-ro 0 2 # <1>
42 | ----
43 | <1> Replace `XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX` with the actual UUID value.
44 |
45 | NOTE: In this example, the mount options include the `lazytime` option, which is not necessary if you use an HDD
46 | instead of an SSD and can be removed in that case.
47 |
48 | Reboot the system and confirm that the backup partition was automatically mounted under `/mnt/backup`:
49 |
50 | [subs="attributes+,macros+"]
51 | ----
52 | > lsblk
53 | pass:q[_sda 8:0 0 465.8G 0 disk_]
54 | pass:q[_└─sda1 8:1 0 442.4G 0 part /mnt/backup_]
55 | ----
56 |
57 | NOTE: The device name (i.e. `/dev/sda`) can be different after reboot.
58 |
59 | ==== TRIM
60 | While TRIM should automatically work on all mounted drives that support it (thanks to the default `fstrim.service`),
61 | it most likely will not work on drives connected via USB due to issues with USB-to-SATA command translation.
62 |
63 | If your drive supports TRIM you can check whether it works by doing `sudo fstrim /mnt/backup`,
64 | but most likely you'll see:
65 |
66 | ----
67 | fstrim: /mnt/backup: the discard operation is not supported
68 | ----
69 |
70 | Unfortunately, it looks like there is no way to fix this issue at the moment.
71 | However, TRIM support is not critical for a backup drive, as maximum write performance is not very important in this case.
72 |
73 |
--------------------------------------------------------------------------------
/backup/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | The monitoring for backups consists of three parts: monitoring backup disk status (space use, temperature, health),
3 | monitoring Borg service status and monitoring Rclone service status.
4 |
5 | ==== Backup Disk Monitoring
6 | The backup disk monitoring can be done exactly the same way as the main disk monitoring,
7 | as described in <> section (assuming your disk has a temperature sensor).
8 | However, my disk wasn't in the `hddtemp` database, so it had to be added manually.
9 |
10 | First check if the disk is supported by `smartctl` and if any extra parameters have to be added
11 | (in the case of my disk the `-d sat` extra parameter has to be passed to `smartctl`).
12 | For the list of USB disks supported by `smartctl` see the references section.
13 |
14 | To find in which field the disk reports temperature, check the output of:
15 |
16 | ----
17 | sudo smartctl -a -d sat /dev/sda # <1>
18 | ----
19 | <1> Replace `/dev/sda` with your backup disk device.
20 |
21 | Then append the following line to the `/etc/hddtemp.db` file:
22 |
23 | ./etc/hddtemp.db
24 | ----
25 | "Samsung Portable SSD T5" 190 C "Samsung Portable SSD T5 500GB" # <1>
26 | ----
27 | <1> Replace disk name with the name as it was reported by `smartctl`
28 | and replace `190` with the temperature field number.
29 |
30 | To monitor backup disk with Monit, append the following to the `/etc/monit/conf.d/10-system` file:
31 |
32 | ./etc/monit/conf.d/10-system
33 | ----
34 | # Backup Filesystem
35 | check filesystem backupfs with path /mnt/backup
36 | if space usage > 70% then alert
37 | if inode usage > 60% then alert
38 | if read rate > 2 MB/s for 10 cycles then alert
39 | if write rate > 1 MB/s for 30 cycles then alert
40 |
41 | # Backup disk temperature
42 | check program backup_disk_temp with path "/usr/local/etc/monit/scripts/disk_temp.sh {PART_UUID}" # <1>
43 | if status > 60 then alert
44 | if status < 15 then alert
45 | ----
46 | <1> Replace `\{PART_UUID}` with your backup partition UUID.
47 |
48 | Restart Monit with `sudo systemctl restart monit` and verify that monitoring works.
49 |
50 | Additionally, you can add backup disk temperature and health status reporting to the summary email
51 | (see <> section).
52 | Copy the lines for main disk status reporting and replace UUID with your backup disk UUID.
53 | Don't forget to add extra parameters to `smartctl` command if needed (e.g. `-d sat`).
54 |
55 | ==== Borg & Rclone Monitoring
56 | To monitor status of Borg and Rclone services,
57 | create the `/etc/monit/conf.d/80-backup` file with the following content:
58 |
59 | ./etc/monit/conf.d/80-backup
60 | ----
61 | check program borg_backup with path "/usr/local/etc/monit/scripts/is_systemd_unit_failed.sh borg-backup.service" every 60 cycles
62 | if status != 0 for 2 cycles then alert
63 |
64 | check program sync_backup with path "/usr/local/etc/monit/scripts/is_systemd_unit_failed.sh sync-backup.service" every 60 cycles
65 | if status != 0 for 2 cycles then alert
66 | ----
67 |
68 | Restart Monit to update the rules and verify that monitoring works.
69 |
70 |
--------------------------------------------------------------------------------
/backup/overview.adoc:
--------------------------------------------------------------------------------
1 | === Overview
2 | Below is a diagram that describes the general backup strategy for the server:
3 |
4 | ----
 5 |     Silverbox
 6 | ----------------                On-Site Backup               Off-Site Backup
 7 | | ------------ |                 ------------               ------------------
 8 | | | Internal | |   Borg Backup   | External |     Rclone    |  Cloud (OVH)   |
 9 | | |  Drive   | | --------------->|  Drive   | ------------->| Object Storage |
10 | | ------------ |       (1)        ------------      (2)      ------------------
11 | ----------------
12 | ----
13 |
14 | As this diagram shows, the backup process consists of two steps:
15 |
16 | 1. In the first step, all valuable information is backed up using Borg Backup <>
17 | to an external drive that is connected via USB.
18 | This step includes de-duplication, compression and encryption.
19 | 2. In the second step, the backup is synced from the external drive to the OVH cloud object storage <>
20 | using the Rclone tool <>.
21 |
22 | NOTE: While this guide uses OVH cloud, it is possible to use any other cloud that is supported by the Rclone tool.
23 | The only difference will be in Rclone configuration but the main process will be the same.
24 |
25 | In this model, data leaves the server already encrypted
26 | and thus can be safely stored on an unencrypted external disk and in public cloud.
27 |
28 | In the case of main drive failure, the data can be restored from the external drive.
29 | In the case of total failure of main and external drives simultaneously, the data can be restored from the cloud.
30 |
31 |
--------------------------------------------------------------------------------
/backup/references.adoc:
--------------------------------------------------------------------------------
1 | === References
2 |
3 | - Borg documentation: https://borgbackup.readthedocs.io/en/stable/index.html
4 | - Rclone documentation: https://rclone.org/docs
5 | - Article on configuring Rclone with OVH object storage: https://docs.ovh.com/ca/en/storage/sync-rclone-object-storage
6 | - Article on using Borg and Rclone for backup: https://opensource.com/article/17/10/backing-your-machines-borg
7 | - Nextcloud documentation on restoring from backup: https://docs.nextcloud.com/server/latest/admin_manual/maintenance/restore.html
8 | - USB disks supported by `smartctl`: https://www.smartmontools.org/wiki/Supported_USB-Devices
9 |
10 |
--------------------------------------------------------------------------------
/backup/restore.adoc:
--------------------------------------------------------------------------------
1 | === Restore Procedure
2 | The exact restore procedures depend on the type of failure that occurred.
3 |
4 | To get files from the cloud, use the `rclone` tool or download the files manually.
5 |
6 | To extract a particular backup from the Borg repository, use the `borg extract` command.
7 |
8 | Normally, the restored files can simply be copied over the lost or damaged files.
9 | For more details, please refer to the references section below.
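
As an illustration only, restoring a single archive could look roughly like this
(the archive name below is hypothetical; the repository path and remote follow the previous sections):

----
# If the local repository is lost, pull it back from the cloud first
sudo rclone sync {REMOTE_STORAGE}:{STORAGE_PATH} /mnt/backup/borgrepo

# List the available archives and extract one into an empty directory
sudo borg list /mnt/backup/borgrepo
mkdir /tmp/restore && cd /tmp/restore
sudo borg extract /mnt/backup/borgrepo::silverbox-2022-10-29
----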
10 |
11 |
--------------------------------------------------------------------------------
/basic-configuration/basic-configuration.adoc:
--------------------------------------------------------------------------------
1 | == Basic Configuration
2 |
3 | include::os-installation.adoc[]
4 |
5 | include::post-install-configuration.adoc[]
6 |
7 | include::ssd-optimization.adoc[]
8 |
9 | include::ups-configuration.adoc[]
10 |
11 | include::openssh-server-configuration.adoc[]
12 |
13 | include::basic-firewall-configuration.adoc[]
14 |
15 |
--------------------------------------------------------------------------------
/basic-configuration/basic-firewall-configuration.adoc:
--------------------------------------------------------------------------------
1 | === Basic Firewall Configuration
2 | By default, Ubuntu will already have Uncomplicated Firewall (UFW) installed, but it will be disabled (inactive).
3 |
4 | Its status can be checked with:
5 |
6 | ----
7 | sudo ufw status verbose
8 | ----
9 |
10 | Before activating firewall, first enable rate limiting for SSH (so that the server can be accessed over SSH).
11 | Rate limiting will allow access over the port, but will limit connection attempts to 6 within 30 seconds.
12 | For a more flexible configuration (e.g. a different number of attempts or duration), `iptables` has to be used directly,
13 | as UFW doesn't allow this kind of flexibility.
14 |
15 | To enable SSH access with rate limiting do:
16 |
17 | ----
18 | sudo ufw limit ssh comment "SSH Access"
19 | ----
20 |
21 | You can optionally disable UFW logs (sometimes they can flood syslog if you have some service discovery protocols running):
22 |
23 | ----
24 | sudo ufw logging off
25 | ----
26 |
27 | Now UFW can be enabled with:
28 |
29 | ----
30 | sudo ufw enable
31 | ----
32 |
33 | In this configuration UFW will start up automatically and block all incoming connections (by default) but allow (rate-limited) SSH.
34 | All outgoing connections are allowed.
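
You can confirm the resulting policy at any time with `sudo ufw status verbose`;
its output should include a line similar to:

----
Default: deny (incoming), allow (outgoing), disabled (routed)
----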
35 |
--------------------------------------------------------------------------------
/basic-configuration/openssh-server-configuration.adoc:
--------------------------------------------------------------------------------
1 | [[openssh_server_configuration]]
2 | === OpenSSH Server Configuration
3 | To access the server over SSH, an OpenSSH server needs to be installed and configured.
4 |
5 | IMPORTANT: Make sure that the server is on an isolated network or directly connected to the PC
6 | from which it will be accessed.
7 | Keep the server isolated until it is properly hardened and secured.
8 |
9 | ==== OpenSSH Server Installation
10 | OpenSSH server can be installed with:
11 |
12 | ----
13 | sudo apt install openssh-server
14 | ----
15 |
16 | ==== Generating and Copying Key
17 | Only key authentication will be enabled for the SSH server (with password and other authentication methods disabled).
18 | However, it is convenient to copy the access key over SSH while password authentication is still enabled.
19 | The following steps need to be performed on the client PC.
20 |
21 | Generate key:
22 |
23 | ----
24 | ssh-keygen -t ed25519 -f ~/.ssh/silverbox-key -C "Silverbox key"
25 | ----
26 |
27 | Copy the generated key to the server (`{SB_USER}` is assumed to be your user on the server):
28 |
29 | [subs="attributes+,macros+"]
30 | ----
31 | ssh-copy-id -i ~/.ssh/silverbox-key {SB_USER}@{SB_IP}
32 | ----
33 |
34 | ==== SSH Server Configuration & Security Hardening
35 | The next step is to perform some basic configuration and security hardening for the SSH server.
36 |
37 | SSH server is configured by modifying the `/etc/ssh/sshd_config` file.
38 | Change (or add, if not present) the following parameters in this file:
39 |
40 | ./etc/ssh/sshd_config
41 | [subs="attributes+"]
42 | ----
43 | AddressFamily inet
44 | Protocol 2
45 |
46 | HostKey /etc/ssh/ssh_host_ed25519_key
47 | HostKey /etc/ssh/ssh_host_rsa_key
48 |
49 | KexAlgorithms curve25519-sha256@libssh.org
50 | Ciphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes256-ctr
51 | MACs hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com
52 |
53 | LogLevel VERBOSE
54 |
55 | LoginGraceTime 1m
56 | PermitRootLogin no
57 | MaxAuthTries 4
58 | MaxSessions 5
59 |
60 | ClientAliveCountMax 2
61 | ClientAliveInterval 60
62 | TCPKeepAlive no
63 |
64 | AuthenticationMethods publickey
65 |
66 | PubkeyAuthentication yes
67 |
68 | HostbasedAuthentication no
69 | IgnoreRhosts yes
70 |
71 | PasswordAuthentication no
72 | PermitEmptyPasswords no
73 |
74 | ChallengeResponseAuthentication no
75 |
76 | X11Forwarding no
77 | AllowAgentForwarding no
78 | AllowTcpForwarding local
79 |
80 | Banner none
81 | DebianBanner none
82 |
83 | AllowUsers {SB_USER}
84 | ----
85 |
86 | Restart SSH server service for the changes to take effect:
87 |
88 | ----
89 | sudo systemctl restart sshd
90 | ----
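
Before closing the current session, it is a good idea to verify from the client PC that key-based login still works, for example:

[subs="attributes+"]
----
ssh -i ~/.ssh/silverbox-key {SB_USER}@{SB_IP}
----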
91 |
92 | ==== Additional Resources
93 | Below are some useful resources related to SSH server configuration:
94 |
95 | - Mozilla SSH server configuration guide: https://infosec.mozilla.org/guidelines/openssh
96 | - Good overview of SSH server cryptography: https://stribika.github.io/2015/01/04/secure-secure-shell.html
97 |
98 | Additionally, you can run the SSH audit script <> against your SSH server. It hasn't been updated since 2016, though.
99 |
--------------------------------------------------------------------------------
/basic-configuration/os-installation.adoc:
--------------------------------------------------------------------------------
1 | === OS Installation
2 | Ubuntu Server 18.04 was chosen as the operating system for the server.
3 | The reasoning behind this choice is that it is a pretty mature and stable server OS,
4 | with lots of software and documentation available due to its popularity.
5 |
6 | The Ubuntu installation itself is pretty straightforward, and there are many excellent guides available,
7 | so it is not described in detail in this document. Only some important points are described here.
8 |
9 | IMPORTANT: At the moment of writing, the default Ubuntu image was using a new installer
10 | that was missing quite a few important features, including full disk encryption.
11 | As a workaround, don't download the default Ubuntu Server installer;
12 | instead, go to the releases page and download the _"Alternative Ubuntu Server installer"_.
13 | This installer should have full support for LVM and disk encryption.
14 | Hopefully, the new installer will be updated eventually and the missing features will be added.
15 | It's a good idea to first try it on a virtual machine before installing on the real server.
16 |
17 | TIP: While internet access is not required during installation, it makes things a bit easier and more convenient,
18 | so make sure you plug the server into a working internet connection.
19 |
20 | Roughly, the installation process looks like this:
21 |
22 | . Create a bootable USB flash drive with Ubuntu installer image.
23 | . Connect server to power, monitor, network, keyboard, insert USB stick and power it on. Ubuntu installation will begin.
24 | . Partition disk manually according to your needs. For example, the following partitioning scheme was used:
25 | - Bootable EFI system partition
26 | - 2GB `ext4` partition for `/boot` (to have some extra space for old kernels)
27 | - `dm-crypt` partition with LVM on it in the following configuration:
28 | * One volume group (VG) with:
29 | .. Root logical volume (LV) for `/` with `ext4` file system
30 | .. Swap logical volume (LV). The size needs to be greater than the size of RAM if you need hibernation support.
31 | - Some unallocated (free) space to (hopefully) prolong the life of the SSD.
32 | . Make sure you enable automatic security updates during installation.
33 |
34 | No additional software packages were chosen during installation.
35 |
36 |
--------------------------------------------------------------------------------
/basic-configuration/post-install-configuration.adoc:
--------------------------------------------------------------------------------
1 | === Post-Installation Configuration
2 |
3 | ==== Set Correct Timezone
4 | If for whatever reason the correct time zone was not set during installation, set it now with:
5 |
6 | ----
7 | sudo timedatectl set-timezone {YOUR_TIMEZONE}
8 | ----
9 |
10 | Where `\{YOUR_TIMEZONE}` is your desired timezone (for example, `Europe/Athens`).
11 | The list of available time zones can be obtained with:
12 |
13 | ----
14 | timedatectl list-timezones
15 | ----
16 |
17 | ==== Disable Unused Hardware
18 | Optionally, for security, privacy, boot speed, power saving and other reasons, you can disable hardware
19 | that you are not planning to use. Some examples of what you can disable are below.
20 |
21 | ===== WiFi
22 | Wireless adapter can be easily disabled in BIOS.
23 |
24 | WARNING: After disabling the wireless adapter, your wired adapter name will likely change
25 | (due to the way Linux enumerates devices).
26 | In this case, network connectivity can be fixed by editing the file `/etc/netplan/01-netcfg.yaml`
27 | and updating wired interface name there.
28 |
29 | ===== Bluetooth
30 | Disabling the Bluetooth adapter wasn't possible with the default NUC BIOS and required a BIOS update.
31 | After the update, Bluetooth can be disabled in the BIOS.
32 |
33 | ===== Digital Microphone
34 | Microphone can be disabled in the BIOS as well.
35 |
36 | ==== Disable IPv6
37 | Unless you are planning on using IPv6, it is a good idea to disable it for security reasons.
38 | The rest of this document assumes IPv6 is disabled and thus all configuration is for IPv4 only.
39 |
40 | To disable IPv6 edit the file `/etc/default/grub` and add (or set) the following parameters:
41 |
42 | ./etc/default/grub
43 | [source,ini]
44 | ----
45 | GRUB_CMDLINE_LINUX="ipv6.disable=1"
46 | GRUB_CMDLINE_LINUX_DEFAULT="ipv6.disable=1"
47 | ----
48 |
49 | Update Grub configuration:
50 |
51 | ----
52 | sudo update-grub
53 | ----
54 |
55 | And then reboot the system.
56 |
57 | To check that IPv6 is disabled you can grep for _IPv6_ in the `dmesg` output:
58 |
59 | [subs="attributes+,macros+"]
60 | ----
61 | dmesg | grep IPv6
62 | pass:q[_IPv6: Loaded, but administratively disabled, reboot required to enable_]
63 | ----
64 |
65 | ==== Configure Static IP
66 | To make a lot of things easier and more predictable, a static network configuration is used for the server instead of DHCP.
67 | This also helps to prevent the DHCP server from accidentally changing the network configuration (especially DNS).
68 |
69 | From now on in this document when talking about the server network configuration the following conventions will be used:
70 |
71 | - Server IP address: `{SB_IP}`
72 | - Default gateway: `{SB_GW}`
73 |
74 | First, choose an IP address (`{SB_IP}`) and create a DHCP reservation on the DHCP server (most likely router) for it.
75 |
76 | To update network configuration, edit the `/etc/netplan/01-netcfg.yaml` file and update
77 | the `ethernets` section in it so that it matches desired network configuration:
78 |
79 | ./etc/netplan/01-netcfg.yaml
80 | [source,yaml,subs=attributes+]
81 | ----
82 | ethernets:
83 |   enp2s0: # <1>
84 |     addresses: [ {SB_IP}/24 ] # <2>
85 |     gateway4: {SB_GW}
86 |     nameservers:
87 |       addresses: [ 127.0.0.1 ] # <3>
88 |     dhcp4: no
90 | <1> This is the wired interface name, it may be different on your system.
91 | To find out what name to use check the `ifconfig` command output.
92 | <2> Replace this with your actual server IP address and your subnet size in bits (most likely 24).
93 | <3> Put your actual DNS server address here.
94 | This is temporary, and will be set back to `127.0.0.1` once DNS server is configured.
95 |
96 | To apply new configuration do:
97 |
98 | ----
99 | sudo netplan apply
100 | ----
101 |
102 | You can also reboot the system to double check that everything works.
103 |
104 | ==== Disable ICMP Redirects and Source Routing
105 | To disable ICMP redirects and IP source routing (for security reasons), edit the `/etc/sysctl.conf` file
106 | and uncomment the following lines:
107 |
108 | ./etc/sysctl.conf
109 | ----
110 | net.ipv4.conf.all.accept_redirects = 0
111 | net.ipv4.conf.all.send_redirects = 0
112 | net.ipv4.conf.all.accept_source_route = 0
113 | ----
114 |
115 | The changes will be applied after reboot.
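
If you don't want to wait for a reboot, the same settings can be applied immediately by reloading `/etc/sysctl.conf`:

----
sudo sysctl -p
----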
116 |
117 | ==== Remove Unneeded Software
118 | Now is a good time to remove or disable any software that you are not planning to use.
119 |
120 | ==== Uninstall Snap
121 | There are many, many issues with Snap (aka `snapd`) which I'm not going to describe here.
122 |
123 | Unless you really need it, you can remove it with:
124 |
125 | ----
126 | sudo apt autoremove --purge snapd
127 | ----
128 |
129 | ==== Uninstall LXC/LXD
130 | Unless you are planning to use it, you can also remove LXC/LXD to free up some system resources:
131 |
132 | ----
133 | sudo apt autoremove --purge lxd lxcfs liblxc-common lxd-client
134 | ----
135 |
136 | It can always be installed later on if required.
137 |
--------------------------------------------------------------------------------
/basic-configuration/ssd-optimization.adoc:
--------------------------------------------------------------------------------
1 | === SSD Optimization
2 | Since the server is equipped with SSD, some configuration needs to be done in order to optimize SSD performance
3 | and minimize the number of writes (thus prolonging SSD life).
4 |
5 | A lot of useful information about SSD-related configuration in Linux
6 | can be found in the Arch Wiki article about SSD <>.
7 |
8 | ==== TRIM
9 | A TRIM command informs an SSD about what blocks are no longer used and can be recycled.
10 | Doing TRIM can improve SSD write performance.
11 |
12 | First make sure that your SSD supports TRIM.
13 | One way to do it is to check the output of the following command (replace `/dev/sda` with your disk device):
14 |
15 | [subs="attributes+,macros+"]
16 | ----
17 | sudo hdparm -I /dev/sda | grep TRIM
18 | pass:q[_* Data Set Management TRIM supported (limit 8 blocks)_]
19 | ----
20 |
21 | NOTE: If your disk doesn't support TRIM, you can skip the rest of this section.
22 |
23 | TRIM needs to be enabled on all abstraction layers,
24 | which in the case of the silverbox server means on the file system level, LVM level and dm-crypt level.
25 |
26 | ===== Enabling TRIM on File System Level
27 | Periodic file system TRIM should be enabled by default in Ubuntu 18.04.
28 | There should be Systemd timer that performs `fstrim` every week.
29 |
30 | To check its status, do:
31 |
32 | ----
33 | systemctl status fstrim.timer
34 | ----
35 |
36 | Logs from previous runs can be viewed with:
37 |
38 | ----
39 | journalctl -u fstrim.service
40 | ----
41 |
42 | You can run the service manually and inspect the logs to make sure it works.
43 | To run the service:
44 |
45 | ----
46 | sudo systemctl start fstrim.service
47 | ----
48 |
49 | ===== Enabling TRIM on LVM Level
50 | Edit the `/etc/lvm/lvm.conf` file and set `issue_discards` parameter to 1 (it should be under the `devices` section):
51 |
52 | ./etc/lvm/lvm.conf
53 | ----
54 | ...
55 | devices {
56 | ...
57 | issue_discards = 1
58 | ...
59 | }
60 | ...
61 | ----
62 |
63 | Most likely it will already be there and set to 1 so you just need to double check.
64 |
65 | Note that the `issue_discards` parameter here only controls whether to send discards during operations on LVM volumes,
66 | such as resizing or removing.
67 | Discards for deleted files should be passed through by default.
68 |
69 | ===== Enabling TRIM on dm-crypt Level
70 | Edit the `/etc/crypttab` file and add `discard` option to options for your device.
71 | Below is an example for `sda3`:
72 |
73 | ./etc/crypttab
74 | ----
75 | sda3_crypt UUID=XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX none luks,discard
76 | ----
77 |
78 | Most likely it will already be there so you just need to double check.
79 |
80 | ===== Verifying that TRIM Works
81 | A procedure for TRIM verification is described in this excellent StackOverflow answer:
82 | https://unix.stackexchange.com/questions/85865/trim-with-lvm-and-dm-crypt/85880#85880.
83 |
84 | ==== Write Caching
85 | Write caching can greatly improve performance and minimize writes on SSD.
86 | It should be enabled by default.
87 |
88 | To check if write caching is enabled, check the output of:
89 |
90 | [subs="attributes+,macros+"]
91 | ----
92 | sudo hdparm -W /dev/sda # <1>
93 | pass:q[_/dev/sda:_]
94 | pass:q[ _write-caching = 1 (on)_]
95 | ----
96 | <1> Replace `/dev/sda` with your disk device.
97 |
98 | Or alternatively:
99 |
100 | [subs="attributes+,macros+"]
101 | ----
102 | sudo hdparm -i /dev/sda | grep WriteCache # <1>
103 | pass:q[_... WriteCache=enabled ..._]
104 | ----
105 | <1> Replace `/dev/sda` with your disk device.
106 |
107 | ==== Swappiness
108 | Lowering system swappiness can increase the threshold of when memory pages will be swapped to disk,
109 | and thus potentially limit the number of writes to the SSD.
110 | More about swappiness here: https://en.wikipedia.org/wiki/Paging#Swappiness.
111 |
112 | Current (default) system swappiness can be checked with:
113 |
114 | [subs="attributes+,macros+"]
115 | ----
116 | sysctl vm.swappiness
117 | pass:q[_vm.swappiness = 60_]
118 | ----
119 |
120 | If you decide to change it, this can be done by editing the `/etc/sysctl.conf` file and adding (or changing)
121 | the `vm.swappiness` parameter, for example:
122 |
123 | ./etc/sysctl.conf
124 | ----
125 | ...
126 | vm.swappiness = 40
127 | ...
128 | ----
129 |
130 | The change will take effect after reboot.
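
If you'd rather not wait for a reboot, the new value can also be applied immediately
(the entry in `/etc/sysctl.conf` still keeps it persistent across reboots):

----
sudo sysctl -w vm.swappiness=40
----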
131 |
132 | [[mounting_tmp_as_tmpfs]]
133 | ==== Mounting /tmp as tmpfs
134 | To minimize writes to SSD even further, the `/tmp` directory can be mounted as `tmpfs` (aka RAM file system) mount.
135 | This can be either done with Systemd `tmp.mount` unit or by editing `fstab`.
136 | According to the Systemd documentation (at least at the moment of writing), using `fstab` is the preferred approach.
137 |
138 | To automatically mount `/tmp` as `tmpfs`, add the following line to the `/etc/fstab` file:
139 |
140 | ./etc/fstab
141 | ----
142 | tmpfs /tmp tmpfs defaults,noatime,nosuid,nodev,noexec,mode=1777,size=2G 0 0
143 | ----
144 |
145 | In this example, its size is limited to 2G, but you can adjust it if needed.
146 |
147 | IMPORTANT: The `noexec` option can sometimes cause issues with programs that put something under `/tmp` and then try to execute it.
148 | If this happens, you can remove this option.
149 |
150 | Reboot the system and verify the output of `df -h` to make sure `/tmp` is now mounted as `tmpfs` with the limit you've set.
151 | It should contain a line similar to this:
152 |
153 | ----
154 | tmpfs 2.0G 0 2.0G 0% /tmp
155 | ----
156 |
157 | ==== Monitoring Tools
158 | There are some tools that are useful for SSD monitoring, and will be used in the next sections.
159 |
160 | The first one is `hddtemp`, which is used to monitor disk temperature.
161 | To install it do:
162 |
163 | ----
164 | sudo apt install hddtemp
165 | ----
166 |
167 | The second one is `smartmontools`, which is used to monitor SSD wear (and other parameters) via SMART.
168 | To install it do:
169 |
170 | ----
171 | sudo apt install smartmontools --no-install-recommends
172 | ----
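
A quick way to check that both tools can see your disk (replace `/dev/sda` with your disk device):

----
sudo hddtemp /dev/sda
sudo smartctl -H /dev/sda
----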
173 |
--------------------------------------------------------------------------------
/basic-configuration/ups-configuration.adoc:
--------------------------------------------------------------------------------
1 | === UPS Configuration
2 | For a graceful server shutdown in case of a power outage, the server needs to be able to communicate with the UPS
3 | to get its status.
4 | Usually, the UPS is connected to the server via USB and Network UPS Tools (NUT) is used to get the UPS status.
5 |
6 | ==== Network UPS Tools (NUT) Installation
7 | NUT can be easily installed by doing:
8 |
9 | ----
10 | sudo apt install nut
11 | ----
12 |
13 | ==== NUT Configuration
14 | The NUT configuration consists of three different parts:
15 |
16 | . Configuring the _driver_ (talks to UPS)
17 | . Configuring the _server_ (`upsd` daemon, talks to the driver)
18 | . Configuring the _monitor_ (`upsmon`, monitors the server and takes action based on the received information)
19 |
20 | ===== Configuring NUT Driver
21 | The driver type for NUT can be checked on the NUT Compatibility Page <>.
22 | In the case of Eaton 3S UPS the driver is `usbhid-ups`.
23 |
24 | Edit the `/etc/nut/ups.conf` file and append section for your UPS, for example:
25 |
26 | ./etc/nut/ups.conf
27 | ----
28 | [eaton3s]
29 | driver=usbhid-ups
30 | port=auto
31 | ----
32 |
33 | Start the driver:
34 |
35 | ----
36 | sudo upsdrvctl start
37 | ----
38 |
39 | ===== Configuring NUT Server
40 | General `upsd` configuration is done by editing the `/etc/nut/upsd.conf` file, if necessary.
41 |
42 | Edit the `/etc/nut/nut.conf` file and change `MODE` to `standalone`:
43 |
44 | ./etc/nut/nut.conf
45 | ----
46 | MODE=standalone
47 | ----
48 |
49 | A user for the monitor needs to be added to the `/etc/nut/upsd.users` file (replace `\{SOME_PASSWORD}` with some random password):
50 |
51 | ./etc/nut/upsd.users
52 | ----
53 | [upsmon]
54 | password = {SOME_PASSWORD}
55 | upsmon master
56 | ----
57 |
58 | The `upsd` server can now be started with `sudo systemctl start nut-server` command.
59 | Once started successfully, the UPS info can be queried with:
60 |
61 | ----
62 | upsc eaton3s
63 | ----
64 |
65 | ===== Configuring NUT Monitor
66 | Change the `MONITOR` value in the `/etc/nut/upsmon.conf` file like so (use the same password you used in the previous step):
67 |
68 | ./etc/nut/upsmon.conf
69 | ----
70 | MONITOR eaton3s@localhost 1 upsmon {SOME_PASSWORD} master
71 | ----
72 |
73 | Start the monitor service with `sudo systemctl start nut-monitor` command.
74 |
75 | At this point the system should be configured to do a graceful shutdown when the UPS is on battery and the battery reaches the `battery.charge.low` level.
76 | The `battery.charge.low` value can be obtained with `upsc eaton3s | grep 'battery.charge.low'`.
77 |
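78 | If you want to test the full shutdown sequence end-to-end, one way to do it (note that this will actually
79 | shut the server down, so make sure you are ready for that) is to simulate a forced shutdown:
80 | 
81 | ----
82 | sudo upsmon -c fsd
83 | ----
84 | 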
78 | ==== Useful References
79 | Some useful information to read about NUT:
80 |
81 | - Arch Wiki article about NUT: https://wiki.archlinux.org/index.php/Network_UPS_Tools
82 | - NUT Documentation: https://networkupstools.org/docs/user-manual.chunked/ar01s06.html
83 |
--------------------------------------------------------------------------------
/dns-server/configuration.adoc:
--------------------------------------------------------------------------------
1 | === Configuration
2 | Ubuntu 18.04 uses `systemd-resolved` as the default DNS server.
3 |
4 | To switch to Unbound, the `systemd-resolved` stub listener needs to be disabled first.
5 | To do this, edit the `/etc/systemd/resolved.conf` file and set the following parameter:
6 |
7 | ./etc/systemd/resolved.conf
8 | ----
9 | DNSStubListener=no
10 | ----
11 |
12 | Then restart the `systemd-resolved` service:
13 |
14 | ----
15 | sudo systemctl restart systemd-resolved
16 | ----
17 |
18 | You can also verify that `systemd-resolved` is not listening on port 53 anymore by checking the output of:
19 |
20 | ----
21 | sudo netstat -lnptu
22 | ----
23 |
24 | To configure Unbound as a simple forwarding DNS server create the `/etc/unbound/unbound.conf.d/dns-config.conf` file
25 | with the following content:
26 |
27 | ./etc/unbound/unbound.conf.d/dns-config.conf
28 | [source,yaml,subs="attributes+"]
29 | ----
30 | server:
31 | interface: 0.0.0.0
32 | outgoing-interface: {SB_IP} # <1>
33 | access-control: 127.0.0.0/8 allow
34 | access-control: {SB_SUBNET} allow # <2>
35 | do-ip4: yes
36 | do-ip6: no
37 | do-udp: yes
38 | do-tcp: yes
39 | minimal-responses: yes
40 | prefetch: yes
41 | qname-minimisation: yes
42 | hide-identity: yes
43 | hide-version: yes
44 | use-caps-for-id: yes
45 | private-address: 192.168.0.0/16
46 | private-address: 172.16.0.0/12
47 | private-address: 10.0.0.0/8
48 | unwanted-reply-threshold: 10000
49 | root-hints: /usr/share/dns/root.hints
50 | forward-zone:
51 | name: "."
52 | # tls-cert-bundle: /etc/ssl/certs/ca-certificates.crt # <3>
53 | forward-ssl-upstream: yes
54 | forward-addr: 1.1.1.1@853#one.one.one.one # <4>
55 | forward-addr: 1.0.0.1@853#one.one.one.one # <5>
56 | remote-control:
57 | control-interface: 127.0.0.1
58 | ----
59 | <1> Replace this with your server address.
60 | <2> Replace this with your LAN subnet.
61 | <3> This line is commented out because Unbound 1.6.7 (the default Ubuntu 18.04 version at the moment of writing)
62 | does not support this parameter.
63 | Without it there is no certificate validation; however, the queries are still encrypted.
64 | Uncomment this line once Ubuntu gets a newer version of Unbound.
65 | <4> Primary DNS server address.
66 | <5> Secondary DNS server address.
67 |
68 | NOTE: This configuration uses Cloudflare's DNS servers <>, due to their reasonable privacy policy and support for
69 | DNS over TLS and DNSSEC. Feel free to replace with DNS server of your choice.
70 |
71 | NOTE: It is also possible to make Unbound to block DNS requests to certain known advertisement/analytics addresses
72 | (similarly to what Pi-hole does) but this is outside of the scope of this document.
73 |
74 | IMPORTANT: The configuration above is for Unbound 1.6.7. Some parameters were added/modified in more recent versions,
75 | so this config may need to be updated once the Ubuntu package is upgraded to a more recent version.
76 |
81 | Next, remove the `/etc/resolv.conf` file (which is a link to SystemD resolver's file):
82 |
83 | ----
84 | sudo rm /etc/resolv.conf
85 | ----
86 |
87 | The `systemd-resolved` should detect it automatically and stop generating `resolv.conf` contents.
88 |
89 | Now you can create a new `/etc/resolv.conf` file with the following content:
90 |
91 | [subs="attributes+"]
92 | ----
93 | nameserver 127.0.0.1
94 | nameserver {SB_IP} # <1>
95 | ----
96 | <1> Replace `{SB_IP}` with the actual server IP address.
97 | While it may seem redundant to have this line together with `127.0.0.1`, it is needed for Docker's embedded
98 | DNS to work properly.
99 | At the moment of writing, Docker incorrectly filters out all localhost records from the `resolv.conf`,
100 | so this record is necessary to force it to use the host's DNS server.
101 |
102 | Restart the `systemd-resolved` and `unbound` services:
103 |
104 | ----
105 | sudo systemctl restart systemd-resolved
106 | sudo systemctl restart unbound
107 | ----
108 |
109 | Check that the DNS resolution is working on the server.
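110 | For example, you can query the local Unbound instance directly (this assumes the `dig` utility from the
111 | `dnsutils` package is installed):
112 | 
113 | ----
114 | dig @127.0.0.1 ubuntu.com +short
115 | ----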
110 |
111 | To verify that DNSSEC is working you can check the output of the following command:
112 |
113 | [subs="macros+"]
114 | ----
115 | dig weberdns.de
116 | pass:q[_..._]
117 | pass:q[_;; flags: qr rd ra *ad*; QUERY: 1, ANSWER: 1, AUTHORITY: 0, ADDITIONAL: 1_]
118 | pass:q[_..._]
119 | ----
120 |
121 | and verify that the response has the `ad` flag present.
122 |
123 | To also verify that DNS queries are now encrypted check the output of:
124 |
125 | ----
126 | sudo tcpdump -vv -x -X -s 1500 -i {NETWORK_INTERFACE} 'port 853'
127 | ----
128 |
129 | while performing any DNS query.
130 |
131 | ==== Adding Firewall Rule
132 |
133 | To allow incoming DNS requests from the LAN do:
134 |
135 | [subs="attributes+"]
136 | ----
137 | sudo ufw allow proto tcp from {SB_SUBNET} to any port 53 comment "DNS TCP"
138 | sudo ufw allow proto udp from {SB_SUBNET} to any port 53 comment "DNS UDP"
139 | ----
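140 | 
141 | You can verify that the rules were added by checking the output of:
142 | 
143 | ----
144 | sudo ufw status
145 | ----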
140 |
141 | === Updating DHCP Server Configuration
142 |
143 | Now you can change your router's DHCP settings and set your server address as your DNS server.
144 | Thus all devices on the LAN will switch to using this DNS server automatically.
145 |
146 |
--------------------------------------------------------------------------------
/dns-server/dns-server.adoc:
--------------------------------------------------------------------------------
1 | [[dns_server]]
2 | == DNS Server
3 | This section describes how to install and configure a DNS server, which will serve clients on the local network.
4 | Client devices in the LAN will use this DNS server as the default DNS server (can be announced by the DHCP server),
5 | and the DNS server will forward queries securely (using DNS over TLS and DNSSEC) to the DNS server of your choice
6 | (this configuration uses Cloudflare's DNS server).
7 |
8 | include::installation.adoc[]
9 |
10 | include::configuration.adoc[]
11 |
12 | include::monitoring.adoc[]
13 |
14 |
--------------------------------------------------------------------------------
/dns-server/installation.adoc:
--------------------------------------------------------------------------------
1 | === Installation
2 | Unbound <> is used as the DNS server.
3 |
4 | It can be installed directly from the repositories with:
5 |
6 | ----
7 | sudo apt install unbound
8 | ----
9 |
10 |
--------------------------------------------------------------------------------
/dns-server/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | The DNS server will be monitored with Monit, which should be configured by now.
3 |
4 | Create the `/etc/monit/conf.d/30-unbound` file with the following content:
5 |
6 | ----
7 | check process unbound with pidfile /var/run/unbound.pid
8 | if does not exist then alert
9 | if cpu > 10% for 5 cycles then alert
10 | if total memory > 200.0 MB for 5 cycles then alert
11 | if failed port 53 type udp protocol dns then alert
12 | if failed port 53 type tcp protocol dns then alert
13 |
14 | check program unbound_stats with path "/usr/sbin/unbound-control stats" every 5 cycles
15 | if status != 0 then alert
16 | ----
17 |
18 | This will make Monit check that the Unbound process is running, that the DNS server is accessible over TCP and UDP,
19 | and that it is not consuming suspicious amounts of CPU and RAM.
20 | In addition to that, it will grab the Unbound stats every 5 cycles
21 | (which is 5 minutes, if you set the cycle duration to a minute).
22 | The Unbound stats are cleared each time the `stats` command is executed, so in this case Monit will essentially
23 | show the stats for the last 5 minutes.
24 |
25 | Restart the Monit service:
26 |
27 | ----
28 | sudo systemctl restart monit
29 | ----
30 |
31 | Check Monit web interface and make sure that DNS monitoring is working.
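32 | Alternatively, the same can be checked from the command line with:
33 | 
34 | ----
35 | sudo monit summary
36 | ----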
32 |
33 |
--------------------------------------------------------------------------------
/docker/docker.adoc:
--------------------------------------------------------------------------------
1 | [[docker]]
2 | == Docker
3 | This section describes how to setup Docker CE engine and Docker Compose.
4 | Docker will be required to run some workloads (such as Nextcloud, or Transmission) inside containers.
5 |
6 | include::installation.adoc[]
7 |
8 | include::monitoring.adoc[]
9 |
10 |
--------------------------------------------------------------------------------
/docker/installation.adoc:
--------------------------------------------------------------------------------
1 | === Installation
2 |
3 | ==== Docker CE
4 | To install Docker CE engine follow the instructions from the docker documentation:
5 | https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-docker-ce.
6 |
7 | IMPORTANT: For security reasons, it may be a good idea not to add your user to the `docker` group.
8 | Membership in the `docker` group essentially grants the user `root` permissions, without requiring a
9 | password for privilege elevation (unlike `sudo`).
10 |
11 | [[docker_compose_install]]
12 | ==== Docker Compose
13 | Docker Compose <> is a useful tool for deploying and managing multi-container workloads.
14 |
15 | At the moment of writing, the preferred method of installation for Ubuntu was simply grabbing
16 | the latest binary from the GitHub releases page.
17 | The downside is that there won't be any automatic upgrades and newer versions of Docker Compose will
18 | have to be installed manually.
19 |
20 | NOTE: This guide has been updated to use Docker Compose v2
21 | (complete rewrite of the Docker Compose in Golang).
22 | If you have older Docker Compose version, make sure you remove it and install the version 2.
23 |
24 | To find the latest Docker Compose version number, visit the GitHub releases page at:
25 | https://github.com/docker/compose/releases.
26 |
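27 | Depending on how Docker was installed, the CLI plugins directory may not exist yet; if it doesn't, create it first:
28 | 
29 | ----
30 | sudo mkdir -p /usr/libexec/docker/cli-plugins
31 | ----
32 | 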
27 | Next, download the `docker-compose` binary:
28 |
29 | ----
30 | sudo curl -L "https://github.com/docker/compose/releases/download/{COMPOSE_VERSION}/docker-compose-$(uname -s)-$(uname -m)" -o /usr/libexec/docker/cli-plugins/docker-compose # <1>
31 | ----
32 | <1> Replace `\{COMPOSE_VERSION}` with the actual latest Docker Compose version.
33 |
34 | And mark it as executable:
35 |
36 | ----
37 | sudo chmod +x /usr/libexec/docker/cli-plugins/docker-compose
38 | ----
39 |
40 | Verify that Docker Compose works by doing:
41 |
42 | ----
43 | docker compose version
44 | ----
45 |
46 | NOTE: If you see an error similar to this:
47 | `error while loading shared libraries: libz.so.1: failed to map segment from shared object`
48 | then remove the `noexec` option from the `/tmp` mount.
49 | See <> for more details.
50 |
51 |
--------------------------------------------------------------------------------
/docker/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | To setup basic monitoring of the Docker daemon with Monit,
3 | create the `/etc/monit/conf.d/20-docker` file with the following content:
4 |
5 | ./etc/monit/conf.d/20-docker
6 | ----
7 | check process docker with pidfile /var/run/docker.pid
8 | if does not exist then alert
9 | if cpu > 30% for 10 cycles then alert
10 | if total memory > 300.0 MB for 5 cycles then alert
11 | ----
12 |
13 | Restart Monit service:
14 |
15 | ----
16 | sudo systemctl restart monit
17 | ----
18 |
19 | Check Monit web interface and verify that Docker monitoring is working.
20 |
21 |
--------------------------------------------------------------------------------
/domain-name/domain-name.adoc:
--------------------------------------------------------------------------------
1 | [[domain_name]]
2 | == Domain Name
3 | Having your own domain name can be useful for a variety of reasons.
4 | In this guide, it is used for the following purposes:
5 |
6 | 1. To configure proper domain for Kerberos (instead of doing domain hijacking).
7 | 2. To access the server from the outside (from the internet) without having a static IP address.
8 | 3. To obtain certificates for Nextcloud (to use HTTPS) or other services.
9 |
10 | A domain name needs to be registered in order to proceed with this section of the document.
11 |
12 | TIP: Domain name registration and payment process may take a few days, so it is better to start it in advance.
13 |
14 | The main requirement for the domain name registrar is to provide an API that allows changing DNS records
15 | (at least host A and TXT records).
16 | After evaluating a few options, NameSilo <> was chosen as the domain name registrar,
17 | so this document provides a DNS records update script for its API.
18 | If you decide to use a different domain name registrar, you'll have to write a similar script yourself.
19 |
20 | In this document, the domain name is referred to as `{SB_DOMAIN_NAME}` (an example of a domain name is `example.com`).
21 | However, this domain name itself is not used directly.
22 | Instead, a subdomain is used, which is referred to as `{SB_SUBDOMAIN}` (an example would be `silverbox.example.com`).
23 | So the FQDN of the server, as seen from the internet, is `{SB_SUBDOMAIN}`.
24 | This approach offers some flexibility: for example, the domain name itself (e.g. `example.com`)
25 | can be used for different purposes (like hosting some website), while the subdomain (e.g. `silverbox.example.com`)
26 | is used to access the server over SSH.
27 |
28 | IMPORTANT: Some scripts listed in this document have one limitation: only domain names that consist of two components
29 | (i.e. one label before the TLD) are supported. For example, the domain name `example.com` is supported while
30 | `example.co.uk` is not.
31 |
32 | include::dynamic-dns-update.adoc[]
33 |
34 | include::monitoring.adoc[]
35 |
36 |
--------------------------------------------------------------------------------
/domain-name/dynamic-dns-update.adoc:
--------------------------------------------------------------------------------
1 | === Dynamic DNS Update
2 | This section describes how to setup automatic DNS host A record update with the current public address of the server.
3 | This way the server can be accessed from the internet by its FQDN, even if its public IP address is dynamic.
4 |
5 | The dynamic DNS record update is initiated by a Systemd timer that runs a Docker container with a Python script,
6 | which uses the NameSilo API to make an update.
7 | The Docker container is used for convenience, isolation and resource limiting
8 | (NameSilo API uses XML which can exhaust system resources while parsing malformed or maliciously constructed XML).
9 |
10 | ==== Prerequisites
11 | First, login to your NameSilo account and generate API key, which will be used to authenticate to the NameSilo API.
12 |
13 | TIP: Keep the API key secure, as it grants complete access to your NameSilo account.
14 |
15 | Then create a host A DNS record for the `{SB_SUBDOMAIN}` with any content.
16 | This entry needs to be created manually because the DNS update script only updates an existing record
17 | but doesn't create one.
18 |
19 | ==== Creating Docker Network
20 | First, create a separate Docker network that will be used to run containers other than VPN-related containers:
21 |
22 | [subs="attributes+"]
23 | ----
24 | sudo docker network create --driver=bridge --subnet={SB_COMMON_SUBNET} common # <1>
25 | ----
26 | <1> Replace `{SB_COMMON_SUBNET}` with some subnet for the common Docker network. For example: `172.19.0.0/24`.
27 |
28 | ==== Preparing Image Files
29 | This section assumes that all steps from the <> section have been completed.
30 |
31 | Create a directory for the DNS records updater container:
32 |
33 | ----
34 | sudo mkdir /root/silverbox/containers/dns-updater
35 | sudo chmod 700 /root/silverbox/containers/dns-updater
36 | ----
37 |
38 | Inside the `dns-updater` directory, create a file named `Dockerfile` with the following content:
39 |
40 | ./root/silverbox/containers/dns-updater/Dockerfile
41 | [source,dockerfile,subs="attributes+"]
42 | ----
43 | FROM debian:{SB_DEBIAN_VERSION} # <1>
44 |
45 | RUN apt-get update && \
46 | apt-get install -y --no-install-recommends python3 ca-certificates
47 |
48 | COPY update-dns.py /usr/local/bin/
49 |
50 | VOLUME /secrets # <2>
51 |
52 | ENTRYPOINT [ "python3", "/usr/local/bin/update-dns.py" ]
53 | ----
54 | <1> Replace `{SB_DEBIAN_VERSION}` with the actual latest `debian` image version (can be checked at the Docker Hub).
55 | <2> For the lack of better option, the API key is passed inside the container in a file on a mapped volume.
56 |
57 | Next, create the `update-dns.py` file with the following content:
58 |
59 | ./root/silverbox/containers/dns-updater/update-dns.py
60 | [source,python,subs="attributes+"]
61 | ----
62 | #!/usr/bin/env python3
63 | import json
64 | import argparse
65 | import urllib.request
66 | import urllib.parse
67 | from xml.dom import minidom
68 |
69 | DEFAULT_HEADERS={ 'User-Agent': 'curl/7.58.0' } # <1>
70 |
71 | def namesilo_url(operation, api_key):
72 | return 'https://www.namesilo.com/api/' + operation + '?version=1&type=xml&key=' + api_key
73 |
74 | def check_reply_code(doc, code):
75 | actual=doc.getElementsByTagName('reply')[0].getElementsByTagName('code')[0].firstChild.nodeValue
76 | if actual != str(code):
77 | raise BaseException('Expecting code {} got {}'.format(code, actual))
78 |
79 | def get_dns_record(rec_type, subdomain, domain, api_key):
80 | response=urllib.request.urlopen(urllib.request.Request(url=(namesilo_url('dnsListRecords', api_key) + '&domain=' + domain), headers=DEFAULT_HEADERS), timeout=30)
81 | doc=minidom.parseString(response.read())
82 | check_reply_code(doc, 300)
83 | for e in doc.getElementsByTagName('reply')[0].getElementsByTagName('resource_record'):
84 | if e.getElementsByTagName('host')[0].firstChild.nodeValue == subdomain and e.getElementsByTagName('type')[0].firstChild.nodeValue == rec_type:
85 | return { 'val': e.getElementsByTagName('value')[0].firstChild.nodeValue,
86 | 'id': e.getElementsByTagName('record_id')[0].firstChild.nodeValue,
87 | 'ttl': e.getElementsByTagName('ttl')[0].firstChild.nodeValue }
88 | raise BaseException('DNS {} record for {} not found'.format(rec_type, subdomain))
89 |
90 | def update_dns_record(rec_type, subdomain, domain, rec_id, val, ttl, api_key):
91 | params='&domain={}&rrid={}&rrhost={}&rrvalue={}&rrttl={}'.format(domain, rec_id, '.'.join(subdomain.split('.')[:-2]), val, ttl)
92 | response=urllib.request.urlopen(urllib.request.Request(url=(namesilo_url('dnsUpdateRecord', api_key) + params), headers=DEFAULT_HEADERS), timeout=30)
93 | check_reply_code(minidom.parseString(response.read()), 300)
94 |
95 | def main(rec_type, val, subdomain, domain, api_key, force, verbose):
96 | if rec_type == 'A':
97 | val=json.loads(urllib.request.urlopen(url='https://api.ipify.org?format=json', timeout=30).read())['ip'] # <2>
98 | if verbose:
99 | print('Current external IP address: {}'.format(val))
100 |
101 | current_record=get_dns_record(rec_type, subdomain, domain, api_key)
102 | if verbose:
103 | print('Current DNS {} record for {}: "{}"'.format(rec_type, subdomain, current_record))
104 |
105 | if val != current_record['val'] or force:
106 | update_dns_record(rec_type, subdomain, domain, current_record['id'], val, current_record['ttl'], api_key)
107 | print('{} record for {} updated: "{}" -> "{}"'.format(rec_type, subdomain, current_record['val'], val))
108 |
109 | if __name__ == '__main__':
110 | parser=argparse.ArgumentParser()
111 | parser.add_argument('-v', '--verbose', help='verbose output', action='store_true')
112 | parser.add_argument('-f', '--force', help='force DNS record update (even if it contains the same value)', action='store_true')
113 | parser.add_argument('-d', '--domain', help='fully qualified domain name for which to update a record (i.e. server.example.com)', required=True)
114 | parser.add_argument('-a', '--action', help='action to perform: update host A record with the current external IP or update TXT record with a given value', required=True, choices=[ 'update-ip', 'update-txt' ])
115 | parser.add_argument('-t', '--txt', help='content of the TXT record', default='')
116 | parser.add_argument('-k', '--key', help='file name of the file containing API key', required=True)
117 | args=parser.parse_args()
118 |
119 | with open(args.key) as f:
120 | api_key=f.readline().strip()
121 |
122 | main('A' if args.action == 'update-ip' else 'TXT', args.txt, args.domain, '.'.join(args.domain.split('.')[-2:]), api_key, args.force, args.verbose)
123 | ----
124 | <1> Default `urllib` user agent has to be overwritten since NameSilo rejects it for some reason.
125 | <2> This script uses `https://www.ipify.org` service to get public IP.
126 | Feel free to replace it with different service if you want.
127 |
128 | ==== Storing API Key
129 | The API key will be stored on disk (only readable by root) and passed inside the container via mapped volume.
130 |
131 | Create a directory that will be mapped as a volume:
132 |
133 | ----
134 | sudo mkdir /root/silverbox/namesilo
135 | ----
136 |
137 | Create a file `/root/silverbox/namesilo/api-key` and write the NameSilo API key into it.
138 |
139 | Assign the following permissions to the directory and file:
140 |
141 | ----
142 | sudo chown root:root /root/silverbox/namesilo/api-key
143 | sudo chmod 400 /root/silverbox/namesilo/api-key
144 | sudo chmod 500 /root/silverbox/namesilo
145 | ----
146 |
147 | ==== Building Container Image
148 | To build the container image run the following command:
149 |
150 | ----
151 | sudo docker build -t dns-updater --network common /root/silverbox/containers/dns-updater
152 | ----
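153 | 
154 | Before setting up the timer, you can optionally test the image by running the container manually once with
155 | verbose output (replace `{SB_SUBDOMAIN}` with your actual server public FQDN):
156 | 
157 | [subs="attributes+"]
158 | ----
159 | sudo docker run --rm --network common -v /root/silverbox/namesilo:/secrets dns-updater -v -k /secrets/api-key -a update-ip -d {SB_SUBDOMAIN}
160 | ----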
153 |
154 | ==== Automatic DNS Record Update
155 | To keep DNS record updated, a Systemd timer will periodically run disposable container
156 | from the image that was just built.
157 |
158 | Create the `/etc/systemd/system/update-dns-record.service` file with the following content:
159 |
160 | ./etc/systemd/system/update-dns-record.service
161 | [subs="attributes+"]
162 | ----
163 | [Unit]
164 | Description=Update DNS Host A record with the current external IP address
165 | Requires=docker.service
166 | After=docker.service
167 |
168 | [Service]
169 | Type=oneshot
170 | ExecStart=/usr/bin/docker run --rm --name dns-updater --network common --cpus="1" -v /root/silverbox/namesilo:/secrets dns-updater -k /secrets/api-key -a update-ip -d {SB_SUBDOMAIN} # <1>
171 | ----
172 | <1> Replace `{SB_SUBDOMAIN}` with your actual server public FQDN.
173 |
174 | You can run the service once to verify that it runs successfully:
175 |
176 | ----
177 | sudo systemctl daemon-reload
178 | sudo systemctl start update-dns-record.service
179 | ----
180 |
181 | Next, create the `/etc/systemd/system/update-dns-record.timer` file with the following content:
182 |
183 | ./etc/systemd/system/update-dns-record.timer
184 | ----
185 | [Unit]
186 | Description=Update DNS Host A record with the current external IP address
187 |
188 | [Timer]
189 | OnBootSec=5min # <1>
190 | OnUnitInactiveSec=30min # <2>
191 |
192 | [Install]
193 | WantedBy=timers.target
194 | ----
195 | <1> The first time, the timer runs 5 minutes after boot.
196 | <2> After the first run, the timer will run every 30 minutes.
197 | You can adjust this value depending on how volatile your public IP is.
198 |
199 | Enable and start the timer:
200 |
201 | ----
202 | sudo systemctl daemon-reload
203 | sudo systemctl enable update-dns-record.timer
204 | sudo systemctl start update-dns-record.timer
205 | ----
206 |
207 | You can do `sudo systemctl list-timers` to verify that the timer appears in the output and to check the time till next activation.
208 |
209 |
--------------------------------------------------------------------------------
/domain-name/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | The status of the DNS updater Systemd service is monitored by Monit.
3 |
4 | First, create the `/usr/local/etc/monit/scripts/is_systemd_unit_failed.sh` file with the following content:
5 |
6 | ./usr/local/etc/monit/scripts/is_systemd_unit_failed.sh
7 | [source,bash]
8 | ----
9 | #!/bin/sh
10 | # Print the unit's last start time (shown as the program output in Monit)
11 | systemctl show "$1" -p ExecMainStartTimestamp --value
12 | # Exit with status 1 if the unit is in the failed state, 0 otherwise
13 | systemctl is-failed --quiet "$1"
14 | if [ $? -eq 0 ]; then
15 |     exit 1
16 | else
17 |     exit 0
18 | fi
17 | ----
18 |
19 | And mark it as executable:
20 |
21 | ----
22 | sudo chmod u+x /usr/local/etc/monit/scripts/is_systemd_unit_failed.sh
23 | ----
24 |
25 | This script checks whether the given Systemd unit has failed and prints the last time it was executed.
26 |
27 | Next, create the `/etc/monit/conf.d/50-dns-updater` file with the following content:
28 |
29 | ./etc/monit/conf.d/50-dns-updater
30 | ----
31 | check program dns_updater with path "/usr/local/etc/monit/scripts/is_systemd_unit_failed.sh update-dns-record.service" every 30 cycles
32 | if status != 0 for 2 cycles then alert
33 | ----
34 |
35 | Restart Monit service:
36 |
37 | ----
38 | sudo systemctl restart monit
39 | ----
40 |
41 | Check Monit web interface and make sure that DNS updater monitoring is working.
42 |
43 |
--------------------------------------------------------------------------------
/firefly/firefly.adoc:
--------------------------------------------------------------------------------
1 | == Firefly III
2 | This section describes how to install and configure Firefly III (an open source personal finance manager) <> on the server.
3 |
4 | This section depends on the following sections: <>, <>, <>.
5 |
6 | include::overview.adoc[]
7 |
8 | include::installation.adoc[]
9 |
10 | include::monitoring.adoc[]
11 |
12 |
--------------------------------------------------------------------------------
/firefly/installation.adoc:
--------------------------------------------------------------------------------
1 | === Installation
2 | This section describes how to install and run Firefly III.
3 |
4 | ==== Preparing Directory Structure
5 | The very first step is to create directories that will be mapped inside the Docker containers:
6 |
7 | [subs="attributes+"]
8 | ----
9 | sudo mkdir -p /srv/firefly/db
10 | sudo mkdir /srv/firefly/uploads
11 | sudo chown {SB_WWW_DATA_UID} /srv/firefly/uploads # <1>
12 | sudo chmod 750 -R /srv/firefly
13 | ----
14 | <1> `{SB_WWW_DATA_UID}` is the UID of the `www-data` user inside the Firefly III container.
15 |
16 | All the Firefly III data (including database and uploaded files)
17 | will be stored under the `/srv/firefly` directory in the following way:
18 |
19 | db::
20 | The `db` subdirectory will store PostgreSQL database files.
21 | uploads::
22 | The `uploads` subdirectory will store uploaded files.
23 |
24 | NOTE: It is important to keep the `/srv/firefly` directory owned by root and with restrictive permissions,
25 | since some content inside it will be owned by the `www-data` (UID `{SB_WWW_DATA_UID}`) user from the Docker containers.
26 |
27 | ==== Preparing Images
28 | Create a directory for Firefly III containers files:
29 |
30 | ----
31 | sudo mkdir /root/silverbox/containers/firefly
32 | sudo chmod 700 /root/silverbox/containers/firefly
33 | ----
34 |
35 | Inside it, create the `docker-compose.yml` file with the following content:
36 |
37 | ./root/silverbox/containers/firefly/docker-compose.yml
38 | [source,yaml,subs="attributes+"]
39 | ----
40 | version: '3.8'
41 |
42 | networks:
43 | default:
44 | name: firefly
45 | driver: bridge
46 | ipam:
47 | config:
48 | - subnet: {SB_FIREFLY_SUBNET} # <1>
49 |
50 | services:
51 | firefly-db:
52 | container_name: firefly-db
53 | image: postgres:{SB_POSTGRES_VERSION} # <2>
54 | restart: on-failure:5
55 | shm_size: 256mb
56 | logging:
57 | driver: json-file
58 | options:
59 | max-size: 10mb
60 | max-file: '3'
61 | volumes:
62 | - /srv/firefly/db:/var/lib/postgresql/data
63 | environment:
64 | - POSTGRES_USER=firefly
65 | - POSTGRES_PASSWORD=\{POSTGRES_PASSWORD} # <3>
66 | - POSTGRES_DB=firefly
67 | - POSTGRES_INITDB_ARGS="--data-checksums"
68 |
69 | firefly-app:
70 | container_name: firefly-app
71 | image: jc5x/firefly-iii:{SB_FIREFLY_VERSION} # <4>
72 | restart: on-failure:5
73 | logging:
74 | driver: json-file
75 | options:
76 | max-size: 10mb
77 | max-file: '3'
78 | depends_on:
79 | - firefly-db
80 | volumes:
81 | - /srv/firefly/uploads:/var/www/html/storage/upload
82 | ports:
83 | - 127.0.0.1:{SB_FIREFLY_PORT}:8080/tcp # <5>
84 | environment:
85 | - DB_HOST=firefly-db
86 | - DB_PORT=5432
87 | - DB_CONNECTION=pgsql
88 | - DB_DATABASE=firefly
89 | - DB_USERNAME=firefly
90 | - DB_PASSWORD=\{POSTGRES_PASSWORD} # <6>
91 | - APP_KEY=\{APP_KEY} # <7>
92 | - SITE_OWNER=mail@example.com # <8>
93 | - TZ=America/Toronto # <9>
94 | - TRUSTED_PROXIES=**
95 | - APP_URL=https://firefly.{SB_INTERNAL_DOMAIN} # <10>
96 | - MAIL_MAILER=smtp # <11>
97 | - MAIL_HOST=smtp.example.com
98 | - MAIL_PORT=2525
99 | - MAIL_FROM=changeme@example.com
100 | - MAIL_USERNAME=foo
101 | - MAIL_PASSWORD=bar
102 | - MAIL_ENCRYPTION=tls
103 | ----
104 | <1> Replace `{SB_FIREFLY_SUBNET}` with the actual subnet you want to use for the Firefly III.
105 | <2> Replace `{SB_POSTGRES_VERSION}` with the actual latest `postgres` (Debian based) image version (can be checked at the Docker Hub).
106 | <3> Replace `\{POSTGRES_PASSWORD}` with some random password.
107 | <4> Replace `{SB_FIREFLY_VERSION}` with the actual latest `jc5x/firefly-iii` image version (can be checked at the Docker Hub).
108 | <5> Replace `{SB_FIREFLY_PORT}` with the actual port number you chose for Firefly III.
109 | <6> Replace `\{POSTGRES_PASSWORD}` with the same password as above.
110 | <7> Replace `\{APP_KEY}` with random alphanumeric string exactly 32 characters long.
111 | Such string can be obtained by running the following command: `head /dev/urandom | LANG=C tr -dc 'A-Za-z0-9' | head -c 32`.
112 | <8> Replace `mail@example.com` with your email address.
113 | <9> Replace `America/Toronto` with your preferred timezone. To check system timezone run: `cat /etc/timezone`.
114 | <10> Replace `{SB_INTERNAL_DOMAIN}` with the actual value.
115 | <11> This block of variables refers to email delivery configuration.
116 | Configure it accordingly to your email delivery settings.
117 | Refer to Firefly III documentation for more information.
118 |
119 | ==== Running Firefly III
120 | To start all the containers do:
121 |
122 | ----
123 | sudo docker compose -f /root/silverbox/containers/firefly/docker-compose.yml up -d
124 | ----
125 |
126 | Verify that all containers have started successfully and check logs for errors:
127 |
128 | ----
129 | sudo docker ps
130 | sudo docker logs firefly-db
131 | sudo docker logs firefly-app
132 | ----
133 |
134 | ==== Firefly III Cron
135 | The Firefly III container doesn't include cron or anything else for running periodic jobs.
136 | Therefore a Systemd timer will be used to trigger periodic jobs for Firefly III.
137 |
138 | Create the `/etc/systemd/system/firefly-iii-cron.service` file with the following content:
139 |
140 | ./etc/systemd/system/firefly-iii-cron.service
141 | [subs="attributes+"]
142 | ----
143 | [Unit]
144 | Description=Run Firefly III cron jobs
145 | Requires=docker.service
146 | After=docker.service
147 |
148 | [Service]
149 | Type=oneshot
150 | ExecStart=/usr/bin/docker exec --user www-data firefly-app /usr/local/bin/php /var/www/html/artisan firefly-iii:cron
151 | ----
152 |
153 | You can run the service once to verify that it runs successfully:
154 |
155 | ----
156 | sudo systemctl daemon-reload
157 | sudo systemctl start firefly-iii-cron.service
158 | ----
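159 | 
160 | You can then inspect the result with:
161 | 
162 | ----
163 | sudo systemctl status firefly-iii-cron.service
164 | ----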
159 |
160 | Next, create the `/etc/systemd/system/firefly-iii-cron.timer` file with the following content:
161 |
162 | ./etc/systemd/system/firefly-iii-cron.timer
163 | ----
164 | [Unit]
165 | Description=Run Firefly III cron jobs
166 |
167 | [Timer]
168 | OnBootSec=15min # <1>
169 | OnCalendar=daily # <2>
170 |
171 | [Install]
172 | WantedBy=timers.target
173 | ----
174 | <1> The first time, the timer runs 15 minutes after boot.
175 | <2> After the first run, the timer will run daily, as suggested in the Firefly III documentation.
176 |
177 | Enable and start the timer:
178 |
179 | ----
180 | sudo systemctl daemon-reload
181 | sudo systemctl enable firefly-iii-cron.timer
182 | sudo systemctl start firefly-iii-cron.timer
183 | ----
184 |
185 | You can do `sudo systemctl list-timers` to verify that the timer appears in the output and to check the time till next activation.
186 |
187 | ==== Automatic Containers Startup
188 | To start containers automatically (in the correct order)
189 | on boot create the `/etc/systemd/system/firefly-start.service` file with the following content:
190 |
191 | ./etc/systemd/system/firefly-start.service
192 | ----
193 | [Unit]
194 | Description=Start Firefly III
195 | Requires=docker.service
196 | After=docker.service
197 |
198 | [Service]
199 | Type=oneshot
200 | ExecStart=/usr/bin/docker compose -f /root/silverbox/containers/firefly/docker-compose.yml up -d
201 |
202 | [Install]
203 | WantedBy=multi-user.target
204 | ----
205 |
206 | Enable the service, so that it will be started on system boot:
207 |
208 | ----
209 | sudo systemctl daemon-reload
210 | sudo systemctl enable firefly-start.service
211 | ----
212 |
213 | ==== Adding DNS Record
214 | To add an internal DNS record for Firefly III, edit the
215 | `/etc/unbound/unbound.conf.d/dns-config.conf` file and add a `local-data` record
216 | pointing to the server IP `{SB_IP}`:
217 |
218 | ./etc/unbound/unbound.conf.d/dns-config.conf
219 | [source,yaml,subs="attributes+"]
220 | ----
221 | local-data: "firefly.{SB_INTERNAL_DOMAIN}. IN A {SB_IP}" # <1>
222 | ----
223 | <1> Replace `{SB_INTERNAL_DOMAIN}` and `{SB_IP}` with the actual values.
224 |
225 | Restart the Unbound server to apply the changes:
226 |
227 | ----
228 | sudo systemctl restart unbound.service
229 | ----
230 |
231 | ==== Adding Reverse Proxy Configuration
232 | To add Firefly III to the reverse proxy configuration edit the `/root/silverbox/containers/reverse-proxy/httpd.conf` file
233 | and add the following `VirtualHost` section to it:
234 |
235 | ./root/silverbox/containers/reverse-proxy/httpd.conf
236 | [source,apache,subs="attributes+"]
237 | ----
238 | # Firefly III
239 |
240 | ServerName firefly.{SB_INTERNAL_DOMAIN} # <1>
241 | ProxyPass "/" "http://127.0.0.1:{SB_FIREFLY_PORT}/" # <2>
242 | ProxyPassReverse "/" "http://127.0.0.1:{SB_FIREFLY_PORT}/"
243 |
244 | ----
245 | <1> Replace `{SB_INTERNAL_DOMAIN}` with the actual value.
246 | <2> Replace `{SB_FIREFLY_PORT}` in this and next line with the actual port number you've chosen for the Firefly III.
247 |
248 | NOTE: This `VirtualHost` section above doesn't include basic authentication configuration.
249 | This is deliberate, as Firefly III has its own authentication.
250 |
251 | Restart reverse proxy container to pick up new changes:
252 |
253 | ----
254 | sudo docker restart reverse-proxy
255 | ----
256 |
257 | You should now be able to access Firefly III at `https://firefly.{SB_INTERNAL_DOMAIN}`.
258 |
259 |
--------------------------------------------------------------------------------
/firefly/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | To monitor Firefly III status with Monit create the `/etc/monit/conf.d/70-firefly` file with the following content:
3 |
4 | ./etc/monit/conf.d/70-firefly
5 | [subs="attributes+"]
6 | ----
7 | # Containers status
8 | check program firefly_app with path "/usr/local/etc/monit/scripts/container_status.sh firefly-app .State.Status running"
9 | if status != 0 for 5 cycles then alert
10 |
11 | check program firefly_db with path "/usr/local/etc/monit/scripts/container_status.sh firefly-db .State.Status running"
12 | if status != 0 for 5 cycles then alert
13 |
14 | # HTTP
15 | check host firefly with address localhost every 5 cycles
16 | if failed port {SB_FIREFLY_PORT} protocol http request / status = 302 for 2 cycles then alert # <1>
17 | ----
18 | <1> Replace `{SB_FIREFLY_PORT}` with the actual value.
19 |
20 | Restart Monit and verify that Firefly III monitoring is working.
21 |
22 |
--------------------------------------------------------------------------------
/firefly/overview.adoc:
--------------------------------------------------------------------------------
1 | === Overview
2 | The Firefly III will be deployed using Docker Compose with two Docker containers:
3 | one is the database (PostgreSQL) and the other is the official Firefly III container
4 | (which contains Apache web server and PHP application server).
5 |
6 | Below is a diagram that shows high level overview of the Firefly III deployment:
7 |
8 | ----
9 | firefly.home.example.com
10 | | Firefly III Docker Network
11 | | --------------------------------------------------------
12 | | | ------------ ------------ |
13 | | HTTPS --------- | HTTP | | 5432/tcp | | |
14 | \------->| Reverse |------------->| Firefly III|---------->| PostgreSQL | |
15 | | Proxy | | | | | | |
16 | --------- | ------------ ------------ |
17 | | | | |
18 | | {/var/www/html/storage/upload} | |
19 | | | {/var/lib/postgresql/data} |
20 | | | | |
21 | | v v |
22 | | /srv/firefly/uploads /srv/firefly/db |
23 | | |
24 | --------------------------------------------------------
25 | ----
26 |
27 | NOTE: In this diagram `home.example.com` is used as an example value for your `{SB_INTERNAL_DOMAIN}`.
28 |
29 | NOTE: In the diagram above, a path inside curly braces indicates a path as it is seen inside the Docker container,
30 | while a path without curly braces indicates the real path on the host file system.
31 |
32 | Both containers are stateless (i.e. don't contain any important data inside the container),
33 | since all user data is stored on the host file system and mounted inside containers.
34 | This way containers can be safely deleted and re-deployed, which makes upgrades very easy.
35 |
36 | In this setup, Firefly III will only be accessible from the internal network via the reverse proxy
37 | (as configured in <>).
38 | Not exposing Firefly III to the internet is a deliberate decision, partially due to security concerns,
39 | but also for simplicity of the setup and because it seems to me that having it publicly accessible doesn't add much convenience.
40 | If you wish Firefly III to be accessible from the internet, you can either implement a setup similar to what was done for Nextcloud,
41 | or rely on VPN/WireGuard to get access to the internal network from the internet.
42 |
43 |
--------------------------------------------------------------------------------
/git-server/configuration.adoc:
--------------------------------------------------------------------------------
1 | === Configuration
2 | First, install `git` on the server (if you don't have it installed already):
3 |
4 | ----
5 | sudo apt update
6 | sudo apt install git
7 | ----
8 |
9 | Create `{SB_GIT_GROUP}` group:
10 |
11 | [subs="attributes+"]
12 | ----
13 | sudo addgroup --system {SB_GIT_GROUP}
14 | ----
15 |
16 | NOTE: A separate group is needed so that it would be possible to have more than just one user with access to Git repositories.
17 | This can be helpful, for instance, if you need to provide another user access under different permissions
18 | (for example, read-only access to repositories, as described in the next section).
19 |
20 | Create a directory where all of the Git repositories will reside, and assign proper ownership and permissions:
21 |
22 | [subs="attributes+"]
23 | ----
24 | sudo mkdir /srv/git
25 | sudo chgrp {SB_GIT_GROUP} /srv/git
26 | sudo chmod 750 /srv/git
27 | ----
28 |
29 | ==== Create Git User Account
30 | In this model, all repositories under `/srv/git` directory will be accessible by `{SB_GIT_USER}` account created on the server.
31 | To give someone read/write access to Git repositories a separate SSH key pair needs to be generated and its public key
32 | added to `{SB_GIT_USER}` authorized SSH keys list.
33 |
34 | Create `{SB_GIT_USER}` user account:
35 |
36 | [subs="attributes+"]
37 | ----
38 | sudo adduser --disabled-password --gecos "" --shell `which git-shell` {SB_GIT_USER}
39 | ----
40 |
41 | NOTE: Having `git-shell` as the `{SB_GIT_USER}` user's shell will only allow executing Git-related commands
42 | over SSH and nothing else (no SCP either).
43 | You can additionally customize the message that a user will see on an attempt to SSH as the `{SB_GIT_USER}` user.
44 | To do this, create an executable script `git-shell-commands/no-interactive-login` under the `{SB_GIT_USER}` user's home directory,
45 | that prints the desired message and exits.
46 | For more details see https://git-scm.com/docs/git-shell.
47 |
48 | Make `{SB_GIT_USER}` user a member of the `{SB_GIT_GROUP}` group:
49 |
50 | [subs="attributes+"]
51 | ----
52 | sudo usermod -a -G {SB_GIT_GROUP} {SB_GIT_USER}
53 | ----
54 |
55 | Login as `{SB_GIT_USER}` user and create `.ssh` directory and `authorized_keys` file with proper permissions,
56 | as well as `.hushlogin` file to suppress default Ubuntu MOTD (message of the day) banner:
57 |
58 | [subs="attributes+"]
59 | ----
60 | sudo su --shell $SHELL {SB_GIT_USER}
61 | cd
62 | mkdir .ssh && chmod 700 .ssh
63 | touch .ssh/authorized_keys && chmod 600 .ssh/authorized_keys
64 | touch .hushlogin
65 | exit
66 | ----
67 |
68 | As `root` user edit the `/etc/ssh/sshd_config` file and add `{SB_GIT_USER}` user to the list of allowed users:
69 |
70 | ./etc/ssh/sshd_config
71 | [subs="attributes+"]
72 | ----
73 | AllowUsers {SB_USER} {SB_GIT_USER}
74 | ----
75 |
76 | Restart `sshd` service for the changes to take effect: `sudo systemctl restart sshd.service`.
77 |
78 | ===== Read-Only Access
79 | As mentioned above, the `{SB_GIT_USER}` user account will have read/write access to all Git repositories on the server.
80 | However, sometimes it might be necessary to give someone read-only access,
81 | so that they can read (clone) from all repositories but not write (push).
82 |
83 | One way this can be achieved is by creating one more account on the server,
84 | for example `gitro` (where `ro` stands for read only), and adding this account to the `{SB_GIT_GROUP}` group.
85 | Since repositories are owned by the `{SB_GIT_USER}` user (as you will see in the next section) and not `gitro`,
86 | `gitro` won't be able to write.
87 | But because `gitro` belongs to the `{SB_GIT_GROUP}` group, it will be able to read.
88 |
89 | To create such `gitro` account follow the same steps as above for creating `{SB_GIT_USER}` account.
90 | You will also need to generate SSH keys for the `gitro` account in the same way as for `{SB_GIT_USER}` account
91 | (as described in the sections below).
92 |
93 | Additionally, you can put individual repositories in read-only mode for everyone by using Git server-side hooks
94 | (such as the `pre-receive` hook).
95 | For more information on how to do this, check the Git documentation.
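96 | 
97 | As a rough sketch (assuming a bare repository at `/srv/git/example`), a `pre-receive` hook that rejects
98 | all pushes could look like this:
99 | 
100 | ./srv/git/example/hooks/pre-receive
101 | [source,bash]
102 | ----
103 | #!/bin/sh
104 | # Reject every push to keep this repository read-only for everyone
105 | echo "This repository is read-only"
106 | exit 1
107 | ----
108 | 
109 | Don't forget to make the hook file executable (e.g. `chmod +x`).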
96 |
97 | ==== Creating Git Repository
98 | To create a new Git repository on the server, first create a directory for it
99 | (the repository is called `example` in this case).
100 | Make this directory owned by the `{SB_GIT_USER}` user and read-only accessible by the members of `{SB_GIT_GROUP}` group:
101 |
102 | [subs="attributes+"]
103 | ----
104 | sudo mkdir /srv/git/example
105 | sudo chown {SB_GIT_USER}:{SB_GIT_GROUP} /srv/git/example
106 | sudo chmod 750 /srv/git/example
107 | ----
108 |
109 | Login as `{SB_GIT_USER}` user and initialize empty Git repository:
110 |
111 | [subs="attributes+"]
112 | ----
113 | sudo su --shell $SHELL {SB_GIT_USER}
114 | cd /srv/git/example
115 | git init --bare --shared=0640 # <1>
116 | ----
117 | <1> The `--shared=0640` argument means that the repository will be shared in such a way
118 | that it is writeable by owner (`{SB_GIT_USER}` user), and readable (and only readable) by anyone in the `{SB_GIT_GROUP}` group.
119 | See `man git-init` for more information.
120 |
121 | ==== Providing Access to Git
122 | Git over SSH uses SSH keys for authentication.
123 | To provide access to someone to all Git repositories on the server (under either `{SB_GIT_USER}` or `gitro` user, if you created one),
124 | this person's public key must be added to the list of authorized SSH keys for the `{SB_GIT_USER}`
125 | (or `gitro`, but this section will assume `{SB_GIT_USER}`) account.
126 | Below is an example of how to give yourself read/write access from your client PC to all Git repositories on the server.
127 |
128 | First, generate a new key pair on your client PC:
129 |
130 | ----
131 | ssh-keygen -t ed25519 -f ~/.ssh/silverbox-git -C "Silverbox Git key"
132 | ----
133 |
134 | This will generate a pair of keys: private `~/.ssh/silverbox-git` and public `~/.ssh/silverbox-git.pub`.
135 |
136 | Copy generated public key to the server:
137 |
138 | [subs="attributes+"]
139 | ----
140 | scp ~/.ssh/silverbox-git.pub $USER@{SB_IP}:.
141 | ----
142 |
143 | Login to the server as your user and run the following command to add the public key to the list of authorized keys for `{SB_GIT_USER}` user:
144 |
145 | [subs="attributes+"]
146 | ----
147 | sudo bash -c "printf '%s ' 'no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty' | cat - /home/$USER/silverbox-git.pub >> /home/{SB_GIT_USER}/.ssh/authorized_keys" # <1>
148 | rm silverbox-git.pub
149 | ----
150 | <1> This also disables SSH tunneling, X forwarding
151 | (although it should be disabled in your sshd config if you followed this guide) and PTY for the `{SB_GIT_USER}` user.
152 |
153 | On your client PC edit the `~/.ssh/config` file and add the following:
154 |
155 | [subs="attributes+"]
156 | ----
157 | host silverbox-git
158 | HostName {SB_IP} # <1>
159 | IdentityFile ~/.ssh/silverbox-git
160 | User {SB_GIT_USER}
161 | ----
162 | <1> Replace this with your server IP address or hostname.
163 |
164 | Now, to clone the example repository, run:
165 |
166 | ----
167 | git clone silverbox-git:/srv/git/example
168 | ----
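169 | 
170 | Similarly, to push an already existing local repository to the server, add it as a remote
171 | (adjust the branch name if yours differs):
172 | 
173 | ----
174 | git remote add origin silverbox-git:/srv/git/example
175 | git push -u origin master
176 | ----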
169 |
170 |
--------------------------------------------------------------------------------
/git-server/git-server.adoc:
--------------------------------------------------------------------------------
1 | == Git Server
2 | This section describes how to configure the server to host some private Git repositories.
3 |
4 | Having your own private Git repositories can be very helpful for keeping configuration files organized,
5 | notes, personal projects or forks that you prefer to keep private.
6 | Keeping your Git repositories locally on the server (as opposed to cloud services such as Github or Gitlab)
7 | has an advantage of being able to access them even when the cloud service or the internet connection is down.
8 | Server backups (described in section <>) can be utilized for backing up Git repositories.
9 |
10 | include::overview.adoc[]
11 |
12 | include::configuration.adoc[]
13 |
14 |
--------------------------------------------------------------------------------
/git-server/overview.adoc:
--------------------------------------------------------------------------------
1 | === Overview
2 | There are many ways to host Git repositories.
3 | One of the approaches is to host a complete collaborative software development platform that usually includes
4 | version control, issue tracking, documentation (e.g. wiki), code review, CI/CD pipelines,
5 | fine-grained permission control etc.
6 | Some of the popular open-source options include Gitlab, Gitea and Gogs.
7 |
8 | However, hosting a full-blown platform like this has its disadvantages:
9 | it increases the complexity of the setup and hardware resource consumption,
10 | requires maintenance to keep the platform secure and up-to-date,
11 | complicates configuration and "vendor-locks" you into a specific platform.
12 | I find that the extra complexity of running such platforms is hardly justified,
13 | as for a home setup most of the extra features provided are rarely needed.
14 |
15 | Instead, this guide describes how to run bare Git repositories accessible over SSH
16 | (that has been configured in the <> section).
17 | This approach doesn't depend on any third-party solutions and only requires Git and SSH to function.
18 | It is very simple, transparent and maintenance-free.
19 |
20 | The configuration described below is very similar to the one described in the Git Book's section
21 | on setting up the Git server <>.
22 |
23 |
--------------------------------------------------------------------------------
/introduction/document-overview.adoc:
--------------------------------------------------------------------------------
1 | === Document Overview
2 | Please note that this document is _not_ an exhaustive guide on building a home server
3 | that deeply explores all different options and possibilities, and explains every single step in great detail.
4 | It is more or less just documentation for the setup I chose, and it can be opinionated at times.
5 | Whenever there was a choice, I leaned towards secure and simple solutions, rather than fancy and "feature-rich".
6 |
7 | Following this guide is probably not the most efficient way to build a home server,
8 | and you'd be better off finding some Ansible playbooks that will do everything for you,
9 | or even just a virtual machine image with all the services already configured.
10 | However, I think reading through this guide can be useful if you want to understand how everything works,
11 | especially if you are planning on running and maintaining your server for a long time.
12 |
13 | While some decisions I had to make were influenced by the specific hardware I use (see <> section),
14 | where possible, this document tries to stay hardware agnostic.
15 | You don't have to use the exact same hardware as I did (you can even do it all in a virtual machine).
16 | I just wouldn't recommend using Raspberry Pi for performance reasons.
17 |
18 | ==== Required Knowledge
19 | This document expects the reader to have some GNU/Linux experience and at least some knowledge in the following areas:
20 |
21 | - Be comfortable in the GNU/Linux terminal and familiar with SSH.
22 | - Understand simple shell scripting (e.g. `sh` or `bash` scripts).
23 | - Be familiar with basic GNU/Linux command line utilities, such as: `sudo`, `cp/mv/rm`, `find`, `grep`, `sed` etc.
24 | - Be familiar with `man` pages and be able to explore them on your own.
25 | - Be at least somewhat familiar with Docker.
26 | - Be at least somewhat familiar with systemd.
27 |
28 | The document doesn't try to explain everything, as I believe it is simply impractical and there is already a lot of good documentation written.
29 | Instead, it provides references to the existing documentation where needed.
30 |
31 | ==== Structure
32 | The document is split into few top-level chapters, where each chapter (with a few exceptions) represents a separate,
33 | standalone feature, for example: NFS Server, Nextcloud, Torrent Client etc.
34 | While it is possible to skip some chapters, some may have a dependency on other chapters.
35 |
36 | The document is structured more or less in the order of the configuration.
37 |
38 | ==== Formatting and Naming Conventions
39 | In this document, parts of the sentence that require extra attention are marked with *bold* font.
40 |
41 | Inline system commands, arguments and file names are formatted with `monospace` font.
42 |
43 | Commands that need to be executed in a shell are formatted in monospace blocks.
44 | Command output is formatted as italic (if there is any output).
45 |
46 | For example:
47 | [subs="attributes+,macros+"]
48 | ----
49 | some-command --with --some --arguments
50 | pass:q[_example command output_]
51 | ----
52 |
53 | When a file needs to be edited, the file content is formatted in a similar monospace block.
54 | However, in this case the block will also have header with a file name, indicating what file is edited:
55 |
56 | .example-file.txt
57 | ----
58 | File content goes here
59 | ----
60 |
61 | By default, all parameters that are specific to the concrete setup are displayed as placeholders in curly braces,
62 | for example: `{SB_IP}` is what you should replace with your server IP address.
63 | However, you can generate a version of this document where all such placeholders are replaced with the actual values you want;
64 | more on this in the next section.
65 |
66 | There are also few blocks that are used to draw attention to a specific statement:
67 |
68 | NOTE: This is a note.
69 |
70 | TIP: This is some useful tip.
71 |
72 | IMPORTANT: This is very important point.
73 |
74 | WARNING: This is a warning.
75 |
76 | In the document, the server itself is referred to as either _"the server"_ or _"silverbox"_.
77 | footnote:[The name _"silverbox"_ originates from some Intel NUCs visual appearance - literally a little silver box. ]
78 | When discussing client(s) that communicate with the server, the client device is usually referred to as _"client PC"_,
79 | even though it could be a laptop, tablet, smartphone or any other device.
80 |
--------------------------------------------------------------------------------
/introduction/generating-custom-doc.adoc:
--------------------------------------------------------------------------------
1 | [[generating_custom_document]]
2 | === Generating Custom Document
3 | This document contains a lot of placeholder values that will have to be replaced with the actual values,
4 | specific to your setup. Some examples of such values are host names, IP addresses, subnets, usernames etc.
5 | It may be cumbersome to manually keep track of what needs to be replaced with what,
6 | especially when copying scripts or configuration files.
7 |
8 | Fortunately, you can generate your own version of this document
9 | where all placeholders will be automatically replaced with the actual values that you want.
10 |
11 | This can easily be done in three steps using Docker:
12 |
13 | . Get the source code of this document from Git:
14 | +
15 | ----
16 | git clone https://github.com/ovk/silverbox.git
17 | ----
18 |
19 | . Edit `silverbox/parameters.adoc` file and replace placeholder values with the values you want.
20 | . Run disposable Docker container with Asciidoctor to compile the document to desired output:
21 | .. For HTML output:
22 | +
23 | ----
24 | docker run -it --rm -v $(pwd)/silverbox:/documents asciidoctor/docker-asciidoctor asciidoctor silverbox-server.adoc
25 | ----
26 |
27 | .. For PDF output:
28 | +
29 | ----
30 | docker run -it --rm -v $(pwd)/silverbox:/documents asciidoctor/docker-asciidoctor asciidoctor-pdf silverbox-server.adoc
31 | ----
32 |
33 | This should produce an output file (`silverbox-server.html` or `silverbox-server.pdf`) in the `silverbox` directory,
34 | with all the placeholders replaced with your values.
35 |
36 | Now you can mostly just copy-paste code snippets without having to manually edit them first.
37 |
38 |
--------------------------------------------------------------------------------
/introduction/getting-involved.adoc:
--------------------------------------------------------------------------------
1 | === Getting Involved
2 | If you find a typo or error in this document, or if you think that some part could be explained in more detail,
3 | updated, or improved in any way - please open an issue at https://github.com/ovk/silverbox/issues or make a pull request.
4 |
5 |
--------------------------------------------------------------------------------
/introduction/introduction.adoc:
--------------------------------------------------------------------------------
1 | == Introduction
2 |
3 | include::document-overview.adoc[]
4 |
5 | include::generating-custom-doc.adoc[]
6 |
7 | include::getting-involved.adoc[]
8 |
9 | include::license.adoc[]
10 |
11 |
--------------------------------------------------------------------------------
/introduction/license.adoc:
--------------------------------------------------------------------------------
1 | === License
2 | This document is licensed under Creative Commons Attribution-NonCommercial 4.0 International (CC BY-NC 4.0).
3 |
4 | For more details see:
5 |
6 | - https://creativecommons.org/licenses/by-nc/4.0
7 | - https://creativecommons.org/licenses/by-nc/4.0/legalcode
8 |
--------------------------------------------------------------------------------
/maintenance/keeping-system-clean.adoc:
--------------------------------------------------------------------------------
1 | === Keeping System Clean
2 | This section gives some ideas on how to keep the system clean
3 | and how to prevent accumulation of unnecessary or obsolete information.
4 |
5 | ==== Cleaning System Packages
6 | To remove packages that are no longer required, do:
7 |
8 | ----
9 | sudo apt autoremove
10 | ----
11 |
12 | ==== Cleaning Docker
13 | To view how much space is used by Docker:
14 |
15 | ----
16 | sudo docker system df
17 | ----
18 |
19 | A lot of unused Docker images can accumulate after upgrades.
20 | To remove all dangling images do:
21 |
22 | ----
23 | sudo docker image prune
24 | ----
25 |
26 | This, however, only removes dangling images and leaves images that are not dangling but may still be unused.
27 | All unused images can be removed with the `-a` flag,
28 | but this is dangerous as some images that are _not used_ at the moment
29 | can still be required later (for example, the `dns-updater` image).
30 |
31 | One solution is to remove all currently unused images semi-manually:
32 |
33 | ----
34 | sudo docker rmi `sudo docker images -q nextcloud:*`
35 | sudo docker rmi `sudo docker images -q httpd:*`
36 | sudo docker rmi `sudo docker images -q postgres:*`
37 | ----
38 |
39 | NOTE: This will generate errors and skip deletion of images that are currently in use.
40 |
41 | To clean Docker build cache do:
42 |
43 | ----
44 | sudo docker builder prune
45 | ----
46 |
47 | ==== Cleaning and Minimizing Logs
48 |
49 | ===== Cleaning Old System Logs
50 | System logs will grow over time.
51 | To check the size of all Journald logs:
52 |
53 | ----
54 | journalctl --disk-usage
55 | ----
56 |
57 | Journald logs can be easily cleaned by size:
58 |
59 | ----
60 | sudo journalctl --vacuum-size=500M
61 | ----
62 |
63 | or by time:
64 |
65 | ----
66 | sudo journalctl --vacuum-time=2years
67 | ----
68 |
69 | ===== Adjusting Journald Configuration
70 | In the default Journald configuration in Ubuntu, the Journald messages are also forwarded to syslog,
71 | which is unnecessary (unless you use specific tools that rely on that).
72 | This can be disabled by setting the `ForwardToSyslog` parameter to `no` in the `/etc/systemd/journald.conf` file.
73 |
74 | Additionally, to potentially reduce writes to disk you can increase the `SyncIntervalSec` parameter
75 | in the `/etc/systemd/journald.conf` file.
76 | This parameter controls how frequently Journald messages are synced to disk,
77 | so only increase it if the server is connected to a reliable UPS and unexpected shutdowns are unlikely.
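
For example, the relevant part of the `/etc/systemd/journald.conf` file after both changes could look like this
(the 15 minute sync interval is only an illustrative value, not a recommendation):

./etc/systemd/journald.conf
----
[Journal]
ForwardToSyslog=no
SyncIntervalSec=15m
----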
78 |
79 | ==== Disabling Motd-News
80 | By default, Ubuntu will fetch news daily to show them in the message of the day (motd),
81 | which I find rather annoying and unnecessarily flooding the logs.
82 | To disable it, edit the `/etc/default/motd-news` file and change the `ENABLED` parameter to `0`.
83 | While this removes news from the motd, it doesn't stop `motd-news` timer.
84 | To stop and disable the timer, do:
85 |
86 | ----
87 | sudo systemctl stop motd-news.timer
88 | sudo systemctl disable motd-news.timer
89 | ----
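
For reference, after this change the relevant line in the `/etc/default/motd-news` file should look like this:

./etc/default/motd-news
----
ENABLED=0
----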
90 |
91 |
--------------------------------------------------------------------------------
/maintenance/maintenance.adoc:
--------------------------------------------------------------------------------
1 | == Maintenance
2 | This section describes some basic maintenance procedures that will hopefully help to keep the server up to date,
3 | healthy and running.
4 | However, don't consider this to be a complete checklist.
5 |
6 | include::keeping-system-up-to-date.adoc[]
7 |
8 | include::monitoring.adoc[]
9 |
10 | include::keeping-system-clean.adoc[]
11 |
12 |
--------------------------------------------------------------------------------
/maintenance/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | This section gives some ideas on how to monitor system health and overall status.
3 |
4 | ==== Monit
5 | Monit has a lot of useful information about the system and running services,
6 | and it is worth checking from time to time.
7 |
8 | This information can either be viewed in the Monit web interface, or via the `sudo monit status` command.
9 |
10 | ==== Logs
11 | Logs from the system and different services can provide a lot of useful information
12 | and should be checked periodically for anomalies and errors.
13 |
14 | ===== System Logs
15 | Ubuntu collects system logs with Systemd Journald service, so most of the logs can be viewed using `journalctl` tool.
16 |
17 | For example, to view all log messages since system boot with priority level warning or above do:
18 |
19 | ----
20 | journalctl -p warning -b
21 | ----
22 |
23 | For more examples on how to use `journalctl` to view logs refer to `journalctl` documentation.
24 |
25 | Some logs are also written into files under `/var/log` directory
26 | and can be viewed with any text editor or with `cat`/`tail` commands.
27 |
28 | ===== Systemd Service Logs
29 | Logs from Systemd services can be viewed with the `journalctl` command as well.
30 | For example, to view Docker service logs do:
31 |
32 | ----
33 | journalctl -u docker.service
34 | ----
35 |
36 | ===== Docker Logs
37 | By convention, processes running inside Docker containers write log output to the standard output stream.
38 | These logs are then collected by the Docker engine and can be viewed with the `docker logs` command.
39 | For example, to view Nextcloud's PostgreSQL container logs do:
40 |
41 | ----
42 | sudo docker logs nextcloud-db
43 | ----
44 |
45 | ===== Nextcloud Logs
46 | Apart from the Nextcloud containers logs,
47 | Nextcloud maintains its own log that can be viewed on the _Settings -> Logging_ page.
48 |
49 | ==== Disk Health
50 | Monitoring disk status and health is crucial in preventing data loss and diagnosing performance issues.
51 | It is especially important for SSDs since every SSD cell has a limited number of times it can be erased and written.
52 |
53 | Some information about disks and file systems is available in Monit.
54 |
55 | To view how much data was read/written to the disk since system boot you can use the `vmstat -d` command.
56 | The output of this command is in sectors rather than bytes.
57 | To find the sector size, check the output of the `sudo fdisk -l` command.
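
For example, assuming 512-byte sectors, the approximate amount of data written to the `sda` disk since boot
can be computed like this (just a convenience one-liner; adjust the device name and sector size to your system):

----
vmstat -d | awk '$1 == "sda" { printf "%.1f GiB written\n", $8 * 512 / 2^30 }'
----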
58 |
59 | NOTE: It appears that the system counts discarded blocks
60 | (i.e. free blocks reported to SSD when `fstrim` is done, by default once a week) as writes,
61 | thus inflating total sectors written count as reported by the `vmstat -d` command.
62 | This means that the `vmstat -d` output will only be accurate since reboot until the first `fstrim` run.
63 |
64 | To view what processes are utilizing the disk you can use `iotop` tool, for example:
65 |
66 | ----
67 | sudo iotop -aoP
68 | ----
69 |
70 | To install `iotop` do: `sudo apt install iotop`.
71 |
72 | To retrieve SMART (Self-Monitoring, Analysis and Reporting Technology) data from the disk you can use the `smartctl` tool
73 | from the `smartmontools` package.
74 |
75 | To read SMART data from the disk do:
76 |
77 | ----
78 | sudo smartctl -a /dev/sda # <1>
79 | ----
80 | <1> Replace `/dev/sda` with the actual disk device.
81 |
82 | The actual output of this command will depend on the disk model and manufacturer.
83 | Usually it has a lot of useful information such as total number of blocks written, media wearout, errors, etc.
84 |
85 | However, the output from `smartctl` is only accurate if the disk is present in the `smartctl` database so that
86 | SMART fields can be decoded and interpreted correctly.
87 | Usually the database has most of the consumer SSDs; however, Ubuntu ships an extremely outdated version of this database,
88 | so there is a good chance your disk won't be there.
89 | If in the `smartctl` output you see a line similar to this: `Device is: Not in smartctl database`,
90 | it means your disk is not in the current database and you cannot really trust the output.
91 |
92 | Normally the `smartctl` database can be easily updated using the `update-smart-drivedb` script; however,
93 | for dubious reasons Ubuntu package maintainers decided not to include this script in the `smartmontools` package.
94 | Fortunately, this database is just a single file that can be downloaded from the `smartmontools` GitHub mirror:
95 |
96 | ----
97 | wget https://github.com/mirror/smartmontools/raw/master/drivedb.h
98 | ----
99 |
100 | This new database file can then be passed to `smartctl` like so:
101 |
102 | ----
103 | sudo smartctl -a /dev/sda -B drivedb.h
104 | ----
105 |
106 | It is very important to monitor SSD media wearout and quickly find and diagnose abnormally high write rates
107 | to prevent possible unexpected data loss and disk failure.
108 | Even though modern SSDs are quite durable and smart about wear leveling,
109 | one service writing tons of logs non-stop could be enough to wear the disk prematurely.
110 |
111 |
--------------------------------------------------------------------------------
/monitoring/login-notification.adoc:
--------------------------------------------------------------------------------
1 | === Login Notification
2 | If you want to get an email notification for each login to the server,
3 | create the `/usr/local/sbin/login-notify.sh` file with the following content:
4 |
5 | ./usr/local/sbin/login-notify.sh
6 | [source,bash]
7 | ----
8 | #!/bin/sh
9 | TRUSTED_HOSTS="" # <1>
10 |
11 | [ "$PAM_TYPE" = "open_session" ] || exit 0
12 |
13 | for i in $TRUSTED_HOSTS; do
14 | if [ "$i" = "$PAM_RHOST" ]; then
15 | exit 0
16 | fi
17 | done
18 |
19 | MSG="Subject: Login Notification\n\n\
20 | Date: `date`\n\
21 | User: $PAM_USER\n\
22 | Ruser: $PAM_RUSER\n\
23 | Rhost: $PAM_RHOST\n\
24 | Service: $PAM_SERVICE\n\
25 | TTY: $PAM_TTY\n"
26 |
27 | echo "$MSG" | ssmtp root
28 | ----
29 | <1> You can set `TRUSTED_HOSTS` variable to a space-delimited list of addresses
30 | for logins from which you don't want to generate notifications.
31 |
32 | Mark this file as executable:
33 |
34 | ----
35 | sudo chmod u+x /usr/local/sbin/login-notify.sh
36 | ----
37 |
38 | Edit the `/etc/pam.d/common-session` file and append the following line to it:
39 |
40 | ./etc/pam.d/common-session
41 | ----
42 | session optional pam_exec.so /usr/local/sbin/login-notify.sh
43 | ----
44 |
45 | For some reason, the `common-session` file is not included in `/etc/pam.d/sudo`
46 | (even though the relevant Debian bug was closed footnote:[https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=519700]).
47 | So if you also want to get notifications for the `sudo` command, you will need to append the same line
48 | to the `/etc/pam.d/sudo` file as well.
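
That is, the appended line in the `/etc/pam.d/sudo` file would look exactly the same:

./etc/pam.d/sudo
----
session optional pam_exec.so /usr/local/sbin/login-notify.sh
----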
49 |
50 |
--------------------------------------------------------------------------------
/monitoring/monitoring.adoc:
--------------------------------------------------------------------------------
1 | == Monitoring
2 | A monitoring system is needed to monitor the server's hardware, system, environment and services,
3 | and send notifications if something goes wrong or looks suspicious.
4 | As mentioned in the <> section, the server needs to be as automated as possible,
5 | so the monitoring system needs to be as autonomous as possible, and not require any supervision or maintenance.
6 |
7 | A number of solutions were considered for the server monitoring (and quite a few of them I tried),
8 | but most of them were discarded due to being too complicated to install and configure (and thus being overkill for a home server),
9 | or due to having too many dependencies and bloat, or being outdated and not well supported,
10 | or the opposite - changing at an insane pace.
11 | Some of the solutions that were considered are: Nagios, Zabbix, Netdata, Munin, Cacti
12 | and some combination of Prometheus/Grafana/Collectd/Graphite/Telegraf/InfluxDB.
13 |
14 | Eventually, Monit was chosen as the main monitoring tool.
15 | While it may seem very limited at first glance, it is actually quite a powerful tool to monitor a wide range of parameters
16 | and react to specific conditions.
17 | Monit is easy to extend and customize, it is maintained, very lightweight, with minimal dependencies and very low
18 | runtime overhead.
19 | The only significant downside is that it has no support for time series, and thus it is not possible to see
20 | historical data, graphs or analyze trends.
21 |
22 | In addition to Monit, a simple script was created to generate and deliver regular emails
23 | with system summary information.
24 |
25 | include::monit.adoc[]
26 |
27 | include::summary-email.adoc[]
28 |
29 | include::login-notification.adoc[]
30 |
31 |
--------------------------------------------------------------------------------
/monitoring/summary-email.adoc:
--------------------------------------------------------------------------------
1 | === Summary Email
2 | The summary email is just an email that is delivered automatically at regular intervals (weekly) and contains
3 | some system information.
4 |
5 | While there are tools that do a similar job (for example _logcheck_ and _logwatch_),
6 | I found them quite obsolete and noisy, with almost no documentation.
7 |
8 | [[email_content_generation]]
9 | ==== Email Content Generation
10 | The email content is generated with a script, which is invoked by a Systemd timer.
11 | Here is a working example of such script (modify it to suit your needs):
12 |
13 | ./usr/local/sbin/system-summary.sh
14 | [source,bash]
15 | ----
16 | #!/bin/sh
17 |
18 | if [ $# -lt 1 ]; then
19 | PERIOD="week"
20 | else
21 | PERIOD="$1"
22 | fi
23 |
24 | case $PERIOD in
25 | day | week | month)
26 | ;;
27 | *)
28 | echo "Unknown time period: $PERIOD. Defaulting to week." 1>&2
29 | PERIOD="week"
30 | ;;
31 | esac
32 |
33 | SINCE_DATE=`date --date="1 $PERIOD ago" +"%F %T"`
34 |
35 | MAIN_DISK=$(readlink -f "/dev/disk/by-uuid/{PART_UUID}") # <1>
36 |
37 | echo "Subject: System Summary Report ($PERIOD)"
38 | echo ""
39 | echo "Report Period: $PERIOD"
40 | echo "Report Generated: $(date)"
41 | echo "Uptime: $(uptime)"
42 | echo "Memory Usage: RAM: $(free -m | grep Mem | awk '{print $3/$2 * 100}')%, Swap: $(free -m | grep Swap | awk '{print $3/$2 * 100}')%"
43 | echo "Disk (main): Temp: $(hddtemp -n SATA:"$MAIN_DISK"), Health: $(smartctl -H "$MAIN_DISK" | grep overall-health | sed 's/^.\+:\s\+//')"
44 |
45 | echo "--------------------------------------------------------------------------------"
46 | df -h
47 |
48 | echo "--------------------------------------------------------------------------------"
49 | echo "Temperatures:"
50 | sensors -A
51 |
52 | echo "--------------------------------------------------------------------------------"
53 | echo "Top CPU:"
54 | ps -eo pid,user,%cpu,%mem,cmd --sort=-%cpu | head
55 |
56 | echo "--------------------------------------------------------------------------------"
57 | echo "Top RAM:"
58 | ps -eo pid,user,%cpu,%mem,cmd --sort=-%mem | head
59 |
60 | echo "--------------------------------------------------------------------------------"
61 | echo "SSH logins during this $PERIOD:"
62 | last -s "$SINCE_DATE"
63 |
64 | echo "--------------------------------------------------------------------------------"
65 | echo "Last user logins:"
66 | lastlog | grep -iv "Never logged in"
67 |
68 | echo "--------------------------------------------------------------------------------"
69 | echo "Logged errors:"
70 | journalctl --since "$SINCE_DATE" -p err --no-pager
71 | ----
72 | <1> Replace `\{PART_UUID}` with UUID of your `/boot` partition (can be copied from the `/etc/fstab` file).
73 |
74 | NOTE: When this script is called without arguments it will generate weekly summary.
75 | It can also be called with an argument, such as: `day`, `week` or `month`, to generate summary for the specific period.
76 |
77 | Save this file as `/usr/local/sbin/system-summary.sh` and mark it as executable:
78 |
79 | ----
80 | sudo chmod u+x /usr/local/sbin/system-summary.sh
81 | ----
82 |
83 | To verify that it works, run `sudo system-summary.sh` and check the output.
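
For example, to preview a report for the last day instead of the default week:

----
sudo system-summary.sh day
----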
84 |
85 | ==== Email Delivery
86 | To send emails the `ssmtp` program is used.
87 | This is an extremely lightweight tool with minimal dependencies that sends mail to a configured mail server.
88 |
89 | NOTE: Since `ssmtp` is not an actual mail server (or MTA), you will need some SMTP server to send mail.
90 | You can use one provided by your ISP or any of the free ones; however, the security and privacy of such a setup is
91 | questionable at best. This is why `ssmtp` is only used in this guide for non-sensitive mail, such as monitoring
92 | emails and system status emails.
93 |
94 | To install `ssmtp` do:
95 |
96 | ----
97 | sudo apt install ssmtp
98 | ----
99 |
100 | Edit the `/etc/ssmtp/ssmtp.conf` file, set `root` option to your desired email address,
101 | `mailhub` to your SMTP server address and enable the use of TLS and STARTTLS:
102 |
103 | ./etc/ssmtp/ssmtp.conf
104 | [subs="attributes+"]
105 | ----
106 | root={SB_EMAIL} # <1>
107 |
108 | mailhub={SB_SMTP_ADDR}:{SB_SMTP_PORT} # <2>
109 |
110 | UseTLS=Yes
111 | UseSTARTTLS=Yes
112 | ----
113 | <1> This email address will receive all mail for UIDs < 1000.
114 | <2> Set this to your SMTP server address and port.
115 |
116 | There are other parameters that may need to be configured (for example, if your SMTP server requires authentication).
117 | The Arch Wiki article on `ssmtp` is a good source of information on this topic -
118 | https://wiki.archlinux.org/index.php/SSMTP.
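
For example, if your SMTP server requires authentication, options similar to the following may be needed
(a sketch only: `AuthUser` and `AuthPass` are standard `ssmtp` options, the values are placeholders):

./etc/ssmtp/ssmtp.conf
----
AuthUser=your-smtp-username
AuthPass=your-smtp-password
----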
119 |
120 | To test that email delivery works try: `echo "test" | ssmtp root`.
121 |
122 | NOTE: If this command is successful but email is not delivered, it was probably filtered.
123 | You can run `ssmtp` with `-v` argument to get more verbose output, but I found no good solution to troubleshoot
124 | filtering issues. Sometimes changing sender address, subject, content and hostname helps to avoid filtering.
125 |
126 | As root user, create `/etc/systemd/system/system-summary-report.service` file with the following content:
127 |
128 | ./etc/systemd/system/system-summary-report.service
129 | [subs="attributes+"]
130 | ----
131 | [Unit]
132 | Description=Email system summary report
133 | After=network-online.target
134 |
135 | [Service]
136 | Type=oneshot
137 | ExecStart=/bin/sh -c '/usr/local/sbin/system-summary.sh | ssmtp root'
138 | ----
139 |
140 | You can run this service manually and verify that you get the email:
141 |
142 | ----
143 | sudo systemctl daemon-reload
144 | sudo systemctl start system-summary-report.service
145 | ----
146 |
147 | As root user, create `/etc/systemd/system/system-summary-report.timer` file with the following content:
148 |
149 | ./etc/systemd/system/system-summary-report.timer
150 | ----
151 | [Unit]
152 | Description=Email system summary report
153 |
154 | [Timer]
155 | OnCalendar=Fri 18:00 # <1>
156 | AccuracySec=1h
157 | Persistent=true
158 |
159 | [Install]
160 | WantedBy=timers.target
161 | ----
162 | <1> Adjust this as needed, especially if using period other than week.
163 |
164 | Enable and start the timer:
165 |
166 | ----
167 | sudo systemctl daemon-reload
168 | sudo systemctl enable system-summary-report.timer
169 | sudo systemctl start system-summary-report.timer
170 | ----
171 |
172 | To check timer status and time until next activation use:
173 |
174 | ----
175 | sudo systemctl list-timers
176 | ----
177 |
178 |
--------------------------------------------------------------------------------
/nextcloud/certificate.adoc:
--------------------------------------------------------------------------------
1 | [[nextcloud_certificate]]
2 | === Certificate
3 | This section describes how to obtain and maintain a publicly trusted SSL/TLS certificate
4 | that will be used to set up HTTPS for the Nextcloud.
5 |
6 | The certificate will be issued by Let's Encrypt <>
7 | and will be obtained and renewed using Certbot <>.
8 |
9 | The certificate needs to be obtained before Nextcloud installation,
10 | since Nextcloud will only be reachable via HTTPS (not plain HTTP) and the web server won't start without a certificate.
11 |
12 | ACME DNS challenge is used for domain validation (needed to issue the initial certificate and for subsequent renewals).
13 | One major advantage the DNS challenge has over the more widely used HTTP challenge
14 | is that it doesn't require your web server to use the standard ports: 80 and 443.
15 | This allows hosting Nextcloud on a non-standard port, which can be advantageous for two reasons:
16 | using a non-standard port can dramatically reduce the amount of unwanted requests from bots,
17 | and it may be the only option if your ISP blocks standard ports 80 and 443.
18 |
19 | ==== Installing Certbot
20 | Certbot can be installed from the PPA maintained by the EFF team
21 | (the installation is described in more detail in the Certbot documentation
22 | footnote:[https://certbot.eff.org/lets-encrypt/ubuntubionic-other]):
23 |
24 | ----
25 | sudo add-apt-repository ppa:certbot/certbot
26 | sudo apt install certbot
27 | ----
28 |
29 | ==== Preparing Domain Name
30 | While it's possible to get a certificate for the `{SB_SUBDOMAIN}` domain and use it to access the Nextcloud,
31 | it may be better to use a subdomain, like for example `nextcloud.silverbox.example.com`.
32 | This offers some extra flexibility and you can take advantage of the same origin policy.
33 | In this document, the subdomain for Nextcloud is referred to as `{SB_NEXTCLOUD_DOMAIN}`.
34 |
35 | The first step is to create a CNAME DNS record to point `{SB_NEXTCLOUD_DOMAIN}` to `{SB_SUBDOMAIN}`.
36 | Thus, you won't have to also dynamically update the host A record for the `{SB_NEXTCLOUD_DOMAIN}`,
37 | as it is already updated for the `{SB_SUBDOMAIN}`.
38 |
39 | The second step is to create a TXT DNS record for the `_acme-challenge.{SB_NEXTCLOUD_DOMAIN}` with any value
40 | (the value is just a placeholder and will be replaced with the actual DNS ACME challenge during
41 | domain validation).
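
Once both records are created, they can be verified with the `dig` tool (from the `dnsutils` package,
which is also installed later in this section); for example:

[subs="attributes+"]
----
dig {SB_NEXTCLOUD_DOMAIN} CNAME +short
dig _acme-challenge.{SB_NEXTCLOUD_DOMAIN} TXT +short
----

Note that it may take some time for newly created records to propagate.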
42 |
43 | ==== Preparing Certbot Scripts
44 | To confirm ownership of the domain (for certificate generation and renewal) a DNS challenge will be used.
45 | The reason is that the HTTP(S) challenge is way too restrictive; in particular, it forces using the standard
46 | 80 and 443 ports, which is not always desirable.
47 | DNS challenge, however, only requires you to be able to create a TXT record with a given content,
48 | without enforcing any specific port numbers.
49 |
50 | At the time of writing, Certbot did not support the Namesilo API, which is why the DNS challenge is done using manual hooks.
51 |
52 | Create a directory where all Certbot related scripts will reside:
53 |
54 | ----
55 | sudo mkdir /root/silverbox/certbot
56 | sudo chmod 700 /root/silverbox/certbot
57 | ----
58 |
59 | Inside it, create the `dns-challenge-auth.sh` file with the following content:
60 |
61 | ./root/silverbox/certbot/dns-challenge-auth.sh
62 | [source,bash]
63 | ----
64 | #!/bin/bash
65 |
66 | ACME_SUBDOMAIN="_acme-challenge"
67 | DOMAIN=`awk -F '.' '{print $(NF-1)"."$NF}' <<< "$CERTBOT_DOMAIN"` || exit 1
68 | NS=`dig "$DOMAIN" NS +short | head -1` || exit 2
69 | echo "Performing DNS Challenge for domain: $CERTBOT_DOMAIN in $DOMAIN with authoritative NS $NS"
70 | docker run --rm --network common --cpus="1" -v /root/silverbox/namesilo:/secrets dns-updater -k /secrets/api-key -a update-txt -d "$ACME_SUBDOMAIN.$CERTBOT_DOMAIN" -t "$CERTBOT_VALIDATION" || exit 3
71 |
72 | for i in {1..20}; do # <1>
73 | echo "Checking if DNS updated, attempt $i..."
74 | TXT=`dig "@$NS" "$ACME_SUBDOMAIN.$CERTBOT_DOMAIN" TXT +short | sed 's/"//g'`
75 | if [ "$TXT" == "$CERTBOT_VALIDATION" ]; then
76 | echo "Record updated. Waiting extra minute before returning."
77 | sleep 60 # <2>
78 | exit 0
79 | else
80 | echo "Record still contains '$TXT'. Waiting for 1 minute..."
81 | sleep 60 # <3>
82 | fi
83 | done
84 |
85 | exit 4
86 | ----
87 | <1> 20 is the number of attempts to check if the TXT record has been updated.
88 | <2> Without this extra wait, sometimes Certbot won't pick up the updated TXT value.
89 | <3> How long to wait between attempts.
90 |
91 | Next, create the `dns-challenge-cleanup.sh` file with the following content:
92 |
93 | ./root/silverbox/certbot/dns-challenge-cleanup.sh
94 | [source,bash]
95 | ----
96 | #!/bin/bash
97 |
98 | ACME_SUBDOMAIN="_acme-challenge"
99 | DOMAIN=`awk -F '.' '{print $(NF-1)"."$NF}' <<< "$CERTBOT_DOMAIN"` || exit 1
100 | NS=`dig "$DOMAIN" NS +short | head -1` || exit 2
101 | echo "Performing DNS Challenge Cleanup for domain: $CERTBOT_DOMAIN in $DOMAIN with authoritative NS $NS"
102 | docker run --rm --network common --cpus="1" -v /root/silverbox/namesilo:/secrets dns-updater -k /secrets/api-key -a update-txt -d "$ACME_SUBDOMAIN.$CERTBOT_DOMAIN" -t "none" || exit 3 # <1>
103 | echo "Record cleaned up"
104 | ----
105 | <1> In this example, `none` is used as the new TXT record value (since an empty value is not allowed).
106 |
107 | Assign the following permissions to these files:
108 |
109 | ----
110 | sudo chmod 770 /root/silverbox/certbot/dns-challenge-auth.sh
111 | sudo chmod 770 /root/silverbox/certbot/dns-challenge-cleanup.sh
112 | ----
113 |
114 | The next step is to create a renewal hook that will restart the Apache web server
115 | once a new certificate is obtained, so that the new certificate is picked up.
116 |
117 | To create directories for the renewal hooks run:
118 |
119 | ----
120 | sudo certbot certificates
121 | ----
122 |
123 | Create the `/etc/letsencrypt/renewal-hooks/post/nextcloud-web-restart.sh` file with the following content:
124 |
125 | ./etc/letsencrypt/renewal-hooks/post/nextcloud-web-restart.sh
126 | [source,bash,subs="attributes+"]
127 | ----
128 | #!/bin/bash
129 | if [ "$CERTBOT_DOMAIN" = "{SB_NEXTCLOUD_DOMAIN}" ]; then # <1>
130 | echo "Restarting Nextcloud web server"
131 | docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml restart nextcloud-web
132 | else
133 | echo "Skipping Nextcloud web server restart - different domain: $CERTBOT_DOMAIN"
134 | fi
135 | ----
136 | <1> Replace `{SB_NEXTCLOUD_DOMAIN}` with the actual domain name.
137 |
138 | This script will be executed automatically by Certbot during the renewal of a certificate,
139 | and if the renewal is for the Nextcloud certificate it will restart Nextcloud's web server
140 | to use the new certificate.
141 |
142 | And assign the following permissions to this file:
143 |
144 | ----
145 | sudo chmod 770 /etc/letsencrypt/renewal-hooks/post/nextcloud-web-restart.sh
146 | ----
147 |
148 | Finally, install the `dnsutils` package which contains `dig` tool:
149 |
150 | ----
151 | sudo apt install dnsutils
152 | ----
153 |
154 | ==== Test Certificate
155 | To test that domain validation and certificate renewal work, it is possible to use the Let's Encrypt test server
156 | to generate a test (not trusted) certificate.
157 |
158 | To get test certificate run:
159 |
160 | [subs="attributes+"]
161 | ----
162 | sudo certbot certonly --test-cert \
163 | --agree-tos \
164 | -m {SB_EMAIL} \ # <1>
165 | --manual \
166 | --preferred-challenges=dns \
167 | --manual-auth-hook /root/silverbox/certbot/dns-challenge-auth.sh \
168 | --manual-cleanup-hook /root/silverbox/certbot/dns-challenge-cleanup.sh \
169 | --must-staple \
170 | -d {SB_NEXTCLOUD_DOMAIN} # <2>
171 | ----
172 | <1> Replace `{SB_EMAIL}` with the email address you wish to use for certificate generation.
173 | <2> Replace `{SB_NEXTCLOUD_DOMAIN}` with the actual domain name.
174 |
175 | NOTE: This may take a while.
176 |
177 | To view information about the generated certificate:
178 |
179 | ----
180 | sudo certbot certificates
181 | ----
182 |
183 | To test certificate renewal:
184 |
185 | [subs="attributes+"]
186 | ----
187 | sudo certbot renew --test-cert --dry-run --cert-name {SB_NEXTCLOUD_DOMAIN}
188 | ----
189 |
190 | To revoke and delete the test certificate:
191 |
192 | [subs="attributes+"]
193 | ----
194 | sudo certbot revoke --test-cert --cert-name {SB_NEXTCLOUD_DOMAIN}
195 | ----
196 |
197 | ==== Getting Real Certificate
198 | To get the real certificate run:
199 |
200 | [subs="attributes+"]
201 | ----
202 | sudo certbot certonly \
203 | --agree-tos \
204 | -m {SB_EMAIL} \
205 | --manual \
206 | --preferred-challenges=dns \
207 | --manual-auth-hook /root/silverbox/certbot/dns-challenge-auth.sh \
208 | --manual-cleanup-hook /root/silverbox/certbot/dns-challenge-cleanup.sh \
209 | --must-staple \
210 | -d {SB_NEXTCLOUD_DOMAIN}
211 | ----
212 |
213 | ==== Automatic Certificate Renewal
214 | The certificate should be automatically renewed by Certbot's Systemd service.
215 | The service should run automatically triggered by the corresponding timer.
216 | To check the status of the timer:
217 |
218 | ----
219 | systemctl status certbot.timer
220 | ----
221 |
222 |
--------------------------------------------------------------------------------
/nextcloud/configuration.adoc:
--------------------------------------------------------------------------------
1 | === Configuration
2 | This section only describes some generic post-install configuration
3 | as your configuration will highly depend on the use case.
4 |
5 | ==== Fixing Security and Setup Warnings
6 | Navigate to _Settings -> Overview_ page and check the "`Security & setup warnings`" section.
7 |
8 | Most likely you'll see at least one warning here: `Some columns in the database are missing a conversion to big int`.
9 | To fix it, run the following commands:
10 |
11 | ----
12 | sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml exec --user www-data nextcloud-fpm php occ maintenance:mode --on
13 | sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml exec --user www-data nextcloud-fpm php occ db:convert-filecache-bigint --no-interaction
14 | sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml exec --user www-data nextcloud-fpm php occ maintenance:mode --off
15 | ----
16 |
17 | Verify that the warning disappeared.
18 |
19 | If there are any other warnings, refer to the Nextcloud Admin Guide for resolutions.
20 |
21 | ==== Editing Nextcloud Config File
22 | Edit the `/srv/nextcloud/html/config/config.php` file and add/modify the following parameters:
23 |
24 | ./srv/nextcloud/html/config/config.php
25 | [subs="attributes+"]
26 | ----
27 | 'loglevel' => 1, # <1>
28 | 'overwrite.cli.url' => 'https://{SB_NEXTCLOUD_DOMAIN}:{SB_NEXTCLOUD_PORT}', # <2>
29 | 'htaccess.RewriteBase' => '/', # <3>
30 | 'default_language' => 'en',
31 | 'default_locale' => 'en_CA',
32 | 'knowledgebaseenabled' => false, # <4>
33 | 'token_auth_enforced' => true, # <5>
34 | 'default_phone_region' => 'CA' # <6>
35 | ----
36 | <1> Sets log level to _info_.
37 | <2> This and the next line will enable pretty URLs (essentially eliminating `index.php` from the URLs).
38 | More info: https://docs.nextcloud.com/server/stable/admin_manual/installation/source_installation.html#pretty-urls.
39 | <3> Same as above.
40 | <4> This line disables arguably useless knowledge base page.
41 | <5> Enforces token authentication for API clients for better security (will block requests using the user password).
42 | <6> Set to your country code (more in the Nextcloud documentation).
43 |
44 | Next, run the following command:
45 |
46 | ----
47 | sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml exec --user www-data nextcloud-fpm php occ maintenance:update:htaccess
48 | ----
49 |
50 | And restart Nextcloud:
51 |
52 | ----
53 | sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml restart
54 | ----
55 |
56 | Refresh the Nextcloud page and verify that pretty URLs work.
57 |
58 | ==== Background Jobs, Email Delivery
59 | Navigate to _Settings -> Basic settings_ page.
60 |
61 | Make sure Background Jobs scheduling is set to *Cron* and the last run was within 15 minutes.
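
If for some reason the mode cannot be changed from the web UI, it should also be possible to set it with `occ`,
following the same `exec` pattern used elsewhere in this section:

----
sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml exec --user www-data nextcloud-fpm php occ background:cron
----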
62 |
63 | Also, on this page you can configure Email server parameters and test email delivery.
64 |
65 | ==== Access to NFS Share
66 | It may be convenient to be able to access some directories that are shared with NFS from the Nextcloud.
67 | It can be done with the "`External storage support`" Nextcloud app,
68 | which allows mounting local directories inside the Nextcloud.
69 |
70 | However, since Nextcloud is running inside a container, it is isolated from the host file system
71 | and won't be able to access the NFS directories unless they are explicitly mounted inside the container.
72 |
73 | To mount some directories into the Nextcloud container,
74 | edit the `/root/silverbox/containers/nextcloud/docker-compose.yml` file and append the directories to the
75 | `volumes` list of the `nextcloud-fpm` service. For example:
76 |
77 | ./root/silverbox/containers/nextcloud/docker-compose.yml
78 | ----
79 | ...
80 | nextcloud-fpm:
81 | ...
82 | volumes:
83 | ...
84 | - /srv/nfs/videos:/nfs/videos
85 | - /srv/nfs/photos:/nfs/photos
86 | ...
87 | ----
88 |
89 | To apply changes do:
90 |
91 | ----
92 | sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml stop
93 | sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml kill nextcloud-fpm
94 | sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml up -d
95 | ----
96 |
97 | To add these directories to Nextcloud navigate to _Settings -> External Storages_ and add two
98 | "`Local`" storage entries for `/nfs/videos` and `/nfs/photos`.
99 |
100 | ===== Permissions
101 | There are some permissions issues when accessing files created via NFS from the Nextcloud and vice versa.
102 |
103 | In particular, files created inside NFS directories from the Nextcloud will be owned by `www-data:www-data` (`{SB_WWW_DATA_UID}:{SB_WWW_DATA_GID}`)
104 | and with default `umask` will only allow modifications by the owner.
105 | Thus, users accessing these files over NFS won't be able to modify them.
106 |
107 | The permissions/ownership for such files can be adjusted with the following command:
108 |
109 | [subs="attributes+"]
110 | ----
111 | sudo find /srv/nfs -uid {SB_WWW_DATA_UID} -exec chown {SB_USER}:{SB_NFS_GROUP} \{} \; -exec chmod g+w {} \; -exec echo {} \;
112 | ----
113 |
114 | This example command changes ownership for all files under `/srv/nfs` directory that are currently owned by UID `{SB_WWW_DATA_UID}`,
115 | to be owned by your user and `{SB_NFS_GROUP}` group, and also adds write permissions to the group.
116 |
117 | There is a similar issue with the files created via NFS and accessed via Nextcloud.
118 | Such files by default will have ownership `{SB_USER}:{SB_USER}` and won't be modifiable by the Nextcloud
119 | (unless your `umask` allows modification by everyone).
120 | One way to allow modifications from the Nextcloud is to set ownership to `{SB_USER}:{SB_NFS_GROUP}`,
121 | which can be done with the following command:
122 |
123 | [subs="attributes+"]
124 | ----
125 | sudo find /srv/nfs -user $USER -group $USER -exec chgrp {SB_NFS_GROUP} {} \; -exec echo {} \;
126 | ----
127 |
128 | IMPORTANT: When creating files from outside of the Nextcloud (e.g. over NFS), the files won't be immediately visible
129 | in the Nextcloud. Similarly, the changed permissions on such files won't be immediately noticed by the Nextcloud.
130 | To force Nextcloud to rescan the files use the following command:
131 | `sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml exec --user www-data nextcloud-fpm php occ files:scan admin`
132 |
133 | If desired, the permission correction can be automated with `inotify-tools` or similar tools
134 |
135 | ==== Security Scanning
136 | It may be useful to run some security scanners against the Nextcloud.
137 | Here are some examples:
138 |
139 | Nextcloud Security Scanner::
140 | https://scan.nextcloud.com.
141 | SSL Labs Scanner::
142 | https://www.ssllabs.com/ssltest. Note that it only works over the default HTTPS port 443, so to use it
143 | you can temporarily change the port forwarding rule to forward from external port 443 to internal port `{SB_NEXTCLOUD_PORT}`.
144 | ImmuniWeb SSL Scanner::
145 | https://www.immuniweb.com/ssl
146 |
147 | ==== Reduce Autovacuum Frequency
148 | This is a completely optional step, but it may help to minimize disk writes.
149 | In the default configuration, PostgreSQL autovacuum runs every minute,
150 | which I find extremely excessive for my limited Nextcloud use.
151 | Running it so frequently produces excessive disk writes by the `postgres: stats collector` process.
152 |
153 | To reduce autovacuum frequency, edit the `/srv/nextcloud/db/postgresql.conf` file and change the
154 | `autovacuum_naptime` parameter to the desired value, for example:
155 |
156 | ./srv/nextcloud/db/postgresql.conf
157 | ----
158 | autovacuum_naptime = 15min
159 | ----
160 |
161 | Restart the Nextcloud database for the setting to take effect.
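
For example, assuming the database service is named `nextcloud-db` in the Compose file:

----
sudo docker compose -f /root/silverbox/containers/nextcloud/docker-compose.yml restart nextcloud-db
----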
162 |
163 |
--------------------------------------------------------------------------------
/nextcloud/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | To monitor Nextcloud status with Monit create the `/etc/monit/conf.d/70-nextcloud` file with the following content:
3 |
4 | ./etc/monit/conf.d/70-nextcloud
5 | [subs="attributes+"]
6 | ----
7 | # Containers status
8 | check program nextcloud_web with path "/usr/local/etc/monit/scripts/container_status.sh nextcloud-web .State.Status running"
9 | if status != 0 for 5 cycles then alert
10 |
11 | check program nextcloud_fpm with path "/usr/local/etc/monit/scripts/container_status.sh nextcloud-fpm .State.Status running"
12 | if status != 0 for 5 cycles then alert
13 |
14 | check program nextcloud_db with path "/usr/local/etc/monit/scripts/container_status.sh nextcloud-db .State.Status running"
15 | if status != 0 for 5 cycles then alert
16 |
17 | # HTTPS & Certificate check
18 | check host nextcloud with address {SB_NEXTCLOUD_DOMAIN} every 5 cycles # <1>
19 | if failed port {SB_NEXTCLOUD_PORT} protocol https request /?monit and certificate valid > 15 days for 2 cycles then alert # <2>
20 |
21 | # Apache status
22 | check host nextcloud_local with address {SB_IP} every 5 cycles # <3>
23 | if failed port {SB_NEXTCLOUD_PORT} protocol apache-status path /apache-server-status.html # <4>
24 | replylimit > 50% or
25 | requestlimit > 50% or
26 | closelimit > 50% or
27 | gracefullimit > 50% or
28 | waitlimit < 20%
29 | with ssl options {verify: disable}
30 | for 2 cycles
31 | then alert
32 | ----
33 | <1> Replace `{SB_NEXTCLOUD_DOMAIN}` with the actual Nextcloud domain name.
34 | <2> Replace `{SB_NEXTCLOUD_PORT}` with the actual value.
35 | Also, if you changed the query parameter in the rewrite condition for filtering Monit logs in the <> section
36 | to some random string, replace `monit` in the `request /?monit` part with the exact same string.
37 | <3> Replace `{SB_IP}` with the actual value.
38 | <4> Replace `{SB_NEXTCLOUD_PORT}` with the actual value.
39 |
40 | Restart Monit and verify that Nextcloud monitoring is working.
41 |
42 |
--------------------------------------------------------------------------------
/nextcloud/nextcloud.adoc:
--------------------------------------------------------------------------------
1 | == Nextcloud
2 | This section describes how to install and configure Nextcloud <> on the server,
3 | in such a way that it can be accessed from the internet securely over HTTPS.
4 |
5 | This section depends on the following sections: <>, <>, <>.
6 |
7 | include::overview.adoc[]
8 |
9 | include::certificate.adoc[]
10 |
11 | include::installation.adoc[]
12 |
13 | include::configuration.adoc[]
14 |
15 | include::monitoring.adoc[]
16 |
17 |
--------------------------------------------------------------------------------
/nextcloud/overview.adoc:
--------------------------------------------------------------------------------
1 | === Overview
2 | The Nextcloud will be deployed using Docker (more specifically - Docker Compose).
3 | Below is a diagram that shows a high-level overview of the Nextcloud deployment:
4 |
5 | ----
6 | Nextcloud Docker Network
7 | ----------------------------------------------------------------------------------
8 | | ------------ ----------- ------------ |
9 | | HTTPS | Apache | 9000/tcp | Nextcloud | 5432/tcp | | |
10 | ------------->| Web Server |----------->| PHP |----------->| PostgreSQL | |
11 | | _____| (httpd) | | FPM | | | |
12 | | | ------------ ----------- ------------ |
13 | | | | | | | | |
14 | | | {/usr/local/apache2/htdocs} | | | | |
15 | | | | {/var/www/html} | | {/var/lib/postgresql/data} |
16 | | {/certs} | | | | | |
17 | | | v v _____| | v |
18 | | | /srv/nextcloud/html | {/data} /srv/nextcloud/db |
19 | | v | | |
20 | | /etc/letsencrypt {/nfs/*} v |
21 | | | /srv/nextcloud/data |
22 | | v |
23 | | /srv/nfs/* |
24 | ----------------------------------------------------------------------------------
25 | ----
26 |
27 | NOTE: In the diagram above, a path inside curly braces indicates a path as it is seen inside the Docker container,
28 | while a path without curly braces indicates the real path on the host file system.
29 |
30 | As the diagram shows, the only external entry point to the Nextcloud system is over HTTPS
31 | via the container with Apache Web Server.
32 |
33 | NOTE: All Nextcloud services (web interface, WebDAV, CalDAV, CardDAV, Sync app) work over HTTPS.
34 |
35 | HTTP requests are handled in the following way:
36 |
37 | - If this is a request to a PHP file:
38 | * The request is proxied to the Nextcloud PHP FPM container using `mod_proxy_fcgi` module.
39 | - Otherwise:
40 | * The request is served directly by the Apache Web Server (statically).
41 |
42 | All containers are stateless (i.e. don't contain any important data), since all user data is stored
43 | on the host file system and mounted inside containers.
44 | This way containers can be safely deleted and re-deployed, which makes upgrades very easy.
45 |
46 | Having three separate containers (instead of just one big container) allows for stopping,
47 | restarting and upgrading containers independently, which is useful in many cases.
48 | It also allows every container to have its own logs and logs configuration.
49 | But more importantly, compromising or DoS-ing one container doesn't compromise the whole system.
50 |
51 |
--------------------------------------------------------------------------------
/nfs-server/domain.adoc:
--------------------------------------------------------------------------------
1 | === Domain
2 | This section describes how to configure an internal (local) domain for the LAN, which is required to configure
3 | the Kerberos realm in the next step.
4 |
5 | The internal domain will be a direct subdomain of your domain `{SB_DOMAIN_NAME}`
6 | and in the document will be referred to as `{SB_INTERNAL_DOMAIN}`.
7 | For example, if your domain is `example.com`, the internal domain could be `home.example.com`.
8 |
9 | Before proceeding to creating the local zone for the internal domain, create address reservations
10 | on your DHCP server for all the client devices that will be in the domain (or configure static IP addresses).
11 |
12 | [[nfs_configuring_dns]]
13 | ==== Configuring Local DNS Zone
14 | To create a local DNS zone for your internal domain,
15 | edit the `/etc/unbound/unbound.conf.d/dns-config.conf` file
16 | and append the following content under the `server` section:
17 |
18 | ./etc/unbound/unbound.conf.d/dns-config.conf
19 | [source,yaml,subs="attributes+"]
20 | ----
21 | local-zone: "{SB_INTERNAL_DOMAIN}." static # <1>
22 | local-data: "silverbox.{SB_INTERNAL_DOMAIN}. IN A {SB_IP}" # <2>
23 | local-data: "client-pc.{SB_INTERNAL_DOMAIN}. IN A \{CLIENT_PC_IP_ADDR}" # <3>
24 | local-data-ptr: "{SB_IP} silverbox.{SB_INTERNAL_DOMAIN}" # <4>
25 | local-data-ptr: "\{CLIENT_PC_IP_ADDR} client-pc.{SB_INTERNAL_DOMAIN}" # <5>
26 | ----
27 | <1> Replace `{SB_INTERNAL_DOMAIN}` with your internal domain name.
28 | Dot at the end is required.
29 | <2> This is forward DNS record for the server. It assumes the server FQDN in the internal domain is
30 | `silverbox.{SB_INTERNAL_DOMAIN}`, but you can change it of course.
31 | <3> This is an example forward record for the client PC.
32 | You can add as many records as you need, for each device you have.
33 | <4> This is reverse DNS record for the server.
34 | <5> This is an example reverse record for the client PC.
35 |
36 | Restart the DNS server:
37 |
38 | ----
39 | sudo systemctl restart unbound.service
40 | ----
41 |
42 | Make sure you can resolve all records that were added, using FQDNs and IP addresses. For example:
43 |
44 | [subs="attributes+"]
45 | ----
46 | nslookup silverbox.{SB_INTERNAL_DOMAIN}
47 | nslookup client-pc.{SB_INTERNAL_DOMAIN}
48 | nslookup {SB_IP}
49 | nslookup \{CLIENT_PC_IP_ADDR}
50 | ----
51 |
52 | ==== Configuring Server Domain and FQDN
53 | Since the server uses a static network configuration, its domain needs to be configured manually.
54 | To do this, edit the `/etc/resolv.conf` file and add the following line:
55 |
56 | ./etc/resolv.conf
57 | [subs="attributes+"]
58 | ----
59 | search {SB_INTERNAL_DOMAIN}
60 | ----
61 |
62 | Verify that resolution by host name only works:
63 |
64 | ----
65 | nslookup silverbox
66 | nslookup client-pc
67 | ----
68 |
69 | To configure the server's FQDN, edit the `/etc/hosts` file and insert the FQDN before the host name in the record for `127.0.1.1`.
70 | So the line for `127.0.1.1` should look something like this:
71 |
72 | ./etc/hosts
73 | [subs="attributes+"]
74 | ----
75 | 127.0.1.1 silverbox.{SB_INTERNAL_DOMAIN} silverbox
76 | ----
77 |
78 | To verify that the FQDN is set correctly, check the output of the `hostname -f` command: it should print the FQDN.
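
For example:

[subs="attributes+,macros+"]
----
hostname -f
pass:q[_silverbox.{SB_INTERNAL_DOMAIN}_]
----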
79 |
80 | ==== Configuring Client's Domain and FQDN
81 | If clients use DHCP, the domain can be easily configured on the DHCP server and thus it will be automatically
82 | pushed to the client devices.
83 | If static network configuration is used, then the domain will have to be configured manually
84 | (the actual instructions will depend on the client OS).
85 |
86 | The FQDN configuration will also differ depending on the client OS, but for Ubuntu 18.04 it is identical
87 | to the FQDN configuration on the server.
88 |
89 | Once the domain and FQDN are configured on the client PC, verify that it works (using `nslookup` or a similar command)
90 | and that both client and server can resolve each other's names (both short and FQDN).
91 |
92 |
--------------------------------------------------------------------------------
/nfs-server/kerberos.adoc:
--------------------------------------------------------------------------------
1 | === Kerberos
2 | Kerberos will be used for secure NFS authentication, and optionally integrity validation and encryption.
3 |
4 | [[nfs_kerberos_configuring_kdc]]
5 | ==== Configuring KDC
6 | Install MIT Kerberos KDC (key distribution center):
7 |
8 | ----
9 | sudo apt install krb5-kdc
10 | ----
11 |
12 | During installation, you will be prompted for the following parameters:
13 |
14 | Realm::
15 | Enter fully capitalized internal domain name (`{SB_INTERNAL_DOMAIN}`)
16 | Kerberos servers for your realm::
17 | Enter the server internal FQDN (e.g. `silverbox.{SB_INTERNAL_DOMAIN}`)
18 | Administrative server for your Kerberos realm::
19 | Enter the server internal FQDN (e.g. `silverbox.{SB_INTERNAL_DOMAIN}`)
20 |
21 | Then, edit the `/etc/krb5kdc/kdc.conf` file and add/change the following parameters:
22 |
23 | ./etc/krb5kdc/kdc.conf
24 | [subs="attributes+"]
25 | ----
26 | [kdcdefaults]
27 | kdc_ports = 88
28 |
29 | [realms]
30 | {SB_INTERNAL_DOMAIN} = { # <1>
31 | kdc_ports = 88
32 | max_life = 24h 0m 0s
33 | max_renewable_life = 7d 0h 0m 0s # <2>
34 | master_key_type = aes256-cts
35 | supported_enctypes = aes256-cts:normal aes128-cts:normal
36 | }
37 | ----
38 | <1> The internal domain `{SB_INTERNAL_DOMAIN}` here must be fully capitalized (since it is a realm).
39 | <2> The `max_renewable_life` parameter effectively controls maximum ticket lifetime.
40 | You can adjust this parameter if you need to.
41 |
42 | Next, create a new Kerberos database (you'll be prompted to create a Kerberos DB master password):
43 |
44 | ----
45 | sudo kdb5_util create -s
46 | ----
47 |
48 | Overwrite the `/etc/krb5.conf` file with the following content:
49 |
50 | ./etc/krb5.conf
51 | [subs="attributes+"]
52 | ----
53 | [libdefaults]
54 | default_realm = {SB_INTERNAL_DOMAIN} # <1>
55 | allow_weak_crypto = false
56 | ccache_type = 4
57 | kdc_timesync = 1
58 | [realms]
59 | {SB_INTERNAL_DOMAIN} = { # <2>
60 | kdc = silverbox.{SB_INTERNAL_DOMAIN} # <3>
61 | admin_server = silverbox.{SB_INTERNAL_DOMAIN} # <4>
62 | }
63 | ----
64 | <1> The internal domain `{SB_INTERNAL_DOMAIN}` here must be fully capitalized (since it is a realm).
65 | <2> Same as above.
66 | <3> The `kdc` should be set to the server FQDN.
67 | <4> The `admin_server` should be set to the server FQDN.
68 |
69 | Start the KDC service and verify that it starts successfully:
70 |
71 | ----
72 | sudo systemctl start krb5-kdc
73 | ----
74 |
75 | Install the `krb5-admin-server` package which is (weirdly enough) needed to use the `kadmin.local` tool:
76 |
77 | ----
78 | sudo apt install krb5-admin-server
79 | ----
80 |
81 | Unless you are planning to use remote `kadmin`, the admin service can be disabled:
82 |
83 | ----
84 | sudo systemctl disable krb5-admin-server.service
85 | ----
86 |
87 | Finally, to add a Kerberos principal for your user, run `sudo kadmin.local` and then type:
88 |
89 | [subs="attributes+"]
90 | ----
91 | addprinc {SB_USER} # <1>
92 | ----
93 | <1> Replace `{SB_USER}` with your actual user name.
94 |
95 | ==== Adding Firewall Rules
96 | To allow access to the KDC from the LAN, add the firewall rules:
97 |
98 | [subs="attributes+"]
99 | ----
100 | sudo ufw allow proto tcp from {SB_SUBNET} to any port 88 comment 'Kerberos TCP' # <1>
101 | sudo ufw allow proto udp from {SB_SUBNET} to any port 88 comment 'Kerberos UDP' # <2>
102 | ----
103 | <1> Replace `{SB_SUBNET}` with the actual LAN subnet.
104 | <2> Same as above.
105 |
106 | ==== Configuring Kerberos on the Client
107 | First, install the MIT Kerberos user package:
108 |
109 | ----
110 | sudo apt install krb5-user
111 | ----
112 |
113 | When prompted, set the same parameters as described in the <> section.
114 |
115 | Next, overwrite the `/etc/krb5.conf` file with the same content as described in the
116 | <> section.
117 |
118 | Verify that Kerberos authentication works by doing `kinit` as your user,
119 | and typing your principal's password (same password as was used during principal creation).
120 | The `kinit` command should succeed and no error message should appear.
121 | Do `klist` to see the ticket, and then `kdestroy` to destroy it.
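
The whole check may look like this:

----
kinit
klist
kdestroy
----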
122 |
123 |
--------------------------------------------------------------------------------
/nfs-server/nfs-client-configuration.adoc:
--------------------------------------------------------------------------------
1 | === NFS Client Configuration
2 | This section describes how to configure NFS client on the client PC (instructions are for Ubuntu 18.04 Desktop).
3 |
4 | ==== Installing NFS Client
5 | To install NFS client do:
6 |
7 | ----
8 | sudo apt install nfs-common
9 | ----
10 |
11 | ==== Enabling User ID Mapping
12 | IMPORTANT: It is not clear whether these steps are really required or not,
13 | as it seems like the ID translation works even without these module parameters.
14 |
15 | Create the `/etc/modprobe.d/nfs.conf` file with the following content:
16 |
17 | ./etc/modprobe.d/nfs.conf
18 | ----
19 | options nfs nfs4_disable_idmapping=0
20 | ----
21 |
22 | Reboot the system, and verify that ID mapping is *not* disabled by executing:
23 |
24 | ----
25 | sudo modprobe nfs
26 | cat /sys/module/nfs/parameters/nfs4_disable_idmapping
27 | ----
28 |
29 | Which should return `N`.
30 |
31 | ==== Creating Kerberos Principal
32 | First, add Kerberos principal on the server for the client machine,
33 | and save it to a separate keytab file (do these commands on the server):
34 |
35 | [subs="attributes+"]
36 | ----
37 | sudo kadmin.local
38 | addprinc -randkey nfs/client-pc.{SB_INTERNAL_DOMAIN} # <1>
39 | ktadd -k /root/krb5.keytab nfs/client-pc.{SB_INTERNAL_DOMAIN} # <2>
40 | ----
41 | <1> Replace `client-pc.{SB_INTERNAL_DOMAIN}` with your client PC FQDN.
42 | <2> Same as above.
43 |
44 | Then move the `/root/krb5.keytab` file to the client PC to `/etc/krb5.keytab`.
45 |
46 | On the client PC, assign proper ownership and permissions to the keytab file:
47 |
48 | ----
49 | sudo chown root:root /etc/krb5.keytab
50 | sudo chmod 600 /etc/krb5.keytab
51 | ----
52 |
53 | Next, on the client PC edit the `/etc/default/nfs-common` file and change/add the following lines:
54 |
55 | ./etc/default/nfs-common
56 | ----
57 | NEED_GSSD="yes"
58 | ----
59 |
60 | Reboot the client PC and verify that the `rpc-gssd` service is now running:
61 |
62 | ----
63 | sudo systemctl status rpc-gssd.service
64 | ----
65 |
66 | ==== Disabling Unused Services
67 | The `rpcbind` service will be running (and listening on some ports),
68 | even though it is not needed for NFSv4.
69 | To disable it do:
70 |
71 | ----
72 | sudo systemctl stop rpcbind.service rpcbind.socket
73 | sudo systemctl disable rpcbind.service rpcbind.socket
74 | sudo systemctl mask rpcbind.service rpcbind.socket
75 | ----
76 |
77 | Reboot the system and verify that the `rpcbind` service is not running anymore.
78 |
79 | ==== Creating NFS Share Group
80 | Create the same `{SB_NFS_GROUP}` group on the client machine, and add your user into it:
81 |
82 | [subs="attributes+"]
83 | ----
84 | sudo groupadd {SB_NFS_GROUP}
85 | sudo usermod -a -G {SB_NFS_GROUP} {SB_USER}
86 | ----
87 |
88 | NOTE: There is no need to synchronize UID/GID for the user and group between server and client,
89 | because the ID mapping process in NFSv4 is name based, rather than UID/GID based.
90 | It is important to make sure that the user and group have the same names on server and client.
91 |
92 | ==== Performing Test Mount
93 | Create the `/mnt/nfs` directory:
94 |
95 | ----
96 | sudo mkdir /mnt/nfs
97 | ----
98 |
99 | To test that everything works, perform a test NFS mount with:
100 |
101 | [subs="attributes+"]
102 | ----
103 | sudo mount -t nfs4 -o proto=tcp,port=2049,sec=krb5 silverbox.{SB_INTERNAL_DOMAIN}:/ /mnt/nfs -vvvv
104 | ----
105 |
106 | The output should look something like this:
107 |
108 | ----
109 | mount.nfs4: timeout set for ...
110 | mount.nfs4: trying text-based options 'proto=tcp,port=2049,sec=krb5,vers=4.2,addr=xxx.xxx.xxx.xxx,clientaddr=xxx.xxx.xxx.xxx'
111 | ----
112 |
113 | Try accessing the mount as root user by doing `sudo ls /mnt/nfs`.
114 | You should see a `Permission denied` message as the root
115 | user is mapped to `nobody:nogroup` and doesn't have permissions to access the share.
116 |
117 | Now try accessing the share as your user by doing `ls /mnt/nfs`.
118 | You should see either a `Permission denied` or a `Stale file handle` message, because your user doesn't have a Kerberos ticket.
119 |
120 | Finally, do `kinit` to obtain the Kerberos ticket for your user and try accessing share again. It should work now.
121 |
122 | TIP: At this point it is worth doing some testing by creating files to make sure ID mapping works properly (both ways)
123 | and user/group ownership is assigned correctly.
124 |
125 | ==== Automatic NFS Share Mount
126 | This section describes how to set up automatic mounting of the NFS share on your user login (without any interaction).
127 |
128 | The first step is to configure an automatic mount of the NFS share on boot.
129 | To do this, append the following line to the `/etc/fstab` file (on the client PC):
130 |
131 | ./etc/fstab
132 | [subs="attributes+"]
133 | ----
134 | silverbox.{SB_INTERNAL_DOMAIN}:/ /mnt/nfs nfs4 proto=tcp,port=2049,sec=krb5,lazytime,auto,_netdev,x-gvfs-show 0 0 # <1>
135 | ----
136 | <1> Replace `silverbox.{SB_INTERNAL_DOMAIN}` with the actual server FQDN.
137 |
138 | NOTE: The `x-gvfs-show` option will make the NFS share appear in the Nautilus file manager panel automatically.
139 | If you are not using Nautilus, you can remove this option.
140 |
141 | NOTE: If you prefer the NFS share to be mounted only on first access, change the `auto` parameter to `noauto`
142 | and add the `x-systemd.automount` parameter (for additional options, refer to the systemd.mount documentation). A sketch of such an entry is shown below.
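For reference, the same `fstab` entry converted to on-demand mounting might look like this (a sketch; replace the server FQDN as before):

./etc/fstab
[subs="attributes+"]
----
silverbox.{SB_INTERNAL_DOMAIN}:/ /mnt/nfs nfs4 proto=tcp,port=2049,sec=krb5,lazytime,noauto,x-systemd.automount,_netdev,x-gvfs-show 0 0
----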
143 |
144 | With this change in the `fstab`, the share will be mounted on boot using credentials from the `/etc/krb5.keytab` file.
145 | However, since this keytab contains only the machine key, it won't allow any access to the content of the share.
146 |
147 | The next step is to export your user's Kerberos key into a separate keytab file,
148 | and create a user Systemd service which will do `kinit` for your user automatically on login.
149 | Since this `kinit` will use the key from the user's keytab file, no interaction (such as entering a password)
150 | will be required.
151 |
152 | NOTE: Another (and, perhaps, a more convenient) way to automatically do Kerberos authentication on login
153 | is to use `pam-krb5` PAM module.
154 | If your Kerberos principal has the same password as your local user, you can install `pam-krb5`
155 | and add the following line (after the line for regular auth) to the appropriate configuration file
156 | under `/etc/pam.d` (depends on the distribution): `auth optional pam_krb5.so minimum_uid=1000 use_first_pass`.
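As a very rough sketch, on Debian/Ubuntu-based systems the resulting fragment of `/etc/pam.d/common-auth` might look similar to this (the existing lines and the exact file differ between distributions, so treat this only as an illustration of where the line goes):

./etc/pam.d/common-auth (fragment)
----
auth    required    pam_unix.so
auth    optional    pam_krb5.so minimum_uid=1000 use_first_pass
----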
157 |
158 | To export your principal's key, run the following commands on the server:
159 |
160 | [subs="attributes+"]
161 | ----
162 | sudo kadmin.local
163 | ktadd -k /root/krb5.keytab {SB_USER}
164 | ----
165 |
166 | Move the `/root/krb5.keytab` file from the server to the client PC, for example under your user's home `.config`
167 | directory: `~/.config/krb5.keytab`.
168 |
169 | IMPORTANT: It is important to have either full disk encryption or at least encryption of the user's home directory,
170 | since the Kerberos principal key will be stored on disk.
171 |
172 | Change permissions on this file so that only your user can read it:
173 |
174 | [subs="attributes+"]
175 | ----
176 | chown {SB_USER}:{SB_USER} ~/.config/krb5.keytab
177 | chmod 400 ~/.config/krb5.keytab
178 | ----
179 |
180 | Create a directory (on the client PC) for user Systemd services, if it doesn't exist yet:
181 |
182 | ----
183 | mkdir -p ~/.local/share/systemd/user/
184 | ----
185 |
186 | Inside this directory, create the `kinit.service` file with the following content:
187 |
188 | .~/.local/share/systemd/user/kinit.service
189 | ----
190 | [Unit]
191 | Description=Perform kinit automatically
192 |
193 | [Service]
194 | Type=oneshot
195 | ExecStart=/bin/bash -c "kinit -r 7d -k -t ~/.config/krb5.keytab $USER" # <1>
196 |
197 | [Install]
198 | WantedBy=default.target
199 | ----
200 | <1> Replace `7d` with the value of the `max_renewable_life` option that you set in the `kdc.conf` file on the server.
201 |
202 | Enable this service, so it will start automatically on login:
203 |
204 | ----
205 | systemctl --user daemon-reload
206 | systemctl --user enable kinit.service
207 | ----
208 |
209 | Reboot the system and verify that you can access the content of the NFS share.
210 |
211 | NOTE: Since the service is only started on login, if the user session lasts longer than `max_renewable_life`,
212 | the Kerberos ticket will eventually expire.
213 | If you plan on having long user sessions, you can either increase `max_renewable_life` or make this service
214 | run periodically to obtain a new ticket before the old one expires (one possible way is sketched below).
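As a sketch of the periodic option, a user Systemd timer placed next to the service could re-run `kinit.service` regularly (the interval here is just an example; pick one shorter than your ticket lifetime):

.~/.local/share/systemd/user/kinit.timer
----
[Unit]
Description=Re-run kinit periodically to keep the Kerberos ticket fresh

[Timer]
OnCalendar=daily
Persistent=true

[Install]
WantedBy=timers.target
----

The timer can then be enabled similarly to the service: `systemctl --user daemon-reload && systemctl --user enable --now kinit.timer`.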
215 |
216 | NOTE: If the user's home directory is encrypted, the Systemd service won't start on login.
217 | It appears that user Systemd services are scanned before the home directory is mounted, and thus Systemd won't see
218 | the service.
219 | The only workaround I found for this is to add `systemctl --user daemon-reload` and
220 | `systemctl --user start kinit.service` commands to the script that runs after user login
221 | (it will depend on your system, but in Gnome it can be set with "`Startup Applications`").
222 |
223 |
--------------------------------------------------------------------------------
/nfs-server/nfs-server-configuration.adoc:
--------------------------------------------------------------------------------
1 | === NFS Server Configuration
2 | This section describes how to install and configure an NFSv4 (and only version 4) server to share files on the LAN.
3 |
4 | ==== Installing NFS Server
5 | To install the NFS server do:
6 |
7 | ----
8 | sudo apt install nfs-kernel-server
9 | ----
10 |
11 | ==== Preparing NFS Share
12 | First, create a new group `{SB_NFS_GROUP}` that will be used to control access to the NFS share contents.
13 | This is, of course, just one way to manage share access and you don't have to do it exactly this way.
14 |
15 | [subs="attributes+"]
16 | ----
17 | sudo groupadd {SB_NFS_GROUP}
18 | ----
19 |
20 | Members of this group will be able to access the NFS share directory (and subdirectories, where permitted) on the server.
21 |
22 | Add your user to this group:
23 |
24 | [subs="attributes+"]
25 | ----
26 | sudo usermod -a -G {SB_NFS_GROUP} {SB_USER}
27 | ----
28 |
29 | NOTE: You'll need to re-login or start a new session to apply group membership.
30 |
31 | Create the `/srv/nfs` directory, set its group ownership to `{SB_NFS_GROUP}` and set the following permissions:
32 |
33 | [subs="attributes+"]
34 | ----
35 | sudo mkdir -p /srv/nfs
36 | sudo chown root:{SB_NFS_GROUP} /srv/nfs
37 | sudo chmod 770 /srv/nfs
38 | ----
39 |
40 | Contents of the `/srv/nfs` directory will be shared using the NFS server.
41 | To share other directories, you can bind-mount them under `/srv/nfs` (see the example below).
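For example, an existing directory could be bind-mounted under the share with an `/etc/fstab` entry like the following (the `/data/photos` path is purely illustrative; the target directory must exist under `/srv/nfs` first):

./etc/fstab
----
/data/photos  /srv/nfs/photos  none  bind  0 0
----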
42 |
43 | Next, create the `/etc/exports` file with the following content:
44 |
45 | ./etc/exports
46 | [subs="attributes+"]
47 | ----
48 | /srv/nfs {SB_SUBNET}(rw,sync,crossmnt,no_subtree_check,root_squash,fsid=0,sec=krb5) # <1>
49 | ----
50 | <1> Replace `{SB_SUBNET}` with the actual LAN subnet.
51 |
52 | NOTE: This configuration uses the `sec=krb5` parameter, which will use Kerberos for authentication only.
53 | Other possible options are: `sec=krb5i` for authentication and integrity validation,
54 | and `sec=krb5p` for authentication, integrity validation and encryption (see the example below).
55 | However, encryption may add a performance penalty and may be unnecessary in certain scenarios.
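For example, if you do want encryption on the wire, the same export line with `sec=krb5p` would look like this (all other options unchanged; clients would then need to mount with `sec=krb5p` as well):

./etc/exports
[subs="attributes+"]
----
/srv/nfs {SB_SUBNET}(rw,sync,crossmnt,no_subtree_check,root_squash,fsid=0,sec=krb5p)
----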
56 |
57 | ==== Fixing `nfsdcltrack` Issue
58 | Restart the NFS server:
59 |
60 | ----
61 | sudo systemctl restart nfs-server
62 | ----
63 |
64 | Most likely you'll find the following message in `/var/log/syslog`:
65 |
66 | ----
67 | nfsdcltrack Failed to init database: -13
68 | ----
69 |
70 | This is due to a bug in `nfsdcltrack`, which will hopefully be fixed in the future.
71 | 
72 | The workaround is to initialize the `nfsdcltrack` database manually:
73 |
74 | ----
75 | sudo mkdir -p /var/lib/nfs/nfsdcltrack
76 | sudo nfsdcltrack init
77 | ----
78 |
79 | Restart the NFS server again and make sure the error is now gone from the logs.
80 |
81 | ==== Configuring NFS Protocol Versions
82 | First, check the output of:
83 |
84 | ----
85 | sudo cat /proc/fs/nfsd/versions
86 | ----
87 |
88 | Most likely it will be: `-2 +3 +4 +4.1 +4.2`.
89 | This shows that while NFSv4 is enabled, NFSv3 is enabled as well.
90 |
91 | To disable all NFS versions except 4 (and also to enable `svcgssd` needed for Kerberos),
92 | edit the `/etc/default/nfs-kernel-server` file and change/add the following lines:
93 |
94 | ./etc/default/nfs-kernel-server
95 | ----
96 | RPCMOUNTDOPTS="--manage-gids -N2 -N3 -V4 --no-udp"
97 | NEED_SVCGSSD="yes"
98 | RPCNFSDOPTS="-N2 -N3 -V4 --no-udp"
99 | ----
100 |
101 | Restart the NFS server with `sudo systemctl restart nfs-server`
102 | and check the output of `sudo cat /proc/fs/nfsd/versions` again.
103 | Now it should be `-2 -3 +4 +4.1 +4.2` indicating that only NFSv4 is now enabled.
104 |
105 | ==== Disabling Unused Services
106 | The `rpcbind` and `rpc-gssd` services will be running (and even listening on some ports),
107 | even though they are not needed for a pure NFSv4 server.
108 | To disable them, run the following commands:
109 |
110 | ----
111 | sudo systemctl stop {rpcbind,rpc-gssd}.service rpcbind.socket
112 | sudo systemctl disable {rpcbind,rpc-gssd}.service rpcbind.socket
113 | sudo systemctl mask {rpcbind,rpc-gssd}.service rpcbind.socket
114 | ----
115 |
116 | Reboot the system and verify that `rpcbind` and `rpc-gssd` are not running.
117 |
118 | ==== Checking for Listening Ports
119 | At this point all legacy services (services not related to NFSv4) should be disabled,
120 | and the NFS kernel server should be listening only on TCP port 2049.
121 | Verify this by checking the output of:
122 |
123 | ----
124 | sudo netstat -lnptu
125 | ----
126 |
127 | NOTE: Most likely the process name won't be shown for the NFS server, as the socket is opened from the kernel.
128 |
129 | ==== Adding Firewall Rule
130 | Add a firewall rule to allow NFS traffic from the LAN:
131 |
132 | [subs="attributes+"]
133 | ----
134 | sudo ufw allow proto tcp from {SB_SUBNET} to any port 2049 comment 'NFSv4 Server' # <1>
135 | ----
136 | <1> Replace `{SB_SUBNET}` with the actual LAN subnet.
137 |
138 | ==== Enabling User ID Mapping
139 | IMPORTANT: It is not clear whether these steps are really required or not,
140 | as it seems like the ID translation works even without these module parameters.
141 |
142 | Create the `/etc/modprobe.d/nfsd.conf` file with the following content:
143 |
144 | ./etc/modprobe.d/nfsd.conf
145 | ----
146 | options nfsd nfs4_disable_idmapping=0
147 | ----
148 |
149 | Reboot the system, and verify that ID mapping is *not* disabled by executing:
150 |
151 | ----
152 | cat /sys/module/nfsd/parameters/nfs4_disable_idmapping
153 | ----
154 |
155 | It should return `N`.
156 |
157 | ==== Creating Kerberos Principal
158 | Run `sudo kadmin.local` and add NFS service principal:
159 |
160 | [subs="attributes+"]
161 | ----
162 | addprinc -randkey nfs/silverbox.{SB_INTERNAL_DOMAIN} # <1>
163 | ktadd nfs/silverbox.{SB_INTERNAL_DOMAIN} # <2>
164 | ----
165 | <1> Replace `{SB_INTERNAL_DOMAIN}` with the actual internal domain name.
166 | <2> Same as above.
167 |
168 | This will create a keytab file (in the default location `/etc/krb5.keytab`)
169 | containing the principal's key and add the principal to the Kerberos database.
170 |
171 | Creation of the default keytab file should trigger the `rpc-svcgssd` service.
172 | Reboot the server and verify that the `rpc-svcgssd` service is now automatically started (is enabled and active):
173 |
174 | ----
175 | sudo systemctl status rpc-svcgssd.service
176 | ----
177 |
178 |
--------------------------------------------------------------------------------
/nfs-server/nfs-server.adoc:
--------------------------------------------------------------------------------
1 | [[nfs_server]]
2 | == NFS Server
3 | This section describes how to install and configure NFS (version 4 only) server with Kerberos authentication
4 | (and optional encryption and integrity validation).
5 | It also provides an example configuration for automatic NFS share mounting on the client PC.
6 |
7 | This section assumes that DNS server and domain name have been configured as described in the
8 | <> and <> sections.
9 |
10 | include::domain.adoc[]
11 |
12 | include::kerberos.adoc[]
13 |
14 | include::nfs-server-configuration.adoc[]
15 |
16 | include::nfs-client-configuration.adoc[]
17 |
18 |
--------------------------------------------------------------------------------
/parameters.adoc:
--------------------------------------------------------------------------------
1 | // Local IP address of the server. Example: 192.168.1.100
2 | // To use specific IP address instead of the placeholder, replace the next line like so:
3 | // :SB_IP: 192.168.1.100
4 | :SB_IP: \{SERVER_IP_ADDR}
5 | // Subnet of the LAN where the server is deployed. Example: 192.168.1.0/24
6 | :SB_SUBNET: \{SERVER_SUBNET}
7 | // Default gateway address. Example: 192.168.1.1
8 | :SB_GW: \{SERVER_DEFAULT_GATEWAY}
9 | // Your user on the server. Example: john
10 | :SB_USER: \{SERVER_USER}
11 | // Email address to which deliver emails from the server. Example: john@somewhere.com
12 | :SB_EMAIL: \{YOUR_EMAIL_ADDR}
13 | // SMTP server address. Example: smtp.somewhere.com
14 | :SB_SMTP_ADDR: \{SMTP_SERVER_ADDR}
15 | // SMTP server port. Example: 25
16 | :SB_SMTP_PORT: \{SMTP_SERVER_PORT}
17 | // VPN Docker network. Example: 172.18.0.0/24
18 | :SB_VPN_SUBNET: \{DOCKER_VPN_NETWORK}
19 | // Port number on which VPN proxy container will listen for connections. Example: 12345
20 | :SB_VPN_PROXY_PORT: \{VPN_PROXY_PORT}
21 | // Desired IP address of the VPN proxy container (from the {DOCKER_VPN_NETWORK}). Example: 172.18.0.10
22 | :SB_VPN_PROXY_ADDR: \{VPN_PROXY_ADDR}
23 | // Domain name. Example: example.com
24 | :SB_DOMAIN_NAME: \{DOMAIN_NAME}
25 | // Server's FQDN. Example: silverbox.example.com
26 | :SB_SUBDOMAIN: \{SERVER_SUBDOMAIN}
27 | // Nextcloud FQDN. Example: nextcloud.silverbox.example.com
28 | :SB_NEXTCLOUD_DOMAIN: \{NEXTCLOUD_DOMAIN}
29 | // Common Docker network. Example: 172.19.0.0/24
30 | :SB_COMMON_SUBNET: \{DOCKER_COMMON_NETWORK}
31 | // Internal domain. Example: home.example.com
32 | :SB_INTERNAL_DOMAIN: \{INTERNAL_DOMAIN}
33 | // NFS share group. Example: nfsgroup
34 | :SB_NFS_GROUP: \{NFS_SHARE_GROUP}
35 | // Port number on which Transmission web UI will listen for connections. Example: 12345
36 | :SB_TRANSMISSION_PORT: \{TRANSMISSION_UI_PORT}
37 | // Desired IP address of the Transmission container (from the {DOCKER_VPN_NETWORK}). Example: 172.18.0.20
38 | :SB_TRANSMISSION_ADDR: \{TRANSMISSION_ADDR}
39 | // Nextcloud Docker network. Example: 172.18.0.0/24
40 | :SB_NEXTCLOUD_SUBNET: \{DOCKER_NEXTCLOUD_NETWORK}
42 | // Port number on which Nextcloud web server container will listen for connections. Example: 12345
42 | :SB_NEXTCLOUD_PORT: \{NEXTCLOUD_PORT}
43 | // Group that allows reading from Git repositories.
44 | :SB_GIT_GROUP: gitusers
45 | // Account that allows read/write access to Git repositories.
46 | :SB_GIT_USER: git
47 | // UID of the www-data user inside Apache httpd, Nextcloud FPM and Firefly III containers
48 | :SB_WWW_DATA_UID: 33
49 | // GID of the www-data user inside Apache httpd, Nextcloud FPM and Firefly III containers
50 | :SB_WWW_DATA_GID: 33
51 | // Firefly III Docker network. Example: 172.18.0.0/24
52 | :SB_FIREFLY_SUBNET: \{DOCKER_FIREFLY_NETWORK}
53 | // Port number on which Firefly III web UI will listen for connections. Example: 12345
54 | :SB_FIREFLY_PORT: \{FIREFLY_UI_PORT}
55 |
56 | // Attributes for latest Docker images versions (for convenience)
57 |
58 | // Debian Linux (slim)
59 | :SB_DEBIAN_VERSION: 11.5-slim
60 | // PostgreSQL
61 | :SB_POSTGRES_VERSION: 14.5
62 | // Nextcloud FPM
63 | :SB_NEXTCLOUD_FPM_VERSION: 24.0.6-fpm
64 | // HTTPD
65 | :SB_HTTPD_VERSION: 2.4.54
66 | // Firefly III
67 | :SB_FIREFLY_VERSION: version-5.5.13
68 |
69 |
--------------------------------------------------------------------------------
/preamble.adoc:
--------------------------------------------------------------------------------
1 | // Logo hosted in GitHub issues
2 | image::https://user-images.githubusercontent.com/693072/70379633-1406bd80-18fd-11ea-82d6-209c2bb80f24.png[Logo,align=center]
3 |
4 | This document describes my setup for a simple home server running GNU/Linux (Ubuntu 18.04).
5 | The server is based on Intel NUC hardware and runs a DNS server, NFS server, Nextcloud, SOCKS5 over VPN proxy server,
6 | Transmission (over VPN), Git server and some other services (see <> for a detailed list).
7 |
8 | The document is always updated to reflect the current state of the software. The revision history can be found in the changelog <<changelog>>.
9 |
10 | The document is hosted via GitHub Pages and the source is available at https://github.com/ovk/silverbox.
11 |
12 |
--------------------------------------------------------------------------------
/references.adoc:
--------------------------------------------------------------------------------
1 | [bibliography]
2 | == References
3 |
4 | - [[[changelog, 1]]] "`Silverbox Changelog`": https://github.com/ovk/silverbox/blob/master/CHANGELOG
5 | - [[[nut_hcl, 2]]] "`Network UPS Tools: Hardware compatibility list`": https://networkupstools.org/stable-hcl.html
6 | - [[[arch_wiki_ssd, 3]]] "`Arch Wiki: Solid state drive`": https://wiki.archlinux.org/index.php/Solid_state_drive
7 | - [[[ssh_audit, 4]]] "`ssh-audit script`": https://github.com/arthepsy/ssh-audit
8 | - [[[arch_wiki_hddtemp, 5]]] "`Arch Wiki: Hddtemp`": https://wiki.archlinux.org/index.php/Hddtemp
9 | - [[[unbound, 6]]] "`Unbound DNS Server`": https://nlnetlabs.nl/projects/unbound
10 | - [[[cloudflare_dns, 7]]] "`Cloudflare DNS Server`": https://www.cloudflare.com/learning/dns/what-is-1.1.1.1
11 | - [[[docker_compose, 8]]] "`Docker Compose`": https://docs.docker.com/compose
12 | - [[[name_silo, 9]]] "`NameSilo`": https://namesilo.com
13 | - [[[transmission, 10]]] "`Transmission`": https://transmissionbt.com
14 | - [[[nextcloud, 11]]] "`Nextcloud`": https://nextcloud.com
15 | - [[[lets_encrypt, 12]]] "`Let's Encrypt`": https://letsencrypt.org
16 | - [[[certbot, 13]]] "`Certbot`": https://certbot.eff.org
17 | - [[[borgbackup, 14]]] "`Borg Backup`": https://www.borgbackup.org
18 | - [[[ovh_object_storage, 15]]] "`OVH Object Storage`": https://www.ovh.com/world/public-cloud/storage/object-storage
19 | - [[[rclone, 16]]] "`Rclone`": https://rclone.org
20 | - [[[watchtower, 17]]] "`Watchtower`": https://containrrr.github.io/watchtower
21 | - [[[git_server, 18]]] "`Git Book: Git Server`": https://git-scm.com/book/en/v2/Git-on-the-Server-Setting-Up-the-Server
22 | - [[[htpasswd, 19]]] "`htpasswd`": https://httpd.apache.org/docs/2.4/programs/htpasswd.html
23 | - [[[firefly, 20]]] "`Firefly III`": https://www.firefly-iii.org
24 |
25 |
--------------------------------------------------------------------------------
/reverse-proxy/certificate.adoc:
--------------------------------------------------------------------------------
1 | === Certificate
2 | This section describes how to obtain and maintain the wildcard certificate that the reverse proxy will use to set up HTTPS for all services
3 | (hence the need for a wildcard and not just a regular certificate).
4 |
5 | The process is very similar to what was described in the <> section for Nextcloud and it relies on that section being done.
6 |
7 | ==== Preparing Domain Name
8 | It's assumed that the wildcard certificate will be obtained on your internal domain name `{SB_INTERNAL_DOMAIN}`.
9 | If this is not the case, adjust instructions below accordingly.
10 |
11 | The individual services will be sub-domains of the `{SB_INTERNAL_DOMAIN}`.
12 | For example, if your `{SB_INTERNAL_DOMAIN}` is `home.example.com` then service addresses will look like `service-name.home.example.com`.
13 |
14 | Since it's intended that the services will be only accessible from the internal network,
15 | there is no need to create any public A or CNAME records for the `{SB_INTERNAL_DOMAIN}`.
16 |
17 | Only the TXT DNS record for the `_acme-challenge.{SB_INTERNAL_DOMAIN}` with any value
18 | (the value is just a placeholder and will be replaced with the actual DNS ACME challenge during
19 | domain validation) needs to be created.
20 |
21 | ==== Preparing Certbot Scripts
22 | Create the `/etc/letsencrypt/renewal-hooks/post/reverse-proxy-restart.sh` file with the following content:
23 |
24 | ./etc/letsencrypt/renewal-hooks/post/reverse-proxy-restart.sh
25 | [source,bash,subs="attributes+"]
26 | ----
27 | #!/bin/bash
28 | if [ "$CERTBOT_DOMAIN" = "{SB_INTERNAL_DOMAIN}" ]; then # <1>
29 | echo "Restarting reverse proxy server"
30 | docker compose -f /root/silverbox/containers/reverse-proxy/docker-compose.yml restart proxy
31 | else
32 | echo "Skipping reverse proxy server restart - different domain: $CERTBOT_DOMAIN"
33 | fi
34 | ----
35 | <1> Replace `{SB_INTERNAL_DOMAIN}` with the actual domain name.
36 |
37 | This script will be executed automatically by Certbot during the renewal of a certificate,
38 | and if the renewal is for the reverse proxy certificate, it will restart the reverse proxy server
39 | to use the new certificate.
40 |
41 | And assign the following permissions to this file:
42 |
43 | ----
44 | sudo chmod 770 /etc/letsencrypt/renewal-hooks/post/reverse-proxy-restart.sh
45 | ----
46 |
47 | ==== Test Certificate
48 | To test that domain validation and certificate renewal work, it is possible to use the Let's Encrypt test server
49 | to generate a test (not trusted) certificate.
50 |
51 | To get a test certificate, run:
52 |
53 | [subs="attributes+"]
54 | ----
55 | sudo certbot certonly --test-cert \
56 | --agree-tos \
57 | -m {SB_EMAIL} \ # <1>
58 | --manual \
59 | --preferred-challenges=dns \
60 | --manual-auth-hook /root/silverbox/certbot/dns-challenge-auth.sh \
61 | --manual-cleanup-hook /root/silverbox/certbot/dns-challenge-cleanup.sh \
62 | --must-staple \
63 | -d *.{SB_INTERNAL_DOMAIN} # <2>
64 | ----
65 | <1> Replace `{SB_EMAIL}` with the email address you wish to use for certificate generation.
66 | <2> Replace `{SB_INTERNAL_DOMAIN}` with the actual domain name. The `*.` before domain name is significant - it is needed to request wildcard certificate.
67 |
68 | NOTE: This may take a while.
69 |
70 | To view information about the generated certificate:
71 |
72 | ----
73 | sudo certbot certificates
74 | ----
75 |
76 | To test certificate renewal:
77 |
78 | [subs="attributes+"]
79 | ----
80 | sudo certbot renew --test-cert --dry-run --cert-name {SB_INTERNAL_DOMAIN}
81 | ----
82 |
83 | To revoke and delete the test certificate:
84 |
85 | [subs="attributes+"]
86 | ----
87 | sudo certbot revoke --test-cert --cert-name {SB_INTERNAL_DOMAIN}
88 | ----
89 |
90 | ==== Getting Real Certificate
91 | To get the real certificate run:
92 |
93 | [subs="attributes+"]
94 | ----
95 | sudo certbot certonly \
96 | --agree-tos \
97 | -m {SB_EMAIL} \ # <1>
98 | --manual \
99 | --preferred-challenges=dns \
100 | --manual-auth-hook /root/silverbox/certbot/dns-challenge-auth.sh \
101 | --manual-cleanup-hook /root/silverbox/certbot/dns-challenge-cleanup.sh \
102 | --must-staple \
103 | -d *.{SB_INTERNAL_DOMAIN} # <2>
104 | ----
105 | <1> Replace `{SB_EMAIL}` with the email address you wish to use for certificate generation.
106 | <2> Replace `{SB_INTERNAL_DOMAIN}` with the actual domain name. The `*.` before domain name is significant - it is needed to request wildcard certificate.
107 |
108 | ==== Automatic Certificate Renewal
109 | The certificate should be automatically renewed by Certbot's Systemd service.
110 | The service is triggered automatically by the corresponding timer.
111 | To check the status of the timer:
112 |
113 | ----
114 | systemctl status certbot.timer
115 | ----
116 |
117 |
--------------------------------------------------------------------------------
/reverse-proxy/installation.adoc:
--------------------------------------------------------------------------------
1 | === Installation
2 | This section describes how to install and run the reverse proxy server.
3 |
4 | ==== Preparing Container
5 | Create a directory for the reverse proxy container files:
6 |
7 | ----
8 | sudo mkdir /root/silverbox/containers/reverse-proxy
9 | sudo chmod 700 /root/silverbox/containers/reverse-proxy
10 | ----
11 |
12 | Inside it, create the `docker-compose.yml` file with the following content:
13 |
14 | ./root/silverbox/containers/reverse-proxy/docker-compose.yml
15 | [source,yaml,subs="attributes+"]
16 | ----
17 | version: '3.8'
18 |
19 | services:
20 | proxy:
21 | container_name: reverse-proxy
22 | image: 'httpd:{SB_HTTPD_VERSION}' # <1>
23 | restart: on-failure:5
24 | network_mode: host # <2>
25 | logging:
26 | driver: json-file
27 | options:
28 | max-size: 10mb
29 | max-file: '3'
30 | volumes:
31 | - /etc/letsencrypt/live/{SB_INTERNAL_DOMAIN}:/certs/live/{SB_INTERNAL_DOMAIN}:ro # <3>
32 | - /etc/letsencrypt/archive/{SB_INTERNAL_DOMAIN}:/certs/archive/{SB_INTERNAL_DOMAIN}:ro # <4>
33 | - ./httpd.conf:/usr/local/apache2/conf/httpd.conf:ro
34 | - ./htpasswd:/usr/local/apache2/.htpasswd:ro
35 | ----
36 | <1> Replace `{SB_HTTPD_VERSION}` with the actual latest `httpd` (Debian based) image version (can be checked at the Docker Hub).
37 | <2> This puts the container on the host network, rather than creating a bridge network.
38 | While it may not be perfect from the isolation standpoint, it makes it very easy to proxy traffic to different services,
39 | regardless of what interface they are listening on.
40 | <3> Replace `{SB_INTERNAL_DOMAIN}` with the actual domain name.
41 | <4> Same as above.
42 |
43 | ===== Configuring HTTPD
44 | Create the `/root/silverbox/containers/reverse-proxy/httpd.conf` file with the following content:
45 |
46 | ./root/silverbox/containers/reverse-proxy/httpd.conf
47 | [source,apache,subs="attributes+"]
48 | ----
49 | Listen 443
50 |
51 | LoadModule mpm_event_module modules/mod_mpm_event.so
52 | LoadModule authn_core_module modules/mod_authn_core.so
53 | LoadModule authz_core_module modules/mod_authz_core.so
54 | LoadModule authz_host_module modules/mod_authz_host.so
55 | LoadModule authz_user_module modules/mod_authz_user.so
56 | LoadModule auth_basic_module modules/mod_auth_basic.so
57 | LoadModule authn_file_module modules/mod_authn_file.so
58 | LoadModule mime_module modules/mod_mime.so
59 | LoadModule log_config_module modules/mod_log_config.so
60 | LoadModule env_module modules/mod_env.so
61 | LoadModule headers_module modules/mod_headers.so
62 | LoadModule proxy_module modules/mod_proxy.so
63 | LoadModule proxy_http_module modules/mod_proxy_http.so
64 | LoadModule unixd_module modules/mod_unixd.so
65 | LoadModule socache_shmcb_module modules/mod_socache_shmcb.so
66 | LoadModule ssl_module modules/mod_ssl.so
67 | LoadModule http2_module modules/mod_http2.so
68 |
69 | User www-data
70 | Group www-data
71 |
72 | Protocols h2 http/1.1
73 |
74 | SSLEngine On
75 | SSLCipherSuite EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH
76 | SSLHonorCipherOrder On
77 | SSLProtocol -all +TLSv1.3 +TLSv1.2
78 | SSLUseStapling on
79 | SSLStaplingCache "shmcb:/usr/local/apache2/logs/ssl_stapling(128000)"
80 | SSLSessionTickets Off
81 | SSLSessionCache "shmcb:/usr/local/apache2/logs/ssl_scache(512000)"
82 | SSLSessionCacheTimeout 300
83 | SSLCertificateFile /certs/live/{SB_INTERNAL_DOMAIN}/fullchain.pem # <1>
84 | SSLCertificateKeyFile /certs/live/{SB_INTERNAL_DOMAIN}/privkey.pem
85 | 
86 | <Directory />
87 |     AllowOverride none
88 |     Require all denied
89 | </Directory>
90 | 
91 | DocumentRoot "/usr/local/apache2/htdocs"
92 |
93 | Header always set Strict-Transport-Security "max-age=15552000; includeSubDomains; preload" # <2>
94 | Header always set X-Frame-Options "DENY"
95 | Header always set X-Content-Type-Options "nosniff"
96 | Header always set X-XSS-Protection "1; mode=block"
97 |
98 | RequestHeader set X-Forwarded-Proto "https"
99 |
100 | # Monit
101 | <VirtualHost *:443>
102 |     ServerName monit.{SB_INTERNAL_DOMAIN} # <3>
103 |     ProxyPass "/" "http://127.0.0.1:\{MONIT_PORT}/" # <4>
104 |     ProxyPassReverse "/" "http://127.0.0.1:\{MONIT_PORT}/"
105 |     <Location />
106 |         Authtype Basic
107 |         Authname "Authentication required"
108 |         AuthUserFile /usr/local/apache2/.htpasswd
109 |         Require valid-user
110 |     </Location>
111 | </VirtualHost>
112 | 
113 | # Transmission
114 | <VirtualHost *:443>
115 |     ServerName transmission.{SB_INTERNAL_DOMAIN} # <5>
116 |     ProxyPass "/" "http://127.0.0.1:{SB_TRANSMISSION_PORT}/" # <6>
117 |     ProxyPassReverse "/" "http://127.0.0.1:{SB_TRANSMISSION_PORT}/"
118 |     <Location />
119 |         Authtype Basic
120 |         Authname "Authentication required"
121 |         AuthUserFile /usr/local/apache2/.htpasswd
122 |         Require valid-user
123 |     </Location>
124 | </VirtualHost>
125 |
126 | <Files ".ht*">
127 |     Require all denied
128 | </Files>
129 |
130 | ErrorLog /proc/self/fd/2
131 | LogLevel warn
132 | LogFormat "%h %l %u %t \"%r\" %>s %b \"%\{Referer}i\" \"%\{User-Agent}i\"" combined
133 | LogFormat "%h %l %u %t \"%r\" %>s %b" common
134 | CustomLog /proc/self/fd/1 common env=!dont_log
135 |
136 | Include conf/extra/httpd-mpm.conf
137 |
138 | ServerTokens Prod
139 | TraceEnable off
140 | ----
141 | <1> Replace `{SB_INTERNAL_DOMAIN}` in this and the next line with the actual value.
142 | <2> This and the next three lines add some of the standard security-related headers for all proxied services.
143 | Feel free to customize this.
144 | <3> Replace `{SB_INTERNAL_DOMAIN}` with the actual value.
145 | <4> Replace `\{MONIT_PORT}` in this and the next line with the actual port number you've chosen for the Monit UI.
146 | <5> Replace `{SB_INTERNAL_DOMAIN}` with the actual value.
147 | <6> Replace `\{SB_TRANSMISSION_PORT}` in this and the next line with the actual port number you've chosen for the Transmission UI.
148 |
149 | If you want to add additional services to the proxy, it can be done in a similar manner, by adding a `VirtualHost` block for each service (see the sketch below).
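For example, a hypothetical internal service listening on `127.0.0.1:8080` could be proxied with a block like this (a sketch only; the service name and port are placeholders, and a matching DNS record would also be needed as described further below):

[source,apache,subs="attributes+"]
----
# Service A
<VirtualHost *:443>
    ServerName service-a.{SB_INTERNAL_DOMAIN}
    ProxyPass "/" "http://127.0.0.1:8080/"
    ProxyPassReverse "/" "http://127.0.0.1:8080/"
    <Location />
        Authtype Basic
        Authname "Authentication required"
        AuthUserFile /usr/local/apache2/.htpasswd
        Require valid-user
    </Location>
</VirtualHost>
----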
150 |
151 | ===== Adding Users
152 | Install the `apache2-utils` package, which contains the `htpasswd` utility needed to generate the file containing users and hashed passwords:
153 |
154 | ----
155 | sudo apt install apache2-utils
156 | ----
157 |
158 | Create the users database file, initially containing one user (you will be prompted for the user's password):
159 |
160 | ----
161 | sudo htpasswd -B -c /root/silverbox/containers/reverse-proxy/htpasswd {USERNAME} # <1>
162 | ----
163 | <1> Replace `\{USERNAME}` with the actual desired username.
164 |
165 | To add more users, refer to the `htpasswd` documentation <<htpasswd>> or see the example below.
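For example, another user can be appended to the same file by omitting the `-c` flag, so the existing file is not overwritten (the username is a placeholder):

----
sudo htpasswd -B /root/silverbox/containers/reverse-proxy/htpasswd another-user
----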
166 |
167 | ==== Adding Firewall Rule
168 | Add a firewall rule to allow access to the reverse proxy:
169 |
170 | [subs="attributes+"]
171 | ----
172 | sudo ufw allow proto tcp to any port 443 comment "Reverse proxy"
173 | ----
174 |
175 | ==== Configuring DNS
176 | This part assumes you have configured local DNS zone as described in <>.
177 |
178 | To add DNS records for the services that go through the reverse proxy, edit the
179 | `/etc/unbound/unbound.conf.d/dns-config.conf` file and add a `local-data` record
180 | pointing to the server IP `{SB_IP}` for each service you want to proxy.
181 |
182 | Below are example records for Monit and Transmission:
183 |
184 | ./etc/unbound/unbound.conf.d/dns-config.conf
185 | [source,yaml,subs="attributes+"]
186 | ----
187 | server:
188 | local-data: "monit.{SB_INTERNAL_DOMAIN}. IN A {SB_IP}" # <1>
189 | local-data: "transmission.{SB_INTERNAL_DOMAIN}. IN A {SB_IP}"
190 | ----
191 | <1> In this and the next line replace `{SB_INTERNAL_DOMAIN}` and `{SB_IP}` with the actual values.
192 |
193 | Restart the Unbound server to apply the changes:
194 |
195 | ----
196 | sudo systemctl restart unbound.service
197 | ----
198 |
199 | ==== Running Reverse Proxy Server
200 | To start the reverse proxy server do:
201 |
202 | ----
203 | sudo docker compose -f /root/silverbox/containers/reverse-proxy/docker-compose.yml up -d
204 | ----
205 |
206 | ==== Automatic Container Startup
207 | To start the reverse proxy container automatically on boot create the
208 | `/etc/systemd/system/reverse-proxy-start.service` file with the following content:
209 |
210 | ./etc/systemd/system/reverse-proxy-start.service
211 | ----
212 | [Unit]
213 | Description=Start Apache Reverse Proxy
214 | Requires=docker.service
215 | After=docker.service
216 |
217 | [Service]
218 | Type=oneshot
219 | ExecStart=/usr/bin/docker compose -f /root/silverbox/containers/reverse-proxy/docker-compose.yml up -d
220 |
221 | [Install]
222 | WantedBy=multi-user.target
223 | ----
224 |
225 | Enable the service, so that it will be started on system boot:
226 |
227 | ----
228 | sudo systemctl daemon-reload
229 | sudo systemctl enable reverse-proxy-start.service
230 | ----
231 |
232 |
--------------------------------------------------------------------------------
/reverse-proxy/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | To monitor the reverse proxy server with Monit create the `/etc/monit/conf.d/70-reverse-proxy` file with the following content:
3 |
4 | ./etc/monit/conf.d/70-reverse-proxy
5 | [subs="attributes+"]
6 | ----
7 | # Container status
8 | check program reverse_proxy with path "/usr/local/etc/monit/scripts/container_status.sh reverse-proxy .State.Status running"
9 | if status != 0 for 5 cycles then alert
10 |
11 | # HTTPS & Certificate check
12 | check host reverse_proxy_monit with address monit.{SB_INTERNAL_DOMAIN} every 5 cycles # <1>
13 | if failed port 443 protocol https request / status = 401 and certificate valid > 15 days for 2 cycles then alert # <2>
14 | ----
15 | <1> Replace `{SB_INTERNAL_DOMAIN}` with the actual value.
16 | In this example, Monit UI host is used to check the certificate.
17 | <2> Port 443 is used here; change it if you used a different port for the proxy.
18 |
19 | Restart Monit and verify that the reverse proxy monitoring is working.
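The restart is done the same way as for the other Monit configuration fragments in this guide:

----
sudo systemctl restart monit
----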
20 |
21 |
--------------------------------------------------------------------------------
/reverse-proxy/overview.adoc:
--------------------------------------------------------------------------------
1 | === Overview
2 | Below is a diagram with an overview of how such a reverse proxy could be deployed:
3 |
4 | ----
5 | Silverbox Server
6 | --------------------------------------------
7 | | -------- |
8 | monit.home.example.com ------\ | | Apache |---> Internal Monit Addr |
9 | transmission.home.example.com--[HTTPS, Basic Auth]------>| HTTP |---> Internal Transmission Addr|
10 | service-a.home.example.com --/ | | Server |---> Internal Service A Addr |
11 | | -------- |
12 | | | |
13 | | {/certs} |
14 | | | |
15 | | V |
16 | | /etc/letsencrypt - *.home.example.com |
17 | --------------------------------------------
18 | ----
19 |
20 | NOTE: In the diagram above, a path inside curly braces indicates a path as it is seen inside the Docker container,
21 | while a path without curly braces indicates the real path on the host file system.
22 |
23 | This diagram assumes you have the domain `home.example.com` for your home services, and three services:
24 | Monit, Transmission and some hypothetical Service A (DNS names for these services need to be configured in Unbound).
25 |
26 | The Apache web server serves as an HTTPS to HTTP reverse proxy, while also (optionally) performing HTTP Basic Authentication.
27 | A wildcard Let's Encrypt certificate for `*.home.example.com` is used for HTTPS in this example, allowing easy addition of services without the need to get new certificates.
28 |
29 | This approach provides both authentication (using HTTP Basic Auth, which is secure when used with HTTPS) and encryption (HTTPS).
30 | Basic auth can be turned off for some services if they offer strong built-in authentication.
31 |
32 | Additionally, the reverse proxy enables HTTP2 and adds some common security headers.
33 |
34 | This section is completely optional: for many people who only have one or two services which they rarely use, it may not be worth the effort of the initial configuration.
35 | But if you plan on running many self-hosted services, it may be very convenient to have such a reverse proxy with easily expandable rules for adding additional services.
36 |
37 |
--------------------------------------------------------------------------------
/reverse-proxy/reverse-proxy.adoc:
--------------------------------------------------------------------------------
1 | [[reverse_proxy]]
2 | == Reverse Proxy
3 | There are some services described in this guide (such as Monit Web UI or Transmission Web UI)
4 | that are internal and only intended to be accessible from inside the home (LAN) network.
5 | In addition, you may want to install some other services, not described in the guide, which should only be accessible from inside.
6 | As described in this guide, the suggested way of accessing such services securely (with authentication and encryption)
7 | is by establishing an SSH tunnel to each service first, and then accessing the service over the tunnel.
8 |
9 | While this approach works, it may become quite inconvenient to access internal services this way, especially for a larger number of services.
10 | This section describes an alternative approach: using an internal reverse proxy to provide access to all internal services in a secure way.
11 |
12 | include::overview.adoc[]
13 |
14 | include::certificate.adoc[]
15 |
16 | include::installation.adoc[]
17 |
18 | include::monitoring.adoc[]
19 |
20 |
--------------------------------------------------------------------------------
/server-overview/goals.adoc:
--------------------------------------------------------------------------------
1 | [[server_overview_goals]]
2 | === Goals
3 | Main goals that affected overall design of the server are:
4 |
5 | Simplicity::
6 | Where possible, this document prefers simple solutions, i.e. solutions that require less configuration,
7 | fewer components and minimize bloat. Simple solutions are often more secure (due to limited attack surface),
8 | easy to understand and maintain, easy to extend and use less resources.
9 | Stability::
10 | The server design is heavily focused on stability of configuration and setup.
11 | While there are many new "fancy" and "feature-rich" tools and programs that could be used,
12 | I find that the vast majority of them have a very short life span,
13 | minimal to no backward/forward compatibility or meaningful upgrade path, often very poor support and documentation,
14 | lack of security updates, etc.
15 | Instead, the choices were made in favor of mature, stable, proven software, that doesn't break compatibility every
16 | minor release.
17 | Security::
18 | Major considerations were given to security when designing the server setup.
19 | While I think the solution in general is secure enough for home use, I still would not recommend keeping any
20 | sensitive information on the server.
21 | Low Maintenance::
22 | Another major focus was keeping server maintenance effort as small as possible.
23 | This goes hand in hand with stability, and also relies on tools and automation to keep manual maintenance as minimal
24 | as possible.
25 |
26 | It is important to mention that this server is *not* a:
27 |
28 | NAS::
29 | It is not intended to be used for safe storage of massive amounts of data (at least not in the described configuration).
30 | Media Server::
31 | It is not running anything related to the media server, no Kodi, not even X server.
32 | Although, turning it into a media server is definitely possible
33 | (I know of a person who added containerized Kodi on top of this guide and took advantage of the NUC's infrared port).
34 | Proprietary Server::
35 | It is not running any proprietary, closed-source solutions, even if they are free (free as in beer).
36 | So you won't find anything like a Plex or Minecraft server here, but you could definitely add these on your own if you wish.
37 |
38 |
--------------------------------------------------------------------------------
/server-overview/hardware.adoc:
--------------------------------------------------------------------------------
1 | [[hardware]]
2 | === Hardware
3 | Originally, the server was placed on a shelf inside a small apartment, so the main criteria for the hardware were
4 | low noise (i.e. no fans, HDD or other moving parts), nice design (so no open PCBs or wires)
5 | and reasonable power consumption for 24/7 operation.
6 |
7 | Below is the list of hardware that was originally used for the server (just for the reference).
8 |
9 | ==== Computer
10 | Intel NUC (Next Unit of Computing) BOXNUC6CAYH Barebone Systems.
11 |
12 | It uses Intel(R) Celeron(R) CPU J3455 (4 cores, up to 2.3 GHz).
13 |
14 | ==== Storage
15 | Main disk: WD Blue 500Gb Sata SSD.
16 |
17 | External backup disk: Samsung T5 500Gb Portable SSD (USB 3.1 Gen2).
18 |
19 | ==== Memory
20 | 8GB 1600mhz DDR3L SODIMM KVR16LS11/8.
21 |
22 | ==== UPS
23 | While strictly speaking this is not a part of the server, and technically not required, a UPS is highly recommended.
24 |
25 | The UPS that was used: Eaton 3S US550VA 5-15P (8) 5-15R 4-UPS.
26 |
27 | TIP: When choosing a UPS, make sure you buy one that has decent Linux support.
28 | Compatibility with Network UPS Tools can be checked on the NUT hardware compatibility list <<nut_hcl>>.
29 |
30 | ==== Additional Hardware
31 | You will also need a monitor, USB keyboard, patch cord and USB flash drive for the OS installation and initial configuration.
32 | Later on, you will probably want to have a separate keyboard attached to the server,
33 | to type the LUKS password after reboots.
34 |
35 | ==== Power Consumption
36 | Since the server doesn't have power-hungry devices attached (such as HDDs or monitors), it is fairly low power.
37 | I didn't measure power consumption precisely, but based on the rate of UPS battery discharge
38 | my guess was around 8-12W at idle.
39 | However, as reported by a Reddit user with similar hardware, the power consumption at idle is around 6W.
40 |
41 |
--------------------------------------------------------------------------------
/server-overview/high-level-overview.adoc:
--------------------------------------------------------------------------------
1 | === High Level Overview
2 | The diagram below shows the server place in the home network:
3 |
4 | ----
5 | LAN
6 | ---------------------------------
7 | -------- | ----------- |
8 | WAN | WiFi | | ----------- | Client PC | |
9 | <------->| Router |<----->| | Silverbox | ----------- |
10 | -------- | ----------- ----------- |
11 | - DHCP | - DNS | Client PC | |
12 | | - ... ----------- |
13 | | ----------- |
14 | | | ..... | |
15 | | ----------- |
16 | ---------------------------------
17 | ----
18 |
19 | The server is on the Local Area Network (together with regular clients such as PCs, smartphones etc.) and
20 | it acts as a DNS server (apart from all the other services and programs it runs).
21 | It is separated from the internet by the router (and thus sits behind NAT).
22 |
23 | Of course, this is just one of the options (but probably one of the most common ones) and it can be adjusted to suit
24 | your needs.
25 |
26 | NOTE: In my case, all the clients are also Linux-based. This is not a requirement, and you may have clients running
27 | Windows, macOS or another OS, but in such a case the client configuration will obviously be different.
28 | In some parts of this document it is assumed that your client is an x86 64-bit PC running Desktop Ubuntu Linux 18.04.
29 |
30 |
--------------------------------------------------------------------------------
/server-overview/server-overview.adoc:
--------------------------------------------------------------------------------
1 | [[server_overview]]
2 | == Server Overview
3 |
4 | include::goals.adoc[]
5 |
6 | include::high-level-overview.adoc[]
7 |
8 | include::software-and-services.adoc[]
9 |
10 | include::hardware.adoc[]
11 |
--------------------------------------------------------------------------------
/server-overview/software-and-services.adoc:
--------------------------------------------------------------------------------
1 | === Software and Services
2 | This is the list of services running on the server that are described in this document:
3 |
4 | - *Unbound* as a forwarding DNS server that forwards queries to the DNS server of your choice and uses DNS-over-TLS and DNSSEC for
5 | extra security and privacy.
6 | - *NFS server* secured with Kerberos (clean NFSv4-only server).
7 | - *Nextcloud* accessible over HTTPS with Let's Encrypt certificates (renewed automatically using Certbot with DNS challenge).
8 | - *Transmission* BitTorrent client that communicates only over a VPN connection.
9 | - *SOCKS5 proxy server* that proxies traffic securely over a VPN connection.
10 | - *Git server* for hosting Git repositories.
11 | - *Borg and Rclone* for automatic encrypted incremental backups (both on-site and off-site).
12 | - *Reverse proxy server* with HTTPS (using wildcard certificate) and basic authentication to access internal services.
13 | - *Firefly III* for personal finances management.
14 | - *Monit* for system monitoring and notifications.
15 | - Script to automatically update the DNS record pointing to the server's public IP address (in case of a dynamic IP).
16 |
17 | The server also runs:
18 |
19 | - SSH server.
20 | - Docker engine (as most of the workloads are run as containers).
21 |
22 |
--------------------------------------------------------------------------------
/silverbox-server.adoc:
--------------------------------------------------------------------------------
1 | = Silverbox: GNU/Linux Home Server
2 | ovk
3 | v1.3.4, October 29, 2022
4 | :homepage: https://github.com/ovk/silverbox
5 | :doctype: article
6 | :reproducible:
7 | :sectnums:
8 | :prewrap!:
9 | :attribute-missing: warn
10 | :source-highlighter: rouge
11 | :icons: font
12 | :toc: left
13 |
14 | include::parameters.adoc[]
15 |
16 | include::preamble.adoc[]
17 |
18 | include::introduction/introduction.adoc[]
19 |
20 | include::server-overview/server-overview.adoc[]
21 |
22 | include::basic-configuration/basic-configuration.adoc[]
23 |
24 | include::monitoring/monitoring.adoc[]
25 |
26 | include::dns-server/dns-server.adoc[]
27 |
28 | include::docker/docker.adoc[]
29 |
30 | include::socks5-over-vpn/socks5-over-vpn.adoc[]
31 |
32 | include::domain-name/domain-name.adoc[]
33 |
34 | include::nfs-server/nfs-server.adoc[]
35 |
36 | include::transmission/transmission.adoc[]
37 |
38 | include::nextcloud/nextcloud.adoc[]
39 |
40 | include::git-server/git-server.adoc[]
41 |
42 | include::reverse-proxy/reverse-proxy.adoc[]
43 |
44 | include::firefly/firefly.adoc[]
45 |
46 | include::backup/backup.adoc[]
47 |
48 | include::maintenance/maintenance.adoc[]
49 |
50 | include::references.adoc[]
51 |
52 |
--------------------------------------------------------------------------------
/socks5-over-vpn/client-configuration.adoc:
--------------------------------------------------------------------------------
1 | === Client Configuration
2 | On the client PC, create an entry in the `~/.ssh/config` file (to simplify tunnel creation):
3 |
4 | .~/.ssh/config
5 | [subs="attributes+"]
6 | ----
7 | host silverbox-proxy-tunnel
8 | HostName {SB_IP} # <1>
9 | Port {SB_VPN_PROXY_PORT} # <2>
10 | User proxytunnel
11 | IdentityFile ~/.ssh/silverbox-proxy-tunnel
12 | DynamicForward 127.0.0.1:{SB_VPN_PROXY_PORT} # <3>
13 | ----
14 | <1> Replace with your server address.
15 | <2> Replace with the VPN proxy port number.
16 | <3> Replace with the VPN proxy port number.
17 |
18 | Establish the tunnel manually for testing:
19 |
20 | ----
21 | ssh -N silverbox-proxy-tunnel
22 | ----
23 |
24 | To verify that the proxy works over VPN, you can run the following commands and check that the returned IPs are different:
25 |
26 | [subs="attributes+"]
27 | ----
28 | curl -v -x socks5://127.0.0.1:{SB_VPN_PROXY_PORT} http://api.ipify.org?format=json
29 | curl -v http://api.ipify.org?format=json
30 | ----
31 |
32 | ==== Automatic Tunnel Creation
33 | This section describes how to establish the tunnel automatically on user login to the Gnome session on Ubuntu 18.04.
34 |
35 | Create a script `vpn-proxy-tunnel.sh` somewhere with the following content:
36 |
37 | .vpn-proxy-tunnel.sh
38 | [source,bash]
39 | ----
40 | #!/bin/bash
41 |
42 | while true; do
43 | ssh -N silverbox-proxy-tunnel &>/dev/null
44 | notify-send --urgency=normal -i error "VPN proxy tunnel disconnected. Retrying in 20 seconds."
45 | sleep 20s
46 | done
47 | ----
48 |
49 | Mark it as executable:
50 |
51 | ----
52 | chmod a+x vpn-proxy-tunnel.sh
53 | ----
54 |
55 | Add this script to your client PC autostart.
56 | On Ubuntu 18.04 it can be added using the "`Startup Applications`" GUI tool (or manually, as sketched below).
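As a sketch, the same can be achieved without the GUI by creating an XDG autostart entry manually (the `Exec` path is a placeholder; point it to wherever you saved the script):

.~/.config/autostart/vpn-proxy-tunnel.desktop
----
[Desktop Entry]
Type=Application
Name=VPN proxy tunnel
Exec=/home/user/bin/vpn-proxy-tunnel.sh
X-GNOME-Autostart-enabled=true
----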
57 |
58 | Reboot the client PC and verify that the script is running and the tunnel works.
59 |
60 | Now, for all applications that you wish to use the VPN (for example a web browser),
61 | you can configure the use of the SOCKS5 proxy server `127.0.0.1:{SB_VPN_PROXY_PORT}`.
62 |
63 |
--------------------------------------------------------------------------------
/socks5-over-vpn/container.adoc:
--------------------------------------------------------------------------------
1 | === Container
2 | This section describes how to run the VPN proxy container.
3 |
4 | ==== Running Container
5 | To build the image and run the container do:
6 |
7 | ----
8 | sudo docker compose -f /root/silverbox/containers/vpn-proxy/docker-compose.yaml up -d
9 | ----
10 |
11 | NOTE: When you run a container that exposes some ports to the host interface,
12 | by default Docker will automatically add netfilter rules to allow forwarding for these ports.
13 | That's why there's no need to add a UFW rule for the proxy tunnel.
14 | 
15 | When started this way, the container will be automatically restarted in case of failure (up to 15 consecutive restarts).
16 |
17 | ==== Automatic Container Startup
18 | To start the container automatically on boot, create the `/etc/systemd/system/vpn-proxy-start.service` file
19 | with the following content:
20 |
21 | ./etc/systemd/system/vpn-proxy-start.service
22 | ----
23 | [Unit]
24 | Description=Start VPN proxy container
25 | Requires=docker.service
26 | After=docker.service
27 |
28 | [Service]
29 | Type=oneshot
30 | ExecStart=/usr/bin/docker start vpn-proxy
31 |
32 | [Install]
33 | WantedBy=multi-user.target
34 | ----
35 |
36 | Enable the service, so that it will be started on system boot:
37 |
38 | ----
39 | sudo systemctl daemon-reload
40 | sudo systemctl enable vpn-proxy-start.service
41 | ----
42 |
43 | ==== Automatic VPN Server Rotation
44 | The idea of VPN server rotation is to restart the VPN proxy container periodically,
45 | so that every time it starts it will pick up a new random VPN profile and thus switch to a new VPN server.
46 | This may be useful for privacy and security reasons; however, this step is optional.
47 |
48 | The rotation is achieved by creating a simple Systemd timer that, when triggered, will restart the VPN proxy container.
49 |
50 | Create the `/etc/systemd/system/vpn-proxy-restart.service` file with the following content:
51 |
52 | ./etc/systemd/system/vpn-proxy-restart.service
53 | ----
54 | [Unit]
55 | Description=Restart VPN proxy container
56 | Requires=docker.service
57 | After=docker.service
58 |
59 | [Service]
60 | Type=oneshot
61 | ExecStart=/usr/bin/docker restart vpn-proxy
62 | ----
63 |
64 | You can run the service once to verify that it works and restarts the container:
65 |
66 | ----
67 | sudo systemctl daemon-reload
68 | sudo systemctl start vpn-proxy-restart.service
69 | ----
70 |
71 | Next, create the `/etc/systemd/system/vpn-proxy-restart.timer` file with the following content:
72 |
73 | ./etc/systemd/system/vpn-proxy-restart.timer
74 | ----
75 | [Unit]
76 | Description=Restart VPN proxy container
77 |
78 | [Timer]
79 | OnCalendar=*-*-* 01:00:00
80 | AccuracySec=1h
81 | Persistent=true
82 |
83 | [Install]
84 | WantedBy=timers.target
85 | ----
86 |
87 | In this configuration, the timer will be activated every day at 1am.
88 |
89 | Enable and start the timer:
90 |
91 | ----
92 | sudo systemctl daemon-reload
93 | sudo systemctl enable vpn-proxy-restart.timer
94 | sudo systemctl start vpn-proxy-restart.timer
95 | ----
96 |
97 | You can do `systemctl list-timers` to verify that the timer appears in the output
98 | and to check the time till next activation.
99 |
100 |
--------------------------------------------------------------------------------
/socks5-over-vpn/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | The status of the VPN proxy container is monitored by Monit.
3 |
4 | First, create the `/usr/local/etc/monit/scripts/container_status.sh` file with the following content:
5 |
6 | ./usr/local/etc/monit/scripts/container_status.sh
7 | [source,bash]
8 | ----
9 | #!/bin/sh
10 | STATUS=$(docker inspect --format="{{$2}}. Started: {{.State.StartedAt}}. Restarts: {{.RestartCount}}." "$1")
11 | echo $STATUS
12 | case "$STATUS" in
13 | "$3"*) exit 0 ;;
14 | *) exit 1 ;;
15 | esac
16 | ----
17 |
18 | And mark it as executable:
19 |
20 | ----
21 | sudo chmod u+x /usr/local/etc/monit/scripts/container_status.sh
22 | ----
23 |
24 | This script checks the given Docker container status field and compares it against the desired value (see the usage example below).
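For example, it can be run manually with the same arguments Monit uses below:

----
sudo /usr/local/etc/monit/scripts/container_status.sh vpn-proxy .State.Health.Status healthy
echo $?    # prints 0 if the container reports the desired status, 1 otherwise
----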
25 |
26 | Next, create the `/etc/monit/conf.d/40-docker-vpn-proxy` file with the following content:
27 |
28 | ./etc/monit/conf.d/40-docker-vpn-proxy
29 | ----
30 | check program vpn_proxy with path "/usr/local/etc/monit/scripts/container_status.sh vpn-proxy .State.Health.Status healthy"
31 | if status != 0 for 10 cycles then alert
32 | ----
33 |
34 | Restart Monit service:
35 |
36 | ----
37 | sudo systemctl restart monit
38 | ----
39 |
40 | Check Monit web interface and make sure that VPN proxy monitoring is working.
41 |
42 |
--------------------------------------------------------------------------------
/socks5-over-vpn/socks5-over-vpn.adoc:
--------------------------------------------------------------------------------
1 | [[socks5_over_vpn]]
2 | == SOCKS5 Over VPN
3 | This section describes how to setup a SOCKS5 proxy on the server,
4 | such that it will redirect all traffic through the VPN connection.
5 |
6 | The reason it may be useful (as opposed to just running everything over a VPN connection on the client PC)
7 | is to allow more granular control over which application's traffic goes through the VPN connection.
8 | For example, one may choose to direct all web browser traffic over a VPN connection,
9 | while Steam and music streaming apps will access the internet directly.
10 |
11 | The way it is achieved is by running an OpenVPN client inside a Docker container together with a SOCKS5 server
12 | in such a way that traffic received by the SOCKS5 server will be forwarded via the VPN tunnel and vice versa.
13 |
14 | Since the SOCKS5 protocol offers very weak authentication and no encryption,
15 | it will be additionally encapsulated in an SSH tunnel.
16 |
17 | Below is a diagram that demonstrates the idea:
18 |
19 | ----
20 | Client PC
21 | -----------------
22 | | | Silverbox Server
23 | | ------------- | ------------------------------
24 | | | Application | | | |
25 | | ------------- | | Container |
26 | | | SOCKS5 | | ------------------------ |
27 | | v | /--------------------\ | | | |
28 | | 127.0.0.1:XXXX ------ SSH Tunnel -----------SOCKS5 Server | | Internet
29 | ----------------- \--------------------/ | | | ---------->
30 | | | \ routing / | |
31 | | | ------- | |
32 | | ------------------------ |
33 | ------------------------------
34 | ----
35 |
36 | The prerequisites to this section are having Docker installed
37 | and having a VPN provider that supports OpenVPN and can provide OpenVPN profiles
38 | (or information on how to create them).
39 |
40 | include::image.adoc[]
41 |
42 | include::container.adoc[]
43 |
44 | include::monitoring.adoc[]
45 |
46 | include::client-configuration.adoc[]
47 |
48 |
--------------------------------------------------------------------------------
/transmission/container.adoc:
--------------------------------------------------------------------------------
1 | === Container
2 | This section describes how to run the container from the Transmission image.
3 |
4 | ==== Running Container
5 | To build the image and run the container do:
6 |
7 | ----
8 | sudo docker compose -f /root/silverbox/containers/transmission/docker-compose.yaml up -d
9 | ----
10 |
11 | ==== Automatic Container Startup
12 | To start the container automatically on boot, create the `/etc/systemd/system/transmission-start.service` file
13 | with the following content:
14 |
15 | ./etc/systemd/system/transmission-start.service
16 | ----
17 | [Unit]
18 | Description=Start Transmission container
19 | Requires=docker.service
20 | After=docker.service
21 |
22 | [Service]
23 | Type=oneshot
24 | ExecStart=/usr/bin/docker start transmission
25 |
26 | [Install]
27 | WantedBy=multi-user.target
28 | ----
29 |
30 | Enable the service, so that it will be started on system boot:
31 |
32 | ----
33 | sudo systemctl daemon-reload
34 | sudo systemctl enable transmission-start.service
35 | ----
36 |
37 | ==== Automatic VPN Server Rotation
38 | VPN server rotation is configured in a similar way to the SOCKS5 proxy VPN server rotation.
39 |
40 | Create the `/etc/systemd/system/transmission-restart.service` file with the following content:
41 |
42 | ./etc/systemd/system/transmission-restart.service
43 | ----
44 | [Unit]
45 | Description=Restart Transmission container
46 | Requires=docker.service
47 | After=docker.service
48 |
49 | [Service]
50 | Type=oneshot
51 | ExecStart=/usr/bin/docker restart transmission
52 | ----
53 |
54 | Create the `/etc/systemd/system/transmission-restart.timer` file with the following content:
55 |
56 | ./etc/systemd/system/transmission-restart.timer
57 | ----
58 | [Unit]
59 | Description=Restart Transmission container
60 |
61 | [Timer]
62 | OnCalendar=Fri *-*-* 01:00:00 # <1>
63 | AccuracySec=1h
64 | Persistent=true
65 |
66 | [Install]
67 | WantedBy=timers.target
68 | ----
69 | <1> In this configuration the timer is activated every Friday at 1 AM. Feel free to adjust this.
70 |
71 | Enable and start the timer:
72 |
73 | ----
74 | sudo systemctl daemon-reload
75 | sudo systemctl enable transmission-restart.timer
76 | sudo systemctl start transmission-restart.timer
77 | ----
78 |
79 | You can run `systemctl list-timers` to verify that the timer appears in the output
80 | and to check the time until the next activation.
81 |
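   | The restart itself can also be triggered manually as a sanity check
   | (the container uptime reported by `docker ps` should reset after this):
   |
   | ----
   | sudo systemctl start transmission-restart.service
   | sudo docker ps --filter name=transmission --format '{{.Names}}: {{.Status}}'
   | ----
   |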
82 |
--------------------------------------------------------------------------------
/transmission/monitoring.adoc:
--------------------------------------------------------------------------------
1 | === Monitoring
2 | The status of the Transmission container is monitored by Monit.
3 |
4 | Create the `/etc/monit/conf.d/60-docker-transmission` file with the following content:
5 |
6 | ./etc/monit/conf.d/60-docker-transmission
7 | ----
8 | check program transmission with path "/usr/local/etc/monit/scripts/container_status.sh transmission .State.Health.Status healthy"
9 | if status != 0 for 10 cycles then alert
10 | ----
11 |
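   | Before relying on Monit, the check script can optionally be run manually with the same arguments
   | to confirm it reports the container as healthy (it should exit with code 0 while the container is healthy):
   |
   | ----
   | sudo /usr/local/etc/monit/scripts/container_status.sh transmission .State.Health.Status healthy; echo $?
   | ----
   |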
12 | Restart Monit service:
13 |
14 | ----
15 | sudo systemctl restart monit
16 | ----
17 |
18 | Check the Monit web interface and make sure that Transmission monitoring is working.
19 |
20 |
--------------------------------------------------------------------------------
/transmission/transmission.adoc:
--------------------------------------------------------------------------------
1 | == Transmission
2 | This section describes how to set up the Transmission <> BitTorrent client on the server.
3 | Similarly to how the SOCKS5 proxy server was deployed (as described in the <> section),
4 | Transmission will be running inside a Docker container together with an OpenVPN client,
5 | such that all Transmission traffic will be tunneled through the VPN.
6 | Transmission will be managed using a web user interface, which will be exposed outside of the container.
7 |
8 | This section assumes that the <> and <> sections were completed.
9 |
10 | include::image.adoc[]
11 |
12 | include::container.adoc[]
13 |
14 | include::monitoring.adoc[]
15 |
16 | include::user-interface.adoc[]
17 |
18 |
--------------------------------------------------------------------------------
/transmission/user-interface.adoc:
--------------------------------------------------------------------------------
1 | === User Interface
2 | Transmission has a web-based user interface that is exposed outside of the Docker container.
3 | However, it is exposed on localhost only, so it is not accessible from the outside.
4 |
5 | To access the Transmission web UI securely, an SSH tunnel needs to be created first (similarly to how the Monit UI is accessed).
6 | For example, establish an SSH tunnel from the client PC:
7 |
8 | [subs="attributes+"]
9 | ----
10 | ssh {SB_USER}@{SB_IP} -N -L 127.0.0.1:\{LOCAL_PORT}:127.0.0.1:{SB_TRANSMISSION_PORT}
11 | ----
12 |
13 | Here `\{LOCAL_PORT}` is the port on which SSH will be listening on the client PC.
14 | The web interface can now be accessed on the client PC at `http://127.0.0.1:\{LOCAL_PORT}`.
15 |
16 | To create this tunnel in a more convenient way, you can add the following entry to your SSH config file `~/.ssh/config`:
17 |
18 | .~/.ssh/config
19 | [subs="attributes+"]
20 | ----
21 | host silverbox-transmission-ui-tunnel
22 | HostName {SB_IP} # <1>
23 | IdentityFile ~/.ssh/silverbox-key
24 | LocalForward 127.0.0.1:\{LOCAL_PORT} 127.0.0.1:{SB_TRANSMISSION_PORT}
25 | ----
26 | <1> IP can be replaced with the server FQDN.
27 |
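   | If your username on the client PC differs from your username on the server,
   | you may also want to add a `User {SB_USER}` line to this entry (optional, depending on your local setup).
   |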
28 | Now the tunnel can be established simply with:
29 |
30 | ----
31 | ssh -N silverbox-transmission-ui-tunnel
32 | ----
33 |
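   | Optionally, the standard OpenSSH `-f` option can be added to put the tunnel into the background
   | once it is established:
   |
   | ----
   | ssh -f -N silverbox-transmission-ui-tunnel
   | ----
   |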
34 | NOTE: A more convenient way of accessing the Transmission web interface and other internal services is described in the <> section.
35 |
36 |
--------------------------------------------------------------------------------