├── .exclude
├── reclone.conf
├── swatch.service
├── fstab
├── rcup
├── rclone.service
├── reclone
└── README.md
/.exclude:
--------------------------------------------------------------------------------
1 | Games/**
2 | lost+found/**
3 | Misc/**
4 | Music/**
5 | Software/**
6 | .Trash/**
7 | *.fuse_hidden*
8 |
--------------------------------------------------------------------------------
/reclone.conf:
--------------------------------------------------------------------------------
1 | watchfor /googleapi: Error 403: Rate Limit Exceeded, rateLimitExceeded/
2 | exec sudo bash /home/username/reclone
3 |
--------------------------------------------------------------------------------
/swatch.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Swatch Log Monitoring Daemon
3 | After=syslog.target network.target auditd.service sshd.service
4 |
5 | [Service]
6 | ExecStart=/usr/bin/swatchdog -c /home/username/reclone.conf -t /home/username/logs/rclone.log --pid-file=/var/run/swatch.pid --daemon
7 | ExecStop=/bin/kill -s KILL $(cat /var/run/swatch.pid)
8 | Restart=on-failure
9 | Type=forking
10 | PIDFile=/var/run/swatch.pid
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/fstab:
--------------------------------------------------------------------------------
1 | #Cache Drives
2 | UUID="b6ed03ee-b1dc-417f-b5a6-fe5603f03c13" /media/disk01 ext4 defaults,nofail,x-systemd.device-timeout=10 0 2
3 | UUID="d3689128-4477-4328-a7a0-46b5b699f738" /media/disk02 ext4 defaults,nofail,x-systemd.device-timeout=10 0 2
4 | UUID="7ffe898b-30f9-4ac7-9a07-c5cdeab7bc76" /media/disk03 ext4 defaults,nofail,x-systemd.device-timeout=10 0 2
5 |
6 | #MergerFS
7 | /media/disk* /files fuse.mergerfs defaults,sync_read,allow_other,category.action=all,category.create=ff,minfreespace=100G,fsname=Files 0 0
8 |
--------------------------------------------------------------------------------
/rcup:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # RClone Config file
3 | RCLONE_CONFIG=/home/username/.config/rclone/rclone.conf
4 | export RCLONE_CONFIG
5 | LOCKFILE="/var/lock/`basename $0`"
6 |
7 |
8 | new="gtcrypt1"
9 |
10 | for newfolder in "$1"
11 | do
12 | case $newfolder in
13 | "1" )
14 | new="gtcrypt1";;
15 | "2" )
16 | new="gtcrypt2";;
17 | "3" )
18 | new="gtcrypt3";;
19 | "4" )
20 | new="gtcrypt4";;
21 | "5" )
22 | new="gtcrypt5";;
23 | "6" )
24 | new="gtcrypt6";;
25 | "7" )
26 | new="gtcrypt7";;
27 | "8" )
28 | new="gtcrypt8";;
29 | esac
30 | done
31 |
32 | (
33 | # Wait for lock for 5 seconds
34 | flock -x -w 5 200 || exit 1
35 |
36 | # Move older local files to the cloud
37 | /usr/bin/rclone move /disk/ $new: --checkers 3 --max-transfer 730G --fast-list --log-file /home/username/rcup.log --tpslimit 3 --transfers 3 -v --exclude-from /home/username/.exclude --delete-empty-src-dirs
38 |
39 | ) 200> ${LOCKFILE}
40 |
--------------------------------------------------------------------------------
/rclone.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=RClone Service
3 | Wants=network-online.target
4 | After=network-online.target
5 | AssertPathIsDirectory=/media/diskdrive
6 |
7 | [Service]
8 | Type=notify
9 | Environment=RCLONE_CONFIG=/home/username/.config/rclone/rclone.conf
10 | KillMode=none
11 | RestartSec=10
12 | ExecStart=/usr/bin/rclone mount gd: /media/diskdrive \
13 | --allow-other \
14 | --dir-cache-time 5000h \
15 | --syslog \
16 | --poll-interval 10s \
17 | --umask 002 \
18 | --user-agent GoogleDrive \
19 | --rc \
20 | --rc-addr 127.0.0.1:5572 \
21 | --rc-no-auth \
22 | --cache-dir=/media/disk00/Misc/.cache \
23 | --vfs-cache-mode full \
24 | --volname GoogleDrive \
25 | --vfs-cache-max-size 600G \
26 | # If you are building a library from scratch and need to analyze a large amount of data, setting a very small read chunk size
27 | # will make things download a lot less data. So you can uncomment this section.
28 | # If everything is already scanned, you can leave it at the default as it helps things start up a little faster.
29 | # --vfs-read-chunk-size 1M \
30 | # This adds a little buffer for read ahead
31 | #--vfs-read-ahead 256M \
32 | # This limits the age in the cache if the size is reached and it removes the oldest files first
33 | --vfs-read-ahead 2G \
34 | --vfs-cache-max-age 5000h \
35 | --bwlimit-file 100M
36 |
37 | ExecStop=/bin/fusermount -uz /media/diskdrive
38 | ExecStartPre=-/bin/fusermount -uz /media/diskdrive
39 | ExecStartPost=/usr/bin/rclone rc vfs/refresh recursive=true --rc-addr 127.0.0.1:5572 _async=true
40 | Restart=on-failure
41 | User=username
42 | Group=plex
43 |
44 | [Install]
45 | WantedBy=multi-user.target
46 |
--------------------------------------------------------------------------------
/reclone:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #Made by Torkiliuz
3 |
4 | #This is used multiple times, so it's a variable (tr -dc '0-9' gets only numbers from the output)
5 | check=$(grep [0-99]\: /etc/systemd/system/rclone.service|awk '{print $3}'|tr -dc '0-9')
6 |
7 | #Define amount of rclone-mounts here
8 | num=8
9 |
10 | #---------------------------------------------------------------------------------------#
11 |
12 | #Adding option for force-parameters
13 | force=0
14 | while getopts f option; do
15 | case $option in
16 | f)
17 | force=1 >&2; echo "forcing rotation" ;;
18 | \?)
19 | echo "Invalid option: -$OPTARG" >&2; exit 2 ;;
20 | esac
21 | done
22 |
23 | #Check if Plex Transcodes are running, as they will crash if so
24 | while pgrep -x "Plex Transcoder" >/dev/null; do
25 | if [ $(echo $force) -lt 1 ]; then
26 | echo -e "ERROR: Plex Transcoder is currently running. Please stop any open transcodes\nSleeping for 10 seconds" >&2
27 | sleep 10
28 | elif [ $(echo $force) == 1 ]; then
29 | break
30 | fi
31 | done
32 |
33 | ###Inform and also unmount drive
34 | echo "ReeeCloning" && fusermount -uz /media/diskdrive
35 |
36 | #If we're on the highest number we should start at the beginning (uses exactly the highest number here)
37 | if [ "$check" == "$num" ]; then
38 | sed -i "s/gtcrypt[0-9]\+\:/gtcrypt1\:/" /etc/systemd/system/rclone.service
39 | echo "changed to gtcrypt1:"
40 | systemctl daemon-reload && systemctl restart rclone.service
41 | exit 0
42 | fi
43 |
44 | #Runs a rolling increment of drive-numbers (uses 1 less than what you want to go up to here)
45 | if [[ "$check" -ge 1 && "$check" -le $((num-1)) ]]; then
46 | ((check++))
47 | sed -i "s/gtcrypt[0-9]\+\:/gtcrypt$check\:/" /etc/systemd/system/rclone.service
48 | echo "changed to gtcrypt"$check":"
49 | systemctl daemon-reload && systemctl restart rclone.service
50 | exit 0
51 | fi
52 |
53 |
54 | #Kill it with fire if everything else fails
55 | exit 1
56 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Rclone + Google Drive + Plex
2 |

3 |
4 | For a while I've been running my media server setup as a hybrid cloud solution. It has been working well for me, and although there's a small amount of added latency it's still snappy enough to compete with a fully local setup. There are many things that make a hybrid cloud setup immensely more complicated than a strictly local setup though, and the learning curve is fairly steep. If you feel up for it and have some time available, this might be for you.
5 | In order to keep this from being a really long post I will not go as much into detail about each step, but I'll rather point to the resources necessary to understand how the setup works. Some of the scripts that are used in this guide are written by me, and they do a simple form of error checking, but they're in no way flawless. Use at your own risk.
6 | I know that some of the solutions in here can be automated and improved upon, this is not a *be all and end all* solution by any means.
7 |
8 | First and foremost; using Google Drive has its limitations; most importantly the limit of `750 GB` uploaded per user every `24 hour`. One way to work around this limitation is to use Google Team Drive. Google Team Drive does not limit you to only use Google Business-registered accounts, so create a few free gmail accounts; link them to your Team Drive, and that limitation is circumvented. In my testing the maximum allowed free Google accounts created in a day is `3`; so it's a good idea to start creating these before going through the next steps.
9 | *We'll also use the same account-switching strategy when we mount drives in order to bypass restrictions set on the amount of API-requests allowed per user per day.*
10 |
11 | Tools we use in order to get this set up and working:
12 | [MergerFS](https://github.com/trapexit/mergerfs) - (*configuration*)
13 | [Rclone](https://rclone.org/) - (*configuration*)
14 | [Systemd Rclone Mount](https://raw.githubusercontent.com/animosity22/homescripts/master/rclone-systemd/gmedia-rclone.service) - (*modified*)
15 | [Nightly upload-script](https://raw.githubusercontent.com/animosity22/homescripts/master/scripts/upload_cloud) - (*modified*)
16 | [Swatchdog](https://github.com/ToddAtkins/swatchdog) - (configuration files)
17 | [Reclone](https://gist.github.com/Torkiliuz/90c7d50845dec168cc0de2c82c3672c3) - (shell script)
18 | [Plex Autoscan](https://github.com/l3uddz/plex_autoscan) - (*optional*)
19 |
20 | # MergerFS
21 | MergerFS is best installed by grabbing the appropriate release for your distribution [here](https://github.com/trapexit/mergerfs/releases).
22 | Set up your cache drives as the first drives that files get written to. You'll need a set amount of drives based on how much media you process through a week. I'd recommend at least `8 TB`, just in order to have a bit of a buffer in case an upload session fails, etc..
23 | Your `/etc/fstab` should look a bit like this:
24 | ```
25 | #Cache Drives
26 | UUID="b6ed03ee-b1dc-417f-b5a6-fe5603f03c13" /media/disk01 ext4 defaults,nofail,x-systemd.device-timeout=10 0 2
27 | UUID="d3689128-4477-4328-a7a0-46b5b699f738" /media/disk02 ext4 defaults,nofail,x-systemd.device-timeout=10 0 2
28 | UUID="7ffe898b-30f9-4ac7-9a07-c5cdeab7bc76" /media/disk03 ext4 defaults,nofail,x-systemd.device-timeout=10 0 2
29 |
30 | #MergerFS
31 | /media/disk* /files fuse.mergerfs defaults,sync_read,allow_other,category.action=all,category.create=ff,minfreespace=100G,fsname=Files 0 0
32 | ```
33 | Check the MergerFS github for an explanation of the parameters used. Most importantly it gets the Cache Drives listed as the first drives, and therefore `category.create=ff` makes sure that every write is tried on these drives first, instead of writing to the Google Drive mount directly.
34 | We're not using `/etc/fstab` to mount the Google Drive remote, as we need a more dynamic way to remount based on when rate limiting occurs. We'll use `Systemd` combined with `Swatchdog` to solve this.
35 |
36 | # Rclone
37 | First you'll install [Rclone](https://rclone.org/downloads/). This differs from platform to platform, and also depends a bit on if you want the latest development release, latest stable release, or if you want to go for the one already in your package manager. I'd recommend going with the latest stable release.
38 | In order to configure Rclone run `rclone config` in your terminal.
39 | Press `n` to create a new remote, which will define your connection to Google Drive.
40 | Name it something that makes sense, and add a number to the end of it. For example `gtdrive1`. We'll use the number for autorotation of users later.
41 | Press the number that corresponds to **Google Drive** in the next part; this number changes throughout different versions of Rclone.
42 | Create a custom `Client Id` by following **[this guide(!)](https://rclone.org/drive/#making-your-own-client-id)**; add that to **Application Client Id**, press `Enter`.
43 | Do the same for **Application Client Secret**.
44 | Choose **Full access all files** if you get asked.
45 | Leave **ID of the root folder** blank, as we're only using a subfolder on the Team Drive; just press `Enter`.
46 | Just press `Enter` for **Service Account Credentials JSON file path**.
47 | Press `n` to skip **advanced config**.
48 | Answer the question about auto config based on how you're running the commands. If you're running through SSH choose `n`.
49 | Open the URL you're given in a browser locally if you're running through SSH.
50 | Choose the account to log in with (use an account that is linked to the Team Drive you want to access).
51 | Paste the verification code that you get back into the SSH-session.
52 | Press `y` when asked to configure it as a Team Drive.
53 | Choose the Team Drive you want to access from the list of choices that gets printed by writing the number corresponding to it.
54 | Verify that things look OK, and press `y` if it is.
55 | Now we're back to the list of configured remotes.
56 | Press `n` to add another remote; we will encrypt our data so only we can access it, and also so that we don't get metadata leaked.
57 | This drive also needs a name. Building on the name from the first remote, name it something that makes sense, and add a number to the end for scripted account-rolling. For example `gtcrypt1`.
58 | Since we're encrypting a remote we'll choose the option **Encrypt/Decrypt a remote** in the menu.
59 | We then get asked for the remote to encrypt. In order to have the ability to use the Google Drive for other files than media we will use a subfolder for media. This way our top level directory only has 1 folder and doesn't get as cluttered. Add the remote you previously created. In the example this would look like `gtdrive1:Media`.
60 | For encryption choose **Encrypt the filenames**, also known as **standard** (option `2` as of writing this guide).
61 | Then choose **Encrypt directory names** (`true`).
62 | Since we'll only have one account connected at a time it's important that we set the encryption password and salt the same for each account/remote that we set up. Otherwise we will only see the files uploaded by that particular user for each remote. Therefore you'll choose **Yes type in my own password** for both. The password and salt can and should be different though, but use the same password and salt for each account.
63 | We do not need to edit advanced config, so choose `n` for this.
64 | If everything looks OK, type `y`.
65 |
66 | To check that the remote mount works you can run `rclone lsd gtcrypt1:`. If you don't get a warning message everything should be OK.
67 |
68 | ## Systemd Rclone Mount
69 | In order to mount our remote reliably with rolling accounts we'll use Systemd combined with log-watching done by Swatchdog. Animosity22 inspired this part, and the Systemd-file we're using is very similar to [his](https://raw.githubusercontent.com/animosity22/homescripts/master/rclone-systemd/gmedia-rclone.service).
70 | The Systemd-file we're using should be written to `/etc/systemd/system/rclone.service` and contain the following:
71 | ```
72 | [Unit]
73 | Description=RClone Service
74 | After=network-online.target
75 | Wants=network-online.target
76 |
77 | [Service]
78 | Type=notify
79 | Environment=RCLONE_CONFIG=/home/username/.config/rclone/rclone.conf
80 | ExecStart=/usr/bin/rclone mount gtcrypt1: /media/diskdrive \
81 | --allow-other \
82 | --dir-cache-time 96h \
83 | --drive-chunk-size 32M \
84 | --log-level INFO \
85 | --log-file /home/username/logs/rclone.log \
86 | --timeout 1h \
87 | --umask 002 \
88 | --rc
89 | ExecStop=/bin/fusermount -uz /media/diskdrive
90 | Restart=on-failure
91 | User=username
92 | Group=plex
93 |
94 | [Install]
95 | WantedBy=multi-user.target
96 | ```
97 | Replace every instance of `/home/username` with the homefolder of the user you want to run Rclone as. When running `rclone config` the configuration is automatically saved to the homefolder of the user running that command. In the case of running as the user `username`, the configuration would be saved to `/home/username/.config/rclone/rclone.conf`.
98 | In order to have the logs for rclone be saved to `/home/username/logs/rclone.log` the folder `/home/username/logs` needs to exist first. Create the folder by running `mkdir /home/username/logs` in the terminal.
99 | Also change `User=username` to the correct username.
100 |
101 | ## Nightly upload script
102 | Once again we're getting some inspiration from Animosity22's [github](https://raw.githubusercontent.com/animosity22/homescripts/master/scripts/upload_cloud). The modified version we're using is named `/home/username/rcup`, and looks like this:
103 | ```
104 | #!/bin/bash
105 | # RClone Config file
106 | RCLONE_CONFIG=/home/username/.config/rclone/rclone.conf
107 | export RCLONE_CONFIG
108 | LOCKFILE="/var/lock/`basename $0`"
109 |
110 |
111 | new="gtcrypt1"
112 |
113 | for newfolder in "$1"
114 | do
115 | case $newfolder in
116 | "1" )
117 | new="gtcrypt1";;
118 | "2" )
119 | new="gtcrypt2";;
120 | "3" )
121 | new="gtcrypt3";;
122 | "4" )
123 | new="gtcrypt4";;
124 | "5" )
125 | new="gtcrypt5";;
126 | "6" )
127 | new="gtcrypt6";;
128 | "7" )
129 | new="gtcrypt7";;
130 | "8" )
131 | new="gtcrypt8";;
132 | esac
133 | done
134 |
135 | (
136 | # Wait for lock for 5 seconds
137 | flock -x -w 5 200 || exit 1
138 |
139 | # Move older local files to the cloud
140 | /usr/bin/rclone move /disk/ $new: --checkers 3 --max-transfer 730G --fast-list --log-file /home/username/rcup.log --tpslimit 3 --transfers 3 -v --exclude-from /home/username/.exclude --delete-empty-src-dirs
141 |
142 | ) 200> ${LOCKFILE}
143 | ```
144 | We're limiting the upload to 730 GB, as it's better to have uploaded a complete file and not hit the rate limit, than getting halfway through an uploaded file, and then fail because of rate limiting.
145 | There's an added functionality to be able to push to different remotes manually by writing a specific number after `rcup`, for example `bash /home/username/rcup 3` to use `gtcrypt3`.
146 | For the nightly upload it will use `gtcrypt1` by default.
147 | We use `cron` in order to run this every night at 02:00. Edit crontab for root by typing `sudo crontab -e` in terminal.
148 | Add the following to crontab:
149 | ```
150 | 0 2 * * * /home/username/rcup 2>&1
151 | ```
152 |
153 | We're using `/home/username/.exclude` to ignore some files and folders that we don't want to upload:
154 | ```
155 | Games/**
156 | lost+found/**
157 | Misc/**
158 | Music/**
159 | Software/**
160 | .Trash/**
161 | *.fuse_hidden*
162 | ```
163 |
164 | # Swatchdog
165 | Swatchdog is part of your distribution's package manager; install it as any other application you'd install through the package manager of your respective distribution.
166 | For Swatchdog we'll also create a Systemd-file. Edit it as `/etc/systemd/system/swatch.service`, and add the following to it:
167 | ```
168 | [Unit]
169 | Description=Swatch Log Monitoring Daemon
170 | After=syslog.target network.target auditd.service sshd.service
171 |
172 | [Service]
173 | ExecStart=/usr/bin/swatchdog -c /home/username/reclone.conf -t /home/username/logs/rclone.log --pid-file=/var/run/swatch.pid --daemon
174 | ExecStop=/bin/kill -s KILL $(cat /var/run/swatch.pid)
175 | Restart=on-failure
176 | Type=forking
177 | PIDFile=/var/run/swatch.pid
178 |
179 | [Install]
180 | WantedBy=multi-user.target
181 | ```
182 | Edit `/home/username` as applicable here too.
183 | We're telling Swatchdog to run with the configuration from `/home/username/reclone.conf`; that file looks like this:
184 | ```
185 | watchfor /googleapi: Error 403: Rate Limit Exceeded, rateLimitExceeded/
186 | exec sudo bash /home/username/reclone
187 | ```
188 | Rather simple stuff, right? This configuration tells Swatchdog to look for any event in the log file `/home/username/logs/rclone.log` that matches `Error 403: Rate Limit Exceeded`.
189 | Every time a `Rate Limit Exceeded` event occurs we get poor performance from the Google Drive mount. Therefore we switch over to a different user account that has not exceeded the rate limit that day.
190 | The switching is done by the following script (Swatchdog runs it automatically whenever the pattern above matches):
191 | ```
192 | #!/bin/bash
193 | #Made by Torkiliuz
194 |
195 | #This is used multiple times, so it's a variable (tr -dc '0-9' gets only numbers from the output)
196 | check=$(grep [0-99]\: /etc/systemd/system/rclone.service|awk '{print $3}'|tr -dc '0-9')
197 |
198 | #Define amount of rclone-mounts here
199 | num=8
200 |
201 | #---------------------------------------------------------------------------------------#
202 |
203 | #Adding option for force-parameters
204 | force=0
205 | while getopts f option; do
206 | case $option in
207 | f)
208 | force=1 >&2; echo "forcing rotation" ;;
209 | \?)
210 | echo "Invalid option: -$OPTARG" >&2; exit 2 ;;
211 | esac
212 | done
213 |
214 | #Check if Plex Transcodes are running, as they will crash if so
215 | while pgrep -x "Plex Transcoder" >/dev/null; do
216 | if [ $(echo $force) -lt 1 ]; then
217 | echo -e "ERROR: Plex Transcoder is currently running. Please stop any open transcodes\nSleeping for 10 seconds" >&2
218 | sleep 10
219 | elif [ $(echo $force) == 1 ]; then
220 | break
221 | fi
222 | done
223 |
224 | ###Inform and also unmount drive
225 | echo "ReeeCloning" && fusermount -uz /media/diskdrive
226 |
227 | #If we're on the highest number we should start at the beginning (uses exactly the highest number here)
228 | if [ "$check" == "$num" ]; then
229 | sed -i "s/gtcrypt[0-9]\+\:/gtcrypt1\:/" /etc/systemd/system/rclone.service
230 | echo "changed to gtcrypt1:"
231 | systemctl daemon-reload && systemctl restart rclone.service
232 | exit 0
233 | fi
234 |
235 | #Runs a rolling increment of drive-numbers (uses 1 less than what you want to go up to here)
236 | if [[ "$check" -ge 1 && "$check" -le $((num-1)) ]]; then
237 | ((check++))
238 | sed -i "s/gtcrypt[0-9]\+\:/gtcrypt$check\:/" /etc/systemd/system/rclone.service
239 | echo "changed to gtcrypt"$check":"
240 | systemctl daemon-reload && systemctl restart rclone.service
241 | exit 0
242 | fi
243 |
244 |
245 | #Kill it with fire if everything else fails
246 | exit 1
247 | ```
248 | In this example we have 8 accounts, going from `gtcrypt1` all the way up to `gtcrypt8`. Adjust the numbers to the amount of accounts you've set up with `rclone config`.
249 |
250 | # Plex Autoscan
251 | The guide on [Github](https://github.com/l3uddz/plex_autoscan) is already quite good here, so I'll just make this short. As an additional tool, Plex Autoscan can reduce the number of API requests made to Google Drive, so you don't need to do as much account-rolling. This tool is most useful for cases when you have Sonarr, Radarr and Lidarr autodownloading files.
252 |
--------------------------------------------------------------------------------