├── .gitattributes
├── .gitmodules
├── .vscode
│   ├── launch.json
│   └── settings.json
├── LICENSE
├── README.md
├── pve_nas_installer.sh
├── pve_nas_toolbox.sh
├── shared
│   ├── pve_nas_create_lvm_build.sh
│   ├── pve_nas_create_singledisk_build.sh
│   ├── pve_nas_create_users.sh
│   ├── pve_nas_create_zfs_build.sh
│   ├── pve_nas_create_zfs_cacheaddon.sh
│   ├── pve_nas_fs_list.sh
│   └── pve_nas_select_fs_build.sh
└── src
    ├── omv
    │   └── pve_nas_vm_nas_installer.sh
    └── ubuntu
        ├── email_templates
        │   └── pve_nas_ct_newuser_msg.sh
        ├── proftpd_settings
        │   ├── global_default.conf
        │   ├── global_desktopdir.conf
        │   ├── pve_nas_ct_proftpdsettings.sh
        │   └── sftp.conf
        ├── pve-nas_sw.sh
        ├── pve_nas_ct_addjailuser.sh
        ├── pve_nas_ct_addpoweruser.sh
        ├── pve_nas_ct_deleteuser.sh
        ├── pve_nas_ct_nas_chrootapplist
        ├── pve_nas_ct_nas_installer.sh
        ├── pve_nas_ct_nas_toolbox.sh
        └── pve_nas_ct_restoredirperm.sh
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 | * text eol=lf
4 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 |
2 | [submodule "common"]
3 | path = common
4 | url = https://github.com/ahuacate/common.git
5 | branch = main
6 |
--------------------------------------------------------------------------------
/.vscode/launch.json:
--------------------------------------------------------------------------------
1 | {
2 | // Use IntelliSense to learn about possible attributes.
3 | // Hover to view descriptions of existing attributes.
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
5 | "version": "0.2.0",
6 | "configurations": [
7 | {
8 | "type": "bashdb",
9 | "request": "launch",
10 | "name": "Bash-Debug (type in script name)",
11 | "cwd": "${workspaceFolder}",
12 | "program": "${command:AskForScriptName}",
13 | "args": []
14 | },
15 | {
16 | "type": "bashdb",
17 | "request": "launch",
18 | "name": "Bash-Debug (select script from list of sh files)",
19 | "cwd": "${workspaceFolder}",
20 | "program": "${command:SelectScriptName}",
21 | "args": []
22 | }
23 | ]
24 | }
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "cSpell.ignoreWords": [
3 | "attempt",
4 | "attempts",
5 | "awk",
6 | "counter",
7 | "ctid",
8 | "dev",
9 | "do",
10 | "done",
11 | "echo",
12 | "eq",
13 | "exit",
14 | "fi",
15 | "grep",
16 | "hello",
17 | "if",
18 | "max",
19 | "null",
20 | "pct",
21 | "print",
22 | "reached",
23 | "running",
24 | "sleep",
25 | "status",
26 | "then",
27 | "until"
28 | ]
29 | }
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # PVE File Server (NAS)
2 |
3 | **Lightweight - Ubuntu NAS (CT)**
4 | A lightweight NAS built in a CT with 512MB RAM, running Ubuntu OS with a Webmin WebGUI frontend.
5 |
6 | Backend storage is managed by Proxmox. Choose between LVM, ZFS Raid or a basic single-disk ext4 file system. A USB disk-based NAS is also supported.
7 |
8 | **Heavyweight - OMV NAS (VM)**
9 | Open Media Vault (OMV) NAS built in a Proxmox VM. Requires direct-attached storage or PCIe SAS/SATA/NVMe HBA card pass-through. Our default file system is ext4 or BTRFS using MergerFS and SnapRAID.
10 |
11 | ## Features
12 | All builds include:
13 |
14 | * Power User & Group Accounts
15 |     * Groups: medialab:65605, homelab:65606, privatelab:65607, chrootjail:65608
16 |     * Users: media:1605, home:1606, private:1607 (see the account sketch after this list)
17 |     * Users media, home and private are our default CT App users
18 |     * Chrootjail Group for general User accounts
19 | * Supports all Medialab file permissions required by Sonarr, Radarr, Jellyfin, NZBGet and more
20 | * Includes all storage folders ready for all CT applications
21 | * Folder and user permissions are set, including ACLs
22 | * NFS 4.0 exports ready for PVE host's backend storage mounts
23 | * SMB 3.0 shares with access permissions set (by User Group accounts)
24 | * Local Domain setting option (i.e. .local, .localdomain, .home.arpa, .lan)
25 | * Easy Script Toolbox to create or delete User accounts, perform OS upgrades and install add-on services (i.e. SSMTP, ProFTPd and ZFS Cache)
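For reference, here is a minimal sketch of the equivalent shell commands for the accounts listed above, assuming the UID/GID mapping shown. The supplementary group memberships are an assumption drawn from the permission hierarchy in section 5.1.1; our Easy Scripts perform these steps, plus home folders and ACLs, for you.

```bash
# Create the default NAS groups with fixed GIDs
groupadd -g 65605 medialab
groupadd -g 65606 homelab
groupadd -g 65607 privatelab
groupadd -g 65608 chrootjail

# Create the default CT App users with fixed UIDs
# (supplementary groups follow the section 5.1.1 hierarchy - an assumption)
useradd -u 1605 -g medialab -s /bin/bash media
useradd -u 1606 -g homelab -G medialab -s /bin/bash home
useradd -u 1607 -g privatelab -G medialab,homelab -s /bin/bash private
```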
26 |
27 | ## Prerequisites
28 |
29 | Read about our system-wide requirements before proceeding any further.
30 |
31 | **Network Prerequisites**
32 |
33 | - [x] Layer 2/3 Network Switches
34 | - [x] Network Gateway (*recommend xxx.xxx.xxx.5*)
35 | - [x] Network DHCP server (*recommend xxx.xxx.xxx.5*)
36 | - [x] Network DNS server (*recommend xxx.xxx.xxx.5*)
37 | - [x] Network Name Server
38 | - [x] Network Name Server resolves all device hostnames (*static and dhcp IP*)
39 | - [x] Local domain name is set on all network devices (*see note below*)
40 | - [x] PVE host hostnames are suffixed with a numeric (*i.e pve-01 or pve01 or pve1*)
41 | - [x] PVE host has internet access
42 |
43 | **Required Prerequisites**
44 |
45 | - [x] PVE host installed, with a minimum of 1x spare empty disk (onboard or USB-connected).
46 |
47 | **Optional Prerequisites**
48 |
49 | - [ ] PVE Host installed SSD/NVMe ZFS Cache (Ubuntu CT builds)
50 | - [ ] HBA installed SSD/NVMe ZFS Cache (OMV VM builds)
51 | - [ ] PCIe SAS/SATA/NVMe HBA Adapter Card (i.e LSI 9207-8i)
52 |
53 | ## Local DNS Records
54 |
55 | Before proceeding, we strongly advise that you familiarize yourself with network Local DNS and the importance of having a PiHole server. To learn more, click here.
56 |
57 | It is essential to set your network's Local Domain or Search domain. For residential and small networks, we recommend using only top-level domain (spTLD) names because they cannot be resolved across the internet. Routers and DNS servers understand that ARPA requests they do not recognize should not be forwarded onto the public internet. It is best to select one of the following names: local, home.arpa, localdomain, or lan only. We strongly advise against using made-up names.
58 |
59 |
60 | ## Installation Options
61 |
62 | Our lightweight NAS is for PVE hosts limited by RAM.
63 |
64 | ### Ubuntu NAS CT - PVE SATA/NVMe
65 |
66 | LVM or ZFS backend filesystem, Ubuntu frontend.
67 |
68 | LVM and ZFS Raid levels depend on the number of disks installed. You also have the option of configuring a ZFS cache using SSD/NVMe drives. A ZFS cache provides high-speed disk I/O.
69 |
70 | ### Ubuntu NAS CT - Basic USB disk
71 |
72 | USB disk ext4 backend, Ubuntu frontend.
73 |
74 | All data is stored on a single external USB disk. A basic ext4 file system backend is managed by your Proxmox host.
75 |
76 |
77 | The heavyweight NAS option is our OMV VM. If you have adequate RAM (32GB or more) and want a user-friendly NAS WebGUI interface, we recommend you install OMV.
78 |
79 |
80 | ### OMV NAS VM - Direct attached storage
81 |
82 | **Physical Disk pass-through**
83 | Physical disks are configured to pass through to the VM as SCSI devices. You can configure as many disks as you like. This is a cost-effective solution because you can use the native SATA ports on your PVE host's mainboard. OMV manages both the backend and frontend. Requires the PVE host bootloader kernel config file edits shown [here](https://pve.proxmox.com/wiki/Pci_passthrough#Introduction) before installing.
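
On the PVE host, the disk pass-through itself is one `qm set` command per disk. A minimal sketch, assuming a hypothetical VM ID `110` and an example disk ID (always address disks by `/dev/disk/by-id` so device renumbering cannot break the mapping):

```bash
# List stable disk IDs on the PVE host
ls -l /dev/disk/by-id/ | grep -v part

# Pass a whole physical disk through to VM 110 as a SCSI device
# (VM ID and disk ID are examples only)
qm set 110 -scsi1 /dev/disk/by-id/ata-WDC_WD40EFRX-68N32N0_WD-WCC7K1234567

# Verify the new disk entry in the VM config
grep scsi1 /etc/pve/qemu-server/110.conf
```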
84 |
85 | **PCIe SAS/SATA/NVMe HBA Card**
86 | PCIe SAS/SATA/NVMe HBA Adapter Card (i.e LSI 9207-8i) pass-through will likely deliver superior NAS performance.
87 |
88 | A dedicated PCIe SAS/SATA/NVMe HBA Adapter Card (i.e LSI 9207-8i) is required for all NAS storage disks. All OMV storage disks, including any LVM/ZFS Cache SSDs, must be connected to the HBA Adapter Card. You cannot co-mingle OMV disks with the PVE host's onboard SATA/NVMe devices. OMV manages both the backend and frontend. Requires the PVE host bootloader kernel config file edits shown [here](https://pve.proxmox.com/wiki/Pci_passthrough#Introduction) before installing.
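
The linked PVE wiki covers the bootloader edits in full. As a quick sketch for a GRUB-booted Intel host (AMD hosts use `amd_iommu=on` instead):

```bash
# /etc/default/grub - enable IOMMU passthrough support
GRUB_CMDLINE_LINUX_DEFAULT="quiet intel_iommu=on iommu=pt"

# Apply the change and reboot the PVE host
update-grub
reboot

# After reboot, confirm IOMMU is active
dmesg | grep -e DMAR -e IOMMU
```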
89 |
90 |
91 | For a dedicated hard-metal NAS, not Proxmox hosted, go to this GitHub [repository](https://github.com/ahuacate/nas-hardmetal). Options include Easy Scripts to configure a Synology v7 or OMV NAS appliance.
92 |
93 | ## Easy Scripts
94 |
95 | Easy Scripts simplify the process of installing and configuring preset configurations. To use them, all you have to do is copy and paste the Easy Script command into your terminal window, hit Enter, and follow the prompts and terminal instructions.
96 |
97 | Please note that all Easy Scripts assume your network is VLAN and DHCP IPv4 ready. If this is not the case, decline the Easy Script prompt to accept our default settings (enter 'n'), after which you can configure all your PVE container variables yourself.
98 |
99 | However, before proceeding, we highly recommend that you read our guide to fully understand the input requirements.
100 |
101 | 1) PVE NAS Installer Easy Script
102 | Use this script to start the PVE NAS installer for all PVE NAS types. The User will be prompted to select an installation type (i.e Ubuntu, USB, OMV). Run in a PVE host SSH terminal.
103 |
104 | ```bash
105 | bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_installer.sh)"
106 | ```
107 | 2) PVE Ubuntu NAS Toolbox Easy Script
108 | For creating and deleting user accounts, installing optional add-ons and upgrading your Ubuntu NAS OS. Run in your PVE host SSH terminal.
109 |
110 | ```bash
111 | bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_toolbox.sh)"
112 | ```
113 |
114 |
115 |
116 | ## Table of Contents
117 |
118 |
119 | - [1. Introduction](#1-introduction)
120 | - [1.1. Backend storage - PVE Ubuntu NAS](#11-backend-storage---pve-ubuntu-nas)
121 | - [1.2. Direct Attached storage - OMV NAS](#12-direct-attached-storage---omv-nas)
122 | - [1.3. PVE RAM recommendations](#13-pve-ram-recommendations)
123 | - [2. OMV NAS VM](#2-omv-nas-vm)
124 | - [2.1. Create the OMV NAS VM](#21-create-the-omv-nas-vm)
125 | - [2.2. PCIe Passthrough (optional)](#22-pcie-passthrough-optional)
126 | - [2.3. Configuring OMV NAS VM](#23-configuring-omv-nas-vm)
127 | - [3. Ubuntu NAS CT](#3-ubuntu-nas-ct)
128 | - [3.1. Create the Ubuntu NAS CT](#31-create-the-ubuntu-nas-ct)
129 | - [3.2. Supported File Systems](#32-supported-file-systems)
130 | - [3.2.1. ZFS storage](#321-zfs-storage)
131 | - [3.2.1.1. ZFS Cache support](#3211-zfs-cache-support)
132 | - [3.2.2. LVM storage](#322-lvm-storage)
133 | - [3.3. Easy Script Toolbox options](#33-easy-script-toolbox-options)
134 | - [4. Preparation & General requirements](#4-preparation--general-requirements)
135 | - [4.1. Required Installer Inputs](#41-required-installer-inputs)
136 | - [4.2. A System designated Administrator Email](#42-a-system-designated-administrator-email)
137 | - [4.3. SMTP Server Credentials](#43-smtp-server-credentials)
138 | - [4.4. NAS Hostname](#44-nas-hostname)
139 | - [4.5. NAS IPv4 Address](#45-nas-ipv4-address)
140 | - [4.6. NAS Search Domain or Local Domain](#46-nas-search-domain-or-local-domain)
141 | - [4.7. Network VLAN Aware](#47-network-vlan-aware)
142 | - [4.8. NAS Gateway IPv4 Address](#48-nas-gateway-ipv4-address)
143 | - [4.9. NAS Root Password](#49-nas-root-password)
144 | - [5. Ubuntu NAS Administration Toolbox](#5-ubuntu-nas-administration-toolbox)
145 | - [5.1. Create new User Accounts](#51-create-new-user-accounts)
146 | - [5.1.1. Create "Power User" Accounts](#511-create-power-user-accounts)
147 | - [5.1.2. Create Restricted and Jailed User Accounts (Standard Users)](#512-create-restricted-and-jailed-user-accounts-standard-users)
148 | - [6. Q&A](#6-qa)
149 | - [6.1. What's the NAS root password?](#61-whats-the-nas-root-password)
150 | - [6.2. Ubuntu NAS with a USB disk has I/O errors?](#62-ubuntu-nas-with-a-usb-disk-has-io-errors)
151 |
152 |
153 |
154 |
155 |
156 | # 1. Introduction
157 |
158 | When selecting your NAS type to build you have the option of PVE backend or direct attached storage (PCIe HBA card or disk pass-through).
159 |
160 | ## 1.1. Backend storage - PVE Ubuntu NAS
161 |
162 | Choose ZFS Raid, LVM Raid or basic single-disk storage for your NAS build. Your PVE NAS host hardware configuration determines your NAS options:
163 |
164 | - Ubuntu Frontend, PVE LVM or ZFS backend (SATA/SAS).
165 | - Ubuntu Frontend, PVE ext4 backend (USB only).
166 |
167 | ## 1.2. Direct Attached storage - OMV NAS
168 | - OMV requires a PCIe SAS/SATA/NVMe HBA Adapter Card (i.e LSI 9207-8i)
169 | - OMV Physical Disk Pass-through (i.e as SCSI devices).
170 |
171 | ## 1.3. PVE RAM recommendations
172 | A Ubuntu NAS CT requires only 512MB RAM because the LVM or ZFS backend storage is managed by the Proxmox host. In practice, install as much RAM as you can get for your hardware/budget.
173 |
174 | A ZFS backend depends heavily on RAM, so you need at least 16GB (32GB recommended).
175 |
176 | OMV specifies a minimum of 8GB RAM.
177 |
178 |
179 |
180 | # 2. OMV NAS VM
181 | Prepare your PVE host with a PCIe SAS/SATA/NVMe HBA Adapter Card (i.e LSI 9207-8i) or use Direct Attached Storage (requires [IOMMU enabled](https://pve.proxmox.com/wiki/Pci_passthrough)). Connect all the storage and parity disks. Make a note of the device IDs (i.e /dev/sda).
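
A simple way to record the device IDs before you start is to list the disks on the PVE host:

```bash
# List whole disks with transport, model and serial so you can note device IDs
lsblk -d -o PATH,TRAN,MODEL,SERIAL,SIZE,ROTA
```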
182 |
183 | ## 2.1. Create the OMV NAS VM
184 | Use this script to start the PVE NAS Installer. You will be prompted to select an installation type. Select `Omv Nas - OMV based NAS`. Run in a PVE host SSH terminal.
185 |
186 | ```bash
187 | bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_installer.sh)"
188 | ```
189 |
190 | ## 2.2. PCIe Passthrough (optional)
191 | PCI passthrough allows you to use a physical mainboard PCI SATA or HBA device inside a PVE VM (KVM virtualization only).
192 |
193 | If you configure a "PCI passthrough" device, the device is not available to the host anymore.
194 |
195 | Navigate using the Proxmox web interface to VM `vmid (nas-xx)` > `Hardware` > `Add` > `PCI device` and select a PCIe HBA device. The selected device will be passed through to your NAS.
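
The CLI equivalent, assuming a hypothetical VM ID `110` and an HBA at PCI address `01:00.0` (find yours with `lspci`):

```bash
# Locate the HBA's PCI address
lspci | grep -i -e sas -e sata -e nvme

# Pass the whole PCIe device through to VM 110
# (pcie=1 requires the q35 machine type)
qm set 110 -hostpci0 01:00.0,pcie=1
```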
196 |
197 | ## 2.3. Configuring OMV NAS VM
198 | After creating your OMV VM go to our detailed [configuration guide](https://github.com/ahuacate/nas-hardmetal) to complete the installation. Follow all the OMV-related steps.
199 |
200 |
201 |
202 | # 3. Ubuntu NAS CT
203 | Prepare all new disks by wiping them. You can also re-connect to an existing Ubuntu NAS storage backend.
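
The installer menus can destroy and wipe disks for you. If you prefer to wipe by hand first, here is a sketch using a hypothetical device `/dev/sdX` (this permanently destroys all data on that disk):

```bash
# Destroy GPT/MBR structures and all filesystem signatures on /dev/sdX
sgdisk --zap-all /dev/sdX
wipefs --all --force /dev/sdX

# Settle udev and re-read the partition table
udevadm settle
partprobe /dev/sdX
```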
204 |
205 | ## 3.1. Create the Ubuntu NAS CT
206 | Use this script to start the PVE NAS Installer. The User will be prompted to select an installation type. Select `Ubuntu Nas - Ubuntu CT based NAS`. Run in your PVE host SSH terminal.
207 |
208 | ```bash
209 | bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_installer.sh)"
210 | ```
211 |
212 | ## 3.2. Supported File Systems
213 |
214 | The User can choose to create a new file system or re-connect to an existing file system.
215 |
216 | The installer script allows for the re-connection to a previously prepared storage volume. Supported filesystems are LVM, ZFS and USB ext4 storage.
217 |
218 | ### 3.2.1. ZFS storage
219 |
220 | The User has the option to set different ZFS Raid levels.
221 |
222 | We recommend you install a minimum of 3x NAS-certified rotational hard disks in your host. When installing the disks make a note of the logical SATA port IDs ( i.e sdc, sdd, sde ) you are connecting to. This helps you identify which disks to format and add to your new ZFS storage pool.
223 |
224 | Our Ubuntu NAS Easy Script has the option to use the following ZFS Raid builds (a `zpool` sketch follows the table):
225 |
226 | |ZFS Raid Type|Description
227 | |----|----|
228 | |RAID0|Also called “striping”. No redundancy, so the failure of a single drive makes the volume unusable.
229 | |RAID1|Also called “mirroring”. Data is written identically to all disks. The resulting capacity is that of a single disk.
230 | |RAID10|A combination of RAID0 and RAID1. Requires at least 4 disks.
231 | |RAIDZ1|A variation on RAID-5, single parity. Requires at least 3 disks.
232 | |RAIDZ2|A variation on RAID-5, double parity. Requires at least 4 disks.
233 | |RAIDZ3|A variation on RAID-5, triple parity. Requires at least 5 disks.
234 |
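Under the hood these map onto standard `zpool` layouts. A minimal RAIDZ1 sketch from 3 disks (the pool name and device names are hypothetical; the Easy Script builds the pool for you):

```bash
# Create a RAIDZ1 (single parity) pool named 'tank' from 3 disks
# Use /dev/disk/by-id paths in production to avoid device renaming issues
zpool create -f -o ashift=12 tank raidz sdc sdd sde

# Check pool health and layout
zpool status tank
```
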
235 | Remember, our Easy Script will destroy all existing data on these storage hard disks!
236 |
237 | #### 3.2.1.1. ZFS Cache support
238 |
239 | The User can add a ZFS Cache with our `NAS Toolbox Easy Script` addon. Use only SSD or NVMe drives. Do not co-mingle SSD and NVMe cache devices. We recommend a maximum of 2x devices only.
240 |
241 | The devices will be erased, wiped of all data and partitioned ready for ZIL and ARC or L2ARC cache. The ARC or L2ARC and ZIL cache build options are:
242 | 1. Standard Cache: Select 1x device only. No ARC, L2ARC or ZIL disk redundancy.
243 | 2. Accelerated Cache: Select 2x devices. ARC or L2ARC cache set to Raid0 (stripe) and ZIL set to Raid1 (mirror).
244 |
245 | The maximum size of a ZIL log should be about half the size of your host's installed physical RAM, but not less than 8GB. The ARC or L2ARC cache size should not be less than 64GB, but will be sized to use the whole ZFS cache device. The installer will automatically calculate the best partition sizes for you. A device over-provisioning factor will be applied.
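
In `zpool` terms, the Accelerated Cache option corresponds to something like the following (partition names are hypothetical; the Easy Script creates and sizes the partitions for you):

```bash
# ZIL (SLOG) as a mirror across two cache devices
zpool add tank log mirror /dev/nvme0n1p1 /dev/nvme1n1p1

# ARC/L2ARC striped across the remaining partitions
zpool add tank cache /dev/nvme0n1p2 /dev/nvme1n1p2
```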
246 |
247 | ### 3.2.2. LVM storage
248 |
249 | The Ubuntu NAS Easy Script has the option to use the following LVM Raid builds (an LVM sketch follows the table):
250 |
251 | |LVM Raid Type|Description
252 | |----|----|
253 | |RAID0|Also called “striping”. Fast but no redundancy, so the failure of a single drive makes the volume unusable.
254 | |RAID1|Also called “mirroring”. Data is written identically to all disks. The resulting capacity is that of a single disk.
255 | |RAID5|Striping with single parity. Minimum 3 disks.
256 | |RAID6|Striping with double parity. Minimum 5 disks.
257 | |RAID10|A combination of RAID0 and RAID1. Minimum 4 disks (even unit number only).
258 |
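These builds map onto standard LVM raid logical volumes. A minimal RAID5 sketch across 3 hypothetical disks (the Easy Script does this for you):

```bash
# Initialise the disks as LVM physical volumes
pvcreate /dev/sdc /dev/sdd /dev/sde

# Create a volume group, then a RAID5 LV across all free space
vgcreate nas /dev/sdc /dev/sdd /dev/sde
lvcreate --type raid5 --stripes 2 --extents 100%FREE --name nas_lv nas
```
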
259 | Remember, our Easy Script will destroy all existing data on these storage hard disks!
260 |
261 | ## 3.3. Easy Script Toolbox options
262 |
263 | Once you have completed your Ubuntu NAS installation you can perform administration tasks using our Easy Script Toolbox.
264 |
265 | Tasks include:
266 |
267 | * Create user accounts
268 | * Delete user accounts
269 | * Upgrade your NAS OS
270 | * Install options:
271 | * Fail2Ban
272 | * SMTP
273 | * ProFTPd
274 | * Add ZFS Cache - create ARC/L2ARC/ZIL cache
275 | * Restore & update default storage folders & permissions
276 |
277 | Run the following Easy Script and select the task you want to perform. Run in a PVE host SSH terminal.
278 |
279 | ```bash
280 | bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_toolbox.sh)"
281 | ```
282 |
283 |
284 |
285 | # 4. Preparation & General requirements
286 |
287 | Get your hardware in order before running a NAS build script.
288 |
289 | ## 4.1. Required Installer Inputs
290 |
291 | Our Easy Script requires the User to provide some inputs. The installer offers default values to accept or the option to input your own values.
292 |
293 | We recommend you prepare the following and have your credentials ready before running our NAS build scripts.
294 |
295 | ## 4.2. A System designated Administrator Email
296 |
297 | You need a designated administrator email address. All server alerts and activity notifications will be sent to this email address. Gmail works fine.
298 |
299 | ## 4.3. SMTP Server Credentials
300 |
301 | Before proceeding with this installer we recommend you first configure all PVE hosts to support SMTP email services. A working SMTP server emails the NAS System Administrator all new User login credentials, SSH keys, application-specific login credentials and written guidelines.
302 |
303 | A PVE host SMTP server makes NAS administration much easier. You will also be alerted about unwarranted login attempts and other system-critical events. The PVE Host SMTP Server installer is available in our PVE Host Toolbox on GitHub:
304 | * https://github.com/ahuacate/pve-host
305 |
306 | We recommend you create an account at [Mailgun](https://mailgun.com) to relay your NAS system emails to your designated administrator. With [Mailgun](https://mailgun.com) you are not potentially exposing your private email server credentials held within a text file on your NAS. This is an added layer of security.
307 |
308 | ## 4.4. NAS Hostname
309 |
310 | The default hostname is `nas-01`. Our naming convention stipulates all NAS hostnames end with a numeric suffix. Extra NAS appliances should be named `nas-02`, `nas-03` and so on. You may change the hostname to whatever you like. But for networking, Easy Script integration and hostname resolving, we recommend you use our default hostname naming convention (`nas-01`).
311 |
312 | ## 4.5. NAS IPv4 Address
313 |
314 | By default DHCP IPv4 is enabled. We recommend you use DHCP IP reservation at your DHCP server ( i.e router, PiHole ) to create a static IP address to avoid any local DNS lookup issues. You may change to whatever IPv4 or IPv6 address you want. Just note the VLAN ID.
315 |
316 | ## 4.6. NAS Search Domain or Local Domain
317 |
318 | The default search domain is 'local'. The User must set a 'search domain' or 'local domain' name.
319 |
320 | The search domain name must match the setting used in your router configuration setting labeled as 'Local Domain' or 'Search Domain' depending on the device manufacturer.
321 |
322 | We recommend top-level domain (spTLD) names for residential and small network names because they cannot be resolved across the internet. Routers and DNS servers know, in theory, not to forward ARPA requests they do not understand onto the public internet. It is best to choose one of our listed names.
323 |
324 | * local ( Recommended )
325 | * home.arpa ( Recommended )
326 | * lan
327 | * localdomain
328 |
329 | If you insist on using a made-up search domain name, then DNS requests may go unfulfilled by your router and be forwarded onto global internet DNS root servers. This leaks information about your network such as device names. Alternatively, you can use a registered domain name or subdomain if you know what you are doing by selecting the 'Other' option.
330 |
331 | ## 4.7. Network VLAN Aware
332 |
333 | You must answer an Easy Script prompt asking if your network is VLAN aware. The script will resolve your NAS VLAN ID automatically.
334 |
335 | ## 4.8. NAS Gateway IPv4 Address
336 |
337 | The script will attempt to find your Gateway IPv4 address. Confirm with `Enter` or type in the correct Gateway IP address.
338 |
339 | ## 4.9. NAS Root Password
340 |
341 | The default root password is 'ahuacate'. You can always change it at a later stage.
342 |
343 |
344 |
345 | # 5. Ubuntu NAS Administration Toolbox
346 | Once you have completed your Ubuntu NAS installation you can perform administration tasks using our Easy Script Toolbox.
347 |
348 | Tasks include:
349 |
350 | * Create user accounts
351 | * Upgrade your NAS OS
352 | * Install options:
353 | * Fail2Ban
354 | * SMTP
355 | * ProFTPd
356 | * Add ZFS Cache - create ARC/L2ARC/ZIL cache
357 | * Restore & update default storage folders & permissions
358 |
359 | Run the following Easy Script and select the task you want to perform. Run in a PVE host SSH terminal.
360 |
361 | ```bash
362 | bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_toolbox.sh)"
363 | ```
364 |
365 | Your User account options are as follows.
366 |
367 | ## 5.1. Create new User Accounts
368 |
369 | New user accounts can be created using our Ubuntu NAS administration tool.
370 |
371 | The Easy Script will prompt the installer with selectable options:
372 |
373 | ### 5.1.1. Create "Power User" Accounts
374 |
375 | Power Users are trusted persons with privileged access to data and application resources hosted on your PVE NAS. Power Users are NOT standard users! Standard users are added with the 'Jailed User Account' option. Each new Power User's security permissions are controlled by Linux groups. Group security permission levels are as follows:
376 |
377 | | GROUP NAME | PERMISSIONS |
378 | |--------------|--------------------------------------------------------|
379 | | `medialab` | Everything to do with media (i.e movies, TV and music) |
380 | | `homelab` | Everything to do with a smart home including medialab |
381 | | `privatelab` | Private storage including medialab & homelab rights |
382 |
383 | ### 5.1.2. Create Restricted and Jailed User Accounts (Standard Users)
384 |
385 | Every new user is restricted or jailed within their own home folder. In Linux, this is called a chroot jail. But you can select a level of restriction which is applied to each newly created user. This technique can be quite useful if you want a particular user to be provided with a limited system environment, limited folder access and at the same time keep them separate from your main server system and other personal data. The chroot technique will automatically jail selected users belonging to the `chrootjail` user group upon ssh or ProFTPd SFTP login (standard FTP mode is disabled).
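
The jail is typically wired up in `sshd_config` along these lines (a sketch of the general technique, not necessarily this repo's exact configuration; the chroot target must be root-owned, and the jail must contain any binaries the user needs - see `pve_nas_ct_nas_chrootapplist`):

```bash
# /etc/ssh/sshd_config - jail members of the chrootjail group on login
Match Group chrootjail
    ChrootDirectory %h
    AuthenticationMethods publickey
```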
386 |
387 | An example of a jailed user is a person who has remote access to your PVE NAS but is restricted to your video library (TV, movies, documentary), public folders and their home folder for cloud storage only. Remote access to your PVE NAS is restricted to sftp, ssh and rsync using private SSH RSA encrypted keys. The user can back up their mobile, tablet, notebook or any device.
388 |
389 | When creating a new user you are given the choice to select a Level of `chrootjail` group permissions and access rights per user. We have pre-configured 3 Levels to choose from with varying degrees of file access for different types of users.
390 |
391 | **Level 1** - This user is restricted to their private home folder for data storage and the NAS public folder only. This is ideal for persons with whom you DO NOT want to share any media data. Typical users may be persons wanting cloud storage and nothing more.
392 |
393 | **Level 2** - This user is restricted to their private home folder for data storage, limited access to the NAS public folder and media library (i.e restricted to movies, TV, documentary and homevideo folders only). The user is also set up with a downloads folder and special folders within their chrootjail home folder for sharing photos and home videos with other users or a media server like Emby or Jellyfin. Typical users are family, close friends and children because of limited media access.
394 |
395 | **Level 3** - This user is restricted to their private home folder for data storage, with access to the NAS public, audio and books folders and the full media library (i.e this user level is NOT media-restricted, so they can view ALL media content). The user is also set up with a downloads folder and special folders within their chrootjail home folder for sharing photos and home videos with other users or a media server like Emby or Jellyfin. Typical users are Power users and adults with full media library access.
396 |
397 | The options are:
398 |
399 | | GROUP NAME | USER NAME |
400 | |--------------|---------------------------------------------------------|
401 | | `chrootjail` | /srv/hostname/homes/chrootjail/`username_injail` |
402 | | | |
403 | | LEVEL 1 | FOLDER |
404 | | -rwx---- | /srv/hostname/homes/chrootjail/`username_injail` |
405 | | | Bind Mounts - mounted at ~/public folder |
406 | | -rwxrwxrw- | /srv/hostname/homes/chrootjail/`username_injail`/public |
407 | | | |
408 | | LEVEL 2 | FOLDER |
409 | | -rwx---- | /srv/hostname/homes/chrootjail/`username_injail` |
410 | | | Bind Mounts - mounted at ~/share folder |
411 | | -rwxrwxrw- | /srv/hostname/downloads/user/`username_downloads` |
412 | | -rwxrwxrw- | /srv/hostname/photo/`username_photo` |
413 | | -rwxrwxrw- | /srv/hostname/public |
414 | | -rwxrwxrw- | /srv/hostname/video/homevideo/`username_homevideo` |
415 | | -rwxr--- | /srv/hostname/video/movies |
416 | | -rwxr--- | /srv/hostname/video/tv |
417 | | -rwxr--- | /srv/hostname/video/documentary |
418 | | | |
419 | | LEVEL 3 | FOLDER |
420 | | -rwx---- | /srv/hostname/homes/chrootjail/`username_injail` |
421 | | | Bind Mounts - mounted at ~/share folder |
422 | | -rwxr--- | /srv/hostname/audio |
423 | | -rwxr--- | /srv/hostname/books |
424 | | -rwxrwxrw- | /srv/hostname/downloads/user/`username_downloads` |
425 | | -rwxr--- | /srv/hostname/music |
426 | | -rwxrwxrw- | /srv/hostname/photo/`username_photo` |
427 | | -rwxrwxrw- | /srv/hostname/public |
428 | | -rwxrwxrw- | /srv/hostname/video/homevideo/`username_homevideo` |
429 | | -rwxr--- | /srv/hostname/video (All) |
430 |
431 | All Home folders are automatically suffixed: `username_injail`.
432 |
433 |
434 |
435 | # 6. Q&A
436 | ## 6.1. What's the NAS root password?
437 | Installation default credentials for Ubuntu based NAS:
438 | * User: root
439 | * Password: ahuacate
440 |
441 | Default credentials for OMV NAS:
442 | * User: admin
443 | * Password: openmediavault
444 |
445 | ## 6.2. Ubuntu NAS with a USB disk has I/O errors?
446 | A known issue is a USB power management feature called autosuspend. Our install uses a UDEV rule to disable autosuspend, but the problem might be your USB hub or SATA adapter. Try these fixes (a rule sketch follows the list):
447 | * [Kernel Patch](https://unix.stackexchange.com/questions/91027/how-to-disable-usb-autosuspend-on-kernel-3-7-10-or-above)
448 | * [Disable USB autosuspend](https://blog.vulkanbox.dontexist.com/promox-mit-zram/)
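
For reference, a udev rule that disables USB autosuspend generally looks like this (a sketch; our installer ships its own rule file, `80-mount-usb-to-media-by-label.rules`):

```bash
# /etc/udev/rules.d/81-usb-power.rules (example filename)
# Keep USB devices fully powered (disable runtime autosuspend)
ACTION=="add", SUBSYSTEM=="usb", ATTR{power/control}="on"
```

Reload the rules with `udevadm control --reload-rules` and re-plug the device.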
449 |
450 |
--------------------------------------------------------------------------------
/pve_nas_installer.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_installer.sh
4 | # Description: Installer script for PVE NAS
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 |
9 | #---- Source Github
10 | # bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_installer.sh)"
11 |
12 | #---- Source local Git
13 | # /mnt/pve/nas-01-git/ahuacate/pve-nas/pve_nas_installer.sh
14 |
15 | #---- Installer Vars ---------------------------------------------------------------
16 |
17 | # Git server
18 | GIT_SERVER='https://github.com'
19 | # Git user
20 | GIT_USER='ahuacate'
21 | # Git repository
22 | GIT_REPO='pve-nas'
23 | # Git branch
24 | GIT_BRANCH='main'
25 | # Git common
26 | GIT_COMMON='0'
27 |
28 | # Edit this list to set installer product(s).
29 | # vm_LIST=( "name:build:vm_type:desc" )
30 | # name ---> name of the main application
31 | # build ---> build model/version of the name (i.e omv build version for a nas)
32 | # vm_type ---> 'vm' or 'ct'
33 | # desc ---> description of the main application name
34 | # Fields must match the GIT_APP_SCRIPT dir and filename:
35 | # i.e .../ubuntu/pve_nas_ct_nas_installer.sh
36 | vm_LIST=( "nas:ubuntu:ct:Ubuntu CT based NAS" \
37 | "nas:debian:ct:Debian Cockpit CT based NAS" \
38 | "nas:omv:vm:OMV based NAS (Requires PCIe HBA or disk passthrough)")
39 |
40 | #-----------------------------------------------------------------------------------
41 | # DO NOT EDIT BELOW THIS LINE
42 | #---- Dependencies -----------------------------------------------------------------
43 |
44 | #---- Check for Internet connectivity
45 |
46 | # List of well-known websites to test connectivity (in case one is blocked)
47 | websites=( "google.com 443" "github.com 443" "cloudflare.com 443" "apple.com 443" "amazon.com 443" )
48 | # Loop through each website in the list
49 | for website in "${websites[@]}"
50 | do
51 | # Test internet connectivity
52 | nc -zw1 $website > /dev/null 2>&1
53 | # Check the exit status of the nc command
54 | if [ $? = 0 ]
55 | then
56 | # Flag to track if internet connection is up
57 | connection_up=0
58 | break
59 | else
60 | # Flag to track if internet connection is down
61 | connection_up=1
62 | fi
63 | done
64 | # On connection fail
65 | if [ "$connection_up" = 1 ]
66 | then
67 | echo "Checking for internet connectivity..."
68 | echo -e "Internet connectivity status: \033[0;31mDown\033[0m\n\nCannot proceed without an internet connection.\nFix your PVE host's internet connection and try again..."
69 | echo
70 | exit 0
71 | fi
72 |
73 | #---- Static Variables -------------------------------------------------------------
74 |
75 | #---- Set Package Installer Temp Dir
76 |
77 | # Set 'repo_temp' dir
78 | REPO_TEMP='/tmp'
79 | # Change to 'repo temp' dir
80 | cd $REPO_TEMP
81 |
82 | #---- Local Repo path (check if local)
83 |
84 | # For local SRC a 'developer_settings.git' file must exist in repo dir
85 | REPO_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P | sed "s/${GIT_USER}.*/$GIT_USER/" )"
86 |
87 | #---- Other Variables --------------------------------------------------------------
88 | #---- Other Files ------------------------------------------------------------------
89 |
90 | #---- Package loader
91 |
92 | # Check for local source
93 | if [ -f "$REPO_PATH/common/bash/src/pve_repo_loader.sh" ] && [ "$(sed -n 's/^dev_git_mount=//p' $REPO_PATH/developer_settings.git 2> /dev/null)" = 0 ]
94 | then
95 | # Download Local loader (developer)
96 | source $REPO_PATH/common/bash/src/pve_repo_loader.sh
97 | else
98 | # Download Github loader
99 | wget -qL https://raw.githubusercontent.com/$GIT_USER/common/main/bash/src/pve_repo_loader.sh -O $REPO_TEMP/pve_repo_loader.sh
100 | chmod +x $REPO_TEMP/pve_repo_loader.sh
101 | source $REPO_TEMP/pve_repo_loader.sh
102 | fi
103 |
104 | #---- Body -------------------------------------------------------------------------
105 |
106 | #---- Run Installer
107 |
108 | # Run repo installer (repo product selector)
109 | source $REPO_PATH/$GIT_REPO/common/bash/src/pve_repo_installer_main.sh
110 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/pve_nas_toolbox.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_toolbox.sh
4 | # Description: Toolbox script for VM/LXC/CT and Apps
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 |
9 | #---- Source Github
10 | # bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_toolbox.sh)"
11 |
12 | #---- Source local Git
13 | # /mnt/pve/nas-01-git/ahuacate/pve-nas/pve_nas_toolbox.sh
14 |
15 | #---- Installer Vars ---------------------------------------------------------------
16 | # This is an installer script. Only edit the variables in this section.
17 |
18 | # Git server
19 | GIT_SERVER='https://github.com'
20 | # Git user
21 | GIT_USER='ahuacate'
22 | # Git repository
23 | GIT_REPO='pve-nas'
24 | # Git branch
25 | GIT_BRANCH='main'
26 | # Git common
27 | GIT_COMMON='0'
28 |
29 | #-----------------------------------------------------------------------------------
30 | # DO NOT EDIT BELOW THIS LINE
31 | #---- Dependencies -----------------------------------------------------------------
32 |
33 | #---- Check for Internet connectivity
34 |
35 | # List of well-known websites to test connectivity (in case one is blocked)
36 | websites=( "google.com 443" "github.com 443" "cloudflare.com 443" "apple.com 443" "amazon.com 443" )
37 | # Loop through each website in the list
38 | for website in "${websites[@]}"
39 | do
40 | # Test internet connectivity
41 | nc -zw1 $website > /dev/null 2>&1
43 | # Check the exit status of the nc command
43 | if [ $? = 0 ]
44 | then
45 | # Flag to track if internet connection is up
46 | connection_up=0
47 | break
48 | else
49 | # Flag to track if internet connection is down
50 | connection_up=1
51 | fi
52 | done
53 | # On connection fail
54 | if [ "$connection_up" = 1 ]
55 | then
56 | echo "Checking for internet connectivity..."
57 | echo -e "Internet connectivity status: \033[0;31mDown\033[0m\n\nCannot proceed without an internet connection.\nFix your PVE host's internet connection and try again..."
58 | echo
59 | exit 0
60 | fi
61 |
62 | #---- Static Variables -------------------------------------------------------------
63 |
64 | #---- Set Package Installer Temp Dir
65 |
66 | # Set 'repo_temp' dir
67 | REPO_TEMP='/tmp'
68 | # Change to 'repo temp' dir
69 | cd $REPO_TEMP
70 |
71 | #---- Local Repo path (check if local)
72 |
73 | # For local SRC a 'developer_settings.git' file must exist in repo dir
74 | REPO_PATH="$( cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P | sed "s/${GIT_USER}.*/$GIT_USER/" )"
75 |
76 | #---- Other Variables --------------------------------------------------------------
77 | #---- Other Files ------------------------------------------------------------------
78 |
79 | #---- Package loader
80 |
81 | # Check for local source
82 | if [ -f "$REPO_PATH/common/bash/src/pve_repo_loader.sh" ] && [[ $(sed -n 's/^dev_git_mount=//p' $REPO_PATH/developer_settings.git 2> /dev/null) == '0' ]]
83 | then
84 | # Download Local loader (developer)
85 | source $REPO_PATH/common/bash/src/pve_repo_loader.sh
86 | else
87 | # Download Github loader
88 | wget -qL https://raw.githubusercontent.com/$GIT_USER/common/main/bash/src/pve_repo_loader.sh -O $REPO_TEMP/pve_repo_loader.sh
89 | chmod +x $REPO_TEMP/pve_repo_loader.sh
90 | source $REPO_TEMP/pve_repo_loader.sh
91 | fi
92 |
93 | #---- Body -------------------------------------------------------------------------
94 |
95 | #---- Run Installer
96 |
97 | # Run repo installer (repo product selector)
98 | source $REPO_PATH/$GIT_REPO/common/bash/src/pve_repo_toolbox_main.sh
99 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/shared/pve_nas_create_singledisk_build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_create_singledisk_build.sh
4 | # Description: Source script for building single ext4 disk storage
5 | # ----------------------------------------------------------------------------------
6 |
7 |
8 | #---- Source -----------------------------------------------------------------------
9 | #---- Dependencies -----------------------------------------------------------------
10 |
11 | # Requires arg 'usb' or 'onboard' to be set in source command
12 | # Sets the validation input type: input_lvm_vgname_val usb
13 | if [ -z "$1" ]
14 | then
15 | input_tran=""
16 | input_tran_arg=""
17 | elif [[ "$1" =~ 'usb' ]]
18 | then
19 | input_tran='(usb)'
20 | input_tran_arg='usb'
21 | elif [[ "$1" =~ 'onboard' ]]
22 | then
23 | input_tran='(sata|ata|scsi|nvme)'
24 | input_tran_arg='onboard'
25 | fi
26 |
27 | # Install parted (for partprobe)
28 | if [[ ! $(dpkg -s parted 2>/dev/null) ]]
29 | then
30 | apt-get install -y parted > /dev/null
31 | fi
32 |
33 |
34 | #---- Static Variables -------------------------------------------------------------
35 |
36 | # Disk Over-Provisioning (value is % of disk)
37 | disk_op_ssd='10'
38 | disk_op_rota='0'
39 |
40 | # Basic storage disk label
41 | basic_disklabel='(.*_hba(_[0-9])?|.*_usb(_[0-9])?|.*_onboard(_[0-9])?)$'
42 |
43 | #---- Other Variables --------------------------------------------------------------
44 |
45 | # USB Disk Storage minimum size (GB)
46 | stor_min='30'
47 |
48 | #---- Other Files ------------------------------------------------------------------
49 | #---- Functions --------------------------------------------------------------------
50 | #---- Body -------------------------------------------------------------------------
51 |
52 | #---- Prerequisites
53 |
54 | # Check default udev rule exists
55 | if [ -f "/etc/udev/rules.d/80-mount-usb-to-media-by-label.rules" ] && [ "$input_tran_arg" = usb ]
56 | then
57 | # Remove old udev rule version
58 | rm -f /etc/udev/rules.d/80-mount-usb-to-media-by-label.rules
59 | # Re-Activate udev rules
60 | udevadm control --reload-rules
61 | sleep 2
62 | fi
63 |
64 | # Disable USB autosuspend
65 |
66 | display_msg="#### PLEASE READ CAREFULLY - USB POWER MANAGEMENT ####
67 |
68 | Proxmox default power management suspends USB disks when they are idle. On restart you may find your NAS storage mount is broken with I/O errors. This is often caused by the USB disk assigning itself a different device id (i.e '/dev/sdd1' to '/dev/sde1') despite the NAS CT bind mount point staying the same. For now the only fix is to disable auto suspend features for USB disks.
69 |
70 | Our UDEV Rule performs the USB disk mount and disables power management on the device. This method does NOT always work (when it does, it's 100% reliable).
71 |
72 | If you have any issues or I/O errors read our GitHub guide for any known fixes."
73 |
74 | if [ "$input_tran_arg" = usb ]
75 | then
76 | section "USB Power management & Autosuspend"
77 |
78 | # Display msg
79 | msg_box "$display_msg"
80 |
81 | # USB menu option
82 | msg "Make your selection..."
83 | OPTIONS_VALUES_INPUT=( "TYPE01" "TYPE00")
84 | OPTIONS_LABELS_INPUT=( "I understand. Continue the install (Recommended)" "Exit this installer" )
85 |
86 | # Run menu selection
87 | makeselect_input2
88 | singleselect SELECTED "$OPTIONS_STRING"
89 |
90 | if [ "$RESULTS" = 'TYPE00' ]
91 | then
92 | msg "You have chosen not to proceed. Aborting. Bye..."
93 | echo
94 | exit 0
95 | fi
96 | fi
97 |
98 |
99 | #---- Select a Ext4 build option
100 | section "Select a build option"
101 | # 1=PATH:2=KNAME:3=PKNAME:4=FSTYPE:5=TRAN:6=MODEL:7=SERIAL:8=SIZE:9=TYPE:10=ROTA:11=UUID:12=RM:13=LABEL:14=ZPOOLNAME:15=SYSTEM
102 |
103 | while true
104 | do
105 | # Create fs/disk lists for LVM, ZFS, Basic (onboard & usb)
106 | source $SHARED_DIR/pve_nas_fs_list.sh
107 |
108 | # Create menu labels
109 | OPTIONS_LABELS_INPUT=$(printf '%s\n' "${basic_option_labels}" \
110 | | cut -d: -f1,2,4 \
111 | | sed -e '$a\None. Exit this installer::::' \
112 | | column -t -s ":" -N "BASIC OPTIONS,DESCRIPTION,SIZE" -T DESCRIPTION -c 150 -d)
113 | # Create menu values
114 | OPTIONS_VALUES_INPUT=$(printf '%s\n' "${basic_option_values}" \
115 | | sed -e '$a\TYPE00:0')
116 |
117 | makeselect_input1 "$OPTIONS_VALUES_INPUT" "$OPTIONS_LABELS_INPUT"
118 | singleselect SELECTED "$OPTIONS_STRING"
119 |
120 | # Set Build
121 | BUILD_TYPE=$(echo "$RESULTS" | awk -F':' '{ print $1 }')
122 |
123 | # Create input disk list array
124 | inputdiskLIST=( "$(echo "$RESULTS" | cut -d: -f2-15)" )
125 |
126 |
127 | #---- Destroy Disk
128 | if [ "$BUILD_TYPE" = TYPE03 ]
129 | then
130 | # Create device list
131 | inputdevLIST=()
132 | while read pkname
133 | do
134 | if [[ "$pkname" =~ ^(sd[a-z]|nvme[0-9]n[0-9]) ]]
135 | then
136 | inputdevLIST+=( $(lsblk -n -o path /dev/${pkname}) )
137 | fi
138 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $3 }' ) # file listing of disks
139 |
140 | # Create print display
141 | print_DISPLAY=()
142 | while read dev
143 | do
144 | print_DISPLAY+=( "$(lsblk -l -o PATH,FSTYPE,SIZE,MOUNTPOINT $dev)" )
145 | print_DISPLAY+=( " " )
146 | done < <( printf '%s\n' "${inputdevLIST[@]}" | grep 'sd[a-z]$\|nvme[0-9]n[0-9]$' ) # dev device listing
147 |
148 | msg_box "#### PLEASE READ CAREFULLY - DESTROYING A DISK ####\n\nYou have chosen to destroy & wipe a disk. This action will result in permanent data loss of all data stored on the following devices:\n\n$(printf '%s\n' "${print_DISPLAY[@]}" | indent2)\n\nThe disks will be erased and made available for a Basic single disk NAS build."
149 | echo
150 |
151 | # User confirmation to destroy disk & partitions
152 | while true
153 | do
154 | read -p "Are you sure you want to destroy the disk : [y/n]?" -n 1 -r YN
155 | echo
156 | case $YN in
157 | [Yy]*)
158 | # Remove any existing mount points
159 | while read dev
160 | do
161 | # Disk uuid
162 | uuid=$(lsblk -d -n -o uuid $dev 2> /dev/null)
163 | # Get existing disk label name
164 | label=$(lsblk -d -n -o label $dev 2> /dev/null)
165 | # Remove UUID from /etc/fstab
166 | if [ -n "$uuid" ]
167 | then
168 | sed -i "/^UUID=$uuid/d" /etc/fstab
169 | fi
170 | # Remove label from /etc/fstab
171 | if [ -n "$label" ]
172 | then
173 | sed -i "/^LABEL=$label/d" /etc/fstab
174 | fi
175 | # Remove dev from /etc/fstab
176 | if [ -n "$dev" ]
177 | then
178 | sed -i "\|^${dev}.*|d" /etc/fstab
179 | fi
180 | # Check for existing mnt points
181 | if [[ $(findmnt -n -S $dev) ]]
182 | then
183 | # Get existing mount point
184 | existing_mnt_point=$(lsblk -no mountpoint $dev)
185 | # Umount dev
186 | umount -f -q $dev > /dev/null 2>&1 || /bin/true
187 | # Remove mnt point
188 | rm -R -f $existing_mnt_point 2> /dev/null
189 | fi
190 | done < <( printf '%s\n' "${inputdevLIST[@]}" ) # dev device listing
191 |
192 | # Erase / Wipe disks
193 | msg "Erasing disks..."
194 | while read dev
195 | do
196 | # Full device erase
197 | sgdisk --zap $dev >/dev/null 2>&1
198 | wipefs --all --force $dev >/dev/null 2>&1
199 | info "Erased device: $dev"
200 | done < <( printf '%s\n' "${inputdevLIST[@]}" | grep 'sd[a-z]$\|nvme[0-9]n[0-9]$' | uniq ) # file listing of disks to erase
201 |
202 | # Wait for pending udev events
203 | udevadm settle
204 | sleep 1
205 |
206 | # Re-read the partition table
207 | partprobe
208 |
209 | # Update storage list array (function)
210 | storage_list
211 |
212 | # Create a working list array (function)
213 | stor_LIST
214 | echo
215 | break
216 | ;;
217 | [Nn]*)
218 | echo
219 | msg "You have chosen not to proceed with destroying a disk.\nTry again..."
220 | sleep 2
221 | echo
222 | break
223 | ;;
224 | *)
225 | warn "Error! Entry must be 'y' or 'n'. Try again..."
226 | echo
227 | ;;
228 | esac
229 | done
230 | elif [ "$BUILD_TYPE" = TYPE00 ]
231 | then
232 | # Exit installer
233 | msg "You have chosen not to proceed. Aborting. Bye..."
234 | echo
235 | exit 0
236 | else
237 | # Proceed with build option
238 | break
239 | fi
240 | done
241 |
242 |
243 | #---- TYPE01: Basic Storage Build
244 | if [ "$BUILD_TYPE" = TYPE01 ]
245 | then
246 | # Set mnt base dir
247 | if [ "$input_tran_arg" = usb ]
248 | then
249 | # Set mnt dir (base dir)
250 | mnt_base_dir="media"
251 | else
252 | # Set mnt dir (base dir)
253 | mnt_base_dir="mnt"
254 | fi
255 |
256 | # Disk uuid
257 | disk_uuid=$(printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $11 }')
258 |
259 | # Get existing disk label name
260 | existing_disk_label=$(blkid -s LABEL -o value /dev/disk/by-uuid/$disk_uuid)
261 |
262 | # Set default mnt point name
263 | mnt_name="nas_basic_$input_tran_arg"
264 |
265 | # Remove any existing mount points
266 | while IFS=':' read dev uuid label
267 | do
268 | # Remove UUID from /etc/fstab
269 | if [ -n "$uuid" ]
270 | then
271 | sed -i "/^UUID=$uuid/d" /etc/fstab
272 | fi
273 | # Remove label from /etc/fstab
274 | if [ -n "$label" ]
275 | then
276 | sed -i "/^LABEL=$label/d" /etc/fstab
277 | fi
278 | # Remove dev from /etc/fstab
279 | if [ -n "$dev" ]
280 | then
281 | sed -i "\|^${dev}.*|d" /etc/fstab
282 | fi
283 | # Check for existing mnt points
284 | if [[ $(findmnt -n -S $dev) ]] && [ -n "$dev" ]
285 | then
286 | # Get existing mount point
287 | existing_mnt_point=$(lsblk -no mountpoint $dev)
288 | # Umount dev
289 | umount -f -q $dev > /dev/null 2>&1 || /bin/true
290 | # Remove mnt point
291 | rm -R -f $existing_mnt_point 2> /dev/null
292 | fi
293 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' -v OFS=':' '{ print $1, $11, $13 }' ) # dev device listing
294 |
295 | # Check for existing mnt points
296 | if [ -d "/$mnt_base_dir/$mnt_name" ] && [[ $(ls -A "/$mnt_base_dir/$mnt_name") ]]
297 | then
298 | i=1
299 | while [ -d "/$mnt_base_dir/${mnt_name}_$i" ] && [[ $(ls -A "/$mnt_base_dir/${mnt_name}_$i") ]]
300 | do
301 | # Suffix name by +1
302 | i=$(( $i + 1 ))
303 | done
304 | # Suffix the mnt name
305 | mnt_name="${mnt_name}_${i}"
306 | fi
307 |
308 | # New USB disk label
309 | disk_label="$mnt_name"
310 |
311 | # Erase / Wipe disks
312 | msg "Erasing disks..."
313 | while read dev
314 | do
315 | # Full device wipeout
316 | sgdisk --zap $dev >/dev/null 2>&1
317 | # dd if=/dev/zero of=$dev bs=100M status=progress
318 | wipefs -a -f $dev >/dev/null 2>&1
319 | # Wait for pending udev events
320 | udevadm settle
321 | info "Erased device: $dev"
322 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $1 }' ) # file listing of disks to erase
323 |
324 | # Create primary partition
325 | msg "Partitioning, formatting & labelling new disk..."
326 | # Partition number start
327 | num=1
328 | # Create dev list
329 | inputdevLIST=()
330 | while read dev
331 | do
332 | # Create single partition
333 | echo 'type=83' | sfdisk $dev
334 | # Create new partition (part1)
335 | if [[ "$dev" =~ ^/dev/sd[a-z]$ ]]
336 | then
337 | # Format to default ext4
338 | mkfs.ext4 -F $dev$num
339 | # Wait for pending udev events
340 | udevadm settle
341 | # Create disk label
342 | e2label $dev$num $disk_label
343 | # Wait for pending udev events
344 | udevadm settle
345 | # Set new disk uuid var
346 | disk_uuid=$(blkid -s UUID -o value $dev$num 2> /dev/null)
347 | # Create device array
348 | inputdevLIST+=( "$(echo "$dev$num:$disk_uuid:$disk_label")" )
349 | info "Ext4 disk partition created: ${YELLOW}$dev$num${NC}"
350 | echo
351 | elif [[ $dev =~ ^/dev/nvme[0-9]n[0-9]$ ]]
352 | then
353 | # Format to default ext4
354 | mkfs.ext4 -F ${dev}p$num
355 | # Wait for pending udev events
356 | udevadm settle
357 | # Create disk label
358 | e2label ${dev}p$num $disk_label
359 | # Wait for pending udev events
360 | udevadm settle
361 | # Set new disk uuid var
362 | disk_uuid=$(blkid -s UUID -o value ${dev}p$num 2> /dev/null)
363 | # Create device array
364 | inputdevLIST+=( "$(echo "${dev}p$num:$disk_uuid:$disk_label")" )
365 | info "Ext4 disk partition created: ${YELLOW}${dev}p$num${NC}"
366 | echo
367 | fi
368 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $1 }' ) # file listing of disks
369 |
370 | # Disk Over-Provisioning
371 | msg "Applying over-provisioning factor % to disk..."
372 | while read dev
373 | do
374 | if [ "$(hdparm -I $dev 2> /dev/null | awk -F':' '/Nominal Media Rotation Rate/ { print $2 }' | sed 's/ //g')" = 'SolidStateDevice' ]
375 | then
376 | # Set over-provisioning factor %
377 | tune2fs -m $disk_op_ssd $dev > /dev/null
378 | # Wait for pending udev events
379 | udevadm settle
380 | info "SSD disk reserved block percentage: ${YELLOW}${disk_op_ssd}%${NC}"
381 | echo
382 | else
383 | # Set over-provisioning factor %
384 | tune2fs -m $disk_op_rota $dev > /dev/null
385 | # Wait for pending udev events
386 | udevadm settle
387 | info "Rotational disk reserved block percentage: ${YELLOW}${disk_op_rota}%${NC}"
388 | echo
389 | fi
390 | done < <( printf '%s\n' "${inputdevLIST[@]}" | awk -F':' '{ print $1 }' ) # file listing of new devs
391 |
392 | # Set SRC mount point
393 | PVE_SRC_MNT="/$mnt_base_dir/$mnt_name"
394 | fi
395 |
396 | #---- TYPE02: Mount existing disk
397 | if [ "$BUILD_TYPE" = TYPE02 ]
398 | then
399 | # set mnt base dir
400 | if [ "$input_tran_arg" = usb ]
401 | then
402 | # Set mnt dir (base dir)
403 | mnt_base_dir="media"
404 | else
405 | # Set mnt dir (base dir)
406 | mnt_base_dir="mnt"
407 | fi
408 |
409 | # Disk uuid
410 | disk_uuid=$(printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $11 }')
411 |
412 | # Get existing disk label name
413 | existing_disk_label=$(blkid -s LABEL -o value /dev/disk/by-uuid/$disk_uuid)
414 |
415 | # Set default mnt point name
416 | mnt_name="nas_basic_$input_tran_arg"
417 |
418 | # Remove any existing mount points
419 | while IFS=':' read dev uuid label
420 | do
421 | # Remove UUID from /etc/fstab
422 | if [ -n "$uuid" ]
423 | then
424 | sed -i "/^UUID=$uuid/d" /etc/fstab
425 | fi
426 | # Remove label from /etc/fstab
427 | if [ -n "$label" ]
428 | then
429 | sed -i "/^LABEL=$label/d" /etc/fstab
430 | fi
431 | # Remove dev from /etc/fstab
432 | if [ -n "$dev" ]
433 | then
434 | sed -i "\|^${dev} |d" /etc/fstab
435 | fi
436 | # Check for existing mnt points
437 | if [[ $(findmnt -n -S $dev) ]] && [ -n "$dev" ]
438 | then
439 | # Get existing mount point
440 | existing_mnt_point=$(lsblk -no mountpoint $dev)
441 | # Umount dev
442 | umount -f -q $dev > /dev/null 2>&1 || /bin/true
443 | # Remove mnt point
444 | rm -R -f $existing_mnt_point 2> /dev/null
445 | fi
446 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' -v OFS=':' '{ print $1, $11, $13 }' ) # dev device listing
447 |
448 | # Check for existing mnt points
449 | if [ -d "/$mnt_base_dir/$mnt_name" ] && [[ $(ls -A "/$mnt_base_dir/$mnt_name") ]]
450 | then
451 | i=1
452 | while [ -d "/$mnt_base_dir/${mnt_name}_$i" ] && [[ $(ls -A "/$mnt_base_dir/${mnt_name}_$i") ]]
453 | do
454 | # Suffix name by +1
455 | i=$(( $i + 1 ))
456 | done
457 | # Suffix the mnt name
458 | mnt_name="${mnt_name}_${i}"
459 | fi
460 |
461 | # New USB disk label
462 | disk_label="$mnt_name"
463 |
464 | # Validate disk label name
465 | if [ "$existing_disk_label" != "$disk_label" ]
466 | then
467 | # Set disk label
468 | dev=$(blkid -o device -t UUID="$disk_uuid" | awk 'NR==1{print $1}')
469 | e2label $dev $disk_label
470 | # Wait for pending udev events
471 | udevadm settle
472 | fi
473 |
474 | # Disk Over-Provisioning
475 | msg "Applying over-provisioning factor % to disk..."
476 | while read dev
477 | do
478 | if [[ "$(hdparm -I $dev 2> /dev/null | awk -F':' '/Nominal Media Rotation Rate/ { print $2 }' | sed 's/ //g')" == 'SolidStateDevice' ]]
479 | then
480 | # Set over-provisioning factor %
481 | tune2fs -m $disk_op_ssd $dev > /dev/null
482 | info "SSD disk reserved block percentage: ${YELLOW}${disk_op_ssd}%${NC}"
483 | echo
484 | else
485 | # Set over-provisioning factor %
486 | tune2fs -m $disk_op_rota $dev > /dev/null
487 | info "Rotational disk reserved block percentage: ${YELLOW}${disk_op_rota}%${NC}"
488 | echo
489 | fi
490 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $1 }' ) # file listing of new devs
491 |
492 | # Set SRC mount point
493 | PVE_SRC_MNT="/$mnt_base_dir/$mnt_name"
494 | fi
495 |
496 | #---- TYPE04: Destroy, wipe and use partition
497 | if [ "$BUILD_TYPE" = TYPE04 ]
498 | then
499 | # Set mnt base dir
500 | if [ "$input_tran_arg" = usb ]
501 | then
502 | # Set mnt dir (base dir)
503 | mnt_base_dir="media"
504 | else
505 | # Set mnt dir (base dir)
506 | mnt_base_dir="mnt"
507 | fi
508 |
509 | # Disk uuid
510 | disk_uuid=$(printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $11 }')
511 |
512 | # Get existing disk label name
513 | existing_disk_label=$(blkid -s LABEL -o value /dev/disk/by-uuid/$disk_uuid)
514 |
515 | # Set default mnt point name
516 | mnt_name="nas_basic_$input_tran_arg"
517 |
518 | # Remove any existing mount points
519 | while IFS=':' read dev uuid label
520 | do
521 | # Remove UUID from /etc/fstab
522 | if [ -n "$uuid" ]
523 | then
524 | sed -i "/^UUID=$uuid/d" /etc/fstab
525 | fi
526 | # Remove label from /etc/fstab
527 | if [ -n "$label" ]
528 | then
529 | sed -i "/^LABEL=$label/d" /etc/fstab
530 | fi
531 | # Remove dev from /etc/fstab
532 | if [ -n "$dev" ]
533 | then
534 | sed -i "\|^${dev} |d" /etc/fstab
535 | fi
536 | # Check for existing mnt points
537 | if [[ $(findmnt -n -S $dev) ]] && [ -n "$dev" ]
538 | then
539 | # Get existing mount point
540 | existing_mnt_point=$(lsblk -no mountpoint $dev)
541 | # Umount dev
542 | umount -f -q $dev > /dev/null 2>&1 || /bin/true
543 | # Remove mnt point
544 | rm -R -f $existing_mnt_point 2> /dev/null
545 | fi
546 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' -v OFS=':' '{ print $1, $11, $13 }' ) # dev device listing
547 |
548 | # Check for existing mnt points
549 | if [ -d "/$mnt_base_dir/$mnt_name" ] && [[ $(ls -A "/$mnt_base_dir/$mnt_name") ]]
550 | then
551 | i=1
552 | while [ -d "/$mnt_base_dir/${mnt_name}_$i" ] && [[ $(ls -A "/$mnt_base_dir/${mnt_name}_$i") ]]
553 | do
554 | # Suffix name by +1
555 | i=$(( $i + 1 ))
556 | done
557 | # Suffix the mnt name
558 | mnt_name="${mnt_name}_${i}"
559 | fi
560 |
561 | # New USB disk label
562 | disk_label="$mnt_name"
563 |
564 | # Erase / Wipe disks
565 | msg "Erasing disks..."
566 | while read dev
567 | do
568 | # Full device erase
569 | wipefs -a -f $dev >/dev/null 2>&1
570 | # Wait for pending udev events
571 | udevadm settle
572 | info "Erased device: $dev"
573 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $1 }' ) # file listing of disks to erase
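    | # For reference: 'wipefs -a -f' erases all filesystem, RAID and partition
    | # table signatures so the device re-reads as blank. To preview what would
    | # be wiped on a hypothetical device /dev/sdX without touching it:
    | #   wipefs --no-act /dev/sdX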
574 |
575 | # Format existing partition
576 | msg "Partitioning, formatting & labelling new disk..."
577 | inputdevLIST=()
578 | while read dev
579 | do
580 | if [[ "$dev" =~ ^/dev/sd[a-z][1-9]$ ]]
581 | then
582 | # Format to default ext4
583 | mkfs.ext4 -F $dev
584 | # Wait for pending udev events
585 | udevadm settle
586 | # Create disk label
587 | e2label $dev $disk_label
588 | # Wait for pending udev events
589 | udevadm settle
590 | # Set new disk uuid var
591 | disk_uuid=$(blkid -s UUID -o value $dev 2> /dev/null)
592 | # Create device array
593 | inputdevLIST+=( "$(echo "$dev:$disk_uuid:$disk_label")" )
594 | info "Ext4 disk partition created: ${YELLOW}$dev${NC}"
595 | echo
596 | elif [[ $dev =~ ^/dev/nvme[0-9]n[0-9]p[0-9]$ ]]
597 | then
598 | # Format to default ext4
599 | mkfs.ext4 -F $dev
600 | # Wait for pending udev events
601 | udevadm settle
602 | # Create disk label
603 | e2label $dev $disk_label
604 | # Wait for pending udev events
605 | udevadm settle
606 | # Set new disk uuid var
607 | disk_uuid=$(blkid -s UUID -o value $dev 2> /dev/null)
608 | # Create device array
609 | inputdevLIST+=( "$(echo "$dev:$disk_uuid:$disk_label")" )
610 | info "Ext4 disk partition created: ${YELLOW}$dev${NC}"
611 | echo
612 | fi
613 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $1 }' ) # file listing of disks
614 |
615 | # Disk Over-Provisioning
616 | msg "Applying over-provisioning factor % to disk..."
617 | while read dev
618 | do
619 | if [ "$(hdparm -I $dev 2> /dev/null | awk -F':' '/Nominal Media Rotation Rate/ { print $2 }' | sed 's/ //g')" = 'SolidStateDevice' ]
620 | then
621 | # Set over-provisioning factor %
622 | tune2fs -m $disk_op_ssd $dev > /dev/null
623 | # Wait for pending udev events
624 | udevadm settle
625 | info "SSD disk reserved block percentage: ${YELLOW}${disk_op_ssd}%${NC}"
626 | echo
627 | else
628 | # Set over-provisioning factor %
629 | tune2fs -m $disk_op_rota $dev > /dev/null
630 | # Wait for pending udev events
631 | udevadm settle
632 | info "Rotational disk reserved block percentage: ${YELLOW}${disk_op_rota}%${NC}"
633 | echo
634 | fi
635 | done < <( printf '%s\n' "${inputdevLIST[@]}" | awk -F':' '{ print $1 }' ) # file listing of new devs
636 |
637 | # Set SRC mount point
638 | PVE_SRC_MNT="/$mnt_base_dir/$mnt_name"
639 | fi
640 |
641 | #---- PVE disk mount ---------------------------------------------------------------
642 |
643 | if [ "$BUILD_TYPE" = TYPE01 ] || [ "$BUILD_TYPE" = TYPE02 ] || [ "$BUILD_TYPE" = TYPE04 ]
644 | then
645 | if [ "$input_tran_arg" = usb ]
646 | then
647 | #---- USB service
648 | # USB reset
649 | usb_reset
650 | # Get the device path associated with the UUID
651 | device_path=$(blkid -l -o device -t UUID="$disk_uuid")
652 | # Get the mount point of the device
653 | mount_point=$(findmnt -n -o TARGET --first-only "$device_path")
654 | # Remove any old mount (umount)
655 | if [ -n "$mount_point" ]
656 | then
657 | umount -fq $mount_point
658 | fi
659 | # Copy latest udev rule version
660 | cp -f $COMMON_DIR/bash/src/80-mount-usb-to-media-by-label.rules /etc/udev/rules.d/
661 | # Activate udev rules
662 | udevadm control --reload-rules
663 | # Trigger add event for dev
664 | trig_dev=$(blkid -t UUID=$disk_uuid -o device | awk -F/ '{print $NF}')
665 | udevadm trigger --action=change --sysname-match=$trig_dev
666 | # Check udev event usb mnt dir exists
667 | cnt=10 # Sets attempt number
668 | for i in $(seq 1 $cnt)
669 | do
670 | if [ -d "$PVE_SRC_MNT" ] && [[ $(ls -A "$PVE_SRC_MNT") ]]
671 | then
672 | # Print display msg
673 | info "Disk mount created: ${YELLOW}$PVE_SRC_MNT${NC}\n (Disk Label: $disk_label)"
674 | echo
675 | # Break on success
676 | break
677 | else
678 | sleep 1
679 | if [ $i -eq $cnt ]
680 | then
681 | # Print display msg
682 | warn "Directory '$PVE_SRC_MNT' does not exist. You have a USB issue to resolve.\nThe system checked ${cnt}x times.\nThe mount action is performed automatically by our udev rule '80-mount-usb-to-media-by-label.rules'. Check you do not have a conflicting udev rule (located in: /etc/udev/rules.d/) or a fstab entry for the particular disk, partition or shared folder being mounted.\n\nAlso try a different USB port and make sure its USB3.0 or later and use a external USB power supply if available.\n\nExiting the installer..."
683 | echo
684 | # Exit on fail
685 | exit 1
686 | fi
687 | fi
688 | done
689 | else
690 | #---- Onboard mount
691 | # Create PVE local disk mount
692 | mkdir -p "$PVE_SRC_MNT"
693 | # Create PVE local disk mount /etc/fstab
694 | echo -e "UUID=$disk_uuid $PVE_SRC_MNT ext4 defaults,nofail,rw,user_xattr,acl 0 0" >> /etc/fstab
695 | # Run mount command
696 | mount "$PVE_SRC_MNT"
697 | # Print display msg
698 | info "Disk mount created: ${YELLOW}$PVE_SRC_MNT${NC}\n (Disk UUID: $disk_uuid)"
699 | echo
700 | fi
701 | fi
702 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/shared/pve_nas_create_users.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_create_users.sh
4 | # Description: Create Ahuacate base Groups and Users (medialab, homelab, private etc)
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 | #---- Source -----------------------------------------------------------------------
9 | #---- Dependencies -----------------------------------------------------------------
10 | #---- Static Variables -------------------------------------------------------------
11 | #---- Other Variables --------------------------------------------------------------
12 | #---- Other Files ------------------------------------------------------------------
13 | #---- Body -------------------------------------------------------------------------
14 |
15 |
16 | #---- Create users groups
17 | msg "Creating default user groups..."
18 | # Group 'medialab'
19 | if [[ ! $(getent group medialab) ]]
20 | then
21 | groupadd -g 65605 medialab > /dev/null
22 | info "Default user group created: ${YELLOW}medialab${NC}"
23 | fi
24 | # Group 'homelab'
25 | if [[ ! $(getent group homelab) ]]
26 | then
27 | groupadd -g 65606 homelab > /dev/null
28 | info "Default user group created: ${YELLOW}homelab${NC}"
29 | fi
30 | # Group 'privatelab'
31 | if [[ ! $(getent group privatelab) ]]
32 | then
33 | groupadd -g 65607 privatelab > /dev/null
34 | info "Default user group created: ${YELLOW}privatelab${NC}"
35 | fi
36 | # Group 'chrootjail'
37 | if [[ ! $(getent group chrootjail) ]]
38 | then
39 | groupadd -g 65608 chrootjail > /dev/null
40 | info "Default user group created: ${YELLOW}chrootjail${NC}"
41 | fi
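   | # For reference: the fixed GIDs (65605-65608) keep group ownership
   | # consistent between the PVE host and CT/VM bind mounts, so files written
   | # by a guest user resolve to the same group everywhere. Quick check:
   | #   getent group medialab homelab privatelab chrootjail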
42 | echo
43 |
44 | #---- Create Base User Accounts
45 | msg "Creating default users..."
46 | mkdir -p "$DIR_SCHEMA/homes" >/dev/null
47 | chgrp -R root "$DIR_SCHEMA/homes" >/dev/null
48 | chmod -R 0755 "$DIR_SCHEMA/homes" >/dev/null
49 | # User 'media'
50 | if [[ ! $(id -u media 2> /dev/null) ]]
51 | then
52 | # Remove old dir
53 | if [ -d "$DIR_SCHEMA/homes/media" ]
54 | then
55 | rm -R -f "$DIR_SCHEMA/homes/media"
56 | fi
57 | # Add user
58 | useradd -m -d "$DIR_SCHEMA/homes/media" -u 1605 -g medialab -s /bin/bash media >/dev/null
59 | chmod 0700 "$DIR_SCHEMA/homes/media"
60 | info "Default user created: ${YELLOW}media${NC} of group medialab"
61 | fi
62 | # User 'home'
63 | if [[ ! $(id -u home 2> /dev/null) ]]
64 | then
65 | # Remove old dir
66 | if [ -d "$DIR_SCHEMA/homes/home" ]
67 | then
68 | rm -R -f "$DIR_SCHEMA/homes/home"
69 | fi
70 | # Add user
71 | useradd -m -d "$DIR_SCHEMA/homes/home" -u 1606 -g homelab -G medialab -s /bin/bash home >/dev/null
72 | chmod 0700 "$DIR_SCHEMA/homes/home"
73 | info "Default user created: ${YELLOW}home${NC} of groups medialab, homelab"
74 | fi
75 | # User 'private'
76 | if [[ ! $(id -u private 2> /dev/null) ]]
77 | then
78 | # Remove old dir
79 | if [ -d "$DIR_SCHEMA/homes/private" ]
80 | then
81 | rm -R -f "$DIR_SCHEMA/homes/private"
82 | fi
83 | # Add user
84 | useradd -m -d "$DIR_SCHEMA/homes/private" -u 1607 -g privatelab -G medialab,homelab -s /bin/bash private >/dev/null
85 | chmod 0700 "$DIR_SCHEMA/homes/private"
86 | info "Default user created: ${YELLOW}private${NC} of groups medialab, homelab and privatelab"
87 | fi
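   | # For reference: a quick sketch to verify the accounts above exist with
   | # the expected fixed UIDs and supplementary groups:
   | #   id media && id home && id private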
88 | echo
89 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/shared/pve_nas_create_zfs_build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_create_zfs_build.sh
4 | # Description: Source script for building zfs disk storage
5 | # Onboard disks only
6 | # ----------------------------------------------------------------------------------
7 |
8 | #---- Source -----------------------------------------------------------------------
9 | #---- Dependencies -----------------------------------------------------------------
10 |
11 | # Requires '/shared/pve_nas_bash_utility.sh'
12 | # Loaded from parent 'pve_nas_create_storagediskbuild.sh'
13 |
14 | # Requires arg 'usb' or 'onboard' to be set in source command
15 | # Sets the validation input type: input_lvm_vgname_val usb
16 | if [ -z "$1" ]
17 | then
18 | input_tran=""
19 | input_tran_arg=""
20 | elif [[ "$1" =~ 'usb' ]]
21 | then
22 | input_tran='(usb)'
23 | input_tran_arg='usb'
24 | elif [[ "$1" =~ 'onboard' ]]
25 | then
26 | input_tran='(sata|ata|scsi|nvme)'
27 | input_tran_arg='onboard'
28 | fi
29 |
30 | #---- Static Variables -------------------------------------------------------------
31 |
32 | # Basic storage disk label
33 | basic_disklabel='(.*_hba|.*_usb|.*_onboard)$'
34 |
35 | #---- Other Variables --------------------------------------------------------------
36 |
37 | # USB Disk Storage minimum size (GB)
38 | stor_min='30'
39 |
40 | #---- ZFS variables
41 |
42 | # ZFS ashift
43 | ashift_hd='12' # Generally for rotational disks with 4K sectors
44 | ashift_ssd='13' # For modern SSDs with 8K sectors
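   | # For reference: ashift is log2 of the sector size ZFS aligns I/O to
   | # (2^12 = 4096 bytes, 2^13 = 8192 bytes). It is fixed at vdev creation and
   | # cannot be changed later, so the larger value is the safer guess.
   | # Verify after pool creation with: zpool get ashift <poolname>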
45 |
46 | # ZFS compression
47 | zfs_compression='lz4'
48 |
49 | #---- Other Files ------------------------------------------------------------------
50 | #---- Functions --------------------------------------------------------------------
51 | #---- Body -------------------------------------------------------------------------
52 |
53 | #---- Select a ZFS build option
54 |
55 | section "Select a ZFS build option"
56 |
57 | # 1=PATH:2=KNAME:3=PKNAME:4=FSTYPE:5=TRAN:6=MODEL:7=SERIAL:8=SIZE:9=TYPE:10=ROTA:11=UUID:12=RM:13=LABEL:14=ZPOOLNAME:15=SYSTEM
58 | while true
59 | do
60 | # Create fs/disk lists for LVM, ZFS, Basic (onboard & usb)
61 | source $SHARED_DIR/pve_nas_fs_list.sh
62 |
63 | # Create labels
64 | OPTIONS_LABELS_INPUT=$(printf '%s\n' "${zfs_option_labels}" \
65 | | sed -e '$a\None. Exit this installer:::TYPE00' \
66 | | column -t -s ":" -N "LVM OPTIONS,DESCRIPTION,SIZE,TYPE" -H TYPE -T DESCRIPTION -c 150 -d)
67 |
68 | # Create values
69 | OPTIONS_VALUES_INPUT=$(printf '%s\n' "${zfs_option_values}" \
70 | | sed -e '$a\TYPE00:0')
71 |
72 | # Create display
73 | msg_box "#### PLEASE READ CAREFULLY - USER OPTIONS FOR ZFS STORAGE ####\n
74 | The User must select from the available ZFS build options.
75 |
76 | $(printf '%s\n' "${zfs_display}" \
77 | | column -s : -t -N "BUILD OPTIONS,DESCRIPTION,STORAGE SIZE,ZFS POOL" | indent2)
78 |
79 | Option A - Use Existing ZPool
80 | Select an existing 'ZPool' to store a new ZFS File System without affecting existing 'ZPool' datasets.
81 |
82 | Option B - Destroy & Wipe ZPool
83 | Select and destroy a 'ZPool' and all data stored on all the 'ZPool' member disks. The User can then recreate a ZPool. This will result in 100% loss of all 'ZPool' dataset data.
84 |
85 | Option C - Create a new ZPool
86 | The installer has identified free disks available for creating a new ZFS Storage Pool. All data on the selected new member disks will be permanently destroyed."
87 | echo
88 |
89 | # Make menu selection
90 | makeselect_input1 "$OPTIONS_VALUES_INPUT" "$OPTIONS_LABELS_INPUT"
91 | singleselect SELECTED "$OPTIONS_STRING"
92 | # Set ZPOOL_BUILD
93 | ZPOOL_BUILD=$(echo "$RESULTS" | awk -F':' '{ print $1 }')
94 | ZPOOL_BUILD_VAR=$(echo "$RESULTS" | awk -F':' '{ print $2 }')
95 |
96 |
97 | #---- Destroy & Wipe ZPool
98 |
99 | if [ "$ZPOOL_BUILD" = TYPE02 ]
100 | then
101 | msg_box "#### PLEASE READ CAREFULLY - DESTROY & WIPE ZPOOL ####\n\nYou have chosen to destroy & wipe ZFS Storage Pool named '$ZPOOL_BUILD_VAR' on PVE $(echo $(hostname)). This action will result in permanent data loss of all data stored in ZPool '$ZPOOL_BUILD_VAR'.\n\n$(printf "\tZPool and Datasets selected for destruction")\n$(zfs list | grep "^${ZPOOL_BUILD_VAR}.*" | awk '{ print "\t-- "$1 }')\n\nThe wiped disks will then be available for the creation of a new ZPool."
102 | echo
103 | while true
104 | do
105 | read -p "Are you sure you want to destroy ZPool '$ZPOOL_BUILD_VAR' and its datasets: [y/n]?" -n 1 -r YN
106 | echo
107 | case $YN in
108 | [Yy]*)
109 | msg "Destroying ZPool '$ZPOOL_BUILD_VAR'..."
110 |
111 | # Existing ZPool disk member list (byid)
112 | # This matches the ZPool disk ID with /dev/disk/by-id/
113 | zpoolbyiddisk_LIST=()
114 | for line in $(zpool list -v -H $ZPOOL_BUILD_VAR | sed '1d' | awk -F'\t' '{ print $2 }' | sed '/^$/d' | sed 's/-part[0-9]$//' | uniq)
115 | do
116 | # Create ZPool disk member list
117 | zpoolbyiddisk_LIST+=( $(ls /dev/disk/by-id | egrep "$line$") )
118 | done
119 |
120 | # Existing ZPool part member list (by-id)
121 | # This matches the ZPool disk ID with /dev/disk/by-id/
122 | zpoolbyiddiskpart_LIST=()
123 | for line in $(zpool list -v -H $ZPOOL_BUILD_VAR | sed '1d' | awk -F'\t' '{ print $2 }' | sed '/^$/d' | egrep '\-part([0-9]+)?$' | uniq)
124 | do
125 | # Create ZPool disk part member list
126 | zpoolbyiddiskpart_LIST+=( $(ls /dev/disk/by-id/ | grep $line) )
127 | done
128 |
129 | # ZPool umount
130 | # Sorted by order in unmounting
131 | msg "Unmounting ZPool '$ZPOOL_BUILD_VAR'..."
132 | while read -r var
133 | do
134 | # Umount the zpool
135 | zfs unmount -f $var 2> /dev/null
136 | done < <( zfs list -r $ZPOOL_BUILD_VAR | awk '{ print $1 }' | sed '1d' | sort -r -n )
137 | udevadm settle
138 |
139 | # ZPool delete
140 | msg "Destroying ZPool '$ZPOOL_BUILD_VAR'..."
141 | zpool destroy -f $ZPOOL_BUILD_VAR &> /dev/null
142 | if [ ! $? = 0 ]
143 | then
144 | warn "ZFS Pool '$ZPOOL_BUILD_VAR' cannot be destroyed because it is busy. Try another option or exit this installer and manually fix the problem ('$ZPOOL_BUILD_VAR' may be in use by an existing VM or LXC)."
145 | echo
146 | break
147 | fi
148 | udevadm settle
149 |
150 | # ZPool label clear
151 | msg "ZPool member disk part label clear..."
152 | while read dev
153 | do
154 | zpool labelclear -f /dev/disk/by-id/$dev 2> /dev/null
155 | done < <( printf '%s\n' "${zpoolbyiddiskpart_LIST[@]}" ) # file listing of disks to erase
156 | info "ZPool '$ZPOOL_BUILD_VAR' status: ${YELLOW}destroyed${NC}"
157 | udevadm settle
158 |
159 | # Destroy and wipe disks
160 | msg "Erasing device..."
161 | while read dev
162 | do
163 | # Full device erase
164 | sgdisk --zap /dev/disk/by-id/$dev >/dev/null 2>&1
165 | #dd if=/dev/urandom of=/dev/disk/by-id/$dev bs=1M count=1 conv=notrunc 2>/dev/null
166 | wipefs --all --force /dev/disk/by-id/$dev >/dev/null 2>&1
167 | # Wait for pending udev events
168 | udevadm settle
169 | info "Erased device:\n /dev/disk/by-id/$dev"
170 | done < <( printf '%s\n' "${zpoolbyiddisk_LIST[@]}" ) # file listing of disks to erase
171 |
172 | # Wait for pending udev events
173 | udevadm settle
174 | sleep 1
175 |
176 | # Re-read the partition table
177 | partprobe
178 |
179 | echo
180 | break
181 | ;;
182 | [Nn]*)
183 | echo
184 | msg "You have chosen not to proceed with destroying ZFS Storage Pool '$ZPOOL_BUILD_VAR'.\nTry again..."
185 | sleep 2
186 | echo
187 | break
188 | ;;
189 | *)
190 | warn "Error! Entry must be 'y' or 'n'. Try again..."
191 | echo
192 | ;;
193 | esac
194 | done
195 | elif [ "$ZPOOL_BUILD" = TYPE00 ]
196 | then
197 | # Exit installer
198 | msg "You have chosen not to proceed. Aborting. Bye..."
199 | echo
200 | exit 0
201 | else
202 | # Proceed with build option
203 | break
204 | fi
205 | done
206 |
207 |
208 | #---- Create new ZPOOL (SSD and HDD)
209 |
210 | if [ "$ZPOOL_BUILD" = TYPE03 ]
211 | then
212 | section "Create new ZPool"
213 | # 1=PATH:2=KNAME:3=PKNAME:4=FSTYPE:5=TRAN:6=MODEL:7=SERIAL:8=SIZE:9=TYPE:10=ROTA:11=UUID:12=RM:13=LABEL:14=ZPOOLNAME:15=SYSTEM
214 |
215 | # Set Pool name
216 | input_zfs_name_val POOL
217 |
218 | # Disk count by type
219 | disk_CNT=$(printf '%s\n' "${storLIST[@]}" | awk -F':' -v stor_min="$stor_min" -v var="$ZPOOL_BUILD_VAR" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" \
220 | 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
221 | {if ($5 ~ input_tran && $3 == 0 && $4 != "LVM2_member" && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == var && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { print $0 }}' | wc -l)
222 |
223 | # Select member disks
224 | # Onboard build
225 | msg_box "#### PLEASE READ CAREFULLY - SELECT ZFS POOL DISKS ####\n\nThe User has ${disk_CNT}x disk(s) available for a new ZPool. When selecting your disks remember ZFS RaidZ will format all disks to the size of the smallest member disk. So its best to select disks of near identical storage sizes. $(if [ ${ZPOOL_BUILD_VAR} = 0 ]; then echo "\nDo NOT select any SSD disks which you intend to use for ZFS Zil or L2ARC cache."; fi)"
226 | echo
227 |
228 | #---- Make member disk selection
229 | msg "The User must now select member disks to create ZFS pool '$POOL'."
230 | OPTIONS_VALUES_INPUT=$(printf '%s\n' "${storLIST[@]}" | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v var="$ZPOOL_BUILD_VAR" -v basic_disklabel="$basic_disklabel" \
231 | 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
232 | {if ($5 ~ input_tran && $3 == 0 && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == var && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { print $0 } }')
233 | OPTIONS_LABELS_INPUT=$(printf '%s\n' "${storLIST[@]}" | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v var="$ZPOOL_BUILD_VAR" -v basic_disklabel="$basic_disklabel" \
234 | 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
235 | {if ($5 ~ input_tran && $3 == 0 && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == var && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { sub(/1/,"HDD",$10);sub(/0/,"SSD",$10); print $1, $6, $8, $10 } }' \
236 | | column -t -s :)
237 | makeselect_input1 "$OPTIONS_VALUES_INPUT" "$OPTIONS_LABELS_INPUT"
238 | multiselect_confirm SELECTED "$OPTIONS_STRING"
239 |
240 | # Create input disk list array
241 | inputdiskLIST=()
242 | for i in "${RESULTS[@]}"
243 | do
244 | inputdiskLIST+=( $(echo $i) )
245 | done
246 |
247 |
248 | #---- Select ZFS Raid level
249 |
250 | # Set Raid level
251 | section "Select a ZFS Raid level for ZPool '$POOL'"
252 |
253 | raidoptionLIST=()
254 | raidoptionLIST+=( "raid0:1:Also called 'striping'. Fast but no redundancy." \
255 | "raid1:2:Also called 'mirroring'. The resulting capacity is that of a single disk." \
256 | "raid10:4:A combination of RAID0 and RAID1. Minimum 4 disks (even unit number only)." \
257 | "raidZ1:3:A variation on RAID-5, single parity. Minimum 3 disks." \
258 | "raidZ2:4:A variation on RAID-5, double parity. Minimum 4 disks." \
259 | "raidZ3:5:A variation on RAID-5, triple parity. Minimum 5 disks." )
260 |
261 | # Select RaidZ level
262 | msg "The User must now select a ZFS RaidZ level based on your ${#inputdiskLIST[@]}x disk selection..."
263 | OPTIONS_VALUES_INPUT=$(printf '%s\n' "${raidoptionLIST[@]}" | awk -F':' -v INPUT_CNT=${#inputdiskLIST[@]} 'BEGIN{OFS=FS} \
264 | {if ($2 <= INPUT_CNT) { print $1} }')
265 | OPTIONS_LABELS_INPUT=$(printf '%s\n' "${raidoptionLIST[@]}" | awk -F':' -v INPUT_CNT=${#inputdiskLIST[@]} 'BEGIN{OFS=FS} \
266 | {if ($2 <= INPUT_CNT) { print toupper($1) " | " $3} }')
267 | makeselect_input1 "$OPTIONS_VALUES_INPUT" "$OPTIONS_LABELS_INPUT"
268 | singleselect_confirm SELECTED "$OPTIONS_STRING"
269 | # Selected RaidZ level
270 | inputRAIDLEVEL="$RESULTS"
271 |
272 |
273 | #---- Create new ZPool
274 |
275 | section "Create new ZFS Pool '${POOL^}'"
276 |
277 | # Erase / Wipe ZFS pool disks
278 | msg "Erasing ZFS pool disks..."
279 | while read dev
280 | do
281 | # Full device erase
282 | sgdisk --zap $dev >/dev/null 2>&1
283 | #dd if=/dev/urandom of=$dev count=1 bs=1M conv=notrunc 2>/dev/null
284 | wipefs --all --force $dev >/dev/null 2>&1
285 | info "Erased device: $dev"
286 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $1 }' ) # file listing of disks to erase
287 | echo
288 |
289 | # Wait for pending udev events
290 | udevadm settle
291 | sleep 1
292 |
293 | # Re-read the partition table
294 | partprobe
295 |
296 | # Set ZFS ashift
297 | if [ "$ZPOOL_BUILD_VAR" = 0 ]
298 | then
299 | ashift="$ashift_ssd"
300 | elif [ "$ZPOOL_BUILD_VAR" = 1 ]
301 | then
302 | ashift="$ashift_hd"
303 | fi
304 |
305 | # Determine disk cnt parity ( prune smallest disk if odd cnt for Raid10 )
306 | if [ ! $(( ${#inputdiskLIST[@]} % 2 )) = 0 ] && [ "$inputRAIDLEVEL" = 'raid10' ]
307 | then
308 | # Set smallest member disk for removal for Raid10 build
309 | inputdiskLISTPARITY=1
310 | deleteDISK=( $(printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' 'NR == 1 {line = $0; min = 0.0+$8} \
311 | NR > 1 && 0.0+$8 < min {line = $0; min = 0.0+$8} \
312 | END{print line}') )
313 | for target in "${deleteDISK[@]}"
314 | do
315 | for i in "${!inputdiskLIST[@]}"
316 | do
317 | if [[ ${inputdiskLIST[i]} = $target ]]
318 | then
319 | unset 'inputdiskLIST[i]'
320 | fi
321 | done
322 | done
323 | else
324 | inputdiskLISTPARITY=0
325 | fi
326 |
327 | # Create disk list by-id
328 | byiddisk_LIST=()
329 | while read serial
330 | do
331 | # Create array
332 | byiddisk_LIST+=( $(ls /dev/disk/by-id | egrep "$serial$") )
333 | done < <( printf '%s\n' "${inputdiskLIST[@]}" | awk -F':' '{ print $7 }' )
334 |
335 | # Create ZFS Pool Tank
336 | msg "Creating Zpool '$POOL'..."
337 | if [ "$inputRAIDLEVEL" = 'raid0' ]
338 | then
339 | # Raid 0
340 | zfs_ARG=$(printf '%s\n' "${byiddisk_LIST[@]}" | awk '{ print "/dev/disk/by-id/"$1 }' | xargs)
341 | zfs_DISPLAY="ZPool '$POOL' status: ${YELLOW}${inputRAIDLEVEL^^}${NC} - ${#inputdiskLIST[@]}x member disks"
342 | elif [ "$inputRAIDLEVEL" = 'raid1' ]
343 | then
344 | # Raid 1
345 | zfs_ARG=$(printf '%s\n' "${byiddisk_LIST[@]}" | awk '{ print "/dev/disk/by-id/"$1 }' | xargs | sed 's/^/mirror /')
346 | zfs_DISPLAY="ZPool '$POOL' status: ${YELLOW}${inputRAIDLEVEL^^}${NC} - ${#inputdiskLIST[@]}x member disks"
347 | elif [ "$inputRAIDLEVEL" = 'raid10' ]
348 | then
349 | # Raid 10
350 | zfs_ARG=$(printf '%s\n' "${byiddisk_LIST[@]}" | awk '{ print "/dev/disk/by-id/"$1 }' | xargs | sed '-es/ / mirror /'{1000..1..2} | sed 's/^/mirror /')
351 | zfs_DISPLAY="ZPool '$POOL' status: ${YELLOW}${inputRAIDLEVEL^^}${NC} - ${#inputdiskLIST[@]}x member disks\n$(if [ ${inputdiskLISTPARITY} = 1 ]; then msg "Disk '$(printf '%s\n' "${deleteDISK[@]}" | awk -F':' '{ print $5 "-" $6 "_" $7 }')' was NOT INCLUDED in ZPool '${POOL}'. Raid 10 requires a even number of member disks so it was removed. You can manually configure this disk as a hot spare."; fi)"
352 | elif [ "$inputRAIDLEVEL" = 'raidz1' ]
353 | then
354 | # RaidZ1
355 | zfs_ARG="raidz1 $(printf '%s\n' "${byiddisk_LIST[@]}" | awk '{ print "/dev/disk/by-id/"$1 }' | xargs)"
356 | zfs_DISPLAY="Creating ZPool '$POOL': ${YELLOW}${inputRAIDLEVEL^^}${NC} - ${#inputdiskLIST[@]}x member disks"
357 | elif [ "$inputRAIDLEVEL" = 'raidz2' ]
358 | then
359 | # RaidZ2
360 | zfs_ARG="raidz2 $(printf '%s\n' "${byiddisk_LIST[@]}" | awk '{ print "/dev/disk/by-id/"$1 }' | xargs)"
361 | zfs_DISPLAY="Creating ZPool '$POOL': ${YELLOW}${inputRAIDLEVEL^^}${NC} - ${#inputdiskLIST[@]}x member disks"
362 | elif [ "$inputRAIDLEVEL" = 'raidz3' ]
363 | then
364 | # Raid Z3
365 | zfs_ARG="raidz3 $(printf '%s\n' "${byiddisk_LIST[@]}" | awk '{ print "/dev/disk/by-id/"$1 }' | xargs)"
366 | zfs_DISPLAY="Creating ZPool '$POOL': ${YELLOW}${inputRAIDLEVEL^^}${NC} - ${#inputdiskLIST[@]}x member disks"
367 | fi
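    | # For reference: zfs_ARG is the vdev layout handed to 'zpool create'.
    | # As an illustration, 4x member disks d1..d4 would expand to:
    | #   raid0  -> d1 d2 d3 d4
    | #   raid1  -> mirror d1 d2 d3 d4        (one 4-way mirror)
    | #   raid10 -> mirror d1 d2 mirror d3 d4 (striped mirrors)
    | #   raidz2 -> raidz2 d1 d2 d3 d4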
368 |
369 | # Create ZFS Pool
370 | zpool create -f -o ashift=$ashift $POOL $zfs_ARG
371 | info "$zfs_DISPLAY"
372 | info "ZFS Storage Pool status: ${YELLOW}$(zpool status -x $POOL)${NC}"
373 | echo
374 | fi # End of Create new ZPool ( TYPE03 action )
375 |
376 |
377 | #---- Reconnect to ZPool
378 | if [ "$ZPOOL_BUILD" = TYPE01 ]
379 | then
380 | section "Reconnect to existing ZPool"
381 |
382 | # Set ZPOOL
383 | POOL="$ZPOOL_BUILD_VAR"
387 |
388 | # Reconnect to ZPool
389 | msg "Reconnecting to existing ZFS '$POOL'..."
390 | zpool export $POOL
391 | zpool import -d /dev/disk/by-id $POOL
392 | info "ZFS Storage Pool status: ${YELLOW}$(zpool status -x $POOL)${NC}"
393 | echo
394 | fi
395 |
396 |
397 | #---- Create PVE ZFS File System
398 | if [ "$ZPOOL_BUILD" = TYPE01 ] || [ "$ZPOOL_BUILD" = TYPE03 ]
399 | then
400 | section "Create ZFS file system"
401 |
402 | # Set ZPOOL if TYPE01
403 | if [ "$ZPOOL_BUILD" = TYPE01 ]
404 | then
405 | POOL="$ZPOOL_BUILD_VAR"
406 | fi
407 |
408 | # Check if ZFS file system name is set
409 | if [ -z ${HOSTNAME+x} ]
410 | then
411 | input_zfs_name_val ZFS_NAME
412 | else
413 | ZFS_NAME=${HOSTNAME,,}
414 | fi
415 |
416 | # Create PVE ZFS
417 | if [ ! -d "/$POOL/$ZFS_NAME" ]
418 | then
419 | msg "Creating ZFS file system $POOL/$ZFS_NAME..."
420 | zfs create -o compression=$zfs_compression $POOL/$ZFS_NAME >/dev/null
421 | zfs set acltype=posixacl aclinherit=passthrough xattr=sa $POOL/$ZFS_NAME >/dev/null
422 | zfs set xattr=sa dnodesize=auto $POOL >/dev/null
423 | info "ZFS file system settings:\n -- Compresssion: ${YELLOW}$zfs_compression${NC}\n -- Posix ACL type: ${YELLOW}posixacl${NC}\n -- ACL inheritance: ${YELLOW}passthrough${NC}\n -- LXC with ACL on ZFS: ${YELLOW}auto${NC}"
424 | echo
425 | elif [ -d "/$POOL/$ZFS_NAME" ]
426 | then
427 | msg "Modifying existing ZFS file system settings /$POOL/$ZFS_NAME..."
428 | zfs set compression=$zfs_compression $POOL/$ZFS_NAME
429 | zfs set acltype=posixacl aclinherit=passthrough xattr=sa $POOL/$ZFS_NAME >/dev/null
430 | zfs set xattr=sa dnodesize=auto $POOL >/dev/null
431 | info "Changes to existing ZFS file system settings ( $POOL/$ZFS_NAME ):\n -- Compresssion: ${YELLOW}$zfs_compression${NC}\n -- Posix ACL type: ${YELLOW}posixacl${NC}\n -- ACL inheritance: ${YELLOW}passthrough${NC}\n -- LXC with ACL on ZFS: ${YELLOW}auto${NC}\nCompression will only be performed on new stored data."
432 | echo
433 | fi
434 | fi
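    | # For reference: 'acltype=posixacl' with 'xattr=sa' stores POSIX ACLs as
    | # system-attribute xattrs, the usual setting for Samba/NFS shares and for
    | # LXC bind mounts that carry ACLs. A verification sketch:
    | #   zfs get compression,acltype,xattr,dnodesize $POOL/$ZFS_NAME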
435 |
436 | # Wait for pending udev events
437 | udevadm settle
438 | sleep 1
439 |
440 | # Re-read the partition table
441 | partprobe
442 |
443 | # Update storage list array (function)
444 | storage_list
445 |
446 | # Create a working list array (function)
447 | stor_LIST
448 |
449 | # Set SRC mount point
450 | PVE_SRC_MNT="/$POOL/$ZFS_NAME"
451 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/shared/pve_nas_create_zfs_cacheaddon.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_create_zfs_cacheaddon.sh
4 | # Description: Source script for adding ZFS Cache to an existing ZFS raid storage
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Source -----------------------------------------------------------------------
8 |
9 | # NAS bash utility
10 | source $COMMON_DIR/nas/src/nas_bash_utility.sh
11 |
12 | #---- Dependencies -----------------------------------------------------------------
13 | #---- Static Variables -------------------------------------------------------------
14 |
15 | # Sets the validation input type
16 | input_tran='(sata|ata|scsi|nvme)'
17 | input_tran_arg='onboard'
18 |
19 | # Basic storage disk label
20 | basic_disklabel='(.*_hba|.*_usb|.*_onboard)$'
21 |
22 | # Disk Over-Provisioning (value is % of disk)
23 | disk_op_ssd=10
24 |
25 | # Disk device regex
26 | type_ssd='(^/dev/sd[a-z])'
27 | type_nvme='(^/dev/nvme[0-9]n[0-9])'
28 |
29 | #---- Other Variables --------------------------------------------------------------
30 | #---- Other Files ------------------------------------------------------------------
31 |
32 | # USB Disk Storage minimum size (GB)
33 | stor_min=5
34 |
35 | #---- Functions --------------------------------------------------------------------
36 | #---- Body -------------------------------------------------------------------------
37 |
38 | #---- Prerequisites
39 |
40 | # Create storage list array
41 | storage_list
42 |
43 | # Create a working list array
44 | stor_LIST
45 |
46 | # Create ZPool list
47 | zpool_LIST=()
48 | while read zpool
49 | do
50 | # Check if ZPool is already configured for ZFS cache
51 | if [[ ! $(zpool status $zpool | grep -w 'logs\|cache') ]]
52 | then
53 | zpool_LIST+=( "$zpool" )
54 | fi
55 | done < <( zpool list -H -o name | sed '/^rpool/d' ) # file listing of zpools
56 |
57 | # Check for existing ZPools
58 | if [ ${#zpool_LIST[@]} = 0 ]
59 | then
60 | msg "We could NOT detect any existing ZPools to add ZFS Cache. First create a ZPool and try again. Bye..."
61 | echo
62 | return
63 | fi
64 |
65 |
66 | #---- ZFS cache disk list
67 | # 1=PATH:2=KNAME:3=PKNAME:4=FSTYPE:5=TRAN:6=MODEL:7=SERIAL:8=SIZE:9=TYPE:10=ROTA:11=UUID:12=RM:13=LABEL:14=ZPOOLNAME:15=SYSTEM
68 | # build:description:tran:size|action:all
69 |
70 | zfs_cache_option_input=$(printf '%s\n' "${storLIST[@]}" | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" -v type_ssd="$type_ssd" -v type_nvme="$type_nvme" \
71 | 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
72 | # TYPE01: Select SSD
73 | {if ($1 ~ type_ssd && $5 ~ input_tran && $3 == 0 && $4 != "LVM2_member" && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == 0 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) print "TYPE01", "SSD", $0 } \
74 | # TYPE02: Select NVMe
75 | {if ($1 ~ type_nvme && $5 ~ input_tran && $3 == 0 && $4 != "LVM2_member" && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == 0 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) print "TYPE02", "NVMe", $0 }')
76 |
77 | # Create selection labels & values
78 | zfs_cache_option_labels=$(printf '%s\n' "$zfs_cache_option_input" | sed '/^$/d' | awk 'BEGIN{FS=OFS=":"} { print $3, $8, $2, $10 }')
79 | zfs_cache_option_values=$(printf '%s\n' "$zfs_cache_option_input" | sed '/^$/d' | cut -d: -f1,3-)
80 |
81 | # ZFS option cnt
82 | zfs_cache_option_cnt=$(echo "$zfs_cache_option_values" | sed '/^$/d' | wc -l)
83 |
84 | # Create display
85 | zfs_cache_option_display=$(printf '%s\n' "$zfs_cache_option_input" | sed '/^$/d' | awk 'BEGIN{FS=OFS=":"} { print $3, $8, $2, $10 }' | column -s : -t -N "DEVICE PATH,DESCRIPTION,TYPE,SIZE" | indent2)
86 |
87 | # Check SSD/NVMe storage is available
88 | if [ "$zfs_cache_option_cnt" = 0 ]
89 | then
90 | msg "We could NOT detect any unused available SSD or NVMe storage devices. Unused disk(s) might have been wrongly identified as 'system drives' if they contain Linux system or OS partitions. To fix this issue, manually format the disk erasing all data before running this installation again. USB disks cannot be used for ZFS cache. Bye..."
91 | echo
92 | return
93 | fi
94 |
95 |
96 | #---- Select ZFS Cache devices
97 |
98 | section "Select ZFS Cache devices"
99 | # 1=PATH:2=KNAME:3=PKNAME:4=FSTYPE:5=TRAN:6=MODEL:7=SERIAL:8=SIZE:9=TYPE:10=ROTA:11=UUID:12=RM:13=LABEL:14=ZPOOLNAME:15=SYSTEM
100 |
101 | # Select cache member disks
102 | while true
103 | do
104 | # Create labels
105 | OPTIONS_LABELS_INPUT=$(printf '%s\n' "$zfs_cache_option_labels" | column -t -s :)
106 |
107 | # Create values
108 | OPTIONS_VALUES_INPUT=$(printf '%s\n' "$zfs_cache_option_values")
109 |
110 | # Display msg
111 | msg_box "#### PLEASE READ CAREFULLY - ZFS CACHE SETUP ####\n\nThere are ${zfs_cache_option_cnt}x available device(s) for ZFS Cache. Do not co-mingle SSD and NVMe cache devices together.\n\n$(printf '%s\n' "$zfs_cache_option_display")\n\nIn the next steps the User must select their ZFS cache devices (recommend a maximum of 2x devices). The devices will be erased and wiped of all data and partitioned ready for ZIL and ARC or L2ARC cache.\n\nThe ARC or L2ARC and ZIL cache build options are:\n\n1. Standard Cache: Select 1x device only. No ARC,L2ARC or ZIL disk redundancy.\n2. Accelerated Cache: Select 2x devices. ARC or L2ARC cache set to Raid0 (stripe) and ZIL set to Raid1 (mirror)."
112 |
113 | # Make selection
114 | makeselect_input1 "$OPTIONS_VALUES_INPUT" "$OPTIONS_LABELS_INPUT"
115 | multiselect_confirm SELECTED "$OPTIONS_STRING"
116 |
117 | # Create input disk list array
118 | inputcachedisk_LIST=()
119 | for i in "${RESULTS[@]}"
120 | do
121 | inputcachedisk_LIST+=( $(echo $i) )
122 | done
123 |
124 | # Check device number and co-mingling status of selected devices
125 | if [ "${#inputcachedisk_LIST[@]}" = 0 ] || [[ "${inputcachedisk_LIST[*]}" =~ ^TYPE01 ]] && [[ "${inputcachedisk_LIST[*]}" =~ ^TYPE02 ]]
126 | then
127 | msg "The User selected ${#inputcachedisk_LIST[@]}x devices. The requirement is:\n -- Minimum of '1x' device\n -- A recommended maximum of '2x' devices\n -- Cannot co-mingled SSD and NVMe devices together\nTry again..."
128 | elif [ "${#inputcachedisk_LIST[@]}" -ge 1 ]
129 | then
130 | break
131 | fi
132 | done
133 |
134 |
135 | #---- Set ZFS cache partition sizes
136 |
137 | section "Set ZFS cache partition sizes"
138 |
139 | msg_box "#### Set ARC or L2ARC cache and ZIL disk partition sizes ####
140 |
141 | You have allocated ${#inputcachedisk_LIST[@]}x device(s) for ZFS cache partitioning.
142 |
143 | The maximum size of a ZIL log should be about half the size of your host's $(grep MemTotal /proc/meminfo | awk '{printf "%.0fGB\n", $2/1024/1024}') of installed physical RAM, BUT not less than 8GB.
144 |
145 | The ARC or L2ARC cache size should not be less than 64GB but will be sized to use the whole ZFS cache device.
146 |
147 | The system will automatically calculate the best partition sizes for you. A device over-provisioning factor of ${disk_op_ssd}% will be applied."
148 | echo
149 |
150 | # Set ZIL partition size
151 | if [ $(free -g | awk '/^Mem:/ {print $2}') -le 16 ]
152 | then
153 | # Set ZIL partition size to default minimum
154 | zil_size_var=8
155 | msg "PVE host $(grep MemTotal /proc/meminfo | awk '{printf "%.0fGB\n", $2/1024/1024}') of RAM is below the minimum threshold. Setting ZIL size to the default minimum..."
156 | info "ZIL size: ${YELLOW}${zil_size_var}GB${NC} (default minimum)"
157 | echo
158 | elif [ $(free -g | awk '/^Mem:/ {print $2}') -gt 16 ]
159 | then
160 | # Set ZIL partition size
161 | if [ $(free -g | awk '/^Mem:/ {print $2}') -lt 24 ]
162 | then
163 | zil_seq_size=1
164 | else
165 | zil_seq_size=4
166 | fi
167 | msg "The User must select a ZIL size. The available options are based on your PVE hosts installed $(grep MemTotal /proc/meminfo | awk '{printf "%.0fGB\n", $2/1024/1024}') RAM in ${zil_seq_size}GB increments. Now select your ZIL size..."
168 |
169 | OPTIONS_VALUES_INPUT=$(seq $(grep MemTotal /proc/meminfo | awk '{printf "%.0f\n", $2/1024/1024/2}') -${zil_seq_size} 8)
170 | OPTIONS_LABELS_INPUT=$(seq $(grep MemTotal /proc/meminfo | awk '{printf "%.0f\n", $2/1024/1024/2}') -${zil_seq_size} 8 | sed 's/$/GB/' | sed '1 s/$/ (Recommended)/')
171 | # Make selection
172 | makeselect_input1 "$OPTIONS_VALUES_INPUT" "$OPTIONS_LABELS_INPUT"
173 | singleselect SELECTED "$OPTIONS_STRING"
174 | # Set ZIL size
175 | zil_size_var="$RESULTS"
176 | fi
177 |
178 | # Set ARC partition size (based on smallest device)
179 | arc_size_var=$(( $(printf '%s\n' "${inputcachedisk_LIST[@]}" | sort -t ':' -k 9 | awk -F':' 'NR==1{print $9}' | sed 's/[[:alpha:]]//' | awk '{print ($0-int($0)<0.499)?int($0):int($0)+1}') * ( 100 - $disk_op_ssd ) / 100 - $zil_size_var ))
180 |
181 | # Convert final ZIL and ARC sizes from GB to 512-byte sector counts (sfdisk units)
182 | zil_size="$(( ($zil_size_var * 1073741824)/512 ))"
183 | arc_size="$(( ($arc_size_var * 1073741824)/512 ))"
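    | # Worked example (hypothetical 120GB cache device, 10% over-provisioning,
    | # 8GB ZIL): arc_size_var = 120 * (100 - 10) / 100 - 8 = 100GB. The
    | # '* 1073741824 / 512' step converts GB into the 512-byte sector counts
    | # that sfdisk expects below.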
184 |
185 | # GPT label & wipe devices
186 | msg "GPT label & wipe ZFS cache devices..."
187 | while read dev
188 | do
189 | # Full device wipeout
190 | dd if=/dev/urandom of=$dev count=1 bs=1M conv=notrunc 2>/dev/null
191 | # Label device
192 | echo 'label: gpt' | sfdisk --quiet --wipe=always --force $dev
193 | info "GPT labelled: $dev"
194 | done < <( printf '%s\n' "${inputcachedisk_LIST[@]}" | awk -F':' '{ print $2 }' ) # file listing of disks
195 | echo
196 |
197 | # Partition ZFS cache device(s)
198 | msg "Partition ZFS cache device(s)..."
199 | part_LIST=()
200 | part_LIST+=( ",$zil_size,L" )
201 | part_LIST+=( ",$arc_size,L" )
202 | inputcachedevLIST=()
203 | while read dev
204 | do
205 | #---- Create disk partitions
206 | sfdisk --quiet --force $dev <<<$(printf '%s\n' "${part_LIST[@]}")
207 | udevadm settle
208 |
209 | #---- Zil cache
210 | i=1
211 | # Remove the "/dev/" prefix from the device name
212 | dev_name=$(echo "$dev$i" | sed 's/\/dev\///g')
213 | # Get the by-id name for the specified device
214 | by_id_name="$(ls -l /dev/disk/by-id | grep -v "wwn-" | grep "$dev_name" | awk '{print $9}')"
215 | # Create cache disk input list
216 | inputcachedevLIST+=( "$dev${i}:$by_id_name:zil" )
217 | info "ZIL cache partition created: $dev${i}"
218 |
219 | #---- Arc cache
220 | i=$(( $i + 1 ))
221 | # Remove the "/dev/" prefix from the device name
222 | dev_name=$(echo "$dev$i" | sed 's/\/dev\///g')
223 | # Get the by-id name for the specified device
224 | by_id_name=$(ls -l /dev/disk/by-id | grep -v "wwn-" | grep "$dev_name" | awk '{print $9}')
225 | # Create cache disk input list
226 | inputcachedevLIST+=( "$dev${i}:$by_id_name:arc" )
227 | info "ARC cache partition created: $dev${i}"
228 | done < <( printf '%s\n' "${inputcachedisk_LIST[@]}" | awk -F':' '{ print $2 }' ) # file listing of disks
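    | # For reference: each cache device now carries partition 1 (ZIL/log) and
    | # partition 2 (ARC/cache), recorded by their stable /dev/disk/by-id names
    | # so the zpool arguments survive any /dev/sdX renumbering.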
229 |
230 |
231 | # Create ZFS ZIL arg
232 | if [ "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)" = 1 ]
233 | then
234 | zil_ARG=$(printf '%s\n' "${inputcachedevLIST[@]}" | awk -F':' 'BEGIN{OFS=FS} { if ($3 == "zil") print "/dev/disk/by-id/"$2 }')
235 | zil_DISPLAY="ZIL cache set:\n 1. "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)"x disk Raid0 (single only)"
236 | elif [ "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)" -gt 1 ] && [ "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)" -le 3 ]
237 | then
238 | zil_ARG=$(printf '%s\n' "${inputcachedevLIST[@]}" | awk -F':' 'BEGIN{OFS=FS} { if ($3 == "zil") print "/dev/disk/by-id/"$2 }' | xargs | sed 's/^/mirror /')
239 | zil_DISPLAY="ZIL cache set:\n 1. "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)"x disk Raid1 (mirror only)"
240 | elif [ "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)" -ge 4 ]
241 | then
242 | count="$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)"
243 | if [ "$((count% 2))" -eq 0 ]
244 | then
245 | # Even cnt
246 | zil_ARG=$(printf '%s\n' "${inputcachedevLIST[@]}" | awk -F':' 'BEGIN{OFS=FS} { if ($3 == "zil") print "/dev/disk/by-id/"$2 }' | xargs | sed '-es/ / mirror /'{1000..1..2} | sed 's/^/mirror /')
247 | zil_DISPLAY="ZIL cache set:\n 1. $(( "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)" / 2 ))x disk Raid0 (stripe).\n 2. $(( "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)" / 2 ))x disk Raid1 (mirror)"
248 | else
249 | # Odd cnt (fix)
250 | zil_ARG=$(printf '%s\n' "${inputcachedevLIST[@]}" | awk -F':' 'BEGIN{OFS=FS} { if ($3 == "zil") print "/dev/disk/by-id/"$2 }' | sed '$ d' | xargs | sed '-es/ / mirror /'{1000..1..2} | sed 's/^/mirror /')
251 | zil_DISPLAY="ZIL cache set:\n 1. $(( "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)" / 2 ))x disk Raid0 (stripe).\n 2. $(( "$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "zil" | wc -l)" / 2 ))x disk Raid1 (mirror)"
252 | fi
253 | fi
254 |
255 | # Create ZFS ARC arg
256 | if [ $(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "arc" | wc -l) -le 3 ]
257 | then
258 | arc_ARG=$(printf '%s\n' "${inputcachedevLIST[@]}" | awk -F':' 'BEGIN{OFS=FS} { if ($3 == "arc") print "/dev/disk/by-id/"$2 }' | xargs)
259 | arc_DISPLAY="ARC cache set:\n 1. $(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "arc" | wc -l)x disk Raid0 (stripe only)"
260 | elif [ $(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "arc" | wc -l) -ge 4 ]
261 | then
262 | count=$(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "arc" | wc -l)
263 | if [ "$((count% 2))" -eq 0 ]
264 | then
265 | # Even cnt
266 | arc_ARG=$(printf '%s\n' "${inputcachedevLIST[@]}" | awk -F':' 'BEGIN{OFS=FS} { if ($3 == "arc") print "/dev/disk/by-id/"$2 }' | xargs | sed '-es/ / mirror /'{1000..1..2} | sed 's/^/mirror /')
267 | arc_DISPLAY="ARC cache set:\n 1. $(( $(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "arc" | wc -l) / 2 ))x disk Raid0 (stripe)\n 2. $(( $(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "arc" | wc -l) / 2 ))x disk Raid1 (mirror)"
268 | else
269 | # Odd cnt (fix)
270 | arc_ARG=$(printf '%s\n' "${inputcachedevLIST[@]}" | awk -F':' 'BEGIN{OFS=FS} { if ($3 == "arc") print "/dev/disk/by-id/"$2 }' | sed '$ d' | xargs | sed '-es/ / mirror /'{1000..1..2} | sed 's/^/mirror /')
271 | arc_DISPLAY="ARC cache set:\n 1. $(( $(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "arc" | wc -l) / 2 ))x disk Raid0 (stripe)\n 2. $(( $(printf '%s\n' "${inputcachedevLIST[@]}" | grep -w "arc" | wc -l) / 2 ))x disk Raid1 (mirror)"
272 | fi
273 | fi
274 |
275 | #---- Apply ZFS Cache to ZPool
276 |
277 | section "Apply ZFS Cache to an existing ZPool"
278 |
279 | # Select a ZPool to add cache to
280 | if [[ -z "$POOL" ]]
281 | then
282 | OPTIONS_VALUES_INPUT=$(printf '%s\n' "${zpool_LIST[@]}" | sed -e '$a\TYPE00')
283 | OPTIONS_LABELS_INPUT=$(printf '%s\n' "${zpool_LIST[@]}" | sed -e '$a\None. Exit this ZFS Cache installer')
284 | # Make selection
285 | makeselect_input1 "$OPTIONS_VALUES_INPUT" "$OPTIONS_LABELS_INPUT"
286 | singleselect SELECTED "$OPTIONS_STRING"
287 | if [ "$RESULTS" = TYPE00 ]
288 | then
289 | # Exit installer
290 | msg "You have chosen not to proceed. Bye..."
291 | echo
292 | return
293 | else
294 | # Set ZPOOL
295 | POOL="$RESULTS"
296 | fi
297 | fi
298 |
299 | # Add ZFS Cache to ZPool
300 | msg "Creating ZIL Cache..."
301 | zpool add -f $POOL log $zil_ARG
302 | info "$zil_DISPLAY"
303 | echo
304 |
305 | msg "Creating ARC Cache..."
306 | zpool add -f $POOL cache $arc_ARG
307 | info "$arc_DISPLAY"
308 | echo
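    | # For reference: a quick sketch to confirm the new vdevs - they appear
    | # under the 'logs' and 'cache' headings of the pool layout:
    | #   zpool status $POOL
    | #   zpool iostat -v $POOL   # per-vdev activity, including cache devices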
309 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/shared/pve_nas_fs_list.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_fs_list.sh
4 | # Description: Create fs list for NAS LVM, ZFS, Basic
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Source -----------------------------------------------------------------------
8 | #---- Dependencies -----------------------------------------------------------------
9 |
10 | # PVE NAS bash utility
11 | source $COMMON_DIR/nas/src/nas_bash_utility.sh
12 |
13 | # Requires arg 'usb' or 'onboard' to be set in source command
14 | # Sets the validation input type: input_lvm_vgname_val usb
15 | if [ -z "$1" ]
16 | then
17 | input_tran=""
18 | input_tran_arg=""
19 | elif [[ "$1" =~ 'usb' ]]
20 | then
21 | input_tran='(usb)'
22 | input_tran_arg='usb'
23 | elif [[ "$1" =~ 'onboard' ]]
24 | then
25 | input_tran='(sata|ata|scsi|nvme)'
26 | input_tran_arg='onboard'
27 | fi
28 |
29 | #---- Static Variables -------------------------------------------------------------
30 |
31 | # Basic storage disk label
32 | basic_disklabel='(.*_hba(_[0-9])?|.*_usb(_[0-9])?|.*_onboard(_[0-9])?)$'
33 |
34 | #---- Other Variables --------------------------------------------------------------
35 |
36 | # Disk Storage minimum size (GB)
37 | stor_min='30'
38 |
39 | #---- Other Files ------------------------------------------------------------------
40 | #---- Functions --------------------------------------------------------------------
41 | #---- Body -------------------------------------------------------------------------
42 |
43 | #---- Prerequisites
44 |
45 | # Clean out inactive/dormant /etc/fstab mounts
46 | while read target
47 | do
48 | if [[ ! $(findmnt $target -n -o source) ]]
49 | then
50 | msg "Deleting inactive mount point..."
51 | sed -i "\|$target|d" /etc/fstab
52 | info "Deleted inactive mount point: ${YELLOW}$target${NC}"
53 | echo
54 | fi
55 | done < <( awk '$2 ~ /^\/mnt\/.*/ {print $2}' /etc/fstab ) # /mnt mount point listing
56 |
57 | # Wakeup USB disks
58 | wake_usb
59 |
60 | #---- Create lists
61 |
62 | # Create storage list array
63 | storage_list
64 |
65 | # Create a working list array
66 | stor_LIST
67 |
68 | #---- LVM option list
69 | # 1=PATH:2=KNAME:3=PKNAME:4=FSTYPE:5=TRAN:6=MODEL:7=SERIAL:8=SIZE:9=TYPE:10=ROTA:11=UUID:12=RM:13=LABEL:14=ZPOOLNAME:15=SYSTEM
70 |
71 | # LVM options
72 | # build:description:tran:size|action:rota
73 | lvm_option_input=$(printf '%s\n' "${storLIST[@]}" | awk -F':' '$5 != "usb"' | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" \
74 | 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
75 | # Type01: Mount an existing LV
76 | {if($1 !~ /.*(root|tmeta|tdata|tpool|swap)$/ && $5 ~ input_tran && $9 == "lvm" && $13 !~ basic_disklabel && $15 == 0 && (system("lvs " $1 " --quiet --noheadings --segments -o type 2> /dev/null | grep -v 'thin-pool' | grep -q 'thin' > /dev/null") == 0 || system("lvs " $1 " --quiet --noheadings --segments -o type 2> /dev/null | grep -v 'thin-pool' | grep -q 'linear' > /dev/null") == 0)) \
77 | {cmd = "lvs " $1 " --noheadings -o lv_name | grep -v 'thinpool' | uniq | xargs | sed -r 's/[[:space:]]/,/g'"; cmd | getline lv_name; close(cmd); print "Mount existing LV", "LV name - "lv_name, "-", $8, "TYPE01", lv_name }} \
78 | # Type02: Create LV in an existing Thin-pool
79 | {if($1 !~ /.*(root|tmeta|tdata|tpool|swap)$/ && $5 ~ input_tran && $4 == "" && $9 == "lvm" && $13 !~ basic_disklabel && $15 == 0 && system("lvs " $1 " --quiet --noheadings --segments -o type 2> /dev/null | grep -q 'thin-pool' > /dev/null") == 0 ) \
80 | {cmd = "lvs " $1 " --noheadings -o lv_name | uniq | xargs | sed -r 's/[[:space:]]/,/g'"; cmd | getline thinpool_name; close(cmd); print "Create LV in existing Thin-pool", "Thin-pool name - "thinpool_name, "-", $8, "TYPE02", thinpool_name }} \
81 | # Type03: Create LV in an existing VG
82 | {if ($5 ~ input_tran && $4 == "LVM2_member" && $13 !~ basic_disklabel && $15 == 0) \
83 | print "Create LV in an existing VG", "VG name - "$14, "-", $8, "TYPE03", $14 } \
84 | # Type04: Destroy VG
85 | {if ($5 ~ input_tran && $4 == "LVM2_member" && $13 !~ basic_disklabel && $15 == 0) { cmd = "lvs " $14 " --noheadings -o lv_name | xargs | sed -r 's/[[:space:]]/,/g'"; cmd | getline $16; close(cmd); print "Destroy VG ("$14")", "Destroys LVs/Pools - "$16, "-", "-", "TYPE04", $14 }} \
86 | # Type05: Build a new LVM VG/LV - SSD Disks
87 | {if ($5 ~ input_tran && $3 == 0 && $4 != "LVM2_member" && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == 0 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { ssd_count++ }} END { if (ssd_count >= 1) print "Build a new LVM VG/LV - SSD Disks", "Select from "ssd_count"x SSD disks", $5, "-", "TYPE05", "0" } \
88 | # Type06: Build a new LVM VG/LV - HDD Disks
89 | {if ($5 ~ input_tran && $3 == 0 && $4 != "LVM2_member" && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == 1 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { hdd_count++ }} END { if (hdd_count >= 1) print "Build a new LVM VG/LV - HDD Disks", "Select from "hdd_count"x HDD disks", $5, "-", "TYPE06", "1" }' \
90 | | sed '/^$/d' \
91 | | sort -t: -s -k 4,4 \
92 | | awk -F':' '!seen[$1$2]++')
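    | # For reference: the closing "!seen[$1$2]++" is the standard awk
    | # de-duplication idiom - a row prints only the first time its key
    | # (columns 1 and 2 concatenated) is seen, pruning duplicate menu rows.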
93 | # Create selection labels & values
94 | lvm_option_labels=$(printf '%s\n' "$lvm_option_input" | sed '/^$/d' | cut -d: -f1,2,3,4)
95 | lvm_option_values=$(printf '%s\n' "$lvm_option_input" | sed '/^$/d' | cut -d: -f5,6)
96 |
97 |
98 | #---- ZFS option list
99 | # 1=PATH:2=KNAME:3=PKNAME:4=FSTYPE:5=TRAN:6=MODEL:7=SERIAL:8=SIZE:9=TYPE:10=ROTA:11=UUID:12=RM:13=LABEL:14=ZPOOLNAME:15=SYSTEM
100 |
101 | # ZFS options
102 | # build:description:tran:size|action:zpoolname
103 | zfs_option_input=$(printf '%s\n' "${storLIST[@]}" | awk -F':' '$5 != "usb"' | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" \
104 | 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
105 | # Type01: Use Existing ZPool
106 | {if ($5 ~ input_tran && $3 != 0 && $4 == "zfs_member" && $9 == "part" && $13 !~ basic_disklabel && $14 !~ /^[0-9]+$/ && $15 == 0) print "Use Existing ZPool - "$14"", "-", $8, "-", "TYPE01", $14 } \
107 | # Type02: Destroy & Wipe ZPool
108 | {if ($5 ~ input_tran && $3 != 0 && $4 == "zfs_member" && $9 == "part" && $13 !~ basic_disklabel && $14 !~ /^[0-9]+$/ && $15 == 0) print "Destroy & Wipe ZPool - "$14"", "-", $8, "-", "TYPE02", $14 } \
109 | # Type03: Create new ZPool - SSD
110 | {if ($5 ~ input_tran && $3 == 0 && $4 != "LVM2_member" && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == 0 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { ssd_count++ }} END { if (ssd_count >= 1) print "Create new ZPool - SSD", ssd_count"x SSD disks available", "-", "-", "TYPE03", "0"} \
111 | # Type04: Create new ZPool - HDD
112 | {if ($5 ~ input_tran && $3 == 0 && $4 != "LVM2_member" && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $10 == 1 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { hdd_count++ }} END { if (hdd_count >= 1) print "Create new ZPool - HDD", hdd_count"x HDD disks available", "-", "-", "TYPE03", "1"}' \
113 | | sed '/^$/d' \
114 | | awk -F':' '!seen[$1]++')
115 | # Create selection labels & values
116 | zfs_option_labels=$(printf '%s\n' "$zfs_option_input" | sed '/^$/d' | cut -d: -f1,2,3,4)
117 | zfs_option_values=$(printf '%s\n' "$zfs_option_input" | sed '/^$/d' | cut -d: -f5,6)
118 | # Create display
119 | zfs_display=$(printf '%s\n' "$zfs_option_input" | sed '/^$/d' | cut -d: -f1,2,3,4)
120 |
121 |
122 | #---- Basic option list
123 | # 1=PATH:2=KNAME:3=PKNAME:4=FSTYPE:5=TRAN:6=MODEL:7=SERIAL:8=SIZE:9=TYPE:10=ROTA:11=UUID:12=RM:13=LABEL:14=ZPOOLNAME:15=SYSTEM
124 |
125 | # Basic options
126 | # build:description:tran:size|action:all
127 | basic_option_input=$(printf '%s\n' "${storLIST[@]}" | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" \
128 | 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
129 | # Type01: Basic single disk build
130 | {if ($5 ~ input_tran && $3 == 0 && $4 != "LVM2_member" && $4 != "zfs_member" && $9 == "disk" && size >= stor_min && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) print "Basic single disk build", "Format "$1" to ext4", $5, $8, "TYPE01", $0} \
131 | # TYPE02: Mount existing NAS storage disk
132 | {if ($5 ~ input_tran && $3 != 0 && $4 == "ext4" && $9 == "part" && size >= stor_min && $13 ~ basic_disklabel && $14 == 0 && $15 == 0) print "Mount existing NAS storage disk", "Mount "$1" (disk label - "$13")", $5, $8, "TYPE02", $0} \
133 | # TYPE03: Destroy & wipe disk
134 | {if ($5 ~ input_tran && $3 != 0 && $4 == "ext4" && $9 == "part" && size >= stor_min && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) print "Destroy & wipe disk", "Destroy disk /dev/"$3" (disk label - "$13")", $5, $8, "TYPE03", $0} \
135 | # TYPE04: Destroy, wipe and use partition
136 | {if ($5 ~ input_tran && $3 != 0 && ($4 == "ext2" || $4 == "ext3" || $4 == "ext4" || $4 == "btrfs" || $4 == "xfs") && $9 == "part" && size >= stor_min && $14 == 0 && $15 == 0) print "Destroy, wipe & use partition", "Use partition "$1" (disk label - "$13")", $5, $8, "TYPE04", $0}' \
137 | | sed '/^$/d' \
138 | | awk -F':' '!seen[$1$2$3$4]++')
139 | # Create selection labels & values
140 | basic_option_labels=$(printf '%s\n' "$basic_option_input" | sed '/^$/d' | cut -d: -f1,2,3,4)
141 | basic_option_values=$(printf '%s\n' "$basic_option_input" | sed '/^$/d' | cut -d: -f5-)
142 |
143 |
144 | #---- FS option count
145 |
146 | # LVM option count
147 | lvm_option_cnt=$(echo "$lvm_option_labels" | sed '/^$/d' | wc -l)
148 | # ZFS option cnt
149 | zfs_option_cnt=$(echo "$zfs_option_labels" | sed '/^$/d' | wc -l)
150 | # Basic disk count
151 | basic_option_cnt=$(echo "$basic_option_labels" | awk -F':' '$3 != "usb"' | sed '/^$/d' | wc -l)
152 | # USB disk count
153 | usb_option_cnt=$(echo "$basic_option_labels" | awk -F':' '$3 == "usb"' | sed '/^$/d' | wc -l)
154 |
155 |
156 | #---- Validate available storage to proceed
157 |
158 | # Check if any available storage is available (usb and onboard)
159 | # If no disk or storage is available then exits
160 | if [ "$usb_option_cnt" = 0 ] && [ "$lvm_option_cnt" = 0 ] && [ "$zfs_option_cnt" = 0 ] && [ "$basic_option_cnt" = 0 ]
161 | then
162 | # Exit installer
163 | warn "We could NOT detect any new available disks, LVs, ZPools or Basic NAS storage disks. New disk(s) might have been wrongly identified as 'system drives' if they contain Linux system or OS partitions. To fix this issue, manually format the disk erasing all data before running this installation again. All USB disks must have a data capacity greater than ${stor_min}G to be detected.
164 | Exiting the installation script. Bye..."
165 | echo
166 | exit 0
167 | fi
168 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/shared/pve_nas_select_fs_build.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_select_fs_build.sh
4 | # Description: Select storage fs for NAS internal SATA, NVMe or USB disk setup
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Source -----------------------------------------------------------------------
8 | #---- Dependencies -----------------------------------------------------------------
9 |
10 | # NAS bash utility
11 | source $COMMON_DIR/nas/src/nas_bash_utility.sh
12 |
13 | # Requires arg 'usb' or 'onboard' to be set in source command
14 | # Sets the validation input type: input_lvm_vgname_val usb
15 | if [ -z "$1" ]
16 | then
17 | input_tran=""
18 | input_tran_arg=""
19 | elif [[ "$1" =~ 'usb' ]]
20 | then
21 | input_tran='(usb)'
22 | input_tran_arg='usb'
23 | elif [[ "$1" =~ 'onboard' ]]
24 | then
25 | input_tran='(sata|ata|scsi|nvme)'
26 | input_tran_arg='onboard'
27 | fi
28 |
29 | #---- Static Variables -------------------------------------------------------------
30 |
31 | # Basic storage disk label
32 | basic_disklabel='(.*_hba(_[0-9])?|.*_usb(_[0-9])?|.*_onboard(_[0-9])?)$'
33 |
34 | #---- Other Variables --------------------------------------------------------------
35 |
36 | # USB Disk Storage minimum size (GB)
37 | stor_min='5'
38 |
39 | #---- Other Files ------------------------------------------------------------------
40 | #---- Functions --------------------------------------------------------------------
41 | #---- Body -------------------------------------------------------------------------
42 |
43 | #---- Prerequisites
44 |
45 | # Clean out inactive/dormant /etc/fstab mounts
46 | while read target
47 | do
48 | if [[ ! $(findmnt $target -n -o source) ]]
49 | then
50 | msg "Deleting inactive mount point..."
51 | sed -i "\|$target|d" /etc/fstab
52 | info "Deleted inactive mount point: ${YELLOW}$target${NC}"
53 | echo
54 | fi
55 | done < <( cat /etc/fstab | awk '$2 ~ /^\/mnt\/.*/ {print $2}' ) # /mnt mount point listing
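# Non-destructive preview of the same check: list stale /mnt fstab entries
# without editing /etc/fstab.
awk '$2 ~ /^\/mnt\/.*/ {print $2}' /etc/fstab | while read target
do
  findmnt "$target" -n -o source >/dev/null || echo "inactive: $target"
done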
56 |
57 | # # Wakeup USB disks
58 | # wake_usb
59 |
60 | # # Create storage list array
61 | # storage_list
62 |
63 | # # Create a working list array
64 | # stor_LIST
65 |
66 | #---- Create fs/disk lists by type (lvm,zfs,basic)
67 |
68 | # Create fs/disk lists for LVM, ZFS, Basic (onboard & usb)
69 | source $SHARED_DIR/pve_nas_fs_list.sh
70 |
71 |
72 | # # LVM options
73 | # lvm_options=$(printf '%s\n' "${storLIST[@]}" | awk -F':' '$5 != "usb"' | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" \
74 | # 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
75 | # # Type01: Mount an existing LV
76 | # {if($1 !~ /.*(root|tmeta|tdata|tpool|swap)$/ && $5 ~ input_tran && $9 == "lvm" && $13 !~ basic_disklabel && $15 == 0 && (system("lvs " $1 " --quiet --noheadings --segments -o type 2> /dev/null | grep -v 'thin-pool' | grep -q 'thin' > /dev/null") == 0 || system("lvs " $1 " --quiet --noheadings --segments -o type 2> /dev/null | grep -v 'thin-pool' | grep -q 'linear' > /dev/null") == 0)) \
77 | # {cmd = "lvs " $14 " --noheadings -o lv_name | grep -v 'thinpool' | uniq | xargs | sed -r 's/[[:space:]]/,/g'"; cmd | getline lv_list; close(cmd); print "Mount an existing LV", "Available LVs - "lv_list, "-", $8, $14, "TYPE01"}} \
78 | # # Type02: Create LV in an existing Thin-pool
79 | # {if($1 !~ /.*(root|tmeta|tdata|tpool|swap)$/ && $5 ~ input_tran && $4 == "" && $9 == "lvm" && $13 !~ basic_disklabel && $15 == 0 && system("lvs " $1 " --quiet --noheadings --segments -o type 2> /dev/null | grep -q 'thin-pool' > /dev/null") == 0 ) \
80 | # {cmd = "lvs " $14 " --noheadings -o pool_lv | uniq | xargs | sed -r 's/[[:space:]]/,/g'"; cmd | getline thin_list; close(cmd); print "Create LV in an existing Thin-pool", "Available pools - "thin_list, "-", $8, $14, "TYPE02"}} \
81 | # # Type03: Create LV in an existing VG
82 | # {if ($5 ~ input_tran && $4 == "LVM2_member" && $13 !~ basic_disklabel && $15 == 0) \
83 | # print "Create LV in an existing VG", "VG name - "$14, "-", $8, $14, "TYPE03" } \
84 | # # Type04: Destroy VG
85 | # {if ($5 ~ input_tran && $4 == "LVM2_member" && $13 !~ basic_disklabel && $15 == 0) { cmd = "lvs " $14 " --noheadings -o lv_name | xargs | sed -r 's/[[:space:]]/,/g'"; cmd | getline $16; close(cmd); print "Destroy VG ("$14")", "Destroys LVs/Pools - "$16, "-", $8, $14, "TYPE04" }} \
86 | # # Type05: Build a new LVM VG/LV - SSD
87 | # {if ($5 ~ input_tran && $3 == 0 && ($4 != "LVM2_member" || $4 != "zfs_member") && $9 == "disk" && size >= stor_min && $10 == 0 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { ssd_count++ }} END { if (ssd_count >= 1) print "Build a new LVM VG/LV - SSD", ssd_count"x SSD disks", $5, "-", "-", "TYPE05" } \
88 | # # Type05: Build a new LVM VG/LV - HDD
89 | # {if ($5 ~ input_tran && $3 == 0 && ($4 != "LVM2_member" || $4 != "zfs_member") && $9 == "disk" && size >= stor_min && $10 == 1 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { hdd_count++ }} END { if (hdd_count >= 1) print "Build a new LVM VG/LV - HDD", hdd_count"x HDD disks", $5, "-", "-", "TYPE06" }' | sort -t: -us -k 1,1 -k 2,2 -k 5,5 \
90 | # | awk -F':' '!seen[$1$2$4$5]++' \
91 | # | sed '1 i\LVM OPTIONS:DESCRIPTION::SIZE:VG NAME:SELECTION')
92 | # # ZFS options
93 | # zfs_options=$(printf '%s\n' "${storLIST[@]}" | awk -F':' '$5 != "usb"' | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" 'BEGIN{OFS=FS} $8 ~ /G$/ {size=0.0+$8} \
94 | # # Use existing ZPool
95 | # {if ($5 ~ input_tran && $3 != 0 && $4 == "zfs_member" && $9 == "part" && $13 !~ basic_disklabel && $14!=/[0-9]+/ && $15 == 0) print "Use Existing ZPool", "-", "-", $8, $14, "TYPE01" } \
96 | # # Destroy & Wipe ZPool
97 | # {if ($5 ~ input_tran && $3 != 0 && $4 == "zfs_member" && $9 == "part" && $13 !~ basic_disklabel && $14!=/[0-9]+/ && $15 == 0) print "Destroy & Wipe ZPool", "-", "-", $8, $14, "TYPE02" } \
98 | # # Create new ZPool - SSD
99 | # {size=0.0+$8; if ($5 ~ input_tran && $3 == 0 && ($4 != "LVM2_member" || $4 != "zfs_member") && $9 == "disk" && size >= stor_min && $10 == 0 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { ssd_count++ }} END { if (ssd_count >= 1) print "Create new ZPool - SSD", ssd_count"x SSD disks", "-", "-", "-", "TYPE03" } \
100 | # # Create new ZPool - HDD
101 | # {size=0.0+$8; if ($5 ~ input_tran && $3 == 0 && ($4 != "LVM2_member" || $4 != "zfs_member") && $9 == "disk" && size >= stor_min && $10 == 1 && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) { hdd_count++ }} END { if (hdd_count >= 1) print "Create new ZPool - HDD", hdd_count"x HDD disks", "-", "-", "-", "TYPE04" }' \
102 | # | awk -F':' '!seen[$1$5]++' \
103 | # | sed '1 i\ZFS OPTIONS:DESCRIPTION::SIZE:ZFS POOL:SELECTION')
104 |
105 | # # basic_options=$(printf '%s\n' "${storLIST[@]}" | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" \
106 | # # 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
107 | # # # TYPE07: Basic single disk build
108 | # # {if ($5 ~ input_tran && $3 == 0 && ($4 != "LVM2_member" || $4 != "zfs_member") && $9 == "disk" && size >= stor_min && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) print "Basic single disk build", "Format "$1" only", $8, "-", "TYPE07" } \
109 | # # # TYPE08: Mount existing NAS storage disk
110 | # # {if ($5 ~ input_tran && $3 != 0 && $4 == "ext4" && $9 == "part" && size >= stor_min && $13 ~ basic_disklabel && $14 == 0 && $15 == 0) print "Mount existing NAS storage disk", "Mount "$1" (disk label - "$13")", $8, "-", "TYPE08" } \
111 | # # # TYPE09: Destroy and Wipe disk
112 | # # {if ($5 ~ input_tran && $3 != 0 && $4 == "ext4" && $9 == "part" && size >= stor_min && $13 ~ basic_disklabel && $14 == 0 && $15 == 0) print "Destroy & wipe disk", "Destroy disk /dev/"$3" (disk label - "$13")", $8, "-", "TYPE09" }' \
113 | # # | awk -F':' '!seen[$1$2$3$4]++' \
114 | # # | sed '1 i\BASIC OPTIONS:DESCRIPTION:SIZE::SELECTION')
115 |
116 | # # Basic single disk option
117 | # basic_options=$(printf '%s\n' "${storLIST[@]}" | awk -F':' -v stor_min="$stor_min" -v input_tran="$input_tran" -v basic_disklabel="$basic_disklabel" \
118 | # 'BEGIN{OFS=FS} {$8 ~ /G$/} {size=0.0+$8} \
119 | # # TYPE07: Basic single disk build
120 | # {if ($5 ~ input_tran && $3 == 0 && ($4 != "LVM2_member" || $4 != "zfs_member") && $9 == "disk" && size >= stor_min && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) print "Basic single disk build", "Format "$1" only", $5, $8, "-", "TYPE07" } \
121 | # # TYPE08: Mount existing NAS storage disk
122 | # {if ($5 ~ input_tran && $3 != 0 && $4 == "ext4" && $9 == "part" && size >= stor_min && $13 ~ basic_disklabel && $14 == 0 && $15 == 0) print "Mount existing NAS storage disk", "Mount "$1" (disk label - "$13")", $5, $8, "-", "TYPE08" } \
123 | # # TYPE09: Destroy and Wipe disk
124 | # {if ($5 ~ input_tran && $3 != 0 && $4 == "ext4" && $9 == "part" && size >= stor_min && $13 !~ basic_disklabel && $14 == 0 && $15 == 0) print "Destroy & wipe disk", "Destroy disk /dev/"$3" (disk label - "$13")", $5, $8, "-", "TYPE09" } \
125 | # # TYPE10: Destroy, wipe and use partition
126 | # {if ($5 ~ input_tran && $3 != 0 && ($4 == "ext2" || $4 == "ext3" || $4 == "ext4" || $4 == "btrfs" || $4 == "xfs") && $9 == "part" && size >= stor_min && $14 == 0 && $15 == 0) print "Destroy, wipe & use partition", "Use partition "$1" (disk label - "$13")", $5, $8, "-", "TYPE10" }' \
127 | # | awk -F':' '!seen[$1$2$3$4]++' \
128 | # | sed '1 i\BASIC OPTIONS:DESCRIPTION::SIZE::SELECTION')
129 |
130 | # # LVM option count
131 | # lvm_option_cnt=$(echo "$lvm_options" | sed '/^$/d' | sed '1d' | wc -l)
132 | # # ZFS option cnt
133 | # zfs_option_cnt=$(echo "$zfs_options" | sed '/^$/d' | sed '1d' | wc -l)
134 | # # Basic disk count
135 | # basic_option_cnt=$(echo "$basic_options" | sed '/^$/d' | sed '1d' | awk -F':' '$3 != "usb"' | wc -l)
136 | # # USB disk count
137 | # usb_option_cnt=$(echo "$basic_options" | sed '/^$/d'| sed '1d' | awk -F':' '$3 == "usb"' | wc -l)
138 |
139 | # # Check if any available storage is available (usb and onboard)
140 | # # If no disk or storage is available then exits
141 | # if [ "$usb_option_cnt" = 0 ] && [ "$lvm_option_cnt" = 0 ] && [ "$zfs_option_cnt" = 0 ] && [ "$basic_option_cnt" = 0 ]
142 | # then
143 | # # Exit installer
144 | # warn "We could NOT detect any new available disks, LVs, ZPools or Basic NAS storage disks. New disk(s) might have been wrongly identified as 'system drives' if they contain Linux system or OS partitions. To fix this issue, manually format the disk erasing all data before running this installation again. All USB disks must have a data capacity greater than ${stor_min}G to be detected.
145 | # Exiting the installation script. Bye..."
146 | # echo
147 | # exit 0
148 | # fi
149 |
150 |
151 | #----- Set installer trans selection (check for USB devices)
152 |
153 | # Sets the transport type (usb or onboard) if USB devices exist. If no USB devices are found, defaults to onboard
154 | if [ ! "$usb_option_cnt" = 0 ]
155 | then
156 | # Set installer trans selection
157 | msg "The installer has detected available USB devices. The User must select a storage option location..."
158 |
159 | # Create menu options
160 | OPTIONS_VALUES_INPUT=()
161 | OPTIONS_LABELS_INPUT=()
162 | # Onboard menu option
163 | if [ ! "$lvm_option_cnt" = 0 ] || [ ! "$zfs_option_cnt" = 0 ] || [ ! "$basic_option_cnt" = 0 ]
164 | then
165 | OPTIONS_VALUES_INPUT+=( "onboard" )
166 | OPTIONS_LABELS_INPUT+=( "Onboard SAS/SATA/NVMe/HBA storage (internal)" )
167 | fi
168 | # USB menu option
169 | if [ ! "$usb_option_cnt" = 0 ]
170 | then
171 | OPTIONS_VALUES_INPUT+=( "usb" )
172 | OPTIONS_LABELS_INPUT+=( "USB disk storage (external)" )
173 | fi
174 |
175 | # Run menu selection
176 | makeselect_input2
177 | singleselect SELECTED "$OPTIONS_STRING"
178 |
179 | # Set installer Trans option
180 | if [ "$RESULTS" = 'usb' ]
181 | then
182 | # Set for usb only
183 | input_tran='(usb)'
184 | input_tran_arg='usb'
185 | # Wakeup USB disks
186 | wake_usb
187 | # Create storage list array
188 | storage_list
189 | # Create a working list array
190 | stor_LIST
191 | elif [ "$RESULTS" = 'onboard' ]
192 | then
193 | # Set for onboard only
194 | input_tran='(sata|ata|scsi|nvme)'
195 | input_tran_arg='onboard'
196 | # Create storage list array
197 | storage_list
198 | # Create a working list array
199 | stor_LIST
200 | fi
201 | else
202 | # Set for onboard only
203 | input_tran='(sata|ata|scsi|nvme)'
204 | input_tran_arg='onboard'
205 | fi
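# Menu-helper calling pattern, inferred from its usage in this file (the
# helper internals live in the common submodule): fill the two parallel
# arrays, build $OPTIONS_STRING, then read the chosen value from $RESULTS.
OPTIONS_VALUES_INPUT=( "onboard" "usb" )                  # values returned
OPTIONS_LABELS_INPUT=( "Onboard storage" "USB storage" )  # labels displayed
makeselect_input2                        # builds $OPTIONS_STRING from the arrays
singleselect SELECTED "$OPTIONS_STRING"  # prompts the User; sets $RESULTS
[ "$RESULTS" = 'usb' ] && echo "USB path chosen"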
206 |
207 |
208 | #---- Make selection (onboard)
209 | if [ "$input_tran_arg" = 'onboard' ]
210 | then
211 |   display_msg="#### PLEASE READ CAREFULLY - STORAGE OPTIONS ####\n
212 | Depending on your available options you must choose either ZFS Raid, LVM Raid or Basic single disk storage for your NAS build. Basic single disk storage uses an ext4 file system and is the default for USB disk storage devices.
213 | 
214 | If an option to create new storage is missing, it's because the disk(s) may have been wrongly identified as 'system disks' or the disk contains a working ZFS, LVM or Basic NAS file system. To fix this issue, exit the installation and use the Proxmox PVE WebGUI to:
215 | 
216 | -- destroy a ZFS ZPool or LVM VG (which resides on the missing disk)
217 | -- run the PVE disk wipe tool on all the 'missing' disk devices
218 | 
219 | The above operations will result in permanent loss of data so make sure you select the correct disk. Re-run the installation and the disks should be available for selection."
220 |
221 | # Display options
222 | display_LIST=()
223 | OPTIONS_VALUES_INPUT=()
224 | OPTIONS_LABELS_INPUT=()
225 | echo
226 |
227 | # LVM build
228 | if [ ! "$lvm_option_cnt" = 0 ] && [ "$input_tran_arg" = 'onboard' ]
229 | then
230 | display_LIST+=( "$(printf '%s\n' "${lvm_option_labels}" | sort -t: -us -k 1,1 -k 2,2 -k 5,5 | sed '1 i\LVM OPTIONS:DESCRIPTION::SIZE:VG NAME:SELECTION')" )
231 | display_LIST+=( ":" )
232 | OPTIONS_VALUES_INPUT+=("STORAGE_LVM")
233 | OPTIONS_LABELS_INPUT+=("LVM Raid filesystem")
234 | fi
235 |
236 | # ZFS build
237 | if [ ! "$zfs_option_cnt" = 0 ] && [ "$input_tran_arg" = 'onboard' ]
238 | then
239 | display_LIST+=( "$(printf '%s\n' "${zfs_option_labels}" | sed '1 i\ZFS OPTIONS:DESCRIPTION::SIZE:ZFS POOL:SELECTION')" )
240 | display_LIST+=( ":" )
241 | OPTIONS_VALUES_INPUT+=( "STORAGE_ZFS" )
242 | OPTIONS_LABELS_INPUT+=( "ZFS Raid filesystem" )
243 | fi
244 |
245 | # Basic build (onboard)
246 | if [ ! "$basic_option_cnt" = 0 ] && [ "$input_tran_arg" = 'onboard' ]
247 | then
248 | display_LIST+=( "$(printf '%s\n' "${basic_option_labels}" | awk -F':' '$3 != "usb"' | sed '1 i\BASIC OPTIONS:DESCRIPTION::SIZE::SELECTION')" )
249 | display_LIST+=( ":" )
250 | OPTIONS_VALUES_INPUT+=( "STORAGE_BASIC" )
251 | OPTIONS_LABELS_INPUT+=( "Basic single disk filesystem" )
252 | fi
253 |
254 | # Add Exit option
255 | OPTIONS_VALUES_INPUT+=( "STORAGE_EXIT" )
256 | OPTIONS_LABELS_INPUT+=( "None - Exit this installer" )
257 |
258 | # Display msg
259 | msg_box "$display_msg"
260 |
261 | # Print available option list
262 | printf '%s\n' "${display_LIST[@]}" | cut -d: -f1,2,4 | column -s : -t -N "1,2,3" -d -W 2 -c 120 | indent2
263 |
264 | # Menu selection for onboard device
265 | makeselect_input2
266 | singleselect SELECTED "$OPTIONS_STRING"
267 | fi
268 |
269 | #---- Make selection (usb)
270 | if [ "$input_tran_arg" = 'usb' ]
271 | then
272 |   # Manually set RESULTS var for usb (always a Basic build)
273 | RESULTS='STORAGE_BASIC'
274 | fi
275 |
276 | #---- Run selection ----------------------------------------------------------------
277 |
278 | #---- Exit selection
279 | if [ "$RESULTS" = 'STORAGE_EXIT' ]
280 | then
281 | msg "You have chosen not to proceed. Aborting. Bye..."
282 | echo
283 | exit 0
284 | fi
285 |
286 | #---- Basic EXT4 STORAGE (onboard and usb)
287 | if [ "$RESULTS" = 'STORAGE_BASIC' ]
288 | then
289 | # Format disk
290 | source $SHARED_DIR/pve_nas_create_singledisk_build.sh "$input_tran_arg"
291 | fi
292 |
293 |
294 | #---- LVM STORAGE
295 | if [ "$RESULTS" = 'STORAGE_LVM' ]
296 | then
297 | # Create LVM
298 | source $SHARED_DIR/pve_nas_create_lvm_build.sh "$input_tran_arg"
299 | fi
300 |
301 |
302 | #---- ZFS STORAGE
303 | if [ "$RESULTS" = 'STORAGE_ZFS' ]
304 | then
305 | # Create ZFS
306 | source $SHARED_DIR/pve_nas_create_zfs_build.sh "$input_tran_arg"
307 | fi
308 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/omv/pve_nas_vm_nas_installer.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_vm_nas_installer.sh
4 | # Description: This script is for creating a PVE VM OMV based NAS
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 |
9 | #---- Source Github
10 | # bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_installer.sh)"
11 |
12 | #---- Source local Git
13 | # /mnt/pve/nas-01-git/ahuacate/pve-nas/pve_nas_installer.sh
14 |
15 | #---- Source -----------------------------------------------------------------------
16 | #---- Dependencies -----------------------------------------------------------------
17 |
18 | # Check SMTP Status
19 | check_smtp_status
20 |
21 | # Set Audio
22 | if [ -f "/proc/asound/cards" ]
23 | then
24 | # Set audio device
25 | if [[ $(cat /proc/asound/cards | grep -i 'HDA-Intel') ]]
26 | then
27 | VM_DEVICE_VAR='intel-hda'
28 | VM_DRIVER_VAR='none'
29 | elif [[ $(cat /proc/asound/cards | grep -i 'ICH9\|Intel ICH9') ]]
30 | then
31 | VM_DEVICE_VAR='ich9-intel-hda'
32 | VM_DRIVER_VAR='none'
33 | elif [[ $(cat /proc/asound/cards | grep -i 'ac97') ]]
34 | then
35 | VM_DEVICE_VAR='AC97'
36 | VM_DRIVER_VAR='none'
37 |   else
38 |     # Fallback: no supported audio device was detected
39 |     VM_DEVICE_VAR=''
40 |     VM_DRIVER_VAR=''
41 |   fi
42 | fi
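# Illustrative sanity check of the detection above (prints the branch that
# would match on this host):
if grep -qi 'HDA-Intel' /proc/asound/cards; then echo 'intel-hda'
elif grep -qi 'ICH9\|Intel ICH9' /proc/asound/cards; then echo 'ich9-intel-hda'
elif grep -qi 'ac97' /proc/asound/cards; then echo 'AC97'
else echo 'no supported audio device'; fi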
43 |
44 | #---- Static Variables -------------------------------------------------------------
45 |
46 | # Easy Script Section Head
47 | SECTION_HEAD='PVE OMV NAS'
48 |
49 | # PVE host IP
50 | PVE_HOST_IP=$(hostname -i)
51 | PVE_HOSTNAME=$(hostname)
52 |
53 | # SSHd Status (0 is enabled, 1 is disabled)
54 | SSH_ENABLE=1
55 |
56 | # Developer enable git mounts inside CT (0 is enabled, 1 is disabled)
57 | DEV_GIT_MOUNT_ENABLE=1
58 |
59 | # Validate & set architecture dependent variables
60 | ARCH=$(dpkg --print-architecture)
61 |
62 | # Set file source (path/filename) of preset variables for 'pvesource_vm_createvm.sh'
63 | PRESET_VAR_SRC="$( dirname "${BASH_SOURCE[0]}" )/$( basename "${BASH_SOURCE[0]}" )"
64 |
65 | #---- Other Variables --------------------------------------------------------------
66 |
67 | #---- Common Machine Variables
68 | # VM Type ( 'ct' or 'vm' only lowercase )
69 | VM_TYPE='vm'
70 | # Use DHCP. '0' to disable, '1' to enable.
71 | NET_DHCP='1'
72 | # Set address type 'dhcp4'/'dhcp6' or '0' to disable. Use in conjunction with 'NET_DHCP'.
73 | NET_DHCP_TYPE='dhcp4'
74 | # CIDR IPv4
75 | CIDR='24'
76 | # CIDR IPv6
77 | CIDR6='64'
78 | # SSHd Port
79 | SSH_PORT='22'
80 |
81 | #----[COMMON_GENERAL_OPTIONS]
82 | # Hostname
83 | HOSTNAME='nas-01'
84 | # Description for the vm/ct (one word only, no spaces). Shown in the web-interface vm/ct's summary.
85 | DESCRIPTION=''
86 | # Allocated memory or RAM (MiB). Minimum 512 MiB. This is the maximum available memory when you use the balloon device.
87 | MEMORY='2048'
88 | # Limit number of CPU sockets to use. Value 0 indicates no CPU limit.
89 | CPULIMIT='0'
90 | # CPU weight for a VM. Argument is used in the kernel fair scheduler. The larger the number is, the more CPU time this VM gets.
91 | CPUUNITS='1024'
92 | # The number of cores assigned to the vm/ct. Do not edit - it's auto-set.
93 | CORES='1'
94 |
95 | #----[COMMON_NET_OPTIONS]
96 | # Network Card Model. The virtio model provides the best performance with very low CPU overhead. Otherwise use e1000. (virtio | e1000)
97 | MODEL='virtio'
98 | # Bridge to attach the network device to.
99 | BRIDGE='vmbr0'
100 | # A common MAC address with the I/G (Individual/Group) bit not set.
101 | HWADDR=""
102 | # Controls whether this interface’s firewall rules should be used.
103 | FIREWALL='1'
104 | # VLAN tag for this interface (value 0 for none, or VLAN[2-N] to enable).
105 | TAG='0'
106 | # VLAN ids to pass through the interface
107 | TRUNKS=""
108 | # Apply rate limiting to the interface (MB/s). Value "" for unlimited.
109 | RATE=""
110 | # MTU - Maximum transfer unit of the interface.
111 | MTU=""
112 |
113 | #----[COMMON_NET_DNS_OPTIONS]
114 | # Nameserver server IP (IPv4 or IPv6) (value "" for none).
115 | NAMESERVER='192.168.1.5'
116 | # Search domain name (local domain)
117 | SEARCHDOMAIN=$(hostname -d)
118 |
119 | #----[COMMON_NET_STATIC_OPTIONS]
120 | # IP address (IPv4). Only works with static IP (DHCP=0).
121 | IP='192.168.1.10'
122 | # IP address (IPv6). Only works with static IP (DHCP=0).
123 | IP6=''
124 | # Default gateway for traffic (IPv4). Only works with static IP (DHCP=0).
125 | GW='192.168.1.5'
126 | # Default gateway for traffic (IPv6). Only works with static IP (DHCP=0).
127 | GW6=''
128 |
129 | #---- PVE VM
130 | # Do not edit from here down unless you know what you are doing.
131 | # ---- Common variable aliases
132 | # Virtual Disk Size (GB).
133 | VM_SIZE=10
134 |
135 | #----[VM_GENERAL_OPTIONS]
136 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
137 | OPTION_STATUS='1:0'
138 | # Name (Set a name for the VM. Only used on the configuration web interface.)
139 | VM_NAME="${HOSTNAME}"
140 | # Description. Shown in the web-interface VM’s summary.
141 | VM_DESCRIPTION="${DESCRIPTION}"
142 | # Specifies whether a VM will be started during system bootup.
143 | VM_ONBOOT='1'
144 | # Virtual OS/processor architecture. Use '' to default to host. ( <'' | aarch64 | x86_64>)
145 | VM_ARCH=''
146 | # Automatic restart after crash
147 | VM_AUTOSTART='1'
148 | # Hotplug. Selectively enable hotplug features. Default network, disk, usb. Use '0' to disable '1' to enable.
149 | VM_HOTPLUG='1'
150 | # Enable/disable the USB tablet device. Set to '0' when using Spice. This device is usually needed to allow absolute mouse positioning with VNC.
151 | VM_TABLET='1'
152 |
153 | #----[VM_SYSTEM_OPTIONS]
154 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
155 | OPTION_STATUS='1:0'
156 | # Specifies the Qemu machine type. Best use default ''. Q35 supports PCIe so it can do GPU passthrough etc. (pc|pc(-i440fx)?-\d+(\.\d+)+(\+pve\d+)?(\.pxe)?|q35|pc-q35-\d+(\.\d+)+(\+pve\d+)?(\.pxe)?|virt(?:-\d+(\.\d+)+)?(\+pve\d+)?)
157 | VM_MACHINE=''
158 | # SCSI controller model ( recommend 'virtio-scsi' or 'virtio-scsi-single'. virtio-scsi-single uses one SCSI controller per disk; classic virtio-scsi uses one controller for up to 16 disks. IOThread only works per controller. )
159 | VM_SCSIHW='virtio-scsi-pci'
160 | # BIOS implementation
161 | VM_BIOS='seabios'
162 |
163 | #----[VM_BOOT_OPTIONS]
164 | # Specify guest boot order.
165 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
166 | OPTION_STATUS='1:boot'
167 | # Set boot order. Default is 'scsi0;ide2' (ide2=cdrom).
168 | VM_ORDER='scsi0;ide2'
169 |
170 | #----[VM_QEMU_OPTIONS]
171 | # Qemu agent. Enable/disable communication with the Qemu Guest Agent and its properties. Use '0' to disable '1' to enable. (default = 0)
172 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
173 | OPTION_STATUS='1:agent'
174 | VM_QEMU_ENABLED='1'
175 | # Run fstrim after moving a disk or migrating the VM. (default = 0)
176 | VM_QEMU_FSTRIM_CLONED_DISKS='0'
177 | # Select the agent type (isa | virtio). (default = virtio)
178 | VM_QEMU_TYPE='virtio'
179 |
180 | #----[VM_SPICE_OPTIONS]
181 | # Other required options must be set: --tablet 0; --vga qxl,memory=32; --usb spice,usb3=0; --audio0 device=?,driver=spice
182 | # Configure additional enhancements for SPICE.
183 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
184 | OPTION_STATUS='0:spice_enhancements'
185 | # Foldersharing enables you to share a local folder with the VM you are connecting to. The "spice-webdavd" daemon needs to be installed in the VM. (1|0)
186 | VM_FOLDERSHARING='1'
187 | # Videostreaming will encode fast refreshing areas in a lossy video stream. (off | all | filter)
188 | VM_VIDEOSTREAMING='all'
189 |
190 | #----[VM_AUDIO_OPTIONS]
191 | # Values determined by script:
192 | # VM_DEVICE="${VM_DEVICE_VAR}"
193 | # VM_DRIVER="${VM_DRIVER_VAR}"
194 | # Or manual overwrite with your own values. Spice must be manually set.
195 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
196 | OPTION_STATUS='0:audio0'
197 | # Configure an audio device, useful in combination with QXL/Spice. (ich9-intel-hda|intel-hda|AC97)
198 | VM_DEVICE="$VM_DEVICE_VAR"
199 | # Driver select. (spice|none)
200 | VM_DRIVER="$VM_DRIVER_VAR"
201 |
202 | #----[VM_VGA_OPTIONS]
203 | # Configure the VGA Hardware. Since QEMU 2.9 the default VGA display type is 'std' for all OS types besides older Windows versions (XP and older) which use cirrus.
204 | # Display type: cirrus | none | qxl | qxl2 | qxl3 | qxl4 | serial0 | serial1 | serial2 | serial3 | std | virtio | virtio-gl | vmware> (default = std)
205 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
206 | OPTION_STATUS='1:vga'
207 | # Set display type to 'qxl' when using Spice.
208 | VM_VGA_TYPE='std'
209 | # GPU memory (MiB) (4 - 512). Sets the VGA memory (in MiB). Has no effect with serial display.
210 | VM_VGA_MEMORY='32'
211 |
212 | #----[VM_CPUSPECS_OPTIONS]
213 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
214 | OPTION_STATUS='1:0'
215 | # The number of cores per socket. Auto set by script. (default = 1)
216 | VM_CORES=$CORES
217 | # Limit of CPU usage.
218 | VM_CPULIMIT="$CPULIMIT"
219 | # CPU weight for a VM. Argument is used in the kernel fair scheduler. The larger the number is, the more CPU time this VM gets.
220 | VM_CPUUNITS="$CPUUNITS"
221 | # The number of CPU sockets.
222 | VM_SOCKETS='1'
223 | # Number of hotplugged vcpus. Default is ''.
224 | VM_VCPUS=''
225 | # Enable/disable NUMA. Default is '0'.
226 | VM_NUMA='0'
227 |
228 | #----[VM_MEMORY_OPTIONS]
229 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
230 | OPTION_STATUS='1:0'
231 | # Memory. Amount of RAM for the VM in MB. This is the maximum available memory when you use the balloon device.
232 | VM_MEMORY="$MEMORY"
233 | # Amount of target RAM for the VM in MB. Using zero disables the balloon driver.
234 | VM_BALLOON='512'
235 |
236 | #----[VM_NET_OPTIONS]
237 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
238 | OPTION_STATUS='1:net0'
239 | # Network Card Model. The virtio model provides the best performance with very low CPU overhead. Otherwise use e1000. (virtio | e1000)
240 | VM_MODEL="$MODEL"
241 | # Bridge to attach the network device to.
242 | VM_BRIDGE="$BRIDGE"
243 | # A common MAC address with the I/G (Individual/Group) bit not set.
244 | VM_MACADDR="$HWADDR"
245 | # Controls whether this interface’s firewall rules should be used.
246 | VM_FIREWALL="$FIREWALL"
247 | # VLAN tag for this interface (value 0 for none, or VLAN[2-N] to enable).
248 | VM_TAG="$TAG"
249 | # VLAN ids to pass through the interface.
250 | VM_TRUNKS="$TRUNKS"
251 | # Apply rate limiting to the interface (MB/s). Value "" for unlimited.
252 | VM_RATE="$RATE"
253 | # MTU - Maximum transfer unit of the interface.
254 | VM_MTU="$MTU"
255 |
256 | #----[VM_GUEST_OS_OPTIONS]
257 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
258 | OPTION_STATUS='1:0'
259 | # OS. Set args: l26 (Linux 2.6 Kernel) | l24 (Linux 2.4 Kernel) | other | solaris | w2k | w2k3 | w2k8 | win10 | win11 | win7 | win8 | wvista | wxp
260 | VM_OSTYPE='l26'
261 |
262 | #----[VM_CPU_OPTIONS]
263 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
264 | OPTION_STATUS='1:cpu'
265 | # Emulated CPU type.
266 | VM_CPUTYPE='kvm64'
267 |
268 | #----[VM_STARTUP_OPTIONS]
269 | # Startup and shutdown behavior ( '--startup order=1,up=1,down=1' ).
270 | # Order is a non-negative number defining the general startup order. Up=1 means first to start up. Shutdown is done with reverse ordering, so down=1 means last to shutdown.
271 | # Up: Startup delay. Defines the interval between this guest's start and subsequent guest starts. For example, set it to 240 if you want to wait 240 seconds before starting other guests.
272 | # Down: Shutdown timeout. Defines the duration in seconds Proxmox VE should wait for the guest to be offline after issuing a shutdown command. By default this value is set to 60, which means that Proxmox VE will issue a shutdown request, wait 60s for the machine to be offline, and if after 60s the machine is still online it will notify that the shutdown action failed.
273 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
274 | OPTION_STATUS='1:startup'
275 | VM_ORDER='1'
276 | VM_UP='30'
277 | VM_DOWN='60'
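# Worked mapping of the values above (a sketch; the flag itself is presumably
# assembled later by the sourced createvm script):
#   OPTION_STATUS='1:startup', VM_ORDER='1', VM_UP='30', VM_DOWN='60'
#   --> --startup order=1,up=30,down=60
#   (start first, wait 30s before the next guest starts, allow 60s to shut down)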
278 |
279 | #----[VM_SCSI0_OPTIONS]
280 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
281 | OPTION_STATUS='1:scsi0'
282 | # Virtual Disk Size (GB).
283 | VM_SCSI0_SIZE="$VM_SIZE"
284 | # Cache
285 | VM_SCSI0_CACHE=''
286 | # Allows the node to reclaim free space that does not contain any data. Must use 'VirtIO SCSI controller'. Enable for ZFS.
287 | VM_SCSI0_DISCARD=''
288 | # SSD emulation
289 | VM_SCSI0_SSD='1'
290 | # Include volume in backup job
291 | VM_SCSI0_BACKUP='1'
292 | # IOThread. Creates one I/O thread per storage controller, rather than a single thread for all I/O. Works with 'virtio-scsi-single' only.
293 | VM_SCSI0_IOTHREAD=''
294 |
295 | #----[VM_SCSI1_OPTIONS]
296 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
297 | OPTION_STATUS='0:scsi1'
298 | # Virtual Disk Size (GB).
299 | VM_SCSI1_SIZE=''
300 | # Cache
301 | VM_SCSI1_CACHE=''
302 | # Allows the node to reclaim free space that does not contain any data. Must use 'VirtIO SCSI controller'. Enable for ZFS.
303 | VM_SCSI1_DISCARD=''
304 | # SSD emulation
305 | VM_SCSI1_SSD=''
306 | # Include volume in backup job
307 | VM_SCSI1_BACKUP=''
308 | # IOThread. Creates one I/O thread per storage controller, rather than a single thread for all I/O. Works with 'virtio-scsi-single' only.
309 | VM_SCSI1_IOTHREAD=''
310 |
311 | #----[VM_CDROM_OPTIONS]
312 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
313 | OPTION_STATUS='1:cdrom'
314 | # ISO src
315 | VM_ISO_SRC='OS_TMPL'
316 | # Media type
317 | VM_MEDIA=cdrom
318 |
319 | #----[VM_CLOUD_INIT_OPTIONS]
320 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
321 | OPTION_STATUS='1:0'
322 | # Root credentials
323 | VM_CIUSER='root'
324 | VM_CIPASSWORD='ahuacate'
325 | # Specifies the cloud-init configuration format. Use the nocloud format for Linux, and configdrive2 for Windows.
326 | VM_CITYPE='nocloud'
327 | # Sets DNS server IP address for a container.
328 | VM_NAMESERVER=$NAMESERVER
329 | # Sets DNS search domains for a container.
330 | VM_SEARCHDOMAIN=$SEARCHDOMAIN
331 | # SSH Keys. Setup public SSH keys (one key per line, OpenSSH format).
332 | VM_SSHKEYS=''
333 |
334 | #----[VM_CLOUD_INIT_IPCONFIG_OPTIONS]
335 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
336 | OPTION_STATUS='1:ipconfig0'
337 | # IP address (IPv4). Set IPv4 or 'dhcp'.
338 | VM_IP="$IP"
339 | # IP address (IPv6). Set IPv6 or 'dhcp'.
340 | VM_IP6=""
341 | # Default gateway for traffic (IPv4).
342 | VM_GW="$GW"
343 | # Default gateway for traffic (IPv6).
344 | VM_GW6=""
345 |
346 | #----[VM_SERIAL_OPTIONS]
347 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>'). Default is '0' (disabled).
348 | OPTION_STATUS='0:0'
349 | # Create a serial device inside the VM (n is 0 to 3)
350 | VM_SERIAL0='socket'
351 | VM_VGA='serial0'
352 |
353 | #----[VM_USB_OPTIONS]
354 | # Configure an USB device (n is 0 to 4). (HOSTUSBDEVICE | spice)
355 | # The Host USB device or port or the value spice. HOSTUSBDEVICE syntax is:
356 | # 'bus-port(.port)*' (decimal numbers) or
357 | # 'vendor_id:product_id' (hexadecimal numbers) or 'spice'
358 | # Use '0' to disable, '1' to enable ('<0 | 1>:<0 | string name>').
359 | OPTION_STATUS='0:usb0'
360 | # Set host to 'spice' when using Spice.
361 | VM_HOST=''
362 | # Enable usb3. Specifies whether the given host option is a USB3 device or port. Use '0' to disable '1' to enable.
363 | VM_USB3=''
364 |
365 | #----[VM_OTHER]
366 | # OS Name (options are: 'ubuntu', 'debian'. Set '' when setting custom URLs - "VM_OTHER_OS_URL")
367 | VM_OS_DIST=''
368 | # OS Version (options for ubuntu: '18.04', '20.04', '21.10', '22.04' ; options for debian: '9', '10'. Set '' when setting custom URLs - "VM_OTHER_OS_URL")
369 | VM_OSVERSION=''
370 | # OS Other URL
371 | # For custom URLs to ISO files. If not used leave empty ''.
372 | VM_OTHER_OS_URL='http://sourceforge.net/projects/openmediavault/files/latest/download?source=files'
373 | # PCI passthrough. Use '0' to disable, '1' to enable ('<0 | 1>').
374 | VM_PCI_PT='1'
375 | # VM numeric ID of the given machine.
376 | VMID='110'
377 |
378 | #----[App_UID_GUID]
379 | # App user
380 | APP_USERNAME='root'
381 | # App user group
382 | APP_GRPNAME='root'
383 |
384 | #----[REPO_PKG_NAME]
385 | # Repo package name
386 | REPO_PKG_NAME='pve-nas'
387 |
388 | #---- Other Files ------------------------------------------------------------------
389 |
390 | # Required PVESM Storage Mounts for VM ( new version )
391 | unset pvesm_required_LIST
392 | pvesm_required_LIST=()
393 | while IFS= read -r line; do
394 | [[ "$line" =~ ^\#.*$ ]] && continue
395 | pvesm_required_LIST+=( "$line" )
396 | done << EOF
397 | # Example
398 | # backup:CT settings backup storage
399 | EOF
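# Note: the loop above skips '#' comment lines, so the heredoc as shipped
# yields an empty array. Uncommenting the example line would give:
#   pvesm_required_LIST=( "backup:CT settings backup storage" )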
400 |
401 | #---- Functions --------------------------------------------------------------------
402 | #---- Body -------------------------------------------------------------------------
403 |
404 | #---- Prerequisites
405 |
406 | # PCI IOMMU passthru
407 | source $COMMON_PVE_SRC_DIR/pvesource_precheck_iommu.sh
408 |
409 | # Host SMTP support
410 | source $COMMON_PVE_SRC_DIR/pvesource_precheck_hostsmtp.sh
411 |
412 |
413 | #---- Introduction
414 | source $COMMON_PVE_SRC_DIR/pvesource_vm_intro.sh
415 |
416 |
417 | #---- Set variables
418 | source $COMMON_PVE_SRC_DIR/pvesource_set_allvmvars.sh
419 |
420 |
421 | #---- Create VM
422 | source $COMMON_PVE_SRC_DIR/pvesource_vm_createvm.sh
423 |
424 |
425 | #---- Set PCI HBA or physical disk pass-through
426 | source $COMMON_PVE_SRC_DIR/pvesource_vm_diskpassthru.sh "onboard"
427 |
428 |
429 | #---- Finish Line ------------------------------------------------------------------
430 | section "Completion Status"
431 |
432 | #---- Set display text
433 | if [ "$VM_DISK_PT" = 0 ]
434 | then
435 | # Aborted disk pass-through
436 |   display_msg1="You have chosen to abort setting disk pass-through for your VM '${HOSTNAME,,}'. No disk pass-through has been configured. You can manually configure your pass-through options using the Proxmox WebGUI interface > '${VMID} (${HOSTNAME,,})' > 'Hardware' to pass-through your required devices.\n\nOr you can destroy VM '${VMID} (${HOSTNAME,,})', fix any issue you have with your disks and run the installer again."
437 | elif [ "$VM_DISK_PT" = 1 ]
438 | then
439 | # PCIe disk Pass-through
440 | display_msg1="Using the PVE web interface on PVE host '$(hostname)', go to VM '${VMID} (${HOSTNAME,,})' > '_Shell' and start the VM. The OMV installation frontend WebGUI will start. Complete the OMV installation as per our Github guide:\n\n -- https://github.com/ahuacate/nas-hardmetal\n\nAfter completing the OMV installation your login credentials are:\n\n Web interface\n -- URL: http://${HOSTNAME,,}.$(hostname -d) (hostname.domain)\n -- User: admin\n -- Password: openmediavault\n\n Client (SSH, console)\n -- User: root\n -- Password: The password that you have set during installation."
441 | elif [ "$VM_DISK_PT" = 2 ]
442 | then
443 | # PCIe HBA Pass-through
444 | display_msg1="You have chosen to configure your NAS with PCIe HBA card pass-through. Steps to be taken are:\n\n -- Connect your NAS disks to your SATA/SAS/NVMe PCIe HBA card\n -- Follow the steps in our guide: https://github.com/ahuacate/nas-hardmetal\n\nAfter configuring your PCIe HBA card pass-through using the PVE web interface on PVE host '$(hostname)', go to VM '${VMID} (${HOSTNAME,,})' > '_Shell' and start the VM. The OMV installation frontend WebGUI will start. Complete the OMV installation as per our Github guide:\n\n -- https://github.com/ahuacate/nas-hardmetal\n\nAfter completing the OMV installation your login credentials are:\n\n Web interface\n -- URL: http://${HOSTNAME,,}.$(hostname -d) (hostname.domain)\n -- User: admin\n -- Password: openmediavault\n\n Client (SSH, console)\n -- User: root\n -- Password: The password that you have set during installation."
445 | fi
446 |
447 | msg_box "${HOSTNAME^^} VM creation was a success. To complete the build you must now follow these steps.
448 |
449 | $(echo ${display_msg1})"
450 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/ubuntu/email_templates/pve_nas_ct_newuser_msg.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_ct_newuser_msg.sh
4 | # Description: Email template for PVE NAS user credentials
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Source -----------------------------------------------------------------------
8 | #---- Dependencies -----------------------------------------------------------------
9 | #---- Static Variables -------------------------------------------------------------
10 |
11 | if [ -f "/etc/proftpd/conf.d/global_default.conf" ]
12 | then
13 | # Check for Remote WAN Address status
14 | if [ "$(cat /etc/proftpd/conf.d/global_default.conf 2> /dev/null | grep '^#\s*SFTP_REMOTE_WAN_ADDRESS=*' | awk -F'=' '{ print $2}')" = 1 ]
15 | then
16 | SFTP_REMOTE_WAN_ADDRESS='Not available'
17 | elif [ ! "$(cat /etc/proftpd/conf.d/global_default.conf 2> /dev/null | grep '^#\s*SFTP_REMOTE_WAN_ADDRESS=*' | awk -F'=' '{ print $2}')" = 1 ]
18 | then
19 | SFTP_REMOTE_WAN_ADDRESS=$(cat /etc/proftpd/conf.d/global_default.conf | grep '^#\s*SFTP_REMOTE_WAN_ADDRESS=*' | awk -F'=' '{ print $2 }' | sed 's/^[ \t]*//;s/[ \t]*$//')
20 | fi
21 | # Check for Remote Port Address status
22 | if [ "$(cat /etc/proftpd/conf.d/global_default.conf 2> /dev/null | grep '^#\s*SFTP_REMOTE_WAN_PORT=*' | awk -F'=' '{ print $2}')" = 1 ]
23 | then
24 | SFTP_REMOTE_WAN_PORT='Not available'
25 | elif [ ! "$(cat /etc/proftpd/conf.d/global_default.conf 2> /dev/null | grep '^#\s*SFTP_REMOTE_WAN_PORT=*' | awk -F'=' '{ print $2}')" = 1 ]
26 | then
27 | SFTP_REMOTE_WAN_PORT=$(cat /etc/proftpd/conf.d/global_default.conf | grep '^#\s*SFTP_REMOTE_WAN_PORT=*' | awk -F'=' '{ print $2 }' | sed 's/^[ \t]*//;s/[ \t]*$//')
28 | fi
29 | else
30 | SFTP_REMOTE_WAN_ADDRESS='Not available'
31 | SFTP_REMOTE_WAN_PORT='Not available'
32 | fi
33 |
34 | # Check SFTP LAN Port
35 | if [ -f "/etc/proftpd/conf.d/sftp.conf" ]
36 | then
37 | LOCAL_LAN_PORT=$(cat /etc/proftpd/conf.d/sftp.conf 2> /dev/null | grep '^\s*Port.*[0-9]$' | sed 's/^[ \t]*//;s/[ \t]*$//' | awk -F' ' '{ print $2}')
38 | else
39 | LOCAL_LAN_PORT='Not available'
40 | fi
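# Worked example of the sftp.conf port extraction above (the sample line is
# hypothetical, matching the stock sftp.conf shipped with this repo):
echo '  Port 2222' | grep '^\s*Port.*[0-9]$' | sed 's/^[ \t]*//;s/[ \t]*$//' | awk -F' ' '{ print $2 }'   # -> 2222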
41 |
42 | #---- Other Variables --------------------------------------------------------------
43 | #---- Other Files ------------------------------------------------------------------
44 | #---- Body -------------------------------------------------------------------------
45 |
46 | #---- Email user credentials
47 | # Email body text
48 | cat <<-EOF > email_body.html
49 | To: ${PVE_ROOT_EMAIL}
50 | From: donotreply@${HOSTNAME}.local
51 | Subject: Login Credentials for NAS user: ${USER}
52 | Mime-Version: 1.0
53 | Content-Type: multipart/mixed; boundary="ahuacate"
54 |
55 | --ahuacate
56 | Content-Type: text/html
57 |
58 | ---- Login credentials for user '${USER^^}' ${HOSTNAME^} account
59 | Use the attached SSH keys for authentication when connecting to our NAS server via SFTP or SSH. Remember to always keep your private keys safe. SSH keys should never be accessible to anyone other than the NAS user account holder.
60 | The User's login credentials are:
61 |
62 | - Username : ${USER}
63 | - Password : ${PASSWORD}
64 | - Primary User Group : ${GROUP}
65 | - Supplementary User Group : $(if [ ${GROUP} = chrootjail ]; then echo "None"; else echo -e ${USERMOD} | sed 's/^...//' | sed 's/,/, /'; fi)
66 | - Private SSH Key (Standard) : id_${USER,,}_ed25519
67 | - Private SSH Key (PPK version) : id_${USER,,}_ed25519.ppk
68 | - NAS LAN IP Address : $(hostname -I)
69 | - NAS WAN Address : ${SFTP_REMOTE_WAN_ADDRESS}
70 | - SMB Status : Enabled
71 |
72 |
73 | ---- Account type (folder access level)
74 | The User has been issued a '${GROUP}' level account type. The User's folder access rights are as follows:
75 | $(if [ ${GROUP} = privatelab ]; then
76 | echo '
77 | 
78 | - privatelab - Private storage including 'medialab' & 'homelab' rights
79 | 
80 | '
81 | elif [ ${GROUP} = homelab ]; then
82 | echo '
83 | 
84 | - homelab - Everything to do with a smart home including 'medialab'
85 | 
86 | '
87 | elif [ ${GROUP} = medialab ]; then
88 | echo '
89 | 
90 | - medialab - Everything to do with media (i.e movies, series & music)
91 | 
92 | '
93 | elif [ ${GROUP} = chrootjail ]; then
94 | echo ''
95 | echo ''
96 | echo '- chrootjail - The User is safe and secure in a jailed account ( a good thing )'
97 | echo "- Jail Level - $(if [ ${JAIL_TYPE} = level01 ]; then echo -e ${LEVEL01}; elif [ ${JAIL_TYPE} = level02 ]; then echo -e ${LEVEL02}; elif [ ${JAIL_TYPE} = level03 ]; then echo -e ${LEVEL03}; fi)"
98 | echo ''
99 | echo ''
100 | fi)
101 |
102 | ---- Client SMB LAN ${HOSTNAME^} connection
103 | SMB, or Server Message Block, is the protocol used by Microsoft Windows networking, and by Samba on Apple Mac and Linux/Unix.
104 | 1) MS Window Clients
105 |
106 | - Server IP address : \\\\$(hostname -I)
107 | - Server FQDN address : \\\\$(hostname).$(hostname -d)
108 | - User name : ${USER}
109 | - Password : ${PASSWORD}
110 |
111 | 2) Apple Mac or Linux Clients
112 |
113 | - Server IP address : smb://$(hostname -I)
114 | - Server FQDN address : smb://$(hostname).$(hostname -d)
115 | - Connect as : Registered User
116 | - Name : ${USER}
117 | - Password : ${PASSWORD}
118 |
119 |
120 | ---- Client SFTP ${HOSTNAME^} connection
121 | Only SFTP is enabled (standard FTP connections are denied), with login by SSH key only. For connecting we recommend the free FileZilla FTP client software ( https://filezilla-project.org/download.php ). Use the FileZilla connection tool 'File' > 'Site Manager' and create a 'New Site' account with the following credentials.
122 |
123 | - Protocol : SFTP - SSH File Transfer Protocol
124 | - Login Type : Key file
125 | - User : ${USER}
126 | - Key file : id_${USER,,}_ed25519.ppk
127 |
128 | Depending on your account type you can select either a local or a remote SFTP connection method.
129 | 1) LAN Access - For LAN access only.
130 |
131 | - Host IP address : $(hostname -I)
132 | - Host FQDN address : $(hostname).$(hostname -d)
133 | - Port : ${LOCAL_LAN_PORT}
134 |
135 | 2) WAN Access - For remote internet access only.
136 |
137 | - Host address : ${SFTP_REMOTE_WAN_ADDRESS}
138 | - Port : ${SFTP_REMOTE_WAN_PORT}
139 |
140 | Note: FileZilla requires the PPK private SSH key "id_${USER,,}_ed25519.ppk" not the standard private SSH key "id_${USER,,}_ed25519".
141 |
142 |
143 |
144 |
145 | ---- Attachment Details
146 | Attached files are:
147 |
148 |
149 | - Private SSH Key (Standard) : id_${USER,,}_ed25519
150 | - Private SSH Key (PPK version) : id_${USER,,}_ed25519.ppk
151 |
152 |
153 | --ahuacate
154 | Content-Type: application/zip
155 | Content-Disposition: attachment; filename="id_${USER,,}_ed25519"
156 | Content-Transfer-Encoding: base64
157 | $(if [ ${GROUP} = privatelab ] || [ ${GROUP} = homelab ] || [ ${GROUP} = medialab ]; then
158 | echo "$(openssl base64 < /srv/${HOSTNAME}/homes/${USER}/.ssh/id_${USER,,}_ed25519)"
159 | elif [ ${GROUP} = chrootjail ]; then
160 | echo "$(openssl base64 < /${HOME_BASE}${USER}/.ssh/id_${USER,,}_ed25519)"
161 | fi)
162 |
163 | --ahuacate
164 | Content-Type: application/zip
165 | Content-Disposition: attachment; filename="id_${USER,,}_ed25519.ppk"
166 | Content-Transfer-Encoding: base64
167 | $(if [ ${GROUP} = privatelab ] || [ ${GROUP} = homelab ] || [ ${GROUP} = medialab ]; then
168 | echo "$(openssl base64 < /srv/${HOSTNAME}/homes/${USER}/.ssh/id_${USER,,}_ed25519.ppk)"
169 | elif [ ${GROUP} = chrootjail ]; then
170 | echo "$(openssl base64 < /${HOME_BASE}${USER}/.ssh/id_${USER,,}_ed25519.ppk)"
171 | fi)
172 |
173 | --ahuacate
174 | EOF
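# Hedged delivery sketch (the send step lives outside this template; this
# assumes a configured sendmail shim such as sSMTP, which is referenced
# elsewhere in this repo):
#   sendmail -t < email_body.html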
--------------------------------------------------------------------------------
/src/ubuntu/proftpd_settings/global_default.conf:
--------------------------------------------------------------------------------
1 |
2 | # Copy file to /etc/proftpd/conf.d/global_default.conf
3 |
4 | # A value of 1 means not available. Required by the SSMTP email service to send User details.
5 | # To set your WAN address example: # SFTP_REMOTE_WAN_ADDRESS='sftp.myavocado.stupid.me' (note the preceding # MUST be included)
6 | # SFTP_LOCAL_LAN_ADDRESS=$(hostname -i)
7 | # SFTP_LOCAL_LAN_PORT=2222
8 | # SFTP_REMOTE_WAN_ADDRESS=1
9 | # SFTP_REMOTE_WAN_PORT=1
10 |
11 | # Chroot default folder
12 | DefaultRoot ~
13 |
14 | # Here are the default values
15 | MultilineRFC2228 off
16 | RootLogin off
17 | RequireValidShell off
18 | MaxLoginAttempts 5
19 | MaxClientsPerHost 10
20 | DefaultTransferMode binary
21 | ShowSymlinks on
22 |
23 | #Limit login times and timeouts to drop dead clients.
24 | TimeoutLogin 300
25 | TimeoutIdle 300
26 | TimeoutNoTransfer 300
27 | TimeoutStalled 300
28 |
29 | # Allow resuming of both downloads and uploads
30 | AllowRetrieveRestart on
31 | AllowStoreRestart on
32 |
33 | # Hide all dotfiles, hidden files and folders
34 |
35 | HideFiles ^\..*
36 | HideNoAccess on
37 |
38 | IgnoreHidden on
39 |
40 |
41 |
42 | # Deny folder access
43 |
44 |
45 | DenyGroup medialab
46 |
47 |
48 |
49 | # Limit Chmod command
50 | <Limit SITE_CHMOD>
51 |   DenyAll
52 | </Limit>
53 |
54 |
55 |
--------------------------------------------------------------------------------
/src/ubuntu/proftpd_settings/global_desktopdir.conf:
--------------------------------------------------------------------------------
1 |
2 | # Copy file to /etc/proftpd/conf.d/global_desktopdir.conf
3 |
4 | # Protect user desktop base folders
5 | # Desktop folder
6 |
7 |
8 | DenyAll
9 |
10 |
11 |
12 |
13 | AllowAll
14 |
15 |
16 |
17 | # Documents folder
18 |
19 |
20 | DenyAll
21 |
22 |
23 |
24 |
25 | AllowAll
26 |
27 |
28 |
29 | # Downloads folder
30 |
31 |
32 | DenyAll
33 |
34 |
35 |
36 |
37 | AllowAll
38 |
39 |
40 |
41 | # Music folder
42 |
43 |
44 | DenyAll
45 |
46 |
47 |
48 |
49 | AllowAll
50 |
51 |
52 |
53 | # Pictures folder
54 |
55 |
56 | DenyAll
57 |
58 |
59 |
60 |
61 | AllowAll
62 |
63 |
64 |
65 | # Public folder
66 |
67 |
68 | DenyAll
69 |
70 |
71 |
72 |
73 | AllowAll
74 |
75 |
76 |
77 | # Template folder
78 |
79 |
80 | DenyAll
81 |
82 |
83 |
84 |
85 | AllowAll
86 |
87 |
88 |
89 | # Videos folder
90 |
91 |
92 | DenyAll
93 |
94 |
95 |
96 |
97 | AllowAll
98 |
99 |
100 |
101 |
102 |
--------------------------------------------------------------------------------
/src/ubuntu/proftpd_settings/pve_nas_ct_proftpdsettings.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_ct_proftpdsettings.sh
4 | # Description: ProFTPd settings script for PVE Ubuntu NAS
5 | # ----------------------------------------------------------------------------------
6 |
7 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
8 | COMMON_PVE_SRC_DIR="$DIR/../../../common/pve/src"
9 |
10 | #---- Dependencies -----------------------------------------------------------------
11 |
12 | # Run Bash Header
13 | source $COMMON_PVE_SRC_DIR/pvesource_bash_defaults.sh
14 |
15 | #---- Static Variables -------------------------------------------------------------
16 | #---- Other Variables --------------------------------------------------------------
17 |
18 | # Section Header Body Text
19 | SECTION_HEAD='PVE NAS'
20 |
21 | # Check if IP is static or DHCP
22 | if [[ $(ip r | head -n 1 | grep -n 'proto dhcp') ]]
23 | then
24 | DHCP=1
25 | else
26 | DHCP=0
27 | fi
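# For reference, the first route line on a DHCP lease typically carries the
# 'proto dhcp' marker the test above keys on (illustrative output):
#   default via 192.168.1.1 dev eth0 proto dhcp metric 100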
28 |
29 | #---- Other Files ------------------------------------------------------------------
30 | #---- Functions --------------------------------------------------------------------
31 | #---- Body -------------------------------------------------------------------------
32 |
33 | #---- Setting Folder Permissions
34 | section "Setup ProFTPd SFTP service."
35 |
36 | # Check for ProFTPd installation
37 | msg "Checking for ProFTPd status..."
38 | if [ "$(dpkg -s proftpd-core >/dev/null 2>&1; echo $?)" = 0 ]
39 | then
40 | info "ProFTPd status: ${GREEN}installed.${NC} ( $(proftpd --version) )"
41 | echo
42 | else
43 |   info "ProFTPd is not installed. Exiting the ProFTPd settings script..."
44 | echo
45 | exit 0
46 | fi
47 |
48 | # Creating sftp Configuration
49 | msg "Checking sftp configuration..."
50 | echo
51 | if [ -f "/etc/proftpd/conf.d/sftp.conf" ] || [ -f "/etc/proftpd/conf.d/global_default.conf" ] || [ -f "/etc/proftpd/conf.d/global_desktopdir.conf" ]
52 | then
53 | msg_box "#### PLEASE READ CAREFULLY - SFTP CONFIGURATION ####\n
54 | Existing ProFTPd settings files have been found. Updating will overwrite the following settings files:
55 |
56 | -- /etc/proftpd/conf.d/sftp.conf
57 | -- /etc/proftpd/conf.d/global_default.conf
58 | -- /etc/proftpd/conf.d/global_desktopdir.conf
59 |
60 | The User also has the option to set the following:
61 | -- SFTP WAN address ( i.e. a HAProxy URL, DynDNS provider or static IP )
62 | -- SFTP WAN port
63 | -- SFTP local port
64 |
65 | If the User has made custom changes to the existing ProFTPd configuration files DO NOT proceed to update these files (first make a backup). Otherwise we RECOMMEND you update (overwrite) the ProFTPd settings files with our latest version."
66 | echo
67 | while true
68 | do
69 | read -p "Update your ProFTPd settings (Recommended) [y/n]? " -n 1 -r YN
70 | echo
71 | case $YN in
72 | [Yy]*)
73 | PROFTPD_SETTING=0
74 | echo
75 | break
76 | ;;
77 | [Nn]*)
78 | PROFTPD_SETTING=1
79 | info "You have chosen to skip this step."
80 | echo
81 | break
82 | ;;
83 | *)
84 | warn "Error! Entry must be 'y' or 'n'. Try again..."
85 | echo
86 | ;;
87 | esac
88 | done
89 | else
90 | PROFTPD_SETTING=0
91 | fi
92 |
93 | # WAN address and Port settings
94 | msg_box "#### PLEASE READ CAREFULLY - PROFTP Settings ####
95 |
96 | Our ProFTPd settings are tailored and configured to work out of the box. But the User may want to change our basic default settings to meet their network requirements:
97 |
98 | Local ProFTPd server settings.
99 | -- SFTP server local LAN port : 2222
100 | -- SFTP server local IPv4 address : $(if [ ${DHCP} == '0' ]; then echo "$(hostname -i) ( static IP )"; else echo "$(hostname).$(hostname -d) ( dhcp IP )"; fi)
101 |
102 | If you have configured your network for remote access using HAProxy or by DynDNS you should enter the details when prompted. It will be included in all new user account instruction emails along with their user credentials.
103 | -- SFTP remote WAN HTTPS URL address : none
104 | -- SFTP server WAN port : none"
105 | echo
106 |
107 | # Set
108 | msg "The User can set a custom ProFTPd server SFTP local LAN Port number. The SFTP default LAN Port number is 2222 ( Recommended ). Valid ports range from 1 to 65535; however, ports below 1024 are reserved for other protocols. It is best to choose a port greater than or equal to 50000 for SFTP mode."
109 | echo
110 | while true
111 | do
112 | read -p "Enter a ProFTPd SFTP Port number: " -e -i 2222 SFTP_LOCAL_LAN_PORT
113 | if [[ "$SFTP_LOCAL_LAN_PORT" =~ ^[0-9]+$ ]]
114 | then
115 | info "SFTP local LAN Port is set: ${YELLOW}$SFTP_LOCAL_LAN_PORT${NC}"
116 | echo
117 | break
118 | else
119 |     warn "There are problems with your input:
120 | 
121 |     1. A LAN Port number must be an integer (numerics only).
122 | 
123 |     Try again..."
124 | echo
125 | fi
126 | done
127 |
128 | # Set remote connection address
129 | msg "Select a connection method from the menu. To connect remotely you must have HAProxy, Cloudflare or a Dynamic DNS service provider account up and running ( and know your connection address URL ). If the User has none of these then select 'None'."
130 | echo
131 | OPTIONS_VALUES_INPUT=( "TYPE01" "TYPE02" )
132 | OPTIONS_LABELS_INPUT=( "Remote Access Address - connect remotely from the internet" "None - connect using LAN $(if [ ${DHCP} == '0' ]; then echo "$(hostname -i)"; else echo "$(hostname).$(hostname -d)"; fi)" )
133 | makeselect_input2
134 | singleselect SELECTED "$OPTIONS_STRING"
135 | if [ "$RESULTS" = TYPE01 ]
136 | then
137 | while true
138 | do
139 |     msg "The User must input a valid internet HTTPS URL. This could be a Dynamic DNS server URL, a domain address URL ( i.e. a Cloudflare hosted web address ) or even a static WAN IP address if you have one."
140 | read -p "Enter a valid HTTPS URL address: " SFTP_REMOTE_WAN_ADDRESS_VAR
141 | SFTP_REMOTE_WAN_ADDRESS=${SFTP_REMOTE_WAN_ADDRESS_VAR,,}
142 | if ping -c1 $SFTP_REMOTE_WAN_ADDRESS &>/dev/null; then
143 | info "SFTP connection address is set: ${YELLOW}$SFTP_REMOTE_WAN_ADDRESS${NC}"
144 | SFTP_REMOTE_WAN_PORT=0
145 | echo
146 | break
147 | else
148 | warn "There are problems with your input:
149 |
150 | 1. HTTPS URL '$SFTP_REMOTE_WAN_ADDRESS' is not reachable.
151 | 2. A valid URL resembles: sftp-site1.foo.bar or mysftp.dyndns.org
152 |
153 | Check your URL address, remember to include any subdomain and try again..."
154 | echo
155 | fi
156 | done
157 | elif [ "$RESULTS" = TYPE02 ]
158 | then
159 | SFTP_REMOTE_WAN_ADDRESS=1
160 | SFTP_REMOTE_WAN_PORT=1
161 |   msg "You can always add an SFTP remote WAN HTTPS URL address at a later stage."
162 | info "SFTP connection address is set: ${YELLOW}$(hostname -i)${NC}"
163 | echo
164 | fi
165 |
166 | # Set remote port number
167 | if [ "$SFTP_REMOTE_WAN_PORT" = 0 ]
168 | then
169 | msg "Your remote internet connection URL is set: ${WHITE}$SFTP_REMOTE_WAN_ADDRESS${NC}.
170 |   The User must provide an incoming WAN Port number used to access the ${HOSTNAME^^} LAN network. If the User has configured pfSense HAProxy with Cloudflare the port would be '443'. For a Dynamic DNS provider configuration the WAN Port number is set by the User at the network Gateway device (modem/USG) port forwarding settings table ( i.e. mysftp.dyndns.org WAN: ${WHITE}502222${NC} --> LAN: $(hostname -i):2222 )."
171 | echo
172 | while true
173 | do
174 | read -p "Enter a WAN Port number: " -e -i 443 SFTP_REMOTE_WAN_PORT
175 | if [[ "$SFTP_REMOTE_WAN_PORT" =~ ^[0-9]+$ ]]
176 | then
177 | info "SFTP WAN Port is set: ${YELLOW}$SFTP_REMOTE_WAN_PORT${NC}"
178 | echo
179 | break
180 | else
181 | warn "There are problems with your input:
182 |
183 |       1. A WAN Port number must be an integer (numerics only).
184 |
185 | Try again..."
186 | echo
187 | fi
188 | done
189 | fi
190 |
191 | # Modifying ProFTPd Defaults
192 | if [ "$PROFTPD_SETTING" = 0 ]
193 | then
194 | msg "Modifying ProFTPd defaults and settings..."
195 | if [ "$(systemctl is-active proftpd)" = 'active' ]
196 | then
197 | systemctl stop proftpd
198 | while ! [[ "$(systemctl is-active proftpd)" == "inactive" ]]; do
199 |       echo -n .; sleep 1
200 | done
201 | fi
202 | eval "echo \"$(cat $DIR/sftp.conf)\"" > /etc/proftpd/conf.d/sftp.conf
203 | eval "echo \"$(cat $DIR/global_default.conf)\"" > /etc/proftpd/conf.d/global_default.conf
204 | eval "echo \"$(cat $DIR/global_desktopdir.conf)\"" > /etc/proftpd/conf.d/global_desktopdir.conf
205 | sed -i 's|# DefaultRoot.*|DefaultRoot ~|g' /etc/proftpd/proftpd.conf
206 | sed -i 's|ServerName.*|ServerName \"'$(echo ${HOSTNAME^^})'\"|g' /etc/proftpd/proftpd.conf
207 | sed -i 's|UseIPv6.*|UseIPv6 off|g' /etc/proftpd/proftpd.conf
208 | sed -i 's|#LoadModule mod_sftp.c|LoadModule mod_sftp.c|g' /etc/proftpd/modules.conf
209 | sed -i 's|#LoadModule mod_sftp_pam.c|LoadModule mod_sftp_pam.c|g' /etc/proftpd/modules.conf
210 | sed -i "s|^#\s*SFTP_LOCAL_LAN_ADDRESS=.*|# SFTP_LOCAL_LAN_ADDRESS='$(if [ "$DHCP" = 0 ]; then echo "$(hostname -i)"; else echo "$(hostname).$(hostname -d)"; fi)'|g" /etc/proftpd/conf.d/global_default.conf
211 | sed -i "s|^#\s*SFTP_LOCAL_LAN_PORT=.*|# SFTP_LOCAL_LAN_PORT='${SFTP_LOCAL_LAN_PORT}'|g" /etc/proftpd/conf.d/global_default.conf
212 | sed -i "s|^#\s*SFTP_REMOTE_WAN_ADDRESS=.*|# SFTP_REMOTE_WAN_ADDRESS='${SFTP_REMOTE_WAN_ADDRESS}'|g" /etc/proftpd/conf.d/global_default.conf
213 |   sed -i "s|^#\s*SFTP_REMOTE_WAN_PORT=.*|# SFTP_REMOTE_WAN_PORT='${SFTP_REMOTE_WAN_PORT}'|g" /etc/proftpd/conf.d/global_default.conf
214 | # SFTP Conf
215 | sed -i "s|^\s*Port.*| Port ${SFTP_LOCAL_LAN_PORT}|g" /etc/proftpd/conf.d/sftp.conf
216 | info "ProFTPd settings status: ${YELLOW}updated${NC}"
217 | echo
218 | fi
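# The eval-echo-cat idiom above is a light templating pass: it expands any
# shell variables referenced inside the shipped conf files at install time.
# Minimal self-contained demo (file name and variable are hypothetical):
HOST_DEMO='nas-01'
printf 'DefaultRoot /srv/$HOST_DEMO\n' > /tmp/demo.tmpl   # written literally
eval "echo \"$(cat /tmp/demo.tmpl)\""                     # -> DefaultRoot /srv/nas-01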
219 |
220 | # ProFTPd Status
221 | # Starting ProFTPd service
222 | msg "Checking ProFTPd status..."
223 | if [ "$(systemctl is-active proftpd)" = 'inactive' ]
224 | then
225 | msg "Starting ProFTPd..."
226 | systemctl start proftpd
227 | msg "Waiting to hear from ProFTPd..."
228 | while ! [[ "$(systemctl is-active proftpd)" == "active" ]]
229 | do
230 |     echo -n .; sleep 1
231 | done
232 | sleep 1
233 | info "ProFTPd status: ${GREEN}running${NC}"
234 | echo
235 | fi
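# NOTE: a quick manual connection test once ProFTPd reports 'running' (assumes
# the OpenSSH sftp client; the user and key names below are hypothetical):
#   sftp -P ${SFTP_LOCAL_LAN_PORT:-2222} -i ~/.ssh/id_jane_ed25519 jane@$(hostname -i)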
236 |
237 | #---- Finish Line ------------------------------------------------------------------
238 | if [ ! "$PROFTPD_SETTING" = 1 ]
239 | then
240 | section "Completion Status."
241 |
242 | info "${WHITE}Success.${NC} ProFTPd settings have been updated."
243 | echo
244 | fi
245 |
246 | # Cleanup
247 | if [ -z "${PARENT_EXEC+x}" ]
248 | then
249 | trap cleanup EXIT
250 | fi
251 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/ubuntu/proftpd_settings/sftp.conf:
--------------------------------------------------------------------------------
1 |
2 | # Copy file to /etc/proftpd/conf.d/sftp.conf
3 |
4 | <IfModule mod_sftp.c>
5 | SFTPPAMEngine on
6 | SFTPPAMServiceName sftp
7 |
8 |
9 | # Server Details
10 | Port 2222
11 | ServerName "Ahuacate SFTP Server"
12 | AccessGrantMsg "-- Welcome to Ahuacate SFTP Server --"
13 | SFTPEngine on
14 | SFTPLog /var/log/proftpd/sftp.log
15 |
16 | # Configure both the RSA and DSA host keys, using the same host key files that OpenSSH uses.
17 | SFTPHostKey /etc/ssh/ssh_host_rsa_key
18 | SFTPHostKey /etc/ssh/ssh_host_dsa_key
19 |
20 | #SFTPAuthMethods publickey password keyboard-interactive
21 | SFTPAuthMethods publickey
22 |
23 | # Max Attempts
24 | MaxLoginAttempts 6
25 |
26 | # User SSH Key file location
27 | SFTPAuthorizedUserKeys file:/etc/proftpd/authorized_keys/%u
28 |
29 | # Enable compression
30 | SFTPCompression delayed
31 |
32 | # SFTP Options
33 | SFTPOptions IgnoreSFTPUploadPerms
34 |
35 | # User Chroot home
36 | DefaultRoot /srv/$HOSTNAME/homes/chrootjail/homes/%u chrootjail
37 | DefaultRoot /srv/$HOSTNAME privatelab
38 | DefaultRoot /srv/$HOSTNAME medialab
39 |
40 | # User Group Access
41 | <Limit LOGIN>
42 | AllowGroup chrootjail
43 | AllowGroup medialab
44 | AllowGroup privatelab
45 | DenyAll
46 | </Limit>
47 |
48 | </IfModule>
49 |
50 |
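# NOTE: the login limit block above is a whitelist - only members of the listed
# AllowGroup groups may log in; DenyAll rejects everyone else. Each DefaultRoot
# line chroots a group to its path ('%u' expands to the connecting user's name).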
--------------------------------------------------------------------------------
/src/ubuntu/pve-nas_sw.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve-nas_sw.sh
4 | # Description: Setup for Ubuntu NAS server
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 | #---- Source -----------------------------------------------------------------------
9 |
10 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
11 | COMMON_DIR="$DIR/../../common"
12 | COMMON_PVE_SRC_DIR="$DIR/../../common/pve/src"
13 | SHARED_DIR="$DIR/../../shared"
14 |
15 | #---- Dependencies -----------------------------------------------------------------
16 |
17 | # Run Bash Header
18 | source $COMMON_PVE_SRC_DIR/pvesource_bash_defaults.sh
19 |
20 | #---- Static Variables -------------------------------------------------------------
21 | #---- Other Variables --------------------------------------------------------------
22 |
23 | # Easy Script Section Header Body Text
24 | SECTION_HEAD='PVE NAS'
25 |
26 | #---- Other Files ------------------------------------------------------------------
27 |
28 | # Copy default lists of folder shares
29 | mv /tmp/nas_basefolderlist .
30 | mv /tmp/nas_basefoldersubfolderlist .
31 | mv /tmp/nas_basefolderlist_extra .
32 |
33 | #---- Body -------------------------------------------------------------------------
34 |
35 | #---- Prerequisites
36 | section "Performing Prerequisites"
37 |
38 | # Setting Variables
39 | msg "Setting the $SECTION_HEAD variables..."
40 | if [ -f "/tmp/pve_nas_ct_variables.sh" ]
41 | then
42 | mv /tmp/pve_nas_ct_variables.sh . 2>/dev/null
43 | # Import Variables
44 | . ./pve_nas_ct_variables.sh
45 | info "${SECTION_HEAD} variables are set."
46 | echo
47 | fi
48 |
49 | # Checking NAS storage mount point
50 | if [ ! -d "/srv/$HOSTNAME" ]
51 | then
52 | warn "Cannot locate, identify and PVE storage backend: "/srv/${HOSTNAME}"\nAborting installation."
53 | exit 0
54 | fi
55 |
56 | # Download and Install Prerequisites
57 | msg "Installing ACL..."
58 | apt-get install -y acl >/dev/null
59 | msg "Installing Putty Tools..."
60 | apt-get install -y putty-tools >/dev/null
61 | echo
62 |
63 |
64 | #---- Creating PVE NAS Users and Groups
65 | section "Creating Users and Groups"
66 |
67 | # Set base dir (must be defined before the adduser defaults below use it)
68 | DIR_SCHEMA="/srv/$(hostname)"
69 | # Change Home folder permissions
70 | msg "Setting default adduser home folder permissions (DIR_MODE)..."
71 | sed -i "s/^DIR_MODE=.*/DIR_MODE=0750/g" /etc/adduser.conf
72 | info "Default adduser permissions set: ${WHITE}0750${NC}"
73 | msg "Setting default HOME folder destination..."
74 | sed -i "s|^DHOME=.*|DHOME=$DIR_SCHEMA/homes|g" /etc/adduser.conf
75 | sed -i "s|^# HOME=.*|HOME=$DIR_SCHEMA/homes|g" /etc/default/useradd
76 | grep -q '^HOME_MODE' /etc/login.defs || echo "HOME_MODE 0750" | sudo tee -a /etc/login.defs
77 | info "Default HOME destination folder set: ${WHITE}$DIR_SCHEMA/homes${NC}"
78 |
79 | # Create User Acc
80 | source $SHARED_DIR/pve_nas_create_users.sh
81 |
82 | # Creating Chroot jail environment
83 | source $COMMON_PVE_SRC_DIR/pvesource_ct_ubuntu_installchroot.sh
84 |
85 |
86 | #---- Validating your network setup
87 |
88 | # Run Check Host IP
89 | # source $COMMON_DIR/nas/src/nas_set_nasip.sh
90 |
91 | # Identify PVE host IP
92 | source $COMMON_DIR/nas/src/nas_identify_pvehosts.sh
93 | # source $COMMON_PVE_SRC_DIR/pvesource_identify_pvehosts.sh
94 |
95 | # Modifying SSHd
96 | cat <<EOF >> /etc/ssh/sshd_config
97 | # Settings for privatelab
98 | Match Group privatelab
99 | AuthorizedKeysFile /srv/$HOSTNAME/homes/%u/.ssh/authorized_keys
100 | PubkeyAuthentication yes
101 | PasswordAuthentication no
102 | AllowTCPForwarding no
103 | X11Forwarding no
104 | # Settings for medialab
105 | Match Group medialab
106 | AuthorizedKeysFile /srv/$HOSTNAME/homes/%u/.ssh/authorized_keys
107 | PubkeyAuthentication yes
108 | PasswordAuthentication no
109 | AllowTCPForwarding no
110 | X11Forwarding no
111 | EOF
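# NOTE: the heredoc above uses an unquoted 'EOF', so $HOSTNAME expands now while
# '%u' is left for sshd to expand per-user at login. A sanity check before
# relying on the new Match blocks (standard OpenSSH tooling):
#   sshd -t && systemctl reload sshd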
112 |
113 |
114 | #---- Install and Configure Samba
115 | source $COMMON_DIR/nas/src/nas_installsamba.sh
116 |
117 |
118 | #---- Install and Configure NFS
119 | source $COMMON_DIR/nas/src/nas_installnfs.sh
120 |
121 |
122 | #---- Install and Configure Webmin
123 | source $COMMON_PVE_SRC_DIR/pvesource_install_webmin.sh
124 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/ubuntu/pve_nas_ct_addpoweruser.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_ct_addpoweruser.sh
4 | # Description: Create a new PVE NAS Power User
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 |
9 | # Command to run script
10 | # bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/src/ubuntu/pve_nas_ct_addpoweruser.sh)"
11 |
12 | #---- Source -----------------------------------------------------------------------
13 |
14 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
15 | COMMON_PVE_SRC_DIR="$DIR/../../common/pve/src"
16 |
17 | #---- Dependencies -----------------------------------------------------------------
18 |
19 | # Run Bash Header
20 | source $COMMON_PVE_SRC_DIR/pvesource_bash_defaults.sh
21 |
22 | # Install libcrack2
23 | if ! dpkg -s libcrack2 &> /dev/null
24 | then
25 | apt-get install -y libcrack2 > /dev/null
26 | fi
27 |
28 | # Check user is root
29 | if [ ! "$(id -u)" = 0 ]
30 | then
31 | warn "This script needs to run under 'root'. Exiting in 2 seconds.\nTry again..."
32 | sleep 2
33 | exit 0
34 | fi
35 |
36 | # Check PVE host SMTP status
37 | check_smtp_status
38 | if [ "$SMTP_STATUS" = 0 ]
39 | then
40 | display_msg='\nBefore proceeding with this installer we RECOMMEND you first configure all PVE hosts to support SMTP email services. A working SMTP server emails the NAS System Administrator all new User login credentials, SSH keys, application-specific login credentials and written guidelines. A PVE host SMTP server makes NAS administration much easier. You will also be alerted to unwarranted login attempts and other system-critical events. The PVE Host SMTP Server installer is available in our PVE Host Toolbox located at GitHub:\n\n -- https://github.com/ahuacate/pve-host\n'
41 | fi
42 |
43 | #---- Static Variables -------------------------------------------------------------
44 |
45 | # List of new users
46 | NEW_USERS=usersfile
47 | # Homes folder
48 | HOSTNAME=$(hostname)
49 | HOME_BASE="/srv/$HOSTNAME/homes"
50 |
51 | #---- Other Variables --------------------------------------------------------------
52 |
53 | # Easy Script Section Header Body Text
54 | SECTION_HEAD='PVE NAS'
55 |
56 | #---- Other Files ------------------------------------------------------------------
57 | #---- Body -------------------------------------------------------------------------
58 |
59 | #---- Create New Power User Accounts
60 | section "Create a New Power User Account"
61 |
62 | msg_box "#### PLEASE READ CAREFULLY - CREATING POWER USER ACCOUNTS ####
63 | $(if [ "$SMTP_STATUS" = 0 ]; then echo -e ${display_msg}; fi)
64 | Power Users are trusted persons with privileged access to data and application resources hosted on your PVE NAS. Power Users are NOT standard users! Standard users are added at a later stage. Each new Power User's security permissions are controlled by Linux groups. Group security permission levels are as follows:
65 |
66 | -- GROUP NAME -- PERMISSIONS
67 | -- 'medialab' -- Everything to do with media (i.e. movies, series & music)
68 | -- 'homelab' -- Everything to do with a smart home including 'medialab'
69 | -- 'privatelab' -- Private storage including 'medialab' & 'homelab' rights
70 |
71 | A Personal Home Folder will be created for each new user. The folder name is the new user's name. You can access Personal Home Folders and other shares via CIFS/Samba and NFS.
72 |
73 | Remember your PVE NAS is also pre-configured with user names specifically tasked for running hosted applications (i.e. Proxmox LXC/CT/VM - Sonarr, Radarr, Lidarr). These application user names are as follows:
74 |
75 | -- GROUP NAME -- USER NAME
76 | -- 'medialab' -- /srv/CT_HOSTNAME/homes/'media'
77 | -- 'homelab' -- /srv/CT_HOSTNAME/homes/'home'
78 | -- 'privatelab' -- /srv/CT_HOSTNAME/homes/'private'"
79 | echo
80 | OPTIONS_VALUES_INPUT=( "TYPE01" "TYPE00" )
81 | OPTIONS_LABELS_INPUT=( "Power User Account - add a new user to the system" \
82 | "None. Exit this User account installer" )
83 | makeselect_input2
84 | singleselect SELECTED "$OPTIONS_STRING"
85 |
86 |
87 | #---- Create New Power User Accounts
88 | if [ "$RESULTS" = TYPE01 ]
89 | then
90 | section "Create a Power User Account"
91 |
92 | # Create new user list
93 | new_user_LIST=()
94 |
95 | while true
96 | do
97 | #---- Create a new username
98 | while true
99 | do
100 | input_username_val
101 | if [ "$(egrep "^${USERNAME}" /etc/passwd > /dev/null; echo $?)" = 0 ]
102 | then
103 | warn "The user '$USERNAME' already exists."
104 | while true
105 | do
106 | read -p "Do you want to try another user name [y/n]? " -n 1 -r YN
107 | echo
108 | case $YN in
109 | [Yy]*)
110 | info "You have chosen to try another user name.\nTry again..."
111 | echo
112 | break 1
113 | ;;
114 | [Nn]*)
115 | echo
116 | break 3
117 | ;;
118 | *)
119 | warn "Error! Entry must be 'y' or 'n'. Try again..."
120 | echo
121 | ;;
122 | esac
123 | done
124 | else
125 | break
126 | fi
127 | done
128 | echo
129 |
130 | msg "Choose your new user's group member account..."
131 | OPTIONS_VALUES_INPUT=( "GRP01" "GRP02" "GRP03" )
132 | OPTIONS_LABELS_INPUT=( "Medialab - Everything to do with media (i.e movies, series and music)" \
133 | "Homelab - Everything to do with a smart home including medialab" \
134 | "Privatelab - Private storage including medialab & homelab rights" )
135 | makeselect_input2
136 | singleselect SELECTED "$OPTIONS_STRING"
137 |
138 | if [ "$RESULTS" = GRP01 ]
139 | then
140 | USERGRP='medialab'
141 | elif [ "$RESULTS" = GRP02 ]
142 | then
143 | USERGRP='homelab -G medialab'
144 | elif [ "$RESULTS" = GRP03 ]
145 | then
146 | USERGRP='privatelab -G medialab,homelab'
147 | fi
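# NOTE: $USERGRP deliberately packs two fields - the primary group plus an
# optional '-G <supplementary groups>' flag. The 'read USER PASSWORD GROUP USERMOD'
# loop below splits it back out, so the useradd call becomes e.g. (hypothetical user):
#   useradd -g privatelab -G medialab,homelab -m -d /srv/nas-01/homes/jane -s /bin/bash jane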
148 |
149 | # Create User password
150 | input_userpwd_val
151 | echo
152 |
153 | # Add Username, password, and group to list
154 | new_user_LIST+=( "$USERNAME $USER_PWD $USERGRP" )
155 |
156 | # List new user details
157 | msg "Your new user details are as follows:\n"
158 | printf '%s\n' "${new_user_LIST[@]}" | sed '1 i\USERNAME PASSWORD GROUP' | column -t | indent2
159 | echo
160 | # Option to create another user account
161 | msg "Do you want to create another new jailed user account..."
162 | OPTIONS_VALUES_INPUT=( "NO" "YES" )
163 | OPTIONS_LABELS_INPUT=( "No - I do not want to create another user account" \
164 | "Yes - I want to create another user account" )
165 | makeselect_input2
166 | singleselect SELECTED "$OPTIONS_STRING"
167 | if [ "$RESULTS" = YES ]
168 | then
169 | echo
170 | # break
171 | elif [ "$RESULTS" = NO ]
172 | then
173 | break 2
174 | fi
175 | done
176 |
177 | if [ ! "${#new_user_LIST[@]}" = 0 ]
178 | then
179 | # Add user to the system
180 | while read USER PASSWORD GROUP USERMOD
181 | do
182 | pass=$(perl -e 'print crypt($ARGV[0], "password")' "$PASSWORD")
183 | # User home folder pre-existing
184 | if [ -d "$HOME_BASE/$USER" ]
185 | then
186 | # Chattr set user desktop folder attributes to -a
187 | while read dir
188 | do
189 | chattr -i $HOME_BASE/$USER/$dir/.foo_protect
190 | done < <( ls $HOME_BASE/$USER )
191 | msg "Creating new user ${USER}..."
192 | useradd -g $GROUP -p $pass $USERMOD -m -d $HOME_BASE/$USER -s /bin/bash $USER
193 | msg "Creating default home folders (xdg-user-dirs-update)..."
194 | sudo -iu $USER xdg-user-dirs-update
195 | msg "Creating SSH folder and authorised keys file for user ${USER}..."
196 | mkdir -p $HOME_BASE/$USER/.ssh
197 | touch $HOME_BASE/$USER/.ssh/authorized_keys
198 | chmod -R 0700 $HOME_BASE/$USER
199 | chown -R $USER:$GROUP $HOME_BASE/$USER
200 | ssh-keygen -o -q -t ed25519 -a 100 -f $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519 -N ""
201 | cat $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519.pub >> $HOME_BASE/$USER/.ssh/authorized_keys
202 | # Create ppk key for Putty or Filezilla or ProFTPd
203 | msg "Creating a private PPK key..."
204 | puttygen $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519 -o $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519.ppk
205 | msg "Creating a public ProFTPd RFC4716 format compliant key..."
206 | mkdir -p /etc/proftpd/authorized_keys
207 | touch /etc/proftpd/authorized_keys/${USER}
208 | # RFC4716 export is for ProFTPd only; authorized_keys already holds the native OpenSSH public key (added above)
209 | ssh-keygen -e -f $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519.pub >> /etc/proftpd/authorized_keys/${USER}
210 | msg "Backing up ${USER} latest SSH keys..."
211 | BACKUP_DATE=$(date +%Y%m%d-%T)
212 | mkdir -p /srv/$HOSTNAME/sshkey/${HOSTNAME}_users/${USER,,}_${BACKUP_DATE}
213 | chown -R root:privatelab /srv/$HOSTNAME/sshkey/${HOSTNAME}_users/${USER,,}_${BACKUP_DATE}
214 | chmod 0750 /srv/$HOSTNAME/sshkey/${HOSTNAME}_users/${USER,,}_${BACKUP_DATE}
215 | cp $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519* /srv/$HOSTNAME/sshkey/${HOSTNAME}_users/${USER,,}_${BACKUP_DATE}/
216 | msg "Creating $USER smb account..."
217 | (echo ${PASSWORD}; echo ${PASSWORD} ) | smbpasswd -s -a $USER
218 | info "User $USER has been added to the system. Existing home folder found.\nUsing existing home folder."
219 | echo
220 | elif [ ! -d "$HOME_BASE/$USER" ]; then # Create new user home folder
221 | msg "Creating new user $USER..."
222 | useradd -g $GROUP -p $pass $USERMOD -m -d $HOME_BASE/$USER -s /bin/bash $USER
223 | msg "Creating default home folders (xdg-user-dirs-update)..."
224 | sudo -iu $USER xdg-user-dirs-update --force
225 | msg "Creating SSH folder and authorised keys file for user $USER..."
226 | mkdir -p $HOME_BASE/$USER/.ssh
227 | touch $HOME_BASE/$USER/.ssh/authorized_keys
228 | chmod -R 0700 $HOME_BASE/$USER
229 | chown -R $USER:$GROUP $HOME_BASE/$USER
230 | ssh-keygen -o -q -t ed25519 -a 100 -f $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519 -N ""
231 | cat $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519.pub >> $HOME_BASE/$USER/.ssh/authorized_keys
232 | # Create ppk key for Putty or Filezilla or ProFTPd
233 | msg "Creating a private PPK key..."
234 | puttygen $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519 -o $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519.ppk
235 | msg "Creating a public ProFTPd RFC4716 format compliant key..."
236 | mkdir -p /etc/proftpd/authorized_keys
237 | touch /etc/proftpd/authorized_keys/$USER
238 | # RFC4716 export is for ProFTPd only; authorized_keys already holds the native OpenSSH public key (added above)
239 | ssh-keygen -e -f $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519.pub >> /etc/proftpd/authorized_keys/$USER
240 | msg "Backing up ${USER} latest SSH keys..."
241 | BACKUP_DATE=$(date +%Y%m%d-%T)
242 | mkdir -p /srv/$HOSTNAME/sshkey/${HOSTNAME}_users/${USER,,}_${BACKUP_DATE}
243 | chown -R root:privatelab /srv/$HOSTNAME/sshkey/${HOSTNAME}_users/${USER,,}_${BACKUP_DATE}
244 | chmod 0750 /srv/$HOSTNAME/sshkey/${HOSTNAME}_users/${USER,,}_${BACKUP_DATE}
245 | cp $HOME_BASE/$USER/.ssh/id_${USER,,}_ed25519* /srv/$HOSTNAME/sshkey/${HOSTNAME}_users/${USER,,}_${BACKUP_DATE}/
246 | msg "Creating ${USER} smb account..."
247 | (echo ${PASSWORD}; echo ${PASSWORD} ) | smbpasswd -s -a $USER
248 | info "User '$USER' has been added to the system."
249 | echo
250 | fi
251 | # Chattr set user desktop folder attributes to +i
252 | while read dir
253 | do
254 | touch $HOME_BASE/$USER/$dir/.foo_protect
255 | chattr +i $HOME_BASE/$USER/$dir/.foo_protect
256 | done < <( ls $HOME_BASE/$USER )
257 | done < <( printf '%s\n' "${new_user_LIST[@]}" )
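# NOTE: the '.foo_protect' marker files are pinned with 'chattr +i' (immutable)
# so users cannot delete or rename their default home folders. To inspect or
# release one manually (hypothetical path, assumes an attr-capable filesystem):
#   lsattr /srv/nas-01/homes/jane/Documents/.foo_protect
#   chattr -i /srv/nas-01/homes/jane/Documents/.foo_protect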
258 |
259 | #---- Email User SSH Keys
260 | if [ "$SMTP_STATUS" = 1 ]
261 | then
262 | section "Email User Credentials & SSH keys"
263 | echo
264 | msg_box "#### PLEASE READ CAREFULLY - EMAIL NEW USER CREDENTIALS ####\n
265 | You can email a new user's login credentials and ssh keys to the NAS system administrator. The NAS system administrator can then forward the email(s) to each new user.
266 |
267 | The email will include the following information and attachments:
268 | -- Username
269 | -- Password
270 | -- User Group
271 | -- Private SSH Key (Standard)
272 | -- Private SSH Key (PPK Version)
273 | -- SMB NAS Server connection credentials
274 | -- SMB Status
275 | -- SFTP NAS connection credentials
276 | -- Account type (folder access level)"
277 | echo
278 | while true
279 | do
280 | read -p "Email new users credentials & SSH key to your systems administrator [y/n]? " -n 1 -r YN
281 | echo
282 | case $YN in
283 | [Yy]*)
284 | while read USER PASSWORD GROUP USERMOD
285 | do
286 | source $DIR/email_templates/pve_nas_ct_newuser_msg.sh
287 | msg "Sending '$USER' credentials and ssh key package to '$PVE_ROOT_EMAIL'..."
288 | sendmail -t < email_body.html
289 | info "Email sent. Check your system administrators inbox."
290 | done < <( printf '%s\n' "${new_user_LIST[@]}" )
291 | break
292 | ;;
293 | [Nn]*)
294 | info "You have chosen to skip this step. Not sending any email(s)."
295 | echo
296 | break
297 | ;;
298 | *)
299 | warn "Error! Entry must be 'y' or 'n'. Try again..."
300 | echo
301 | ;;
302 | esac
303 | done
304 | fi
305 | echo
306 | else
307 | msg "No new users have been created."
308 | echo
309 | fi
310 | fi
311 |
312 |
313 | #---- Exit the script
314 | if [ "$RESULTS" = TYPE00 ]
315 | then
316 | msg "You have chosen not to proceed. Moving on..."
317 | echo
318 | fi
319 |
320 | #---- Finish Line ------------------------------------------------------------------
321 |
322 | # Cleanup
323 | if [ -z "${PARENT_EXEC+x}" ]
324 | then
325 | trap cleanup EXIT
326 | fi
327 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/ubuntu/pve_nas_ct_deleteuser.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_ct_deleteuser.sh
4 | # Description: Delete a PVE NAS user account
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 |
9 | # Command to run script
10 | # bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_toolbox.sh)"
11 |
12 | #---- Source -----------------------------------------------------------------------
13 |
14 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
15 | COMMON_PVE_SRC_DIR="$DIR/../../common/pve/src"
16 |
17 | #---- Dependencies -----------------------------------------------------------------
18 |
19 | # Run Bash Header
20 | source $COMMON_PVE_SRC_DIR/pvesource_bash_defaults.sh
21 |
22 | # Check user is root
23 | if [ ! "$(id -u)" = 0 ]
24 | then
25 | warn "This script needs to run under 'root'. Exiting in 2 seconds.\nTry again..."
26 | sleep 2
27 | exit 0
28 | fi
29 |
30 | #---- Static Variables -------------------------------------------------------------
31 |
32 | # Easy Script Section Header Body Text
33 | SECTION_HEAD='PVE NAS'
34 |
35 | #---- Other Variables --------------------------------------------------------------
36 | #---- Other Files ------------------------------------------------------------------
37 | #---- Functions --------------------------------------------------------------------
38 |
39 | # Delete a username (permanent action)
40 | function delete_username() {
41 | # Usage: delete_username "harry" "medialab"
42 |
43 | local username="$1"
44 | local group="$2"
45 | # Homes folder
46 | local HOSTNAME=$(hostname)
47 | local HOME_BASE="/srv/$HOSTNAME/homes"
48 |
49 | # Deleting existing user name
50 | while true
51 | do
52 | read -p "Also delete the user '${WHITE}$username${NC}' home folder including their files [y/n]?: " -n 1 -r YN < /dev/tty
53 | echo
54 | case $YN in
55 | [Yy]*)
56 | # Chattr set user desktop folder attributes to -i
57 | while read dir
58 | do
59 | if [ -f "$(awk -F: -v v="$username" '{if ($1==v) print $6}' /etc/passwd)/$dir/.foo_protect" ]
60 | then
61 | chattr -i $(awk -F: -v v="$username" '{if ($1==v) print $6}' /etc/passwd)/$dir/.foo_protect
62 | fi
63 | done < <( ls $(awk -F: -v v="$username" '{if ($1==v) print $6}' /etc/passwd) )
64 |
65 | # Delete ProFTPd key
66 | rm -f /etc/proftpd/authorized_keys/$username
67 |
68 | # Delete SMB user
69 | smbpasswd -x $username 2>/dev/null
70 |
71 | # Delete Unix Account
72 | userdel -r $username 2>/dev/null
73 | echo
74 | break
75 | ;;
76 | [Nn]*)
77 | # Delete SMB user
78 | smbpasswd -x $username 2>/dev/null
79 |
80 | # Delete Unix Account
81 | userdel $username 2>/dev/null
82 | echo
83 | break
84 | ;;
85 | *)
86 | warn "Error! Entry must be 'y' or 'n'. Try again..."
87 | echo
88 | ;;
89 | esac
90 | done
91 | }
92 |
93 | # Delete a username (permanent action)
94 | function delete_jailed_username() {
95 | # Usage: delete_jailed_username "harry" "chrootjail"
96 | local username="$1"
97 | local group="$2"
98 | # Args
99 | local HOSTNAME=$(hostname)
100 | local CHROOT="/srv/$HOSTNAME/homes/chrootjail"
101 | local HOME_BASE="$CHROOT/homes"
102 |
103 | # Umount & remove existing user bind mounts
104 | if [[ $(grep "$HOME_BASE/$username" /etc/fstab) ]]
105 | then
106 | while read -r path
107 | do
108 | # Umount
109 | if [[ $(mount | grep $path) ]]
110 | then
111 | umount $path 2>/dev/null
112 | fi
113 |
114 | # Remove the entry from fstab
115 | escaped_path="$(echo "$path" | sed 's/\//\\\//g')"
116 | sed -i "/${escaped_path}/d" /etc/fstab
117 | done < <( grep $HOME_BASE/$username /etc/fstab | awk '{print $2}' ) # listing of bind mounts
118 | fi
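# NOTE: a jailed user's shares are exposed through fstab bind mounts, one per
# folder. A hypothetical entry the loop above would unmount and strip:
#   /srv/nas-01/photo  /srv/nas-01/homes/chrootjail/homes/jane_injail/photo  none  bind  0  0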
119 |
120 | # Deleting user
121 | while true
122 | do
123 | read -p "Also delete user '${WHITE}$username${NC}' home folder including user files[y/n]?: " -n 1 -r YN < /dev/tty
124 | echo
125 | case $YN in
126 | [Yy]*)
127 | # Chattr set user desktop folder attributes to -i
128 | while read dir
129 | do
130 | if [ -f "$HOME_BASE/$username/$dir/.foo_protect" ]
131 | then
132 | chattr -i $HOME_BASE/$username/$dir/.foo_protect
133 | fi
134 | done < <( ls $HOME_BASE/$username )
135 |
136 | # Delete ProFTPd key
137 | rm -f /etc/proftpd/authorized_keys/$username
138 |
139 | # Delete user
140 | userdel $username 2>/dev/null
141 | rm -R $HOME_BASE/$username 2>/dev/null
142 | sed -i "/^$username/d" $CHROOT/etc/passwd
143 |
144 | # Remove other User folders
145 | if [ -d "/srv/$HOSTNAME/downloads/user/$(echo "$username" | awk -F '_' '{print $1}')_downloads" ]
146 | then
147 | chattr -i /srv/$HOSTNAME/downloads/user/$(echo "$username" | awk -F '_' '{print $1}')_downloads/.foo_protect
148 | rm -R /srv/$HOSTNAME/downloads/user/$(echo "$username" | awk -F '_' '{print $1}')_downloads
149 | fi
150 | if [ -d "/srv/$HOSTNAME/photo/$(echo "$username" | awk -F '_' '{print $1}')_photo" ]
151 | then
152 | chattr -i /srv/$HOSTNAME/photo/$(echo "$username" | awk -F '_' '{print $1}')_photo/.foo_protect
153 | rm -R /srv/$HOSTNAME/photo/$(echo "$username" | awk -F '_' '{print $1}')_photo
154 | fi
155 | if [ -d "/srv/$HOSTNAME/video/homevideo/$(echo "$username" | awk -F '_' '{print $1}')_homevideo" ]
156 | then
157 | chattr -i /srv/$HOSTNAME/video/homevideo/$(echo "$username" | awk -F '_' '{print $1}')_homevideo/.foo_protect
158 | rm -R /srv/$HOSTNAME/video/homevideo/$(echo "$username" | awk -F '_' '{print $1}')_homevideo
159 | fi
160 | echo
161 | break
162 | ;;
163 | [Nn]*)
164 | # Delete user
165 | userdel $username 2>/dev/null
166 | sed -i "/^$username/d" $CHROOT/etc/passwd
167 | echo
168 | break
169 | ;;
170 | *)
171 | warn "Error! Entry must be 'y' or 'n'. Try again..."
172 | echo
173 | ;;
174 | esac
175 | done
176 | }
177 |
178 | #---- Body -------------------------------------------------------------------------
179 |
180 | #---- Prerequisites
181 |
182 | # Create user list
183 | user_LIST=()
184 | # user_LIST+=( $(cat /etc/passwd | egrep "^*injail\:" | awk -F':' 'BEGIN{OFS=FS} {if ($4 ~ /65608/) ($4="chrootjail"); print $1, $4 }') )
185 | user_LIST+=( $(cat /etc/passwd | awk -F':' 'BEGIN{OFS=FS} {if ($4 ~ /65605|65606|65607|65608/ && $3 !~ /1605|1606|1607/ ) {print $1, $4}}' | awk -F':' '{if ($2 == "65605") $2="medialab"; else if ($2 == "65606") $2="homelab"; else if ($2 == "65607") $2="privatelab"; else if ($2 == "65608") $2="chrootjail"; print $1":"$2}') )
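# GID map used above (fixed by the NAS build): 65605=medialab, 65606=homelab,
# 65607=privatelab, 65608=chrootjail. UIDs 1605-1607 (the built-in media, home
# and private app accounts) are excluded so they can never be deleted here.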
186 |
187 | # Check if users exist for deletion
188 | if [ "${#user_LIST[@]}" = 0 ]
189 | then
190 | warn "There are no valid users for deletion. This script can only delete users who are members of medialab, homelab, privatelab and chrootjail groups. Users which belong to other groups can be deleted using the NAS Webmin webGUI. Bye.."
191 | echo
192 | exit 0
193 | fi
194 |
195 | #---- Select user for deletion
196 |
197 | section "$SECTION_HEAD - Select the users for deletion"
198 |
199 | msg_box "#### PLEASE READ CAREFULLY - USER ACCOUNT DELETION ####\n
200 | Select any number of users you want to permanently delete. The User will be prompted with the option to keep or remove each selected user's home folder and their private files. If you choose to delete a user and their home folder, all personal files will be permanently lost and not recoverable."
201 | echo
202 | OPTIONS_VALUES_INPUT=$(printf '%s\n' "${user_LIST[@]}" | sed -e '$aTYPE00')
203 | OPTIONS_LABELS_INPUT=$(printf '%s\n' "${user_LIST[@]}" | awk -F':' '{ print "User name: "$1, "| Member of user group: "$2; }' | sed -e '$aNone. Exit this installer')
204 | makeselect_input1 "$OPTIONS_VALUES_INPUT" "$OPTIONS_LABELS_INPUT"
205 | multiselect SELECTED "$OPTIONS_STRING"
206 |
207 | # Abort option
208 | if [ "$RESULTS" = 'TYPE00' ] || [ -z ${RESULTS} ]
209 | then
210 | msg "You have chosen not to proceed. Aborting. Bye..."
211 | echo
212 | exit 0
213 | fi
214 |
215 | #---- Delete the user
216 |
217 | # Delete each selected username
218 | while IFS=':' read username group
219 | do
220 | # Run delete function
221 | if [[ "$group" =~ ^chrootjail$ ]]
222 | then
223 | # Delete Chrootjail user
224 | delete_jailed_username "$username" "$group"
225 | elif [[ "$group" =~ ^(privatelab|homelab|medialab)$ ]]
226 | then
227 | # Delete standard user
228 | delete_username "$username" "$group"
229 | fi
230 | done < <( printf '%s\n' "${RESULTS[@]}" )
231 | #-----------------------------------------------------------------------------------
232 |
--------------------------------------------------------------------------------
/src/ubuntu/pve_nas_ct_nas_chrootapplist:
--------------------------------------------------------------------------------
1 | /bin/bash
2 | /bin/rm
3 | /bin/ls
4 | /bin/cat
5 | /bin/echo
6 | /bin/cp
7 | /bin/mkdir
8 | /bin/nano
9 | /bin/mv
10 | /bin/touch
11 | /bin/date
12 | /usr/bin/rsync
13 |
--------------------------------------------------------------------------------
/src/ubuntu/pve_nas_ct_nas_installer.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_ct_nas_installer.sh
4 | # Description: This script is for creating a PVE Ubuntu based NAS
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 |
9 | #---- Source Github
10 | # bash -c "$(wget -qLO - https://raw.githubusercontent.com/ahuacate/pve-nas/main/pve_nas_installer.sh)"
11 |
12 | #---- Source local Git
13 | # /mnt/pve/nas-01-git/ahuacate/pve-nas/pve_nas_installer.sh
14 |
15 | #---- Source -----------------------------------------------------------------------
16 | #---- Dependencies -----------------------------------------------------------------
17 |
18 | # Check SMTP Status
19 | check_smtp_status
20 |
21 | # Check PVE host subid mapping
22 | check_host_subid
23 |
24 | #---- Static Variables -------------------------------------------------------------
25 |
26 | # Easy Script Section Head
27 | SECTION_HEAD='PVE Ubuntu NAS'
28 |
29 | # PVE host IP
30 | PVE_HOST_IP=$(hostname -i)
31 | PVE_HOSTNAME=$(hostname)
32 |
33 | # SSHd Status (0 is enabled, 1 is disabled)
34 | SSH_ENABLE=1
35 |
36 | # Developer enable git mounts inside CT (0 is enabled, 1 is disabled)
37 | DEV_GIT_MOUNT_ENABLE=1
38 |
39 | # Set file source (path/filename) of preset variables for 'pvesource_ct_createvm.sh'
40 | PRESET_VAR_SRC="$( dirname "${BASH_SOURCE[0]}" )/$( basename "${BASH_SOURCE[0]}" )"
41 |
42 | #---- Other Variables --------------------------------------------------------------
43 |
44 | #---- Common Machine Variables
45 | # VM Type ( 'ct' or 'vm' only lowercase )
46 | #VM_TYPE='' Set at NAS type selection
47 | # Use DHCP. '0' to disable, '1' to enable.
48 | NET_DHCP='1'
49 | # Set address type 'dhcp4'/'dhcp6' or '0' to disable.
50 | NET_DHCP_TYPE='dhcp4'
51 | # CIDR IPv4
52 | CIDR='24'
53 | # CIDR IPv6
54 | CIDR6='64'
55 | # SSHd Port
56 | SSH_PORT='22'
57 |
58 | #----[COMMON_GENERAL_OPTIONS]
59 | # Hostname
60 | HOSTNAME='nas-01'
61 | # Description for the Container (one word only, no spaces). Shown in the web-interface CT’s summary.
62 | DESCRIPTION=''
63 | # Virtual OS/processor architecture.
64 | ARCH='amd64'
65 | # Allocated memory or RAM (MiB).
66 | MEMORY='512'
67 | # Limit number of CPU sockets to use. Value 0 indicates no CPU limit.
68 | CPULIMIT='0'
69 | # CPU weight for a VM. Argument is used in the kernel fair scheduler. The larger the number is, the more CPU time this VM gets.
70 | CPUUNITS='1024'
71 | # The number of cores assigned to the vm/ct. Do not edit - its auto set.
72 | CORES='1'
73 |
74 | #----[COMMON_NET_OPTIONS]
75 | # Bridge to attach the network device to.
76 | BRIDGE='vmbr0'
77 | # A common MAC address with the I/G (Individual/Group) bit not set.
78 | HWADDR=""
79 | # Controls whether this interface’s firewall rules should be used.
80 | FIREWALL='1'
81 | # VLAN tag for this interface (value 0 for none, or VLAN[2-N] to enable).
82 | TAG='0'
83 | # VLAN ids to pass through the interface
84 | TRUNKS=""
85 | # Apply rate limiting to the interface (MB/s). Value "" for unlimited.
86 | RATE=""
87 | # MTU - Maximum transfer unit of the interface.
88 | MTU=""
89 |
90 | #----[COMMON_NET_DNS_OPTIONS]
91 | # Nameserver server IP (IPv4 or IPv6) (value "" for none).
92 | NAMESERVER='192.168.1.5'
93 | # Search domain name (local domain)
94 | SEARCHDOMAIN='local'
95 |
96 | #----[COMMON_NET_STATIC_OPTIONS]
97 | # IP address (IPv4). Only works with static IP (DHCP=0).
98 | IP='192.168.1.10'
99 | # IP address (IPv6). Only works with static IP (DHCP=0).
100 | IP6=''
101 | # Default gateway for traffic (IPv4). Only works with static IP (DHCP=0).
102 | GW='192.168.1.5'
103 | # Default gateway for traffic (IPv6). Only works with static IP (DHCP=0).
104 | GW6=''
105 |
106 | #---- PVE CT
107 | #----[CT_GENERAL_OPTIONS]
108 | # Unprivileged container. '0' to disable, '1' to enable/yes.
109 | CT_UNPRIVILEGED='0'
110 | # Memory swap
111 | CT_SWAP='512'
112 | # OS
113 | CT_OSTYPE='ubuntu'
114 | # Onboot startup
115 | CT_ONBOOT='1'
116 | # Timezone
117 | CT_TIMEZONE='host'
118 | # Root credentials
119 | CT_PASSWORD='ahuacate'
120 | # Virtual OS/processor architecture.
121 | CT_ARCH='amd64'
122 |
123 | #----[CT_FEATURES_OPTIONS]
124 | # Allow using fuse file systems in a container.
125 | CT_FUSE='0'
126 | # For unprivileged containers only: Allow the use of the keyctl() system call.
127 | CT_KEYCTL='0'
128 | # Allow mounting file systems of specific types. (Use 'nfs' or 'cifs' or 'nfs;cifs' for both or leave empty "")
129 | CT_MOUNT='nfs'
130 | # Allow nesting. Best used with unprivileged containers with additional id mapping.
131 | CT_NESTING='1'
132 | # A public key for connecting to the root account over SSH (insert path).
133 |
134 | #----[CT_ROOTFS_OPTIONS]
135 | # Virtual Disk Size (GB).
136 | CT_SIZE='5'
137 | # Explicitly enable or disable ACL support.
138 | CT_ACL='1'
139 |
140 | #----[CT_STARTUP_OPTIONS]
141 | # Startup and shutdown behavior ( '--startup order=1,up=1,down=1' ).
142 | # Order is a non-negative number defining the general startup order. Up=1 means first to start up. Shutdown is done in reverse order, so down=1 means last to shut down.
143 | # Up: Startup delay. Defines the interval between this container's start and subsequent container starts. For example, set it to 240 if you want to wait 240 seconds before starting other containers.
144 | # Down: Shutdown timeout. Defines the duration in seconds Proxmox VE should wait for the container to be offline after issuing a shutdown command. By default this value is set to 60, which means Proxmox VE will issue a shutdown request, wait 60s for the machine to be offline, and report the shutdown action as failed if the machine is still online after 60s.
145 | CT_ORDER='1'
146 | CT_UP='30'
147 | CT_DOWN='60'
148 |
149 | #----[CT_NET_OPTIONS]
150 | # Name of the network device as seen from inside the VM/CT.
151 | CT_NAME='eth0'
152 | CT_TYPE='veth'
153 |
154 | #----[CT_OTHER]
155 | # OS Version
156 | CT_OSVERSION='22.04'
157 | # CTID numeric ID of the given container.
158 | CTID='112'
159 |
160 |
161 | #----[App_UID_GUID]
162 | # App user
163 | APP_USERNAME='root'
164 | # App user group
165 | APP_GRPNAME='root'
166 |
167 | #----[REPO_PKG_NAME]
168 | # Repo package name
169 | REPO_PKG_NAME='pve-nas'
170 |
171 | #---- Other Files ------------------------------------------------------------------
172 |
173 | # Required PVESM Storage Mounts for CT ( new version )
174 | unset pvesm_required_LIST
175 | pvesm_required_LIST=()
176 | while IFS= read -r line; do
177 | [[ "$line" =~ ^\#.*$ ]] && continue
178 | pvesm_required_LIST+=( "$line" )
179 | done << EOF
180 | # Example
181 | # backup:CT settings backup storage
182 | EOF
183 |
184 | #---- Functions --------------------------------------------------------------------
185 | #---- Body -------------------------------------------------------------------------
186 |
187 | #---- Introduction
188 | source $COMMON_PVE_SRC_DIR/pvesource_ct_intro.sh
189 |
190 | #---- Check SMTP status
191 | if [ "$SMTP_STATUS" = 0 ]
192 | then
193 | # Options if SMTP is inactive
194 | display_msg='Before proceeding with this installer we RECOMMEND you first configure all PVE hosts to support SMTP email services. A working SMTP server emails the NAS System Administrator all new User login credentials, SSH keys, application-specific login credentials and written guidelines. A PVE host SMTP server makes NAS administration much easier. You will also be alerted to unwarranted login attempts and other system-critical events. The PVE Host SMTP Server installer is available in our PVE Host Toolbox located at GitHub:\n\n -- https://github.com/ahuacate/pve-host'
195 |
196 | msg_box "#### PLEASE READ CAREFULLY ####\n\n$(echo ${display_msg})"
197 | echo
198 | msg "Select your options..."
199 | OPTIONS_VALUES_INPUT=( "TYPE01" "TYPE02" "TYPE00" )
200 | OPTIONS_LABELS_INPUT=( "Agree - Install PVE host SMTP email support" \
201 | "Decline - Proceed without SMTP email support" \
202 | "None. Exit this installer" )
203 | makeselect_input2
204 | singleselect SELECTED "$OPTIONS_STRING"
205 |
206 | if [ "$RESULTS" = 'TYPE01' ]
207 | then
208 | # Exit and install SMTP
209 | msg "Go to our Github site and run our PVE Host Toolbox selecting our 'SMTP Email Setup' option:\n\n -- https://github.com/ahuacate/pve-host\n\nRe-run the NAS installer after your have configured '$(hostname)' SMTP email support. Bye..."
210 | echo
211 | exit 0
212 | elif [ "$RESULTS" = 'TYPE02' ]
213 | then
214 | # Proceed without SMTP email support
215 | msg "You have chosen to proceed without SMTP email support. You can always manually configure Postfix SMTP services at a later stage."
216 | echo
217 | elif [ "$RESULTS" = 'TYPE00' ]
218 | then
219 | msg "You have chosen not to proceed. Aborting. Bye..."
220 | echo
221 | exit 0
222 | fi
223 | fi
224 |
225 |
226 | #---- Exit selection
227 | if [ "$TYPE" = TYPE01 ]
228 | then
229 | msg "Sorry. Your selected option is not available. Try again. Bye..."
230 | echo
231 | return
232 | elif [ "$TYPE" = TYPE00 ]
233 | then
234 | msg "You have chosen not to proceed. Aborting. Bye..."
235 | echo
236 | exit 0
237 | fi
238 |
239 |
240 | #---- Setup PVE CT Variables
241 |
242 | # VM Type ( 'ct' or 'vm' only lowercase )
243 | VM_TYPE='ct'
244 | # Ubuntu NAS (all)
245 | source $COMMON_PVE_SRC_DIR/pvesource_set_allvmvars.sh
246 |
247 |
248 | #---- Prepare disk storage
249 |
250 | # Ubuntu NAS (PVE LVM/ZFS/Basic)
251 | # source $SHARED_DIR/pve_nas_identify_storagedisk.sh
252 | source $SHARED_DIR/pve_nas_select_fs_build.sh
253 |
254 |
255 | #---- Setup PVE CT Variables
256 | source $COMMON_PVE_SRC_DIR/pvesource_ct_createvm.sh
257 |
258 |
259 | #---- Pre-Configuring PVE CT
260 | # Create CT Bind Mounts
261 | source $COMMON_PVE_SRC_DIR/pvesource_ct_createbindmounts.sh
262 |
263 | # Create LXC Mount Points
264 | section "Create NAS CT mount point to host storage pool"
265 |
266 | # Add LXC mount points
267 | if [ -f pvesm_input_list ] && [ "$(cat pvesm_input_list | wc -l)" -ge 1 ]
268 | then
269 | msg "Creating NAS CT mount points..."
270 | i=$(cat pvesm_input_list | wc -l)
271 | pct set $CTID -mp${i} ${PVE_SRC_MNT},mp=/srv/${HOSTNAME},acl=1 >/dev/null
272 | # pct set $CTID -mp${i} /${POOL}/${HOSTNAME},mp=/srv/${HOSTNAME},acl=1 >/dev/null
273 | info "CT $CTID mount point created: ${YELLOW}/srv/${HOSTNAME}${NC}"
274 | echo
275 | else
276 | pct set $CTID -mp0 ${PVE_SRC_MNT},mp=/srv/${HOSTNAME},acl=1 >/dev/null
277 | # pct set $CTID -mp0 /${POOL}/${HOSTNAME},mp=/srv/${HOSTNAME},acl=1 >/dev/null
278 | info "CT $CTID mount point created: ${YELLOW}/srv/${HOSTNAME}${NC}"
279 | echo
280 | fi
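# NOTE: 'pct set -mp...' writes a mount point entry into the CT config. With a
# hypothetical ZFS pool 'tank', the result in /etc/pve/lxc/$CTID.conf looks like:
#   mp0: /tank/nas-01,mp=/srv/nas-01,acl=1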
281 |
282 | #---- Configure New CT OS
283 | source $COMMON_PVE_SRC_DIR/pvesource_ct_ubuntubasics.sh
284 |
285 |
286 | #---- PVE NAS ----------------------------------------------------------------------
287 |
288 | #---- PVE NAS build
289 |
290 | # Set DIR Schema ( PVE host or CT mkdir )
291 | if [ "$(uname -a | grep -Ei --color=never '.*pve*' &> /dev/null; echo $?)" = 0 ]
292 | then
293 | DIR_SCHEMA="$PVE_SRC_MNT"
294 | # DIR_SCHEMA="/${POOL}/${HOSTNAME}"
295 | else
296 | # Select or input a storage path ( set DIR_SCHEMA )
297 | source $COMMON_DIR/nas/src/nas_identify_storagepath.sh
298 | fi
299 |
300 | #---- Create default base and sub folders
301 | source $COMMON_DIR/nas/src/nas_basefoldersetup.sh
302 | # Create temporary files of lists
303 | printf "%s\n" "${nas_subfolder_LIST[@]}" > nas_basefoldersubfolderlist
304 | printf '%s\n' "${nas_basefolder_LIST[@]}" > nas_basefolderlist
305 | printf '%s\n' "${nas_basefolder_extra_LIST[@]}" > nas_basefolderlist_extra
306 |
307 | #---- Configure PVE NAS Ubuntu CT
308 | section "Configure PVE NAS Ubuntu CT"
309 |
310 | # Start container
311 | msg "Starting NAS CT..."
312 | pct_start_waitloop
313 |
314 | # Pushing variables to NAS CT
315 | msg "Pushing variables and conf to NAS CT..."
316 | printf "%b\n" '#!/usr/bin/env bash' \
317 | "POOL='${POOL}'" \
318 | "HOSTNAME='${HOSTNAME}'" \
319 | "SECTION_HEAD='${SECTION_HEAD}'" \
320 | "XTRA_SHARES='${XTRA_SHARES}'" \
321 | "SSH_PORT='22'" \
322 | "PVE_HOST_IP='${PVE_HOST_IP}'" \
323 | "DIR_SCHEMA='/srv/${HOSTNAME}'" \
324 | "GIT_REPO='${GIT_REPO}'" \
325 | "APP_NAME='nas'" \
326 | "PVE_HOSTNAME='${PVE_HOSTNAME}'" > $TEMP_DIR/pve_nas_ct_variables.sh
327 | pct push $CTID $TEMP_DIR/pve_nas_ct_variables.sh /tmp/pve_nas_ct_variables.sh -perms 755
328 | # Share folder lists
329 | pct push $CTID $TEMP_DIR/nas_basefolderlist /tmp/nas_basefolderlist
330 | pct push $CTID $TEMP_DIR/nas_basefoldersubfolderlist /tmp/nas_basefoldersubfolderlist
331 | pct push $CTID $TEMP_DIR/nas_basefolderlist_extra /tmp/nas_basefolderlist_extra
332 |
333 | # Pushing PVE-nas setup scripts to NAS CT
334 | msg "Pushing NAS configuration scripts to NAS CT..."
335 | pct push $CTID /tmp/${GIT_REPO}.tar.gz /tmp/${GIT_REPO}.tar.gz
336 | pct exec $CTID -- tar -zxf /tmp/${GIT_REPO}.tar.gz -C /tmp
337 | echo
338 |
339 | #---- Start NAS setup script
340 | pct exec $CTID -- bash -c "/tmp/pve-nas/src/ubuntu/pve-nas_sw.sh"
341 |
342 | #---- Install and Configure Fail2ban
343 | pct exec $CTID -- bash -c "export SSH_PORT=\$(grep Port /etc/ssh/sshd_config | sed '/^#/d' | awk '{ print \$2 }') && /tmp/pve-nas/common/pve/src/pvesource_ct_ubuntu_installfail2ban.sh"
344 |
345 | #---- Install and Configure SSMTP Email Alerts
346 | source $COMMON_PVE_SRC_DIR/pvesource_install_postfix_client.sh
347 |
348 |
349 | #---- Finish Line ------------------------------------------------------------------
350 |
351 | section "Completion Status"
352 |
353 | # Interface
354 | interface=$(pct exec $CTID -- ip route ls | grep default | grep -Po '(?<=dev )(\S+)')
355 | # Get IP type (ip -4 addr show eth0 )
356 | if [[ "$(pct exec $CTID -- ip addr show $interface | grep -q dynamic > /dev/null; echo $?)" = 0 ]]
357 | then
358 | ip_type='dhcp - best use dhcp IP reservation'
359 | else
360 | ip_type='static IP'
361 | fi
362 |
363 | #---- Set display text
364 | # Check Webmin/Cockpit Status
365 | if [[ $(pct exec $CTID -- dpkg -s webmin 2> /dev/null) ]]
366 | then
367 | # Webmin port
368 | port=10000
369 | # Webmin access URL
370 | display_msg1=( "https://$(pct exec $CTID -- hostname).$(pct exec $CTID -- hostname -d):$port/" )
371 | display_msg1+=( "https://$(pct exec $CTID -- hostname -I | sed -r 's/\s+//g'):$port/ ($ip_type)" )
372 | # Set webgui
373 | webgui_type=webmin
374 | elif [[ $(pct exec $CTID -- dpkg -s cockpit 2> /dev/null) ]]
375 | then
376 | # Cockpit port
377 | port=9090
378 | # Cockpit access URL
379 | display_msg1=( "https://$(pct exec $CTID -- hostname).$(pct exec $CTID -- hostname -d):$port/" )
380 | display_msg1+=( "https://$(pct exec $CTID -- hostname -I | sed -r 's/\s+//g'):$port/ ($ip_type)" )
381 | # Set webgui
382 | webgui_type=cockpit
383 | fi
384 |
385 | # Check Fail2ban Status
386 | if [[ $(pct exec $CTID -- dpkg -s fail2ban 2> /dev/null) ]]
387 | then
388 | display_msg2=( "Fail2ban SW:installed" )
389 | else
390 | display_msg2=( "Fail2ban SW:not installed" )
391 | fi
392 | # Check SMTP Mailserver Status
393 | if [ "$(pct exec $CTID -- bash -c 'if [ -f /etc/postfix/main.cf ]; then grep --color=never -Po "^ahuacate_smtp=\K.*" "/etc/postfix/main.cf" || true; else echo 0; fi')" = 1 ]
394 | then
395 | display_msg2+=( "SMTP Mail Server:installed" )
396 | else
397 | display_msg2+=( "SMTP Mail Server:not installed ( recommended install )" )
398 | fi
399 | # Check ProFTPd Status
400 | if [[ $(pct exec $CTID -- dpkg -s proftpd-core 2> /dev/null) ]]
401 | then
402 | display_msg2+=( "ProFTPd Server:installed" )
403 | else
404 | display_msg2+=( "ProFTPd Server:not installed" )
405 | fi
406 | # Upgrade NAS
407 | display_msg2+=( "Upgrade NAS OS:OS updates, releases, software packages and patches" )
408 | # Add ZFS Cache
409 | display_msg2+=( "Add ZFS Cache:ARC/L2ARC cache and ZIL log using SSD/NVMe" )
410 | # User Management
411 | display_msg3=( "Power User Accounts:For all privatelab, homelab or medialab accounts" )
412 | display_msg3+=( "Jailed User Accounts:For all jailed and restricted user accounts" )
413 | # File server login
414 | x='\\\\'
415 | display_msg4=( "$x${HOSTNAME}.$(hostname -d)\:" )
416 | display_msg4+=( "$x$(pct exec $CTID -- hostname -I | sed -r 's/\s+//g')\: (${ip_type})" )
417 |
418 | # Display msg
419 | msg_box "${HOSTNAME^^} installation was a success.\n\nTo manage your new Ubuntu NAS use ${webgui_type^} (a Linux web management tool). ${webgui_type^} login credentials are user 'root' and password '$CT_PASSWORD'. You can change your 'root' password using the ${webgui_type^} WebGUI.\n\n$(printf '%s\n' "${display_msg1[@]}" | indent2)\n\nUse our 'Easy Script Toolbox' to install add-ons and perform other tasks. More information is available here: https://github.com/ahuacate/pve-nas\n\n$(printf '%s\n' "${display_msg2[@]}" | column -s ":" -t -N "APPLICATION,STATUS" | indent2)\n\nAlso use our 'Easy Scripts Toolbox' to create or delete NAS user accounts.\n\n$(printf '%s\n' "${display_msg3[@]}" | column -s ":" -t -N "USER ACCOUNT TYPE,DESCRIPTION" | indent2)\n\nTo access ${HOSTNAME^^} files use SMB.\n\n$(printf '%s\n' "${display_msg4[@]}" | column -s ":" -t -N "SMB NETWORK ADDRESS" | indent2)\n\nNFSv4 is enabled and ready for creating PVE host storage mounts.\n\n${HOSTNAME^^} will now reboot."
420 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/ubuntu/pve_nas_ct_nas_toolbox.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_ct_nas_toolbox.sh
4 | # Description: Installer script for Proxmox Ubuntu NAS administration toolbox & Add-Ons
5 | # ----------------------------------------------------------------------------------
6 | #---- Source -----------------------------------------------------------------------
7 | #---- Dependencies -----------------------------------------------------------------
8 | #---- Static Variables -------------------------------------------------------------
9 | #---- Other Variables --------------------------------------------------------------
10 | #---- Other Files ------------------------------------------------------------------
11 | #---- Body -------------------------------------------------------------------------
12 |
13 | #---- Prerequisites
14 | # Check SMTP status
15 | check_smtp_status
16 |
17 | if [ "$SMTP_STATUS" = 0 ]
18 | then
19 | # Options if SMTP is inactive
20 | display_msg='Before proceeding with this installer we RECOMMEND you first configure all PVE hosts to support SMTP email services. A working SMTP server emails the NAS System Administrator all new User login credentials, SSH keys, application-specific login credentials and written guidelines. A PVE host SMTP server makes NAS administration much easier. You will also be alerted to unwarranted login attempts and other system-critical events. The PVE Host SMTP Server installer is available in our PVE Host Toolbox located at GitHub:\n\n -- https://github.com/ahuacate/pve-host'
21 |
22 | msg_box "#### PLEASE READ CAREFULLY ####\n\n$(echo ${display_msg})"
23 | echo
24 | msg "Select your options..."
25 | OPTIONS_VALUES_INPUT=( "TYPE01" "TYPE02" "TYPE00" )
26 | OPTIONS_LABELS_INPUT=( "Agree - Install PVE host SMTP email support" \
27 | "Decline - Proceed without SMTP email support" \
28 | "None. Exit this installer" )
29 | makeselect_input2
30 | singleselect SELECTED "$OPTIONS_STRING"
31 |
32 | if [ "$RESULTS" = 'TYPE01' ]
33 | then
34 | # Exit and install SMTP
35 | msg "Go to our Github site and run our PVE Host Toolbox selecting our 'SMTP Email Setup' option:\n\n -- https://github.com/ahuacate/pve-host\n\nRe-run the NAS installer after your have configured '$(hostname)' SMTP email support. Bye..."
36 | echo
37 | exit 0
38 | elif [ "$RESULTS" = 'TYPE02' ]
39 | then
40 | # Proceed without SMTP email support
41 | msg "You have chosen to proceed without SMTP email support. You can always manually configure Postfix SMTP services at a later stage."
42 | echo
43 | elif [ "$RESULTS" = 'TYPE00' ]
44 | then
45 | msg "You have chosen not to proceed. Aborting. Bye..."
46 | echo
47 | exit 0
48 | fi
49 | fi
50 |
51 | # Pushing PVE-nas setup scripts to NAS CT
52 | msg "Pushing NAS configuration scripts to NAS CT..."
53 | pct push $CTID $REPO_TEMP/${GIT_REPO}.tar.gz /tmp/${GIT_REPO}.tar.gz
54 | pct exec $CTID -- tar -zxf /tmp/${GIT_REPO}.tar.gz -C /tmp
55 | echo
56 |
57 |
58 | #---- Run Installer
59 | section "Select a Ubuntu NAS toolbox option"
60 | OPTIONS_VALUES_INPUT=( "TYPE01" "TYPE02" "TYPE03" "TYPE04" "TYPE05" "TYPE06" "TYPE07" "TYPE08" "TYPE09" "TYPE00" )
61 | OPTIONS_LABELS_INPUT=( "Power User Account - add a new user to the system" \
62 | "Jailed User Account - add a new user to the system" \
63 | "Delete Users - delete any user account (option to users keep home folder)" \
64 | "Upgrade NAS OS - software packages, OS and patches" \
65 | "Install Fail2Ban $(if [ "$(pct exec $CTID -- dpkg -s fail2ban >/dev/null 2>&1; echo $?)" = 0 ]; then echo "( installed & active )"; else echo "( not installed )"; fi)" \
66 | "Install SMTP Email Support $(if [ "$(pct exec $CTID -- bash -c 'if [ -f /etc/postfix/main.cf ]; then grep --color=never -Po "^ahuacate_smtp=\K.*" "/etc/postfix/main.cf" || true; else echo 0; fi')" = 1 ]; then echo "( installed & active )"; else echo "( not installed - recommended installation )"; fi)" \
67 | "Install ProFTPd Server $(if [ "$(pct exec $CTID -- dpkg -s proftpd-core >/dev/null 2>&1; echo $?)" = 0 ]; then echo "( installed & active )"; else echo "( not installed )"; fi)" \
68 | "Add ZFS Cache - create ARC/L2ARC/ZIL cache with dedicated SSD/NVMe drives" \
69 | "Restore & update default storage - reset default dirs, permissions and ACLs" \
70 | "None. Exit this installer" )
71 | makeselect_input2
72 | singleselect SELECTED "$OPTIONS_STRING"
73 |
74 | if [ "$RESULTS" = TYPE01 ]
75 | then
76 | #---- Check for SMTP support
77 | if [ "$SMTP_STATUS" = 1 ]
78 | then
79 | # PVE SMTP supported, check NAS
80 | if [ ! "$(pct exec $CTID -- bash -c 'if [ -f /etc/postfix/main.cf ]; then grep --color=never -Po "^ahuacate_smtp=\K.*" "/etc/postfix/main.cf" || true; else echo 0; fi')" = 1 ]
81 | then
82 | # Install and Configure SMTP Email on NAS
83 | source $REPO_TEMP/$GIT_REPO/common/pve/src/pvesource_install_postfix_client.sh
84 | fi
85 | fi
86 | #---- Create New Power User Accounts
87 | pct exec $CTID -- bash -c "export PVE_ROOT_EMAIL=$(pveum user list | awk -F " │ " '$1 ~ /root@pam/' | awk -F " │ " '{ print $3 }') && /tmp/$GIT_REPO/src/ubuntu/pve_nas_ct_addpoweruser.sh"
88 | elif [ "$RESULTS" = TYPE02 ]
89 | then
90 | #---- Check for SMTP support
91 | if [ "$SMTP_STATUS" = 1 ]
92 | then
93 | # PVE SMTP supported, check NAS
94 | if [ ! "$(pct exec $CTID -- bash -c 'if [ -f /etc/postfix/main.cf ]; then grep --color=never -Po "^ahuacate_smtp=\K.*" "/etc/postfix/main.cf" || true; else echo 0; fi')" = 1 ]
95 | then
96 | # Install and Configure SMTP Email on NAS
97 | source $REPO_TEMP/$GIT_REPO/common/pve/src/pvesource_install_postfix_client.sh
98 | fi
99 | fi
100 | #---- Create New Jailed User Accounts
101 | pct exec $CTID -- bash -c "export PVE_ROOT_EMAIL=$(pveum user list | awk -F " │ " '$1 ~ /root@pam/' | awk -F " │ " '{ print $3 }') && /tmp/$GIT_REPO/src/ubuntu/pve_nas_ct_addjailuser.sh"
102 | elif [ "$RESULTS" = TYPE03 ]
103 | then
104 | #---- Delete a User Account
105 | pct exec $CTID -- bash -c "/tmp/$GIT_REPO/src/ubuntu/pve_nas_ct_deleteuser.sh"
106 | elif [ "$RESULTS" = TYPE04 ]
107 | then
108 | #---- Perform a NAS upgrade
109 | pct exec $CTID -- bash -c "/tmp/$GIT_REPO/common/pve/tool/pvetool_ct_ubuntu_versionupdater.sh"
110 | elif [ "$RESULTS" = TYPE05 ]; then
111 | #---- Install and Configure Fail2ban
112 | pct exec $CTID -- bash -c "export SSH_PORT=\$(grep Port /etc/ssh/sshd_config | sed '/^#/d' | awk '{ print \$2 }') && /tmp/$GIT_REPO/common/pve/src/pvesource_ct_ubuntu_installfail2ban.sh"
113 | elif [ "$RESULTS" = TYPE06 ]
114 | then
115 | #---- Install and Configure SMTP Email
116 | source $REPO_TEMP/$GIT_REPO/common/pve/src/pvesource_install_postfix_client.sh
117 | elif [ "$RESULTS" = TYPE07 ]
118 | then
119 | #---- Install and Configure ProFTPd
120 | # Check if ProFTPd is installed
121 | if [ ! "$(pct exec $CTID -- dpkg -s proftpd-core >/dev/null 2>&1; echo $?)" = 0 ]
122 | then
123 | pct exec $CTID -- bash -c "/tmp/$GIT_REPO/common/pve/src/pvesource_ct_ubuntu_installproftpd.sh"
124 | else
125 | msg "ProFTPd is already installed..."
126 | fi
127 | pct exec $CTID -- bash -c "/tmp/$GIT_REPO/src/ubuntu/proftpd_settings/pve_nas_ct_proftpdsettings.sh"
128 | elif [ "$RESULTS" = TYPE08 ]
129 | then
130 | #---- Setup ZFS Cache
131 | source $REPO_TEMP/pve-nas/shared/pve_nas_create_zfs_cacheaddon.sh
132 | elif [ "$RESULTS" = TYPE09 ]
133 | then
134 | #---- Restore, update default storage folder permissions
135 | pct exec $CTID -- bash -c "/tmp/$GIT_REPO/src/ubuntu/pve_nas_ct_restoredirperm.sh"
136 | elif [ "$RESULTS" = TYPE00 ]
137 | then
138 | # Exit installation
139 | msg "You have chosen not to proceed. Aborting. Bye..."
140 | echo
141 | sleep 1
142 | fi
143 |
144 | #---- Finish Line ------------------------------------------------------------------
145 |
146 | # section "Completion Status"
147 |
148 | # msg "Success. Task complete."
149 | # echo
150 |
151 | #---- Cleanup
152 | # Clean up CT tmp files
153 | pct exec $CTID -- bash -c "rm -R /tmp/${GIT_REPO} &> /dev/null; rm /tmp/${GIT_REPO}.tar.gz &> /dev/null"
154 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/ubuntu/pve_nas_ct_restoredirperm.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ----------------------------------------------------------------------------------
3 | # Filename: pve_nas_ct_restoredirperm.sh
4 | # Description: Restore or update PVE NAS Ubuntu storage folders and permissions
5 | # ----------------------------------------------------------------------------------
6 |
7 | #---- Bash command to run script ---------------------------------------------------
8 | #---- Source -----------------------------------------------------------------------
9 |
10 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
11 | COMMON_PVE_SRC_DIR="$DIR/../../common/pve/src"
12 | COMMON_DIR="$DIR/../../common"
13 |
14 | #---- Dependencies -----------------------------------------------------------------
15 |
16 | # Run Bash Header
17 | source $COMMON_PVE_SRC_DIR/pvesource_bash_defaults.sh
18 |
19 | #---- Static Variables -------------------------------------------------------------
20 | #---- Other Variables --------------------------------------------------------------
21 |
22 | # Easy Script Section Header Body Text
23 | SECTION_HEAD='PVE NAS'
24 |
25 | #---- Other Files ------------------------------------------------------------------
26 | #---- Body -------------------------------------------------------------------------
27 |
28 | #---- Restore, update default storage folder permissions
29 |
30 | source $COMMON_DIR/nas/src/nas_identify_storagepath.sh
31 | source $COMMON_DIR/nas/src/nas_basefoldersetup.sh
32 | #-----------------------------------------------------------------------------------
--------------------------------------------------------------------------------