├── .check ├── ConvertLineEndings.py ├── ShellCheck.py ├── UpdateFunctionIndex.py ├── UpdateUtilityDocumentation.py ├── VerifySourceCalls.py ├── _RunChecks.bat └── _RunChecks.sh ├── .gitattributes ├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug-report.md │ ├── documentation-issue.md │ └── feature-request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ ├── release.yml │ └── static.yml ├── .site ├── SingleLineCommand.png ├── index.html ├── robots.txt ├── scripts.js ├── serve.bat ├── sitemap.xml └── styles.css ├── CCPVE.sh ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Cluster ├── AddNodes.sh ├── CreateCluster.sh ├── DeleteCluster.sh └── RemoveClusterNode.sh ├── Firewall ├── BulkAddFirewallLXCVM.sh └── EnableFirewallSetup.sh ├── GUI.sh ├── HighAvailability ├── AddResources.sh ├── CreateHAGroup.sh ├── DisableHAClusterWide.sh └── DisableHighAvailability.sh ├── Host ├── -Backup.sh ├── Bulk │ ├── FirstTimeProxmoxSetup.sh │ ├── ProxmoxEnableMicrocode.sh │ ├── SetTimeZone.sh │ ├── UpgradeAllServers.sh │ └── UpgradeRepositories.sh ├── FanControl │ ├── DellIPMIFanControl.sh │ └── EnablePWMFanControl.sh ├── FixDPKGLock.sh ├── Hardware │ ├── EnableCPUScalingGoverner.sh │ ├── EnableGPUPassthroughVM.sh │ ├── EnableIOMMU.sh │ ├── EnablePCIPassthroughLXC.sh │ ├── EnableX3DOptimization.sh │ ├── OnlineMemoryTest.sh │ └── OptimizeNestedVirtualization.sh ├── QuickDiagnostic.sh ├── RemoveLocalLVMAndExpand.sh ├── SeparateNode.sh └── Storage │ └── ExpandEXT4Partition.sh ├── LICENSE ├── LXC ├── BulkAddIPToNote.sh ├── Hardware │ ├── BulkSetCPU.sh │ └── BulkSetMemory.sh ├── Networking │ ├── BulkAddSSHKey.sh │ ├── BulkChangeDNS.sh │ ├── BulkChangeIP.sh │ ├── BulkChangeNetwork.sh │ └── BulkChangeUserPass.sh ├── Operations │ ├── BulkClone.sh │ ├── BulkDeleteAllLocal.sh │ ├── BulkDeleteRange.sh │ ├── BulkStart.sh │ ├── BulkStop.sh │ └── BulkUnlock.sh ├── Options │ ├── BulkStartAtBoot.sh │ └── BulkToggleProtectionMode.sh ├── Storage │ ├── BulkChangeStorage.sh │ ├── BulkMoveVolume.sh │ └── BulkResizeStorage.sh └── UpdateAll.sh ├── MakeScriptsExecutable.sh ├── Networking ├── AddNetworkBond.sh ├── BulkPrintVMIDMacAddresses.sh ├── BulkSetDNS.sh ├── FindVMFromMacAddress.sh ├── HostIPerfTest.sh ├── UpdateNetworkInterfaceNames.sh └── UplinkSpeedTest.sh ├── README.md ├── RemoteManagement ├── ApacheGuacamole │ ├── BulkDeleteConnectionGuacamole.sh │ ├── GetGuacamoleAuthenticationToken.sh │ ├── RDP │ │ ├── BulkAddRDPConnectionGuacamole.sh │ │ ├── BulkAddSFTPServer.sh │ │ ├── BulkPrintRDPConfiguration.sh │ │ ├── BulkRemoveDriveRedirection.sh │ │ ├── BulkRemoveSFTPServer.sh │ │ └── BulkUpdateDriveRedirection.sh │ └── RemoveGuacamoleAuthenticationToken.sh └── ConfigureOverSSH │ ├── -AddGuestAgentDebian.sh │ ├── BulkCloneSetIPDebian.sh │ ├── BulkCloneSetIPUbuntu.sh │ └── BulkCloneSetIPWindows.sh ├── Resources ├── ExportProxmoxResources.sh └── FindLinkedClone.sh ├── SECURITY.md ├── Security ├── PenetrationTest.sh └── PortScan.sh ├── Storage ├── Benchmark.sh ├── Ceph │ ├── CreateOSDs.sh │ ├── EditCrushmap.sh │ ├── RestartManagers.sh │ ├── RestartMetadata.sh │ ├── RestartMonitors.sh │ ├── RestartOSDs.sh │ ├── SetPoolMinSize1.sh │ ├── SetPoolSize1.sh │ ├── SetScrubInterval.sh │ ├── SingleDrive.sh │ ├── SparsifyDisk.sh │ ├── StartStoppedOSDs.sh │ └── WipeDisk.sh ├── DiskDeleteBulk.sh ├── DiskDeleteWithSnapshot.sh ├── FilesystemTrimAll.sh ├── OptimizeSpindown.sh ├── PassthroughStorageToLXC.sh └── UpdateStaleMount.sh ├── UpdateProxmoxScripts.sh ├── Utilities ├── Colors.sh ├── Communication.sh ├── Conversion.sh ├── Prompts.sh ├── 
Queries.sh ├── SSH.sh ├── Utilities.md ├── _ExampleScript.sh ├── _TestColors.sh ├── _TestCommunication.sh ├── _TestConversion.sh ├── _TestPrompts.sh ├── _TestQueries.sh └── _TestSSH.sh └── VirtualMachines ├── BulkAddIPToNote.sh ├── CloudInit ├── BulkAddSSHKey.sh ├── BulkChangeDNS.sh ├── BulkChangeIP.sh ├── BulkChangeUserPass.sh ├── BulkMoveCloudInit.sh └── BulkTogglePackageUpgrade.sh ├── CreateFromISO.sh ├── Hardware ├── BulkChangeNetwork.sh ├── BulkSetCPUTypeCoreCount.sh ├── BulkSetMemoryConfig.sh ├── BulkUnmountISOs.sh └── VMAddTerminalTTYS0.sh ├── ISOList.csv ├── Operations ├── BulkBackup.sh ├── BulkClone.sh ├── BulkCloneCloudInit.sh ├── BulkDelete.sh ├── BulkDeleteAllLocal.sh ├── BulkRemoteMigrate.sh ├── BulkReset.sh ├── BulkStart.sh ├── BulkStop.sh └── BulkUnlock.sh ├── Options ├── BulkEnableGuestAgent.sh ├── BulkToggleProtectionMode.sh └── BulkToggleStartAtBoot.sh ├── RestoreVM.sh └── Storage ├── BulkChangeStorage.sh ├── BulkMoveDisk.sh └── BulkResizeStorage.sh /.check/ConvertLineEndings.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | 6 | def convert_line_endings_to_unix(directory): 7 |     """ 8 |     Recursively walk `directory`, converting Windows-style line endings (\r\n) 9 |     to Unix-style (\n) in all files EXCEPT: 10 |     - anything inside folders named '.git', '.github', '.site', or '.check' 11 |     - any files named '.gitattributes' 12 |     """ 13 |     for root, dirs, files in os.walk(directory): 14 |         # Prune repository metadata and tooling directories, wherever they appear, 15 |         # so os.walk never descends into them 16 |         for skipped_dir in (".git", ".github", ".site", ".check"): 17 |             if skipped_dir in dirs: 18 |                 dirs.remove(skipped_dir) 19 | 20 | 21 |         for filename in files: 22 |             # Skip .gitattributes files 23 |             if filename == ".gitattributes": 24 |                 continue 25 | 26 |             file_path = os.path.join(root, filename) 27 | 28 |             # Read the file in binary mode 29 |             try: 30 |                 with open(file_path, "rb") as f: 31 |                     content = f.read() 32 |             except OSError as e: 33 |                 print(f"[ERROR] Could not open {file_path}: {e}") 34 |                 continue 35 | 36 |             # Replace CRLF with LF 37 |             new_content = content.replace(b"\r\n", b"\n") 38 | 39 |             # Only write back if there's a difference 40 |             if new_content != content: 41 |                 try: 42 |                     with open(file_path, "wb") as f: 43 |                         f.write(new_content) 44 |                     print(f"[INFO] Converted line endings in {file_path}") 45 |                 except OSError as e: 46 |                     print(f"[ERROR] Could not write to {file_path}: {e}") 47 | 48 | 49 | def main(): 50 |     if len(sys.argv) != 2: 51 |         print("Usage: python ConvertLineEndings.py <directory>") 52 |         sys.exit(1) 53 | 54 |     directory = sys.argv[1] 55 | 56 |     if not os.path.isdir(directory): 57 |         print(f"Error: {directory} is not a valid directory.") 58 |         sys.exit(1) 59 | 60 |     convert_line_endings_to_unix(directory) 61 | 62 | if __name__ == "__main__": 63 |     main() 64 | -------------------------------------------------------------------------------- /.check/ShellCheck.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Requires Shellcheck: https://github.com/koalaman/shellcheck#user-content-installing 4 | 5 | import os 6 | import sys 7 | import subprocess 8 | import shutil 9 | 10 | def find_sh_files(base_dir): 11 |     """ 12 |     Recursively find all .sh files under base_dir. 13 |     Returns a list of file paths.
14 | """ 15 | sh_files = [] 16 | for root, dirs, files in os.walk(base_dir): 17 | for filename in files: 18 | if filename.endswith(".sh"): 19 | sh_files.append(os.path.join(root, filename)) 20 | return sh_files 21 | 22 | def run_shellcheck(file_path): 23 | """ 24 | Runs ShellCheck on a given file path. 25 | Returns stdout, stderr, and the process return code. 26 | """ 27 | result = subprocess.run( 28 | ["shellcheck", "-e", "SC1090", file_path], 29 | capture_output=True, 30 | text=True 31 | ) 32 | return result.stdout, result.stderr, result.returncode 33 | 34 | def naive_sh_check(file_path): 35 | """ 36 | A very basic check that looks for a few common issues: 37 | - Missing or non-bash shebang. 38 | - File not marked as executable. 39 | Feel free to add more checks here as needed. 40 | """ 41 | errors = [] 42 | 43 | # 1. Check shebang in the first line 44 | with open(file_path, "r", encoding="utf-8", errors="ignore") as f: 45 | first_line = f.readline().strip() 46 | if not first_line.startswith("#!"): 47 | errors.append("Missing shebang (#!/bin/bash or similar) in first line.") 48 | elif "bash" not in first_line and "sh" not in first_line: 49 | errors.append(f"Shebang does not specify bash/sh: {first_line}") 50 | 51 | # 2. Check if file has execute permission 52 | if not os.access(file_path, os.X_OK): 53 | errors.append("File is not set as executable (chmod +x).") 54 | 55 | return errors 56 | 57 | def main(): 58 | if len(sys.argv) != 2: 59 | print("Usage: python ShellCheck.py ") 60 | sys.exit(1) 61 | 62 | base_dir = sys.argv[1] 63 | if not os.path.isdir(base_dir): 64 | print(f"Error: {base_dir} is not a valid directory.") 65 | sys.exit(1) 66 | 67 | # Find all .sh files in the specified directory 68 | sh_files = find_sh_files(base_dir) 69 | if not sh_files: 70 | print("No .sh files found.") 71 | return 72 | 73 | # Check if ShellCheck is installed 74 | shellcheck_path = shutil.which("shellcheck") 75 | shellcheck_installed = shellcheck_path is not None 76 | 77 | # For each .sh file, either use ShellCheck or fallback checks 78 | for sh_file in sh_files: 79 | print(f"=== Checking file: {sh_file} ===") 80 | 81 | if shellcheck_installed: 82 | # Run ShellCheck 83 | stdout, stderr, returncode = run_shellcheck(sh_file) 84 | if returncode == 0: 85 | print("No issues found by ShellCheck.") 86 | else: 87 | # ShellCheck warnings/errors 88 | print("ShellCheck issues:") 89 | if stdout.strip(): 90 | print(stdout.strip()) 91 | if stderr.strip(): 92 | print(stderr.strip()) 93 | else: 94 | # Fallback: run naive checks 95 | errors = naive_sh_check(sh_file) 96 | if errors: 97 | print("Naive check found potential issues:") 98 | for err in errors: 99 | print(f" - {err}") 100 | 101 | if __name__ == "__main__": 102 | main() 103 | -------------------------------------------------------------------------------- /.check/_RunChecks.bat: -------------------------------------------------------------------------------- 1 | python .\ConvertLineEndings.py ../ 2 | REM python .\ShellCheck.py ../ 3 | python .\UpdateFunctionIndex.py ../ 4 | python .\VerifySourceCalls.py --fix 5 | -------------------------------------------------------------------------------- /.check/_RunChecks.sh: -------------------------------------------------------------------------------- 1 | python ./ConvertLineEndings.py ../ 2 | #python ./ShellCheck.py ../ 3 | python ./UpdateFunctionIndex.py ../ 4 | python ./VerifySourceCalls.py --fix 5 | -------------------------------------------------------------------------------- /.gitattributes: 
-------------------------------------------------------------------------------- 1 | # Force *.sh files to use LF 2 | *.sh text eol=lf 3 | 4 | # Python files LF 5 | *.py text eol=lf 6 | 7 | # CSV files LF 8 | *.csv text eol=lf 9 | 10 | # MD files LF 11 | *.md text eol=lf 12 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [coelacant1] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: # Replace with a single Buy Me a Coffee username 14 | thanks_dev: # Replace with a single thanks.dev username 15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 16 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Describe this issue template's purpose here. 4 | title: '[BUG] ' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Description 11 | A clear and concise description of the bug. 12 | 13 | ## Steps to Reproduce 14 | 1. 15 | 2. 16 | 3. 17 | 18 | ## Expected Behavior 19 | A clear and concise description of what you expected to happen. 20 | 21 | ## Screenshots/Logs 22 | If applicable, add any relevant screenshots or error logs to help explain your problem. 23 | 24 | ## Environment 25 | - **Browser** [if applicable]: [e.g. Chrome 80] 26 | - **Version** [if relevant]: [e.g. 1.0.0] 27 | 28 | ## Additional Context 29 | Add any other context about the problem here. -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/documentation-issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Documentation Issue 3 | about: Report missing, unclear, or incorrect documentation 4 | title: "[DOC] " 5 | labels: documentation 6 | assignees: '' 7 | --- 8 | 9 | ## Description 10 | A clear and concise description of the documentation issue (e.g., missing steps, outdated information, or unclear instructions). 11 | 12 | ## Location 13 | Where in the documentation does this issue appear? 14 | - File name(s) or URL(s) 15 | - Section or heading 16 | 17 | ## Suggested Changes 18 | Explain how you think the documentation should be improved or clarified. Include any relevant examples or details that will help us address the issue. 
19 | 20 | ## Additional Context 21 | Add any other context, such as: 22 | - Screenshots or excerpts 23 | - References to related documentation 24 | - Version information (if applicable) 25 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Suggest a new feature or enhancement 4 | title: "[FEATURE] " 5 | labels: enhancement 6 | assignees: '' 7 | --- 8 | 9 | ## Summary 10 | Briefly describe the new feature or improvement you’d like to see. 11 | 12 | ## Motivation 13 | Explain why this feature is needed. What problem does it solve, or what use case does it fulfill? 14 | 15 | ## Proposed Solution 16 | Provide details about how you think this feature could be implemented. Include any relevant code snippets, design docs, or examples. 17 | 18 | ## Alternatives 19 | Discuss any alternative solutions you’ve considered or existing workarounds. 20 | 21 | ## Additional Context 22 | Add any other context or screenshots to illustrate your request. 23 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Pull Request 3 | about: Add or update code in this project 4 | title: "[PR] " 5 | labels: enhancement 6 | assignees: '' 7 | --- 8 | 9 | ## Description 10 | A clear and concise description of what this pull request does. 11 | 12 | ## Type of Change 13 | - [ ] Bug fix (non-breaking change fixing an issue) 14 | - [ ] New feature (non-breaking change adding functionality) 15 | - [ ] Breaking change (fix or feature that would break existing functionality) 16 | - [ ] Documentation update 17 | - [ ] Other (please describe): 18 | 19 | ## How Has This Been Tested? 20 | Please describe the tests that you ran to verify your changes. Provide instructions so we can reproduce. Please also list any relevant details about your test configuration. 21 | 22 | ## Checklist 23 | - [ ] I have performed a self-review of my own code. 24 | - [ ] I have commented my code where necessary. 25 | - [ ] I have made corresponding changes to the documentation (if applicable). 26 | - [ ] My changes do not generate new warnings or errors. 27 | - [ ] I have tested this code. 28 | 29 | ## Related Issues 30 | If this pull request addresses an open issue, please link it here: `#` 31 | 32 | ## Additional Context 33 | Add any additional context or screenshots about the pull request here. 34 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release on .sh changes 2 | 3 | on: 4 | push: 5 | paths: 6 | - '**/*.sh' # Triggers only if .sh files are changed/added 7 | 8 | jobs: 9 | release: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Check out code 13 | uses: actions/checkout@v3 14 | 15 | # Create a version tag "v1." 
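      # (the GitHub run number is appended below, producing tags such as "v1.42"; the 42 is only an illustrative run number)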
16 | - name: Set up release tag 17 | run: echo "TAG_NAME=v1.${{ github.run_number }}" >> $GITHUB_ENV 18 | 19 | # Create the GitHub release 20 | - name: Publish release 21 | uses: actions/create-release@v1 22 | env: 23 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 24 | with: 25 | tag_name: ${{ env.TAG_NAME }} 26 | release_name: ${{ env.TAG_NAME }} 27 | body: | 28 | **Commit Message**: ${{ github.event.head_commit.message }} 29 | **Build Number**: ${{ github.run_number }} 30 | -------------------------------------------------------------------------------- /.github/workflows/static.yml: -------------------------------------------------------------------------------- 1 | # Simple workflow for deploying static content to GitHub Pages 2 | name: Deploy static content to Pages 3 | 4 | on: 5 | push: 6 | branches: ["main"] 7 | workflow_dispatch: 8 | 9 | permissions: 10 | contents: read 11 | pages: write 12 | id-token: write 13 | 14 | concurrency: 15 | group: "pages" 16 | cancel-in-progress: false 17 | 18 | jobs: 19 | deploy: 20 | environment: 21 | name: github-pages 22 | url: ${{ steps.deployment.outputs.page_url }} 23 | runs-on: ubuntu-latest 24 | steps: 25 | - name: Checkout 26 | uses: actions/checkout@v4 27 | - name: Setup Pages 28 | uses: actions/configure-pages@v5 29 | - name: Upload artifact 30 | uses: actions/upload-pages-artifact@v3 31 | with: 32 | # Upload only the 'site' folder 33 | path: '.site' 34 | - name: Deploy to GitHub Pages 35 | id: deployment 36 | uses: actions/deploy-pages@v4 37 | -------------------------------------------------------------------------------- /.site/SingleLineCommand.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coelacant1/ProxmoxScripts/5a50566042a1d6403aa0e082712ca724488d9ee7/.site/SingleLineCommand.png -------------------------------------------------------------------------------- /.site/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Allow: /ProxmoxScripts/ 3 | 4 | Sitemap: https://coelacant.com/ProxmoxScripts/sitemap.xml 5 | -------------------------------------------------------------------------------- /.site/serve.bat: -------------------------------------------------------------------------------- 1 | python -m http.server 8000 -------------------------------------------------------------------------------- /.site/sitemap.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | https://coelacant.com/ProxmoxScripts/ 5 | 2024-12-21 6 | 1.0 7 | 8 | 9 | https://coelacant.com/ 10 | 2024-12-21 11 | 0.9 12 | 13 | 14 | -------------------------------------------------------------------------------- /CCPVE.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CCPVE.sh 4 | # 5 | # The main script to download and extract the ProxmoxScripts repository, then make all scripts 6 | # in the repository executable and finally call CCPVEOffline.sh. 7 | # 8 | # Usage: 9 | # ./CCPVE.sh [-nh] 10 | # 11 | # This script requires 'unzip' and 'wget'. If not installed, it will prompt to install them. 
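# If you prefer to preinstall those dependencies instead of answering the prompts,
# something like the following should work on a stock Proxmox (Debian) host:
#   apt-get update && apt-get install -y unzip wget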
12 | # 13 | # Example: 14 | # bash -c "$(wget -qLO - https://github.com/coelacant1/ProxmoxScripts/raw/main/CCPVE.sh)" 15 | # 16 | 17 | set -e 18 | 19 | apt update || true 20 | 21 | SHOW_HEADER="true" 22 | 23 | while [[ $# -gt 0 ]]; do 24 | case "$1" in 25 | -nh) 26 | SHOW_HEADER="false" 27 | shift 28 | ;; 29 | *) 30 | echo "Error: Unknown argument '$1'" 31 | exit 1 32 | ;; 33 | esac 34 | done 35 | 36 | # --- Check Dependencies ----------------------------------------------------- 37 | if ! command -v unzip &>/dev/null; then 38 | echo "The 'unzip' utility is required to extract the downloaded files but is not installed." 39 | read -r -p "Would you like to install 'unzip' now? [y/N]: " response 40 | if [[ "$response" =~ ^[Yy]$ ]]; then 41 | apt-get install -y unzip 42 | else 43 | echo "Aborting script because 'unzip' is not installed." 44 | exit 1 45 | fi 46 | fi 47 | 48 | if ! command -v wget &>/dev/null; then 49 | echo "The 'wget' utility is required to download the repository ZIP but is not installed." 50 | read -r -p "Would you like to install 'wget' now? [y/N]: " response 51 | if [[ "$response" =~ ^[Yy]$ ]]; then 52 | apt-get install -y wget 53 | else 54 | echo "Aborting script because 'wget' is not installed." 55 | exit 1 56 | fi 57 | fi 58 | 59 | # --- Configuration ---------------------------------------------------------- 60 | REPO_ZIP_URL="https://github.com/coelacant1/ProxmoxScripts/archive/refs/heads/main.zip" 61 | TARGET_DIR="/tmp/cc_pve" 62 | 63 | # --- Download and Extract --------------------------------------------------- 64 | rm -rf "$TARGET_DIR" 65 | mkdir -p "$TARGET_DIR" 66 | 67 | echo "Downloading repository ZIP from $REPO_ZIP_URL..." 68 | if ! wget -q -O "$TARGET_DIR/repo.zip" "$REPO_ZIP_URL"; then 69 | echo "Error: Failed to download from $REPO_ZIP_URL" 70 | exit 1 71 | fi 72 | 73 | echo "Extracting ZIP..." 74 | if ! unzip -q "$TARGET_DIR/repo.zip" -d "$TARGET_DIR"; then 75 | echo "Error: Failed to unzip the downloaded file." 76 | exit 1 77 | fi 78 | 79 | # Find the first extracted folder that isn't a dot-folder 80 | BASE_EXTRACTED_DIR=$(find "$TARGET_DIR" -mindepth 1 -maxdepth 1 -type d ! -name ".*" | head -n1) 81 | if [ -z "$BASE_EXTRACTED_DIR" ]; then 82 | echo "Error: No extracted content found." 83 | exit 1 84 | fi 85 | 86 | echo "Repository extracted into: $BASE_EXTRACTED_DIR" 87 | 88 | # --- Make Scripts Executable ----------------------------------------------- 89 | echo "Making all scripts executable..." 90 | cd "$BASE_EXTRACTED_DIR" || exit 1 91 | if [ -f "./MakeScriptsExecutable.sh" ]; then 92 | bash "./MakeScriptsExecutable.sh" 93 | else 94 | echo "Warning: MakeScriptsExecutable.sh not found. Skipping." 95 | fi 96 | 97 | # --- Call GUI.sh -------------------------------------------------- 98 | if [ -f "./GUI.sh" ]; then 99 | echo "Calling GUI.sh..." 100 | if [ "$SHOW_HEADER" != "true" ]; then 101 | bash "./GUI.sh" -nh 102 | else 103 | bash "./GUI.sh" 104 | fi 105 | else 106 | echo "Warning: GUI.sh not found. Skipping." 107 | fi 108 | 109 | echo "Done." 
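# Note: the repository is extracted to /tmp/cc_pve and re-created on every run; if you
# want to reclaim the space afterwards, removing that working copy should be safe, e.g.:
#   rm -rf /tmp/cc_pve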
-------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to make participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, gender identity and expression, level of experience, education, socioeconomic status, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | - Demonstrating empathy and kindness toward other people 11 | - Being respectful of differing opinions, viewpoints, and experiences 12 | - Giving and gracefully accepting constructive feedback 13 | - Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience 14 | - Focusing on what is best not just for us as individuals, but for the overall community 15 | 16 | Examples of unacceptable behavior include: 17 | - The use of sexualized language or imagery, and sexual attention or advances of any kind 18 | - Trolling, insulting or derogatory comments, and personal or political attacks 19 | - Public or private harassment 20 | - Publishing others’ private information, such as a physical or email address, without their explicit permission 21 | - Other conduct which could reasonably be considered inappropriate in a professional setting 22 | 23 | ## Our Responsibilities 24 | 25 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 26 | 27 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, edits, issues, and other contributions that do not align with this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. 28 | 29 | ## Scope 30 | 31 | This Code of Conduct applies both within project spaces and in public spaces when an individual is officially representing the project or its community. Examples of representing a project or community include using an official project email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. 32 | 33 | ## Enforcement 34 | 35 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project head at coelacannot@gmail.com. All complaints will be reviewed and investigated promptly and fairly. 36 | 37 | All project maintainers are obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | ## Attribution 40 | 41 | This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html](https://www.contributor-covenant.org/version/2/1/code_of_conduct.html). 42 | 43 | For answers to common questions about this code of conduct, see [https://www.contributor-covenant.org/faq](https://www.contributor-covenant.org/faq). 
44 | -------------------------------------------------------------------------------- /Cluster/CreateCluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CreateCluster.sh 4 | # 5 | # Creates a new Proxmox cluster on a single host. This script requires: 6 | # 1) A cluster name (e.g. "MyCluster") 7 | # 2) A management (Corosync) IP for cluster communication 8 | # 9 | # Usage: 10 | # ./CreateCluster.sh 11 | # 12 | # Example: 13 | # # Create a cluster named 'myCluster' using 192.168.100.10 as Corosync IP 14 | # ./CreateCluster.sh myCluster 192.168.100.10 15 | # 16 | # After running this script, you can join other Proxmox nodes to the cluster with: 17 | # pvecm add 18 | # 19 | 20 | source "${UTILITYPATH}/Prompts.sh" 21 | 22 | ############################################################################### 23 | # Checks and Setup 24 | ############################################################################### 25 | __check_root__ 26 | __check_proxmox__ 27 | 28 | if [[ $# -lt 2 ]]; then 29 | echo "Error: Missing arguments." 30 | echo "Usage: $0 " 31 | exit 1 32 | fi 33 | 34 | CLUSTER_NAME="$1" 35 | MON_IP="$2" 36 | 37 | # Check if host is already part of a cluster 38 | if [[ -f "/etc/pve/.members" ]]; then 39 | echo "WARNING: This host appears to have an existing cluster config (/etc/pve/.members)." 40 | echo "If it's already part of a cluster, creating a new one may cause conflicts." 41 | echo "Press Ctrl-C to abort, or wait 5 seconds to continue..." 42 | sleep 5 43 | fi 44 | 45 | ############################################################################### 46 | # Main 47 | ############################################################################### 48 | echo "Creating new Proxmox cluster: \"${CLUSTER_NAME}\"" 49 | echo "Using IP for link0: \"${MON_IP}\"" 50 | 51 | pvecm create "${CLUSTER_NAME}" --link0 address="${MON_IP}" 52 | 53 | echo 54 | echo "Cluster \"${CLUSTER_NAME}\" created with link0 address set to \"${MON_IP}\"." 55 | echo "To verify status: pvecm status" 56 | echo "To join another node to this cluster (from that node):" 57 | echo " pvecm add \"${MON_IP}\"" 58 | 59 | ############################################################################### 60 | # Testing status 61 | ############################################################################### 62 | # Tested single-node 63 | # Tested multi-node 64 | -------------------------------------------------------------------------------- /Cluster/DeleteCluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # DeleteCluster.sh 4 | # 5 | # Script to remove a single-node Proxmox cluster configuration, 6 | # returning the node to a standalone setup. 7 | # 8 | # Usage: 9 | # ./DeleteCluster.sh 10 | # 11 | # Warning: 12 | # - If this node is part of a multi-node cluster, first remove other nodes 13 | # from the cluster (pvecm delnode ) until this is the last node. 14 | # - This process is DESTRUCTIVE and will remove cluster configuration. 
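#   Example of the node-removal step mentioned above (the node name is illustrative);
#   run it on the node that will remain, once per node to be removed:
#     pvecm delnode pve-node2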
15 | # 16 | 17 | source "${UTILITYPATH}/Prompts.sh" 18 | 19 | ############################################################################### 20 | # Preliminary Checks 21 | ############################################################################### 22 | __check_root__ # Ensure script is run as root 23 | __check_proxmox__ # Ensure this is a Proxmox node 24 | 25 | ############################################################################### 26 | # Main Script Logic 27 | ############################################################################### 28 | echo "=== Proxmox Cluster Removal (Single-Node) ===" 29 | echo "This will remove Corosync/cluster configuration from this node." 30 | read -r -p "Proceed? (y/N): " confirm 31 | if [[ "$confirm" != "y" && "$confirm" != "Y" ]]; then 32 | echo "Aborted." 33 | exit 1 34 | fi 35 | 36 | nodeCount="$(__get_number_of_cluster_nodes__)" 37 | if [[ "$nodeCount" -gt 1 ]]; then 38 | echo "Error: This script is for a single-node cluster only." 39 | echo "Current cluster shows \"$nodeCount\" nodes. Remove other nodes first, then re-run." 40 | exit 2 41 | fi 42 | 43 | echo "Stopping cluster services..." 44 | systemctl stop corosync || true 45 | systemctl stop pve-cluster || true 46 | 47 | echo "Removing Corosync config from /etc/pve and /etc/corosync..." 48 | rm -f "/etc/pve/corosync.conf" 2>/dev/null || true 49 | rm -rf "/etc/corosync/"* 2>/dev/null || true 50 | 51 | # Optionally remove additional cluster-related config (use caution): 52 | # rm -f /etc/pve/cluster.conf 2>/dev/null || true 53 | 54 | echo "Restarting pve-cluster (it will now run standalone)..." 55 | systemctl start pve-cluster 56 | 57 | echo "Verifying that corosync is not running..." 58 | systemctl stop corosync 2>/dev/null || true 59 | systemctl disable corosync 2>/dev/null || true 60 | 61 | echo "=== Done ===" 62 | echo "This node is no longer part of any Proxmox cluster." 63 | echo "You can verify by running 'pvecm status' (it should show no cluster)." 64 | 65 | ############################################################################### 66 | # Testing status 67 | ############################################################################### 68 | # Tested single-node 69 | # Tested multi-node 70 | -------------------------------------------------------------------------------- /HighAvailability/AddResources.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # AddResourcesToHAGroup.sh 4 | # 5 | # This script adds LXC containers or VMs (found anywhere in the cluster) to a 6 | # specified High Availability (HA) group in a Proxmox VE cluster. 7 | # 8 | # Usage: 9 | # ./AddResourcesToHAGroup.sh [ ... ] 10 | # 11 | # Example: 12 | # # Adds VM/LXC IDs 100, 101, and 200 to the 'Primary' HA group 13 | # # even if they are located on different nodes 14 | # ./AddResourcesToHAGroup.sh Primary 100 101 200 15 | # 16 | # Notes: 17 | # - You must be root or run via sudo. 18 | # - This script assumes you have a working Proxmox VE cluster. 19 | # - Group names must not be purely numeric (e.g., '123'). 20 | # - The script relies on utility functions that must be sourced elsewhere. 
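#   In this repository those utilities live in the top-level Utilities folder and are
#   loaded from "${UTILITYPATH}/Prompts.sh" and "${UTILITYPATH}/Queries.sh". When running
#   a script by hand, export UTILITYPATH to point at that folder first, for example
#   (the path below assumes the default extraction location used by CCPVE.sh):
#     export UTILITYPATH="/tmp/cc_pve/ProxmoxScripts-main/Utilities"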
21 | # 22 | 23 | source "${UTILITYPATH}/Prompts.sh" 24 | source "${UTILITYPATH}/Queries.sh" 25 | 26 | ############################################################################### 27 | # MAIN 28 | ############################################################################### 29 | 30 | __check_root__ 31 | __check_proxmox__ 32 | __check_cluster_membership__ 33 | 34 | if [[ "$#" -lt 2 ]]; then 35 | echo "Usage: \"$0\" [ ... ]" 36 | exit 1 37 | fi 38 | 39 | declare GROUP_NAME="$1" 40 | shift 41 | declare -a RESOURCE_IDS=("$@") 42 | 43 | # Make sure the group name is not purely numeric, 44 | # because Proxmox HA groups cannot be numeric only. 45 | if [[ "$GROUP_NAME" =~ ^[0-9]+$ ]]; then 46 | echo "Error: The group name \"${GROUP_NAME}\" is invalid; it cannot be purely numeric." 47 | exit 1 48 | fi 49 | 50 | # Gather all LXC and VM IDs across the entire cluster 51 | readarray -t ALL_CLUSTER_LXC < <( __get_cluster_lxc__ ) 52 | readarray -t ALL_CLUSTER_VMS < <( __get_cluster_vms__ ) 53 | 54 | for resourceId in "${RESOURCE_IDS[@]}"; do 55 | # Determine if this resource ID belongs to an LXC or a VM 56 | if [[ " ${ALL_CLUSTER_LXC[*]} " == *" ${resourceId} "* ]]; then 57 | resourceType="ct" 58 | elif [[ " ${ALL_CLUSTER_VMS[*]} " == *" ${resourceId} "* ]]; then 59 | resourceType="vm" 60 | else 61 | echo "Error: Resource ID \"${resourceId}\" not found in the cluster as a VM or LXC container." 62 | continue 63 | fi 64 | 65 | echo "Adding resource \"${resourceType}:${resourceId}\" to HA group \"${GROUP_NAME}\"..." 66 | if pvesh create /cluster/ha/resources --sid "${resourceType}:${resourceId}" --group "${GROUP_NAME}"; then 67 | echo " - Successfully added \"${resourceType}:${resourceId}\" to HA group \"${GROUP_NAME}\"." 68 | else 69 | echo " - Failed to add \"${resourceType}:${resourceId}\" to HA group \"${GROUP_NAME}\"." 70 | fi 71 | done 72 | 73 | echo "=== HA resource addition process completed! ===" 74 | 75 | ############################################################################### 76 | # Testing status 77 | ############################################################################### 78 | # Tested single-node 79 | # Tested multi-node 80 | -------------------------------------------------------------------------------- /HighAvailability/CreateHAGroup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CreateHAGroup.sh 4 | # 5 | # This script creates a High Availability (HA) group in the Proxmox VE cluster 6 | # and assigns the specified nodes to that group. 7 | # 8 | # Usage: 9 | # ./CreateHAGroup.sh [ ... ] 10 | # 11 | # Example: 12 | # # Creates a group named 'Primary' and adds nodes 'pve01' and 'pve02' 13 | # ./CreateHAGroup.sh Primary pve01 pve02 14 | # 15 | # Notes: 16 | # - You must be root or run via sudo. 17 | # - This script assumes you have a working Proxmox VE cluster. 18 | # - The script relies on utility functions that must be sourced elsewhere. 19 | # 20 | 21 | source "${UTILITYPATH}/Prompts.sh" 22 | source "${UTILITYPATH}/Queries.sh" 23 | 24 | ############################################################################### 25 | # MAIN 26 | ############################################################################### 27 | 28 | __check_root__ 29 | __check_proxmox__ 30 | __check_cluster_membership__ 31 | 32 | if [[ "$#" -lt 2 ]]; then 33 | echo "Usage: ${0} [ ... 
]" 34 | exit 1 35 | fi 36 | 37 | declare GROUP_NAME="$1" 38 | shift 39 | declare -a NODES=("$@") 40 | 41 | # Convert the array of nodes into a comma-separated string 42 | declare NODES_STRING 43 | NODES_STRING="$(IFS=,; echo "${NODES[*]}")" 44 | 45 | echo "Creating HA group: '${GROUP_NAME}' with the following node(s): '${NODES_STRING}'..." 46 | 47 | if ! pvesh create /cluster/ha/groups \ 48 | --group "${GROUP_NAME}" \ 49 | --nodes "${NODES_STRING}" \ 50 | --comment "HA group created by script"; then 51 | echo "Error: Failed to create HA group: '${GROUP_NAME}'" 52 | exit 1 53 | fi 54 | 55 | echo "HA group '${GROUP_NAME}' created successfully." 56 | echo "=== HA group setup process completed! ===" 57 | 58 | ############################################################################### 59 | # Testing status 60 | ############################################################################### 61 | # Tested single-node 62 | # Tested multi-node 63 | -------------------------------------------------------------------------------- /HighAvailability/DisableHAClusterWide.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # DisableHAClusterWide.sh 4 | # 5 | # This script disables High Availability (HA) cluster-wide by: 6 | # 1. Removing all HA resources found in the cluster (pvesh /cluster/ha/resources). 7 | # 2. Stopping and disabling the HA services (pve-ha-crm and pve-ha-lrm) on every node. 8 | # 9 | # Usage: 10 | # ./DisableHAClusterWide.sh 11 | # 12 | # Example: 13 | # ./DisableHAClusterWide.sh 14 | # 15 | # Notes: 16 | # - This script can only run on a Proxmox node that is part of a cluster. 17 | # - This script expects passwordless SSH or valid root credentials for all nodes. 18 | # - Once completed, no node in the cluster will run HA services, and no HA 19 | # resource definitions will remain. 20 | # 21 | 22 | source "${UTILITYPATH}/Prompts.sh" 23 | source "${UTILITYPATH}/Queries.sh" 24 | 25 | ############################################################################### 26 | # MAIN 27 | ############################################################################### 28 | 29 | # 0. Basic checks 30 | __check_root__ 31 | __check_proxmox__ 32 | __check_cluster_membership__ 33 | 34 | # 1. Ensure required commands are installed 35 | # 'jq' is not installed by default on Proxmox 8 36 | __install_or_prompt__ "jq" 37 | 38 | echo "=== Disabling HA on the entire cluster ===" 39 | 40 | # 2. Retrieve and remove all HA resources from the cluster 41 | echo "=== Retrieving all HA resources ===" 42 | ALL_RESOURCES="$(pvesh get /cluster/ha/resources --output-format json | jq -r '.[].sid')" 43 | 44 | if [[ -z "${ALL_RESOURCES}" ]]; then 45 | echo " - No HA resources found in the cluster." 46 | else 47 | echo " - The following HA resources will be removed:" 48 | echo "${ALL_RESOURCES}" 49 | echo 50 | 51 | for RES in ${ALL_RESOURCES}; do 52 | echo "Removing HA resource: ${RES} ..." 53 | if pvesh delete "/cluster/ha/resources/${RES}"; then 54 | echo " - Successfully removed: ${RES}" 55 | else 56 | echo " - Failed to remove: ${RES}" 57 | fi 58 | echo 59 | done 60 | fi 61 | 62 | # 3. Stop and disable HA services on every node in the cluster using IPs 63 | echo "=== Disabling HA services (CRM, LRM) on all nodes ===" 64 | readarray -t REMOTE_NODE_IPS < <( __get_remote_node_ips__ ) 65 | 66 | for NODE_IP in "${REMOTE_NODE_IPS[@]}"; do 67 | echo " - Processing node with IP: ${NODE_IP}" 68 | echo " Stopping pve-ha-crm and pve-ha-lrm..." 
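    # Each remote call below assumes passwordless root SSH (see the notes in the header);
    # stopping pve-ha-crm and pve-ha-lrm takes that node out of HA management immediately.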
69 | ssh "root@${NODE_IP}" "systemctl stop pve-ha-crm pve-ha-lrm" 70 | 71 | echo " Disabling pve-ha-crm and pve-ha-lrm on startup..." 72 | ssh "root@${NODE_IP}" "systemctl disable pve-ha-crm pve-ha-lrm" 73 | 74 | echo " Done for node: ${NODE_IP}" 75 | echo 76 | done 77 | 78 | echo "=== HA has been disabled on all nodes in the cluster. ===" 79 | echo "No HA resources remain, and HA services are stopped & disabled cluster-wide." 80 | 81 | # 4. Prompt to remove any packages installed during this session 82 | __prompt_keep_installed_packages__ 83 | -------------------------------------------------------------------------------- /HighAvailability/DisableHighAvailability.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # DisableHAOnNode.sh 4 | # 5 | # This script disables High Availability (HA) on a single Proxmox node by: 6 | # 1. Disabling or removing any HA resources tied to this node. 7 | # 2. Stopping and disabling the HA services (pve-ha-crm, pve-ha-lrm) on the node. 8 | # 9 | # Usage: 10 | # ./DisableHAOnNode.sh 11 | # 12 | # Example: 13 | # ./DisableHAOnNode.sh pve-node2 14 | # 15 | # Notes: 16 | # - If you're using a multi-node cluster, ensure that no critical HA resources rely on this node. 17 | # - A single-node "cluster" does not benefit from HA, so this script effectively cleans up HA configs. 18 | # - This script expects passwordless SSH or valid root credentials for the target node. 19 | # - You must run this script as root on a Proxmox node that is part of the same cluster as . 20 | # 21 | 22 | source "${UTILITYPATH}/Prompts.sh" 23 | source "${UTILITYPATH}/Queries.sh" 24 | 25 | ############################################################################### 26 | # MAIN 27 | ############################################################################### 28 | 29 | # 1. Validate input 30 | if [[ -z "$1" ]]; then 31 | echo "Usage: $0 " 32 | echo "Example: $0 pve-node2" 33 | exit 1 34 | fi 35 | 36 | targetNodeName="$1" 37 | 38 | # 2. Basic checks 39 | __check_root__ 40 | __check_proxmox__ 41 | __check_cluster_membership__ 42 | 43 | # 3. Ensure 'jq' is installed (not included by default in Proxmox 8) 44 | __install_or_prompt__ "jq" 45 | 46 | echo "=== Disabling HA on node: \"$targetNodeName\" ===" 47 | 48 | # 4. Convert node name to IP for SSH calls 49 | echo "=== Resolving IP address for node \"$targetNodeName\" ===" 50 | declare nodeIp 51 | if ! nodeIp="$(__get_ip_from_name__ "$targetNodeName")"; then 52 | echo "Error: Could not resolve node name \"$targetNodeName\" to an IP." 53 | exit 1 54 | fi 55 | echo " - Node \"$targetNodeName\" resolved to IP: \"$nodeIp\"" 56 | echo 57 | 58 | # 5. Identify HA resources referencing this node by name 59 | echo "=== Checking for HA resources on node \"$targetNodeName\"... ===" 60 | 61 | haResources="$(pvesh get /cluster/ha/resources --output-format json \ 62 | | jq -r '.[] | select(.statePath | contains("'"$targetNodeName"'")) | .sid')" 63 | 64 | if [[ -z "$haResources" ]]; then 65 | echo " - No HA resources found referencing node \"$targetNodeName\"." 66 | else 67 | echo " - Found HA resources referencing node \"$targetNodeName\":" 68 | echo "$haResources" 69 | echo 70 | 71 | # Remove these HA resources 72 | declare res 73 | for res in $haResources; do 74 | echo "Removing HA resource \"$res\" ..." 
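        # "$res" is an HA resource SID such as vm:101 or ct:200; deleting it only removes the
        # HA definition for that guest, the VM or container itself is left untouched.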
75 | if pvesh delete "/cluster/ha/resources/${res}"; then 76 | echo " - Successfully removed HA resource: \"$res\"" 77 | else 78 | echo " - Failed to remove HA resource: \"$res\"" 79 | fi 80 | echo 81 | done 82 | fi 83 | 84 | # 6. Stop and disable HA services on the target node 85 | echo "=== Stopping and disabling HA services on node \"$targetNodeName\" ===" 86 | echo "Stopping pve-ha-crm and pve-ha-lrm on IP: \"$nodeIp\" ..." 87 | ssh "root@${nodeIp}" "systemctl stop pve-ha-crm pve-ha-lrm" 88 | 89 | echo "Disabling pve-ha-crm and pve-ha-lrm on IP: \"$nodeIp\" ..." 90 | ssh "root@${nodeIp}" "systemctl disable pve-ha-crm pve-ha-lrm" 91 | 92 | echo "=== HA has been disabled on node: \"$targetNodeName\" (IP: \"$nodeIp\") ===" 93 | echo "You can verify via: ssh root@${nodeIp} 'systemctl status pve-ha-crm pve-ha-lrm'" 94 | echo 95 | 96 | # 7. Prompt to remove any packages installed during this session 97 | __prompt_keep_installed_packages__ 98 | -------------------------------------------------------------------------------- /Host/-Backup.sh: -------------------------------------------------------------------------------- 1 | # backup one or all nodes to ssh target -------------------------------------------------------------------------------- /Host/Bulk/ProxmoxEnableMicrocode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # ProxmoxEnableMicrocode.sh 4 | # 5 | # This script enables microcode updates for all nodes in a Proxmox VE cluster. 6 | # 7 | # Usage: 8 | # ./ProxmoxEnableMicrocode.sh 9 | # 10 | # Example: 11 | # ./ProxmoxEnableMicrocode.sh 12 | # 13 | # Description: 14 | # 1. Checks prerequisites (root privileges, Proxmox environment, cluster membership). 15 | # 2. Installs microcode packages on each node (remote + local). 16 | # 3. Prompts to keep or remove installed packages afterward. 17 | # 18 | # Function Index: 19 | # - enable_microcode 20 | # 21 | 22 | source "${UTILITYPATH}/Prompts.sh" 23 | source "${UTILITYPATH}/Queries.sh" 24 | 25 | ############################################################################### 26 | # Preliminary Checks 27 | ############################################################################### 28 | __check_root__ 29 | __check_proxmox__ 30 | __check_cluster_membership__ 31 | 32 | ############################################################################### 33 | # Function to enable microcode updates 34 | ############################################################################### 35 | enable_microcode() { 36 | echo "Enabling microcode updates on node: $(hostname)" 37 | apt-get update 38 | apt-get install -y intel-microcode amd64-microcode 39 | echo " - Microcode updates enabled." 40 | } 41 | 42 | ############################################################################### 43 | # Main Script Logic 44 | ############################################################################### 45 | echo "Gathering remote node IPs..." 46 | readarray -t REMOTE_NODES < <( __get_remote_node_ips__ ) 47 | 48 | if [[ "${#REMOTE_NODES[@]}" -eq 0 ]]; then 49 | echo " - No remote nodes detected; this might be a single-node cluster." 50 | fi 51 | 52 | for nodeIp in "${REMOTE_NODES[@]}"; do 53 | echo "Connecting to node: \"${nodeIp}\"" 54 | ssh root@"${nodeIp}" "$(declare -f enable_microcode); enable_microcode" 55 | echo " - Microcode update completed for node: \"${nodeIp}\"" 56 | echo 57 | done 58 | 59 | enable_microcode 60 | echo "Microcode updates enabled on the local node." 
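# The new microcode is applied at boot, so each node needs a reboot before the update is
# active; a quick way to confirm it afterwards (output wording varies by CPU and kernel):
#   dmesg | grep -i microcode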
61 | 62 | ############################################################################### 63 | # Cleanup Prompt 64 | ############################################################################### 65 | __prompt_keep_installed_packages__ 66 | 67 | echo "Microcode updates have been enabled on all nodes!" 68 | 69 | 70 | ############################################################################### 71 | # Testing status 72 | ############################################################################### 73 | # Tested single-node 74 | # Tested multi-node 75 | -------------------------------------------------------------------------------- /Host/Bulk/SetTimeZone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # SetTimeServer.sh 4 | # 5 | # A script to set the timezone across all nodes in a Proxmox VE cluster. 6 | # Defaults to "America/New_York" if no argument is provided. 7 | # 8 | # Usage: 9 | # ./SetTimeServer.sh 10 | # 11 | # Examples: 12 | # ./SetTimeServer.sh 13 | # ./SetTimeServer.sh "Europe/Berlin" 14 | # 15 | # This script will: 16 | # 1. Check if running as root (__check_root__). 17 | # 2. Check if on a valid Proxmox node (__check_proxmox__). 18 | # 3. Verify the node is part of a cluster (__check_cluster_membership__). 19 | # 4. Gather remote node IPs from __get_remote_node_ips__. 20 | # 5. Set the specified timezone on each remote node and then on the local node. 21 | # 22 | 23 | source "${UTILITYPATH}/Communication.sh" 24 | source "${UTILITYPATH}/Prompts.sh" 25 | source "${UTILITYPATH}/Queries.sh" 26 | 27 | ############################################################################### 28 | # Pre-flight checks 29 | ############################################################################### 30 | __check_root__ 31 | __check_proxmox__ 32 | __check_cluster_membership__ 33 | 34 | ############################################################################### 35 | # Main 36 | ############################################################################### 37 | TIMEZONE="${1:-America/New_York}" 38 | echo "Selected timezone: \"${TIMEZONE}\"" 39 | 40 | # Gather IP addresses of all remote nodes 41 | readarray -t REMOTE_NODES < <( __get_remote_node_ips__ ) 42 | 43 | # Set timezone on each remote node 44 | for nodeIp in "${REMOTE_NODES[@]}"; do 45 | __info__ "Setting timezone to \"${TIMEZONE}\" on node: \"${nodeIp}\"" 46 | if ssh "root@${nodeIp}" "timedatectl set-timezone \"${TIMEZONE}\""; then 47 | __ok__ " - Timezone set successfully on node: \"${nodeIp}\"" 48 | else 49 | __err__ " - Failed to set timezone on node: \"${nodeIp}\"" 50 | fi 51 | done 52 | 53 | # Finally, set the timezone on the local node 54 | __info__ "Setting timezone to \"${TIMEZONE}\" on local node..." 55 | if timedatectl set-timezone "${TIMEZONE}"; then 56 | __ok__ " - Timezone set successfully on local node" 57 | else 58 | __err__ " - Failed to set timezone on local node" 59 | fi 60 | 61 | echo "Timezone setup completed for all nodes!" 
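# To spot-check afterwards, "timedatectl" on any node should report the chosen zone
# (America/New_York when no argument was given) on its "Time zone:" line.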
62 | 63 | ############################################################################### 64 | # Testing status 65 | ############################################################################### 66 | # Tested single-node 67 | # Tested multi-node 68 | -------------------------------------------------------------------------------- /Host/Bulk/UpgradeAllServers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # UpgradeAllServers.sh 4 | # 5 | # A script to update all servers in the Proxmox cluster by running: 6 | # apt-get update && apt-get -y upgrade 7 | # on each node (local + remote). 8 | # 9 | # Usage: 10 | # ./UpgradeAllServers.sh 11 | # 12 | # Description: 13 | # 1. Checks root privileges and confirms this is a Proxmox node. 14 | # 2. Prompts to install 'ssh' if not already installed (though it's usually present on Proxmox). 15 | # 3. Ensures the node is part of a cluster. 16 | # 4. Gathers remote cluster node IPs using __get_remote_node_ips__ (from our utility functions). 17 | # 5. Updates the local node and all remote nodes in the cluster. 18 | # 6. Prompts whether to keep or remove any newly installed packages. 19 | # 20 | # Example: 21 | # ./UpgradeAllServers.sh 22 | # 23 | 24 | source "${UTILITYPATH}/Communication.sh" 25 | source "${UTILITYPATH}/Prompts.sh" 26 | source "${UTILITYPATH}/Queries.sh" 27 | 28 | ############################################################################### 29 | # Preliminary Checks 30 | ############################################################################### 31 | __check_root__ 32 | __check_proxmox__ 33 | __check_cluster_membership__ 34 | 35 | ############################################################################### 36 | # Gather Node Information 37 | ############################################################################### 38 | LOCAL_NODE_IP="$(hostname -I | awk '{print $1}')" 39 | readarray -t REMOTE_NODE_IPS < <( __get_remote_node_ips__ ) 40 | ALL_NODE_IPS=("$LOCAL_NODE_IP" "${REMOTE_NODE_IPS[@]}") 41 | 42 | ############################################################################### 43 | # Main Script Logic 44 | ############################################################################### 45 | echo "Updating all servers in the Proxmox cluster..." 46 | 47 | for nodeIp in "${ALL_NODE_IPS[@]}"; do 48 | echo "------------------------------------------------" 49 | __info__ "Updating node at IP: \"${nodeIp}\"" 50 | 51 | if [[ "${nodeIp}" == "${LOCAL_NODE_IP}" ]]; then 52 | apt-get update && apt-get -y upgrade 53 | __ok__ "Local node update completed." 54 | else 55 | if ssh "root@${nodeIp}" "apt-get update && apt-get -y upgrade"; then 56 | __ok__ "Remote node \"${nodeIp}\" update completed." 57 | else 58 | __err__ "Failed to update node \"${nodeIp}\"." 59 | fi 60 | fi 61 | done 62 | 63 | echo "All servers have been successfully updated." 64 | 65 | ############################################################################### 66 | # Testing status 67 | ############################################################################### 68 | # Tested single-node 69 | # Tested multi-node 70 | -------------------------------------------------------------------------------- /Host/FixDPKGLock.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # FixDpkgLock.sh 4 | # 5 | # This script removes stale dpkg lock files and repairs interrupted dpkg operations 6 | # on a Proxmox node. It then updates the apt cache. 
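# Typical symptom this addresses (exact wording varies with the apt/dpkg version):
#   E: Could not get lock /var/lib/dpkg/lock-frontend - open (11: Resource temporarily unavailable)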
7 | # 8 | # Usage: 9 | # ./FixDpkgLock.sh 10 | # 11 | # Example: 12 | # # To fix dpkg locks on the local Proxmox node 13 | # ./FixDpkgLock.sh 14 | # 15 | source "${UTILITYPATH}/Prompts.sh" 16 | 17 | __check_root__ # Ensure script is run as root 18 | __check_proxmox__ # Ensure we're on a Proxmox node 19 | 20 | ############################################################################### 21 | # Remove stale locks 22 | ############################################################################### 23 | rm -f "/var/lib/dpkg/lock-frontend" 24 | rm -f "/var/lib/dpkg/lock" 25 | rm -f "/var/lib/apt/lists/lock" 26 | rm -f "/var/cache/apt/archives/lock" 27 | rm -f "/var/lib/dpkg/lock"* 28 | 29 | ############################################################################### 30 | # Reconfigure dpkg 31 | ############################################################################### 32 | if ! dpkg --configure -a; then 33 | echo "Error: Failed to configure dpkg." >&2 34 | exit 1 35 | fi 36 | 37 | ############################################################################### 38 | # Update apt cache 39 | ############################################################################### 40 | if ! apt-get update; then 41 | echo "Error: Failed to update apt cache." >&2 42 | exit 1 43 | fi 44 | 45 | echo "dpkg locks removed and apt cache updated successfully." 46 | -------------------------------------------------------------------------------- /Host/Hardware/OnlineMemoryTest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # OnlineMemoryTest.sh 4 | # 5 | # A script to perform an in-memory RAM test on a running Proxmox server without fully shutting down. 6 | # Uses the 'memtester' utility to allocate and test a portion of system memory in gigabytes. 7 | # 8 | # Usage: 9 | # ./OnlineMemoryTest.sh 10 | # 11 | # Examples: 12 | # ./OnlineMemoryTest.sh 1 13 | # This command tests 1GB (1024MB) of RAM in a running system. 14 | # 15 | # ./OnlineMemoryTest.sh 2 16 | # This command tests 2GB (2048MB) of RAM in a running system. 17 | # 18 | # Note: 19 | # - Running this script may temporarily reduce available memory for other processes. 20 | # - For best results, stop or pause non-critical workloads before testing. 21 | # - This script MUST be run as root and on a Proxmox host. 22 | # 23 | 24 | source "${UTILITYPATH}/Prompts.sh" 25 | 26 | ############################################################################### 27 | # Preliminary Checks 28 | ############################################################################### 29 | __check_root__ 30 | __check_proxmox__ 31 | 32 | if [[ $# -lt 1 ]]; then 33 | echo "Error: Missing argument." 34 | echo "Usage: $0 " 35 | exit 1 36 | fi 37 | 38 | TEST_SIZE_GB="$1" 39 | re='^[0-9]+$' 40 | if ! [[ "$TEST_SIZE_GB" =~ $re ]]; then 41 | echo "Error: must be a positive integer." 42 | exit 2 43 | fi 44 | 45 | TEST_SIZE_MB=$(( TEST_SIZE_GB * 1024 )) 46 | 47 | ############################################################################### 48 | # Check for and Possibly Install 'memtester' 49 | ############################################################################### 50 | __install_or_prompt__ "memtester" 51 | 52 | ############################################################################### 53 | # Main Script Logic 54 | ############################################################################### 55 | echo "Starting in-memory test for \"${TEST_SIZE_MB}MB\" (\"${TEST_SIZE_GB}GB\")..." 
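# memtester takes the region size (megabytes here, hence the "M" suffix) followed by an
# iteration count; a single pass is requested below, so larger regions simply take longer.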
56 | memtester "${TEST_SIZE_MB}M" 1 57 | echo "Memory test completed. Check output above for any errors or failures." 58 | 59 | ############################################################################### 60 | # Prompt to Keep or Remove Installed Packages 61 | ############################################################################### 62 | __prompt_keep_installed_packages__ 63 | 64 | exit 0 65 | -------------------------------------------------------------------------------- /Host/Hardware/OptimizeNestedVirtualization.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # OptimizeNestedVirtualization.sh 4 | # 5 | # A script to enable nested virtualization on a Proxmox node. 6 | # This script detects whether you have an Intel or AMD CPU and adjusts 7 | # kernel module parameters to enable nested virtualization. It then reloads 8 | # the necessary modules and verifies that nested virtualization is enabled. 9 | # 10 | # Usage: 11 | # ./OptimizeNestedVirtualization.sh 12 | # 13 | # Examples: 14 | # ./OptimizeNestedVirtualization.sh 15 | # - Enables nested virtualization for the CPU vendor detected on this Proxmox node. 16 | # 17 | # Note: 18 | # After running this script, you may need to set the CPU type to "host" for any 19 | # VM that you want to run nested hypervisors inside of. For example: 20 | # qm set --cpu host 21 | # A reboot of the Proxmox host might be required in some cases. 22 | # 23 | 24 | source "${UTILITYPATH}/Prompts.sh" 25 | 26 | ############################################################################### 27 | # Preliminary Checks 28 | ############################################################################### 29 | __check_root__ 30 | __check_proxmox__ 31 | __install_or_prompt__ "lscpu" 32 | 33 | CPU_VENDOR="$(lscpu | awk -F: '/Vendor ID:/ {gsub(/^[ \t]+|[ \t]+$/, "", $2); print $2}')" 34 | if [[ -z "${CPU_VENDOR}" ]]; then 35 | echo "Error: Unable to detect CPU vendor." 36 | exit 3 37 | fi 38 | 39 | echo "Detected CPU vendor: \"${CPU_VENDOR}\"" 40 | 41 | ############################################################################### 42 | # Main Script Logic 43 | ############################################################################### 44 | if [[ "${CPU_VENDOR}" =~ [Ii]ntel ]]; then 45 | echo "Enabling nested virtualization for Intel CPU..." 46 | echo "options kvm-intel nested=Y" > /etc/modprobe.d/kvm-intel.conf 47 | if lsmod | grep -q kvm_intel; then 48 | echo "Reloading kvm_intel module..." 49 | rmmod kvm_intel 50 | fi 51 | modprobe kvm_intel 52 | 53 | NESTED_STATUS="$(cat /sys/module/kvm_intel/parameters/nested 2>/dev/null)" 54 | if [[ "${NESTED_STATUS}" == "Y" || "${NESTED_STATUS}" == "1" ]]; then 55 | echo "Nested virtualization enabled successfully for Intel CPU." 56 | else 57 | echo "Warning: Unable to confirm nested virtualization is enabled (check manually)." 58 | fi 59 | 60 | elif [[ "${CPU_VENDOR}" =~ [Aa][Mm][Dd] ]]; then 61 | echo "Enabling nested virtualization for AMD CPU..." 62 | echo "options kvm-amd nested=1" > /etc/modprobe.d/kvm-amd.conf 63 | if lsmod | grep -q kvm_amd; then 64 | echo "Reloading kvm_amd module..." 65 | rmmod kvm_amd 66 | fi 67 | modprobe kvm_amd 68 | 69 | NESTED_STATUS="$(cat /sys/module/kvm_amd/parameters/nested 2>/dev/null)" 70 | if [[ "${NESTED_STATUS}" == "1" || "${NESTED_STATUS}" == "Y" ]]; then 71 | echo "Nested virtualization enabled successfully for AMD CPU." 
72 | else 73 | echo "Warning: Unable to confirm nested virtualization is enabled (check manually)." 74 | fi 75 | 76 | else 77 | echo "Warning: Unknown CPU vendor detected. Attempting Intel approach by default..." 78 | echo "options kvm-intel nested=Y" > /etc/modprobe.d/kvm-intel.conf 79 | if lsmod | grep -q kvm_intel; then 80 | rmmod kvm_intel 81 | fi 82 | modprobe kvm_intel 83 | fi 84 | 85 | ############################################################################### 86 | # Post-Script Instructions 87 | ############################################################################### 88 | echo "Done. If nested virtualization is still not working, please reboot the node." 89 | echo "Also, ensure your VMs' CPU type is set to 'host' for nested guests." 90 | 91 | __prompt_keep_installed_packages__ 92 | -------------------------------------------------------------------------------- /Host/SeparateNode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # SeparateNode.sh 4 | # 5 | # This script forcibly removes the current node from a Proxmox cluster. 6 | # 7 | # Usage: 8 | # ./SeparateNode.sh 9 | # 10 | # Note: 11 | # After removing the node from the cluster, it will still have access 12 | # to any shared storage. Ensure you set up separate storage for this 13 | # node and move all data/VMs before detaching from the cluster, as 14 | # shared storage cannot be safely used across cluster boundaries. 15 | # 16 | 17 | ############################################################################### 18 | # Pre-flight Checks 19 | ############################################################################### 20 | source "${UTILITYPATH}/Prompts.sh" 21 | __check_root__ 22 | __check_proxmox__ 23 | 24 | ############################################################################### 25 | # Confirmation Prompt 26 | ############################################################################### 27 | echo "WARNING: This action will forcibly remove the node from the cluster." 28 | read -r -p "Are you sure you want to proceed? [y/N]: " userResponse 29 | case "$userResponse" in 30 | [yY]|[yY][eE][sS]) 31 | echo "Proceeding with node removal..." 32 | ;; 33 | *) 34 | echo "Aborting node removal." 35 | exit 0 36 | ;; 37 | esac 38 | 39 | ############################################################################### 40 | # Stop Cluster Services 41 | ############################################################################### 42 | echo "Stopping cluster services..." 43 | systemctl stop pve-cluster 44 | systemctl stop corosync 45 | 46 | ############################################################################### 47 | # Unmount the pmxcfs filesystem (if still running) and remove Corosync config 48 | ############################################################################### 49 | echo "Unmounting pmxcfs (if active) and removing Corosync configuration..." 50 | pmxcfs -l 51 | rm /etc/pve/corosync.conf 52 | rm -r /etc/corosync/* 53 | 54 | ############################################################################### 55 | # Kill pmxcfs process if still active 56 | ############################################################################### 57 | echo "Killing pmxcfs if still active..." 
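# pmxcfs was restarted in local mode (-l) above so that /etc/pve stayed writable
# while the corosync configuration was deleted; it must be stopped again before
# pve-cluster is restarted, otherwise the service cannot take over /etc/pve.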
58 | killall pmxcfs 59 | 60 | ############################################################################### 61 | # Restart pve-cluster and set cluster expectation to single node 62 | ############################################################################### 63 | echo "Restarting pve-cluster and setting expected cluster size to 1..." 64 | systemctl start pve-cluster 65 | pvecm expected 1 66 | 67 | ############################################################################### 68 | # Remove Corosync data 69 | ############################################################################### 70 | echo "Removing Corosync data..." 71 | rm /var/lib/corosync/* 72 | 73 | echo "Node has been forcibly removed from the cluster." 74 | echo "Make sure no shared storage is still in use by multiple clusters." 75 | 76 | ############################################################################### 77 | # Testing status 78 | ############################################################################### 79 | # Tested single-node 80 | -------------------------------------------------------------------------------- /LXC/Hardware/BulkSetCPU.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkSetCPU.sh 4 | # 5 | # This script sets the CPU type and core count for a range of LXC containers. 6 | # 7 | # Usage: 8 | # ./BulkSetCPU.sh [sockets] 9 | # 10 | # Example: 11 | # # Sets containers 400..402 to CPU type=host and 4 cores 12 | # ./BulkSetCPU.sh 400 402 host 4 13 | # 14 | # # Sets containers 400..402 to CPU type=host, 4 cores, 2 sockets 15 | # ./BulkSetCPU.sh 400 402 host 4 2 16 | # 17 | # Notes: 18 | # - Must be run as root on a Proxmox node. 19 | # - 'pct' is required (part of the PVE/LXC utilities). 20 | # 21 | 22 | source "${UTILITYPATH}/Prompts.sh" 23 | source "${UTILITYPATH}/Queries.sh" 24 | 25 | ############################################################################### 26 | # MAIN 27 | ############################################################################### 28 | # --- Parse arguments ------------------------------------------------------- 29 | if [[ $# -lt 4 ]]; then 30 | echo "Usage: $0 [sockets]" 31 | echo "Example:" 32 | echo " $0 400 402 host 4" 33 | echo " (Sets containers 400..402 to CPU type=host, 4 cores)" 34 | echo " $0 400 402 host 4 2" 35 | echo " (Sets containers 400..402 to CPU type=host, 4 cores, 2 sockets)" 36 | exit 1 37 | fi 38 | 39 | START_CT_ID="$1" 40 | END_CT_ID="$2" 41 | CPU_TYPE="$3" 42 | CORE_COUNT="$4" 43 | SOCKETS="${5:-1}" # Default to 1 socket if not provided 44 | 45 | # --- Basic checks ---------------------------------------------------------- 46 | __check_root__ 47 | __check_proxmox__ 48 | __check_cluster_membership__ 49 | 50 | # --- Display summary ------------------------------------------------------- 51 | echo "=== Starting CPU config update for containers from $START_CT_ID to $END_CT_ID ===" 52 | echo " - CPU Type: \"$CPU_TYPE\"" 53 | echo " - Core Count: \"$CORE_COUNT\"" 54 | echo " - Sockets: \"$SOCKETS\"" 55 | 56 | # --- Main Loop ------------------------------------------------------------- 57 | for (( ctId=START_CT_ID; ctId<=END_CT_ID; ctId++ )); do 58 | if pct config "$ctId" &>/dev/null; then 59 | echo "Updating CPU for container \"$ctId\"..." 60 | if pct set "$ctId" -cpu "$CPU_TYPE" -cores "$CORE_COUNT" -sockets "$SOCKETS"; then 61 | echo " - Successfully updated CPU settings for CT \"$ctId\"." 62 | else 63 | echo " - Failed to update CPU settings for CT \"$ctId\"." 
64 | fi 65 | else 66 | echo " - Container \"$ctId\" does not exist. Skipping." 67 | fi 68 | done 69 | 70 | echo "=== Bulk CPU config change process complete! ===" 71 | -------------------------------------------------------------------------------- /LXC/Hardware/BulkSetMemory.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkSetMemory.sh 4 | # 5 | # This script sets the memory (RAM) and optional swap allocation for a range of LXC containers. 6 | # 7 | # Usage: 8 | # ./BulkSetMemory.sh [swap_MB] 9 | # 10 | # Examples: 11 | # # Sets containers 400..402 to 2048 MB of RAM, no swap 12 | # ./BulkSetMemory.sh 400 402 2048 13 | # 14 | # # Sets containers 400..402 to 2048 MB of RAM and 1024 MB of swap 15 | # ./BulkSetMemory.sh 400 402 2048 1024 16 | # 17 | # Notes: 18 | # - Must be run as root on a Proxmox node. 19 | # - 'pct' is included by default on Proxmox 8. 20 | # 21 | 22 | source "${UTILITYPATH}/Prompts.sh" 23 | 24 | ############################################################################### 25 | # MAIN 26 | ############################################################################### 27 | 28 | # Check argument count 29 | if [[ $# -lt 3 ]]; then 30 | echo "Usage: $0 [swap_MB]" 31 | echo "Examples:" 32 | echo " $0 400 402 2048" 33 | echo " (Sets containers 400..402 to 2048 MB of RAM, no swap)" 34 | echo " $0 400 402 2048 1024" 35 | echo " (Sets containers 400..402 to 2048 MB of RAM and 1024 MB of swap)" 36 | exit 1 37 | fi 38 | 39 | START_CT_ID="$1" 40 | END_CT_ID="$2" 41 | MEMORY_MB="$3" 42 | SWAP_MB="${4:-0}" 43 | 44 | __check_root__ 45 | __check_proxmox__ 46 | 47 | echo "=== Starting memory config update for containers from \"$START_CT_ID\" to \"$END_CT_ID\" ===" 48 | echo " - Memory (MB): \"$MEMORY_MB\"" 49 | echo " - Swap (MB): \"$SWAP_MB\"" 50 | 51 | for (( currentCtId="$START_CT_ID"; currentCtId<="$END_CT_ID"; currentCtId++ )); do 52 | if pct config "$currentCtId" &>/dev/null; then 53 | echo "Updating memory for container \"$currentCtId\"..." 54 | pct set "$currentCtId" -memory "$MEMORY_MB" -swap "$SWAP_MB" 55 | if [[ $? -eq 0 ]]; then 56 | echo " - Successfully updated memory for CT \"$currentCtId\"." 57 | else 58 | echo " - Failed to update memory for CT \"$currentCtId\"." 59 | fi 60 | else 61 | echo " - Container \"$currentCtId\" does not exist. Skipping." 62 | fi 63 | done 64 | 65 | echo "=== Bulk memory config change process complete! ===" 66 | -------------------------------------------------------------------------------- /LXC/Networking/BulkAddSSHKey.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkAddSSHKey.sh 4 | # 5 | # This script appends an SSH public key to the root user's authorized_keys 6 | # for a specified range of LXC containers (no existing keys are removed). 7 | # 8 | # Usage: 9 | # ./BulkAddSSHKey.sh "" 10 | # 11 | # Example: 12 | # ./BulkAddSSHKey.sh 400 402 "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ..." 13 | # 14 | # Notes: 15 | # - Containers must be running for 'pct exec' to succeed. 16 | # - If you want to add keys for another user, replace '/root/.ssh' with that user’s home directory. 17 | # - This script must be run as root on a Proxmox node. 
18 | # 19 | 20 | source "${UTILITYPATH}/Prompts.sh" 21 | 22 | ############################################################################### 23 | # MAIN 24 | ############################################################################### 25 | 26 | # Parse arguments 27 | if [[ $# -ne 3 ]]; then 28 | echo "Usage: $0 \"\"" 29 | echo "Example:" 30 | echo " $0 400 402 \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQ...\"" 31 | exit 1 32 | fi 33 | 34 | startCtId="$1" 35 | endCtId="$2" 36 | sshKey="$3" 37 | 38 | # Basic checks 39 | __check_root__ 40 | __check_proxmox__ 41 | 42 | echo "=== Starting SSH key addition for containers from \"$startCtId\" to \"$endCtId\" ===" 43 | echo " - SSH key to append: \"$sshKey\"" 44 | 45 | # Main loop 46 | for (( ctId=startCtId; ctId<=endCtId; ctId++ )); do 47 | if pct config "$ctId" &>/dev/null; then 48 | echo "Adding SSH key to container \"$ctId\"..." 49 | pct exec "$ctId" -- bash -c " 50 | mkdir -p /root/.ssh && 51 | chmod 700 /root/.ssh && 52 | echo \"$sshKey\" >> /root/.ssh/authorized_keys && 53 | chmod 600 /root/.ssh/authorized_keys 54 | " 55 | if [[ $? -eq 0 ]]; then 56 | echo " - Successfully appended SSH key for CT \"$ctId\"." 57 | else 58 | echo " - Failed to append SSH key for CT \"$ctId\" (container stopped or other error?)." 59 | fi 60 | else 61 | echo " - Container \"$ctId\" does not exist. Skipping." 62 | fi 63 | done 64 | 65 | echo "=== Bulk SSH key addition process complete! ===" 66 | -------------------------------------------------------------------------------- /LXC/Networking/BulkChangeDNS.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkChangeDNS.sh 4 | # 5 | # This script updates DNS nameservers for a series of LXC containers, from a specified 6 | # start ID to a specified end ID (inclusive). 7 | # 8 | # Usage: 9 | # ./BulkChangeDNS.sh [ ...] 10 | # 11 | # Example: 12 | # ./BulkChangeDNS.sh 400 402 8.8.8.8 1.1.1.1 13 | # This updates containers 400, 401, and 402 to use DNS servers 8.8.8.8 and 1.1.1.1 14 | # 15 | # Note: 16 | # - You can pass more than two DNS servers if desired. They get appended. 17 | # - If you want to specify a single DNS server, omit the rest. 18 | # - Must be run as root on a Proxmox node. 19 | # 20 | 21 | source "${UTILITYPATH}/Prompts.sh" 22 | source "${UTILITYPATH}/Queries.sh" 23 | 24 | ############################################################################### 25 | # MAIN 26 | ############################################################################### 27 | 28 | if [[ $# -lt 3 ]]; then 29 | echo "Usage: $0 [ ...]" 30 | exit 1 31 | fi 32 | 33 | START_CT_ID="$1" 34 | END_CT_ID="$2" 35 | shift 2 36 | DNS_SERVERS="$*" 37 | 38 | echo "DNS servers to set: \"$DNS_SERVERS\"" 39 | echo "=== Starting DNS update for containers in range ${START_CT_ID}..${END_CT_ID} ===" 40 | 41 | __check_root__ 42 | __check_proxmox__ 43 | # If a cluster check is required, uncomment: 44 | # __check_cluster_membership__ 45 | 46 | for (( CT_ID=START_CT_ID; CT_ID<=END_CT_ID; CT_ID++ )); do 47 | if pct config "$CT_ID" &>/dev/null; then 48 | echo "Updating DNS for container $CT_ID to: \"$DNS_SERVERS\"" 49 | if pct set "$CT_ID" -nameserver "$DNS_SERVERS"; then 50 | echo " - Successfully updated DNS for CT $CT_ID." 51 | else 52 | echo " - Failed to update DNS for CT $CT_ID." 53 | fi 54 | else 55 | echo " - Container $CT_ID does not exist. Skipping." 56 | fi 57 | done 58 | 59 | echo "=== Bulk DNS change process complete! 
===" 60 | -------------------------------------------------------------------------------- /LXC/Networking/BulkChangeIP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkChangeIP.sh 4 | # 5 | # This script automates changing the IP addresses of a range of existing LXC containers on Proxmox VE. 6 | # Instead of specifying how many containers to update, you provide a start and end container ID. 7 | # It then assigns sequential IP addresses based on a starting IP/CIDR. An optional gateway can also be set. 8 | # 9 | # Usage: 10 | # ./BulkChangeIP.sh [gateway] 11 | # 12 | # Example: 13 | # # Updates containers 400..404 with IPs 192.168.1.50..54/24 on vmbr0, gateway 192.168.1.1 14 | # ./BulkChangeIP.sh 400 404 192.168.1.50/24 vmbr0 192.168.1.1 15 | # 16 | # # Same as above, but does not set a gateway. 17 | # ./BulkChangeIP.sh 400 404 192.168.1.50/24 vmbr0 18 | # 19 | # Notes: 20 | # - Must be run as root on a Proxmox node. 21 | # - 'pct' is part of the standard Proxmox LXC utilities. 22 | # - IP increment logic uses the __ip_to_int__ and __int_to_ip__ functions from the sourced Utilities. 23 | # 24 | 25 | source "${UTILITYPATH}/Conversion.sh" 26 | source "${UTILITYPATH}/Prompts.sh" 27 | 28 | ############################################################################### 29 | # MAIN 30 | ############################################################################### 31 | 32 | # Parse and validate arguments 33 | if [[ $# -lt 4 ]]; then 34 | echo "Usage: $0 [gateway]" 35 | echo "Example:" 36 | echo " $0 400 404 192.168.1.50/24 vmbr0 192.168.1.1" 37 | exit 1 38 | fi 39 | 40 | START_CT_ID="$1" 41 | END_CT_ID="$2" 42 | START_IP_CIDR="$3" 43 | BRIDGE="$4" 44 | GATEWAY="${5:-}" 45 | 46 | # Ensure we are root and on a Proxmox node 47 | __check_root__ 48 | __check_proxmox__ 49 | 50 | # Split IP and subnet 51 | IFS='/' read -r START_IP SUBNET_MASK <<< "$START_IP_CIDR" 52 | if [[ -z "$START_IP" || -z "$SUBNET_MASK" ]]; then 53 | echo "Error: Unable to parse start_ip/cidr: \"$START_IP_CIDR\". Format must be X.X.X.X/XX." 54 | exit 1 55 | fi 56 | 57 | # Convert start IP to integer 58 | START_IP_INT="$(__ip_to_int__ "$START_IP")" 59 | 60 | # Summary 61 | echo "=== Starting IP update for containers from \"$START_CT_ID\" to \"$END_CT_ID\" ===" 62 | echo " - Starting IP: \"$START_IP/$SUBNET_MASK\"" 63 | if [[ -n "$GATEWAY" ]]; then 64 | echo " - Gateway: \"$GATEWAY\"" 65 | else 66 | echo " - No gateway specified" 67 | fi 68 | 69 | # Update IPs for each container in the specified range 70 | for (( ctId=START_CT_ID; ctId<=END_CT_ID; ctId++ )); do 71 | offset=$(( ctId - START_CT_ID )) 72 | currentIpInt=$(( START_IP_INT + offset )) 73 | newIp="$(__int_to_ip__ "$currentIpInt")" 74 | 75 | if pct config "$ctId" &>/dev/null; then 76 | echo "Updating IP for container \"$ctId\" to \"$newIp/$SUBNET_MASK\" on \"$BRIDGE\"..." 77 | if [[ -z "$GATEWAY" ]]; then 78 | pct set "$ctId" -net0 name=eth0,bridge="$BRIDGE",ip="$newIp/$SUBNET_MASK" 79 | else 80 | pct set "$ctId" -net0 name=eth0,bridge="$BRIDGE",ip="$newIp/$SUBNET_MASK",gw="$GATEWAY" 81 | fi 82 | 83 | if [[ $? -eq 0 ]]; then 84 | echo " - Successfully updated container \"$ctId\"." 85 | else 86 | echo " - Failed to update container \"$ctId\"." 87 | fi 88 | else 89 | echo " - Container \"$ctId\" does not exist. Skipping." 90 | fi 91 | done 92 | 93 | echo "=== Bulk IP change process complete! ===" 94 | echo "If containers are running, consider restarting them or reapplying networking." 
95 | -------------------------------------------------------------------------------- /LXC/Networking/BulkChangeNetwork.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkChangeNetwork.sh 4 | # 5 | # This script changes the network interface for a range of LXC containers in Proxmox. 6 | # Typically, this means changing the bridge (e.g., vmbr0 -> vmbr1) and/or the interface name (eth0 -> eth1). 7 | # 8 | # Usage: 9 | # ./BulkChangeNetwork.sh [interface_name] 10 | # 11 | # Example usage: 12 | # # This changes containers 400..402 to use net0 => name=eth1,bridge=vmbr1 13 | # ./BulkChangeNetwork.sh 400 402 vmbr1 eth1 14 | # 15 | # # This changes containers 400..402 to use net0 => name=eth0,bridge=vmbr1 (default eth0) 16 | # ./BulkChangeNetwork.sh 400 402 vmbr1 17 | # 18 | # Further explanation: 19 | # The script takes a starting container ID, an ending container ID, the new bridge name, 20 | # and optionally a new interface name (defaults to eth0). It loops over the specified range 21 | # and sets 'net0' with the new configuration if the container exists. 22 | # 23 | 24 | source "${UTILITYPATH}/Prompts.sh" 25 | 26 | ############################################################################### 27 | # Ensure script is run as root and on a Proxmox node 28 | ############################################################################### 29 | __check_root__ 30 | __check_proxmox__ 31 | 32 | ############################################################################### 33 | # Argument Parsing 34 | ############################################################################### 35 | if [ $# -lt 3 ]; then 36 | echo "Error: Missing arguments." 37 | echo "Usage: $0 [interface_name]" 38 | exit 1 39 | fi 40 | 41 | START_CT_ID="$1" 42 | END_CT_ID="$2" 43 | BRIDGE="$3" 44 | IF_NAME="${4:-eth0}" 45 | 46 | if [[ "${END_CT_ID}" -lt "${START_CT_ID}" ]]; then 47 | echo "Error: end_ct_id must be greater than or equal to start_ct_id." 48 | exit 1 49 | fi 50 | 51 | ############################################################################### 52 | # Main Logic 53 | ############################################################################### 54 | echo "=== Starting network interface update ===" 55 | echo " - Container range: \"${START_CT_ID}\" to \"${END_CT_ID}\"" 56 | echo " - New bridge: \"${BRIDGE}\"" 57 | echo " - Interface name: \"${IF_NAME}\"" 58 | 59 | for (( ctId="${START_CT_ID}"; ctId<="${END_CT_ID}"; ctId++ )); do 60 | if pct config "${ctId}" &>/dev/null; then 61 | echo "Updating network interface for container \"${ctId}\"..." 62 | pct set "${ctId}" -net0 "name=${IF_NAME},bridge=${BRIDGE}" 63 | if [ $? -eq 0 ]; then 64 | echo " - Successfully updated CT \"${ctId}\"." 65 | else 66 | echo " - Failed to update CT \"${ctId}\"." 67 | fi 68 | else 69 | echo " - Container \"${ctId}\" does not exist. Skipping." 70 | fi 71 | done 72 | 73 | echo "=== Bulk interface change process complete! ===" 74 | -------------------------------------------------------------------------------- /LXC/Networking/BulkChangeUserPass.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkChangeUserPass.sh 4 | # 5 | # This script changes a specified user’s password in a range of LXC containers. 6 | # It uses 'pct exec' to run 'chpasswd' inside each container. 7 | # 8 | # Usage: 9 | # ./BulkChangeUserPass.sh 10 | # 11 | # Example: 12 | # # Updates the root password on CTs 400..402 to 'MyNewPass123'. 
13 | # ./BulkChangeUserPass.sh 400 402 root MyNewPass123 14 | # 15 | # Note: 16 | # - The container(s) must be running for 'pct exec' to succeed. 17 | # - Adjust logic if you want to handle containers that are stopped. 18 | # 19 | 20 | source "${UTILITYPATH}/Prompts.sh" 21 | 22 | ############################################################################### 23 | # Initial Checks 24 | ############################################################################### 25 | __check_root__ 26 | __check_proxmox__ 27 | 28 | ############################################################################### 29 | # Argument Parsing 30 | ############################################################################### 31 | if [ "$#" -ne 4 ]; then 32 | echo "Usage: $0 " 33 | exit 1 34 | fi 35 | 36 | START_CT_ID="$1" 37 | END_CT_ID="$2" 38 | USERNAME="$3" 39 | NEW_PASSWORD="$4" 40 | 41 | ############################################################################### 42 | # Main Logic 43 | ############################################################################### 44 | echo "=== Starting password update for containers from \"$START_CT_ID\" through \"$END_CT_ID\" ===" 45 | echo "Target user: \"$USERNAME\"" 46 | 47 | for (( ctId=START_CT_ID; ctId<=END_CT_ID; ctId++ )); do 48 | if pct config "$ctId" &>/dev/null; then 49 | echo "Changing password for container \"$ctId\"..." 50 | pct exec "$ctId" -- bash -c "echo \"$USERNAME:$NEW_PASSWORD\" | chpasswd" 51 | if [ "$?" -eq 0 ]; then 52 | echo " - Successfully changed password on CT \"$ctId\"." 53 | else 54 | echo " - Failed to change password on CT \"$ctId\" (container stopped or other error?)." 55 | fi 56 | else 57 | echo " - Container \"$ctId\" does not exist. Skipping." 58 | fi 59 | done 60 | 61 | echo "=== Bulk password change process complete! ===" 62 | -------------------------------------------------------------------------------- /LXC/Operations/BulkClone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkClone.sh 4 | # 5 | # This script automates the process of cloning LXC containers within a Proxmox VE environment. 6 | # It clones a source LXC container into a specified number of new containers, assigning them 7 | # unique IDs, names based on a user-provided base name, and sets static IP addresses. 8 | # Adding cloned containers to a designated pool is optional. 9 | # 10 | # Usage: 11 | # ./BulkClone.sh [gateway] [pool_name] 12 | # 13 | # Arguments: 14 | # source_ct_id - The ID of the LXC container that will be cloned. 15 | # base_ct_name - The base name for the new containers, which will be appended with a numerical index. 16 | # start_ct_id - The starting container ID for the first clone. 17 | # num_cts - The number of containers to clone. 18 | # start_ip/cidr - The new IP address and subnet mask of the container (e.g., 192.168.1.50/24). 19 | # bridge - The bridge to be used for the network configuration. 20 | # gateway - Optional. The gateway for the IP configuration (e.g., 192.168.1.1). 21 | # pool_name - Optional. The name of the pool to which the new containers will be added. 22 | # If not provided, containers are not added to any pool. 23 | # 24 | # Examples: 25 | # # Clones container 110, creating 30 new containers with IPs starting at 192.168.1.50/24 on vmbr0, 26 | # # gateway 192.168.1.1, and assigns them to a pool named 'PoolName'. 
27 | # ./BulkClone.sh 110 Ubuntu-2C-20GB 400 30 192.168.1.50/24 vmbr0 192.168.1.1 PoolName 28 | # 29 | # # Same as above but without specifying a gateway or pool. 30 | # ./BulkClone.sh 110 Ubuntu-2C-20GB 400 30 192.168.1.50/24 vmbr0 31 | # 32 | 33 | source "${UTILITYPATH}/Conversion.sh" 34 | source "${UTILITYPATH}/Prompts.sh" 35 | 36 | ############################################################################### 37 | # Environment Checks 38 | ############################################################################### 39 | __check_root__ 40 | __check_proxmox__ 41 | 42 | ############################################################################### 43 | # Argument Parsing 44 | ############################################################################### 45 | if [ "$#" -lt 6 ]; then 46 | echo "Error: Not enough arguments." 47 | echo "Usage: $0 [gateway] [pool_name]" 48 | exit 1 49 | fi 50 | 51 | SOURCE_CT_ID="$1" 52 | BASE_CT_NAME="$2" 53 | START_CT_ID="$3" 54 | NUM_CTS="$4" 55 | START_IP_CIDR="$5" 56 | BRIDGE="$6" 57 | GATEWAY="${7:-}" 58 | POOL_NAME="${8:-}" 59 | 60 | ############################################################################### 61 | # Main 62 | ############################################################################### 63 | IFS='/' read -r startIp subnetMask <<< "${START_IP_CIDR}" 64 | startIpInt="$( __ip_to_int__ "${startIp}" )" 65 | 66 | for (( i=0; i/dev/null 46 | 47 | echo "Destroying CT \"$ctId\" ..." 48 | pct destroy "$ctId" &>/dev/null 49 | 50 | if [ $? -eq 0 ]; then 51 | echo " - Successfully deleted CT \"$ctId\"" 52 | else 53 | echo " - Failed to delete CT \"$ctId\"" 54 | fi 55 | done 56 | 57 | echo "=== All LXC containers on this node have been deleted. ===" 58 | -------------------------------------------------------------------------------- /LXC/Operations/BulkDeleteRange.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkDeleteRange.sh 4 | # 5 | # This script deletes a range of LXC containers by ID, stopping them first if needed, 6 | # then destroying them. 7 | # 8 | # Usage: 9 | # ./BulkDeleteRange.sh 10 | # 11 | # Example: 12 | # ./BulkDeleteRange.sh 200 204 13 | # This will delete CTs 200, 201, 202, 203, 204 14 | # 15 | 16 | source "${UTILITYPATH}/Prompts.sh" 17 | 18 | ############################################################################### 19 | # Environment Checks 20 | ############################################################################### 21 | __check_root__ 22 | __check_proxmox__ 23 | 24 | ############################################################################### 25 | # Input Validation 26 | ############################################################################### 27 | if [ "$#" -ne 2 ]; then 28 | echo "Usage: ./BulkDeleteRange.sh " 29 | exit 1 30 | fi 31 | 32 | START_ID="$1" 33 | END_ID="$2" 34 | 35 | if [ "$START_ID" -gt "$END_ID" ]; then 36 | echo "Error: START_ID (\"$START_ID\") is greater than END_ID (\"$END_ID\")." 37 | exit 1 38 | fi 39 | 40 | ############################################################################### 41 | # Main 42 | ############################################################################### 43 | echo "=== Deleting containers from ID \"$START_ID\" to \"$END_ID\" ===" 44 | for (( currentId=START_ID; currentId<=END_ID; currentId++ )); do 45 | if pct config "$currentId" &>/dev/null; then 46 | echo "Stopping CT \"$currentId\"..." 47 | pct stop "$currentId" &>/dev/null 48 | 49 | echo "Destroying CT \"$currentId\"..." 
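# Note: 'pct destroy' refuses to remove a container with protection enabled; on
# recent PVE releases 'pct destroy <ctid> --purge' additionally removes the
# container from backup jobs, replication jobs and HA resources.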
50 | pct destroy "$currentId" &>/dev/null 51 | 52 | if [ $? -eq 0 ]; then 53 | echo " - Successfully deleted CT \"$currentId\"" 54 | else 55 | echo " - Failed to delete CT \"$currentId\"" 56 | fi 57 | else 58 | echo " - CT \"$currentId\" does not exist, skipping." 59 | fi 60 | done 61 | 62 | echo "=== Bulk deletion complete. ===" 63 | -------------------------------------------------------------------------------- /LXC/Operations/BulkStart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkStart.sh 4 | # 5 | # This script starts multiple LXC containers in a range defined by a start ID and an end ID. 6 | # 7 | # Usage: 8 | # ./BulkStart.sh 9 | # 10 | # Example: 11 | # ./BulkStart.sh 200 202 12 | # This will start containers 200, 201, and 202 13 | # 14 | 15 | source "${UTILITYPATH}/Prompts.sh" 16 | 17 | ############################################################################### 18 | # Setup Checks 19 | ############################################################################### 20 | __check_root__ 21 | __check_proxmox__ 22 | 23 | ############################################################################### 24 | # Main 25 | ############################################################################### 26 | if [ "$#" -ne 2 ]; then 27 | echo "Error: You must specify exactly two arguments: ." 28 | echo "Usage: $0 " 29 | exit 1 30 | fi 31 | 32 | startId="$1" 33 | endId="$2" 34 | 35 | if [ "$startId" -gt "$endId" ]; then 36 | echo "Error: START_ID cannot be greater than END_ID." 37 | exit 1 38 | fi 39 | 40 | echo "=== Starting LXC containers from '${startId}' to '${endId}' ===" 41 | for (( ctId=startId; ctId<=endId; ctId++ )); do 42 | if pct config "${ctId}" &>/dev/null; then 43 | echo "Starting CT '${ctId}' ..." 44 | pct start "${ctId}" 45 | if [ "$?" -eq 0 ]; then 46 | echo " - CT '${ctId}' started." 47 | else 48 | echo " - Failed to start CT '${ctId}'." 49 | fi 50 | else 51 | echo " - CT '${ctId}' does not exist, skipping." 52 | fi 53 | done 54 | 55 | echo "=== Bulk start process complete. ===" 56 | -------------------------------------------------------------------------------- /LXC/Operations/BulkStop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkStop.sh 4 | # 5 | # This script stops multiple LXC containers using a provided start and end ID. 6 | # It iterates through the range [START_ID ... END_ID] and attempts to stop each one. 7 | # 8 | # Usage: 9 | # ./BulkStop.sh 10 | # 11 | # Example: 12 | # ./BulkStop.sh 200 202 13 | # This will stop containers 200, 201, and 202 14 | # 15 | 16 | source "${UTILITYPATH}/Prompts.sh" 17 | 18 | ############################################################################### 19 | # Initialization 20 | ############################################################################### 21 | __check_root__ 22 | __check_proxmox__ 23 | 24 | if [ "$#" -ne 2 ]; then 25 | echo "Usage: $0 " 26 | exit 1 27 | fi 28 | 29 | START_ID="$1" 30 | END_ID="$2" 31 | 32 | ############################################################################### 33 | # Main Logic 34 | ############################################################################### 35 | echo "=== Stopping LXC containers in the range [$START_ID ... $END_ID] ===" 36 | for ctId in $(seq "$START_ID" "$END_ID"); do 37 | if pct config "$ctId" &>/dev/null; then 38 | echo "Stopping CT \"$ctId\" ..." 39 | pct stop "$ctId" 40 | if [ $? 
-eq 0 ]; then 41 | echo " - CT \"$ctId\" stopped." 42 | else 43 | echo " - Failed to stop CT \"$ctId\"." 44 | fi 45 | else 46 | echo " - CT \"$ctId\" does not exist, skipping." 47 | fi 48 | done 49 | 50 | ############################################################################### 51 | # End 52 | ############################################################################### 53 | echo "=== Bulk stop process complete. ===" 54 | -------------------------------------------------------------------------------- /LXC/Operations/BulkUnlock.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkUnlock.sh 4 | # 5 | # This script unlocks a range of LXC containers (CT) by ID, from a start ID to an end ID. 6 | # 7 | # Usage: 8 | # ./BulkUnlock.sh 9 | # 10 | # Examples: 11 | # # Unlock containers 100 through 105 12 | # ./BulkUnlock.sh 100 105 13 | # 14 | # Notes: 15 | # - Must be run as root on a Proxmox node. 16 | # - 'pct' is required (part of the PVE/LXC utilities). 17 | # 18 | 19 | source "${UTILITYPATH}/Prompts.sh" 20 | 21 | ############################################################################### 22 | # MAIN 23 | ############################################################################### 24 | # --- Parse arguments ------------------------------------------------------- 25 | if [[ $# -lt 2 ]]; then 26 | echo "Usage: $0 " 27 | echo "Example:" 28 | echo " $0 100 105" 29 | echo " (Unlocks containers 100..105)" 30 | exit 1 31 | fi 32 | 33 | START_CT_ID="$1" 34 | END_CT_ID="$2" 35 | 36 | # --- Basic checks ---------------------------------------------------------- 37 | __check_root__ 38 | __check_proxmox__ 39 | 40 | # --- Display summary ------------------------------------------------------- 41 | echo "=== Starting unlock process for containers from \"$START_CT_ID\" to \"$END_CT_ID\" ===" 42 | 43 | # --- Main Loop ------------------------------------------------------------- 44 | for (( ctId=START_CT_ID; ctId<=END_CT_ID; ctId++ )); do 45 | if pct config "$ctId" &>/dev/null; then 46 | echo "Unlocking container \"$ctId\"..." 47 | if pct unlock "$ctId"; then 48 | echo " - Successfully unlocked CT \"$ctId\"." 49 | else 50 | echo " - Failed to unlock CT \"$ctId\"." 51 | fi 52 | else 53 | echo " - Container \"$ctId\" does not exist. Skipping." 54 | fi 55 | done 56 | 57 | echo "=== Bulk unlock process complete! ===" 58 | 59 | # --- Prompt to remove installed packages if any were installed in this session 60 | __prompt_keep_installed_packages__ 61 | -------------------------------------------------------------------------------- /LXC/Options/BulkStartAtBoot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkStartAtBoot.sh 4 | # 5 | # This script bulk-sets multiple LXC containers to start automatically at boot 6 | # within a Proxmox VE environment. It iterates over a specified range of container 7 | # IDs and enables the onboot parameter for each. This is useful for ensuring that 8 | # a group of containers starts automatically after a system reboot. 9 | # 10 | # Usage: 11 | # ./BulkStartAtBoot.sh 12 | # 13 | # Example: 14 | # ./BulkStartAtBoot.sh 400 30 15 | # This command sets containers with IDs from 400 to 429 to start at boot. 
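#   A boot order or delay can also be set per container, e.g.
#   "pct set <ctid> -startup order=1,up=30", if the group must come up in sequence.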
16 | #
17 | 
18 | source "${UTILITYPATH}/Prompts.sh"
19 | 
20 | ###############################################################################
21 | # Dependencies and environment checks
22 | ###############################################################################
23 | __check_root__
24 | __check_proxmox__
25 | 
26 | ###############################################################################
27 | # Argument validation
28 | ###############################################################################
29 | if [ "$#" -ne 2 ]; then
30 |   echo "Usage: $0 <start_ct_id> <num_cts>"
31 |   exit 1
32 | fi
33 | 
34 | START_CT_ID="$1"
35 | NUM_CTS="$2"
36 | 
37 | if ! [[ "$START_CT_ID" =~ ^[0-9]+$ ]] || ! [[ "$NUM_CTS" =~ ^[0-9]+$ ]]; then
38 |   echo "Error: start_ct_id and num_cts must be positive integers."
39 |   exit 1
40 | fi
41 | 
42 | ###############################################################################
43 | # Main logic
44 | ###############################################################################
45 | for (( i=0; i<NUM_CTS; i++ )); do
46 |   TARGET_CT_ID=$(( START_CT_ID + i ))
47 |   if pct config "$TARGET_CT_ID" &>/dev/null; then
48 |     echo "Setting onboot=1 for container ID '$TARGET_CT_ID'..."
49 |     pct set "$TARGET_CT_ID" -onboot 1
50 |     if [ $? -eq 0 ]; then
51 |       echo "Successfully set onboot for container ID '$TARGET_CT_ID'."
52 |     else
53 |       echo "Failed to set onboot for container ID '$TARGET_CT_ID'."
54 |     fi
55 |   else
56 |     echo "Container ID '$TARGET_CT_ID' does not exist. Skipping."
57 |   fi
58 | done
59 | 
60 | echo "Bulk onboot configuration completed!"
61 | 
-------------------------------------------------------------------------------- /LXC/Options/BulkToggleProtectionMode.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # BulkToggleProtectionMode.sh
4 | #
5 | # This script bulk enables or disables the protection mode for multiple LXC
6 | # containers within a Proxmox VE environment. Protection mode prevents
7 | # containers from being accidentally deleted or modified. This script is useful
8 | # for managing the protection status of a group of containers efficiently.
9 | #
10 | # Usage:
11 | #   ./BulkToggleProtectionMode.sh <action> <start_ct_id> <num_cts>
12 | #
13 | # Examples:
14 | #   # The following command will enable protection for LXC containers
15 | #   # with IDs from 400 to 429 (30 containers total).
16 | #   ./BulkToggleProtectionMode.sh enable 400 30
17 | #
18 | #   # The following command will disable protection for LXC containers
19 | #   # with IDs from 200 to 209 (10 containers total).
20 | #   ./BulkToggleProtectionMode.sh disable 200 10
21 | #
22 | # Function Index:
23 | #   - set_protection
24 | #
25 | 
26 | source "${UTILITYPATH}/Prompts.sh"
27 | 
28 | ###############################################################################
29 | # Validate Environment and Permissions
30 | ###############################################################################
31 | __check_root__
32 | __check_proxmox__
33 | 
34 | ###############################################################################
35 | # Parse and Validate Arguments
36 | ###############################################################################
37 | if [ "$#" -ne 3 ]; then
38 |   echo "Usage: $0 <action> <start_ct_id> <num_cts>"
39 |   echo "  action: enable | disable"
40 |   exit 1
41 | fi
42 | 
43 | ACTION="$1"
44 | START_CT_ID="$2"
45 | NUM_CTS="$3"
46 | 
47 | if [[ "$ACTION" != "enable" && "$ACTION" != "disable" ]]; then
48 |   echo "Error: action must be either 'enable' or 'disable'."
49 |   exit 1
50 | fi
51 | 
52 | if ! [[ "$START_CT_ID" =~ ^[0-9]+$ ]] || ! [[ "$NUM_CTS" =~ ^[0-9]+$ ]]; then
[[ "$NUM_CTS" =~ ^[0-9]+$ ]]; then 53 | echo "Error: start_ct_id and num_cts must be positive integers." 54 | exit 1 55 | fi 56 | 57 | ############################################################################### 58 | # Determine Desired Protection State 59 | ############################################################################### 60 | if [ "$ACTION" == "enable" ]; then 61 | PROTECTION_STATE=1 62 | else 63 | PROTECTION_STATE=0 64 | fi 65 | 66 | ############################################################################### 67 | # Define Helper Function 68 | ############################################################################### 69 | set_protection() { 70 | local containerId="$1" 71 | local protectionState="$2" 72 | pct set "${containerId}" --protected "${protectionState}" 73 | } 74 | 75 | ############################################################################### 76 | # Main Loop 77 | ############################################################################### 78 | for (( i=0; i /dev/null; then 82 | echo "Setting protection to '${ACTION}' for container ID '${CURRENT_CT_ID}'..." 83 | set_protection "${CURRENT_CT_ID}" "${PROTECTION_STATE}" 84 | if [ $? -eq 0 ]; then 85 | echo "Successfully set protection to '${ACTION}' for container ID '${CURRENT_CT_ID}'." 86 | else 87 | echo "Failed to set protection for container ID '${CURRENT_CT_ID}'." 88 | fi 89 | else 90 | echo "Container ID '${CURRENT_CT_ID}' does not exist. Skipping." 91 | fi 92 | done 93 | 94 | echo "Bulk protection configuration completed!" 95 | -------------------------------------------------------------------------------- /LXC/Storage/BulkChangeStorage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkChangeStorageLXC.sh 4 | # 5 | # This script automates the process of updating the storage location specified in 6 | # the configuration files of LXC containers on a Proxmox server. 7 | # It is designed to bulk-update the storage paths for a range of LXC IDs 8 | # from one storage identifier to another. 9 | # 10 | # Usage: 11 | # ./BulkChangeStorageLXC.sh 12 | # 13 | # Arguments: 14 | # start_id - The starting LXC ID for the operation. 15 | # end_id - The ending LXC ID for the operation. 16 | # hostname - The hostname of the Proxmox node where the LXCs are configured. 17 | # current_storage - The current identifier of the storage used in the LXC config (e.g., 'local-lvm'). 18 | # new_storage - The new identifier of the storage to replace the current one (e.g., 'local-zfs'). 19 | # 20 | # Example: 21 | # ./BulkChangeStorageLXC.sh 100 200 pve-node1 local-lvm local-zfs 22 | # 23 | 24 | source "${UTILITYPATH}/Prompts.sh" 25 | 26 | ############################################################################### 27 | # Check environment and parse arguments 28 | ############################################################################### 29 | __check_root__ 30 | __check_proxmox__ 31 | 32 | if [ $# -lt 5 ]; then 33 | echo "Error: Missing arguments." 
34 | echo "Usage: ./BulkChangeStorageLXC.sh " 35 | exit 1 36 | fi 37 | 38 | START_ID="$1" 39 | END_ID="$2" 40 | HOST_NAME="$3" 41 | CURRENT_STORAGE="$4" 42 | NEW_STORAGE="$5" 43 | 44 | ############################################################################### 45 | # Bulk update storage configuration in LXC containers 46 | ############################################################################### 47 | for CT_ID in $(seq "$START_ID" "$END_ID"); do 48 | CONFIG_FILE="/etc/pve/nodes/${HOST_NAME}/lxc/${CT_ID}.conf" 49 | if [ -f "${CONFIG_FILE}" ]; then 50 | echo "Processing LXC ID: '${CT_ID}'" 51 | if grep -q "${CURRENT_STORAGE}" "${CONFIG_FILE}"; then 52 | sed -i "s/${CURRENT_STORAGE}/${NEW_STORAGE}/g" "${CONFIG_FILE}" 53 | echo " - Storage location changed from '${CURRENT_STORAGE}' to '${NEW_STORAGE}'." 54 | else 55 | echo " - '${CURRENT_STORAGE}' not found in config. No changes made." 56 | fi 57 | else 58 | echo "LXC ID: '${CT_ID}' does not exist (no config file). Skipping..." 59 | fi 60 | done 61 | 62 | echo "Bulk storage identifier update complete." 63 | -------------------------------------------------------------------------------- /LXC/Storage/BulkMoveVolume.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkMoveVolume.sh 4 | # 5 | # This script moves the specified volume (e.g., 'rootfs', 'mp0') for each LXC 6 | # container in a given range to a new storage location using 'pct move-volume'. 7 | # 8 | # Usage: 9 | # ./BulkMoveVolume.sh 10 | # 11 | # Arguments: 12 | # start_id - The starting LXC ID. 13 | # end_id - The ending LXC ID. 14 | # source_volume - The volume identifier to move (e.g. 'rootfs', 'mp0'). 15 | # target_storage - The storage name to move the volume onto (e.g. 'local-zfs'). 16 | # 17 | # Example: 18 | # ./BulkMoveVolume.sh 100 105 rootfs local-zfs 19 | # This will move the 'rootfs' volume of LXCs 100..105 to 'local-zfs'. 20 | # 21 | source "${UTILITYPATH}/Prompts.sh" 22 | 23 | ############################################################################### 24 | # Initial Checks 25 | ############################################################################### 26 | __check_root__ 27 | __check_proxmox__ 28 | 29 | ############################################################################### 30 | # Parse Arguments 31 | ############################################################################### 32 | if [ $# -lt 4 ]; then 33 | echo "Error: Insufficient arguments." 34 | echo "Usage: $0 " 35 | exit 1 36 | fi 37 | 38 | START_ID="$1" 39 | END_ID="$2" 40 | DISK_ID="$3" 41 | TARGET_STORAGE="$4" 42 | 43 | echo "=== Bulk Move Volume for LXC Containers ===" 44 | echo "Range: \"$START_ID\" to \"$END_ID\"" 45 | echo "Volume to move: \"$DISK_ID\"" 46 | echo "Target storage: \"$TARGET_STORAGE\"" 47 | echo 48 | 49 | ############################################################################### 50 | # Main Logic 51 | ############################################################################### 52 | for ctId in $(seq "$START_ID" "$END_ID"); do 53 | if pct config "$ctId" &>/dev/null; then 54 | echo "Processing LXC \"$ctId\"..." 55 | runningState=$(pct status "$ctId" | awk '{print $2}') 56 | 57 | if [ "$runningState" == "running" ]; then 58 | echo " - Container \"$ctId\" is running. Stopping container..." 59 | pct stop "$ctId" 60 | fi 61 | 62 | echo " - Moving \"$DISK_ID\" of CT \"$ctId\" to \"$TARGET_STORAGE\"..." 
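# 'pct move-volume' is the current spelling; older PVE 6.x releases expose the
# same operation as 'pct move_volume' (underscore), so adjust if some nodes
# still run the older tooling.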
63 | if pct move-volume "$ctId" "$DISK_ID" "$TARGET_STORAGE"; then 64 | echo " - Successfully moved \"$DISK_ID\" of CT \"$ctId\" to \"$TARGET_STORAGE\"." 65 | else 66 | echo "Error: Failed to move volume for CT \"$ctId\"." 67 | fi 68 | echo 69 | else 70 | echo "LXC \"$ctId\" does not exist. Skipping." 71 | fi 72 | done 73 | 74 | echo "=== Bulk volume move complete! ===" 75 | -------------------------------------------------------------------------------- /LXC/Storage/BulkResizeStorage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkResizeStorage.sh 4 | # 5 | # This script resizes a specified disk (typically rootfs) of each LXC container 6 | # in a specified range to a new size (e.g., 20G or +5G). 7 | # 8 | # Usage: 9 | # ./BulkResizeStorage.sh 10 | # 11 | # Arguments: 12 | # start_id - The starting LXC ID 13 | # end_id - The ending LXC ID 14 | # disk_id - The disk identifier (e.g., 'rootfs' or 'mp0') 15 | # new_size - The new size or size increment (e.g., '20G' or '+5G') 16 | # 17 | # Example: 18 | # # Resizes the rootfs of LXCs 100..105 to 20G each 19 | # ./BulkResizeStorage.sh 100 105 rootfs 20G 20 | # 21 | # # Increases the rootfs size of LXCs 100..105 by 5G 22 | # ./BulkResizeStorage.sh 100 105 rootfs +5G 23 | # 24 | 25 | source "${UTILITYPATH}/Prompts.sh" 26 | 27 | ############################################################################### 28 | # Environment Checks 29 | ############################################################################### 30 | __check_root__ 31 | __check_proxmox__ 32 | 33 | ############################################################################### 34 | # Usage Check 35 | ############################################################################### 36 | if [ $# -lt 4 ]; then 37 | echo "Error: Missing arguments." 38 | echo "Usage: $0 " 39 | exit 1 40 | fi 41 | 42 | ############################################################################### 43 | # Variable Initialization 44 | ############################################################################### 45 | START_ID="$1" 46 | END_ID="$2" 47 | DISK_ID="$3" 48 | NEW_SIZE="$4" 49 | 50 | echo "=== Bulk Resize for LXC Containers ===" 51 | echo "Range: \"$START_ID\" to \"$END_ID\"" 52 | echo "Disk: \"$DISK_ID\"" 53 | echo "New size: \"$NEW_SIZE\"" 54 | echo 55 | 56 | ############################################################################### 57 | # Main Logic 58 | ############################################################################### 59 | for ctId in $(seq "$START_ID" "$END_ID"); do 60 | if pct config "$ctId" &>/dev/null; then 61 | echo "Resizing \"$DISK_ID\" of LXC \"$ctId\" to \"$NEW_SIZE\"..." 62 | pct resize "$ctId" "$DISK_ID" "$NEW_SIZE" 63 | if [ $? -eq 0 ]; then 64 | echo " - Successfully resized \"$DISK_ID\" of CT \"$ctId\"." 65 | else 66 | echo " - Failed to resize disk for CT \"$ctId\"." 67 | fi 68 | echo 69 | else 70 | echo "LXC \"$ctId\" does not exist. Skipping." 71 | fi 72 | done 73 | 74 | echo "=== Bulk resize complete! ===" 75 | -------------------------------------------------------------------------------- /LXC/UpdateAll.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # UpdateAll.sh 4 | # 5 | # A script to apply package updates to all Linux containers (LXC) on every host 6 | # in a Proxmox cluster. Requires root privileges and passwordless SSH between nodes. 
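#   (On a standard Proxmox cluster the nodes already exchange root SSH keys; if a
#   node was joined manually, "ssh-copy-id root@<node-ip>" from this host is one
#   way to satisfy the requirement.)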
7 | # 8 | # Usage: 9 | # ./UpdateAll.sh 10 | # 11 | # Example: 12 | # ./UpdateAll.sh 13 | # 14 | # Description: 15 | # 1. Checks if this script is run as root (__check_root__). 16 | # 2. Verifies this node is a Proxmox node (__check_proxmox__). 17 | # 3. Installs 'ssh' if missing (__install_or_prompt__ "ssh"). 18 | # 4. Ensures the node is part of a Proxmox cluster (__check_cluster_membership__). 19 | # 5. Finds the local node IP and remote node IPs. 20 | # 6. Iterates over all nodes (local + remote), enumerates their LXC containers, 21 | # and applies package updates inside each container. 22 | # 23 | 24 | source "${UTILITYPATH}/Prompts.sh" 25 | source "${UTILITYPATH}/Queries.sh" 26 | 27 | ############################################################################### 28 | # Preliminary Checks via Utilities 29 | ############################################################################### 30 | __check_root__ 31 | __check_proxmox__ 32 | __check_cluster_membership__ 33 | 34 | ############################################################################### 35 | # Gather Node IP Addresses 36 | ############################################################################### 37 | LOCAL_NODE_IP="$(hostname -I | awk '{print $1}')" 38 | 39 | # Gather remote node IPs (excludes the local node) 40 | readarray -t REMOTE_NODE_IPS < <( __get_remote_node_ips__ ) 41 | 42 | # Combine local + remote IPs 43 | ALL_NODE_IPS=("$LOCAL_NODE_IP" "${REMOTE_NODE_IPS[@]}") 44 | 45 | ############################################################################### 46 | # Main Script Logic 47 | ############################################################################### 48 | echo "Updating LXC containers on all nodes in the cluster..." 49 | 50 | # Iterate over all node IPs 51 | for nodeIp in "${ALL_NODE_IPS[@]}"; do 52 | echo "--------------------------------------------------" 53 | echo "Processing LXC containers on node: \"${nodeIp}\"" 54 | 55 | # 'pct list' header is removed by tail -n +2 56 | containers="$(ssh "root@${nodeIp}" "pct list | tail -n +2 | awk '{print \$1}'" 2>/dev/null)" 57 | 58 | if [[ -z "$containers" ]]; then 59 | echo " No LXC containers found on \"${nodeIp}\"" 60 | continue 61 | fi 62 | 63 | # Update each container 64 | while read -r containerId; do 65 | [[ -z "$containerId" ]] && continue 66 | echo " Updating container CTID: \"${containerId}\" on node \"${nodeIp}\"..." 67 | if ssh "root@${nodeIp}" "pct exec ${containerId} -- apt-get update && apt-get upgrade -y"; then 68 | echo " Update complete for CTID: \"${containerId}\"" 69 | else 70 | echo " Update failed for CTID: \"${containerId}\"" 71 | fi 72 | done <<< "$containers" 73 | done 74 | 75 | echo "All LXC containers have been updated across the cluster." 76 | -------------------------------------------------------------------------------- /MakeScriptsExecutable.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script enables execute permissions (chmod +x) on all scripts in the current folder and its subfolders. 4 | # 5 | # Usage: 6 | # ./MakeScriptsExecutable.sh 7 | # 8 | 9 | # Find all files with a .sh extension in the current directory and subdirectories 10 | # and add execute permissions to them. 11 | find . -type f -name "*.sh" -exec chmod +x {} \; 12 | 13 | echo "All scripts in the current folder and subfolders are now executable." 
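# Note: the scripts in this repository locate their shared helpers through the
# UTILITYPATH environment variable. When calling a script directly instead of
# through the provided launcher, a minimal setup (illustrative path) looks like:
#   export UTILITYPATH="/path/to/repo/Utilities"
#   ./LXC/Operations/BulkStart.sh 200 202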
14 | 
-------------------------------------------------------------------------------- /Networking/BulkPrintVMIDMacAddresses.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # This script retrieves the network configuration details for all virtual machines (VMs) and containers (CTs) across all nodes in a Proxmox cluster.
4 | # It outputs the MAC addresses associated with each VM, helping in network configuration audits or inventory management.
5 | # The script reads the guest configuration files under /etc/pve/nodes/<node> directly and extracts the MAC addresses with grep, printing one CSV row per guest.
6 | #
7 | # Usage:
8 | #   Simply run this script on a Proxmox cluster host that has permissions to access the Proxmox VE API:
9 | #   ./BulkPrintVMIDMacAddresses.sh
10 | #
11 | 
12 | # Source utility scripts (adjust UTILITYPATH as needed)
13 | source "${UTILITYPATH}/Prompts.sh"
14 | source "${UTILITYPATH}/Queries.sh"
15 | 
16 | ###############################################################################
17 | # Pre-flight checks
18 | ###############################################################################
19 | __check_root__
20 | __check_proxmox__
21 | __install_or_prompt__ "jq"
22 | __check_cluster_membership__
23 | 
24 | # Print header for CSV output
25 | echo "Nodename, CTID/VMID, VM or CT, Mac Address"
26 | 
27 | ###############################################################################
28 | # Main Logic: Iterate locally through /etc/pve/nodes/<node>/qemu-server and /lxc
29 | ###############################################################################
30 | 
31 | # Loop over each node directory in /etc/pve/nodes
32 | for nodeDir in /etc/pve/nodes/*; do
33 |   if [ -d "$nodeDir" ]; then
34 |     nodeName=$(basename "$nodeDir")
35 | 
36 |     # Process QEMU virtual machine configuration files
37 |     qemuDir="$nodeDir/qemu-server"
38 |     if [ -d "$qemuDir" ]; then
39 |       for configFile in "$qemuDir"/*.conf; do
40 |         if [ -f "$configFile" ]; then
41 |           vmid=$(basename "$configFile" .conf)
42 |           # Look for lines starting with "net" and extract MAC addresses (format: XX:XX:XX:XX:XX:XX)
43 |           macs=$(grep -E '^net[0-9]+:' "$configFile" \
44 |             | grep -Eo '([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}' \
45 |             | tr '\n' ' ' \
46 |             | sed 's/ *$//')
47 |           [ -z "$macs" ] && macs="None"
48 |           echo "$nodeName, $vmid, VM, $macs"
49 |         fi
50 |       done
51 |     fi
52 | 
53 |     # Process LXC container configuration files
54 |     lxcDir="$nodeDir/lxc"
55 |     if [ -d "$lxcDir" ]; then
56 |       for configFile in "$lxcDir"/*.conf; do
57 |         if [ -f "$configFile" ]; then
58 |           ctid=$(basename "$configFile" .conf)
59 |           macs=$(grep -E '^net[0-9]+:' "$configFile" \
60 |             | grep -Eo '([0-9A-Fa-f]{2}:){5}[0-9A-Fa-f]{2}' \
61 |             | tr '\n' ' ' \
62 |             | sed 's/ *$//')
63 |           [ -z "$macs" ] && macs="None"
64 |           echo "$nodeName, $ctid, CT, $macs"
65 |         fi
66 |       done
67 |     fi
68 | 
69 |   fi
70 | done
-------------------------------------------------------------------------------- /Networking/BulkSetDNS.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # BulkSetDNS.sh
4 | #
5 | # Sets the DNS servers and search domain for all nodes in the Proxmox VE cluster,
6 | # using the IPs reported from the Proxmox utilities (skipping the local node).
7 | #
8 | # Usage:
9 | #   ./BulkSetDNS.sh <dns1> <dns2> <search_domain>
10 | #
11 | # Example:
12 | #   ./BulkSetDNS.sh 8.8.8.8 8.8.4.4 mydomain.local
13 | #
14 | # Explanation:
15 | #   - Retrieves the IP addresses of remote nodes in the Proxmox cluster and, over SSH, overwrites each remote node's /etc/resolv.conf with the specified
16 | # - Uses SSH to overwrite each remote node's /etc/resolv.conf with the specified 17 | # DNS servers and search domain. 18 | # - Also applies the same changes to the local node. 19 | # 20 | source "${UTILITYPATH}/Prompts.sh" 21 | source "${UTILITYPATH}/Queries.sh" 22 | 23 | ############################################################################### 24 | # Check environment and validate arguments 25 | ############################################################################### 26 | __check_root__ 27 | __check_proxmox__ 28 | 29 | if [ "$#" -ne 3 ]; then 30 | echo "Usage: $0 " 31 | exit 1 32 | fi 33 | 34 | DNS1="$1" 35 | DNS2="$2" 36 | SEARCH_DOMAIN="$3" 37 | 38 | ############################################################################### 39 | # Get remote node IPs 40 | ############################################################################### 41 | readarray -t REMOTE_NODES < <( __get_remote_node_ips__ ) 42 | 43 | ############################################################################### 44 | # Update DNS on each remote node 45 | ############################################################################### 46 | for nodeIp in "${REMOTE_NODES[@]}"; do 47 | echo "-----------------------------------------------------------" 48 | echo "Setting DNS on remote node IP: \"${nodeIp}\"" 49 | echo " DNS1=\"${DNS1}\", DNS2=\"${DNS2}\", SEARCH_DOMAIN=\"${SEARCH_DOMAIN}\"" 50 | 51 | ssh -o StrictHostKeyChecking=no "root@${nodeIp}" \ 52 | "echo -e 'search ${SEARCH_DOMAIN}\nnameserver ${DNS1}\nnameserver ${DNS2}' > /etc/resolv.conf" 53 | if [ $? -eq 0 ]; then 54 | echo " - DNS configured successfully on \"${nodeIp}\"" 55 | else 56 | echo " - Failed to configure DNS on \"${nodeIp}\"" 57 | fi 58 | echo 59 | done 60 | 61 | ############################################################################### 62 | # Update DNS on the local node 63 | ############################################################################### 64 | echo "-----------------------------------------------------------" 65 | echo "Setting DNS on the local node:" 66 | echo " DNS1=\"${DNS1}\", DNS2=\"${DNS2}\", SEARCH_DOMAIN=\"${SEARCH_DOMAIN}\"" 67 | echo -e "search ${SEARCH_DOMAIN}\nnameserver ${DNS1}\nnameserver ${DNS2}" > /etc/resol 68 | -------------------------------------------------------------------------------- /Networking/FindVMFromMacAddress.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # FindMacAddress.sh 4 | # 5 | # This script retrieves the network configuration details for all virtual machines (VMs) across 6 | # all nodes in a Proxmox cluster. It outputs the MAC addresses associated with each VM, helping 7 | # in network configuration audits or inventory management. 8 | # 9 | # Usage: 10 | # ./FindMacAddress.sh 11 | # 12 | # Example: 13 | # # Simply run this script on a Proxmox host within a cluster 14 | # ./FindMacAddress.sh 15 | # 16 | # The script uses 'pvesh' to fetch JSON data and parses it with 'jq'. 
17 | # 18 | 19 | source "${UTILITYPATH}/Prompts.sh" 20 | source "${UTILITYPATH}/Queries.sh" 21 | 22 | ############################################################################### 23 | # Pre-flight checks 24 | ############################################################################### 25 | __check_root__ 26 | __check_proxmox__ 27 | __install_or_prompt__ "jq" 28 | __check_cluster_membership__ 29 | 30 | ############################################################################### 31 | # Main Logic 32 | ############################################################################### 33 | nodes="$(pvesh get /nodes --output-format=json | jq -r '.[] | .node')" 34 | 35 | for node in $nodes; do 36 | echo "Checking node: \"$node\"" 37 | vmIds="$(pvesh get /nodes/"$node"/qemu --output-format=json | jq -r '.[] | .vmid')" 38 | 39 | for vmId in $vmIds; do 40 | echo "VMID: \"$vmId\" on Node: \"$node\"" 41 | pvesh get /nodes/"$node"/qemu/"$vmId"/config \ 42 | | grep -i 'net' \ 43 | | grep -i 'macaddr' 44 | done 45 | done 46 | -------------------------------------------------------------------------------- /Networking/HostIPerfTest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # HostIPerfTest.sh 4 | # 5 | # Automates an Iperf throughput test between two specified hosts, allowing you 6 | # to define which is the server and which is the client by hostname. 7 | # 8 | # Usage: 9 | # ./HostIPerfTest.sh 10 | # 11 | # Example: 12 | # ./HostIPerfTest.sh 192.168.1.10 192.168.1.11 5001 13 | # 14 | # This script will: 15 | # 1. Ensure iperf3 is installed locally on Proxmox. 16 | # 2. Start an iperf3 server on the specified server host using SSH. 17 | # 3. Run the iperf3 client on the specified client host to display throughput results. 18 | # 4. Kill the iperf3 server process automatically upon completion. 19 | # 5. Prompt whether to keep or remove any newly installed packages. 20 | # 21 | 22 | source "${UTILITYPATH}/Prompts.sh" 23 | 24 | ############################################################################### 25 | # Preliminary Checks 26 | ############################################################################### 27 | __check_root__ 28 | __check_proxmox__ 29 | 30 | if [[ $# -lt 3 ]]; then 31 | echo "Usage: $0 " 32 | exit 1 33 | fi 34 | 35 | ############################################################################### 36 | # Argument Parsing 37 | ############################################################################### 38 | serverHost="$1" 39 | clientHost="$2" 40 | port="$3" 41 | 42 | ############################################################################### 43 | # Iperf Installation Check 44 | ############################################################################### 45 | __install_or_prompt__ "iperf3" 46 | 47 | ############################################################################### 48 | # Start Iperf Server on Server Host 49 | ############################################################################### 50 | echo "Starting iperf3 server on '${serverHost}'..." 51 | ssh "root@${serverHost}" "pkill -f 'iperf3 -s' || true" 52 | ssh "root@${serverHost}" "iperf3 -s -p '${port}' &" 53 | 54 | echo "Waiting 5 seconds for the iperf3 server to be ready..." 
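# The iperf3 server was launched in the background over SSH above; give it a few seconds to bind to the chosen port before starting the client.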
55 | sleep 5 56 | 57 | ############################################################################### 58 | # Run Iperf Client on Client Host 59 | ############################################################################### 60 | echo "Running iperf3 client on '${clientHost}' connecting to '${serverHost}'..." 61 | ssh "root@${clientHost}" "iperf3 -c '${serverHost}' -p '${port}' -t 10" 62 | 63 | ############################################################################### 64 | # Kill Iperf Server 65 | ############################################################################### 66 | echo "Stopping iperf3 server on '${serverHost}'..." 67 | ssh "root@${serverHost}" "pkill -f 'iperf3 -s'" 68 | 69 | echo "Iperf test completed successfully." 70 | 71 | ############################################################################### 72 | # Prompt to Keep or Remove Packages 73 | ############################################################################### 74 | __prompt_keep_installed_packages__ 75 | -------------------------------------------------------------------------------- /RemoteManagement/ApacheGuacamole/GetGuacamoleAuthenticationToken.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # GetGuacamoleAuthenticationToken.sh 4 | # 5 | # Retrieves an authentication token from the Apache Guacamole REST API 6 | # and saves it to /tmp/cc_pve/guac_token for later use. 7 | # 8 | # Usage: 9 | # ./GetGuacamoleAuthenticationToken.sh GUAC_SERVER_URL GUAC_ADMIN_USER GUAC_ADMIN_PASS 10 | # 11 | # Example: 12 | # # Using default port 8080 on guac.example.com 13 | # ./GetGuacamoleAuthenticationToken.sh "http://guac.example.com:8080/guacamole" "admin" "pass123" 14 | # 15 | 16 | source "${UTILITYPATH}/Prompts.sh" 17 | 18 | __check_root__ 19 | __check_proxmox__ 20 | __install_or_prompt__ "jq" 21 | 22 | GUAC_URL="$1" 23 | GUAC_ADMIN_USER="$2" 24 | GUAC_ADMIN_PASS="$3" 25 | 26 | if [[ -z "$GUAC_URL" || -z "$GUAC_ADMIN_USER" || -z "$GUAC_ADMIN_PASS" ]]; then 27 | echo "Error: Missing required arguments." >&2 28 | echo "Usage: ./GetGuacToken.sh GUAC_SERVER_URL GUAC_ADMIN_USER GUAC_ADMIN_PASS" >&2 29 | exit 1 30 | fi 31 | 32 | mkdir -p "/tmp/cc_pve" 33 | 34 | ############################################################################### 35 | # Main Logic 36 | ############################################################################### 37 | TOKEN_RESPONSE="$(curl -s -X POST \ 38 | -d "username=${GUAC_ADMIN_USER}&password=${GUAC_ADMIN_PASS}" \ 39 | "${GUAC_URL}/api/tokens")" 40 | 41 | AUTH_TOKEN="$(echo "$TOKEN_RESPONSE" | jq -r '.authToken')" 42 | 43 | if [[ -z "$AUTH_TOKEN" || "$AUTH_TOKEN" == "null" ]]; then 44 | echo "Error: Failed to retrieve Guacamole auth token." >&2 45 | exit 1 46 | fi 47 | 48 | echo "$AUTH_TOKEN" > "/tmp/cc_pve/guac_token" 49 | echo "Guacamole auth token saved to /tmp/cc_pve/guac_token" 50 | 51 | __prompt_keep_installed_packages__ 52 | -------------------------------------------------------------------------------- /RemoteManagement/ApacheGuacamole/RemoveGuacamoleAuthenticationToken.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # RemoveGuacamoleAuthenticationToken.sh 4 | # 5 | # This script deletes the locally saved Guacamole authentication token 6 | # stored in /tmp/cc_pve/guac_token. 
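# (This token file is written by GetGuacamoleAuthenticationToken.sh.)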
7 | # 8 | # Usage: 9 | # ./RemoveGuacamoleAuthenticationToken.sh 10 | # 11 | # Example: 12 | # ./RemoveGuacamoleAuthenticationToken.sh 13 | # 14 | 15 | source "${UTILITYPATH}/Prompts.sh" 16 | 17 | __check_root__ 18 | __check_proxmox__ 19 | 20 | ############################################################################### 21 | # Main Logic 22 | ############################################################################### 23 | TOKEN_PATH="/tmp/cc_pve/guac_token" 24 | 25 | if [[ ! -f "$TOKEN_PATH" ]]; then 26 | echo "No Guacamole auth token found at '$TOKEN_PATH'. Nothing to delete." 27 | else 28 | rm -f "$TOKEN_PATH" 29 | echo "Guacamole auth token deleted from '$TOKEN_PATH'." 30 | fi 31 | 32 | __prompt_keep_installed_packages__ 33 | -------------------------------------------------------------------------------- /RemoteManagement/ConfigureOverSSH/-AddGuestAgentDebian.sh: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/coelacant1/ProxmoxScripts/5a50566042a1d6403aa0e082712ca724488d9ee7/RemoteManagement/ConfigureOverSSH/-AddGuestAgentDebian.sh -------------------------------------------------------------------------------- /RemoteManagement/ConfigureOverSSH/BulkCloneSetIPDebian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkCloneSetIPDebian.sh 4 | # 5 | # Clones a Debian-based VM multiple times, updates each clone's IP/network, 6 | # sets a default gateway, and restarts networking. Uses SSH with username/password. 7 | # Minimal comments, name prefix added for the cloned VMs. 8 | # 9 | # Usage: 10 | # ./BulkCloneSetIPDebian.sh 11 | # 12 | # Example: 13 | # # Clones VM ID 100 five times, starting IP at 172.20.83.100 with mask /24, 14 | # # gateway 172.20.83.1, base VM ID 200, SSH login root:pass123, prefix "CLOUD-" 15 | # ./BulkCloneSetIPDebian.sh 172.20.83.22 172.20.83.100/24 172.20.83.1 5 100 200 root pass123 CLOUD- 16 | # 17 | 18 | source "${UTILITYPATH}/Prompts.sh" 19 | source "${UTILITYPATH}/SSH.sh" 20 | 21 | ############################################################################### 22 | # Environment Checks 23 | ############################################################################### 24 | __check_root__ 25 | __check_proxmox__ 26 | 27 | ############################################################################### 28 | # Argument Parsing 29 | ############################################################################### 30 | if [ "$#" -lt 9 ]; then 31 | echo "Error: Missing arguments." 
32 | echo "Usage: $0 " 33 | exit 1 34 | fi 35 | 36 | templateIpAddr="$1" 37 | startIpCidr="$2" 38 | newGateway="$3" 39 | instanceCount="$4" 40 | templateId="$5" 41 | baseVmId="$6" 42 | sshUsername="$7" 43 | sshPassword="$8" 44 | vmNamePrefix="$9" 45 | 46 | IFS='/' read -r startIpAddrOnly startMask <<<"$startIpCidr" 47 | ipInt="$(__ip_to_int__ "$startIpAddrOnly")" 48 | 49 | ############################################################################### 50 | # Main Logic 51 | ############################################################################### 52 | for ((i=0; i/dev/null 2>&1 &" 73 | 74 | __wait_for_ssh__ "$currentIp" "$sshUsername" "$sshPassword" 75 | ipInt=$((ipInt + 1)) 76 | done 77 | 78 | ############################################################################### 79 | # Testing status 80 | ############################################################################### 81 | # Tested single-node 82 | # Tested multi-node 83 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | This repository contains Bash scripts used for Proxmox management and automation. We take security issues seriously and appreciate any reports that help us maintain a secure and reliable environment. 4 | 5 | ## Supported Versions 6 | 7 | We make every effort to support all recent versions of these scripts. Specifically: 8 | - **Main Branch (latest)**: Actively maintained, security updates are provided promptly. 9 | - **Release Tags**: Critical security patches may be backported to recent versions, but users should keep current for the best support. 10 | 11 | ## Reporting a Vulnerability 12 | 13 | If you discover any security vulnerabilities: 14 | 1. **Do not create a public issue.** Instead, please email the maintainers directly: 15 | - [Maintainer’s Email] (coelacannot@gmail.com) 16 | 2. Provide as much detail as possible, including: 17 | - Steps to reproduce or proof of concept (if available). 18 | - Potential impact of the vulnerability. 19 | - Any suggested fixes or patches (if you have them). 20 | 21 | We will make every effort to: 22 | - Respond to security-related messages as fast as possible. 23 | - Provide an initial resolution or mitigation as fast as possible, depending on the severity and complexity of the issue. 24 | 25 | ## Scope and Expectations 26 | 27 | - **Scope**: This policy covers potential security issues within the bash scripts (e.g., command injection, privilege escalation, or insecure storage of credentials). 28 | - **Out of Scope**: Vulnerabilities in external Proxmox environments, third-party dependencies, or issues related to general system administration (outside the repository) are not handled within this policy. However, we may provide guidance or mitigation strategies if they relate to this project. 29 | 30 | ## Handling Confidential Information 31 | 32 | Users should avoid committing or sharing any sensitive data (tokens, passwords, or API keys) in this repository. If you discover such data has been accidentally committed, please report it using the steps above, so it can be removed from history and replaced with a secure alternative. 33 | 34 | ## Thank You 35 | 36 | We appreciate your efforts in responsibly disclosing security issues and helping to keep our Proxmox automation scripts secure. If you have any questions or concerns, please contact us at the email address above. 
37 | -------------------------------------------------------------------------------- /Security/PenetrationTest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # PenetrationTest.sh 4 | # 5 | # A script to conduct a basic vulnerability assessment (pentest) on one or multiple Proxmox hosts. 6 | # 7 | # Usage: 8 | # ./PenetrationTest.sh [ ...] 9 | # ./PenetrationTest.sh all 10 | # 11 | # Examples: 12 | # ./PenetrationTest.sh 192.168.1.50 13 | # Conducts a pentest on a single host at "192.168.1.50". 14 | # 15 | # ./PenetrationTest.sh 192.168.1.50 192.168.1.51 192.168.1.52 16 | # Conducts a pentest on multiple specified hosts. 17 | # 18 | # ./PenetrationTest.sh all 19 | # Discovers all cluster nodes from the Proxmox cluster configuration, 20 | # then runs nmap-based checks on each node. 21 | # 22 | # Note: This script performs a non-exhaustive scan and should be used 23 | # only with explicit permission. Pentesting without permission is illegal. 24 | # 25 | # Function Index: 26 | # - usage 27 | # 28 | source "${UTILITYPATH}/Prompts.sh" 29 | source "${UTILITYPATH}/Queries.sh" 30 | 31 | ############################################################################### 32 | # Preliminary Checks 33 | ############################################################################### 34 | __check_root__ 35 | __check_proxmox__ 36 | 37 | ############################################################################### 38 | # Usage 39 | ############################################################################### 40 | usage() { 41 | echo "Usage: $0 [ ...]" 42 | echo " $0 all" 43 | echo 44 | echo "Examples:" 45 | echo " $0 192.168.1.50" 46 | echo " $0 192.168.1.50 192.168.1.51 192.168.1.52" 47 | echo " $0 all" 48 | exit 1 49 | } 50 | 51 | ############################################################################### 52 | # Main Script Logic 53 | ############################################################################### 54 | __install_or_prompt__ "nmap" 55 | 56 | if [[ $# -lt 1 ]]; then 57 | usage 58 | fi 59 | 60 | if [[ "$1" == "all" ]]; then 61 | echo "[*] Discovering all remote nodes in the Proxmox cluster..." 62 | readarray -t REMOTE_NODES < <( __get_remote_node_ips__ ) 63 | if [[ "${#REMOTE_NODES[@]}" -eq 0 ]]; then 64 | echo "Error: No remote nodes discovered. Are you sure this node is part of a cluster?" 
65 | exit 2 66 | fi 67 | TARGETS=("${REMOTE_NODES[@]}") 68 | else 69 | TARGETS=("$@") 70 | fi 71 | 72 | for host in "${TARGETS[@]}"; do 73 | echo "=======================================================================" 74 | echo "[*] Starting vulnerability scan for host: \"$host\"" 75 | echo "=======================================================================" 76 | nmap -sV --script vuln "$host" 77 | echo "=======================================================================" 78 | echo "[*] Finished scanning \"$host\"" 79 | echo "=======================================================================" 80 | echo 81 | done 82 | 83 | __prompt_keep_installed_packages__ 84 | 85 | ############################################################################### 86 | # Testing status 87 | ############################################################################### 88 | # Tested single-node 89 | # Tested multi-node -------------------------------------------------------------------------------- /Security/PortScan.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # PortScan.sh 4 | # 5 | # A script to scan one or multiple Proxmox hosts (or all in a cluster) to identify 6 | # which TCP ports are open. This script uses nmap (installing it if missing), then 7 | # optionally removes it when finished. 8 | # 9 | # Usage: 10 | # ./PortScan.sh [ ...] 11 | # ./PortScan.sh all 12 | # 13 | # Examples: 14 | # # Scans a single host at 192.168.1.50 for open TCP ports. 15 | # ./PortScan.sh 192.168.1.50 16 | # 17 | # # Scans multiple specified hosts for open TCP ports. 18 | # ./PortScan.sh 192.168.1.50 192.168.1.51 192.168.1.52 19 | # 20 | # # Discovers all cluster nodes from the Proxmox cluster configuration 21 | # # and runs the open port scan on each node. 22 | # ./PortScan.sh all 23 | # 24 | # Note: Use responsibly and only with explicit permission. 25 | # Unauthorized port scanning may be illegal. 26 | # 27 | # Function Index: 28 | # - usage_info 29 | # 30 | 31 | source "${UTILITYPATH}/Prompts.sh" 32 | source "${UTILITYPATH}/Queries.sh" 33 | 34 | ############################################################################### 35 | # Preliminary Checks 36 | ############################################################################### 37 | __check_root__ 38 | __check_proxmox__ 39 | 40 | ############################################################################### 41 | # Usage Information 42 | ############################################################################### 43 | function usage_info() { 44 | echo "Usage:" 45 | echo " $0 [ ...]" 46 | echo " $0 all" 47 | echo 48 | echo "Examples:" 49 | echo " # Scans a single host at 192.168.1.50 for open TCP ports." 50 | echo " $0 192.168.1.50" 51 | echo 52 | echo " # Scans multiple specified hosts for open TCP ports." 53 | echo " $0 192.168.1.50 192.168.1.51 192.168.1.52" 54 | echo 55 | echo " # Discovers all cluster nodes from the Proxmox cluster configuration," 56 | echo " # then runs the open port scan on each node." 
57 | echo " $0 all" 58 | exit 1 59 | } 60 | 61 | ############################################################################### 62 | # Main Script Logic 63 | ############################################################################### 64 | if [[ $# -lt 1 ]]; then 65 | usage_info 66 | fi 67 | 68 | __install_or_prompt__ "nmap" 69 | 70 | if [[ "$1" == "all" ]]; then 71 | __check_cluster_membership__ 72 | readarray -t discoveredHosts < <( __get_remote_node_ips__ ) 73 | 74 | if [[ ${#discoveredHosts[@]} -eq 0 ]]; then 75 | echo "Error: No hosts discovered in the cluster. Exiting." 76 | exit 2 77 | fi 78 | 79 | echo "[*] Discovered the following cluster node IPs:" 80 | for ip in "${discoveredHosts[@]}"; do 81 | echo "$ip" 82 | done 83 | echo 84 | 85 | targets=("${discoveredHosts[@]}") 86 | else 87 | targets=("$@") 88 | fi 89 | 90 | for host in "${targets[@]}"; do 91 | echo "=======================================================================" 92 | echo "[*] Scanning open TCP ports for host: \"${host}\"" 93 | echo "=======================================================================" 94 | nmap -p- --open -n "${host}" 95 | echo "=======================================================================" 96 | echo "[*] Finished scanning \"${host}\"" 97 | echo "=======================================================================" 98 | echo 99 | done 100 | 101 | __prompt_keep_installed_packages__ 102 | -------------------------------------------------------------------------------- /Storage/Ceph/CreateOSDs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CephCreateOSDsAllNodes.sh 4 | # 5 | # This script runs on all nodes in the Proxmox cluster to automatically create 6 | # Ceph OSDs on all unused block devices (e.g., /dev/sd*, /dev/nvme*, /dev/hd*). 7 | # 8 | # Usage: 9 | # ./CephCreateOSDsAllNodes.sh 10 | # 11 | # Requirements/Assumptions: 12 | # 1. Passwordless SSH or valid SSH keys for root on all nodes. 13 | # 2. Each node is in a functioning Proxmox cluster (pvecm available). 14 | # 3. Each node has Ceph installed and configured sufficiently to run 'ceph-volume'. 15 | # 4. Devices that need to be skipped are either already mounted or in pvs. 16 | # 17 | # Function Index: 18 | # - create_osds 19 | # 20 | source "${UTILITYPATH}/Prompts.sh" 21 | source "${UTILITYPATH}/Queries.sh" 22 | 23 | __check_root__ 24 | __check_proxmox__ 25 | __check_cluster_membership__ 26 | 27 | ############################################################################### 28 | # FUNCTION: create_osds 29 | # Iterates over block devices (/dev/sd*, /dev/nvme*, /dev/hd*) and: 30 | # - Checks if the device is valid (-b) 31 | # - Ensures the device is unused (not mounted, not in pvs) 32 | # - Creates a Ceph OSD via ceph-volume 33 | ############################################################################### 34 | create_osds() { 35 | echo "=== Checking for devices on node: $(hostname) ===" 36 | for device in /dev/sd* /dev/nvme* /dev/hd* 2>/dev/null; do 37 | [ -e "$device" ] || continue 38 | if [ -b "$device" ]; then 39 | if lsblk -no MOUNTPOINT "$device" | grep -q '^$' && ! pvs 2>/dev/null | grep -q "$device"; then 40 | echo "Creating OSD for \"$device\"..." 41 | if ceph-volume lvm create --data "$device"; then 42 | echo "Successfully created OSD for \"$device\"." 43 | else 44 | echo "Failed to create OSD for \"$device\". Continuing..." 45 | fi 46 | else 47 | echo "\"$device\" is in use (mounted or in pvs). Skipping." 
48 | fi 49 | else 50 | echo "\"$device\" is not a valid block device. Skipping." 51 | fi 52 | done 53 | echo "=== OSD creation complete on node: $(hostname) ===" 54 | } 55 | 56 | ############################################################################### 57 | # MAIN SCRIPT 58 | ############################################################################### 59 | echo "=== Starting OSD creation on all nodes ===" 60 | 61 | readarray -t REMOTE_NODES < <( __get_remote_node_ips__ ) 62 | if [ "${#REMOTE_NODES[@]}" -eq 0 ]; then 63 | echo "Error: No remote nodes found in the cluster." 64 | exit 1 65 | fi 66 | 67 | for NODE_IP in "${REMOTE_NODES[@]}"; do 68 | echo "=> Connecting to node: \"$NODE_IP\"" 69 | ssh root@"$NODE_IP" "$(typeset -f create_osds); create_osds" 70 | done 71 | 72 | echo "=== Ceph OSD creation process completed on all nodes! ===" 73 | -------------------------------------------------------------------------------- /Storage/Ceph/EditCrushmap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CephEditCrushmap.sh 4 | # 5 | # This script manages the decompilation and recompilation of the Ceph cluster's CRUSH map, 6 | # facilitating custom modifications. Administrators can either decompile the current CRUSH 7 | # map into a human-readable format or recompile it for use in the cluster. 8 | # 9 | # Usage: 10 | # ./CephEditCrushmap.sh 11 | # 12 | # Examples: 13 | # # Decompile the CRUSH map 14 | # ./CephEditCrushmap.sh decompile 15 | # 16 | # # Recompile the CRUSH map 17 | # ./CephEditCrushmap.sh compile 18 | # 19 | # Function Index: 20 | # - decompileCrushMap 21 | # - recompileCrushMap 22 | # 23 | source "${UTILITYPATH}/Prompts.sh" 24 | 25 | ############################################################################### 26 | # Environment Checks 27 | ############################################################################### 28 | __check_root__ 29 | __check_proxmox__ 30 | 31 | ############################################################################### 32 | # Variables 33 | ############################################################################### 34 | userCommand="$1" 35 | 36 | ############################################################################### 37 | # Functions 38 | ############################################################################### 39 | function decompileCrushMap() { 40 | echo "Getting and decompiling the CRUSH map..." 41 | ceph osd getcrushmap -o "/tmp/crushmap.comp" 42 | crushtool -d "/tmp/crushmap.comp" -o "/tmp/crushmap.decomp" 43 | echo "Decompiled CRUSH map is at /tmp/crushmap.decomp" 44 | } 45 | 46 | function recompileCrushMap() { 47 | echo "Recompiling and setting the CRUSH map..." 48 | crushtool -c "/tmp/crushmap.decomp" -o "/tmp/crushmap.comp" 49 | ceph osd setcrushmap -i "/tmp/crushmap.comp" 50 | echo "CRUSH map has been recompiled and set." 51 | } 52 | 53 | ############################################################################### 54 | # Main Logic 55 | ############################################################################### 56 | if [ -z "$userCommand" ]; then 57 | echo "Error: Missing command. Use 'decompile' or 'compile'." 58 | exit 1 59 | fi 60 | 61 | case "$userCommand" in 62 | decompile) 63 | decompileCrushMap 64 | ;; 65 | compile) 66 | recompileCrushMap 67 | ;; 68 | *) 69 | echo "Error: Invalid command. Use 'decompile' or 'compile'." 
70 | exit 2 71 | ;; 72 | esac 73 | -------------------------------------------------------------------------------- /Storage/Ceph/SetPoolMinSize1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CephSetPoolMinSize1.sh 4 | # 5 | # This script sets the 'min_size' parameter of a specified Ceph storage pool to 1. 6 | # This allows the pool to operate with a single replica in degraded mode when necessary. 7 | # 8 | # Usage: 9 | # ./CephSetPoolMinSize1.sh 10 | # 11 | # Example: 12 | # # Sets the min_size to 1 for the 'mypool' storage pool 13 | # ./CephSetPoolMinSize1.sh mypool 14 | # 15 | source "${UTILITYPATH}/Prompts.sh" 16 | 17 | ############################################################################### 18 | # Main 19 | ############################################################################### 20 | __check_root__ 21 | __check_proxmox__ 22 | 23 | if [ -z "$1" ]; then 24 | echo "Error: No pool name provided." 25 | echo "Usage: $0 " 26 | exit 1 27 | fi 28 | 29 | POOL_NAME="$1" 30 | 31 | echo "Setting min_size of pool '$POOL_NAME' to 1..." 32 | ceph osd pool set "$POOL_NAME" min_size 1 --yes-i-really-mean-it 33 | 34 | if [ $? -eq 0 ]; then 35 | echo "min_size has been set to 1 for pool '$POOL_NAME'." 36 | else 37 | echo "Error: Failed to set min_size for pool '$POOL_NAME'." 38 | exit 1 39 | fi 40 | -------------------------------------------------------------------------------- /Storage/Ceph/SetPoolSize1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CephSetPoolSize1.sh 4 | # 5 | # This script sets the 'size' parameter of a specified Ceph storage pool to 1, disabling data replication. 6 | # 7 | # Usage: 8 | # ./CephSetPoolSize1.sh 9 | # 10 | # Example: 11 | # ./CephSetPoolSize1.sh testpool 12 | # 13 | 14 | source "${UTILITYPATH}/Prompts.sh" 15 | 16 | ############################################################################### 17 | # Checks and setup 18 | ############################################################################### 19 | __check_root__ 20 | __check_proxmox__ 21 | 22 | ############################################################################### 23 | # Main 24 | ############################################################################### 25 | POOL_NAME="$1" 26 | 27 | if [ -z "$POOL_NAME" ]; then 28 | echo "Usage: $0 " 29 | exit 1 30 | fi 31 | 32 | echo "Setting size of pool \"$POOL_NAME\" to 1..." 33 | ceph osd pool set "$POOL_NAME" size 1 --yes-i-really-mean-it 34 | if [ $? -eq 0 ]; then 35 | echo "size has been set to 1 for pool \"$POOL_NAME\"." 36 | else 37 | echo "Failed to set size for pool \"$POOL_NAME\". Please check the pool name and your permissions." 38 | exit 1 39 | fi 40 | -------------------------------------------------------------------------------- /Storage/Ceph/SingleDrive.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CephSingleDrive.sh 4 | # 5 | # This script helps set up Ceph on a single-drive system, such as a home lab 6 | # server, by removing the local-lvm partition and creating a Ceph OSD in the 7 | # freed space. 8 | # 9 | # Usage: 10 | # ./CephSingleDrive.sh 11 | # 12 | # Steps: 13 | # create_osd - Bootstrap Ceph auth, create LVs, and prepare an OSD 14 | # clear_local_lvm - Delete the local-lvm (pve/data) volume (Destructive!) 
15 | # 16 | # Examples: 17 | # ./CephSingleDrive.sh create_osd 18 | # ./CephSingleDrive.sh clear_local_lvm 19 | # 20 | # Function Index: 21 | # - clear_local_lvm 22 | # - create_osd 23 | # 24 | 25 | source "${UTILITYPATH}/Prompts.sh" 26 | 27 | __check_root__ 28 | __check_proxmox__ 29 | 30 | ############################################################################### 31 | # Functions 32 | ############################################################################### 33 | function clear_local_lvm() { 34 | echo "WARNING: This will remove the local-lvm 'pve/data' and all data within it!" 35 | read -p "Are you sure you want to proceed? [yes/NO]: " confirmation 36 | case "$confirmation" in 37 | yes|YES) 38 | echo "Removing LVM volume 'pve/data'..." 39 | lvremove -y pve/data 40 | echo "Local-lvm 'pve/data' removed successfully." 41 | ;; 42 | *) 43 | echo "Aborting operation." 44 | ;; 45 | esac 46 | } 47 | 48 | function create_osd() { 49 | echo "Creating OSD on this node..." 50 | echo "Bootstrapping Ceph auth..." 51 | ceph auth get client.bootstrap-osd > /var/lib/ceph/bootstrap-osd/ceph.keyring 52 | echo "Bootstrap auth completed." 53 | 54 | echo "Creating new logical volume with all remaining free space..." 55 | lvcreate -l 100%FREE -n vz pve 56 | echo "Logical volume 'pve/vz' created." 57 | 58 | echo "Preparing and activating the logical volume for OSD..." 59 | ceph-volume lvm create --data pve/vz 60 | echo "OSD prepared and activated." 61 | } 62 | 63 | ############################################################################### 64 | # Main 65 | ############################################################################### 66 | STEP="$1" 67 | 68 | if [ -z "$STEP" ]; then 69 | echo "Usage: $0 " 70 | exit 1 71 | fi 72 | 73 | case "$STEP" in 74 | create_osd) 75 | create_osd 76 | ;; 77 | clear_local_lvm) 78 | clear_local_lvm 79 | ;; 80 | *) 81 | echo "Invalid step. Use 'create_osd' or 'clear_local_lvm'." 82 | exit 2 83 | ;; 84 | esac 85 | -------------------------------------------------------------------------------- /Storage/Ceph/SparsifyDisk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CephSparsifyVMDisks.sh 4 | # 5 | # This script is designed to sparsify (compact) *all* RBD disk(s) associated with a specific VM 6 | # in a specified Ceph storage pool. By zeroing out unused space in the VM and using the 7 | # 'rbd sparsify' command, any zeroed blocks are reclaimed in the Ceph pool, making the space 8 | # available for other uses. 9 | # 10 | # Usage: 11 | # ./CephSparsifyVMDisks.sh 12 | # pool_name - The name of the Ceph storage pool where the VM disk(s) reside. 13 | # vm_id - The numeric ID of the VM whose disk(s) will be sparsified. 14 | # 15 | # Example: 16 | # ./CephSparsifyVMDisks.sh mypool 101 17 | # 18 | # Notes: 19 | # 1. This script assumes that the RBD image names follow the convention "vm--disk-". 20 | # Adjust the grep pattern and/or logic if your naming differs. 21 | # 2. Ensure you have already zeroed out unused space within the VM (e.g., sdelete -z in Windows 22 | # or fstrim in Linux) before running this script. 23 | # 3. Verify you have the necessary permissions to run 'rbd sparsify' on the target pool/image. 
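#    4. Illustrative guest-side zeroing commands (run inside the VM beforehand): 'fstrim -av' on Linux guests, 'sdelete -z C:' on Windows guests.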
24 | # 25 | 26 | source "${UTILITYPATH}/Prompts.sh" 27 | 28 | ############################################################################### 29 | # Check prerequisites 30 | ############################################################################### 31 | __check_root__ 32 | __check_proxmox__ 33 | 34 | ############################################################################### 35 | # Validate arguments 36 | ############################################################################### 37 | if [ -z "$1" ] || [ -z "$2" ]; then 38 | echo "Error: Missing arguments." 39 | echo "Usage: $0 " 40 | exit 1 41 | fi 42 | 43 | POOL_NAME="$1" 44 | VM_ID="$2" 45 | 46 | echo "Querying all RBD disks for VM ID '${VM_ID}' in pool '${POOL_NAME}'..." 47 | 48 | ############################################################################### 49 | # Main Logic 50 | ############################################################################### 51 | images=$(rbd ls "${POOL_NAME}" | grep "vm-${VM_ID}-disk-") 52 | if [ -z "${images}" ]; then 53 | echo "No disks found for VM ID '${VM_ID}' in pool '${POOL_NAME}'." 54 | exit 0 55 | fi 56 | 57 | echo "Found the following disk(s):" 58 | echo "${images}" 59 | echo 60 | 61 | for imageName in ${images}; do 62 | echo "Attempting to sparsify disk '${POOL_NAME}/${imageName}'..." 63 | rbd sparsify "${POOL_NAME}/${imageName}" 64 | sparsifyExitCode=$? 65 | 66 | if [ ${sparsifyExitCode} -eq 0 ]; then 67 | echo "Successfully sparsified '${POOL_NAME}/${imageName}'." 68 | else 69 | echo "Failed to sparsify '${POOL_NAME}/${imageName}'." 70 | echo "Please check if the image name is correct and that you have the necessary permissions." 71 | # Uncomment the line below if one failure should stop the entire script: 72 | # exit ${sparsifyExitCode} 73 | fi 74 | echo 75 | done 76 | 77 | echo "Disk sparsification process is complete for VM ID '${VM_ID}' in pool '${POOL_NAME}'." 78 | -------------------------------------------------------------------------------- /Storage/Ceph/StartStoppedOSDs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # StartStoppedOSDs.sh 4 | # 5 | # This script starts all stopped Ceph OSDs in a Proxmox VE environment. 6 | # 7 | # Usage: 8 | # ./StartStoppedOSDs.sh 9 | # 10 | # This script: 11 | # - Checks for root privileges. 12 | # - Verifies it is running in a Proxmox environment. 13 | # - Checks or installs the 'ceph' package if needed. 14 | # - Lists all OSDs that are down and attempts to start them. 15 | # 16 | 17 | source "${UTILITYPATH}/Prompts.sh" 18 | 19 | ############################################################################### 20 | # Preliminary Checks 21 | ############################################################################### 22 | __check_root__ 23 | __check_proxmox__ 24 | 25 | ############################################################################### 26 | # Main Logic 27 | ############################################################################### 28 | STOPPED_OSDS="$(ceph osd tree | awk '/down/ {print $4}')" 29 | 30 | if [ -z "$STOPPED_OSDS" ]; then 31 | echo "No OSD is reported as down. Exiting." 32 | exit 0 33 | fi 34 | 35 | for osdId in $STOPPED_OSDS; do 36 | echo "Starting OSD ID: $osdId" 37 | ceph osd start "osd.${osdId}" 38 | if [ $? -eq 0 ]; then 39 | echo " - OSD ID: $osdId started successfully." 40 | else 41 | echo " - Failed to start OSD ID: $osdId." 42 | fi 43 | done 44 | 45 | echo "OSD start process completed!" 
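# Hedged fallback (not part of the original flow): on Ceph releases where 'ceph osd start'
# is unavailable, a down OSD can typically be brought up through its systemd unit instead:
#   systemctl start "ceph-osd@${osdId}"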
46 | -------------------------------------------------------------------------------- /Storage/Ceph/WipeDisk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # CephWipeDisk.sh 4 | # 5 | # Securely erase a disk previously used by Ceph for removal or redeployment. 6 | # This script will: 7 | # 1. Prompt for confirmation to wipe the specified disk. 8 | # 2. Remove any existing partition tables and Ceph signatures. 9 | # 3. Optionally overwrite the disk with zeroes. 10 | # 11 | # Usage: 12 | # ./CephWipeDisk.sh /dev/sdX 13 | # 14 | # Example: 15 | # ./CephWipeDisk.sh /dev/sdb 16 | # 17 | # Notes: 18 | # - This script must be run as root (sudo). 19 | # - Make sure you specify the correct disk. This operation is destructive! 20 | # 21 | 22 | source "${UTILITYPATH}/Prompts.sh" 23 | 24 | __check_root__ 25 | __check_proxmox__ 26 | 27 | ############################################################################### 28 | # Validate arguments 29 | ############################################################################### 30 | if [[ $# -ne 1 ]]; then 31 | echo "Usage: $0 /dev/sdX" 32 | exit 1 33 | fi 34 | 35 | DISK="$1" 36 | 37 | if [[ ! "$DISK" =~ ^/dev/ ]]; then 38 | echo "Error: Invalid disk specified. Please provide a valid /dev/sdX path." 39 | exit 2 40 | fi 41 | 42 | ############################################################################### 43 | # Check and/or install required commands 44 | ############################################################################### 45 | __install_or_prompt__ "parted" 46 | __install_or_prompt__ "util-linux" # Provides wipefs 47 | __install_or_prompt__ "coreutils" 48 | 49 | ############################################################################### 50 | # Confirmation 51 | ############################################################################### 52 | echo "WARNING: This script will wipe and remove partitions/signatures on \"$DISK\"." 53 | echo "This operation is destructive and cannot be undone." 54 | read -r -p "Are you sure you want to continue? (y/N): " confirmWipe 55 | if [[ "$confirmWipe" != "y" && "$confirmWipe" != "Y" ]]; then 56 | echo "Aborting. No changes were made." 57 | exit 0 58 | fi 59 | 60 | ############################################################################### 61 | # Remove Partition Tables and Ceph Signatures 62 | ############################################################################### 63 | echo "Removing partition tables and file system signatures on \"$DISK\"..." 64 | wipefs --all --force "$DISK" 65 | 66 | echo "Re-initializing partition label on \"$DISK\"..." 67 | parted -s "$DISK" mklabel gpt 68 | 69 | ############################################################################### 70 | # Optional Zero Fill 71 | ############################################################################### 72 | read -r -p "Would you like to overwrite the disk with zeroes? (y/N): " overwrite 73 | if [[ "$overwrite" == "y" || "$overwrite" == "Y" ]]; then 74 | __install_or_prompt__ "coreutils" 75 | echo "Overwriting \"$DISK\" with zeroes. This may take a while..." 76 | dd if=/dev/zero of="$DISK" bs=1M status=progress || { 77 | echo "Error: Failed to overwrite disk with zeroes." 78 | exit 5 79 | } 80 | sync 81 | echo "Zero-fill complete." 82 | else 83 | echo "Skipping zero-fill as per user choice." 
84 | fi 85 | 86 | ############################################################################### 87 | # Prompt to keep newly installed packages 88 | ############################################################################### 89 | __prompt_keep_installed_packages__ 90 | -------------------------------------------------------------------------------- /Storage/DiskDeleteBulk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # DiskDeleteBulk.sh 4 | # 5 | # This script automates the process of deleting specific disk images from a Ceph 6 | # storage pool. It is designed to operate over a range of virtual machine (VM) 7 | # disk images, identifying each by a unique naming convention and deleting them 8 | # from the specified Ceph pool. This is particularly useful for bulk cleanup of 9 | # VM disk images in virtualized data centers or cloud platforms. 10 | # 11 | # Usage: 12 | # ./DiskDeleteBulk.sh 13 | # 14 | # Example: 15 | # ./DiskDeleteBulk.sh vm_pool 1 100 1 16 | # 17 | # Function Index: 18 | # - delete_disk 19 | # 20 | 21 | source "${UTILITYPATH}/Prompts.sh" 22 | 23 | __check_root__ 24 | __check_proxmox__ 25 | 26 | ############################################################################### 27 | # Validate and parse inputs 28 | ############################################################################### 29 | POOL_NAME="$1" 30 | START_VM_INDEX="$2" 31 | END_VM_INDEX="$3" 32 | DISK_NUMBER="$4" 33 | 34 | if [ -z "$POOL_NAME" ] || [ -z "$START_VM_INDEX" ] || [ -z "$END_VM_INDEX" ] || [ -z "$DISK_NUMBER" ]; then 35 | echo "Error: Missing required arguments." 36 | echo "Usage: ./DiskDeleteBulk.sh " 37 | exit 1 38 | fi 39 | 40 | ############################################################################### 41 | # Delete a disk in the specified Ceph pool 42 | ############################################################################### 43 | function delete_disk() { 44 | local pool="$1" 45 | local disk="$2" 46 | 47 | rbd rm "$disk" -p "$pool" 48 | if [ $? -ne 0 ]; then 49 | echo "Failed to remove the disk \"$disk\" in pool \"$pool\"" 50 | return 1 51 | fi 52 | 53 | echo "Disk \"$disk\" has been deleted." 54 | } 55 | 56 | ############################################################################### 57 | # Main 58 | ############################################################################### 59 | for vmIndex in $(seq "$START_VM_INDEX" "$END_VM_INDEX"); do 60 | diskName="vm-${vmIndex}-disk-${DISK_NUMBER}" 61 | delete_disk "$POOL_NAME" "$diskName" 62 | done 63 | -------------------------------------------------------------------------------- /Storage/PassthroughStorageToLXC.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # PassthroughStorageToLXC.sh 4 | # 5 | # A script to pass through a host directory into one or more LXC containers for shared storage. 6 | # It will automatically detect whether each container is unprivileged and convert it to privileged 7 | # if necessary, then mount the specified host directory inside the container with the given permissions. 
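# For reference, each processed container ends up with a config entry similar to (illustrative values): mp0: /mnt/data,mp=/mnt/data,ro=0,backup=0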
8 | # 9 | # Usage: 10 | # ./PassthroughStorageToLXC.sh 11 | # 12 | # Example: 13 | # # Mounts /mnt/data with read-write permissions into containers 101 and 102 14 | # ./PassthroughStorageToLXC.sh /mnt/data rw 101 102 15 | # 16 | # # Mounts /mnt/logs with read-only permissions into containers 101, 102, and 103 17 | # ./PassthroughStorageToLXC.sh /mnt/logs ro 101 102 103 18 | # 19 | 20 | source "${UTILITYPATH}/Prompts.sh" 21 | 22 | ############################################################################### 23 | # Pre-Execution Checks 24 | ############################################################################### 25 | __check_root__ 26 | __check_proxmox__ 27 | 28 | ############################################################################### 29 | # Parse Arguments 30 | ############################################################################### 31 | if [[ $# -lt 3 ]]; then 32 | echo "Usage: $0 " 33 | echo "Example: $0 /mnt/data rw 101 102" 34 | exit 1 35 | fi 36 | 37 | HOST_DIRECTORY="$1" 38 | MOUNT_PERMISSION="$2" 39 | shift 2 40 | CONTAINERS=("$@") 41 | 42 | if [[ ! -d "$HOST_DIRECTORY" ]]; then 43 | echo "Error: Host directory \"$HOST_DIRECTORY\" does not exist." 44 | exit 2 45 | fi 46 | 47 | if [[ "$MOUNT_PERMISSION" != "ro" && "$MOUNT_PERMISSION" != "rw" ]]; then 48 | echo "Error: Permission must be either \"ro\" or \"rw\"." 49 | exit 3 50 | fi 51 | 52 | roFlag=0 53 | if [[ "$MOUNT_PERMISSION" == "ro" ]]; then 54 | roFlag=1 55 | fi 56 | 57 | ############################################################################### 58 | # Main Logic 59 | ############################################################################### 60 | for CTID in "${CONTAINERS[@]}"; do 61 | echo "Processing container ID: \"$CTID\"..." 62 | 63 | if ! pct status "$CTID" &>/dev/null; then 64 | echo "Warning: LXC container \"$CTID\" not found. Skipping." 65 | continue 66 | fi 67 | 68 | unprivilegedSetting="$(pct config "$CTID" | awk '/^unprivileged:/ {print $2}')" 69 | if [[ "$unprivilegedSetting" == "1" ]]; then 70 | echo "Container \"$CTID\" is unprivileged. Converting to privileged..." 71 | pct set "$CTID" -unprivileged 0 --force 72 | echo "Stopping container \"$CTID\" to apply changes..." 73 | pct stop "$CTID" 74 | echo "Starting container \"$CTID\" after privilege change..." 75 | pct start "$CTID" 76 | fi 77 | 78 | mountPoint="/mnt/$(basename "$HOST_DIRECTORY")" 79 | nextMpIndex=0 80 | while pct config "$CTID" | grep -q "^mp${nextMpIndex}:"; do 81 | ((nextMpIndex++)) 82 | done 83 | 84 | echo "Mounting \"$HOST_DIRECTORY\" at \"$mountPoint\" (ro=$roFlag) in container \"$CTID\"..." 85 | pct set "$CTID" -mp${nextMpIndex} "${HOST_DIRECTORY},mp=${mountPoint},ro=${roFlag},backup=0" 86 | 87 | echo "Successfully mounted in container \"$CTID\"." 88 | echo "------------------------------------------------------" 89 | done 90 | 91 | echo "All specified containers processed. Done." 92 | -------------------------------------------------------------------------------- /Storage/UpdateStaleMount.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # UpdateStaleMount.sh 4 | # 5 | # This script updates a stale file mount across a Proxmox VE cluster by: 6 | # 1) Disabling the specified data center storage. 7 | # 2) Forcibly unmounting the stale mount on each cluster node. 8 | # 3) Removing the stale directory. 9 | # 4) Re-enabling the data center storage. 
10 | # 11 | # Usage: 12 | # ./UpdateStaleMount.sh 13 | # 14 | # Arguments: 15 | # storage_name - The name/ID of the Proxmox storage to disable/re-enable. 16 | # mount_path - The path of the stale mount point on each node (e.g., /mnt/pve/ISO). 17 | # 18 | # Example: 19 | # ./UpdateStaleMount.sh ISO_Storage /mnt/pve/ISO 20 | # 21 | source "${UTILITYPATH}/Prompts.sh" 22 | source "${UTILITYPATH}/Queries.sh" 23 | 24 | __check_root__ 25 | __check_proxmox__ 26 | __check_cluster_membership__ 27 | 28 | ############################################################################### 29 | # Parse and validate arguments 30 | ############################################################################### 31 | STORAGE_NAME="$1" 32 | MOUNT_PATH="$2" 33 | 34 | if [ -z "$STORAGE_NAME" ] || [ -z "$MOUNT_PATH" ]; then 35 | echo "Usage: $0 " 36 | exit 1 37 | fi 38 | 39 | ############################################################################### 40 | # Step 1: Disable the data center storage 41 | ############################################################################### 42 | echo "Disabling storage \"${STORAGE_NAME}\"..." 43 | pvesm set "${STORAGE_NAME}" --disable 1 44 | if [ $? -ne 0 ]; then 45 | echo "Error: Failed to disable storage \"${STORAGE_NAME}\"." 46 | exit 1 47 | fi 48 | 49 | ############################################################################### 50 | # Step 2: Gather cluster node IPs 51 | ############################################################################### 52 | echo "Retrieving remote node IPs..." 53 | readarray -t REMOTE_NODE_IPS < <( __get_remote_node_ips__ ) 54 | if [ ${#REMOTE_NODE_IPS[@]} -eq 0 ]; then 55 | echo "Error: No remote node IPs found. Ensure this node is part of a cluster." 56 | exit 1 57 | fi 58 | 59 | echo "Found the following node IPs in the cluster:" 60 | printf '%s\n' "${REMOTE_NODE_IPS[@]}" 61 | 62 | ############################################################################### 63 | # Step 3: Unmount and remove the stale directory on each node 64 | ############################################################################### 65 | for nodeIp in "${REMOTE_NODE_IPS[@]}"; do 66 | echo "Processing node IP: \"${nodeIp}\"" 67 | ssh root@"${nodeIp}" "umount -f \"${MOUNT_PATH}\"" 2>/dev/null 68 | ssh root@"${nodeIp}" "rm -rf \"${MOUNT_PATH}\"" 2>/dev/null 69 | done 70 | 71 | ############################################################################### 72 | # Step 4: Re-enable the storage 73 | ############################################################################### 74 | echo "Re-enabling storage \"${STORAGE_NAME}\"..." 75 | pvesm set "${STORAGE_NAME}" --disable 0 76 | if [ $? -ne 0 ]; then 77 | echo "Error: Failed to re-enable storage \"${STORAGE_NAME}\"." 78 | exit 1 79 | fi 80 | 81 | echo "Successfully updated the stale file mount for storage \"${STORAGE_NAME}\"." 82 | exit 0 83 | -------------------------------------------------------------------------------- /UpdateProxmoxScripts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script updates the contents of the Proxmox scripts repository without replacing the top-level folder. 4 | # It clones the repository into a temporary directory, clears the original folder's contents, 5 | # and moves the new files into the original folder. 
6 | # 7 | # Usage: 8 | # ./UpdateProxmoxScripts.sh 9 | # 10 | 11 | # Variables 12 | REPO_URL="https://github.com/coelacant1/proxmoxscripts" 13 | REPO_NAME="proxmoxscripts" 14 | 15 | # Ensure the script is run with root or equivalent permissions 16 | if [ "$EUID" -ne 0 ]; then 17 | echo "Please run as root." 18 | exit 1 19 | fi 20 | 21 | # Get the current directory 22 | CURRENT_DIR=$(pwd) 23 | 24 | # Ensure the script is being run from within the correct folder 25 | if [[ "${CURRENT_DIR,,}" != *"${REPO_NAME,,}"* ]]; then 26 | echo "This script must be run from within the $REPO_NAME folder." 27 | exit 1 28 | fi 29 | 30 | # Create a temporary directory for cloning 31 | TEMP_DIR=$(mktemp -d) || { echo "Failed to create a temporary directory"; exit 1; } 32 | 33 | # Clone the repository into the temporary directory 34 | echo "Cloning the repository into a temporary directory..." 35 | git clone "$REPO_URL" "$TEMP_DIR/$REPO_NAME" || { echo "Failed to clone the repository"; exit 1; } 36 | 37 | # Clear the current folder's contents (but not the folder itself) 38 | echo "Clearing the contents of the current folder..." 39 | find "$CURRENT_DIR" -mindepth 1 -delete || { echo "Failed to clear the folder contents"; exit 1; } 40 | 41 | # Move new files into the current folder 42 | echo "Moving updated files into the current folder..." 43 | mv "$TEMP_DIR/$REPO_NAME/"* "$CURRENT_DIR" || { echo "Failed to move updated files"; exit 1; } 44 | mv "$TEMP_DIR/$REPO_NAME/".* "$CURRENT_DIR" 2>/dev/null || true # Move hidden files, ignore errors 45 | 46 | # Make the MakeScriptsExecutable.sh script executable 47 | if [ -f "$CURRENT_DIR/MakeScriptsExecutable.sh" ]; then 48 | echo "Making MakeScriptsExecutable.sh executable..." 49 | chmod +x "$CURRENT_DIR/MakeScriptsExecutable.sh" || { echo "Failed to set executable permission"; exit 1; } 50 | 51 | # Run the MakeScriptsExecutable.sh script 52 | echo "Running MakeScriptsExecutable.sh..." 53 | "$CURRENT_DIR/MakeScriptsExecutable.sh" || { echo "Failed to execute MakeScriptsExecutable.sh"; exit 1; } 54 | fi 55 | 56 | # Clean up the temporary directory 57 | rm -rf "$TEMP_DIR" 58 | 59 | echo "Update completed successfully." 60 | -------------------------------------------------------------------------------- /Utilities/Conversion.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Conversion.sh 4 | # 5 | # Provides utility functions for converting data structures, such as 6 | # converting a dotted IPv4 address to its 32-bit integer representation 7 | # (and vice versa). 8 | # 9 | # Usage: 10 | # source "Conversion.sh" 11 | # 12 | # Example: 13 | # source "./Conversion.sh" 14 | # 15 | # This script is mainly intended as a library of functions to be sourced 16 | # by other scripts. If invoked directly, it currently has no standalone 17 | # actions. 18 | # 19 | # Function Index: 20 | # - __ip_to_int__ 21 | # - __int_to_ip__ 22 | # - __cidr_to_netmask__ 23 | # 24 | 25 | ############################################################################### 26 | # IP Conversion Utilities 27 | ############################################################################### 28 | 29 | # --- __ip_to_int__ ------------------------------------------------------------ 30 | # @function __ip_to_int__ 31 | # @description Converts a dotted IPv4 address string to its 32-bit integer equivalent. 
32 | # @usage __ip_to_int__ "127.0.0.1" 33 | # @param 1 Dotted IPv4 address string (e.g., "192.168.1.10") 34 | # @return Prints the 32-bit integer representation of the IP to stdout. 35 | # @example_output For __ip_to_int__ "127.0.0.1", the output is: 2130706433 36 | __ip_to_int__() { 37 | local a b c d 38 | IFS=. read -r a b c d <<<"$1" 39 | echo "$((a * 256 ** 3 + b * 256 ** 2 + c * 256 + d))" 40 | } 41 | 42 | # --- __int_to_ip__ ------------------------------------------------------------ 43 | # @function __int_to_ip__ 44 | # @description Converts a 32-bit integer to its dotted IPv4 address equivalent. 45 | # @usage __int_to_ip__ 2130706433 46 | # @param 1 32-bit integer 47 | # @return Prints the dotted IPv4 address string to stdout. 48 | # @example_output For __int_to_ip__ 2130706433, the output is: 127.0.0.1 49 | __int_to_ip__() { 50 | local ip 51 | ip=$(printf "%d.%d.%d.%d" \ 52 | "$((($1 >> 24) & 255))" \ 53 | "$((($1 >> 16) & 255))" \ 54 | "$((($1 >> 8) & 255))" \ 55 | "$(($1 & 255))") 56 | echo "$ip" 57 | } 58 | 59 | # --- __cidr_to_netmask__ ------------------------------------------------------------ 60 | # @function __cidr_to_netmask__ 61 | # @description Converts a CIDR prefix to a dotted-decimal netmask. 62 | # @usage __cidr_to_netmask__ 18 63 | # @param 1 CIDR prefix (e.g., 18) 64 | # @return Prints the full subnet netmask. 65 | # @example_output For __cidr_to_netmask__ 18, the output is: 255.255.192.0 66 | __cidr_to_netmask__() { 67 | local cidr="$1" 68 | local mask=$(( 0xffffffff << (32 - cidr) & 0xffffffff )) 69 | local octet1=$(( (mask >> 24) & 255 )) 70 | local octet2=$(( (mask >> 16) & 255 )) 71 | local octet3=$(( (mask >> 8) & 255 )) 72 | local octet4=$(( mask & 255 )) 73 | echo "${octet1}.${octet2}.${octet3}.${octet4}" 74 | } 75 | -------------------------------------------------------------------------------- /Utilities/SSH.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # SSH.sh 4 | # 5 | # This script provides repeated-use SSH functions that can be sourced by other 6 | # scripts. 7 | # 8 | # Usage: 9 | # source SSH.sh 10 | # 11 | # Function Index: 12 | # - __wait_for_ssh__ 13 | # 14 | 15 | source "${UTILITYPATH}/Prompts.sh" 16 | 17 | __install_or_prompt__ "sshpass" 18 | 19 | ############################################################################### 20 | # SSH Functions 21 | ############################################################################### 22 | 23 | # --- __wait_for_ssh__ ------------------------------------------------------------ 24 | # @function __wait_for_ssh__ 25 | # @description Repeatedly attempts to connect via SSH to a specified host using a given username and password until SSH is reachable or until the maximum number of attempts is exhausted. 26 | # @usage __wait_for_ssh__ 27 | # @param 1 The SSH host (IP or domain). 28 | # @param 2 The SSH username. 29 | # @param 3 The SSH password. 30 | # @return Returns 0 if a connection is established within the max attempts, otherwise exits with code 1. 
31 | # @example_output For __wait_for_ssh__ "192.168.1.100" "user" "pass", the output might be: 32 | # SSH is up on "192.168.1.100" 33 | __wait_for_ssh__() { 34 | local host="$1" 35 | local sshUsername="$2" 36 | local sshPassword="$3" 37 | local maxAttempts=20 38 | local delay=3 39 | 40 | for attempt in $(seq 1 "$maxAttempts"); do 41 | if sshpass -p "$sshPassword" ssh -o ConnectTimeout=5 -o StrictHostKeyChecking=no \ 42 | "$sshUsername@$host" exit 2>/dev/null; then 43 | echo "SSH is up on \"$host\"" 44 | return 0 45 | fi 46 | echo "Attempt $attempt/$maxAttempts: SSH not ready on \"$host\"; waiting $delay seconds..." 47 | sleep "$delay" 48 | done 49 | 50 | echo "Error: Could not connect to SSH on \"$host\" after $maxAttempts attempts." 51 | exit 1 52 | } 53 | -------------------------------------------------------------------------------- /Utilities/_ExampleScript.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # _ExampleScript.sh 4 | # 5 | # Demonstrates usage of the included spinner and message functions. 6 | # 7 | # Usage: 8 | # ./_ExampleScript.sh 9 | # 10 | # 11 | # This script simulates a process, updates its status, and then shows success and error messages. 12 | # 13 | 14 | source "${UTILITYPATH}/Prompts.sh" 15 | 16 | ############################################################################### 17 | # Initial Checks 18 | ############################################################################### 19 | __check_root__ 20 | __check_proxmox__ 21 | 22 | ############################################################################### 23 | # Parse Arguments 24 | ############################################################################### 25 | if [ $# -lt 2 ]; then 26 | echo "Error: Insufficient arguments." 27 | echo "Usage: ./_ExampleScript.sh " 28 | exit 1 29 | fi 30 | 31 | ############################################################################### 32 | # MAIN 33 | ############################################################################### 34 | __info__ "Simulating an error scenario..." 35 | sleep 2 36 | __err__ "A simulated error has occurred!" 37 | 38 | __info__ "Starting a simulated process..." 39 | sleep 2 40 | __update__ "Process is halfway..." 41 | sleep 2 42 | __ok__ "Process completed successfully." 43 | -------------------------------------------------------------------------------- /Utilities/_TestCommunication.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # _TestCommunication.sh 4 | # 5 | # Usage: 6 | # ./_TestCommunication.sh 7 | # 8 | # Demonstrates usage of the Communication.sh script: 9 | # - Sourcing the script 10 | # - Starting/stopping the spinner 11 | # - Printing info, success, and error messages 12 | # - Handling errors via a trap 13 | # 14 | # Function Index: 15 | # - simulate_task 16 | # - simulate_error 17 | # 18 | 19 | if [ -z "${UTILITYPATH}" ]; then 20 | # UTILITYPATH is unset or empty 21 | export UTILITYPATH="$(pwd)" 22 | fi 23 | 24 | source "${UTILITYPATH}/Communication.sh" 25 | 26 | # Example function that simulates a task 27 | simulate_task() { 28 | # "Info" starts the spinner in the background 29 | __info__ "Simulating a long-running task..." 30 | 31 | # Sleep for 2 seconds to mimic a longer process 32 | sleep 2 33 | 34 | # Update the text while the spinner is still going 35 | __update__ "Halfway done..." 36 | sleep 2 37 | 38 | # Indicate success 39 | __ok__ "Task completed successfully." 
40 | } 41 | 42 | # Example function that simulates an error 43 | simulate_error() { 44 | __info__ "Starting a failing command..." 45 | sleep 1 46 | 47 | # We'll run a command that doesn't exist to force an error 48 | non_existent_command 49 | 50 | # If the script reaches here, the spinner won't have stopped yet, 51 | # but the error trap will trigger first, printing an error. 52 | } 53 | 54 | ############################################################################### 55 | # MAIN SCRIPT 56 | ############################################################################### 57 | echo "=== Communication.sh Demo ===" 58 | 59 | # By default, Communication.sh sets a trap for errors: 60 | # trap '__handle_err__ $LINENO "$BASH_COMMAND"' ERR 61 | # which prints the line, exit code, and command that failed. 62 | 63 | # 1) Demonstrate a successful task 64 | simulate_task 65 | 66 | echo 67 | sleep 1 68 | echo "Now we will demonstrate an intentional error." 69 | sleep 1 70 | 71 | # 2) Demonstrate an error scenario 72 | simulate_error 73 | 74 | # (Script ends here, but the ERR trap in Communication.sh will fire on the failing command.) 75 | echo "This line won't be reached if 'set -e' is in effect (because of the error)." 76 | -------------------------------------------------------------------------------- /Utilities/_TestConversion.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # _TestConversion.sh 4 | # 5 | # Usage: 6 | # ./_TestConversion.sh 7 | # 8 | # A simple test script that sources "Conversion.sh" and tests its functions. 9 | # 10 | # Function Index: 11 | # - test_ip_to_int 12 | # - test_int_to_ip 13 | # 14 | 15 | if [ -z "${UTILITYPATH}" ]; then 16 | # UTILITYPATH is unset or empty 17 | export UTILITYPATH="$(pwd)" 18 | fi 19 | 20 | # Source the library 21 | source "${UTILITYPATH}/Conversion.sh" 22 | 23 | ############################################################################### 24 | # Helper Functions 25 | ############################################################################### 26 | 27 | # Test if __ip_to_int__ outputs the expected integer 28 | test_ip_to_int() { 29 | local ip="$1" 30 | local expected="$2" 31 | 32 | local result 33 | result="$(__ip_to_int__ "$ip")" 34 | 35 | if [[ "$result" == "$expected" ]]; then 36 | echo "[PASS] __ip_to_int__ \"$ip\" => $result" 37 | else 38 | echo "[FAIL] __ip_to_int__ \"$ip\" => $result (expected $expected)" 39 | fi 40 | } 41 | 42 | # Test if __int_to_ip__ outputs the expected dotted-IP string 43 | test_int_to_ip() { 44 | local integer="$1" 45 | local expected="$2" 46 | 47 | local result 48 | result="$(__int_to_ip__ "$integer")" 49 | 50 | if [[ "$result" == "$expected" ]]; then 51 | echo "[PASS] __int_to_ip__ $integer => $result" 52 | else 53 | echo "[FAIL] __int_to_ip__ $integer => $result (expected $expected)" 54 | fi 55 | } 56 | 57 | ############################################################################### 58 | # Test Cases 59 | ############################################################################### 60 | 61 | # IP to Int tests 62 | test_ip_to_int "127.0.0.1" "2130706433" 63 | test_ip_to_int "192.168.1.10" "3232235786" 64 | test_ip_to_int "10.0.0.255" "167772415" 65 | 66 | # Int to IP tests 67 | test_int_to_ip "2130706433" "127.0.0.1" 68 | test_int_to_ip "3232235786" "192.168.1.10" 69 | test_int_to_ip "167772415" "10.0.0.255" 70 | 71 | echo 72 | echo "All tests completed." 
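# --- Additional check (a minimal sketch): Conversion.sh also defines __cidr_to_netmask__,
# which the tests above do not exercise. Assuming Conversion.sh has been sourced as above,
# a helper in the same style as test_ip_to_int could verify it against the documented
# example (__cidr_to_netmask__ 18 => 255.255.192.0). Left commented here as an illustration:
#
# test_cidr_to_netmask() {
#     local cidr="$1"
#     local expected="$2"
#     local result
#     result="$(__cidr_to_netmask__ "$cidr")"
#     if [[ "$result" == "$expected" ]]; then
#         echo "[PASS] __cidr_to_netmask__ $cidr => $result"
#     else
#         echo "[FAIL] __cidr_to_netmask__ $cidr => $result (expected $expected)"
#     fi
# }
#
# test_cidr_to_netmask "18" "255.255.192.0"
# test_cidr_to_netmask "24" "255.255.255.0"
# test_cidr_to_netmask "32" "255.255.255.255"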
73 | -------------------------------------------------------------------------------- /Utilities/_TestPrompts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # _TestPrompts.sh 4 | # 5 | # Usage: 6 | # ./_TestPrompts.sh 7 | # 8 | # Demonstrates usage of Prompts.sh by sourcing it and calling each function. 9 | # 10 | 11 | if [ -z "${UTILITYPATH}" ]; then 12 | # UTILITYPATH is unset or empty 13 | export UTILITYPATH="$(pwd)" 14 | fi 15 | 16 | # 1) Source the script 17 | source "${UTILITYPATH}/Prompts.sh" 18 | 19 | echo "=== TEST: __check_root__ ===" 20 | __check_root__ 21 | 22 | echo 23 | echo "=== TEST: __check_proxmox__ ===" 24 | __check_proxmox__ 25 | 26 | echo 27 | echo "=== TEST: __install_or_prompt__ (curl) ===" 28 | __install_or_prompt__ "curl" 29 | 30 | echo 31 | echo "=== TEST: __prompt_keep_installed_packages__ ===" 32 | __prompt_keep_installed_packages__ 33 | 34 | echo 35 | echo "All tests completed successfully." 36 | -------------------------------------------------------------------------------- /Utilities/_TestQueries.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # _TestQueries.sh 4 | # 5 | # Usage: 6 | # ./_TestQueries.sh 7 | # 8 | # A quick test script that sources "Queries.sh" and exercises 9 | # some of its functions to demonstrate usage. 10 | # 11 | # 1) Source the Queries.sh script (assuming it's in the same directory). 12 | # Adjust the path if it's located elsewhere. 13 | # 14 | 15 | if [ -z "${UTILITYPATH}" ]; then 16 | # UTILITYPATH is unset or empty 17 | export UTILITYPATH="$(pwd)" 18 | fi 19 | 20 | source "${UTILITYPATH}/Queries.sh" 21 | 22 | 23 | echo "===============================" 24 | echo " TESTING: __check_cluster_membership__" 25 | echo "===============================" 26 | __check_cluster_membership__ 27 | 28 | echo 29 | echo "===============================" 30 | echo " TESTING: __get_number_of_cluster_nodes__" 31 | echo "===============================" 32 | NUM_NODES="$(__get_number_of_cluster_nodes__)" 33 | echo "Cluster nodes detected: $NUM_NODES" 34 | 35 | echo 36 | echo "===============================" 37 | echo " TESTING: __init_node_mappings__ and __get_ip_from_name__, __get_name_from_ip__" 38 | echo "===============================" 39 | __init_node_mappings__ 40 | echo "Initialization done. Checking a sample node name/IP..." 41 | 42 | 43 | if [ -z "${NODE_NAME}" ]; then 44 | read -rp "Enter node name: " NODE_NAME 45 | fi 46 | 47 | if [ -z "${NODE_IP}" ]; then 48 | read -rp "Enter node IP: " NODE_IP 49 | fi 50 | 51 | # Example usage of node name and IP 52 | echo "Manually checking your provided node name and IP via the node mapping:" 53 | echo "Node '${NODE_NAME}' => IP: $(__get_ip_from_name__ "${NODE_NAME}")" 54 | echo "IP '${NODE_IP}' => Node: $(__get_name_from_ip__ "${NODE_IP}")" 55 | 56 | echo 57 | echo "===============================" 58 | echo " TESTING: __get_cluster_lxc__" 59 | echo "===============================" 60 | echo "All LXC containers in the cluster:" 61 | readarray -t ALL_CLUSTER_LXC < <( __get_cluster_lxc__ ) 62 | printf ' %s\n' "${ALL_CLUSTER_LXC[@]}" 63 | 64 | echo 65 | echo "===============================" 66 | echo " TESTING: __get_server_vms__ (QEMU) for 'local'" 67 | echo "===============================" 68 | echo "QEMU VMs on local server:" 69 | readarray -t LOCAL_VMS < <( __get_server_vms__ "local" ) 70 | printf ' %s\n' "${LOCAL_VMS[@]}" 71 | 72 | echo 73 | echo "Done with tests." 
74 | exit 0 75 | -------------------------------------------------------------------------------- /Utilities/_TestSSH.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # _TestSSH.sh 4 | # 5 | # Demonstrates how to source SSH.sh and call the __wait_for_ssh__ function. 6 | # 7 | # Usage: 8 | # ./_TestSSH.sh [host] [sshUsername] 9 | # 10 | # Example: 11 | # ./_TestSSH.sh 12 | # ./_TestSSH.sh 192.168.1.100 root s3cr3t 13 | # 14 | 15 | ############################################################################### 16 | # Ensure UTILITYPATH is set (or default to current directory) 17 | ############################################################################### 18 | if [ -z "${UTILITYPATH}" ]; then 19 | export UTILITYPATH="$(pwd)" 20 | fi 21 | 22 | ############################################################################### 23 | # Source the SSH.sh script (adjust path if needed) 24 | ############################################################################### 25 | source "${UTILITYPATH}/SSH.sh" 26 | 27 | ############################################################################### 28 | # Parse input arguments or prompt the user 29 | ############################################################################### 30 | host="${1}" 31 | sshUsername="${2}" 32 | sshPassword="${3}" 33 | 34 | # If host was not provided, prompt for it: 35 | if [ -z "${host}" ]; then 36 | read -rp "Enter host (IP or hostname): " host 37 | fi 38 | 39 | # If username was not provided, prompt for it: 40 | if [ -z "${sshUsername}" ]; then 41 | read -rp "Enter SSH username: " sshUsername 42 | fi 43 | 44 | # If password was not provided, prompt for it (hidden input): 45 | if [ -z "${sshPassword}" ]; then 46 | read -rsp "Enter SSH password: " sshPassword 47 | echo # Move to a new line after entering the password 48 | fi 49 | 50 | ############################################################################### 51 | # Test the SSH connection 52 | ############################################################################### 53 | echo "Attempting to connect to '${host}' as '${sshUsername}'..." 54 | __wait_for_ssh__ "${host}" "${sshUsername}" "${sshPassword}" 55 | 56 | # If the function returns successfully, continue: 57 | echo "Success: SSH is accessible on '${host}'." 58 | exit 0 59 | -------------------------------------------------------------------------------- /VirtualMachines/CloudInit/BulkAddSSHKey.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkAddSSHKey.sh 4 | # 5 | # This script adds an SSH public key to a range of virtual machines (VMs) 6 | # within a Proxmox VE environment. It appends a new SSH public key for each VM 7 | # and regenerates the Cloud-Init image to apply the changes. 8 | # 9 | # Usage: 10 | # ./BulkAddSSHKey.sh 11 | # 12 | # Example: 13 | # # Adds the specified SSH key to all VMs with IDs between 400 and 430 14 | # ./BulkAddSSHKey.sh 400 430 "ssh-rsa AAAAB3Nza... user@host" 15 | # 16 | 17 | source "${UTILITYPATH}/Prompts.sh" 18 | 19 | ############################################################################### 20 | # Validate environment and arguments 21 | ############################################################################### 22 | __check_root__ 23 | __check_proxmox__ 24 | 25 | if [ "$#" -ne 3 ]; then 26 | echo "Error: Wrong number of arguments." 
>&2 27 | echo "Usage: $0 " >&2 28 | exit 1 29 | fi 30 | 31 | START_VM_ID="$1" 32 | END_VM_ID="$2" 33 | SSH_PUBLIC_KEY="$3" 34 | 35 | ############################################################################### 36 | # Main logic 37 | ############################################################################### 38 | for (( vmId=START_VM_ID; vmId<=END_VM_ID; vmId++ )); do 39 | if qm status "$vmId" &>/dev/null; then 40 | echo "Adding SSH public key to VM ID: $vmId" 41 | tempFile="$(mktemp)" 42 | qm cloudinit get "$vmId" ssh-authorized-keys > "$tempFile" 43 | echo "$SSH_PUBLIC_KEY" >> "$tempFile" 44 | qm set "$vmId" --sshkeys "$tempFile" 45 | rm "$tempFile" 46 | qm cloudinit dump "$vmId" 47 | echo " - SSH public key appended for VM ID: $vmId." 48 | else 49 | echo "VM ID: $vmId does not exist. Skipping..." 50 | fi 51 | done 52 | 53 | ############################################################################### 54 | # Wrap-up 55 | ############################################################################### 56 | echo "SSH public key addition process completed!" 57 | -------------------------------------------------------------------------------- /VirtualMachines/CloudInit/BulkChangeDNS.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkChangeDNS.sh 4 | # 5 | # Updates the DNS search domain and DNS server for a range of VMs within a Proxmox VE environment. 6 | # It sets new DNS settings and regenerates the Cloud-Init image to apply changes. 7 | # 8 | # Usage: 9 | # ./BulkChangeDNS.sh 10 | # 11 | # Example: 12 | # ./BulkChangeDNS.sh 400 430 8.8.8.8 example.com 13 | # 14 | 15 | source "${UTILITYPATH}/Prompts.sh" 16 | 17 | __check_root__ 18 | __check_proxmox__ 19 | 20 | ############################################################################### 21 | # Argument Checking 22 | ############################################################################### 23 | if [ "$#" -ne 4 ]; then 24 | echo "Usage: $0 " 25 | exit 1 26 | fi 27 | 28 | START_VMID="$1" 29 | END_VMID="$2" 30 | DNS_SERVER="$3" 31 | DNS_SEARCHDOMAIN="$4" 32 | 33 | ############################################################################### 34 | # Main 35 | ############################################################################### 36 | for (( vmid=START_VMID; vmid<=END_VMID; vmid++ )); do 37 | if qm status "$vmid" &>/dev/null; then 38 | echo "Updating DNS settings for VM ID: $vmid" 39 | qm set "$vmid" --nameserver "$DNS_SERVER" --searchdomain "$DNS_SEARCHDOMAIN" 40 | qm cloudinit dump "$vmid" 41 | echo " - Cloud-Init DNS settings updated for VM ID: $vmid." 42 | else 43 | echo "VM ID: $vmid does not exist. Skipping..." 44 | fi 45 | done 46 | 47 | echo "Cloud-Init DNS update process completed!" 48 | -------------------------------------------------------------------------------- /VirtualMachines/CloudInit/BulkChangeIP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkChangeIP.sh 4 | # 5 | # Updates the IP addresses of a range of VMs within a Proxmox VE environment. 6 | # Assigns each VM a unique static IP, incrementing from a starting IP address, 7 | # updates their network bridge configuration, and regenerates the Cloud-Init image. 
8 | # 9 | # Usage: 10 | # ./BulkChangeIP.sh <start_vm_id> <end_vm_id> <start_ip/cidr> <bridge> [gateway] 11 | # 12 | # Example usage: 13 | # # Update IP addresses from VM 400 to 430 14 | # ./BulkChangeIP.sh 400 430 192.168.1.50/24 vmbr0 192.168.1.1 15 | # 16 | # # Without specifying a gateway 17 | # ./BulkChangeIP.sh 400 430 192.168.1.50/24 vmbr0 18 | # 19 | source "${UTILITYPATH}/Prompts.sh" 20 | source "${UTILITYPATH}/Conversion.sh" # provides __ip_to_int__ and __int_to_ip__ 21 | __check_root__ 22 | __check_proxmox__ 23 | 24 | ############################################################################### 25 | # Argument Parsing 26 | ############################################################################### 27 | if [ "$#" -lt 4 ]; then 28 | echo "Usage: $0 <start_vm_id> <end_vm_id> <start_ip/cidr> <bridge> [gateway]" 29 | exit 1 30 | fi 31 | 32 | START_VM_ID="$1" 33 | END_VM_ID="$2" 34 | START_IP_CIDR="$3" 35 | BRIDGE="$4" 36 | GATEWAY="${5:-}" 37 | 38 | IFS='/' read -r START_IP SUBNET_MASK <<< "$START_IP_CIDR" 39 | 40 | ############################################################################### 41 | # Main Logic 42 | ############################################################################### 43 | START_IP_INT=$(__ip_to_int__ "$START_IP") 44 | 45 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 46 | currentIpInt=$(( START_IP_INT + VMID - START_VM_ID )) 47 | newIp="$(__int_to_ip__ "$currentIpInt")" 48 | 49 | if qm status "$VMID" &>/dev/null; then 50 | echo "Updating VM ID: ${VMID} with IP: ${newIp}" 51 | qm set "$VMID" --ipconfig0 "ip=${newIp}/${SUBNET_MASK},gw=${GATEWAY}" 52 | qm set "$VMID" --net0 "virtio,bridge=${BRIDGE}" 53 | qm cloudinit dump "$VMID" 54 | echo " - Cloud-Init image regenerated for VM ID: ${VMID}." 55 | else 56 | echo "VM ID: ${VMID} does not exist. Skipping..." 57 | fi 58 | done 59 | 60 | echo "IP update process completed!" 61 | -------------------------------------------------------------------------------- /VirtualMachines/CloudInit/BulkChangeUserPass.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkChangeUserPass.sh 4 | # 5 | # This script updates the Cloud-Init username and password for a range of 6 | # virtual machines (VMs) within a Proxmox VE environment. It allows you to 7 | # set a new username (optional) and password (required) for each VM, then 8 | # regenerates the Cloud-Init image to apply the changes. 9 | # 10 | # Usage: 11 | # ./BulkChangeUserPass.sh <start_vm_id> <end_vm_id> <password> [username] 12 | # 13 | # Examples: 14 | # # Update VMs 400 through 430 with a new password and new username 15 | # ./BulkChangeUserPass.sh 400 430 myNewPassword newuser 16 | # 17 | # # Update VMs 400 through 430 with a new password only, preserving the existing username 18 | # ./BulkChangeUserPass.sh 400 430 myNewPassword 19 | # 20 | source "${UTILITYPATH}/Prompts.sh" 21 | 22 | ############################################################################### 23 | # Validate environment 24 | ############################################################################### 25 | __check_root__ 26 | __check_proxmox__ 27 | 28 | ############################################################################### 29 | # Assigning input arguments 30 | ############################################################################### 31 | if [ "$#" -lt 3 ]; then 32 | echo "Error: Missing required parameters."
33 | echo "Usage: $0 [username]" 34 | exit 1 35 | fi 36 | 37 | START_VMID="$1" 38 | END_VMID="$2" 39 | PASSWORD="$3" 40 | USERNAME="${4:-}" 41 | 42 | ############################################################################### 43 | # Update Cloud-Init settings for each VM in the specified range 44 | ############################################################################### 45 | for (( VMID=START_VMID; VMID<=END_VMID; VMID++ )); do 46 | if qm status "$VMID" &>/dev/null; then 47 | echo "Updating Cloud-Init settings for VM ID: $VMID" 48 | qm set "$VMID" --ciuser "$USERNAME" --cipassword "$PASSWORD" 49 | qm cloudinit dump "$VMID" 50 | echo " - Cloud-Init username and password updated for VM ID: $VMID." 51 | else 52 | echo "VM ID: $VMID does not exist. Skipping..." 53 | fi 54 | done 55 | 56 | echo "Cloud-Init user and password update process completed!" 57 | -------------------------------------------------------------------------------- /VirtualMachines/CloudInit/BulkTogglePackageUpgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script enables or disables automatic package upgrades for a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # It updates the Cloud-Init configuration for each VM to set or unset automatic package upgrades. 5 | # 6 | # Usage: 7 | # ./BulkTogglePackageUpgrade.sh 8 | # 9 | # Arguments: 10 | # start_vm_id - The ID of the first VM to update. 11 | # end_vm_id - The ID of the last VM to update. 12 | # enable|disable - Set to 'enable' to enable automatic upgrades, or 'disable' to disable them. 13 | # 14 | # Example: 15 | # ./BulkTogglePackageUpgrade.sh 400 430 enable 16 | # ./BulkTogglePackageUpgrade.sh 400 430 disable 17 | 18 | # Check if the required parameters are provided 19 | if [ "$#" -ne 3 ]; then 20 | echo "Usage: $0 " 21 | exit 1 22 | fi 23 | 24 | # Assigning input arguments 25 | START_VM_ID=$1 26 | END_VM_ID=$2 27 | ACTION=$3 28 | 29 | # Determine the appropriate Cloud-Init setting based on the action 30 | if [ "$ACTION" == "enable" ]; then 31 | AUTO_UPGRADE_SETTING="1" 32 | elif [ "$ACTION" == "disable" ]; then 33 | AUTO_UPGRADE_SETTING="0" 34 | else 35 | echo "Invalid action: $ACTION. Use 'enable' or 'disable'." 36 | exit 1 37 | fi 38 | 39 | # Loop to update automatic package upgrade setting for VMs in the specified range 40 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 41 | # Check if the VM exists 42 | if qm status $VMID &>/dev/null; then 43 | echo "Updating automatic package upgrade setting for VM ID: $VMID" 44 | 45 | # Set the automatic upgrade setting using Cloud-Init 46 | qm set $VMID --ciuser root --cipassword "" --set "packages_auto_upgrade=$AUTO_UPGRADE_SETTING" 47 | 48 | # Regenerate the Cloud-Init image 49 | qm cloudinit dump $VMID 50 | echo " - Automatic package upgrade set to '$ACTION' for VM ID: $VMID." 51 | else 52 | echo "VM ID: $VMID does not exist. Skipping..." 53 | fi 54 | 55 | done 56 | 57 | echo "Automatic package upgrade update process completed!" -------------------------------------------------------------------------------- /VirtualMachines/Hardware/BulkChangeNetwork.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script is designed to automate the process of changing the network bridge configuration for a range of virtual machines (VMs) on a Proxmox VE cluster. 
4 | # It iterates through a specified range of VM IDs, modifying their configuration files to replace an old network bridge with a new one if present. 5 | # The script checks for the existence of each VM's configuration file and ensures that changes are only made where applicable. 6 | # 7 | # Usage: 8 | # ./BulkChangeNetwork.sh 9 | # Where: 10 | # start_id - The starting VM ID in the range to be processed. 11 | # end_id - The ending VM ID in the range to be processed. 12 | # hostname - The hostname of the Proxmox node where the VMs are hosted. 13 | # current_network - The current network bridge (e.g., vmbr0) to be replaced. 14 | # new_network - The new network bridge (e.g., vmbr1) to use in the configuration. 15 | 16 | # Check if required inputs are provided 17 | if [ $# -lt 5 ]; then 18 | echo "Usage: $0 " 19 | exit 1 20 | fi 21 | 22 | START_ID=$1 23 | END_ID=$2 24 | HOST_NAME=$3 25 | CURRENT_NETWORK=$4 26 | NEW_NETWORK=$5 27 | 28 | # Loop through the VM IDs 29 | for VMID in $(seq $START_ID $END_ID); do 30 | CONFIG_FILE="/etc/pve/nodes/${HOST_NAME}/qemu-server/${VMID}.conf" 31 | 32 | # Check if the VM config file exists 33 | if [ -f "$CONFIG_FILE" ]; then 34 | echo "Processing VM ID: $VMID" 35 | 36 | # Check and replace the network bridge 37 | if grep -q "$CURRENT_NETWORK" "$CONFIG_FILE"; then 38 | sed -i "s/$CURRENT_NETWORK/$NEW_NETWORK/g" "$CONFIG_FILE" 39 | echo " - Network bridge changed from $CURRENT_NETWORK to $NEW_NETWORK." 40 | else 41 | echo " - $CURRENT_NETWORK not found in network configuration. No changes made." 42 | fi 43 | else 44 | echo "VM ID: $VMID does not exist. Skipping..." 45 | fi 46 | done 47 | -------------------------------------------------------------------------------- /VirtualMachines/Hardware/BulkSetCPUTypeCoreCount.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script sets the CPU type and the number of cores for a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # By default, it will use the current CPU type unless specified. 5 | # 6 | # Usage: 7 | # ./BulkSetCPUTypeCoreCount.sh [cpu_type] 8 | # 9 | # Arguments: 10 | # start_vm_id - The ID of the first VM to update. 11 | # end_vm_id - The ID of the last VM to update. 12 | # num_cores - The number of CPU cores to assign to each VM. 13 | # cpu_type - Optional. The CPU type to set for each VM. If not provided, the current CPU type will be retained. 14 | # 15 | # Example: 16 | # ./BulkSetCPUTypeCoreCount.sh 400 430 4 17 | # ./BulkSetCPUTypeCoreCount.sh 400 430 4 host 18 | 19 | # Check if the required parameters are provided 20 | if [ "$#" -lt 3 ] || [ "$#" -gt 4 ]; then 21 | echo "Usage: $0 [cpu_type]" 22 | exit 1 23 | fi 24 | 25 | # Assigning input arguments 26 | START_VM_ID=$1 27 | END_VM_ID=$2 28 | NUM_CORES=$3 29 | CPU_TYPE=${4:-} 30 | 31 | # Loop to update CPU configuration for VMs in the specified range 32 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 33 | # Check if the VM exists 34 | if qm status $VMID &>/dev/null; then 35 | echo "Updating CPU configuration for VM ID: $VMID" 36 | 37 | # Set the number of CPU cores 38 | qm set $VMID --cores $NUM_CORES 39 | 40 | # Set the CPU type if provided 41 | if [ -n "$CPU_TYPE" ]; then 42 | qm set $VMID --cpu $CPU_TYPE 43 | echo " - CPU type set to '$CPU_TYPE' for VM ID: $VMID." 44 | else 45 | echo " - CPU type retained for VM ID: $VMID." 46 | fi 47 | 48 | echo " - Number of cores set to $NUM_CORES for VM ID: $VMID." 49 | else 50 | echo "VM ID: $VMID does not exist. 
Skipping..." 51 | fi 52 | 53 | done 54 | 55 | echo "CPU configuration update process completed!" -------------------------------------------------------------------------------- /VirtualMachines/Hardware/BulkSetMemoryConfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script sets the amount of memory allocated to a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # 5 | # Usage: 6 | # ./BulkSetMemoryConfig.sh 7 | # 8 | # Arguments: 9 | # start_vm_id - The ID of the first VM to update. 10 | # end_vm_id - The ID of the last VM to update. 11 | # memory_size - The amount of memory (in MB) to allocate to each VM. 12 | # 13 | # Example: 14 | # ./BulkSetMemoryConfig.sh 400 430 8192 15 | 16 | # Check if the required parameters are provided 17 | if [ "$#" -ne 3 ]; then 18 | echo "Usage: $0 " 19 | exit 1 20 | fi 21 | 22 | # Assigning input arguments 23 | START_VM_ID=$1 24 | END_VM_ID=$2 25 | MEMORY_SIZE=$3 26 | 27 | # Loop to update memory allocation for VMs in the specified range 28 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 29 | # Check if the VM exists 30 | if qm status $VMID &>/dev/null; then 31 | echo "Updating memory allocation for VM ID: $VMID" 32 | 33 | # Set the memory size 34 | qm set $VMID --memory $MEMORY_SIZE 35 | echo " - Memory allocated: ${MEMORY_SIZE}MB for VM ID: $VMID." 36 | else 37 | echo "VM ID: $VMID does not exist. Skipping..." 38 | fi 39 | 40 | done 41 | 42 | echo "Memory allocation update process completed!" -------------------------------------------------------------------------------- /VirtualMachines/Hardware/BulkUnmountISOs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script unmounts all ISO images from the CD/DVD drives for a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # 5 | # Usage: 6 | # ./BulkUnmountISOs.sh 7 | # 8 | # Arguments: 9 | # start_vm_id - The ID of the first VM to update. 10 | # end_vm_id - The ID of the last VM to update. 11 | # 12 | # Example: 13 | # ./BulkUnmountISOs.sh 400 430 14 | 15 | # Check if the required parameters are provided 16 | if [ "$#" -ne 2 ]; then 17 | echo "Usage: $0 " 18 | exit 1 19 | fi 20 | 21 | # Assigning input arguments 22 | START_VM_ID=$1 23 | END_VM_ID=$2 24 | 25 | # Loop to unmount ISOs for VMs in the specified range 26 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 27 | # Check if the VM exists 28 | if qm status $VMID &>/dev/null; then 29 | echo "Unmounting ISOs for VM ID: $VMID" 30 | 31 | # Get all CD/DVD drives for the VM 32 | DRIVES=$(qm config $VMID | grep -oP '(?<=^\S+\s)(ide\d+|sata\d+|scsi\d+|virtio\d+):\s.*media=cdrom') 33 | 34 | # Loop through each drive and unmount the ISO 35 | while read -r DRIVE; do 36 | DRIVE_NAME=$(echo "$DRIVE" | awk -F: '{print $1}') 37 | if [ -n "$DRIVE_NAME" ]; then 38 | qm set $VMID --$DRIVE_NAME none,media=cdrom 39 | echo " - ISO unmounted for drive $DRIVE_NAME of VM ID: $VMID." 40 | fi 41 | done <<< "$DRIVES" 42 | else 43 | echo "VM ID: $VMID does not exist. Skipping..." 44 | fi 45 | 46 | done 47 | 48 | echo "ISO unmount process completed!" -------------------------------------------------------------------------------- /VirtualMachines/Hardware/VMAddTerminalTTYS0.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script automates the process of setting up a serial console on a Debian-based system. 
It creates necessary configuration files, 4 | # updates GRUB to ensure the console output is directed to both the default terminal and the serial port, and enables a getty service 5 | # on the specified serial port. 6 | # 7 | # Usage: 8 | # ./VMAddTerminalTTYS0.sh 9 | # 10 | # Example: 11 | # ./VMAddTerminalTTYS0.sh # This will create /etc/init/ttyS0.conf and update GRUB to use ttyS0 at 115200 baud rate. 12 | # 13 | # Steps: 14 | # 1. Create the /etc/init directory (if it does not exist) and set proper permissions. 15 | # 2. Write the necessary configuration to /etc/init/ttyS0.conf for managing getty on ttyS0. 16 | # 3. Update GRUB configuration to include console output on ttyS0. 17 | # 4. Run update-grub to apply the new configuration. 18 | # 19 | # After running this script, the system will maintain a getty service on ttyS0, allowing console access via the specified serial port. 20 | 21 | # Create the directory /etc/init if it doesn't exist 22 | mkdir -p /etc/init 23 | chmod 755 /etc/init 24 | 25 | # Create and write the configuration to /etc/init/ttyS0.conf 26 | cat <<EOF > /etc/init/ttyS0.conf 27 | # ttyS0 - getty 28 | # 29 | # This service maintains a getty on ttyS0 from the point the system is 30 | # started until it is shut down again. 31 | start on stopped rc RUNLEVEL=[12345] 32 | stop on runlevel [!12345] 33 | respawn 34 | exec /sbin/getty -L 115200 ttyS0 vt102 35 | EOF 36 | 37 | # Update GRUB_CMDLINE_LINUX in /etc/default/grub 38 | # Replace the current GRUB_CMDLINE_LINUX line with the new one 39 | sed -i 's/^GRUB_CMDLINE_LINUX=.*/GRUB_CMDLINE_LINUX="quiet console=tty0 console=ttyS0,115200"/' /etc/default/grub 40 | 41 | # Update grub with the new configuration 42 | update-grub 43 | 44 | echo "Script completed. ttyS0.conf has been created and GRUB has been updated."
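# Note: /etc/init/ttyS0.conf is an Upstart-style job file. On guests that boot with
# systemd (Debian 8 and later, Ubuntu 15.04 and later), Upstart jobs are ignored and
# the serial getty is managed by systemd instead. Assuming such a guest, the rough
# equivalent of the getty step above would be:
#
#   systemctl enable --now serial-getty@ttyS0.service
#
# The GRUB change (console=ttyS0,115200) applies in either case.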
45 | -------------------------------------------------------------------------------- /VirtualMachines/ISOList.csv: -------------------------------------------------------------------------------- 1 | Debian 12.9.0,https://cdimage.debian.org/debian-cd/current-live/amd64/iso-hybrid/debian-live-12.9.0-amd64-standard.iso 2 | Kali 2025 W03,https://cdimage.kali.org/kali-weekly/kali-linux-2025-W03-installer-amd64.iso 3 | Ubuntu Desktop 24.04.1,https://releases.ubuntu.com/24.04.1/ubuntu-24.04.1-desktop-amd64.iso 4 | Finnix 126,https://www.finnix.org/releases/126/finnix-126.iso 5 | Arch Linux 2025.01.01,https://ziply.mm.fcix.net/archlinux/iso/2025.01.01/archlinux-2025.01.01-x86_64.iso 6 | Proxmox VE 8.3.1,https://enterprise.proxmox.com/iso/proxmox-ve_8.3-1.iso 7 | Proxmox Backup Server 3.3,https://enterprise.proxmox.com/iso/proxmox-backup-server_3.3-1.iso 8 | Proxmox Mail Gateway 8.1,https://enterprise.proxmox.com/iso/proxmox-mail-gateway_8.1-1.iso 9 | OPNSense 24.7,https://mirror.wdc1.us.leaseweb.net/opnsense/releases/24.7/OPNsense-24.7-dvd-amd64.iso.bz2 10 | Windows Server 2016,https://software-static.download.prss.microsoft.com/sg/download/888969d5-f34g-4e03-ac9d-1f9786c66749/SERVER_EVAL_x64FRE_en-us.iso 11 | Windows Server 2019,https://software-download.microsoft.com/download/pr/17763.737.190906-2324.rs5_release_svc_refresh_SERVER_EVAL_x64FRE_en-us_1.iso 12 | Windows Server 2022,https://software-download.microsoft.com/download/pr/Windows_Server_2016_Datacenter_EVAL_en-us_14393_refresh.ISO 13 | Turnkey GitLab 18.1,https://www.turnkeylinux.org/download?file=turnkey-gitlab-18.1-bookworm-amd64.iso 14 | Turnkey File Server 18.0,https://www.turnkeylinux.org/download?file=turnkey-fileserver-18.0-bookworm-amd64.iso 15 | Turnkey OpenVPN 18.1,https://www.turnkeylinux.org/download?file=turnkey-openvpn-18.1-bookworm-amd64.iso 16 | Turnkey Domain Controller 18.1,https://www.turnkeylinux.org/download?file=turnkey-domain-controller-18.1-bookworm-amd64.iso 17 | Turnkey OpenLDAP 18.1,https://www.turnkeylinux.org/download?file=turnkey-openldap-18.1-bookworm-amd64.iso 18 | Turnkey Jenkins 18.1,https://www.turnkeylinux.org/download?file=turnkey-jenkins-18.1-bookworm-amd64.iso 19 | Turnkey Ansible 18.0,https://www.turnkeylinux.org/download?file=turnkey-ansible-18.0-bookworm-amd64.iso 20 | Turnkey WireGuard 18.2,https://www.turnkeylinux.org/download?file=turnkey-wireguard-18.2-bookworm-amd64.iso 21 | -------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkBackup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script backs up a range of VMs within a Proxmox environment to a specified storage. 4 | # 5 | # Usage: 6 | # ./BulkBackup.sh 7 | # 8 | # Arguments: 9 | # start_vm_id - The ID of the first VM to back up. 10 | # end_vm_id - The ID of the last VM to back up. 11 | # storage - The target storage location for the backup. 
12 | # 13 | # Example: 14 | # ./BulkBackup.sh 500 525 local 15 | 16 | # Check if the required parameters are provided 17 | if [ "$#" -ne 3 ]; then 18 | echo "Usage: $0 <start_vm_id> <end_vm_id> <storage>" 19 | exit 1 20 | fi 21 | 22 | # Assigning input arguments 23 | START_VM_ID=$1 24 | END_VM_ID=$2 25 | STORAGE=$3 26 | 27 | # Loop through the VM IDs 28 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 29 | # Check if the VM exists 30 | if qm status $VMID &>/dev/null; then 31 | echo "Backing up VM ID: $VMID to storage: $STORAGE" 32 | 33 | # Perform the backup 34 | if vzdump $VMID --storage $STORAGE --mode snapshot; then 35 | echo " - Successfully backed up VM ID: $VMID." 36 | else 37 | echo " - Failed to back up VM ID: $VMID." 38 | fi 39 | else 40 | echo "VM ID: $VMID does not exist. Skipping..." 41 | fi 42 | 43 | done 44 | 45 | echo "Backup process complete." -------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkClone.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script automates the process of cloning virtual machines (VMs) within a Proxmox VE environment. It clones a source VM into 4 | # a specified number of new VMs, assigning them unique IDs and names based on a user-provided base name. Adding cloned VMs to a 5 | # designated pool is optional. This script is particularly useful for quickly deploying multiple VMs based on a standardized configuration. 6 | # 7 | # Usage: 8 | # ./BulkClone.sh <source_vm_id> <base_vm_name> <start_vm_id> <num_vms> [pool_name] 9 | # 10 | # Arguments: 11 | # source_vm_id - The ID of the VM that will be cloned. 12 | # base_vm_name - The base name for the new VMs, which will be appended with a numerical index. 13 | # start_vm_id - The starting VM ID for the first clone. 14 | # num_vms - The number of VMs to clone. 15 | # pool_name - Optional. The name of the pool to which the new VMs will be added. If not provided, VMs are not added to any pool. 16 | # 17 | # Example: 18 | # ./BulkClone.sh 110 Ubuntu-2C-20GB 400 30 PoolName 19 | # ./BulkClone.sh 110 Ubuntu-2C-20GB 400 30 # Without specifying a pool 20 | 21 | # Check if the minimum required parameters are provided 22 | if [ "$#" -lt 4 ]; then 23 | echo "Usage: $0 <source_vm_id> <base_vm_name> <start_vm_id> <num_vms> [pool_name]" 24 | exit 1 25 | fi 26 | 27 | # Assigning input arguments 28 | SOURCE_VM_ID=$1 29 | BASE_VM_NAME=$2 30 | START_VM_ID=$3 31 | NUM_VMS=$4 32 | POOL_NAME=${5:-} # Optional pool name, default to an empty string if not provided 33 | 34 | # Loop to create clones 35 | for (( i=0; i<$NUM_VMS; i++ )); do 36 | TARGET_VM_ID=$((START_VM_ID + i)) 37 | NAME_INDEX=$((i + 1)) 38 | VM_NAME="${BASE_VM_NAME}${NAME_INDEX}" 39 | 40 | # Clone the VM and set the constructed name 41 | 42 | # Check if a pool name was provided and add VM to the pool if it was 43 | if [ -n "$POOL_NAME" ]; then 44 | qm clone $SOURCE_VM_ID $TARGET_VM_ID --name $VM_NAME --pool $POOL_NAME 45 | else 46 | qm clone $SOURCE_VM_ID $TARGET_VM_ID --name $VM_NAME 47 | fi 48 | done 49 | 50 | echo "Cloning completed!" 51 | -------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkDelete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script is designed for batch management of virtual machines (VMs) in a Proxmox VE environment. 4 | # It takes a range of VM IDs and performs three actions: unprotects, stops, and destroys each VM in the range.
5 | # This script is useful for cleaning up VMs in a controlled manner, ensuring that all VMs within the specified 6 | # range are properly shut down and removed from the system. Caution is advised, as this will permanently delete VMs. 7 | # 8 | # Usage: 9 | # ./BulkDelete.sh start_vmid stop_vmid 10 | # start_vmid - The starting VM ID from which the batch operation begins. 11 | # stop_vmid - The ending VM ID up to which the batch operation is performed. 12 | # 13 | # Example: 14 | # ./BulkDelete.sh 600 650 15 | # 16 | 17 | # Check if input arguments are provided 18 | if [ "$#" -lt 2 ]; then 19 | echo "Usage: $0 start_vmid stop_vmid" 20 | echo "Example: $0 600 650" 21 | exit 1 22 | fi 23 | 24 | START_VMID=$1 25 | STOP_VMID=$2 26 | 27 | # Main loop through the specified range of VMIDs 28 | for vmid in $(seq $START_VMID $STOP_VMID); do 29 | qm set $vmid --protection 0 30 | qm stop $vmid 31 | qm destroy $vmid 32 | done 33 | 34 | echo "Operation completed for all specified VMs." 35 | -------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkDeleteAllLocal.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script deletes all virtual machines (VMs) currently listed on this Proxmox machine. 4 | # It performs three actions for each VM: unprotects, stops, and destroys them. 5 | # WARNING: This script will permanently delete all VMs on the Proxmox machine. 6 | # 7 | # Usage: 8 | # ./BulkDeleteAllLocal.sh 9 | # 10 | 11 | # Fetch the list of all VM IDs 12 | VM_IDS=$(qm list | awk 'NR>1 {print $1}') 13 | 14 | if [ -z "$VM_IDS" ]; then 15 | echo "No VMs found on this Proxmox machine." 16 | exit 0 17 | fi 18 | 19 | # Confirm action before proceeding 20 | echo "WARNING: This will delete the following VMs permanently:" 21 | echo "$VM_IDS" 22 | read -p "Are you sure you want to proceed? Type 'yes' to continue: " CONFIRMATION 23 | 24 | if [ "$CONFIRMATION" != "yes" ]; then 25 | echo "Operation canceled." 26 | exit 0 27 | fi 28 | 29 | # Iterate through each VM ID and delete it 30 | for vmid in $VM_IDS; do 31 | echo "Processing VM ID: $vmid" 32 | qm set $vmid --protection 0 33 | qm stop $vmid 34 | qm destroy $vmid 35 | echo "VM ID $vmid has been deleted." 36 | done 37 | 38 | echo "All VMs have been deleted successfully." -------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkRemoteMigrate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script migrates virtual machines (VMs) from a local Proxmox node to a target Proxmox node. 4 | # It utilizes the Proxmox API for migration and requires proper authentication using an API token. 5 | # The script removes any existing Cloud-Init drives before initiating the migration and adjusts VM IDs based on a provided offset. 6 | # 7 | # Usage: 8 | # ./BulkRemoteMigrate.sh 9 | # Where: 10 | # target_host - The hostname or IP address of the target Proxmox server. 11 | # api_token - The API token used for authentication. 12 | # fingerprint - The SSL fingerprint of the target Proxmox server. 13 | # target_storage - The storage identifier on the target node where VMs will be stored. 14 | # vm_offset - An integer value to offset the VM IDs to avoid conflicts. 15 | # target_network - The network bridge on the target server to connect the VMs. 
16 | # 17 | 18 | # Assigning input arguments 19 | TARGET_HOST="$1" 20 | API_TOKEN="apitoken=$2" 21 | FINGERPRINT="$3" 22 | TARGET_STORAGE="$4" 23 | VM_OFFSET="$5" 24 | TARGET_NETWORK="$6" 25 | 26 | # Proxmox API Token and host information 27 | echo "Using target host: $TARGET_HOST" 28 | echo "Using API token: $API_TOKEN" 29 | echo "Using fingerprint: $FINGERPRINT" 30 | echo "Using target storage: $TARGET_STORAGE" 31 | echo "VM offset: $VM_OFFSET" 32 | echo "Target network: $TARGET_NETWORK" 33 | 34 | VM_IDS=$(qm list | awk 'NR>1 {print $1}') # all on local node 35 | 36 | for VM_ID in $VM_IDS; do 37 | # Calculate target VM ID 38 | TARGET_VM_ID=$((VM_ID + VM_OFFSET)) 39 | 40 | # Delete the Cloud-Init drive (ide2) if it exists 41 | echo "Removing Cloud-Init drive (ide2) for VM ID $VM_ID..." 42 | qm set $VM_ID --delete ide2 43 | 44 | # Determine target bridge based on input network 45 | TARGET_BRIDGE="$TARGET_NETWORK" 46 | 47 | # Command to migrate VM 48 | MIGRATE_CMD="qm remote-migrate $VM_ID $TARGET_VM_ID '$API_TOKEN,host=$TARGET_HOST,fingerprint=$FINGERPRINT' --target-bridge $TARGET_BRIDGE --target-storage $TARGET_STORAGE --online" 49 | 50 | echo "Migrating VM ID $VM_ID to VM ID $TARGET_VM_ID on target node..." 51 | echo "Using command: $MIGRATE_CMD" 52 | 53 | # Execute migration 54 | eval $MIGRATE_CMD 55 | 56 | # Wait for the command to finish 57 | wait 58 | 59 | echo "Migration of VM ID $VM_ID completed." 60 | done 61 | 62 | echo "All specified VMs have been attempted for migration." 63 | -------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkReset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script resets a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # 5 | # Usage: 6 | # ./BulkReset.sh 7 | # 8 | # Arguments: 9 | # first_vm_id - The ID of the first VM to reset. 10 | # last_vm_id - The ID of the last VM to reset. 11 | # 12 | # Example: 13 | # ./BulkReset.sh 400 430 14 | # 15 | 16 | # Check if the required parameters are provided 17 | if [ "$#" -ne 2 ]; then 18 | echo "Usage: $0 " 19 | exit 1 20 | fi 21 | 22 | # Assigning input arguments 23 | FIRST_VM_ID=$1 24 | LAST_VM_ID=$2 25 | 26 | # Loop to reset VMs in the specified range 27 | for (( vm_id=FIRST_VM_ID; vm_id<=LAST_VM_ID; vm_id++ )); do 28 | echo "Resetting VM ID: $vm_id" 29 | qm reset $vm_id 30 | done 31 | 32 | echo "Resetting completed!" -------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkStart.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script starts a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # 5 | # Usage: 6 | # ./BulkStart.sh 7 | # 8 | # Arguments: 9 | # first_vm_id - The ID of the first VM to start. 10 | # last_vm_id - The ID of the last VM to start. 11 | # 12 | # Example: 13 | # ./BulkStart.sh 400 430 14 | # 15 | 16 | # Check if the required parameters are provided 17 | if [ "$#" -ne 2 ]; then 18 | echo "Usage: $0 " 19 | exit 1 20 | fi 21 | 22 | # Assigning input arguments 23 | FIRST_VM_ID=$1 24 | LAST_VM_ID=$2 25 | 26 | # Loop to start VMs in the specified range 27 | for (( vm_id=FIRST_VM_ID; vm_id<=LAST_VM_ID; vm_id++ )); do 28 | echo "Starting VM ID: $vm_id" 29 | qm start $vm_id 30 | done 31 | 32 | echo "Starting completed!" 
-------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkStop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script stops a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # 5 | # Usage: 6 | # ./BulkStop.sh 7 | # 8 | # Arguments: 9 | # first_vm_id - The ID of the first VM to stop. 10 | # last_vm_id - The ID of the last VM to stop. 11 | # 12 | # Example: 13 | # ./BulkStop.sh 400 430 14 | # 15 | 16 | # Check if the required parameters are provided 17 | if [ "$#" -ne 2 ]; then 18 | echo "Usage: $0 " 19 | exit 1 20 | fi 21 | 22 | # Assigning input arguments 23 | FIRST_VM_ID=$1 24 | LAST_VM_ID=$2 25 | 26 | # Loop to stop VMs in the specified range 27 | for (( vm_id=FIRST_VM_ID; vm_id<=LAST_VM_ID; vm_id++ )); do 28 | echo "Stopping VM ID: $vm_id" 29 | qm stop $vm_id 30 | done 31 | 32 | echo "Stopping completed!" -------------------------------------------------------------------------------- /VirtualMachines/Operations/BulkUnlock.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # BulkUnlock.sh 4 | # 5 | # This script unlocks a range of virtual machines (VMs) within a Proxmox VE environment. 6 | # 7 | # Usage: 8 | # ./BulkUnlock.sh 9 | # 10 | # Arguments: 11 | # first_vm_id - The ID of the first VM to unlock. 12 | # last_vm_id - The ID of the last VM to unlock. 13 | # 14 | # Example: 15 | # # Bulk unlock VMs from ID 400 to 430 16 | # ./BulkUnlock.sh 400 430 17 | # 18 | 19 | source "${UTILITYPATH}/Prompts.sh" 20 | 21 | ############################################################################### 22 | # Check prerequisites 23 | ############################################################################### 24 | __check_root__ 25 | __check_proxmox__ 26 | 27 | ############################################################################### 28 | # Main 29 | ############################################################################### 30 | if [ "$#" -ne 2 ]; then 31 | echo "Usage: $0 " 32 | exit 1 33 | fi 34 | 35 | FIRST_VM_ID="$1" 36 | LAST_VM_ID="$2" 37 | 38 | for (( vmId=FIRST_VM_ID; vmId<=LAST_VM_ID; vmId++ )); do 39 | echo "Unlocking VM ID: \"$vmId\"" 40 | qm unlock "$vmId" 41 | done 42 | 43 | echo "Bulk unlock operation completed!" 44 | -------------------------------------------------------------------------------- /VirtualMachines/Options/BulkEnableGuestAgent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script enables the QEMU guest agent for a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # Optionally, it can restart the VMs after enabling the guest agent. 5 | # 6 | # Usage: 7 | # ./EnableGuestAgent.sh [restart] 8 | # 9 | # Arguments: 10 | # start_vm_id - The ID of the first VM to update. 11 | # end_vm_id - The ID of the last VM to update. 12 | # restart - Optional. Set to 'restart' to restart the VMs after enabling the guest agent. 
13 | # 14 | # Example: 15 | # ./EnableGuestAgent.sh 400 430 16 | # ./EnableGuestAgent.sh 400 430 restart 17 | # 18 | 19 | # Check if the required parameters are provided 20 | if [ "$#" -lt 2 ] || [ "$#" -gt 3 ]; then 21 | echo "Usage: $0 [restart]" 22 | exit 1 23 | fi 24 | 25 | # Assigning input arguments 26 | START_VM_ID=$1 27 | END_VM_ID=$2 28 | RESTART_OPTION=${3:-} 29 | 30 | # Loop to enable QEMU guest agent for VMs in the specified range 31 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 32 | # Check if the VM exists 33 | if qm status $VMID &>/dev/null; then 34 | echo "Enabling QEMU guest agent for VM ID: $VMID" 35 | 36 | # Enable the QEMU guest agent 37 | qm set $VMID --agent 1 38 | echo " - QEMU guest agent enabled for VM ID: $VMID." 39 | 40 | # Optionally restart the VM if the 'restart' option is provided 41 | if [ "$RESTART_OPTION" == "restart" ]; then 42 | echo "Restarting VM ID: $VMID" 43 | qm restart $VMID 44 | echo " - VM ID: $VMID restarted." 45 | fi 46 | else 47 | echo "VM ID: $VMID does not exist. Skipping..." 48 | fi 49 | 50 | done 51 | 52 | echo "QEMU guest agent enable process completed!" -------------------------------------------------------------------------------- /VirtualMachines/Options/BulkToggleProtectionMode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script toggles the protection mode for a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # 5 | # Usage: 6 | # ./ToggleProtectionMode.sh 7 | # 8 | # Arguments: 9 | # start_vm_id - The ID of the first VM to update. 10 | # end_vm_id - The ID of the last VM to update. 11 | # enable|disable - Set to 'enable' to enable protection, or 'disable' to disable it. 12 | # 13 | # Example: 14 | # ./ToggleProtectionMode.sh 400 430 enable 15 | # ./ToggleProtectionMode.sh 400 430 disable 16 | # 17 | 18 | # Check if the required parameters are provided 19 | if [ "$#" -ne 3 ]; then 20 | echo "Usage: $0 " 21 | exit 1 22 | fi 23 | 24 | # Assigning input arguments 25 | START_VM_ID=$1 26 | END_VM_ID=$2 27 | ACTION=$3 28 | 29 | # Determine the appropriate setting based on the action 30 | if [ "$ACTION" == "enable" ]; then 31 | PROTECTION_SETTING="1" 32 | elif [ "$ACTION" == "disable" ]; then 33 | PROTECTION_SETTING="0" 34 | else 35 | echo "Invalid action: $ACTION. Use 'enable' or 'disable'." 36 | exit 1 37 | fi 38 | 39 | # Loop to update protection mode for VMs in the specified range 40 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 41 | # Check if the VM exists 42 | if qm status $VMID &>/dev/null; then 43 | echo "Updating protection mode for VM ID: $VMID" 44 | 45 | # Set the protection mode 46 | qm set $VMID --protection $PROTECTION_SETTING 47 | echo " - Protection mode set to '$ACTION' for VM ID: $VMID." 48 | else 49 | echo "VM ID: $VMID does not exist. Skipping..." 50 | fi 51 | 52 | done 53 | 54 | echo "Protection mode toggle process completed!" 55 | -------------------------------------------------------------------------------- /VirtualMachines/Options/BulkToggleStartAtBoot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script toggles the start at boot option for a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # 5 | # Usage: 6 | # ./ToggleStartAtBoot.sh 7 | # 8 | # Arguments: 9 | # start_vm_id - The ID of the first VM to update. 10 | # end_vm_id - The ID of the last VM to update. 
11 | # enable|disable - Set to 'enable' to enable start at boot, or 'disable' to disable it. 12 | # 13 | # Example: 14 | # ./ToggleStartAtBoot.sh 400 430 enable 15 | # ./ToggleStartAtBoot.sh 400 430 disable 16 | # 17 | 18 | # Check if the required parameters are provided 19 | if [ "$#" -ne 3 ]; then 20 | echo "Usage: $0 " 21 | exit 1 22 | fi 23 | 24 | # Assigning input arguments 25 | START_VM_ID=$1 26 | END_VM_ID=$2 27 | ACTION=$3 28 | 29 | # Determine the appropriate setting based on the action 30 | if [ "$ACTION" == "enable" ]; then 31 | ONBOOT_SETTING="1" 32 | elif [ "$ACTION" == "disable" ]; then 33 | ONBOOT_SETTING="0" 34 | else 35 | echo "Invalid action: $ACTION. Use 'enable' or 'disable'." 36 | exit 1 37 | fi 38 | 39 | # Loop to update start at boot for VMs in the specified range 40 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 41 | # Check if the VM exists 42 | if qm status $VMID &>/dev/null; then 43 | echo "Updating start at boot setting for VM ID: $VMID" 44 | 45 | # Set the start at boot option 46 | qm set $VMID --onboot $ONBOOT_SETTING 47 | echo " - Start at boot set to '$ACTION' for VM ID: $VMID." 48 | else 49 | echo "VM ID: $VMID does not exist. Skipping..." 50 | fi 51 | 52 | done 53 | 54 | echo "Start at boot toggle process completed!" -------------------------------------------------------------------------------- /VirtualMachines/Storage/BulkChangeStorage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script automates the process of updating the storage location specified in the configuration 4 | # files of virtual machines (VMs) on a Proxmox server. It is designed to bulk-update the storage 5 | # paths for a range of VM IDs from one storage location to another. This can be useful in scenarios 6 | # where VMs need to be moved to a different storage solution or when reorganizing storage resources. 7 | # 8 | # Usage: 9 | # ./BulkChangeStorage.sh 10 | # start_id - The starting VM ID for the operation. 11 | # end_id - The ending VM ID for the operation. 12 | # hostname - The hostname of the Proxmox node where the VMs are configured. 13 | # current_storage - The current identifier of the storage used in the VMs' configuration. 14 | # new_storage - The new identifier of the storage to replace the current one. 15 | # 16 | # Example: 17 | # ./BulkChangeStorage.sh 100 200 pve-node1 local-lvm local-zfs 18 | # 19 | 20 | # Check if required inputs are provided 21 | if [ $# -lt 5 ]; then 22 | echo "Usage: $0 " 23 | exit 1 24 | fi 25 | 26 | START_ID=$1 27 | END_ID=$2 28 | HOST_NAME=$3 29 | CURRENT_STORAGE=$4 30 | NEW_STORAGE=$5 31 | 32 | # Loop through the VM IDs 33 | for VMID in $(seq $START_ID $END_ID); do 34 | CONFIG_FILE="/etc/pve/nodes/${HOST_NAME}/qemu-server/${VMID}.conf" 35 | 36 | # Check if the VM config file exists 37 | if [ -f "$CONFIG_FILE" ]; then 38 | echo "Processing VM ID: $VMID" 39 | 40 | # Check and replace the storage 41 | if grep -q "$CURRENT_STORAGE" "$CONFIG_FILE"; then 42 | sed -i "s/$CURRENT_STORAGE/$NEW_STORAGE/g" "$CONFIG_FILE" 43 | echo " - Storage location changed from $CURRENT_STORAGE to $NEW_STORAGE." 44 | else 45 | echo " - $CURRENT_STORAGE not found in disk configuration. No changes made." 46 | fi 47 | else 48 | echo "VM ID: $VMID does not exist. Skipping..." 
49 | fi 50 | done 51 | -------------------------------------------------------------------------------- /VirtualMachines/Storage/BulkMoveDisk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script facilitates the migration of virtual machine (VM) disks across different storage backends on a Proxmox VE environment. 4 | # It iterates over a specified range of VM IDs and moves their primary disks (assumed to be 'sata0') to a designated target storage. 5 | # This is useful for managing storage utilization, upgrading to new storage hardware, or balancing loads across different storage systems. 6 | # 7 | # Usage: 8 | # ./VMMoveDisk.sh start_vmid stop_vmid target_storage 9 | # start_vmid - The starting VM ID from which disk migration begins. 10 | # stop_vmid - The ending VM ID up to which disk migration is performed. 11 | # target_storage - The identifier of the target storage where disks will be moved. 12 | # 13 | # Example: 14 | # ./VMMoveDisk.sh 101 105 local-lvm 15 | # 16 | # Function Index: 17 | # - move_disk 18 | # 19 | 20 | # Usage Information 21 | if [ "$#" -lt 3 ]; then 22 | echo "Usage: $0 start_vmid stop_vmid target_storage" 23 | echo "Example: $0 101 105 local-lvm" 24 | exit 1 25 | fi 26 | 27 | START_VMID=$1 28 | STOP_VMID=$2 29 | TARGET_STORAGE=$3 30 | 31 | # Function to move a disk 32 | move_disk() { 33 | local vmid=$1 34 | local storage=$2 35 | 36 | echo "Moving disk of VM $vmid to storage $storage..." 37 | qm move-disk $vmid sata0 $storage 38 | 39 | if [ $? -eq 0 ]; then 40 | echo "Disk move successful for VMID $vmid" 41 | else 42 | echo "Failed to move disk for VMID $vmid" 43 | fi 44 | } 45 | 46 | # Main loop through the specified range of VMIDs 47 | for (( vmid=$START_VMID; vmid<=$STOP_VMID; vmid++ )) 48 | do 49 | move_disk $vmid $TARGET_STORAGE 50 | done 51 | 52 | echo "Disk move process completed for all specified VMs." 53 | -------------------------------------------------------------------------------- /VirtualMachines/Storage/BulkResizeStorage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script resizes the storage for a range of virtual machines (VMs) within a Proxmox VE environment. 4 | # 5 | # Usage: 6 | # ./ResizeStorage.sh 7 | # 8 | # Arguments: 9 | # start_vm_id - The ID of the first VM to update. 10 | # end_vm_id - The ID of the last VM to update. 11 | # disk - The disk to resize (e.g., 'scsi0', 'virtio0'). 12 | # size - The new size to set for the disk (e.g., '+10G' to add 10GB). 13 | # 14 | # Example: 15 | # ./ResizeStorage.sh 400 430 scsi0 +10G 16 | # 17 | 18 | # Check if the required parameters are provided 19 | if [ "$#" -ne 4 ]; then 20 | echo "Usage: $0 " 21 | exit 1 22 | fi 23 | 24 | # Assigning input arguments 25 | START_VM_ID=$1 26 | END_VM_ID=$2 27 | DISK=$3 28 | SIZE=$4 29 | 30 | # Loop to resize storage for VMs in the specified range 31 | for (( VMID=START_VM_ID; VMID<=END_VM_ID; VMID++ )); do 32 | # Check if the VM exists 33 | if qm status $VMID &>/dev/null; then 34 | echo "Resizing storage for VM ID: $VMID" 35 | 36 | # Resize the specified disk 37 | qm resize $VMID $DISK $SIZE 38 | echo " - Disk $DISK resized by $SIZE for VM ID: $VMID." 39 | else 40 | echo "VM ID: $VMID does not exist. Skipping..." 41 | fi 42 | 43 | done 44 | 45 | echo "Storage resize process completed!" --------------------------------------------------------------------------------
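# Follow-up note for BulkResizeStorage.sh (a hedged sketch; the device names below are
# assumptions about the guest, not anything the script manages): "qm resize" only grows
# the virtual disk. The partition and filesystem inside each guest still have to be
# expanded before the extra space is usable. Assuming a Linux guest with cloud-guest-utils
# installed and an ext4 root filesystem on /dev/sda1, the in-guest steps would look
# roughly like:
#
#   growpart /dev/sda 1
#   resize2fs /dev/sda1
#
# An XFS root would use "xfs_growfs /" instead; other disk layouts will differ.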