├── .gitignore ├── LICENSE ├── README.md ├── custom_validators ├── circuits.py ├── devices.py ├── ip.py └── sites.py ├── export_templates ├── .gitkeep └── simple_device_label_zpl.j2 ├── other └── Sync-Vmware.ps1 ├── reports ├── .gitkeep ├── circuit-reports │ ├── circuit_audits.py │ └── circuit_counts.py ├── dcim-reports │ ├── CheckCableLocality.py │ ├── CheckConsoleOOBPower.py │ ├── CheckDeviceNaming.py │ ├── DeviceRackingReport.py │ ├── DuplicatedSerial.py │ ├── RackGroupAssignment.py │ ├── case_insensitive_check.py │ └── missing_device_type_components.py ├── ipam-reports │ ├── dns-reports.py │ ├── ip-check-prefix.py │ ├── ip-duplicate.py │ ├── ip-primary-find.py │ └── ip-primary-missing.py ├── misc │ └── CustomFieldValue.py ├── site │ └── site_address.py └── virtualization-reports │ └── vm_counts.py └── scripts ├── add_device_type_components.py ├── create_vm.py ├── find_orphaned_cables.py ├── fix_assigned_ips.py ├── geolocate_site.py ├── multi_connect.py ├── power_summary.py ├── rack_flipper.py └── renumber.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 NetBox Community 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # NetBox Customizations 2 | A collection of community submitted and maintained NetBox customizations including, but not limited to: 3 | 4 | * [NetBox Custom Validators](https://docs.netbox.dev/en/stable/customization/custom-validation/) 5 | * [NetBox Export Templates](https://docs.netbox.dev/en/stable/customization/export-templates/) 6 | * [NetBox Reports](https://docs.netbox.dev/en/stable/customization/reports/) 7 | * [NetBox Scripts](https://docs.netbox.dev/en/stable/customization/custom-scripts/) 8 | * Other external NetBox Scripts/Integrations - External scripts or tools that are executed independently from NetBox, but use NetBox as a source of truth as an input source and/or ouputsource via the API. 9 | 10 | ## Contributing 11 | To contribute a new report, open a pull request. Place your contribution in the relevant folder. Each file should contain a summary of the script at the top of the file. 12 | 13 | Nothing in this repository comes with any explicit or implied warranty. For more information see [the license](LICENSE). 14 | -------------------------------------------------------------------------------- /custom_validators/circuits.py: -------------------------------------------------------------------------------- 1 | from django.db.models import Q 2 | 3 | from circuits.models import Circuit 4 | from extras.validators import CustomValidator 5 | 6 | 7 | class CircuitInstallDateOnCreate(CustomValidator): 8 | """Require new circuits to have their install date filled out. 9 | Also prevent install dates from being removed from existing circuits. 10 | 11 | This helps your team migrate to requiring install dates if they previously 12 | were not. Using the simple validators to always require the date can 13 | break editing existing circuits with no install date where you don't 14 | actually know the original install date and don't want to fake it.""" 15 | 16 | def validate(self, circuit): 17 | 18 | # Don't require install dates for circuits that haven't been active before 19 | if circuit.status in ("planned", "provisioning"): 20 | return 21 | 22 | # Check install date for new circuits 23 | if circuit.pk is None and not circuit.install_date: 24 | self.fail( 25 | f"Date Installed must contain a valid date.", field="install_date" 26 | ) 27 | # Check install date for existing circuits 28 | # Relies on private field _prechange_snapshot 29 | elif not circuit.install_date: 30 | existing_date = circuit._prechange_snapshot.get("install_date") 31 | if existing_date: 32 | self.fail( 33 | f"Date Installed must contain a valid date.", field="install_date" 34 | ) 35 | 36 | # Alternative way to check install date, but incurs an additional query 37 | # previous_state = Circuit.objects.get(circuit.pk) 38 | # if previous_state.install_date: 39 | # self.fail( 40 | # f"Date Installed must contain a valid date.", field="install_date" 41 | # ) 42 | 43 | 44 | class CircuitCommitRateValidator(CustomValidator): 45 | """Ensure circuit commit rate doesn't exceed the termination speeds.""" 46 | 47 | def validate(self, circuit): 48 | 49 | if ( 50 | not circuit.pk 51 | or not circuit.commit_rate 52 | or not circuit.terminations.all().exists() 53 | ): 54 | return 55 | 56 | cr = circuit.commit_rate 57 | 58 | if circuit.terminations.filter(Q(port_speed__lt=cr) | Q(upstream_speed__lt=cr)): 59 | self.fail( 60 | f"Commit rate cannot be greater than the circuit termination port speeds", 61 | field="commit_rate", 62 | ) 63 | 64 | 65 | class CircuitTerminationValidator(CustomValidator): 66 | """Ensure circuit termination speeds aren't less than the circuit commit rate.""" 67 | 68 | def validate(self, termination): 69 | 70 | cr = termination.circuit.commit_rate 71 | 72 | if not cr: 73 | return 74 | 75 | if termination.port_speed and termination.port_speed < cr: 76 | self.fail( 77 | "Termination port speed cannot be less than the " 78 | f"circuit commit rate ({cr:,} kbps).", 79 | field="port_speed", 80 | ) 81 | elif termination.upstream_speed and termination.upstream_speed < cr: 82 | self.fail( 83 | "Termination upstream speed cannot be less than the " 84 | f"circuit commit rate ({cr:,} kbps).", 85 | field="upstream_speed", 86 | ) 87 | -------------------------------------------------------------------------------- /custom_validators/devices.py: -------------------------------------------------------------------------------- 1 | import re # required for DeviceAssetTagValidator 2 | from extras.validators import CustomValidator 3 | 4 | 5 | # make sure active devices have tenants 6 | class ActiveDeviceTenantValidator(CustomValidator): 7 | def validate(self, instance): 8 | if instance.status == "active" and not instance.tenant: 9 | self.fail("Active devices must have a tenant set!", field="tenant") 10 | 11 | 12 | # make sure active devices have a custom field filled out 13 | class DeviceCustomFieldsValidator(CustomValidator): 14 | def validate(self, instance): 15 | custom_field_name = "field_name" 16 | if instance.status == "active": 17 | if not instance.cf[custom_field_name]: 18 | self.fail( 19 | f"Active device with an asset tags must have {custom_field_name} value set", 20 | field=f"cf_{custom_field_name}", 21 | ) 22 | 23 | 24 | # make sure asset tags (if filled in) match regex format 25 | class DeviceAssetTagValidator(CustomValidator): 26 | def validate(self, instance): 27 | if instance.asset_tag: 28 | pattern = re.compile("^(\d{5})$") 29 | if not pattern.match(instance.asset_tag): 30 | self.fail( 31 | "Asset tag does not match Asset tag format", field="asset_tag" 32 | ) 33 | -------------------------------------------------------------------------------- /custom_validators/ip.py: -------------------------------------------------------------------------------- 1 | from extras.validators import CustomValidator 2 | 3 | # created by Pieter Lambrecht 4 | # make sure RFC1918 ips have a vrf assigned 5 | class RequireVRFforRFC1918(CustomValidator): 6 | """Enforce a VRF for all RFC1918 ip space.""" 7 | 8 | def validate(self, instance): 9 | _RFC1918 = ["10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"] 10 | if not instance.vrf: 11 | if instance.__class__.__module__ == "ipam.models.ip": 12 | try: 13 | new_ip_address = ipaddress.ip_address(str(instance).split("/")[0]) 14 | except ValueError as e: 15 | raise self.fail( 16 | f"Invalid IP address {instance}. Error {str(e)}", field="status" 17 | ) 18 | for ip_network in _RFC1918: 19 | if new_ip_address in ipaddress.ip_network(ip_network): 20 | self.fail("Private IP space requires a VRF!", field="status") 21 | 22 | if instance.__class__.__module__ == "ipam.models.prefix": 23 | try: 24 | new_ip_network = ipaddress.ip_network(instance) 25 | except ValueError as e: 26 | raise self.fail( 27 | f"Invalid IP network {instance}. Error {str(e)}", field="status" 28 | ) 29 | for ip_network in _RFC1918: 30 | if new_ip_network >= ipaddress.ip_network(ip_network): 31 | self.fail("Private IP space requires a VRF!", field="status") 32 | -------------------------------------------------------------------------------- /custom_validators/sites.py: -------------------------------------------------------------------------------- 1 | from extras.validators import CustomValidator 2 | from circuits.models import Circuit 3 | 4 | 5 | class SiteStatusCircuitValidator(CustomValidator): 6 | """Prevent sites from being retired if they have circuits that aren't in 7 | deprovisioning or decommissioned status.""" 8 | 9 | def validate(self, site): 10 | circuit_count = ( 11 | Circuit.objects.filter(terminations__site=site) 12 | .exclude(status__in=["deprovisioning", "decommissioned"]) 13 | .count() 14 | ) 15 | if site.status == "retired" and circuit_count > 0: 16 | self.fail( 17 | f"Site status cannot be set to 'retired', {circuit_count} circuits" 18 | "are not in deprovisioning or decommissioned status.", 19 | field="status", 20 | ) 21 | -------------------------------------------------------------------------------- /export_templates/.gitkeep: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /export_templates/simple_device_label_zpl.j2: -------------------------------------------------------------------------------- 1 | {# This export template is intended to generate device-labels with a zpl-compatible printer without the need for a plugin. 2 | It is optimized for 203dpi and 2x1 inch label size, all positional pararmeters are dpi-dependend #} 3 | {% for device in queryset -%} 4 | ^XA 5 | ^CI28^FX UTF-8! 6 | ^CF0,25 7 | ^FO175,20^FD{{device.name}}^FS 8 | ^CF0,15 9 | ^FO175,50^FDIP/DNS: ^FS 10 | ^CF0,20 11 | ^FX //If there is no name use IP 12 | ^FO175,70^FD{% if device.primary_ip.dns_name != ''%}{{device.primary_ip.dns_name}}{%else%}{{device.primary_ip}}{%endif%}^FS 13 | ^CF0,15 14 | ^FO175,155^FDID: {{device.id}}^FS 15 | ^FX //Change the 15,10 (8 dots = 1mm on 203dpi) to position the label, change the 4 to change QR size 16 | ^FO15,10^BQN,2,4^FDQA,https://demo.detbox.dev/dcim/devices/{{device.id}}/^FS 17 | ^XZ 18 | {% endfor %} 19 | -------------------------------------------------------------------------------- /other/Sync-Vmware.ps1: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: 3 | # This script works with Netbox v2.9. It does not work with v2.10 in its current state. 4 | # 5 | 6 | #Requires -Version 5 7 | <# 8 | .SYNOPSIS 9 | Synchronize Netbox Virtual Machines from VMware vCenter. 10 | .DESCRIPTION 11 | The Sync-Netbox cmdlet uses the Django Swagger REST API included in Netbox and VMware PowerCLI to synchronize data 12 | from vCenter to Netbox. 13 | Function skeleton adapted from https://gist.github.com/9to5IT/9620683 14 | .PARAMETER Token 15 | Netbox REST API token 16 | .NOTES 17 | Version: 1.2 18 | Author: Joe Wegner 19 | Original source: https://github.com/jwegner89/netbox-utilities 20 | Creation Date: 2018-02-08 21 | Purpose/Change: Initial script development 22 | License: GPLv3 23 | Note that this script relies heavily on the PersistentID field in vCenter, as that will uniquely identify the VM 24 | You will need to create a vcenter_persistent_id custom field on your VM object in Netbox for this to work properly 25 | removed PowerCLI requires header due to loading error 26 | Updated to support Netbox v2.9 27 | #Requires -Version 5 -Modules VMware.PowerCLI 28 | #> 29 | 30 | #---------------------------------------------------------[Initialisations]-------------------------------------------------------- 31 | 32 | #Set Error Action to Silently Continue 33 | #$ErrorActionPreference = "SilentlyContinue" 34 | # allow verbose messages to be recorded in transcript 35 | $VerbosePreference = "Continue" 36 | 37 | #----------------------------------------------------------[Declarations]---------------------------------------------------------- 38 | 39 | # store common paths in variables for URI creation 40 | # update for your Netbox instance 41 | $URIBase = "https://netbox.example.com/api" 42 | $ClustersPath = "/virtualization/clusters" 43 | $VirtualMachinesPath = "/virtualization/virtual-machines" 44 | $PlatformsPath = "/dcim/platforms" 45 | $InterfacesPath = "/virtualization/interfaces" 46 | $IPAddressesPath = "/ipam/ip-addresses" 47 | 48 | #-----------------------------------------------------------[Functions]------------------------------------------------------------ 49 | 50 | function Sync-Netbox { 51 | param ( 52 | [parameter(Mandatory=$true)] 53 | [ValidateNotNullOrEmpty()] 54 | [String] 55 | $Token 56 | ) 57 | 58 | begin { 59 | # setup headers for Netbox API calls 60 | $TokenHeader = "Token " + $Token 61 | $Headers = New-Object "System.Collections.Generic.Dictionary[[String],[String]]" 62 | $Headers.Add("Accept", "application/json") 63 | $Headers.Add("Authorization", $TokenHeader) 64 | 65 | # first, we will clear out any VMs that are in Netbox but no longer in vCenter 66 | 67 | # get all VMs in vCenter and collect their persistent IDs 68 | $VMs = Get-VM 69 | $VMCount = "Retrieved $VMs.count from vCenter" 70 | Write-Verbose $VMCount 71 | $vCenterPersistentIDs = @() 72 | foreach ($VM in $VMs) { 73 | $vCenterPersistentIDs += $VM.PersistentID 74 | } 75 | 76 | # retrieve all VMs from Netbox 77 | $URI = $URIBase + $VirtualMachinesPath + "/?limit=0" 78 | $Response = Invoke-RESTMethod -Method GET -Headers $Headers -ContentType "application/json" -URI $URI 79 | #ConvertTo-JSON $Response | Write-Verbose 80 | 81 | # check each Netbox VM against list from vCenter and delete if not present 82 | foreach ($VM in $Response.Results) { 83 | $PersistentID = $VM.custom_fields.vcenter_persistent_id 84 | if ($vCenterPersistentIDs -notcontains $PersistentID) { 85 | # Delete old VM from Netbox inventory 86 | $NetboxID = $VM.ID 87 | $URI = $URIBase + $VirtualMachinesPath + "/" + $NetboxID + "/" 88 | $Response = Invoke-RESTMethod -Method DELETE -Headers $Headers -ContentType "application/json" -URI $URI 89 | #ConvertTo-JSON $Response | Write-Verbose 90 | $Message = "Deleting " + $VM.Name 91 | Write-Verbose $Message 92 | } 93 | } 94 | 95 | # Create mapping of vCenter OSFullName to Netbox platform IDs 96 | $NetboxPlatforms = @{} 97 | $URI = $URIBase + $PlatformsPath + "/?limit=0" 98 | $Response = Invoke-RESTMethod -Method GET -Headers $Headers -ContentType "application/json" -URI $URI 99 | ConvertTo-JSON $Response | Write-Verbose 100 | 101 | foreach ($Platform in $Response.Results) { 102 | $NetboxPlatforms[$Platform.Name] = $Platform.ID 103 | } 104 | 105 | # Create mapping of vCenter Cluster Names to Netbox cluster IDs 106 | $NetboxClusters = @{} 107 | $URI = $URIBase + $ClustersPath + "/?limit=0" 108 | $Response = Invoke-RESTMethod -Method GET -Headers $Headers -ContentType "application/json" -URI $URI 109 | ConvertTo-JSON $Response | Write-Verbose 110 | 111 | foreach ($Cluster in $Response.Results) { 112 | $NetboxClusters[$Cluster.Name] = $Cluster.ID 113 | } 114 | 115 | # retrieve all clusters from vCenter 116 | $Clusters = Get-Cluster 117 | 118 | # iterate through the clusters 119 | foreach ($Cluster in $Clusters) { 120 | # Retrive Netbox ID for cluster 121 | $ClusterID = $NetboxClusters[$Cluster.Name] 122 | 123 | # Retrieve all VMs in cluster 124 | $VMs = Get-VM -Location $Cluster 125 | 126 | # Iterate through each VM object 127 | foreach ($VM in $VMs) { 128 | # Query Netbox for VM using persistent ID from vCenter 129 | $URI = $URIBase + $VirtualMachinesPath + "/?q=&cf_vcenter_persistent_id=" + $VM.PersistentID 130 | $Response = Invoke-RESTMethod -Method GET -Headers $Headers -ContentType "application/json" -URI $URI 131 | ConvertTo-JSON $Response | Write-Verbose 132 | 133 | # A successful request will always have a results dictionary, though it may be empty 134 | $NetboxInfo = $Response.Results 135 | 136 | # Retrieve Netbox ID for VM if available 137 | $NetboxID = $NetboxInfo.ID 138 | 139 | # Create object to hold this VM's attributes for export 140 | $vCenterInfo = @{} 141 | 142 | if ($Response.Count -eq 0) { 143 | # A machine with this PersistentID does not exist yet, or was created manually 144 | $vCenterInfo["custom_fields"] = @{ 145 | "vcenter_persistent_id" = $VM.PersistentID 146 | } 147 | } elseif ($Response.Count -gt 1) { 148 | # duplicate entries exit / something went wrong 149 | Write-Warning -Message [String]::Format("{0} has {1} entries in Netbox, skipping...", $VM.Name, $Response.Count) 150 | continue 151 | } 152 | # don't need to consider case where we have count -eq 1 since we already have the info set 153 | # and count *shouldn't* be negative... 154 | 155 | # calculate values for comparison 156 | $vCPUs = $VM.NumCPU 157 | $Disk = [Math]::Round($VM.ProvisionedSpaceGB).ToString() 158 | 159 | # Match up VMHost with proper Netbox Cluster 160 | $VMHost = Get-VMHost -VM $VM | Select-Object -Property Name 161 | # Our VM hosts have prefixes that match the cluster name, so adjust as needed 162 | if ($VMHost -match "CLUSTER1") { 163 | $ClusterID = $NetboxClusters["CLUSTER1"] 164 | } elseif ($VMHost -match "CLUSTER2") { 165 | $ClusterID = $NetboxClusters["CLUSTER2"] 166 | } 167 | if ($NetboxInfo.Cluster) { 168 | if ($NetboxInfo.Cluster.ID -ne $ClusterID) { $vCenterInfo["cluster"] = $ClusterID } 169 | } else { 170 | $vCenterInfo["cluster"] = $ClusterID 171 | } 172 | 173 | if ($NetboxInfo.vCPUs -ne $vCPUs) { $vCenterInfo["vcpus"] = $vCPUs } 174 | if ($NetboxInfo.Memory -ne $VM.MemoryMB) { $vCenterInfo["memory"] = $VM.MemoryMB } 175 | if ($NetboxInfo.Disk -ne $Disk) { $vCenterInfo["disk"] = $Disk } 176 | 177 | if ($VM.PowerState -eq "PoweredOn") { 178 | # Netbox status ID 1 = Active 179 | if ($NetboxInfo.Status) { 180 | if ($NetboxInfo.Status.Label -ne "Active") { $vCenterInfo["status"] = 1 } 181 | } else { 182 | $vCenterInfo["status"] = 1 183 | } 184 | } else { 185 | # VM is not powered on 186 | # Netbox status ID 0 = Offline 187 | if ($NetboxInfo.Status) { 188 | if ($NetboxInfo.Status.Label -eq "Active") { $vCenterInfo["status"] = 0 } 189 | } else { 190 | $vCenterInfo["status"] = 0 191 | } 192 | } 193 | 194 | # Retrieve guest information 195 | $Guest = Get-VMGuest -VM $VM 196 | 197 | # canonicalize to lower case hostname 198 | if ($Guest.Hostname) { 199 | $Hostname = $Guest.Hostname.ToLower() 200 | # Convert Guest OS name to Netbox ID 201 | if ($NetboxInfo.Name -ne $Hostname) { $vCenterInfo["name"] = $Hostname } 202 | } else { 203 | # Use VM inventory name as a placeholder - uniquely identified by PersistentID 204 | $Name = $VM.Name.ToLower() 205 | if ($NetboxInfo.Name -ne $Name) { $vCenterInfo["name"] = $Name } 206 | } 207 | 208 | # Lookup Netbox ID for platform 209 | if ($Guest.OSFullName) { 210 | $Platform = $Guest.OSFullName 211 | # check that this platform exists in Netbox 212 | if ($NetboxPlatforms.ContainsKey($Platform)) { 213 | $PlatformID = $NetboxPlatforms[$Platform] 214 | if ($NetboxInfo.Platform) { 215 | if ($NetboxInfo.Platform.ID -ne $PlatformID) { $vCenterInfo["platform"] = $PlatformID } 216 | } else { 217 | $vCenterInfo["platform"] = $PlatformID 218 | } 219 | } else { 220 | # platform not present in Netbox, need to create it 221 | 222 | # strip out bad character for friendly URL name 223 | $Slug = $Platform.ToLower() 224 | $Slug = $Slug -Replace "\s","-" 225 | $Slug = $Slug -Replace "\.","" 226 | $Slug = $Slug -Replace "\(","" 227 | $Slug = $Slug -Replace "\)","" 228 | $Slug = $Slug -Replace "/","" 229 | Write-Verbose "Creating new platform:" 230 | $PlatformInfo = @{ 231 | "name" = $Platform 232 | "slug" = $Slug 233 | } 234 | $PlatformJSON = ConvertTo-JSON $PlatformInfo 235 | Write-Verbose $PlatformJSON 236 | $URI = $URIBase + $PlatformsPath + "/" 237 | $Response = Invoke-RESTMethod -Method POST -Headers $Headers -ContentType "application/json" -Body $PlatformJSON -URI $URI 238 | ConvertTo-JSON $Response | Write-Verbose 239 | # add new id into platforms hashtable 240 | $NetboxPlatforms[$Response.Name] = $Response.ID 241 | } 242 | } 243 | 244 | # Store results with defaults from previous request 245 | $NetboxVM = $NetboxInfo 246 | # Check if we have any changes to submit 247 | if ($vCenterInfo.Count -gt 0) { 248 | # Create JSON of data for POST/PATCH 249 | $vCenterJSON = ConvertTo-JSON $vCenterInfo 250 | if ($NetboxID) { 251 | # VM already exists in Netbox, so update with any new info 252 | Write-Verbose "Updating Netbox VM:" 253 | Write-Verbose $vCenterJSON 254 | $URI = $URIBase + $VirtualMachinesPath + "/$NetboxID/" 255 | $Response = Invoke-RESTMethod -Method PATCH -Headers $Headers -ContentType "application/json" -Body $vCenterJSON -URI $URI 256 | ConvertTo-JSON $Response | Write-Verbose 257 | $NetboxVM = $Response 258 | } else { 259 | Write-Verbose "Creating new VM in Netbox:" 260 | Write-Verbose $vCenterJSON 261 | # VM does not exist in Netbox, so create new VM entry 262 | $URI = $URIBase + $VirtualMachinesPath + "/" 263 | $Response = Invoke-RESTMethod -Method POST -Headers $Headers -ContentType "application/json" -Body $vCenterJSON -URI $URI 264 | ConvertTo-JSON $Response | Write-Verbose 265 | $NetboxVM = $Response 266 | } 267 | } else { 268 | $VMName = $NetboxInfo.Name 269 | Write-Verbose "VM $VMName already exists in Netbox and no changes needed" 270 | } 271 | $NetboxID = $NetboxVM.ID 272 | 273 | # Create list to store collected NIC objects 274 | $vCenterNICs = @() 275 | if ($Guest.NICs) { 276 | foreach ($NICInfo in $Guest.NICs) { 277 | foreach ($NIC in $NICInfo) { 278 | # Check that the device name exists 279 | if ($NIC.Device.Name) { 280 | # Process each IP in array 281 | $IPs = @() 282 | foreach ($IP in $NIC.IPAddress) { 283 | $vCenterIP = [IPAddress]$IP 284 | # Create temporary variable for IP 285 | $TempIP = "127.0.0.1/32" 286 | # Apply appropriate prefix for IP version 287 | $AddressType = $vCenterIP | Select-Object -Property AddressFamily 288 | if ([String]$AddressType -eq "@{AddressFamily=InterNetwork}") { 289 | $TempIP = $IP + "/32" 290 | } elseif ([String]$AddressType -eq "@{AddressFamily=InterNetworkV6}") { 291 | $TempIP = $IP + "/128" 292 | } else { 293 | Write-Warning -Message [String]::Format("Address {0} is of type {1}, skipping...", $IP, $AddressType) 294 | continue 295 | } 296 | $IPs += $TempIP 297 | } 298 | 299 | $Interface = @{ 300 | "enabled" = $NIC.Connected 301 | "addresses" = $IPs 302 | "name" = $NIC.Device.Name 303 | "mac_address" = $NIC.MACAddress 304 | "virtual_machine" = $NetboxID 305 | } 306 | $vCenterNICs += $Interface 307 | } 308 | } 309 | } 310 | } 311 | 312 | # Retrieve info on NICs present in Netbox 313 | $URI = $URIBase + $InterfacesPath + "/?virtual_machine_id=$NetboxID" 314 | $Response = Invoke-RESTMethod -Method GET -Headers $Headers -ContentType "application/json" -URI $URI 315 | ConvertTo-JSON $Response | Write-Verbose 316 | $NetboxNICs = $Response.Results 317 | 318 | # 3 conditions we're interested in: 319 | # 1. Interface is in Netbox and not vCenter -> delete interface from Netbox 320 | # 2. Interface is in vCenter and not Netbox -> create new Netbox interface 321 | # 3. Interface is in both -> update info if necessary 322 | 323 | # create list of MACs for Netbox 324 | $NetboxMACs = @() 325 | foreach ($NetboxNIC in $NetboxNICs) { 326 | $NetboxMACs += $NetboxNIC.mac_address 327 | } 328 | # create list of MACs for vCenter 329 | $vCenterMACs = @() 330 | foreach ($vCenterNIC in $vCenterNICs) { 331 | $vCenterMACs += $vCenterNIC.mac_address 332 | } 333 | # Delete any interfaces in Netbox that are not present in vCenter 334 | foreach ($NetboxNIC in $NetboxNICs) { 335 | $vCenterContains = $vCenterMACs -contains $NetboxNIC.mac_address 336 | if (-Not $vCenterContains) { 337 | # Netbox interface does not match vCenter's, so remove it 338 | $Message = "Deleting Netbox interface " + $NetboxNIC.name 339 | Write-Verbose $Message 340 | $URI = $URIBase + $InterfacesPath + "/" + $NetboxNIC.id + "/" 341 | $Response = Invoke-RESTMethod -Method DELETE -Headers $Headers -ContentType "application/json" -URI $URI 342 | ConvertTo-JSON $Response | Write-Verbose 343 | } 344 | } 345 | # create hashtable mapping Netbox interface IDs to IP lists as we process them 346 | $IPAssignments = @{} 347 | foreach ($vCenterNIC in $vCenterNICs) { 348 | $NetboxContains = $NetboxMACs -contains $vCenterNIC.mac_address 349 | if (-Not $NetboxContains) { 350 | # Interface is in vCenter but not Netbox, so create new interface in Netbox with details from vCenter 351 | $Message = "Creating Netbox interface " + $vCenterNIC.name 352 | Write-Verbose $Message 353 | $vCenterNICJSON = ConvertTo-JSON $vCenterNIC 354 | $URI = $URIBase + $InterfacesPath + "/" 355 | $Response = Invoke-RESTMethod -Method POST -Headers $Headers -ContentType "application/json" -Body $vCenterNICJSON -URI $URI 356 | ConvertTo-JSON $Response | Write-Verbose 357 | $NIC = $Response 358 | # Store interface ID 359 | $NICID = [String]$NIC.ID 360 | # Get list of addresses from hash table and delete 361 | $IPs = $vCenterNIC.addresses 362 | $vCenterNIC.Remove["addresses"] 363 | # store IP list in Netbox interface ID to IP arrary hashtable 364 | $IPAssignments[$NICID] = $IPs 365 | } else { 366 | # NIC exists in both, now identify which 367 | foreach ($NetboxNIC in $NetboxNICs) { 368 | $Message = [String]::Format("Comparing Netbox interface '{0}' and vCenter interface '{1}'", $NetboxNIC.name, $vCenterNIC.name) 369 | Write-Verbose $Message 370 | if ($vCenterNIC.mac_address -eq $NetboxNIC.mac_address) { 371 | # Interfaces match, so only need to update if necessary 372 | $NICUpdate = @{} 373 | # Store interface ID 374 | $NICID = [String]$NetboxNIC.id 375 | # Currently we don't want to overwrite any custom name (e.g. from Ansible or manual) 376 | #If ($NetboxNIC.Name -ne $vCenterNIC.Name) { $NICUpdate["name"] = $vCenterNIC.Name } 377 | if ($NetboxNIC.enabled -ne $vCenterNIC.enabled) { $NICUpdate["enabled"] = $vCenterNIC.enabled } 378 | # Get list of addresses from hash table and delete 379 | $IPs = $vCenterNIC.addresses 380 | $vCenterNIC.Remove["addresses"] 381 | # store IP list in Netbox interface ID to IP arrary hashtable 382 | $IPAssignments[$NICID] = $IPs 383 | if ($NICUpdate.count -gt 0) { 384 | # only want to patch if there is anything that needs to change 385 | $Message = "Updating Netbox interface " + $NetboxNIC.name 386 | Write-Verbose $Message 387 | $NICUpdateJSON = ConvertTo-JSON $NICUpdate 388 | $URI = $URIBase + $InterfacesPath + "/" + $NetboxNIC.id + "/" 389 | $Response = Invoke-RESTMethod -Method PATCH -Headers $Headers -ContentType "application/json" -Body $NICUpdateJSON -URI $URI 390 | ConvertTo-JSON $Response | Write-Verbose 391 | } 392 | } 393 | } 394 | } 395 | } 396 | 397 | ConvertTo-JSON $IPAssignments | Write-Verbose 398 | 399 | # situations to consider: 400 | # 1. IP is assigned in Netbox and not configured in vCenter -> change IP status to "deprecated" in Netbox (just in case NIC was disabled, etc) 401 | # 2. IP is configured in vCenter and not present in Netbox -> create new Netbox IP and assign to Netbox interface 402 | # 3. IP is configured in both -> set to active in Netbox if it is not already and confirm interface 403 | 404 | # Create list of all IPs configured on vCenter VM 405 | $ConfiguredIPs = @() 406 | foreach ($InterfaceID in $IPAssignments.Keys) { 407 | $ConfiguredIPs += $IPAssignments[$InterfaceID] 408 | } 409 | 410 | # Retrieve all IPs assigned to virtual machine in Netbox 411 | # helpful: https://groups.google.com/forum/#!topic/netbox-discuss/iREz7f9-bN0 412 | $URI = $URIBase + $IPAddressesPath + "/?virtual_machine_id=" + $NetboxID 413 | $Response = Invoke-RESTMethod -Method GET -Headers $Headers -ContentType "application/json" -URI $URI 414 | ConvertTo-JSON $Response | Write-Verbose 415 | $NetboxIPs = $Response.Results 416 | 417 | # iterate through and store results in array 418 | $AssignedIPs = @() 419 | foreach ($NetboxIP in $NetboxIPs) { 420 | $IP = $NetboxIP.address 421 | if ($ConfiguredIPs -contains $IP) { 422 | # vCenter VM has IP configured, so keep it 423 | $AssignedIPs += $IP.address 424 | } else { 425 | # IP assigned in Netbox but not configured in vCenter, so set to "deprecated" 426 | $Date = Get-Date -Format d 427 | $Description = [String]::Format("{0} - inactive {1}", $NetboxVM.Name, $Date) 428 | $IPPatch = @{ 429 | "status" = "deprecated" 430 | "description" = $Description 431 | } 432 | $IPPatchJSON = ConvertTo-JSON $IPPatch 433 | $URI = $URIBase + $IPAddressesPath + "/" + $NetboxIP.id + "/" 434 | $Response = Invoke-RESTMethod -Method PATCH -Headers $Headers -ContentType "application/json" -Body $IPPatchJSON -URI $URI 435 | ConvertTo-JSON $Response | Write-Verbose 436 | } 437 | } 438 | 439 | # create or update IPs for each interface as needed 440 | foreach ($InterfaceID in $IPAssignments.Keys) { 441 | # get list of IPs from vCenter 442 | $vCenterIPs = $IPAssignments[$InterfaceID] 443 | # Iterate through this interfaces's IPs and check if they are configured in Netbox 444 | foreach ($vCenterIP in $vCenterIPs) { 445 | if ($AssignedIPs -notcontains $vCenterIP) { 446 | # IP not assigned to VM in Netbox, but need to check if it exists already 447 | $URI = $URIBase + $IPAddressesPath + "/?q=" + $vCenterIP 448 | $Response = Invoke-RESTMethod -Method GET -Headers $Headers -ContentType "application/json" -URI $URI 449 | ConvertTo-JSON $Response | Write-Verbose 450 | if ($Response.count -gt 0) { 451 | # IP exists in Netbox, need to assign it to Netbox VM 452 | $NetboxIP = $Response.results 453 | # create details for patching IP in Netbox 454 | $Description = $NetboxVM.Name 455 | $IPPatch = @{ 456 | "status" = "active" 457 | "description" = $Description 458 | "vminterface" = $InterfaceID 459 | } 460 | $IPPatchJSON = ConvertTo-JSON $IPPatch 461 | $URI = $URIBase + $IPAddressesPath + "/" + $NetboxIP.id + "/" 462 | $Response = Invoke-RESTMethod -Method PATCH -Headers $Headers -ContentType "application/json" -Body $IPPatchJSON -URI $URI 463 | ConvertTo-JSON $Response | Write-Verbose 464 | $AssignedIPs += $NetboxIP.address 465 | } else { 466 | # IP does not exist in Netbox, so we need to create it 467 | $Description = $NetboxVM.Name 468 | $IPPost = @{ 469 | "address" = $vCenterIP 470 | "status" = "active" 471 | "description" = $Description 472 | "vminterface" = $InterfaceID 473 | } 474 | $IPPostJSON = ConvertTo-JSON $IPPost 475 | $URI = $URIBase + $IPAddressesPath + "/" 476 | $Response = Invoke-RESTMethod -Method POST -Headers $Headers -ContentType "application/json" -Body $IPPostJSON -URI $URI 477 | ConvertTo-JSON $Response | Write-Verbose 478 | $AssignedIPs += $Response.address 479 | } 480 | } else { 481 | # IP exists in Netbox, make sure status is "Active" and that the interface is correct 482 | # Search through Netbox IPs to find corresponding IP 483 | foreach ($NetboxIP in $NetboxIPs) { 484 | if ($vCenterIP -eq $NetboxIP.address) { 485 | # we've found the corresponding entry so determine what data needs to be updated 486 | $IPPatch = @{} 487 | # check that the IP is on the correct interface 488 | if ($NetboxIP.interface -ne $InterfaceID) { $IPPatch["vminterface"] = $InterfaceID } 489 | # check that the status is active 490 | if ($NetboxIP.status -ne "active") { $IPPatch["status"] = "active" } 491 | # check that the description contains the hostname 492 | $VMShortName = $NetboxVM.Name.Split('.')[0] 493 | $DescriptionMatch = $NetboxIP.description -match $VMShortName 494 | if (-not $DescriptionMatch) { 495 | $IPPatch["status"] = "active" 496 | } 497 | # Only submit patches if anything has changed 498 | if ($IPPatch.count -gt 0) { 499 | $IPPatchJSON = ConvertTo-JSON $IPPatch 500 | $URI = $URIBase + $IPAddressesPath + "/" + $NetboxIP.id + "/" 501 | $Response = Invoke-RESTMethod -Method PATCH -Headers $Headers -ContentType "application/json" -Body $IPPatchJSON -URI $URI 502 | ConvertTo-JSON $Response | Write-Verbose 503 | } 504 | } 505 | } 506 | } 507 | } 508 | } 509 | } 510 | } 511 | } 512 | 513 | process { 514 | } 515 | 516 | end { 517 | } 518 | } 519 | 520 | 521 | #-----------------------------------------------------------[Execution]------------------------------------------------------------ 522 | 523 | # setup logging to file 524 | $Date = Get-Date -UFormat "%Y-%m-%d" 525 | $LogPath = "D:\logs\" + $Date + "_vcenter_netbox_sync.log" 526 | Start-Transcript -Path $LogPath 527 | # import the PowerCLI module 528 | Import-Module VMware.PowerCLI 529 | # Make sure that you are connected to the vCenter servers before running this manually 530 | $Credential = Get-Credential 531 | Connect-VIServer -Server vcenter.example.com -Credential $Credential 532 | 533 | # If running as a scheduled task, ideally you can use a service account 534 | # that can login to both Windows and vCenter with the account's Kerberos ticket 535 | # In that case, you can remove the -Credential from the above Connect-VIServer call 536 | 537 | # create your own token at your Netbox instance, e.g. https://netbox.example.com/user/api-tokens/ 538 | # You may need to assign addtional user permissions at https://netbox.example.com/admin/auth/user/ 539 | # since API permissions are not inherited from LDAP group permissions 540 | $Token = "insert-token-generated-above" 541 | Sync-Netbox -Token $Token 542 | # If you want to see REST responses, add the Verbose flag 543 | #Sync-Netbox -Verbose -Token $Token 544 | Stop-Transcript 545 | -------------------------------------------------------------------------------- /reports/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/netbox-community/customizations/a85b29e93ed75e78146211d7604c73726b9cc869/reports/.gitkeep -------------------------------------------------------------------------------- /reports/circuit-reports/circuit_audits.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | 3 | from circuits.models import Circuit 4 | from circuits.choices import CircuitStatusChoices 5 | from extras.reports import Report 6 | 7 | 8 | WEEKS_IN_HOURS_3 = 24 * 21 9 | MONTHS_IN_HOURS_1 = 24 * 30 10 | MONTHS_IN_HOURS_3 = 24 * 90 11 | MONTHS_IN_HOURS_6 = 24 * 180 12 | 13 | 14 | class StatusDates(Report): 15 | """ 16 | These reports rely on a couple custom fields existing: 17 | - Deprovision Date 18 | - Decomm Date 19 | """ 20 | 21 | description = "Check status dates of circuits for discrepancies." 22 | 23 | def test_check_deprovisioned(self): 24 | 25 | deprovisioned_circuits = Circuit.objects.filter( 26 | status=CircuitStatusChoices.STATUS_DEPROVISIONING 27 | ) 28 | 29 | today = datetime.datetime.utcnow().date() 30 | one_month_ago = today - datetime.timedelta(hours=MONTHS_IN_HOURS_1) 31 | three_months_ago = today - datetime.timedelta(hours=MONTHS_IN_HOURS_3) 32 | 33 | for circuit_obj in deprovisioned_circuits: 34 | 35 | deprovision_date = circuit_obj.cf.get("deprovision_date") 36 | 37 | if not deprovision_date: 38 | self.log_failure(circuit_obj, "No deprovisioned date defined.") 39 | 40 | elif deprovision_date < three_months_ago: # older than 3 months 41 | self.log_failure( 42 | circuit_obj, 43 | "Deprovisioned 3+ months ago ({}), time to decommission (non-billing)!".format( 44 | deprovision_date 45 | ), 46 | ) 47 | 48 | elif deprovision_date < one_month_ago: # older than 1 month 49 | self.log_warning( 50 | circuit_obj, "Deprovisioned 1 month ago ({})".format(deprovision_date) 51 | ) 52 | 53 | else: 54 | self.log_success(circuit_obj) 55 | 56 | def test_check_decommissioned(self): 57 | 58 | decommed_circuits = Circuit.objects.filter(status=CircuitStatusChoices.STATUS_DECOMMISSIONED) 59 | 60 | today = datetime.datetime.utcnow().date() 61 | six_months_ago = today - datetime.timedelta(hours=MONTHS_IN_HOURS_6) 62 | three_weeks_left = six_months_ago + datetime.timedelta(hours=WEEKS_IN_HOURS_3) 63 | 64 | for circuit_obj in decommed_circuits: 65 | 66 | decomm_date = circuit_obj.cf.get("decomm_date") 67 | 68 | if not decomm_date: 69 | self.log_failure(circuit_obj, "No decommissioned date defined.") 70 | 71 | elif decomm_date < six_months_ago: # older than 6 months 72 | self.log_warning( 73 | circuit_obj, 74 | "Circuit ready for deletion, Decommed on {}".format(decomm_date), 75 | ) 76 | 77 | elif decomm_date < three_weeks_left: # 3 weeks til 6 months old 78 | self.log_info( 79 | circuit_obj, 80 | "3 or less weeks until eligible for deletion, Decommed on {}".format( 81 | decomm_date 82 | ), 83 | ) 84 | 85 | else: 86 | self.log_success(circuit_obj) 87 | -------------------------------------------------------------------------------- /reports/circuit-reports/circuit_counts.py: -------------------------------------------------------------------------------- 1 | from django.db.models import Count, Q 2 | 3 | from dcim.choices import SiteStatusChoices 4 | from dcim.models import Site 5 | from extras.reports import Report 6 | 7 | 8 | class MplsCircuitReport(Report): 9 | """ 10 | When you have multiple circuits at a site, but only one should have 11 | the 'mpls' tag. 12 | """ 13 | 14 | description = "Check that each site only has one circuit tagged MPLS." 15 | 16 | def test_site_mpls_counts(self): 17 | 18 | site_circuit_counts = ( 19 | Site.objects.filter(status=SiteStatusChoices.STATUS_ACTIVE) 20 | .annotate( 21 | mpls_count=Count( 22 | "circuit_terminations", 23 | filter=Q(circuit_terminations__circuit__tags__name__in=["mpls"]), 24 | ) 25 | ) 26 | .order_by("name") 27 | ) 28 | 29 | for site in site_circuit_counts: 30 | if site.mpls_count > 1: 31 | 32 | self.log_failure( 33 | site, 34 | "{} circuits tagged MPLS, Reason: More than 1".format( 35 | site.mpls_count 36 | ), 37 | ) 38 | 39 | elif not site.mpls_count: 40 | self.log_failure(site, "Reason: No circuits tagged MPLS") 41 | 42 | else: 43 | self.log_success(site) 44 | 45 | 46 | class CircuitCountReport(Report): 47 | """ 48 | Useful for cases where you have a standardized-ish number of 49 | circuits supposed to be attached to a site. 50 | """ 51 | 52 | description = "Validate number of (non-decommissioned) circuits attached to a site." 53 | 54 | def test_site_circuits(self): 55 | 56 | site_circuit_counts = ( 57 | Site.objects.filter( 58 | # We need circuits matching criteria and also 59 | # sites that have no circuits attached 60 | Q( 61 | circuit_terminations__term_side="A", 62 | # Only non-decommissioned circuits 63 | circuit_terminations__circuit__status__in=[1, 2, 3, 4], 64 | ) 65 | | Q(circuit_terminations__isnull=True), 66 | status=SiteStatusChoices.STATUS_ACTIVE, 67 | ) 68 | .annotate(circuit_count=Count("circuit_terminations")) 69 | .order_by("name") 70 | ) 71 | 72 | for site in site_circuit_counts: 73 | if site.circuit_count < 3: 74 | self.log_failure( 75 | site, "{} circuit(s), Reason: 3 minimum".format(site.circuit_count) 76 | ) 77 | 78 | elif site.circuit_count >= 7: 79 | self.log_failure( 80 | site, 81 | "{} circuit(s), Reason: 7 or more circuits!".format( 82 | site.circuit_count 83 | ), 84 | ) 85 | 86 | elif site.circuit_count > 4: 87 | self.log_warning( 88 | site, 89 | "{} circuit(s), Reason: More than 4".format(site.circuit_count), 90 | ) 91 | 92 | else: 93 | self.log_success(site.circuit_count) 94 | -------------------------------------------------------------------------------- /reports/dcim-reports/CheckCableLocality.py: -------------------------------------------------------------------------------- 1 | from extras.reports import Report 2 | from dcim.models import Cable, RearPort 3 | from dcim.choices import CableTypeChoices 4 | 5 | CABLE_TYPES_OK_BETWEEN_RACKS = { 6 | CableTypeChoices.TYPE_DAC_PASSIVE, 7 | } 8 | 9 | class CheckCableLocality(Report): 10 | description = "Warn on cables between racks, error on cables between sites" 11 | 12 | def test_cable_endpoints(self): 13 | for cable in Cable.objects.prefetch_related('terminations').all(): 14 | devices = set() 15 | term_types = set() 16 | sites = set() 17 | racks = set() 18 | for t in cable.terminations.all(): 19 | device = getattr(t.termination, 'device', None) 20 | if not device: 21 | continue 22 | devices.add(device) 23 | term_types.add(t.termination_type.name) 24 | if device.site: 25 | sites.add(device.site) 26 | if device.rack: 27 | racks.add(device.rack) 28 | 29 | if len(sites) == 0: 30 | continue 31 | if len(sites) > 1: 32 | self.log_failure(cable, f"Endpoints in different sites: {sites} {devices} {cable.type}") 33 | continue 34 | # Rearport to rearport connections are expected to be in different racks 35 | if len(term_types) == 1 and "rear port" in term_types: 36 | self.log_success(cable) 37 | continue 38 | if len(racks) > 1 and cable.type not in CABLE_TYPES_OK_BETWEEN_RACKS: 39 | self.log_warning(cable, f"Endpoints in different racks: {racks} {devices} {cable.type}") 40 | continue 41 | self.log_success(cable) 42 | -------------------------------------------------------------------------------- /reports/dcim-reports/CheckConsoleOOBPower.py: -------------------------------------------------------------------------------- 1 | from dcim.choices import DeviceStatusChoices 2 | from dcim.models import ConsolePort, Device, PowerPort 3 | from extras.reports import Report 4 | 5 | # This sample checks that every live device has a console connection, an out-of-band management connection, and two power connections 6 | # This sample is pulled directly from the example used at https://netbox.readthedocs.io/en/stable/additional-features/reports/ 7 | 8 | class DeviceConnectionsReport(Report): 9 | description = "Validate the minimum physical connections for each device" 10 | 11 | def test_console_connection(self): 12 | 13 | # Check that every console port for every active device has a connection defined. 14 | active = DeviceStatusChoices.STATUS_ACTIVE 15 | for console_port in ConsolePort.objects.prefetch_related('device').filter(device__status=active): 16 | if console_port.connected_endpoint is None: 17 | self.log_failure( 18 | console_port.device, 19 | "No console connection defined for {}".format(console_port.name) 20 | ) 21 | elif not console_port.connection_status: 22 | self.log_warning( 23 | console_port.device, 24 | "Console connection for {} marked as planned".format(console_port.name) 25 | ) 26 | else: 27 | self.log_success(console_port.device) 28 | 29 | def test_power_connections(self): 30 | 31 | # Check that every active device has at least two connected power supplies. 32 | for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE): 33 | connected_ports = 0 34 | for power_port in PowerPort.objects.filter(device=device): 35 | if power_port.connected_endpoint is not None: 36 | connected_ports += 1 37 | if not power_port.connection_status: 38 | self.log_warning( 39 | device, 40 | "Power connection for {} marked as planned".format(power_port.name) 41 | ) 42 | if connected_ports < 2: 43 | self.log_failure( 44 | device, 45 | "{} connected power supplies found (2 needed)".format(connected_ports) 46 | ) 47 | else: 48 | self.log_success(device) -------------------------------------------------------------------------------- /reports/dcim-reports/CheckDeviceNaming.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from dcim.choices import DeviceStatusChoices 4 | from dcim.models import Device 5 | from extras.reports import Report 6 | 7 | # A modified John Anderson's NetBox Day 2020 Presentation by adding a check for all sites, not just LAX 8 | # All credit goes to @lampwins 9 | 10 | class DeviceHostnameReport(Report): 11 | description = "Verify each device conforms to naming convention Example: spin-(site_name)-0001 or leaf-(site_name)-0001-a" 12 | 13 | def test_device_naming(self): 14 | for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE): 15 | # Change the naming standard based on the re.match 16 | if re.match("[a-z]{4}-" + str(device.site.name) + "-[0-9]{4}(-[a-b])?", str(device.name) , re.IGNORECASE): 17 | self.log_success(device) 18 | else: 19 | self.log_failure(device, "Hostname does not conform to standard!") 20 | -------------------------------------------------------------------------------- /reports/dcim-reports/DeviceRackingReport.py: -------------------------------------------------------------------------------- 1 | from dcim.choices import DeviceStatusChoices 2 | from dcim.models import Device, Rack 3 | from extras.reports import Report 4 | 5 | class DeviceRackingReport(Report): 6 | description = "Verify each device is assigned to a Rack" 7 | def test_device_racking(self): 8 | for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE): 9 | if device.rack_id is not None: 10 | if device.position is not None: 11 | self.log_success(device) 12 | 13 | elif device.device_type.is_child_device: 14 | self.log_info(device, "Device is child device and therefore not racked itself") 15 | else: 16 | self.log_warning(device, "Device is racked, but not assigned a position") 17 | else: 18 | self.log_failure(device, "Device is not racked") 19 | -------------------------------------------------------------------------------- /reports/dcim-reports/DuplicatedSerial.py: -------------------------------------------------------------------------------- 1 | from extras.choices import JobResultStatusChoices 2 | from extras.reports import Report 3 | from dcim.models import Device 4 | from collections import OrderedDict 5 | import logging 6 | import traceback 7 | from django.utils import timezone 8 | 9 | 10 | 11 | logger = logging.getLogger(__name__) 12 | 13 | 14 | class SerialReport(Report): 15 | description = "Check if devices have a serial and no duplicate serial exists" 16 | 17 | def __init__(self): 18 | repeated = self._check_repeated() 19 | if len(repeated) > 0: 20 | for serial in repeated: 21 | setattr(self.__class__, 22 | f'test_Serial_{serial}_repeated', 23 | staticmethod(self._repeated_serial_wrapper(serial, repeated[serial] ))) 24 | self.setup() 25 | 26 | def _repeated_serial_wrapper(self, serial, devices): 27 | 28 | def run_test(): 29 | self._repeated_serial(serial, devices) 30 | 31 | return run_test 32 | 33 | def _repeated_serial(self, serial, devices): 34 | for device in devices: 35 | self.log_failure(device, f"Device with serial {serial} repeated") 36 | 37 | def _check_repeated(self): 38 | device_serials = {} 39 | for device in Device.objects.all(): 40 | if device.serial != '': 41 | if device.serial not in device_serials: 42 | device_serials[device.serial] = [device] 43 | else: 44 | device_serials[device.serial].append(device) 45 | repeated_serials = {} 46 | for serial in device_serials: 47 | if len(device_serials[serial]) > 1: 48 | repeated_serials[serial] = device_serials[serial] 49 | return repeated_serials 50 | 51 | 52 | 53 | @property 54 | def name(self): 55 | return "Device Serial Validation" 56 | 57 | def test_Device_has_serial(self): 58 | for device in Device.objects.all(): 59 | if device.serial != '': 60 | self.log_success(device, "Device have serial configured") 61 | else: 62 | self.log_failure(device, "Device hasn't serial configured") 63 | 64 | def setup(self): 65 | self._results = OrderedDict() 66 | self.active_test = None 67 | self.failed = False 68 | 69 | self.logger = logging.getLogger(f"netbox.reports.{self.full_name}") 70 | 71 | # Compile test methods and initialize results skeleton 72 | test_methods = {} 73 | for method in dir(self): 74 | if method.startswith('test_') and callable(getattr(self, method)): 75 | method_array = method.split("_") 76 | method_array = method_array[1:] 77 | name = " ".join(method_array) 78 | test_methods[name] = method 79 | self._results[name] = OrderedDict([ 80 | ('success', 0), 81 | ('info', 0), 82 | ('warning', 0), 83 | ('failure', 0), 84 | ('log', []), 85 | ]) 86 | if not test_methods: 87 | raise Exception("A report must contain at least one test method.") 88 | self.test_methods = test_methods 89 | 90 | def run(self, job_result): 91 | """ 92 | Run the report and save its results. Each test method will be executed in order. 93 | """ 94 | self.logger.info(f"Running report") 95 | job_result.status = JobResultStatusChoices.STATUS_RUNNING 96 | job_result.save() 97 | 98 | try: 99 | 100 | for method_name in self.test_methods: 101 | self.active_test = method_name 102 | test_method = getattr(self, self.test_methods[method_name]) 103 | test_method() 104 | 105 | if self.failed: 106 | self.logger.warning("Report failed") 107 | job_result.status = JobResultStatusChoices.STATUS_FAILED 108 | else: 109 | self.logger.info("Report completed successfully") 110 | job_result.status = JobResultStatusChoices.STATUS_COMPLETED 111 | 112 | except Exception as e: 113 | stacktrace = traceback.format_exc() 114 | self.log_failure(None, f"An exception occurred: {type(e).__name__}: {e}
{stacktrace}
") 115 | logger.error(f"Exception raised during report execution: {e}") 116 | job_result.set_status(JobResultStatusChoices.STATUS_ERRORED) 117 | 118 | job_result.data = self._results 119 | job_result.completed = timezone.now() 120 | job_result.save() 121 | 122 | # Perform any post-run tasks 123 | self.post_run() 124 | -------------------------------------------------------------------------------- /reports/dcim-reports/RackGroupAssignment.py: -------------------------------------------------------------------------------- 1 | from dcim.models import Rack, RackGroup 2 | from extras.reports import Report 3 | 4 | class RackGroupAssignmentReport(Report): 5 | description = "Verify each rack is assigned to a Rack Group" 6 | def test_rack_group_assignment(self): 7 | for rack in Rack.objects.all(): 8 | if rack.group_id is not None: 9 | self.log_success(rack.name) 10 | else: 11 | self.log_failure(rack.name, "No Rack Group assigned") 12 | -------------------------------------------------------------------------------- /reports/dcim-reports/case_insensitive_check.py: -------------------------------------------------------------------------------- 1 | # Useful report to flag devices with duplicated names (case sensitive) before trying to upgrade to NetBox 3.4 or new from a version of NetBox older than 3.4 2 | 3 | from dcim.models import Device, DeviceType, Site 4 | from extras.reports import Report 5 | 6 | 7 | class DeviceReport(Report): 8 | 9 | description = "Check device case insensitive name" 10 | 11 | @property 12 | def name(self): 13 | return "Device Name Report" 14 | 15 | def test_case_insensitive_name(self): 16 | devices = Device.objects.all() 17 | for device in devices: 18 | if device.name is not None: 19 | repeated_devices = [] 20 | if device.tenant is not None: 21 | repeated_devices = devices.filter( 22 | name__iexact=device.name, 23 | site_id=device.site.id, 24 | tenant_id=device.tenant.id, 25 | ).exclude(id=device.id) 26 | else: 27 | repeated_devices = devices.filter( 28 | name__iexact=device.name, site_id=device.site.id 29 | ).exclude(id=device.id) 30 | 31 | for repeated_device in repeated_devices: 32 | self.log_failure( 33 | device, 34 | f"Device with repeated name (case insensitive) [{repeated_device}]({repeated_device.get_absolute_url()})", 35 | ) 36 | 37 | -------------------------------------------------------------------------------- /reports/dcim-reports/missing_device_type_components.py: -------------------------------------------------------------------------------- 1 | # Identify devices which are missing components from the device type definition 2 | 3 | from extras.reports import Report 4 | from dcim.models import Device 5 | 6 | class MissingDeviceTypeComponents(Report): 7 | name = "Missing Device Type Components" 8 | description = "Find devices which are missing components that are in the device type template" 9 | 10 | def test_add_ports(self): 11 | for device in Device.objects.all(): 12 | dt = device.device_type 13 | 14 | for item, templateitem in [ 15 | ('consoleports', 'consoleporttemplates'), 16 | ('consoleserverports', 'consoleserverporttemplates'), 17 | ('powerports', 'powerporttemplates'), 18 | ('poweroutlets', 'poweroutlettemplates'), 19 | ('interfaces', 'interfacetemplates'), 20 | ('rearports', 'rearporttemplates'), 21 | ('frontports', 'frontporttemplates'), 22 | ('devicebays', 'devicebaytemplates'), 23 | ('modulebays', 'modulebaytemplates'), 24 | ]: 25 | names = {i.name for i in getattr(device, item).all()} 26 | templatenames = {i.name for i in getattr(dt, templateitem).all()} 27 | missing = templatenames - names 28 | if missing: 29 | self.log_warning(device, "Missing %s %r" % (item, sorted(missing))) 30 | -------------------------------------------------------------------------------- /reports/ipam-reports/dns-reports.py: -------------------------------------------------------------------------------- 1 | # Make sure to install the dnspython module for this to work (pip3 install dnspython) 2 | # Add `dnspython` to your `local-requirements.txt` to make sure it is included during updates. 3 | 4 | from dcim.choices import DeviceStatusChoices 5 | from dcim.models import Device 6 | from extras.reports import Report 7 | import socket, dns.resolver 8 | 9 | class Check_DNS_A_Record(Report): 10 | description = "Check if device's primary IPv4 has DNS records" 11 | 12 | def test_dna_a_record(self): 13 | for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE): 14 | if device.interfaces is None: 15 | continue 16 | if device.name is None: 17 | self.log_info(device, "No device name") 18 | continue 19 | if device.primary_ip4_id is not None: 20 | try: 21 | addr = socket.gethostbyname(device.name) 22 | ip4 = str(device.primary_ip4).split("/")[0] 23 | if addr == ip4: 24 | self.log_success(device) 25 | else: 26 | self.log_failure(device,"DNS: " + addr + " - Netbox: " + ip4) 27 | except socket.gaierror as err: 28 | self.log_info(device, "No DNS Resolution") 29 | else: 30 | try: 31 | addr = socket.gethostbyname(device.name) 32 | self.log_warning(device, "No IPv4 set. Could be: " + addr) 33 | except socket.gaierror as err: 34 | self.log_info(device, "No IP or DNS found.") 35 | 36 | class Check_DNS_AAAA_Record(Report): 37 | description = "Check if device's primary IPv6 has DNS records" 38 | 39 | def test_dns_aaaa_record(self): 40 | for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE): 41 | if device.interfaces is None: 42 | continue 43 | if device.name is None: 44 | self.log_info(device, "No device name") 45 | continue 46 | if device.primary_ip6_id is not None: 47 | try: 48 | aaaa = dns.resolver.query(device.name, "AAAA") 49 | addr = str(aaaa[0]) 50 | ip6 = str(device.primary_ip6).split("/")[0] 51 | if addr == ip6: 52 | self.log_success(device) 53 | else: 54 | self.log_failure(device,"DNS: " + addr + " - Netbox: " + ip6) 55 | except dns.resolver.NoAnswer: 56 | self.log_info(device, "No AAAA Record") 57 | except dns.resolver.NXDOMAIN: 58 | self.log_info(device, "No such domain") 59 | else: 60 | try: 61 | aaaa = dns.resolver.query(device.name, "AAAA") 62 | addr = str(aaaa[0]) 63 | self.log_warning(device, "No IPv6 set. Could be: " + addr) 64 | except dns.resolver.NoAnswer: 65 | self.log_success(device) 66 | except dns.resolver.NXDOMAIN: 67 | self.log_success(device) 68 | -------------------------------------------------------------------------------- /reports/ipam-reports/ip-check-prefix.py: -------------------------------------------------------------------------------- 1 | from ipam.choices import IPAddressRoleChoices 2 | from ipam.models import IPAddress, Prefix 3 | from extras.reports import Report 4 | 5 | LOOPBACK_ROLES = [ 6 | IPAddressRoleChoices.ROLE_LOOPBACK, 7 | IPAddressRoleChoices.ROLE_ANYCAST, 8 | IPAddressRoleChoices.ROLE_VIP, 9 | IPAddressRoleChoices.ROLE_VRRP, 10 | ] 11 | 12 | # CheckPrefixLength forked from https://gist.github.com/candlerb/5380a7cdd03b60fbd02a664feb266d44 13 | class CheckPrefixLength(Report): 14 | description = "Check each IP address has the prefix length of the enclosing subnet" 15 | 16 | def test_prefix_lengths(self): 17 | prefixes = list(Prefix.objects.all()) 18 | prefixes.sort(key=lambda k: k.prefix) # overlapping subnets sort in order from largest to smallest 19 | for ipaddr in IPAddress.objects.all(): 20 | a = ipaddr.address 21 | if str(a).startswith("fe80"): 22 | self.log_success(ipaddr) 23 | continue 24 | # We allow loopback-like things to be single address *or* have the parent prefix length 25 | if ipaddr.role in LOOPBACK_ROLES and a.size == 1: 26 | self.log_success(ipaddr) 27 | continue 28 | parents = [p for p in prefixes if 29 | (p.vrf and p.vrf.id) == (ipaddr.vrf and ipaddr.vrf.id) and 30 | p.prefix.version == a.version and a.ip in p.prefix] 31 | if not parents: 32 | self.log_info(ipaddr, "No parent prefix") 33 | continue 34 | parent = parents[-1] 35 | # If parent is a pool, allow single address *or* have the parent prefix length 36 | if parent.is_pool and a.size == 1: 37 | self.log_success(ipaddr) 38 | continue 39 | if a.prefixlen != parent.prefix.prefixlen: 40 | self.log_failure(ipaddr, "prefixlen (%d) inconsistent with parent prefix (%s)" % 41 | (a.prefixlen, str(parent.prefix))) 42 | continue 43 | # if the parent prefix also contains child prefixes, that probably means that 44 | # an intermediate parent prefix is missing 45 | pchildren = [p for p in prefixes if 46 | (p.vrf and p.vrf.id) == (parent.vrf and parent.vrf.id) and 47 | p.prefix.version == parent.prefix.version and 48 | p.prefix != parent.prefix and 49 | p.prefix in parent.prefix] 50 | if pchildren: 51 | self.log_warning(ipaddr, "parent prefix (%s) contains %d other child prefix(es)" % 52 | (str(parent.prefix), len(pchildren))) 53 | continue 54 | self.log_success(ipaddr) 55 | -------------------------------------------------------------------------------- /reports/ipam-reports/ip-duplicate.py: -------------------------------------------------------------------------------- 1 | from ipam.choices import IPAddressRoleChoices 2 | from ipam.models import IPAddress, Prefix 3 | from extras.reports import Report 4 | from django.db.models import Q 5 | 6 | # UniqueIPReport was forked from https://gist.github.com/dgarros/acc23b4fd8d42844b8a41f695e6cb769 7 | class UniqueIPReport(Report): 8 | description = "Validate that we don't have an IP address allocated multiple times in the network" 9 | 10 | def test_unique_ip(self): 11 | already_found = [] 12 | for ip in IPAddress.objects.exclude(Q(role=IPAddressRoleChoices.ROLE_ANYCAST) | Q(role=IPAddressRoleChoices.ROLE_VIP) | Q(role=IPAddressRoleChoices.ROLE_VRRP)): 13 | if str(ip.address) in already_found: 14 | continue 15 | elif not ip.interface: 16 | continue 17 | duplicates = ip.get_duplicates() 18 | real_dup = 0 19 | for duplicate in duplicates: 20 | if duplicate.interface: 21 | real_dup +=1 22 | if real_dup != 0: 23 | already_found.append(str(ip.address)) 24 | msg = "has %s duplicate ips" % real_dup 25 | self.log_failure( ip, msg ) 26 | 27 | class UniquePrefixReport(Report): 28 | description = "Validate that we don't have a Prefix allocated multiple times in a VRF" 29 | 30 | def test_unique_prefix(self): 31 | for prefix in Prefix.objects.all(): 32 | duplicate_prefixes = Prefix.objects.filter(vrf=prefix.vrf, prefix=str(prefix.prefix)).exclude(pk=prefix.pk) 33 | if len(duplicate_prefixes) > 0 : 34 | msg = "has %s duplicate prefix(es)" % len(duplicate_prefixes) 35 | self.log_failure( prefix, msg ) 36 | -------------------------------------------------------------------------------- /reports/ipam-reports/ip-primary-find.py: -------------------------------------------------------------------------------- 1 | from dcim.choices import DeviceStatusChoices 2 | from dcim.models import Device 3 | from virtualization.choices import VirtualMachineStatusChoices 4 | from virtualization.models import VirtualMachine 5 | from ipam.choices import IPAddressStatusChoices 6 | from extras.reports import Report 7 | 8 | # CheckPrimaryAddress reports forked from https://gist.github.com/candlerb/5380a7cdd03b60fbd02a664feb266d44 9 | class CheckPrimaryAddressDevice(Report): 10 | description = "Check that every device with an assigned IP has a primary IP address assigned" 11 | 12 | def test_device_primary_ips(self): 13 | for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE).prefetch_related('interfaces__ip_addresses').all(): 14 | fail = False 15 | intcount = 0 16 | all_addrs = {4: [], 6: []} 17 | for interface in device.interfaces.all(): 18 | if not interface.mgmt_only: 19 | intcount += 1 20 | for addr in interface.ip_addresses.exclude(status=IPAddressStatusChoices.STATUS_DEPRECATED).all(): 21 | all_addrs[addr.address.version].append(addr) 22 | # There may be dumb devices with no interfaces / IP addresses, that's OK 23 | if not device.primary_ip4 and all_addrs[4]: 24 | self.log_failure(device, "Device has no primary IPv4 address (could be %s)" % 25 | " ".join([str(a) for a in all_addrs[4]])) 26 | fail = True 27 | if not device.primary_ip6 and all_addrs[6]: 28 | self.log_failure(device, "Device has no primary IPv6 address (could be %s)" % 29 | " ".join([str(a) for a in all_addrs[6]])) 30 | fail = True 31 | if not fail: 32 | # There may be dumb devices that are used as patch panels. Check for front/back ports 33 | if intcount == 0 and device.frontports.count() > 0 and device.rearports.count() > 0: 34 | self.log_success(device) 35 | # Or dumb PDUs 36 | elif intcount == 0 and device.powerports.count() > 0 and device.poweroutlets.count() > 0: 37 | self.log_success(device) 38 | elif intcount == 0: 39 | self.log_warning(device, "No interfaces assigned to device") 40 | else: 41 | if len(all_addrs[4]) + len(all_addrs[6]) == 0: 42 | self.log_warning(device, "No IP assigned to device") 43 | else: 44 | self.log_success(device) 45 | 46 | class CheckPrimaryAddressVM(Report): 47 | description = "Check that every vm with an assigned IP has a primary IP address assigned" 48 | 49 | def test_vm_primary_ips(self): 50 | for vm in VirtualMachine.objects.filter(status=VirtualMachineStatusChoices.STATUS_ACTIVE).prefetch_related('interfaces__ip_addresses').all(): 51 | fail = False 52 | intcount = 0 53 | all_addrs = {4: [], 6: []} 54 | for interface in vm.interfaces.all(): 55 | intcount += 1 56 | for addr in interface.ip_addresses.exclude(status=IPAddressStatusChoices.STATUS_DEPRECATED).all(): 57 | all_addrs[addr.address.version].append(addr) 58 | # A VM is useless without an IP address 59 | if intcount == 0: 60 | self.log_failure(vm, "Virtual machine has no interfaces") 61 | continue 62 | if not all_addrs[4] and not all_addrs[6]: 63 | self.log_failure(vm, "Virtual machine has no IP addresses") 64 | continue 65 | if not vm.primary_ip4 and all_addrs[4]: 66 | self.log_failure(vm, "Virtual machine has no primary IPv4 address (could be %s)" % 67 | " ".join([str(a) for a in all_addrs[4]])) 68 | fail = True 69 | if not vm.primary_ip6 and all_addrs[6]: 70 | self.log_failure(vm, "Virtual machine has no primary IPv6 address (could be %s)" % 71 | " ".join([str(a) for a in all_addrs[6]])) 72 | fail = True 73 | if not fail: 74 | self.log_success(vm) 75 | -------------------------------------------------------------------------------- /reports/ipam-reports/ip-primary-missing.py: -------------------------------------------------------------------------------- 1 | from dcim.choices import DeviceStatusChoices 2 | from dcim.models import Device 3 | from extras.reports import Report 4 | 5 | class DeviceIPReport(Report): 6 | description = "Check that every device has either an IPv4 or IPv6 primary address assigned" 7 | 8 | def test_primary_ip4(self): 9 | for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE): 10 | intcount = 0 11 | for interface in device.interfaces.all(): 12 | if not interface.mgmt_only: 13 | intcount += 1 14 | # There may be dumb devices with no interfaces so no IP addresses, that's OK 15 | if intcount == 0: 16 | if device.primary_ip4_id is not None: 17 | if device.primary_ip6_id is not None: 18 | self.log_failure(device, "Device has primary IPv4 and IPv6 address but no interfaces") 19 | else: 20 | self.log_warning(device, "Device has missing primary IPv4 addresses but no interfaces") 21 | else: 22 | self.log_success(device) 23 | elif device.primary_ip4_id is None: 24 | if device.device_type.is_child_device is True: 25 | self.log_success(device) 26 | else: 27 | if device.primary_ip6_id is None: 28 | self.log_failure(device, "Device is missing primary IPv4 and IPv6 address") 29 | else: 30 | self.log_warning(device, "Device is missing primary IPv4 addresses") 31 | else: 32 | if device.device_type.is_child_device is True: 33 | self.log_success(device) 34 | else: 35 | if device.primary_ip6_id is None: 36 | self.log_info(device, "Device is missing primary IPv6 address") 37 | else: 38 | self.log_success(device) 39 | -------------------------------------------------------------------------------- /reports/misc/CustomFieldValue.py: -------------------------------------------------------------------------------- 1 | from dcim.choices import DeviceStatusChoices 2 | from dcim.models import Device 3 | from extras.reports import Report 4 | 5 | # This sample looks for a custom field named "Monitor" and then acts from there 6 | 7 | class Check_IfMonitored(Report): 8 | description = "Check if device is flagged to be monitored" 9 | 10 | def test_monitoring_enabled(self): 11 | for device in Device.objects.filter(status=DeviceStatusChoices.STATUS_ACTIVE): 12 | monitor = device.cf.get("Monitor") 13 | if monitor is True: 14 | self.log_success(device) 15 | elif monitor is False: 16 | self.log_info(device, "Device set to not monitor") 17 | else: 18 | self.log_warning(device, "Device has null monitoring field") 19 | -------------------------------------------------------------------------------- /reports/site/site_address.py: -------------------------------------------------------------------------------- 1 | # site_address.py 2 | 3 | # Make sure to add `geocoder` to your `local_requirements.txt` and make sure it is installed in your Python venv. 4 | 5 | import geocoder 6 | from extras.reports import Report 7 | from dcim.models import Site 8 | 9 | class checkSiteAddress(Report): 10 | description = "Check if site has a physical address and/or geolocation information" 11 | 12 | def test_site_address(self): 13 | for site in Site.objects.all(): 14 | if site.physical_address: 15 | self.log_success(site) 16 | else: 17 | self.log_failure(site, site.name) 18 | 19 | def test_site_geo(self): 20 | for site in Site.objects.all(): 21 | if site.latitude and site.longitude: 22 | self.log_success(site) 23 | else: 24 | if site.physical_address: 25 | g = geocoder.osm(site.physical_address) 26 | if g: 27 | self.log_warning(site, f'Missing geo location - possible ({round(g.x,6)}, {round(g.y,6)})') 28 | else: 29 | self.log_warning(site, f'Missing geo location ({site.latitude}, {site.longitude})') 30 | else: 31 | self.log_failure(site, f'Missing geo location ({site.latitude}, {site.longitude})') 32 | -------------------------------------------------------------------------------- /reports/virtualization-reports/vm_counts.py: -------------------------------------------------------------------------------- 1 | from django.db.models import Count 2 | 3 | from dcim.choices import SiteStatusChoices 4 | from dcim.models import Site 5 | from extras.reports import Report 6 | 7 | 8 | class VirtualizationReport(Report): 9 | 10 | description = "Validate the network virtualization environment for a site" 11 | 12 | def test_cluster_exists(self): 13 | """ 14 | Cluster exists for site. 15 | """ 16 | 17 | sites = Site.objects.filter(status=SiteStatusChoices.STATUS_ACTIVE) 18 | missing_clusters = Site.objects.filter(clusters__isnull=True).exclude( 19 | tags__name__in=["no-cluster"] 20 | ) 21 | 22 | for site in sites: 23 | if site in missing_clusters: 24 | self.log_failure(site, "Missing VM cluster") 25 | 26 | else: 27 | self.log_success(site) 28 | 29 | def test_vms_exist(self): 30 | """ 31 | Correct number of VMs (account for special tag or not) 32 | """ 33 | 34 | sites = ( 35 | Site.objects.filter(status__in=[1, 2]) 36 | .prefetch_related("tags") 37 | .annotate(vm_count=Count("clusters__virtual_machines")) 38 | .order_by("name") 39 | ) 40 | 41 | for site in sites: 42 | tags = site.tags.names() 43 | desired_count = 2 44 | special_tag = "" 45 | if "special_tag" in [tag for tag in tags]: 46 | desired_count = 3 47 | special_tag = " special_tag" # Prefix space is for log printing 48 | 49 | if not site.vm_count: 50 | self.log_failure( 51 | site, "No VMs ({}/{})".format(site.vm_count, desired_count) 52 | ) 53 | elif site.vm_count == desired_count: 54 | self.log_success(site) 55 | 56 | elif site.vm_count > desired_count: 57 | self.log_warning( 58 | site, "Too many VMs ({}/{})".format(site.vm_count, desired_count) 59 | ) 60 | elif site.vm_count < desired_count: 61 | self.log_warning( 62 | site, 63 | "Too few VMs ({}/{}){}".format( 64 | site.vm_count, desired_count, special_tag 65 | ), 66 | ) 67 | else: 68 | self.log_info(site, "Unknown status") 69 | -------------------------------------------------------------------------------- /scripts/add_device_type_components.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script adds missing components from the device type to selected device(s) 3 | """ 4 | 5 | from dcim.models import (Manufacturer, DeviceType, Device, 6 | ConsolePort, ConsoleServerPort, PowerPort, 7 | PowerOutlet, Interface, RearPort, FrontPort, 8 | DeviceBay, ModuleBay) 9 | from extras.scripts import Script, ObjectVar, MultiObjectVar 10 | 11 | 12 | class AddDeviceTypeComponents(Script): 13 | class Meta: 14 | name = "Add Device Type Components" 15 | description = "Add missing components to selected devices" 16 | 17 | manufacturer = ObjectVar( 18 | model=Manufacturer, 19 | required=False, 20 | ) 21 | device_type = ObjectVar( 22 | model=DeviceType, 23 | query_params={ 24 | 'manufacturer_id': '$manufacturer', 25 | }, 26 | required=False, 27 | ) 28 | devices = MultiObjectVar( 29 | model=Device, 30 | query_params={ 31 | 'device_type_id': '$device_type', 32 | }, 33 | ) 34 | 35 | def run(self, data, commit): 36 | for device in data["devices"]: 37 | dt = device.device_type 38 | 39 | # Based on Device.save(): 40 | # "If this is a new Device, instantiate all of the related 41 | # components per the DeviceType definition" 42 | # Note that ordering is important: e.g. PowerPort before 43 | # PowerOutlet, RearPort before FrontPort 44 | for klass, item, templateitem in [ 45 | (ConsolePort, 'consoleports', 'consoleporttemplates'), 46 | (ConsoleServerPort, 'consoleserverports', 47 | 'consoleserverporttemplates'), 48 | (PowerPort, 'powerports', 'powerporttemplates'), 49 | (PowerOutlet, 'poweroutlets', 'poweroutlettemplates'), 50 | (Interface, 'interfaces', 'interfacetemplates'), 51 | (RearPort, 'rearports', 'rearporttemplates'), 52 | (FrontPort, 'frontports', 'frontporttemplates'), 53 | (DeviceBay, 'devicebays', 'devicebaytemplates'), 54 | (ModuleBay, 'modulebays', 'modulebaytemplates'), 55 | ]: 56 | names = {i.name for i in getattr(device, item).all()} 57 | templates = getattr(dt, templateitem).all() 58 | items = [ 59 | x.instantiate(device=device) 60 | for x in templates 61 | if x.name not in names 62 | ] 63 | if items: 64 | for i in items: 65 | i.full_clean() 66 | klass.objects.bulk_create(items) 67 | self.log_success("%s (%d): created %d %s" % (device.name, 68 | device.id, 69 | len(items), 70 | item)) 71 | -------------------------------------------------------------------------------- /scripts/create_vm.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script allows you to create a VM, an interface and primary IP address 3 | all in one screen. 4 | 5 | Workaround for issues: 6 | https://github.com/netbox-community/netbox/issues/1492 7 | https://github.com/netbox-community/netbox/issues/648 8 | """ 9 | 10 | from dcim.models import DeviceRole, Platform 11 | from django.core.exceptions import ObjectDoesNotExist 12 | from extras.models import Tag 13 | from ipam.choices import IPAddressStatusChoices 14 | from ipam.models import IPAddress, VRF 15 | from tenancy.models import Tenant 16 | from virtualization.choices import VirtualMachineStatusChoices 17 | from virtualization.models import Cluster, VirtualMachine, VMInterface 18 | from extras.scripts import Script, StringVar, IPAddressWithMaskVar, ObjectVar, MultiObjectVar, ChoiceVar, IntegerVar, TextVar 19 | 20 | class NewVM(Script): 21 | class Meta: 22 | name = "New VM" 23 | description = "Create a new VM" 24 | 25 | vm_name = StringVar(label="VM name") 26 | dns_name = StringVar(label="DNS name", required=False) 27 | vm_tags = MultiObjectVar(model=Tag, label="VM tags", required=False) 28 | primary_ip4 = IPAddressWithMaskVar(label="IPv4 address") 29 | #primary_ip4_tags = MultiObjectVar(model=Tag, label="IPv4 tags", required=False) 30 | primary_ip6 = IPAddressWithMaskVar(label="IPv6 address", required=False) 31 | #primary_ip6_tags = MultiObjectVar(model=Tag, label="IPv6 tags", required=False) 32 | #vrf = ObjectVar(model=VRF, required=False) 33 | role = ObjectVar(model=DeviceRole, query_params=dict(vm_role=True), required=False) 34 | status = ChoiceVar(VirtualMachineStatusChoices, default=VirtualMachineStatusChoices.STATUS_ACTIVE) 35 | cluster = ObjectVar(model=Cluster) 36 | tenant = ObjectVar(model=Tenant, required=False) 37 | platform = ObjectVar(model=Platform, required=False) 38 | interface_name = StringVar(default="eth0") 39 | mac_address = StringVar(label="MAC address", required=False) 40 | vcpus = IntegerVar(label="VCPUs", required=False) 41 | memory = IntegerVar(label="Memory (MB)", required=False) 42 | disk = IntegerVar(label="Disk (GB)", required=False) 43 | comments = TextVar(label="Comments", required=False) 44 | 45 | def run(self, data, commit): 46 | vm = VirtualMachine( 47 | name=data["vm_name"], 48 | role=data["role"], 49 | status=data["status"], 50 | cluster=data["cluster"], 51 | platform=data["platform"], 52 | vcpus=data["vcpus"], 53 | memory=data["memory"], 54 | disk=data["disk"], 55 | comments=data["comments"], 56 | tenant=data.get("tenant"), 57 | ) 58 | vm.full_clean() 59 | vm.save() 60 | vm.tags.set(data["vm_tags"]) 61 | 62 | vminterface = VMInterface( 63 | name=data["interface_name"], 64 | mac_address=data["mac_address"], 65 | virtual_machine=vm, 66 | ) 67 | vminterface.full_clean() 68 | vminterface.save() 69 | 70 | def add_addr(addr, family): 71 | if not addr: 72 | return 73 | if addr.version != family: 74 | raise RuntimeError(f"Wrong family for {a}") 75 | try: 76 | a = IPAddress.objects.get( 77 | address=addr, 78 | vrf=data.get("vrf"), 79 | ) 80 | a.snapshot() 81 | result = "Assigned" 82 | except ObjectDoesNotExist: 83 | a = IPAddress( 84 | address=addr, 85 | vrf=data.get("vrf"), 86 | ) 87 | result = "Created" 88 | a.status = IPAddressStatusChoices.STATUS_ACTIVE 89 | a.dns_name = data["dns_name"] 90 | if a.assigned_object: 91 | raise RuntimeError(f"Address {addr} is already assigned") 92 | a.assigned_object = vminterface 93 | a.tenant = data.get("tenant") 94 | a.full_clean() 95 | a.save() 96 | #a.tags.set(data[f"primary_ip{family}_tags"]) 97 | self.log_info(f"{result} IP address {a.address} {a.vrf or ''}") 98 | setattr(vm, f"primary_ip{family}", a) 99 | 100 | vm.snapshot() 101 | add_addr(data["primary_ip4"], 4) 102 | add_addr(data["primary_ip6"], 6) 103 | vm.full_clean() 104 | vm.save() 105 | self.log_success(f"Created VM [{vm.name}](/virtualization/virtual-machines/{vm.id}/)") 106 | -------------------------------------------------------------------------------- /scripts/find_orphaned_cables.py: -------------------------------------------------------------------------------- 1 | from django.db.models import Count, Q 2 | 3 | from dcim.models import Cable 4 | from extras.scripts import Script 5 | 6 | 7 | class BrokenCableTerminations(Script): 8 | name = f'Find (partially) orphaned cables' 9 | description = f'Find cable terminations misising either the A or B termination' 10 | 11 | def run(self, data, commit): 12 | cables = Cable.objects.annotate( 13 | aterm=Count('terminations', filter=Q(terminations__cable_end="A")), 14 | bterm=Count('terminations', filter=Q(terminations__cable_end="B")), 15 | ).filter(Q(bterm=0) | Q(aterm=0) ) 16 | self.log_info(f'Found {cables.count()} problematic cables in DB') 17 | for cable in cables: 18 | if cable.aterm == 0 and cable.bterm > 0: 19 | self.log_warning(f'[{cable}](/dcim/cables/{cable.pk}/) is missing \'A\' side termination') 20 | elif cable.aterm > 0 and cable.bterm == 0: 21 | self.log_warning(f'[{cable}](/dcim/cables/{cable.pk}/) is missing \'B\' side termination') 22 | else: 23 | self.log_warning(f'[{cable}](/dcim/cables/{cable.pk}/) is orphaned') 24 | -------------------------------------------------------------------------------- /scripts/fix_assigned_ips.py: -------------------------------------------------------------------------------- 1 | """ 2 | Identify and fix any IPAddress objects which have assigned_object_type_id but not assigned_object_id 3 | or vice versa (fix by setting both to null) 4 | """ 5 | 6 | from extras.scripts import Script 7 | from ipam.models import IPAddress 8 | 9 | class FixAssignedIPs(Script): 10 | class Meta: 11 | name = "Fix Assigned IPs" 12 | description = "Fix any IP addresses which have assigned_object_type_id but not assigned_object_id, or vice versa" 13 | 14 | def run(self, data, commit): 15 | for ip in IPAddress.objects.filter(assigned_object_type_id__isnull=False, assigned_object_id__isnull=True): 16 | self.fix(ip) 17 | for ip in IPAddress.objects.filter(assigned_object_type_id__isnull=True, assigned_object_id__isnull=False): 18 | self.fix(ip) 19 | 20 | def fix(self, ip): 21 | old_assigned_object_type_id = ip.assigned_object_type_id 22 | old_assigned_object_id = ip.assigned_object_id 23 | ip.snapshot() 24 | ip.assigned_object_type_id = None 25 | ip.assigned_object_id = None 26 | ip.full_clean() 27 | ip.save() 28 | self.log_success(f"Fixed {ip} - had assigned_object_type_id={old_assigned_object_type_id}, assigned_object_id={old_assigned_object_id}") 29 | -------------------------------------------------------------------------------- /scripts/geolocate_site.py: -------------------------------------------------------------------------------- 1 | # geolocate_site.py 2 | 3 | # Make sure to add `geocoder` to your `local_requirements.txt` and make sure it is installed in your Python venv. 4 | 5 | import geocoder 6 | from dcim.models import Site, SiteGroup, Region 7 | from extras.scripts import Script, ObjectVar, BooleanVar 8 | 9 | name = "Populate geolocation for sites" 10 | 11 | 12 | class SiteGeoAllRegion(Script): 13 | class Meta: 14 | name = "All sites for a region" 15 | description = "Retrieve list of all sites and populate the latitude/longitude fields based on their physical address." 16 | commit_default = True 17 | 18 | region = ObjectVar(model=Region) 19 | overwrite = BooleanVar( 20 | default=False, 21 | label="Override existing value", 22 | description="If location already exists, update the value.", 23 | ) 24 | 25 | def run(self, data, commit): 26 | for site in get_sites_for_region(data["region"]): 27 | update_site(self, site) 28 | 29 | 30 | class SiteGeoAllSiteGroup(Script): 31 | class Meta: 32 | name = "All sites for a site group" 33 | description = "Retrieve list of all sites and populate the latitude/longitude fields based on their physical address." 34 | commit_default = True 35 | 36 | group = ObjectVar(model=SiteGroup) 37 | overwrite = BooleanVar( 38 | default=False, 39 | label="Override existing value", 40 | description="If location already exists, update the value.", 41 | ) 42 | 43 | def run(self, data, commit): 44 | for site in get_sites_for_group(data["group"]): 45 | update_site(self, site) 46 | 47 | 48 | class SiteGeoOne(Script): 49 | class Meta: 50 | name = "Specific site" 51 | description = "Populate the latitude/longitude fields for a specific site based on its physical address." 52 | commit_default = True 53 | 54 | location = ObjectVar(model=Site) 55 | overwrite = BooleanVar( 56 | default=False, 57 | label="Override existing value", 58 | description="If location already exists, update the value.", 59 | ) 60 | 61 | def run(self, data, commit): 62 | site = data["location"] 63 | update_site(self, site, data["overwrite"]) 64 | 65 | 66 | def update_site(script, site, overwrite=False): 67 | if site.physical_address: 68 | if site.latitude and site.longitude and overwrite == False: 69 | script.log_info( 70 | f"{site.name}: {site.physical_address} already at {site.longitude}, {site.latitude}" 71 | ) 72 | elif g := geocoder.osm(site.physical_address): 73 | script.log_success( 74 | f"{site.name} geolocation found: {round(g.y,6)}, {round(g.x,6)}" 75 | ) 76 | site.full_clean() 77 | site.latitude = round(g.y, 6) 78 | site.longitude = round(g.x, 6) 79 | site.save() 80 | else: 81 | script.log_failure( 82 | f"{site.name} no geolocation found for {site.physical_address}" 83 | ) 84 | else: 85 | script.log_warning(f"No physical address for {site.name}") 86 | 87 | 88 | def get_sites_for_region(region): 89 | region_list = [region] 90 | get_child_regions(region, region_list) 91 | site_list = [] 92 | for place in region_list: 93 | site_list.extend(iter(Site.objects.filter(region=place))) 94 | return site_list 95 | 96 | 97 | def get_child_regions(region, region_list): 98 | for sub_region in Region.objects.filter(parent=region): 99 | region_list.append(sub_region) 100 | get_child_regions(sub_region, region_list) 101 | return region_list 102 | 103 | 104 | def get_sites_for_group(group): 105 | group_list = [group] 106 | get_child_groups(group, group_list) 107 | site_list = [] 108 | for type in group_list: 109 | site_list.extend(iter(Site.objects.filter(group=type))) 110 | return site_list 111 | 112 | 113 | def get_child_groups(group, group_list): 114 | for sub_group in SiteGroup.objects.filter(parent=group): 115 | group_list.append(sub_group) 116 | get_child_groups(sub_group, group_list) 117 | return group_list 118 | -------------------------------------------------------------------------------- /scripts/multi_connect.py: -------------------------------------------------------------------------------- 1 | """ 2 | Add multiple connections from one device to another 3 | """ 4 | 5 | from dcim.choices import LinkStatusChoices, CableTypeChoices, CableLengthUnitChoices 6 | from dcim.models import Device, Cable 7 | from django.db import transaction 8 | from extras.models import Tag 9 | from extras.scripts import Script, ChoiceVar, ObjectVar, StringVar, IntegerVar, MultiObjectVar 10 | import re 11 | from netbox.settings import VERSION 12 | from tenancy.models import Tenant 13 | try: 14 | from netbox.choices import ColorChoices 15 | except ModuleNotFoundError: 16 | from utilities.choices import ColorChoices 17 | from utilities.forms.constants import ALPHANUMERIC_EXPANSION_PATTERN 18 | from utilities.forms.utils import expand_alphanumeric_pattern 19 | 20 | NB_VERSION = [int(n) for n in VERSION.split('-')[0].split('.')] 21 | 22 | NO_CHOICE = () 23 | # https://github.com/netbox-community/netbox/issues/8228 24 | # Only apply to Netbox < v3.1.5 25 | if NB_VERSION < [3, 1, 5]: 26 | NO_CHOICE = ( 27 | ('', '---------'), 28 | ) 29 | 30 | TERM_CHOICES = ( 31 | ('interfaces', 'Interfaces'), 32 | ('frontports', 'Front Ports'), 33 | ('rearports', 'Rear Ports'), 34 | ) 35 | 36 | def expand_pattern(value): 37 | if not value: 38 | return [''] 39 | if re.search(ALPHANUMERIC_EXPANSION_PATTERN, value): 40 | return list(expand_alphanumeric_pattern(value)) 41 | return [value] 42 | 43 | class MultiConnect(Script): 44 | class Meta: 45 | name = "Multi Connect" 46 | description = "Add multiple connections from one device to another" 47 | 48 | device_a = ObjectVar(model=Device, label="Device A") 49 | termination_type_a = ChoiceVar(choices=TERM_CHOICES, label="Device A port type") 50 | termination_name_a = StringVar(label="Device A port name pattern", description="Example: ge-0/0/[5,7,12-23]") 51 | 52 | device_b = ObjectVar(model=Device, label="Device B") 53 | termination_type_b = ChoiceVar(choices=TERM_CHOICES, label="Device B port type") 54 | termination_name_b = StringVar(label="Device B port name pattern", description="Example: ge-0/0/[5,7,12-23]") 55 | 56 | cable_status = ChoiceVar(choices=LinkStatusChoices, default=LinkStatusChoices.STATUS_CONNECTED, label="Cable Status") 57 | cable_type = ChoiceVar(choices=NO_CHOICE+tuple(CableTypeChoices), required=False, label="Cable Type") 58 | cable_tenant = ObjectVar(model=Tenant, required=False, label="Cable Tenant") 59 | cable_label = StringVar(label="Cable Label pattern", required=False) 60 | cable_color = ChoiceVar(choices=NO_CHOICE+tuple(ColorChoices), required=False, label="Cable Color") 61 | cable_length = IntegerVar(required=False, label="Cable Length") # unfortunately there is no DecimalVar 62 | cable_length_unit = ChoiceVar(choices=NO_CHOICE+tuple(CableLengthUnitChoices), required=False, label="Cable Length Unit") 63 | cable_tags = MultiObjectVar(model=Tag, required=False, label="Cable Tags") 64 | 65 | def run(self, data, commit): 66 | device_a = data["device_a"] 67 | device_b = data["device_b"] 68 | ports_a = getattr(device_a, data["termination_type_a"]).all() 69 | ports_b = getattr(device_b, data["termination_type_b"]).all() 70 | 71 | terms_a = expand_pattern(data["termination_name_a"]) 72 | terms_b = expand_pattern(data["termination_name_b"]) 73 | if len(terms_a) != len(terms_b): 74 | return self.log_failure(f'Mismatched number of ports: {len(terms_a)} (A) versus {len(terms_b)} (B)') 75 | labels = expand_pattern(data["cable_label"]) 76 | if len(labels) == 1: 77 | labels = [labels[0] for i in range(len(terms_a))] 78 | elif len(labels) != len(terms_a): 79 | return self.log_failure(f'Mismatched number of labels: {len(labels)} labels versus {len(terms_a)} ports') 80 | 81 | for i in range(len(terms_a)): 82 | term_a = [x for x in ports_a if x.name == terms_a[i]] 83 | if len(term_a) != 1: 84 | self.log_failure(f'Unable to find "{terms_a[i]}" in {data["termination_type_a"]} on device A ({device_a.name})') 85 | continue 86 | term_b = [x for x in ports_b if x.name == terms_b[i]] 87 | if len(term_b) != 1: 88 | self.log_failure(f'Unable to find "{terms_b[i]}" in {data["termination_type_b"]} on device B ({device_b.name})') 89 | continue 90 | cable_args = dict( 91 | type=data["cable_type"], 92 | status=data["cable_status"], 93 | tenant=data["cable_tenant"], 94 | label=labels[i], 95 | color=data["cable_color"], 96 | length=data["cable_length"], 97 | length_unit=data["cable_length_unit"], 98 | ) 99 | if NB_VERSION < [3, 3, 0]: 100 | cable_args.update(dict( 101 | termination_a=term_a[0], 102 | termination_b=term_b[0], 103 | )) 104 | else: 105 | cable_args.update(dict( 106 | a_terminations=term_a, 107 | b_terminations=term_b, 108 | )) 109 | cable = Cable(**cable_args) 110 | try: 111 | with transaction.atomic(): 112 | cable.full_clean() 113 | cable.save() 114 | cable.tags.set(data["cable_tags"]) 115 | except Exception as e: 116 | self.log_failure(f'Unable to connect {device_a.name}:{terms_a[i]} to {device_b.name}:{terms_b[i]}: {e}') 117 | continue 118 | self.log_success(f'Created cable from {device_a.name}:{terms_a[i]} to {device_b.name}:{terms_b[i]}') 119 | -------------------------------------------------------------------------------- /scripts/power_summary.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script reports on power utilisation and power port availability, 3 | either globally (aggregated per site), or for an individual site 4 | (listing all the devices). 5 | 6 | It's a script rather than a report so that it can prompt for site choice. 7 | 8 | It doesn't rely on power ports being connected to power feeds or 9 | calculations done at PDU level; it uses the allocated_draw of 10 | each power port directly. 11 | """ 12 | 13 | import csv 14 | import io 15 | from dcim.choices import PowerPortTypeChoices 16 | from dcim.models import Site, Device, PowerPort, PowerOutlet, PowerFeed, PowerPanel 17 | from extras.scripts import Script, StringVar, ObjectVar, ChoiceVar 18 | 19 | DC_TYPES = [PowerPortTypeChoices.TYPE_DC] 20 | 21 | class PowerUsageAllSites(Script): 22 | class Meta: 23 | name = "Power Usage (all sites)" 24 | description = "Report on allocated power per site" 25 | scheduling_enabled = False 26 | commit_default = False 27 | 28 | def run(self, data, commit): 29 | output = io.StringIO() 30 | writer = csv.writer(output) 31 | writer.writerow(['Site','Allocated Draw']) 32 | for site in Site.objects.filter(status='active'): 33 | power_ports = PowerPort.objects.filter(device__site=site,device__status='active') 34 | site_draw = sum(((pp.allocated_draw or 0) for pp in power_ports)) 35 | if site_draw > 0: 36 | writer.writerow([site.name, site_draw]) 37 | return output.getvalue() 38 | 39 | class PowerUsageSingleSite(Script): 40 | class Meta: 41 | name = "Power Usage (single site)" 42 | description = "Report on allocated power for each device in a site" 43 | scheduling_enabled = False 44 | commit_default = False 45 | 46 | site = ObjectVar( 47 | model=Site, 48 | query_params={ 49 | 'status': 'active', 50 | }, 51 | label="Site", 52 | ) 53 | 54 | def run(self, data, commit): 55 | output = io.StringIO() 56 | writer = csv.writer(output) 57 | site = data['site'] 58 | power_ports = PowerPort.objects.filter(device__site=site,device__status='active') 59 | writer.writerow(['Device','Port','Allocated Draw']) 60 | site_draw = 0 61 | for pp in power_ports: 62 | if not pp.allocated_draw: 63 | continue 64 | writer.writerow([pp.device.name, pp.name, pp.allocated_draw]) 65 | site_draw += pp.allocated_draw 66 | self.log_success(f"Total allocated draw for {site}: {site_draw}W") 67 | return output.getvalue() 68 | 69 | class PowerOutletsAllSites(Script): 70 | class Meta: 71 | name = "Power Outlets (all sites)" 72 | description = "Report on total/free power outlets per site" 73 | scheduling_enabled = False 74 | commit_default = False 75 | 76 | def run(self, data, commit): 77 | output = io.StringIO() 78 | writer = csv.writer(output) 79 | writer.writerow(['Site','AC total','AC free','DC total','DC free']) 80 | for site in Site.objects.filter(status='active'): 81 | ac_total = ac_free = dc_total = dc_free = 0 82 | power_ports = PowerOutlet.objects.filter(device__site=site,device__status='active') 83 | for pp in power_ports: 84 | if pp.type in DC_TYPES: 85 | dc_total += 1 86 | dc_free += (0 if pp.mark_connected or pp.cable else 1) 87 | else: 88 | ac_total += 1 89 | ac_free += (0 if pp.mark_connected or pp.cable else 1) 90 | if dc_total > 0 or ac_total > 0: 91 | writer.writerow([site.name, ac_total, ac_free, dc_total, dc_free]) 92 | return output.getvalue() 93 | 94 | class PowerOutletsSingleSite(Script): 95 | class Meta: 96 | name = "Power Outlets (single site)" 97 | description = "Report on power outlets for each PDU in a site" 98 | scheduling_enabled = False 99 | commit_default = False 100 | 101 | site = ObjectVar( 102 | model=Site, 103 | query_params={ 104 | 'status': 'active', 105 | }, 106 | label="Site", 107 | ) 108 | 109 | def run(self, data, commit): 110 | output = io.StringIO() 111 | writer = csv.writer(output) 112 | site = data['site'] 113 | devices = Device.objects.filter(site=site,status='active') 114 | writer.writerow(['Device','Outlet Type','Total','Free']) 115 | for device in devices: 116 | count_by_type = {} # type => [total, free] 117 | for pp in device.poweroutlets.all(): 118 | c = count_by_type.setdefault(pp.type, [0,0]) 119 | c[0] += 1 120 | if not (pp.mark_connected or pp.cable): 121 | c[1] += 1 122 | for type, vals in count_by_type.items(): 123 | writer.writerow([device.name, type, vals[0], vals[1]]) 124 | return output.getvalue() 125 | -------------------------------------------------------------------------------- /scripts/rack_flipper.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2023 Per von Zweigbergk 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a copy 6 | # of this software and associated documentation files (the "Software"), to deal 7 | # in the Software without restriction, including without limitation the rights 8 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | # copies of the Software, and to permit persons to whom the Software is 10 | # furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in all 13 | # copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | # SOFTWARE. 22 | 23 | from extras.scripts import * 24 | 25 | from dcim.models import Device, Rack, RackReservation 26 | 27 | class ChangeManager: 28 | """ 29 | Convenience context manager that ensures that NetBox are snapshotted before any changes, and cleaned and saved 30 | after said changes. Any uncaught exceptions within the concept manager will inhibit saving of those changes. 31 | """ 32 | def __init__(self, obj): 33 | self.obj = obj 34 | def __enter__(self): 35 | if self.obj.pk and hasattr(self.obj, 'snapshot'): 36 | self.obj.snapshot() 37 | def __exit__(self, exc_type, exc_value, exc_tb): 38 | if exc_type is not None: 39 | return False # re-raise any exceptions without saving changes 40 | self.obj.full_clean() 41 | self.obj.save() 42 | 43 | def flip_rack(rack, log_f=lambda s:None): 44 | devices = Device.objects.filter(rack=rack, position__isnull=False) 45 | # Calculate new positions for all devices. We pre-calculated this here because we are about to remove all 46 | # devices from their existing positions, thus temporarilly removing the information about positions. 47 | new_positions = [rack.u_height - (device.position - 1) - (device.device_type.u_height - 1) for device in devices] 48 | 49 | # Remove all racked devices from rack temporarilly to avoid clashes 50 | for device in devices: 51 | with ChangeManager(device): 52 | log_f(f"Removing {device} from position {device.position}") 53 | device.position = None 54 | 55 | # Flip the units on the rack 56 | with ChangeManager(rack): 57 | rack.desc_units = not rack.desc_units 58 | log_f(f"Setting rack {rack} desc_units={rack.desc_units}") 59 | 60 | # Add the devices back to their new positions 61 | for device, position in zip(devices, new_positions): 62 | with ChangeManager(device): 63 | log_f(f"Adding {device} to position {position}") 64 | device.position = position 65 | 66 | # Deal with rack reservations 67 | for reservation in RackReservation.objects.filter(rack=rack): 68 | with ChangeManager(reservation): 69 | reservation.units = sorted(rack.u_height - (unit - 1) for unit in reservation.units) 70 | log_f(f"Updating reservation {reservation.pk} units to {repr(reservation.units)}") 71 | 72 | class RackFlipper(Script): 73 | class Meta: 74 | name = "Rack flipper" 75 | description = "This scripts flips a rack between ascending and descending units while preserving their physical locations" 76 | 77 | rack = ObjectVar( 78 | description="The rack to update", 79 | model=Rack, 80 | required=True 81 | ) 82 | 83 | def run(self, data, commit): 84 | flip_rack(data['rack'], self.log_info) 85 | -------------------------------------------------------------------------------- /scripts/renumber.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script allows you to renumber a network: it renumbers all 3 | Prefixes, IPAddresses and/or IPRanges within the given source block 4 | to the target block. 5 | 6 | BACKUP YOUR DATABASE BEFORE USING! 7 | """ 8 | 9 | from ipam.models import VRF, Prefix, IPAddress, IPRange 10 | from extras.scripts import Script, ObjectVar, IPAddressWithMaskVar, BooleanVar 11 | from utilities.exceptions import AbortScript 12 | 13 | class Renumber(Script): 14 | class Meta: 15 | name = "Renumber" 16 | description = "Renumber Prefixes, IPAddresses and Ranges" 17 | scheduling_enabled = False 18 | commit_default = False 19 | 20 | vrf = ObjectVar(model=VRF, label="VRF", required=False) 21 | source = IPAddressWithMaskVar(label="Original IP address block", required=True) 22 | target = IPAddressWithMaskVar(label="Target IP address block", required=True) 23 | renumber_prefixes = BooleanVar(label="Renumber prefixes", default=True) 24 | renumber_ipaddresses = BooleanVar(label="Renumber IP Addresses", default=True) 25 | renumber_ipranges = BooleanVar(label="Renumber IP Ranges", default=True) 26 | 27 | def run(self, data, commit): 28 | vrf = data["vrf"] 29 | source = data["source"] 30 | target = data["target"] 31 | 32 | if source.version != target.version: 33 | raise AbortScript("Source and target IP address version do not match") 34 | 35 | if source.prefixlen != target.prefixlen: 36 | raise AbortScript("Source and target prefix lengths do not match") 37 | 38 | offset = target.value - source.value 39 | if not offset: 40 | raise AbortScript("Source and target prefixes must be different") 41 | 42 | action = False 43 | 44 | if data["renumber_prefixes"]: 45 | n = 0 46 | for o in Prefix.objects.filter(vrf=vrf, prefix__net_contained_or_equal=source): 47 | o.snapshot() 48 | was = f"{o}" 49 | o.prefix.value += offset 50 | o.full_clean() 51 | o.save() 52 | self.log_info(f"Prefix {was} -> {o}") 53 | n += 1 54 | self.log_success(f"Renumbered {n} Prefixes") 55 | action = True 56 | 57 | if data["renumber_ipaddresses"]: 58 | n = 0 59 | for o in IPAddress.objects.filter(vrf=vrf, address__net_host_contained=source): 60 | o.snapshot() 61 | was = f"{o}" 62 | o.address.value += offset 63 | o.full_clean() 64 | o.save() 65 | self.log_info(f"IP Address {was} -> {o}") 66 | n += 1 67 | self.log_success(f"Renumbered {n} IP Addresses") 68 | action = True 69 | 70 | if data["renumber_ipranges"]: 71 | n = 0 72 | for o in IPRange.objects.filter(vrf=vrf, start_address__net_host_contained=source, end_address__net_host_contained=source): 73 | o.snapshot() 74 | was = f"{o}" 75 | o.start_address.value += offset 76 | o.end_address.value += offset 77 | o.full_clean() 78 | o.save() 79 | self.log_info(f"IP Range {was} -> {o}") 80 | n += 1 81 | self.log_success(f"Renumbered {n} IP Ranges") 82 | action = True 83 | 84 | if not action: 85 | self.log_info(f"No changes requested") 86 | --------------------------------------------------------------------------------