├── requirements.txt ├── .env.example ├── .github ├── dependabot.yml └── workflows │ ├── stale.yml │ └── docker.yml ├── Dockerfile ├── LICENSE ├── .dockerignore ├── log_handler.py ├── nb-dt-import.py ├── settings.py ├── .gitignore ├── README.md ├── repo.py └── netbox_api.py /requirements.txt: -------------------------------------------------------------------------------- 1 | GitPython==3.1.32 2 | pynetbox==7.4.0 3 | python-dotenv==1.0.0 4 | PyYAML==6.0.1 5 | -------------------------------------------------------------------------------- /.env.example: -------------------------------------------------------------------------------- 1 | NETBOX_URL= 2 | NETBOX_TOKEN= 3 | REPO_URL=https://github.com/netbox-community/devicetype-library.git 4 | REPO_BRANCH=master 5 | IGNORE_SSL_ERRORS=False 6 | #REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt # you should enable this if you are running on a linux system 7 | #SLUGS=c9300-48u isr4431 isr4331 8 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 2 3 | updates: 4 | - package-ecosystem: pip 5 | directory: / 6 | schedule: 7 | interval: monthly 8 | time: '02:00' 9 | timezone: America/New_York 10 | labels: 11 | - dependencies 12 | target-branch: master 13 | assignees: 14 | - "danner26" -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9-alpine 2 | 3 | ENV REPO_URL=https://github.com/netbox-community/devicetype-library.git 4 | WORKDIR /app 5 | COPY requirements.txt . 
6 | 7 | # Install dependencies 8 | RUN apk add --no-cache git ca-certificates && \ 9 | python3 -m pip install --upgrade pip && \ 10 | pip3 install -r requirements.txt 11 | 12 | # Copy over src code 13 | COPY *.py ./ 14 | 15 | # -u to avoid stdout buffering 16 | CMD ["python3","-u","nb-dt-import.py"] 17 | -------------------------------------------------------------------------------- /.github/workflows/stale.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #close-stale-issues (https://github.com/marketplace/actions/close-stale-issues) 3 | name: Close stale PRs 4 | on: # yamllint disable-line rule:truthy 5 | schedule: 6 | - cron: 0 4 * * * 7 | 8 | jobs: 9 | stale: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/stale@v5 13 | with: 14 | close-pr-message: > 15 | This PR has been automatically closed due to lack of activity. 16 | days-before-stale: 30 17 | days-before-close: 7 18 | operations-per-run: 100 19 | remove-stale-when-updated: false 20 | stale-pr-label: stale 21 | stale-pr-message: > 22 | This PR has been automatically marked as stale because it has not 23 | had recent activity. It will be closed automatically if no further 24 | progress is made. 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 Alexander Gittings 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # Git 2 | .git 3 | .gitignore 4 | .gitattributes 5 | 6 | # CI 7 | .codeclimate.yml 8 | .travis.yml 9 | .taskcluster.yml 10 | 11 | # Docker 12 | docker-compose.yml 13 | Dockerfile 14 | .docker 15 | .dockerignore 16 | 17 | # Byte-compiled / optimized / DLL files 18 | **/__pycache__/ 19 | **/*.py[cod] 20 | 21 | # C extensions 22 | *.so 23 | 24 | # Distribution / packaging 25 | .Python 26 | env/ 27 | build/ 28 | develop-eggs/ 29 | dist/ 30 | downloads/ 31 | eggs/ 32 | lib/ 33 | lib64/ 34 | parts/ 35 | sdist/ 36 | var/ 37 | *.egg-info/ 38 | .installed.cfg 39 | *.egg 40 | 41 | # PyInstaller 42 | # Usually these files are written by a python script from a template 43 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 44 | *.manifest 45 | *.spec 46 | 47 | # Installer logs 48 | pip-log.txt 49 | pip-delete-this-directory.txt 50 | 51 | # Unit test / coverage reports 52 | htmlcov/ 53 | .tox/ 54 | .coverage 55 | .cache 56 | nosetests.xml 57 | coverage.xml 58 | 59 | # Translations 60 | *.mo 61 | *.pot 62 | 63 | # Django stuff: 64 | *.log 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Virtual environment 73 | .env 74 | .venv/ 75 | venv/ 76 | 77 | # PyCharm 78 | .idea 79 | 80 | # Python mode for VIM 81 | .ropeproject 82 | **/.ropeproject 83 | 84 | # Vim swap files 85 | **/*.swp 86 | 87 | # VS Code 88 | .vscode/ -------------------------------------------------------------------------------- /.github/workflows/docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: ci 3 | 4 | on: 5 | push: 6 | branches: 7 | - 'master' 8 | - 'main' 9 | pull_request: 10 | branches: 11 | - 'master' 12 | - 'main' 13 | workflow_dispatch: 14 | release: 15 | types: [published, edited] 16 
class LogHandler:
    """Logging and fatal-error helper driven by the parsed CLI arguments.

    Verbose output is gated on ``args.verbose``; ``exception`` prints a
    friendly message for a known exception type and exits the process.
    """

    def __init__(self, args):
        # ``args`` is the argparse namespace; only ``args.verbose`` is read here.
        self.args = args

    def exception(self, exception_type, exception, stack_trace=None):
        """Print a canned message for ``exception_type`` and exit(1).

        ``exception`` is interpolated into the message; the raw
        ``stack_trace`` is printed first when --verbose was given.
        """
        exception_dict = {
            "EnvironmentError": f'Environment variable "{exception}" is not set.',
            "SSLError": f'SSL verification failed. IGNORE_SSL_ERRORS is {exception}. Set IGNORE_SSL_ERRORS to True if you want to ignore this error. EXITING.',
            "GitCommandError": f'The repo "{exception}" is not a valid git repo.',
            "GitInvalidRepositoryError": f'The repo "{exception}" is not a valid git repo.',
            "Exception": f'An unknown error occurred: "{exception}"'
        }

        if self.args.verbose and stack_trace:
            print(stack_trace)
        print(exception_dict[exception_type])
        system_exit(1)

    def verbose_log(self, message):
        # Only emit when --verbose was requested.
        if self.args.verbose:
            print(message)

    def log(self, message):
        print(message)

    def log_device_ports_created(self, created_ports=None, port_type="port"):
        """Verbose-log each created device-type port template; return the count.

        ``created_ports`` defaults to None instead of a shared mutable ``[]``
        (fixes the mutable-default-argument pitfall) and is treated as empty.
        """
        created_ports = created_ports or []
        for port in created_ports:
            self.verbose_log(f'{port_type} Template Created: {port.name} - '
                             + f'{port.type if hasattr(port, "type") else ""} - {port.device_type.id} - '
                             + f'{port.id}')
        return len(created_ports)

    def log_module_ports_created(self, created_ports=None, port_type="port"):
        """Verbose-log each created module-type port template; return the count."""
        created_ports = created_ports or []
        for port in created_ports:
            self.verbose_log(f'{port_type} Template Created: {port.name} - '
                             + f'{port.type if hasattr(port, "type") else ""} - {port.module_type.id} - '
                             + f'{port.id}')
        return len(created_ports)
settings.handle.log(f'{len(device_types)} Device-Types Found') 24 | netbox.create_manufacturers(vendors) 25 | netbox.create_device_types(device_types) 26 | 27 | if netbox.modules: 28 | settings.handle.log("Modules Enabled. Creating Modules...") 29 | files, vendors = settings.dtl_repo.get_devices( 30 | f'{settings.dtl_repo.repo_path}/module-types/', args.vendors) 31 | settings.handle.log(f'{len(vendors)} Module Vendors Found') 32 | module_types = settings.dtl_repo.parse_files(files, slugs=args.slugs) 33 | settings.handle.log(f'{len(module_types)} Module-Types Found') 34 | netbox.create_manufacturers(vendors) 35 | netbox.create_module_types(module_types) 36 | 37 | settings.handle.log('---') 38 | settings.handle.verbose_log( 39 | f'Script took {(datetime.now() - startTime)} to run') 40 | settings.handle.log(f'{netbox.counter["added"]} devices created') 41 | settings.handle.log(f'{netbox.counter["images"]} images uploaded') 42 | settings.handle.log( 43 | f'{netbox.counter["updated"]} interfaces/ports updated') 44 | settings.handle.log( 45 | f'{netbox.counter["manufacturer"]} manufacturers created') 46 | if settings.NETBOX_FEATURES['modules']: 47 | settings.handle.log( 48 | f'{netbox.counter["module_added"]} modules created') 49 | settings.handle.log( 50 | f'{netbox.counter["module_port_added"]} module interface / ports created') 51 | 52 | 53 | if __name__ == "__main__": 54 | main() 55 | -------------------------------------------------------------------------------- /settings.py: -------------------------------------------------------------------------------- 1 | from argparse import ArgumentParser 2 | import os 3 | from log_handler import LogHandler 4 | from repo import DTLRepo 5 | from dotenv import load_dotenv 6 | load_dotenv() 7 | 8 | REPO_URL = os.getenv("REPO_URL", 9 | default="https://github.com/netbox-community/devicetype-library.git") 10 | REPO_BRANCH = os.getenv("REPO_BRANCH", default="master") 11 | NETBOX_URL = os.getenv("NETBOX_URL") 12 | NETBOX_TOKEN = 
os.getenv("NETBOX_TOKEN") 13 | IGNORE_SSL_ERRORS = (os.getenv("IGNORE_SSL_ERRORS", default="False") == "True") 14 | REPO_PATH = f"{os.path.dirname(os.path.realpath(__file__))}/repo" 15 | 16 | # optionally load vendors through a comma separated list as env var 17 | VENDORS = list(filter(None, os.getenv("VENDORS", "").split(","))) 18 | 19 | # optionally load device types through a space separated list as env var 20 | SLUGS = os.getenv("SLUGS", "").split() 21 | 22 | NETBOX_FEATURES = { 23 | 'modules': False, 24 | } 25 | 26 | parser = ArgumentParser(description='Import Netbox Device Types') 27 | parser.add_argument('--vendors', nargs='+', default=VENDORS, 28 | help="List of vendors to import eg. apc cisco") 29 | parser.add_argument('--url', '--git', default=REPO_URL, 30 | help="Git URL with valid Device Type YAML files") 31 | parser.add_argument('--slugs', nargs='+', default=SLUGS, 32 | help="List of device-type slugs to import eg. ap4431 ws-c3850-24t-l") 33 | parser.add_argument('--branch', default=REPO_BRANCH, 34 | help="Git branch to use from repo") 35 | parser.add_argument('--verbose', action='store_true', default=False, 36 | help="Print verbose output") 37 | 38 | args = parser.parse_args() 39 | 40 | args.vendors = [v.casefold() 41 | for vendor in args.vendors for v in vendor.split(",") if v.strip()] 42 | args.slugs = [s for slug in args.slugs for s in slug.split(",") if s.strip()] 43 | 44 | handle = LogHandler(args) 45 | # Evaluate environment variables and exit if one of the mandatory ones are not set 46 | MANDATORY_ENV_VARS = ["REPO_URL", "NETBOX_URL", "NETBOX_TOKEN"] 47 | for var in MANDATORY_ENV_VARS: 48 | if var not in os.environ: 49 | handle.exception("EnvironmentError", var, 50 | f'Environment variable "{var}" is not set.\n\nMANDATORY_ENV_VARS: {str(MANDATORY_ENV_VARS)}.\n\nCURRENT_ENV_VARS: {str(os.environ)}') 51 | 52 | dtl_repo = DTLRepo(args, REPO_PATH, handle) 53 | -------------------------------------------------------------------------------- 
/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env* 106 | !.env.example 107 | .venv 108 | env/ 109 | venv/ 110 | ENV/ 111 | env.bak/ 112 | venv.bak/ 113 | 114 | # Spyder project settings 115 | .spyderproject 116 | .spyproject 117 | 118 | # Rope project settings 119 | .ropeproject 120 | 121 | # mkdocs documentation 122 | /site 123 | 124 | # mypy 125 | .mypy_cache/ 126 | .dmypy.json 127 | dmypy.json 128 | 129 | # Pyre type checker 130 | .pyre/ 131 | 132 | # Editor 133 | .vscode 134 | 135 | repo 136 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Netbox Device Type Import 2 | 3 | This library is intended to be your friend and help you import all the device-types defined within the the [NetBox Device Type Library Repository](https://github.com/netbox-community/devicetype-library). 4 | 5 | > Tested working with 2.9.4, 2.10.4 6 | 7 | ## 🪄 Description 8 | 9 | This script will clone a copy of the `netbox-community/devicetype-library` repository to your machine to allow it to import the device types you would like without copy and pasting them into the Netbox UI. 10 | 11 | ## 🚀 Getting Started 12 | 13 | 1. This script is written in Python, so lets setup a virtual environment. 14 | 15 | ``` 16 | git clone https://github.com/netbox-community/Device-Type-Library-Import.git 17 | cd Device-Type-Library-Import 18 | python3 -m venv venv 19 | source venv/bin/activate 20 | ``` 21 | 22 | 2. Now that we have the basics setup, we'll need to install the requirements. 23 | 24 | ``` 25 | pip install -r requirements.txt 26 | ``` 27 | 28 | 3. There are two variables that are required when using this script to import device types into your Netbox installation. 
(1) Your Netbox instance URL and (2) a token with **write rights**. 29 | 30 | Copy the existing `.env.example` to your own `.env` file, and fill in the variables. 31 | 32 | ``` 33 | cp .env.example .env 34 | vim .env 35 | ``` 36 | 37 | Finally, we are able to execute the script and import some device templates! 38 | 39 | ## 🔌 Usage 40 | 41 | To use the script, simply execute the script as follows. Make sure you're still in the activated virtual environment we created before. 42 | 43 | ``` 44 | ./nb-dt-import.py 45 | ``` 46 | 47 | This will clone the latest master branch from the `netbox-community/devicetype-library` from Github and install it into the `repo` subdirectory. If this directory already exists, it will perform a `git pull` to update the repository instead. 48 | 49 | Next, it will loop over every manufacturer and every device of every manufacturer and begin checking if your Netbox install already has them, and if not, creates them. It will skip preexisting manufacturers, devices, interfaces, etc. so as to not end up with duplicate entries in your Netbox instance. 50 | 51 | ### 🧰 Arguments 52 | 53 | This script currently accepts a list of vendors as an argument, so that you can selectively import devices. 54 | 55 | To import only device by APC, for example: 56 | 57 | ``` 58 | ./nb-dt-import.py --vendors apc 59 | ``` 60 | 61 | `--vendors` can also accept a comma separated list of vendors if you want to import multiple. 62 | 63 | ``` 64 | ./nb-dt-import.py --vendors apc,juniper 65 | ``` 66 | 67 | ## Docker build 68 | 69 | It's possible to use this project as a docker container. 70 | 71 | To build : 72 | 73 | ``` 74 | docker build -t netbox-devicetype-import-library . 
75 | ``` 76 | 77 | Alternatively you can pull a pre-built image from Github Container Registry (ghcr.io): 78 | 79 | ``` 80 | docker pull ghcr.io/minitriga/netbox-device-type-library-import 81 | ``` 82 | 83 | The container supports the following env var as configuration : 84 | 85 | - `REPO_URL`, the repo to look for device types (defaults to _https://github.com/netbox-community/devicetype-library.git_) 86 | - `REPO_BRANCH`, the branch to check out if appropriate, defaults to master. 87 | - `NETBOX_URL`, used to access netbox 88 | - `NETBOX_TOKEN`, token for accessing netbox 89 | - `VENDORS`, a comma-separated list of vendors to import (defaults to None) 90 | - `REQUESTS_CA_BUNDLE`, path to a CA_BUNDLE for validation if you are using self-signed certificates(file must be included in the container) 91 | 92 | To run : 93 | 94 | ``` 95 | docker run -e "NETBOX_URL=http://netbox:8080/" -e "NETBOX_TOKEN=98765434567890" ghcr.io/minitriga/netbox-device-type-library-import 96 | ``` 97 | 98 | ## 🧑‍💻 Contributing 99 | 100 | We're happy about any pull requests! 
class DTLRepo:
    """Clone or update the devicetype-library git repo and parse its YAML files."""

    def __init__(self, args, repo_path, exception_handler):
        """Prepare a local checkout of ``args.url``.

        args: parsed CLI namespace providing ``url`` and ``branch``.
        repo_path: local checkout directory; pulled if it exists, cloned otherwise.
        exception_handler: LogHandler used for logging and fatal errors.
        """
        self.handle = exception_handler
        self.yaml_extensions = ['yaml', 'yml']
        self.url = args.url
        self.repo_path = repo_path
        self.branch = args.branch
        self.repo = None
        self.cwd = os.getcwd()

        if os.path.isdir(self.repo_path):
            self.pull_repo()
        else:
            self.clone_repo()

    def get_relative_path(self):
        return self.repo_path

    def get_absolute_path(self):
        return os.path.join(self.cwd, self.repo_path)

    def get_devices_path(self):
        return os.path.join(self.get_absolute_path(), 'device-types')

    def get_modules_path(self):
        return os.path.join(self.get_absolute_path(), 'module-types')

    def slug_format(self, name):
        """Lowercase ``name`` and collapse runs of non-word chars into '-'."""
        # Raw string avoids the invalid-escape SyntaxWarning for "\W" on 3.12+.
        return re_sub(r'\W+', '-', name.lower())

    def pull_repo(self):
        """Update an existing checkout and switch to the configured branch."""
        try:
            self.handle.log("Package devicetype-library is already installed, "
                            + f"updating {self.get_absolute_path()}")
            self.repo = Repo(self.repo_path)
            if not self.repo.remotes.origin.url.endswith('.git'):
                self.handle.exception("GitInvalidRepositoryError", self.repo.remotes.origin.url,
                                      f"Origin URL {self.repo.remotes.origin.url} does not end with .git")
            # NOTE(review): the pull runs before the checkout, so switching
            # branches only pulls the previously checked-out branch — confirm intent.
            self.repo.remotes.origin.pull()
            self.repo.git.checkout(self.branch)
            self.handle.verbose_log(
                f"Pulled Repo {self.repo.remotes.origin.url}")
        except exc.GitCommandError as git_error:
            self.handle.exception(
                "GitCommandError", self.repo.remotes.origin.url, git_error)
        except Exception as git_error:
            self.handle.exception(
                "Exception", 'Git Repository Error', git_error)

    def clone_repo(self):
        """Fresh clone of ``self.url`` at ``self.branch`` into the repo path."""
        try:
            self.repo = Repo.clone_from(
                self.url, self.get_absolute_path(), branch=self.branch)
            self.handle.log(
                f"Package Installed {self.repo.remotes.origin.url}")
        except exc.GitCommandError as git_error:
            self.handle.exception("GitCommandError", self.url, git_error)
        except Exception as git_error:
            self.handle.exception(
                "Exception", 'Git Repository Error', git_error)

    def get_devices(self, base_path, vendors: list = None):
        """Collect YAML files and vendor metadata under ``base_path``.

        Returns (files, discovered_vendors) where discovered_vendors is a list
        of {'name', 'slug'} dicts.  The special "testing" folder is always
        skipped; when ``vendors`` is given, only folders whose casefolded name
        appears in it are selected.
        """
        files = []
        discovered_vendors = []
        vendor_dirs = os.listdir(base_path)

        for folder in [vendor for vendor in vendor_dirs if not vendors or vendor.casefold() in vendors]:
            if folder.casefold() != "testing":
                discovered_vendors.append({'name': folder,
                                           'slug': self.slug_format(folder)})
                for extension in self.yaml_extensions:
                    # os.path.join also tolerates a base_path without a trailing slash,
                    # which the previous string concatenation did not.
                    files.extend(glob(os.path.join(base_path, folder, f'*.{extension}')))
        return files, discovered_vendors

    def parse_files(self, files: list, slugs: list = None):
        """Parse YAML device/module-type files into dicts.

        Normalizes ``manufacturer`` into a {'name', 'slug'} dict and records
        the source path in ``data['src']`` (used later to resolve relative
        image paths).  When ``slugs`` is given, entries matching none of the
        given slugs are skipped.  Unparseable files are logged (verbose only)
        and skipped.
        """
        device_types = []
        for file in files:
            with open(file, 'r') as stream:
                try:
                    data = yaml.safe_load(stream)
                except yaml.YAMLError as excep:
                    self.handle.verbose_log(excep)
                    continue
            manufacturer = data['manufacturer']
            data['manufacturer'] = {
                'name': manufacturer, 'slug': self.slug_format(manufacturer)}

            # Save file location to resolve any relative paths for images
            data['src'] = file

            if slugs and not any(s.casefold() in data['slug'].casefold() for s in slugs):
                self.handle.verbose_log(f"Skipping {data['model']}")
                continue

            device_types.append(data)
        return device_types
class NetBox:
    """Wrapper around the pynetbox API used by the importer.

    Holds the shared run counters, detects feature support from the NetBox
    version, and creates manufacturers, device types, and module types.
    """

    def __init__(self, settings):
        """Connect to NetBox using the module-level ``settings`` object.

        Reads NETBOX_URL, NETBOX_TOKEN, IGNORE_SSL_ERRORS, and the shared
        LogHandler (``settings.handle``) from it.
        """
        # Aggregate counters reported at the end of the run.
        self.counter = Counter(
            added=0,
            updated=0,
            manufacturer=0,
            module_added=0,
            module_port_added=0,
            images=0,
        )
        self.url = settings.NETBOX_URL
        self.token = settings.NETBOX_TOKEN
        self.handle = settings.handle
        self.netbox = None
        self.ignore_ssl = settings.IGNORE_SSL_ERRORS
        self.modules = False      # True when NetBox >= 3.2 (module types supported)
        self.new_filters = False  # True when NetBox >= 4.1 (renamed template filters)
        self.connect_api()
        self.verify_compatibility()
        self.existing_manufacturers = self.get_manufacturers()
        self.device_types = DeviceTypes(self.netbox, self.handle, self.counter,
                                        self.ignore_ssl, self.new_filters)

    def connect_api(self):
        """Create the pynetbox client, optionally disabling SSL verification."""
        try:
            self.netbox = pynetbox.api(self.url, token=self.token)
            if self.ignore_ssl:
                self.handle.verbose_log("IGNORE_SSL_ERRORS is True, catching exception and disabling SSL verification.")
                #requests.packages.urllib3.disable_warnings()
                self.netbox.http_session.verify = False
        except Exception as e:
            self.handle.exception("Exception", 'NetBox API Error', e)

    def get_api(self):
        return self.netbox

    def get_counter(self):
        return self.counter

    def verify_compatibility(self):
        """Set feature flags from the reported NetBox version.

        ``self.netbox.version`` is a '<major>.<minor>' string.  Tuple
        comparison handles future major versions correctly — the previous
        ``major >= 4 and minor >= 1`` test wrongly rejected e.g. 5.0.
        """
        version = tuple(int(x) for x in self.netbox.version.split('.'))

        # Module types were introduced in NetBox 3.2.
        # Might want to check for the module-types entry as well?
        if version >= (3, 2):
            self.modules = True

        # NetBox 4.1 renamed the template filter names
        # (https://github.com/netbox-community/netbox/issues/15410).
        if version >= (4, 1):
            self.new_filters = True
            self.handle.log(f'Netbox version {self.netbox.version} found. Using new filters.')

    def get_manufacturers(self):
        """Map manufacturer display name -> record for every manufacturer."""
        return {str(item): item for item in self.netbox.dcim.manufacturers.all()}

    def create_manufacturers(self, vendors):
        """Create any of ``vendors`` ({'name','slug'} dicts) missing from NetBox."""
        to_create = []
        self.existing_manufacturers = self.get_manufacturers()
        for vendor in vendors:
            try:
                existing = self.existing_manufacturers[vendor["name"]]
                self.handle.verbose_log(f'Manufacturer Exists: {existing.name} - {existing.id}')
            except KeyError:
                to_create.append(vendor)
                self.handle.verbose_log(f"Manufacturer queued for addition: {vendor['name']}")

        if to_create:
            try:
                created_manufacturers = self.netbox.dcim.manufacturers.create(to_create)
                for manufacturer in created_manufacturers:
                    self.handle.verbose_log(f'Manufacturer Created: {manufacturer.name} - '
                                            + f'{manufacturer.id}')
                    self.counter.update({'manufacturer': 1})
            except pynetbox.RequestError as request_error:
                self.handle.log("Error creating manufacturers")
                self.handle.verbose_log(f"Error during manufacturer creation. - {request_error.error}")

    def create_device_types(self, device_types_to_add):
        """Create each device type (and its port templates / images) in NetBox.

        Each entry is a parsed YAML dict carrying a 'src' key (source file
        path, used to locate elevation images) which is stripped before the
        API call, as are the front_image/rear_image flags.
        """
        for device_type in device_types_to_add:

            # Remove file base path
            src_file = device_type["src"]
            del device_type["src"]

            # Pre-process front/rear_image flag, remove it if present
            saved_images = {}
            image_base = os.path.dirname(src_file).replace("device-types", "elevation-images")
            for i in ["front_image", "rear_image"]:
                if i in device_type:
                    if device_type[i]:
                        # Elevation images live alongside the YAML, named <slug>.front.* / <slug>.rear.*
                        image_glob = f"{image_base}/{device_type['slug']}.{i.split('_')[0]}.*"
                        images = glob.glob(image_glob, recursive=False)
                        if images:
                            saved_images[i] = images[0]
                        else:
                            self.handle.log(f"Error locating image file using '{image_glob}'")
                    del device_type[i]

            try:
                dt = self.device_types.existing_device_types[device_type["model"]]
                self.handle.verbose_log(f'Device Type Exists: {dt.manufacturer.name} - '
                                        + f'{dt.model} - {dt.id}')
            except KeyError:
                try:
                    dt = self.netbox.dcim.device_types.create(device_type)
                    self.counter.update({'added': 1})
                    self.handle.verbose_log(f'Device Type Created: {dt.manufacturer.name} - '
                                            + f'{dt.model} - {dt.id}')
                except pynetbox.RequestError as e:
                    self.handle.log(f'Error {e.error} creating device type:'
                                    f' {device_type["manufacturer"]["name"]} {device_type["model"]}')
                    continue

            if "interfaces" in device_type:
                self.device_types.create_interfaces(device_type["interfaces"], dt.id)
            if "power-ports" in device_type:
                self.device_types.create_power_ports(device_type["power-ports"], dt.id)
            if "power-port" in device_type:
                self.device_types.create_power_ports(device_type["power-port"], dt.id)
            if "console-ports" in device_type:
                self.device_types.create_console_ports(device_type["console-ports"], dt.id)
            if "power-outlets" in device_type:
                self.device_types.create_power_outlets(device_type["power-outlets"], dt.id)
            if "console-server-ports" in device_type:
                self.device_types.create_console_server_ports(device_type["console-server-ports"], dt.id)
            if "rear-ports" in device_type:
                self.device_types.create_rear_ports(device_type["rear-ports"], dt.id)
            if "front-ports" in device_type:
                self.device_types.create_front_ports(device_type["front-ports"], dt.id)
            if "device-bays" in device_type:
                self.device_types.create_device_bays(device_type["device-bays"], dt.id)
            if self.modules and 'module-bays' in device_type:
                self.device_types.create_module_bays(device_type['module-bays'], dt.id)

            # Finally, update images if any
            if saved_images:
                self.device_types.upload_images(self.url, self.token, saved_images, dt.id)

    def create_module_types(self, module_types):
        """Create each module type (and its port templates) in NetBox."""
        # Index existing module types as {manufacturer_slug: {model: record}}.
        all_module_types = {}
        for curr_nb_mt in self.netbox.dcim.module_types.all():
            if curr_nb_mt.manufacturer.slug not in all_module_types:
                all_module_types[curr_nb_mt.manufacturer.slug] = {}

            all_module_types[curr_nb_mt.manufacturer.slug][curr_nb_mt.model] = curr_nb_mt

        for curr_mt in module_types:
            try:
                module_type_res = all_module_types[curr_mt['manufacturer']['slug']][curr_mt["model"]]
                self.handle.verbose_log(f'Module Type Exists: {module_type_res.manufacturer.name} - '
                                        + f'{module_type_res.model} - {module_type_res.id}')
            except KeyError:
                try:
                    module_type_res = self.netbox.dcim.module_types.create(curr_mt)
                    self.counter.update({'module_added': 1})
                    self.handle.verbose_log(f'Module Type Created: {module_type_res.manufacturer.name} - '
                                            + f'{module_type_res.model} - {module_type_res.id}')
                except pynetbox.RequestError as exce:
                    self.handle.log(f"Error '{exce.error}' creating module type: " +
                                    f"{curr_mt}")
                    # Skip port creation when the module type could not be
                    # created: module_type_res would be unbound here (this
                    # mirrors the `continue` in create_device_types).
                    continue

            if "interfaces" in curr_mt:
                self.device_types.create_module_interfaces(curr_mt["interfaces"], module_type_res.id)
            if "power-ports" in curr_mt:
                self.device_types.create_module_power_ports(curr_mt["power-ports"], module_type_res.id)
            if "console-ports" in curr_mt:
                self.device_types.create_module_console_ports(curr_mt["console-ports"], module_type_res.id)
            if "power-outlets" in curr_mt:
                self.device_types.create_module_power_outlets(curr_mt["power-outlets"], module_type_res.id)
            if "console-server-ports" in curr_mt:
                self.device_types.create_module_console_server_ports(curr_mt["console-server-ports"], module_type_res.id)
            if "rear-ports" in curr_mt:
                self.device_types.create_module_rear_ports(curr_mt["rear-ports"], module_type_res.id)
            if "front-ports" in curr_mt:
                self.device_types.create_module_front_ports(curr_mt["front-ports"], module_type_res.id)
self.netbox.dcim.power_port_templates.filter(**{'module_type_id' if self.new_filters else 'moduletype_id': module_type})} 211 | 212 | def get_module_rear_ports(self, module_type): 213 | return {str(item): item for item in self.netbox.dcim.rear_port_templates.filter(**{'module_type_id' if self.new_filters else 'moduletype_id': module_type})} 214 | 215 | def get_device_type_ports_to_create(self, dcim_ports, device_type, existing_ports): 216 | to_create = [port for port in dcim_ports if port['name'] not in existing_ports] 217 | for port in to_create: 218 | port['device_type'] = device_type 219 | 220 | return to_create 221 | 222 | def get_module_type_ports_to_create(self, module_ports, module_type, existing_ports): 223 | to_create = [port for port in module_ports if port['name'] not in existing_ports] 224 | for port in to_create: 225 | port['module_type'] = module_type 226 | 227 | return to_create 228 | 229 | def create_interfaces(self, interfaces, device_type): 230 | existing_interfaces = {str(item): item for item in self.netbox.dcim.interface_templates.filter( 231 | **{'device_type_id' if self.new_filters else 'devicetype_id': device_type})} 232 | to_create = self.get_device_type_ports_to_create( 233 | interfaces, device_type, existing_interfaces) 234 | 235 | if to_create: 236 | try: 237 | self.counter.update({'updated': 238 | self.handle.log_device_ports_created( 239 | self.netbox.dcim.interface_templates.create(to_create), "Interface") 240 | }) 241 | except pynetbox.RequestError as excep: 242 | self.handle.log(f"Error '{excep.error}' creating Interface") 243 | 244 | def create_power_ports(self, power_ports, device_type): 245 | existing_power_ports = self.get_power_ports(device_type) 246 | to_create = self.get_device_type_ports_to_create(power_ports, device_type, existing_power_ports) 247 | 248 | if to_create: 249 | try: 250 | self.counter.update({'updated': 251 | self.handle.log_device_ports_created( 252 | self.netbox.dcim.power_port_templates.create(to_create), 
"Power Port") 253 | }) 254 | except pynetbox.RequestError as excep: 255 | self.handle.log(f"Error '{excep.error}' creating Power Port") 256 | 257 | def create_console_ports(self, console_ports, device_type): 258 | existing_console_ports = {str(item): item for item in self.netbox.dcim.console_port_templates.filter(**{'device_type_id' if self.new_filters else 'devicetype_id': device_type})} 259 | to_create = self.get_device_type_ports_to_create(console_ports, device_type, existing_console_ports) 260 | 261 | if to_create: 262 | try: 263 | self.counter.update({'updated': 264 | self.handle.log_device_ports_created( 265 | self.netbox.dcim.console_port_templates.create(to_create), "Console Port") 266 | }) 267 | except pynetbox.RequestError as excep: 268 | self.handle.log(f"Error '{excep.error}' creating Console Port") 269 | 270 | def create_power_outlets(self, power_outlets, device_type): 271 | existing_power_outlets = {str(item): item for item in self.netbox.dcim.power_outlet_templates.filter(**{'device_type_id' if self.new_filters else 'devicetype_id': device_type})} 272 | to_create = self.get_device_type_ports_to_create(power_outlets, device_type, existing_power_outlets) 273 | 274 | if to_create: 275 | existing_power_ports = self.get_power_ports(device_type) 276 | for outlet in to_create: 277 | try: 278 | power_port = existing_power_ports[outlet["power_port"]] 279 | outlet['power_port'] = power_port.id 280 | except KeyError: 281 | pass 282 | 283 | try: 284 | self.counter.update({'updated': 285 | self.handle.log_device_ports_created( 286 | self.netbox.dcim.power_outlet_templates.create(to_create), "Power Outlet") 287 | }) 288 | except pynetbox.RequestError as excep: 289 | self.handle.log(f"Error '{excep.error}' creating Power Outlet") 290 | 291 | def create_console_server_ports(self, console_server_ports, device_type): 292 | existing_console_server_ports = {str(item): item for item in self.netbox.dcim.console_server_port_templates.filter(**{'device_type_id' if 
self.new_filters else 'devicetype_id': device_type})} 293 | to_create = self.get_device_type_ports_to_create(console_server_ports, device_type, existing_console_server_ports) 294 | 295 | if to_create: 296 | try: 297 | self.counter.update({'updated': 298 | self.handle.log_device_ports_created( 299 | self.netbox.dcim.console_server_port_templates.create(to_create), "Console Server Port") 300 | }) 301 | except pynetbox.RequestError as excep: 302 | self.handle.log(f"Error '{excep.error}' creating Console Server Port") 303 | 304 | def create_rear_ports(self, rear_ports, device_type): 305 | existing_rear_ports = self.get_rear_ports(device_type) 306 | to_create = self.get_device_type_ports_to_create(rear_ports, device_type, existing_rear_ports) 307 | 308 | if to_create: 309 | try: 310 | self.counter.update({'updated': 311 | self.handle.log_device_ports_created( 312 | self.netbox.dcim.rear_port_templates.create(to_create), "Rear Port") 313 | }) 314 | except pynetbox.RequestError as excep: 315 | self.handle.log(f"Error '{excep.error}' creating Rear Port") 316 | 317 | def create_front_ports(self, front_ports, device_type): 318 | existing_front_ports = {str(item): item for item in self.netbox.dcim.front_port_templates.filter(**{'device_type_id' if self.new_filters else 'devicetype_id': device_type})} 319 | to_create = self.get_device_type_ports_to_create(front_ports, device_type, existing_front_ports) 320 | 321 | if to_create: 322 | all_rearports = self.get_rear_ports(device_type) 323 | for port in to_create: 324 | try: 325 | rear_port = all_rearports[port["rear_port"]] 326 | port['rear_port'] = rear_port.id 327 | except KeyError: 328 | self.handle.log(f'Could not find Rear Port for Front Port: {port["name"]} - ' 329 | + f'{port["type"]} - {device_type}') 330 | 331 | try: 332 | self.counter.update({'updated': 333 | self.handle.log_device_ports_created( 334 | self.netbox.dcim.front_port_templates.create(to_create), "Front Port") 335 | }) 336 | except pynetbox.RequestError as 
excep: 337 | self.handle.log(f"Error '{excep.error}' creating Front Port") 338 | 339 | def create_device_bays(self, device_bays, device_type): 340 | existing_device_bays = {str(item): item for item in self.netbox.dcim.device_bay_templates.filter(**{'device_type_id' if self.new_filters else 'devicetype_id': device_type})} 341 | to_create = self.get_device_type_ports_to_create(device_bays, device_type, existing_device_bays) 342 | 343 | if to_create: 344 | try: 345 | self.counter.update({'updated': 346 | self.handle.log_device_ports_created( 347 | self.netbox.dcim.device_bay_templates.create(to_create), "Device Bay") 348 | }) 349 | except pynetbox.RequestError as excep: 350 | self.handle.log(f"Error '{excep.error}' creating Device Bay") 351 | 352 | def create_module_bays(self, module_bays, device_type): 353 | existing_module_bays = {str(item): item for item in self.netbox.dcim.module_bay_templates.filter(**{'device_type_id' if self.new_filters else 'devicetype_id': device_type})} 354 | to_create = self.get_device_type_ports_to_create(module_bays, device_type, existing_module_bays) 355 | 356 | if to_create: 357 | try: 358 | self.counter.update({'updated': 359 | self.handle.log_device_ports_created( 360 | self.netbox.dcim.module_bay_templates.create(to_create), "Module Bay") 361 | }) 362 | except pynetbox.RequestError as excep: 363 | self.handle.log(f"Error '{excep.error}' creating Module Bay") 364 | 365 | def create_module_interfaces(self, module_interfaces, module_type): 366 | existing_interfaces = {str(item): item for item in self.netbox.dcim.interface_templates.filter(**{'module_type_id' if self.new_filters else 'moduletype_id': module_type})} 367 | to_create = self.get_module_type_ports_to_create(module_interfaces, module_type, existing_interfaces) 368 | 369 | if to_create: 370 | try: 371 | self.counter.update({'updated': 372 | self.handle.log_module_ports_created( 373 | self.netbox.dcim.interface_templates.create(to_create), "Module Interface") 374 | }) 375 | 
except pynetbox.RequestError as excep: 376 | self.handle.log(f"Error '{excep.error}' creating Module Interface") 377 | 378 | def create_module_power_ports(self, power_ports, module_type): 379 | existing_power_ports = self.get_module_power_ports(module_type) 380 | to_create = self.get_module_type_ports_to_create(power_ports, module_type, existing_power_ports) 381 | 382 | if to_create: 383 | try: 384 | self.counter.update({'updated': 385 | self.handle.log_module_ports_created( 386 | self.netbox.dcim.power_port_templates.create(to_create), "Module Power Port") 387 | }) 388 | except pynetbox.RequestError as excep: 389 | self.handle.log(f"Error '{excep.error}' creating Module Power Port") 390 | 391 | def create_module_console_ports(self, console_ports, module_type): 392 | existing_console_ports = {str(item): item for item in self.netbox.dcim.console_port_templates.filter(**{'module_type_id' if self.new_filters else 'moduletype_id': module_type})} 393 | to_create = self.get_module_type_ports_to_create(console_ports, module_type, existing_console_ports) 394 | 395 | if to_create: 396 | try: 397 | self.counter.update({'updated': 398 | self.handle.log_module_ports_created( 399 | self.netbox.dcim.console_port_templates.create(to_create), "Module Console Port") 400 | }) 401 | except pynetbox.RequestError as excep: 402 | self.handle.log(f"Error '{excep.error}' creating Module Console Port") 403 | 404 | def create_module_power_outlets(self, power_outlets, module_type): 405 | existing_power_outlets = {str(item): item for item in self.netbox.dcim.power_outlet_templates.filter(**{'module_type_id' if self.new_filters else 'moduletype_id': module_type})} 406 | to_create = self.get_module_type_ports_to_create(power_outlets, module_type, existing_power_outlets) 407 | 408 | if to_create: 409 | existing_power_ports = self.get_module_power_ports(module_type) 410 | for outlet in to_create: 411 | try: 412 | power_port = existing_power_ports[outlet["power_port"]] 413 | outlet['power_port'] = 
power_port.id 414 | except KeyError: 415 | pass 416 | 417 | try: 418 | self.counter.update({'updated': 419 | self.handle.log_module_ports_created( 420 | self.netbox.dcim.power_outlet_templates.create(to_create), "Module Power Outlet") 421 | }) 422 | except pynetbox.RequestError as excep: 423 | self.handle.log(f"Error '{excep.error}' creating Module Power Outlet") 424 | 425 | def create_module_console_server_ports(self, console_server_ports, module_type): 426 | existing_console_server_ports = {str(item): item for item in self.netbox.dcim.console_server_port_templates.filter(**{'module_type_id' if self.new_filters else 'moduletype_id': module_type})} 427 | to_create = self.get_module_type_ports_to_create(console_server_ports, module_type, existing_console_server_ports) 428 | 429 | if to_create: 430 | try: 431 | self.counter.update({'updated': 432 | self.handle.log_module_ports_created( 433 | self.netbox.dcim.console_server_port_templates.create(to_create), "Module Console Server Port") 434 | }) 435 | except pynetbox.RequestError as excep: 436 | self.handle.log(f"Error '{excep.error}' creating Module Console Server Port") 437 | 438 | def create_module_rear_ports(self, rear_ports, module_type): 439 | existing_rear_ports = self.get_module_rear_ports(module_type) 440 | to_create = self.get_module_type_ports_to_create(rear_ports, module_type, existing_rear_ports) 441 | 442 | if to_create: 443 | try: 444 | self.counter.update({'updated': 445 | self.handle.log_module_ports_created( 446 | self.netbox.dcim.rear_port_templates.create(to_create), "Module Rear Port") 447 | }) 448 | except pynetbox.RequestError as excep: 449 | self.handle.log(f"Error '{excep.error}' creating Module Rear Port") 450 | 451 | def create_module_front_ports(self, front_ports, module_type): 452 | existing_front_ports = {str(item): item for item in self.netbox.dcim.front_port_templates.filter(**{'module_type_id' if self.new_filters else 'moduletype_id': module_type})} 453 | to_create = 
self.get_module_type_ports_to_create(front_ports, module_type, existing_front_ports) 454 | 455 | if to_create: 456 | existing_rear_ports = self.get_module_rear_ports(module_type) 457 | for port in to_create: 458 | try: 459 | rear_port = existing_rear_ports[port["rear_port"]] 460 | port['rear_port'] = rear_port.id 461 | except KeyError: 462 | self.handle.log(f'Could not find Rear Port for Front Port: {port["name"]} - ' 463 | + f'{port["type"]} - {module_type}') 464 | 465 | try: 466 | self.counter.update({'updated': 467 | self.handle.log_module_ports_created( 468 | self.netbox.dcim.front_port_templates.create(to_create), "Module Front Port") 469 | }) 470 | except pynetbox.RequestError as excep: 471 | self.handle.log(f"Error '{excep.error}' creating Module Front Port") 472 | 473 | def upload_images(self,baseurl,token,images,device_type): 474 | '''Upload front_image and/or rear_image for the given device type 475 | 476 | Args: 477 | baseurl: URL for Netbox instance 478 | token: Token to access Netbox instance 479 | images: map of front_image and/or rear_image filename 480 | device_type: id for the device-type to update 481 | 482 | Returns: 483 | None 484 | ''' 485 | url = f"{baseurl}/api/dcim/device-types/{device_type}/" 486 | headers = { "Authorization": f"Token {token}" } 487 | 488 | files = { i: (os.path.basename(f), open(f,"rb") ) for i,f in images.items() } 489 | response = requests.patch(url, headers=headers, files=files, verify=(not self.ignore_ssl)) 490 | 491 | self.handle.log( f'Images {images} updated at {url}: {response}' ) 492 | self.counter["images"] += len(images) 493 | --------------------------------------------------------------------------------