├── requirements.txt ├── .vscode ├── settings.json └── launch.json ├── netbox-2.3.4-model.png ├── conf.sample ├── LICENSE.md ├── README.md ├── .gitignore ├── clean_netbox.py └── racktables2netbox.py /requirements.txt: -------------------------------------------------------------------------------- 1 | PyMySQL==1.0.2 2 | pynetbox==5.3.1 3 | slugify==0.0.1 4 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "python.linting.pylintEnabled": true, 3 | "python.linting.enabled": true 4 | } -------------------------------------------------------------------------------- /netbox-2.3.4-model.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/goebelmeier/racktables2netbox/HEAD/netbox-2.3.4-model.png -------------------------------------------------------------------------------- /conf.sample: -------------------------------------------------------------------------------- 1 | # ====== MySQL Source (Racktables) ====== # 2 | [MySQL] 3 | DB_IP = racktables.example.net 4 | DB_PORT = 3306 5 | DB_NAME = racktables 6 | DB_USER = netbox 7 | DB_PWD = PASSWORD 8 | # ====== Log settings ==================== # 9 | [Log] 10 | LOGFILE = migration.log 11 | DEBUG_LOG = debug.log 12 | CLEAN_LOG = clean_netbox.log 13 | STDOUT = True # print to STDOUT 14 | DEBUG = True # write debug log 15 | # ====== NetBox upload settings ========= # 16 | [NetBox] 17 | NETBOX_TOKEN = NETBOX_API_TOKEN 18 | NETBOX_HOST = https://netbox.example.net 19 | # ====== Other settings ========= # 20 | [Misc] 21 | CHILD_AS_BUILDING = True # use RTs sub-location as Device42 building. If False, use it as a Device42 room. 22 | ROW_AS_ROOM = True # use RTs row as Device42 room. 23 | # Note: Rooms are required because racks are mounted to rooms, not to buildings! 24 | PDU_MOUNT = left # Can be one of: left, right, above, below. Used for Zero-U PDU migration. Default is left 25 | PDU_ORIENTATION = front # can be one of: front, back. Used for Zero-U PDU migration. Default is front 26 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Timo Reimann 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
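The settings in `conf.sample` above are shared by both scripts. As a minimal sketch (mirroring the loading code in `racktables2netbox.py` further below, with the placeholder host names and credentials from `conf.sample`), the `[MySQL]` and `[NetBox]` sections are consumed roughly like this:

```python
# Minimal sketch of how the `conf` file is consumed (mirrors racktables2netbox.py).
import configparser

import pymysql
import pynetbox

config = configparser.ConfigParser()
config.read('conf')  # copied from conf.sample and edited

# RackTables source database ([MySQL] section)
con = pymysql.connect(
    host=config['MySQL']['DB_IP'],
    port=int(config['MySQL']['DB_PORT']),
    db=config['MySQL']['DB_NAME'],
    user=config['MySQL']['DB_USER'],
    passwd=config['MySQL']['DB_PWD'],
)

# NetBox target API ([NetBox] section)
netbox = pynetbox.api(config['NetBox']['NETBOX_HOST'], token=config['NetBox']['NETBOX_TOKEN'])
```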
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # racktables2netbox 2 | A [RackTables](https://github.com/racktables/racktables) to [NetBox](https://github.com/digitalocean/netbox) migration utility. Use this small tool to migrate an existing RackTables installation to NetBox. 3 | 4 | ## Installation 5 | ``` 6 | curl --location --output racktables2netbox.zip https://github.com/goebelmeier/racktables2netbox/archive/master.zip 7 | unzip racktables2netbox.zip 8 | cd racktables2netbox-master 9 | cp conf.sample conf 10 | ``` 11 | 12 | ## Usage 13 | 1. Create a NetBox API token 14 | 2. Create a read-only RackTables database user 15 | 3. Edit ``conf`` to match your environment (URLs, credentials, ...) 16 | 4. Run `python3 racktables2netbox.py` 17 | 5. Optional: to return to a clean NetBox installation, run `python3 clean_netbox.py` 18 | 19 | ## Contributing 20 | 1. Migration should follow a strict order. Please have a look at the corresponding [wiki page](https://github.com/goebelmeier/racktables2netbox/wiki/Migration-order) 21 | 2. Fork it () 22 | 3. Create your feature branch (`git checkout -b feature/fooBar`) 23 | 4. Commit your changes (`git commit -am 'Add some fooBar'`) 24 | 5. Push to the branch (`git push origin feature/fooBar`) 25 | 6. Create a new Pull Request 26 | 27 | ## Credits 28 | Thanks to [Device42](https://www.device42.com/), who have already written a [RackTables to Device42 migration utility](https://github.com/device42/Racktables-to-Device42-Migration). I was able to use it as a starting point and am rewriting it step by step towards NetBox. 29 | 30 | ## License 31 | racktables2netbox is licensed under the MIT license. See [LICENSE.md](LICENSE.md) for more information. -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | conf 2 | .vscode/* 3 | !.vscode/settings.json 4 | !.vscode/tasks.json 5 | !.vscode/launch.json 6 | !.vscode/extensions.json 7 | 8 | # Byte-compiled / optimized / DLL files 9 | __pycache__/ 10 | *.py[cod] 11 | *$py.class 12 | 13 | # C extensions 14 | *.so 15 | 16 | # Distribution / packaging 17 | .Python 18 | build/ 19 | develop-eggs/ 20 | dist/ 21 | downloads/ 22 | eggs/ 23 | .eggs/ 24 | lib/ 25 | lib64/ 26 | parts/ 27 | sdist/ 28 | var/ 29 | wheels/ 30 | *.egg-info/ 31 | .installed.cfg 32 | *.egg 33 | MANIFEST 34 | 35 | # PyInstaller 36 | # Usually these files are written by a python script from a template 37 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
38 | *.manifest 39 | *.spec 40 | 41 | # Installer logs 42 | pip-log.txt 43 | pip-delete-this-directory.txt 44 | 45 | # Unit test / coverage reports 46 | htmlcov/ 47 | .tox/ 48 | .coverage 49 | .coverage.* 50 | .cache 51 | nosetests.xml 52 | coverage.xml 53 | *.cover 54 | .hypothesis/ 55 | .pytest_cache/ 56 | 57 | # Translations 58 | *.mo 59 | *.pot 60 | 61 | # Django stuff: 62 | *.log 63 | local_settings.py 64 | db.sqlite3 65 | 66 | # Flask stuff: 67 | instance/ 68 | .webassets-cache 69 | 70 | # Scrapy stuff: 71 | .scrapy 72 | 73 | # Sphinx documentation 74 | docs/_build/ 75 | 76 | # PyBuilder 77 | target/ 78 | 79 | # Jupyter Notebook 80 | .ipynb_checkpoints 81 | 82 | # pyenv 83 | .python-version 84 | 85 | # celery beat schedule file 86 | celerybeat-schedule 87 | 88 | # SageMath parsed files 89 | *.sage.py 90 | 91 | # Environments 92 | .env 93 | .venv 94 | env/ 95 | venv/ 96 | ENV/ 97 | env.bak/ 98 | venv.bak/ 99 | 100 | # Spyder project settings 101 | .spyderproject 102 | .spyproject 103 | 104 | # Rope project settings 105 | .ropeproject 106 | 107 | # mkdocs documentation 108 | /site 109 | 110 | # mypy 111 | .mypy_cache/ -------------------------------------------------------------------------------- /clean_netbox.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | 4 | import imp 5 | import sys 6 | import json 7 | import requests 8 | import urllib3 9 | import logging 10 | 11 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 12 | 13 | # Load config file into variable 14 | conf = imp.load_source('conf', 'conf') 15 | api_url_base = "{}/api".format(conf.NETBOX_HOST) 16 | 17 | def api_request(method, url): 18 | # Log which request we're trying to do 19 | logger.debug("HTTP Request: {} - {}".format(method, url)) 20 | 21 | # Prepare request 22 | request = requests.Request(method, url) 23 | prepared_request = s.prepare_request(request) 24 | 25 | response = s.send(prepared_request) 26 | 27 | # Log HTTP Response 28 | logger.debug("HTTP Response: {!s} - {}".format(response.status_code, response.reason)) 29 | 30 | return response 31 | 32 | def delete_sites(): 33 | logger.info('Deleting Sites') 34 | # Get all sites 35 | api_url = '{}/dcim/sites'.format(api_url_base) 36 | 37 | response = api_request('GET', api_url) 38 | sites = json.loads(response.content.decode('utf-8')) 39 | 40 | # Delete every site you got 41 | for site in sites['results']: 42 | url = '{}/{}'.format(api_url, site['id']) 43 | response = api_request('DELETE', url) 44 | 45 | return 46 | 47 | def main(): 48 | # We need to delete the items beginning from the most nested items to the top level items 49 | delete_sites() 50 | 51 | if __name__ == '__main__': 52 | # Initialize logging platform 53 | logger = logging.getLogger('clean_netbox') 54 | logger.setLevel(logging.DEBUG) 55 | 56 | # Log to file 57 | fh = logging.FileHandler(conf.CLEAN_LOG) 58 | fh.setLevel(logging.DEBUG) 59 | 60 | # Log to stdout 61 | ch = logging.StreamHandler() 62 | ch.setLevel(logging.DEBUG) 63 | 64 | # Format log output 65 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 66 | fh.setFormatter(formatter) 67 | ch.setFormatter(formatter) 68 | 69 | # Attach handlers to logger 70 | logger.addHandler(fh) 71 | logger.addHandler(ch) 72 | 73 | # Create HTTP connection pool 74 | s = requests.Session() 75 | 76 | # Disable SSL verification 77 | s.verify = False 78 | 79 | # Define REST Headers 80 | headers = {'Content-Type': 
'application/json', 81 | 'Accept': 'application/json; indent=4', 82 | 'Authorization': 'Token {0}'.format(conf.NETBOX_TOKEN)} 83 | 84 | s.headers.update(headers) 85 | 86 | # try: 87 | # import http.client as http_client 88 | # except ImportError: 89 | # # Python 2 90 | # import httplib as http_client 91 | # http_client.HTTPConnection.debuglevel = 1 92 | 93 | # requests_log = logging.getLogger("requests.packages.urllib3") 94 | # requests_log.setLevel(logging.DEBUG) 95 | # requests_log.propagate = True 96 | 97 | # Run the main function 98 | main() 99 | logger.info('[!] Done!') 100 | sys.exit() 101 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | { 8 | "name": "Python", 9 | "type": "python", 10 | "request": "launch", 11 | "stopOnEntry": true, 12 | "pythonPath": "${config:python.pythonPath}", 13 | "program": "${file}", 14 | "cwd": "${workspaceFolder}", 15 | "env": {}, 16 | "envFile": "${workspaceFolder}/.env", 17 | "debugOptions": [ 18 | "RedirectOutput" 19 | ] 20 | }, 21 | { 22 | "name": "Python: Attach", 23 | "type": "python", 24 | "request": "attach", 25 | "localRoot": "${workspaceFolder}", 26 | "remoteRoot": "${workspaceFolder}", 27 | "port": 3000, 28 | "secret": "my_secret", 29 | "host": "localhost" 30 | }, 31 | { 32 | "name": "Python: Terminal (integrated)", 33 | "type": "python", 34 | "request": "launch", 35 | "stopOnEntry": true, 36 | "pythonPath": "${config:python.pythonPath}", 37 | "program": "${file}", 38 | "cwd": "", 39 | "console": "integratedTerminal", 40 | "env": {}, 41 | "envFile": "${workspaceFolder}/.env", 42 | "debugOptions": [] 43 | }, 44 | { 45 | "name": "Python: Terminal (external)", 46 | "type": "python", 47 | "request": "launch", 48 | "stopOnEntry": true, 49 | "pythonPath": "${config:python.pythonPath}", 50 | "program": "${file}", 51 | "cwd": "", 52 | "console": "externalTerminal", 53 | "env": {}, 54 | "envFile": "${workspaceFolder}/.env", 55 | "debugOptions": [] 56 | }, 57 | { 58 | "name": "Python: Django", 59 | "type": "python", 60 | "request": "launch", 61 | "stopOnEntry": true, 62 | "pythonPath": "${config:python.pythonPath}", 63 | "program": "${workspaceFolder}/manage.py", 64 | "cwd": "${workspaceFolder}", 65 | "args": [ 66 | "runserver", 67 | "--noreload", 68 | "--nothreading" 69 | ], 70 | "env": {}, 71 | "envFile": "${workspaceFolder}/.env", 72 | "debugOptions": [ 73 | "RedirectOutput", 74 | "DjangoDebugging" 75 | ] 76 | }, 77 | { 78 | "name": "Python: Flask (0.11.x or later)", 79 | "type": "python", 80 | "request": "launch", 81 | "stopOnEntry": false, 82 | "pythonPath": "${config:python.pythonPath}", 83 | "program": "fully qualified path fo 'flask' executable. 
Generally located along with python interpreter", 84 | "cwd": "${workspaceFolder}", 85 | "env": { 86 | "FLASK_APP": "${workspaceFolder}/quickstart/app.py" 87 | }, 88 | "args": [ 89 | "run", 90 | "--no-debugger", 91 | "--no-reload" 92 | ], 93 | "envFile": "${workspaceFolder}/.env", 94 | "debugOptions": [ 95 | "RedirectOutput" 96 | ] 97 | }, 98 | { 99 | "name": "Python: Flask (0.10.x or earlier)", 100 | "type": "python", 101 | "request": "launch", 102 | "stopOnEntry": false, 103 | "pythonPath": "${config:python.pythonPath}", 104 | "program": "${workspaceFolder}/run.py", 105 | "cwd": "${workspaceFolder}", 106 | "args": [], 107 | "env": {}, 108 | "envFile": "${workspaceFolder}/.env", 109 | "debugOptions": [ 110 | "RedirectOutput" 111 | ] 112 | }, 113 | { 114 | "name": "Python: PySpark", 115 | "type": "python", 116 | "request": "launch", 117 | "stopOnEntry": true, 118 | "osx": { 119 | "pythonPath": "${env:SPARK_HOME}/bin/spark-submit" 120 | }, 121 | "windows": { 122 | "pythonPath": "${env:SPARK_HOME}/bin/spark-submit.cmd" 123 | }, 124 | "linux": { 125 | "pythonPath": "${env:SPARK_HOME}/bin/spark-submit" 126 | }, 127 | "program": "${file}", 128 | "cwd": "${workspaceFolder}", 129 | "env": {}, 130 | "envFile": "${workspaceFolder}/.env", 131 | "debugOptions": [ 132 | "RedirectOutput" 133 | ] 134 | }, 135 | { 136 | "name": "Python: Module", 137 | "type": "python", 138 | "request": "launch", 139 | "stopOnEntry": true, 140 | "pythonPath": "${config:python.pythonPath}", 141 | "module": "module.name", 142 | "cwd": "${workspaceFolder}", 143 | "env": {}, 144 | "envFile": "${workspaceFolder}/.env", 145 | "debugOptions": [ 146 | "RedirectOutput" 147 | ] 148 | }, 149 | { 150 | "name": "Python: Pyramid", 151 | "type": "python", 152 | "request": "launch", 153 | "stopOnEntry": true, 154 | "pythonPath": "${config:python.pythonPath}", 155 | "cwd": "${workspaceFolder}", 156 | "env": {}, 157 | "envFile": "${workspaceFolder}/.env", 158 | "args": [ 159 | "${workspaceFolder}/development.ini" 160 | ], 161 | "debugOptions": [ 162 | "RedirectOutput", 163 | "Pyramid" 164 | ] 165 | }, 166 | { 167 | "name": "Python: Watson", 168 | "type": "python", 169 | "request": "launch", 170 | "stopOnEntry": true, 171 | "pythonPath": "${config:python.pythonPath}", 172 | "program": "${workspaceFolder}/console.py", 173 | "cwd": "${workspaceFolder}", 174 | "args": [ 175 | "dev", 176 | "runserver", 177 | "--noreload=True" 178 | ], 179 | "env": {}, 180 | "envFile": "${workspaceFolder}/.env", 181 | "debugOptions": [ 182 | "RedirectOutput" 183 | ] 184 | } 185 | ] 186 | } -------------------------------------------------------------------------------- /racktables2netbox.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | __version__ = 1.00 4 | 5 | import configparser 6 | import json 7 | import logging 8 | import pprint 9 | import pymysql 10 | import pynetbox 11 | import requests 12 | import slugify 13 | import socket 14 | import struct 15 | import urllib3 16 | import re 17 | 18 | class Migrator: 19 | def slugify(self, text): 20 | return slugify.slugify(text, max_length=50) 21 | 22 | def create_tenant_group(self, name): 23 | pass 24 | 25 | def create_tenant(self, name, tenant_group=None): 26 | logger.info("Creating tenant {}").format(name) 27 | 28 | tenant = { 29 | 'name': name, 30 | 'slug': self.slugify(name) 31 | } 32 | 33 | if tenant_group: 34 | tenant["tenant_group"] = netbox.tenancy.tenant_groups.all() 35 | 36 | return 
netbox.tenancy.tenants.create(tenant) 37 | 38 | def create_region(self, name, parent=None): 39 | netbox.dcim.regions.create() 40 | 41 | if not parent: 42 | pass 43 | pass 44 | 45 | def create_site(self, name, region, status, physical_address, facility, shipping_address, contact_phone, contact_email, contact_name, tenant, time_zone): 46 | slug = self.slugify(name) 47 | pass 48 | 49 | 50 | # Re-Enabled SSL verification 51 | # urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 52 | class REST(object): 53 | def __init__(self): 54 | self.base_url = "{}/api".format(config['NetBox']['NETBOX_HOST']) 55 | 56 | # Create HTTP connection pool 57 | self.s = requests.Session() 58 | 59 | # SSL verification 60 | self.s.verify = True 61 | 62 | # Define REST Headers 63 | headers = {'Content-Type': 'application/json', 64 | 'Accept': 'application/json; indent=4', 65 | 'Authorization': 'Token {0}'.format(config['NetBox']['NETBOX_TOKEN'])} 66 | 67 | self.s.headers.update(headers) 68 | 69 | def uploader(self, data, url): 70 | method = 'POST' 71 | 72 | logger.debug("HTTP Request: {} - {} - {}".format(method, url, data)) 73 | 74 | request = requests.Request(method, url, data = json.dumps(data)) 75 | prepared_request = self.s.prepare_request(request) 76 | r = self.s.send(prepared_request) 77 | logger.debug(f"HTTP Response: {r.status_code!s} - {r.reason}") 78 | r.raise_for_status() 79 | 80 | return r.json() 81 | 82 | def fetcher(self, url): 83 | method = 'GET' 84 | 85 | logger.debug("HTTP Request: {} - {}".format(method, url)) 86 | 87 | request = requests.Request(method, url) 88 | prepared_request = self.s.prepare_request(request) 89 | r = self.s.send(prepared_request) 90 | 91 | logger.debug(f'HTTP Response: {r.status_code} - {r.reason}') 92 | r.raise_for_status() 93 | 94 | return r.text 95 | 96 | def post_subnet(self, data): 97 | url = self.base_url + '/ipam/prefixes/' 98 | logger.info('Posting data to {}'.format(url)) 99 | self.uploader(data, url) 100 | 101 | def post_ip(self, data): 102 | url = self.base_url + '/ipam/ip-addresses/' 103 | logger.info('Posting IP data to {}'.format(url)) 104 | self.uploader(data, url) 105 | 106 | # def post_device(self, data): 107 | # url = self.base_url + '/api/1.0/device/' 108 | # logger.info('Posting device data to {}'.format(url)) 109 | # self.uploader(data, url) 110 | 111 | # def post_location(self, data): 112 | # url = self.base_url + '/api/1.0/location/' 113 | # logger.info('Posting location data to {}'.format(url)) 114 | # self.uploader(data, url) 115 | 116 | # def post_room(self, data): 117 | # url = self.base_url + '/api/1.0/rooms/' 118 | # logger.info('Posting room data to {}'.format(url)) 119 | # self.uploader(data, url) 120 | 121 | # def post_rack(self, data): 122 | # url = self.base_url + '/api/1.0/racks/' 123 | # logger.info('Posting rack data to {}'.format(url)) 124 | # response = self.uploader(data, url) 125 | # return response 126 | 127 | # def post_pdu(self, data): 128 | # url = self.base_url + '/api/1.0/pdus/' 129 | # logger.info('Posting PDU data to {}'.format(url)) 130 | # response = self.uploader(data, url) 131 | # return response 132 | 133 | # def post_pdu_model(self, data): 134 | # url = self.base_url + '/api/1.0/pdu_models/' 135 | # logger.info('Posting PDU model to {}'.format(url)) 136 | # response = self.uploader(data, url) 137 | # return response 138 | 139 | # def post_pdu_to_rack(self, data, rack): 140 | # url = self.base_url + '/api/1.0/pdus/rack/' 141 | # logger.info('Posting PDU to rack {}'.format(rack)) 142 | # self.uploader(data, 
url) 143 | 144 | # def post_hardware(self, data): 145 | # url = self.base_url + '/api/1.0/hardwares/' 146 | # logger.info('Adding hardware data to {}'.format(url)) 147 | # self.uploader(data, url) 148 | 149 | # def post_device2rack(self, data): 150 | # url = self.base_url + '/api/1.0/device/rack/' 151 | # logger.info('Adding device to rack at {}'.format(url)) 152 | # self.uploader(data, url) 153 | 154 | def post_building(self, data): 155 | url = self.base_url + '/dcim/sites/' 156 | logger.info('Uploading building data to {}'.format(url)) 157 | self.uploader(data, url) 158 | 159 | # def post_switchport(self, data): 160 | # url = self.base_url + '/api/1.0/switchports/' 161 | # logger.info('Uploading switchports data to {}'.format(url)) 162 | # self.uploader(data, url) 163 | 164 | # def post_patch_panel(self, data): 165 | # url = self.base_url + '/api/1.0/patch_panel_models/' 166 | # logger.info('Uploading patch panels data to {}'.format(url)) 167 | # self.uploader(data, url) 168 | 169 | # def post_patch_panel_module_models(self, data): 170 | # url = self.base_url + '/api/1.0/patch_panel_module_models/' 171 | # logger.info('Uploading patch panels modules data to {}}'.format(url)) 172 | # self.uploader(data, url) 173 | 174 | # def get_pdu_models(self): 175 | # url = self.base_url + '/api/1.0/pdu_models/' 176 | # logger.info('Fetching PDU models from {}'.format(url)) 177 | # self.fetcher(url) 178 | 179 | # def get_racks(self): 180 | # url = self.base_url + '/api/1.0/racks/' 181 | # logger.info('Fetching racks from {}'.format(url)) 182 | # ata = self.fetcher(url) 183 | # return data 184 | 185 | # def get_devices(self): 186 | # url = self.base_url + '/api/1.0/devices/' 187 | # logger.info('Fetching devices from {}'.format(url)) 188 | # data = self.fetcher(url) 189 | # return data 190 | 191 | # def get_buildings(self): 192 | # url = self.base_url + '/api/dcim/sites/' 193 | # logger.info('Fetching buildings from {}'.format(url)) 194 | # data = self.fetcher(url) 195 | # return data 196 | 197 | # def get_rooms(self): 198 | # url = self.base_url + '/api/1.0/rooms/' 199 | # logger.info('Fetching rooms from {}'.format(url)) 200 | # data = self.fetcher(url) 201 | # return data 202 | 203 | 204 | class DB(object): 205 | """ 206 | Fetching data from Racktables and converting them to Device42 API format. 
207 | """ 208 | 209 | def __init__(self): 210 | self.con = None 211 | self.tables = [] 212 | self.rack_map = [] 213 | self.vm_hosts = {} 214 | self.chassis = {} 215 | self.rack_id_map = {} 216 | self.container_map = {} 217 | self.building_room_map = {} 218 | 219 | def connect(self): 220 | """ 221 | Connection to RT database 222 | :return: 223 | """ 224 | self.con = pymysql.connect( 225 | host=config['MySQL']['DB_IP'], 226 | port=int(config['MySQL']['DB_PORT']), 227 | db=config['MySQL']['DB_NAME'], 228 | user=config['MySQL']['DB_USER'], 229 | passwd=config['MySQL']['DB_PWD'] 230 | ) 231 | 232 | @staticmethod 233 | def convert_ip(ip_raw): 234 | """ 235 | IP address conversion to human readable format 236 | :param ip_raw: 237 | :return: 238 | """ 239 | ip = socket.inet_ntoa(struct.pack('!I', ip_raw)) 240 | return ip 241 | 242 | def get_ips(self): 243 | """ 244 | Fetch IPs from RT and send them to upload function 245 | :return: 246 | """ 247 | adrese = [] 248 | if not self.con: 249 | self.connect() 250 | with self.con: 251 | cur = self.con.cursor() 252 | q = 'SELECT * FROM IPv4Address WHERE IPv4Address.name != "" or IPv4Address.comment != ""' 253 | cur.execute(q) 254 | ips = cur.fetchall() 255 | if config['Log']['DEBUG']: 256 | msg = ('IPs', str(ips)) 257 | logger.debug(msg) 258 | 259 | for line in ips: 260 | net = {} 261 | ip_raw, name, comment, reserved = line 262 | ip = self.convert_ip(ip_raw) 263 | adrese.append(ip) 264 | 265 | net.update({'address': ip}) 266 | msg = 'IP Address: %s' % ip 267 | logger.info(msg) 268 | 269 | desc = ' '.join([name, comment]).strip() 270 | net.update({'description': desc}) 271 | msg = 'Label: %s' % desc 272 | logger.info(msg) 273 | 274 | rest.post_ip(net) 275 | logger.info('Post ip {ip}') 276 | 277 | 278 | def get_subnets(self): 279 | """ 280 | Fetch subnets from RT and send them to upload function 281 | :return: 282 | """ 283 | subs = {} 284 | if not self.con: 285 | self.connect() 286 | with self.con: 287 | cur = self.con.cursor() 288 | q = "SELECT * FROM IPv4Network" 289 | cur.execute(q) 290 | subnets = cur.fetchall() 291 | if config['Log']['DEBUG']: 292 | msg = ('Subnets', str(subnets)) 293 | logger.debug(msg) 294 | for line in subnets: 295 | sid, raw_sub, mask, name, x = line 296 | subnet = self.convert_ip(raw_sub) 297 | subs.update({'prefix':'/'.join([subnet, str(mask)])}) 298 | subs.update({'status':'active'}) 299 | #subs.update({'mask_bits': str(mask)}) 300 | subs.update({'description':name}) 301 | rest.post_subnet(subs) 302 | 303 | def get_infrastructure(self): 304 | """ 305 | Get locations, rows and racks from RT, convert them to buildings and rooms and send to uploader. 
306 | :return: 307 | """ 308 | sites_map = {} 309 | rooms_map = {} 310 | rows_map = {} 311 | rackgroups = [] 312 | racks = [] 313 | 314 | if not self.con: 315 | self.connect() 316 | 317 | # ============ BUILDINGS AND ROOMS ============ 318 | with self.con: 319 | cur = self.con.cursor() 320 | q = """SELECT id, name, parent_id, parent_name FROM Location""" 321 | cur.execute(q) 322 | raw = cur.fetchall() 323 | 324 | for rec in raw: 325 | location_id, location_name, parent_id, parent_name = rec 326 | if not parent_name: 327 | sites_map.update({location_id: location_name}) 328 | else: 329 | rooms_map.update({location_name: parent_name}) 330 | 331 | print("Sites:") 332 | pp.pprint(sites_map) 333 | 334 | pp.pprint(rooms_map) 335 | 336 | print("Rack Groups:") 337 | for room, parent in list(rooms_map.items()): 338 | if parent in sites_map.values(): 339 | if room in rooms_map.values(): 340 | continue 341 | 342 | rackgroup = {} 343 | 344 | if room not in sites_map.values(): 345 | name = parent + "-" + room 346 | rackgroup.update({'site': rooms_map[parent]}) 347 | else: 348 | name = room 349 | rackgroup.update({'site': parent}) 350 | 351 | rackgroup.update({'name': name}) 352 | 353 | rackgroups.append(rackgroup) 354 | 355 | for site_id, site_name in list(sites_map.items()): 356 | if site_name not in rooms_map.values(): 357 | rackgroup = {} 358 | rackgroup.update({'site': site_name}) 359 | rackgroup.update({'name': site_name}) 360 | 361 | rackgroups.append(rackgroup) 362 | 363 | pp.pprint(rackgroups) 364 | 365 | 366 | # upload rooms 367 | # buildings = json.loads((rest.get_buildings()))['buildings'] 368 | 369 | # for room, parent in list(rooms_map.items()): 370 | # roomdata = {} 371 | # roomdata.update({'name': room}) 372 | # roomdata.update({'building': parent}) 373 | # rest.post_room(roomdata) 374 | 375 | # # ============ ROWS AND RACKS ============ 376 | # with self.con: 377 | # cur = self.con.cursor() 378 | # q = """SELECT id, name ,height, row_id, row_name, location_id, location_name from Rack;""" 379 | # cur.execute(q) 380 | # raw = cur.fetchall() 381 | 382 | # for rec in raw: 383 | # rack_id, rack_name, height, row_id, row_name, location_id, location_name = rec 384 | 385 | # rows_map.update({row_name: location_name}) 386 | 387 | # # prepare rack data. 
We will upload it a little bit later 388 | # rack = {} 389 | # rack.update({'name': rack_name}) 390 | # rack.update({'size': height}) 391 | # rack.update({'rt_id': rack_id}) # we will remove this later 392 | # if config['Misc']['ROW_AS_ROOM']: 393 | # rack.update({'room': row_name}) 394 | # rack.update({'building': location_name}) 395 | # else: 396 | # row_name = row_name[:10] # there is a 10char limit for row name 397 | # rack.update({'row': row_name}) 398 | # if location_name in rooms_map: 399 | # rack.update({'room': location_name}) 400 | # building_name = rooms_map[location_name] 401 | # rack.update({'building': building_name}) 402 | # else: 403 | # rack.update({'building': location_name}) 404 | # racks.append(rack) 405 | 406 | # # upload rows as rooms 407 | # if config['Misc']['ROW_AS_ROOM']: 408 | # if config['Log']['DEBUG']: 409 | # msg = ('Rooms', str(rows_map)) 410 | # logger.debug(msg) 411 | # for room, parent in list(rows_map.items()): 412 | # roomdata = {} 413 | # roomdata.update({'name': room}) 414 | # roomdata.update({'building': parent}) 415 | # rest.post_room(roomdata) 416 | 417 | # # upload racks 418 | # if config['Log']['DEBUG']: 419 | # msg = ('Racks', str(racks)) 420 | # logger.debug(msg) 421 | # for rack in racks: 422 | # rt_rack_id = rack['rt_id'] 423 | # del rack['rt_id'] 424 | # response = rest.post_rack(rack) 425 | # d42_rack_id = response['msg'][1] 426 | 427 | # self.rack_id_map.update({rt_rack_id: d42_rack_id}) 428 | 429 | # self.all_ports = self.get_ports() 430 | 431 | def get_hardware(self): 432 | """ 433 | Get hardware from RT and send it to uploader 434 | :return: 435 | """ 436 | if not self.con: 437 | self.connect() 438 | with self.con: 439 | # get hardware items (except PDU's) 440 | cur = self.con.cursor() 441 | q = """SELECT 442 | Object.id,Object.name as Description, Object.label as Name, 443 | Object.asset_no as Asset,Dictionary.dict_value as Type 444 | FROM Object 445 | LEFT JOIN AttributeValue ON Object.id = AttributeValue.object_id 446 | LEFT JOIN Attribute ON AttributeValue.attr_id = Attribute.id 447 | LEFT JOIN Dictionary ON Dictionary.dict_key = AttributeValue.uint_value 448 | WHERE Attribute.id=2 AND Object.objtype_id != 2 449 | """ 450 | cur.execute(q) 451 | data = cur.fetchall() 452 | 453 | if config['Log']['DEBUG']: 454 | msg = ('Hardware', str(data)) 455 | logger.debug(msg) 456 | 457 | # create map device_id:height 458 | # RT does not impose height for devices of the same hardware model so it might happen that - 459 | # two or more devices based on same HW model have different size in rack 460 | # here we try to find and set smallest U for device 461 | hwsize_map = {} 462 | for line in data: 463 | line = [0 if not x else x for x in line] 464 | data_id, description, name, asset, dtype = line 465 | size = self.get_hardware_size(data_id) 466 | if size: 467 | floor, height, depth, mount = size 468 | if data_id not in hwsize_map: 469 | hwsize_map.update({data_id: height}) 470 | else: 471 | h = float(hwsize_map[data_id]) 472 | if float(height) < h: 473 | hwsize_map.update({data_id: height}) 474 | 475 | for line in data: 476 | hwddata = {} 477 | line = [0 if not x else x for x in line] 478 | data_id, description, name, asset, dtype = line 479 | 480 | if '%GPASS%' in dtype: 481 | vendor, model = dtype.split("%GPASS%") 482 | elif len(dtype.split()) > 1: 483 | venmod = dtype.split() 484 | vendor = venmod[0] 485 | model = ' '.join(venmod[1:]) 486 | else: 487 | vendor = dtype 488 | model = dtype 489 | 490 | size = self.get_hardware_size(data_id) 491 | if 
size: 492 | floor, height, depth, mount = size 493 | # patching height 494 | height = hwsize_map[data_id] 495 | hwddata.update({'notes': description}) 496 | hwddata.update({'type': 1}) 497 | hwddata.update({'size': height}) 498 | hwddata.update({'depth': depth}) 499 | hwddata.update({'name': model[:48]}) 500 | hwddata.update({'manufacturer': vendor}) 501 | # rest.post_hardware(hwddata) 502 | 503 | def get_hardware_size(self, data_id): 504 | """ 505 | Calculate hardware size. 506 | :param data_id: hw id 507 | :return: 508 | floor - starting U location for the device in the rack 509 | height - height of the device 510 | depth - depth of the device (full, half) 511 | mount - orientation of the device in the rack. Can be front or back 512 | """ 513 | if not self.con: 514 | self.connect() 515 | with self.con: 516 | # get hardware items 517 | cur = self.con.cursor() 518 | q = """SELECT unit_no,atom FROM RackSpace WHERE object_id = %s""" % data_id 519 | cur.execute(q) 520 | data = cur.fetchall() 521 | if data != (): 522 | front = 0 523 | interior = 0 524 | rear = 0 525 | floor = 0 526 | depth = 1 # 1 for full depth (default) and 2 for half depth 527 | mount = 'front' # can be [front | rear] 528 | i = 1 529 | 530 | for line in data: 531 | flr, tag = line 532 | 533 | if i == 1: 534 | floor = int(flr) - 1 # '-1' since RT rack starts at 1 and Device42 starts at 0. 535 | else: 536 | if int(flr) < floor: 537 | floor = int(flr) - 1 538 | i += 1 539 | if tag == 'front': 540 | front += 1 541 | elif tag == 'interior': 542 | interior += 1 543 | elif tag == 'rear': 544 | rear += 1 545 | 546 | if front and interior and rear: # full depth 547 | height = front 548 | return floor, height, depth, mount 549 | 550 | elif front and interior and not rear: # half depth, front mounted 551 | height = front 552 | depth = 2 553 | return floor, height, depth, mount 554 | 555 | elif interior and rear and not front: # half depth, rear mounted 556 | height = rear 557 | depth = 2 558 | mount = 'rear' 559 | return floor, height, depth, mount 560 | 561 | # for devices that look like less than half depth: 562 | elif front and not interior and not rear: 563 | height = front 564 | depth = 2 565 | return floor, height, depth, mount 566 | elif rear and not interior and not front: 567 | height = rear 568 | depth = 2 569 | return floor, height, depth, mount 570 | else: 571 | return None, None, None, None 572 | else: 573 | return None, None, None, None 574 | 575 | @staticmethod 576 | def add_hardware(height, depth, name): 577 | """ 578 | 579 | :rtype : object 580 | """ 581 | hwddata = {} 582 | hwddata.update({'type': 1}) 583 | if height: 584 | hwddata.update({'size': height}) 585 | if depth: 586 | hwddata.update({'depth': depth}) 587 | if name: 588 | hwddata.update({'name': name[:48]}) 589 | # rest.post_hardware(hwddata) 590 | 591 | def get_vmhosts(self): 592 | if not self.con: 593 | self.connect() 594 | with self.con: 595 | cur = self.con.cursor() 596 | q = """SELECT id, name FROM Object WHERE objtype_id='1505'""" 597 | cur.execute(q) 598 | raw = cur.fetchall() 599 | 600 | dev = {} 601 | for rec in raw: 602 | host_id = int(rec[0]) 603 | try: 604 | name = rec[1].strip() 605 | except AttributeError: 606 | continue 607 | self.vm_hosts.update({host_id: name}) 608 | dev.update({'name': name}) 609 | dev.update({'is_it_virtual_host': 'yes'}) 610 | # rest.post_device(dev) 611 | 612 | def get_chassis(self): 613 | if not self.con: 614 | self.connect() 615 | with self.con: 616 | cur = self.con.cursor() 617 | q = """SELECT id, name FROM Object 
WHERE objtype_id='1502'""" 618 | cur.execute(q) 619 | raw = cur.fetchall() 620 | 621 | dev = {} 622 | for rec in raw: 623 | host_id = int(rec[0]) 624 | try: 625 | name = rec[1].strip() 626 | except AttributeError: 627 | continue 628 | self.chassis.update({host_id: name}) 629 | dev.update({'name': name}) 630 | dev.update({'is_it_blade_host': 'yes'}) 631 | # rest.post_device(dev) 632 | 633 | def get_container_map(self): 634 | """ 635 | Which VM goes into which VM host? 636 | Which Blade goes into which Chassis ? 637 | :return: 638 | """ 639 | if not self.con: 640 | self.connect() 641 | with self.con: 642 | cur = self.con.cursor() 643 | q = """SELECT parent_entity_id AS container_id, child_entity_id AS object_id 644 | FROM EntityLink WHERE child_entity_type='object' AND parent_entity_type = 'object'""" 645 | cur.execute(q) 646 | raw = cur.fetchall() 647 | for rec in raw: 648 | container_id, object_id = rec 649 | self.container_map.update({object_id: container_id}) 650 | 651 | def get_devices(self): 652 | if not self.con: 653 | self.connect() 654 | with self.con: 655 | cur = self.con.cursor() 656 | # get object IDs 657 | q = 'SELECT id FROM Object' 658 | cur.execute(q) 659 | idsx = cur.fetchall() 660 | ids = [x[0] for x in idsx] 661 | 662 | with self.con: 663 | for dev_id in ids: 664 | q = """Select 665 | Object.objtype_id, 666 | Object.name as Description, 667 | Object.label as Name, 668 | Object.asset_no as Asset, 669 | Attribute.name as Name, 670 | Dictionary.dict_value as Type, 671 | Object.comment as Comment, 672 | RackSpace.rack_id as RackID, 673 | Rack.name as rack_name, 674 | Rack.row_name, 675 | Rack.location_id, 676 | Rack.location_name, 677 | Location.parent_name 678 | 679 | FROM Object 680 | LEFT JOIN AttributeValue ON Object.id = AttributeValue.object_id 681 | LEFT JOIN Attribute ON AttributeValue.attr_id = Attribute.id 682 | LEFT JOIN RackSpace ON Object.id = RackSpace.object_id 683 | LEFT JOIN Dictionary ON Dictionary.dict_key = AttributeValue.uint_value 684 | LEFT JOIN Rack ON RackSpace.rack_id = Rack.id 685 | LEFT JOIN Location ON Rack.location_id = Location.id 686 | WHERE Object.id = %s 687 | AND Object.objtype_id not in (2,9,1505,1560,1561,1562,50275)""" % dev_id 688 | 689 | cur.execute(q) 690 | data = cur.fetchall() 691 | if data: # RT objects that do not have data are locations, racks, rows etc... 
692 | self.process_data(data, dev_id) 693 | 694 | def process_data(self, data, dev_id): 695 | devicedata = {} 696 | device2rack = {} 697 | name = None 698 | opsys = None 699 | hardware = None 700 | note = None 701 | rrack_id = None 702 | floor = None 703 | dev_type = 0 704 | 705 | for x in data: 706 | dev_type, rdesc, rname, rasset, rattr_name, rtype, \ 707 | rcomment, rrack_id, rrack_name, rrow_name, \ 708 | rlocation_id, rlocation_name, rparent_name = x 709 | 710 | name = x[1] 711 | note = x[-7] 712 | 713 | if 'Operating System' in x: 714 | opsys = x[-8] 715 | if '%GSKIP%' in opsys: 716 | opsys = opsys.replace('%GSKIP%', ' ') 717 | if '%GPASS%' in opsys: 718 | opsys = opsys.replace('%GPASS%', ' ') 719 | if 'SW type' in x: 720 | opsys = x[-8] 721 | if '%GSKIP%' in opsys: 722 | opsys = opsys.replace('%GSKIP%', ' ') 723 | if '%GPASS%' in opsys: 724 | opsys = opsys.replace('%GPASS%', ' ') 725 | 726 | if 'Server Hardware' in x: 727 | hardware = x[-8] 728 | if '%GSKIP%' in hardware: 729 | hardware = hardware.replace('%GSKIP%', ' ') 730 | if '%GPASS%' in hardware: 731 | hardware = hardware.replace('%GPASS%', ' ') 732 | if '\t' in hardware: 733 | hardware = hardware.replace('\t', ' ') 734 | 735 | if 'HW type' in x: 736 | hardware = x[-8] 737 | if '%GSKIP%' in hardware: 738 | hardware = hardware.replace('%GSKIP%', ' ') 739 | if '%GPASS%' in hardware: 740 | hardware = hardware.replace('%GPASS%', ' ') 741 | if '\t' in hardware: 742 | hardware = hardware.replace('\t', ' ') 743 | if note: 744 | note = note.replace('\n', ' ') 745 | if '<' in note: 746 | note = note.replace('<', '') 747 | if '>' in note: 748 | note = note.replace('>', '') 749 | 750 | if name: 751 | # set device data 752 | devicedata.update({'name': name}) 753 | if hardware: 754 | devicedata.update({'hardware': hardware[:48]}) 755 | if opsys: 756 | devicedata.update({'os': opsys}) 757 | if note: 758 | devicedata.update({'notes': note}) 759 | if dev_id in self.vm_hosts: 760 | devicedata.update({'is_it_virtual_host': 'yes'}) 761 | if dev_type == 8: 762 | devicedata.update({'is_it_switch': 'yes'}) 763 | elif dev_type == 1502: 764 | devicedata.update({'is_it_blade_host': 'yes'}) 765 | elif dev_type == 4: 766 | try: 767 | blade_host_id = self.container_map[dev_id] 768 | blade_host_name = self.chassis[blade_host_id] 769 | devicedata.update({'type': 'blade'}) 770 | devicedata.update({'blade_host': blade_host_name}) 771 | except KeyError: 772 | pass 773 | elif dev_type == 1504: 774 | devicedata.update({'type': 'virtual'}) 775 | devicedata.pop('hardware', None) 776 | try: 777 | vm_host_id = self.container_map[dev_id] 778 | vm_host_name = self.vm_hosts[vm_host_id] 779 | devicedata.update({'virtual_host': vm_host_name}) 780 | except KeyError: 781 | pass 782 | 783 | d42_rack_id = None 784 | # except VMs 785 | if dev_type != 1504: 786 | if rrack_id: 787 | d42_rack_id = self.rack_id_map[rrack_id] 788 | 789 | # if the device is mounted in RT, we will try to add it to D42 hardwares. 
790 | floor, height, depth, mount = self.get_hardware_size(dev_id) 791 | if floor is not None: 792 | floor = int(floor) + 1 793 | else: 794 | floor = 'auto' 795 | if not hardware: 796 | hardware = 'generic' + str(height) + 'U' 797 | self.add_hardware(height, depth, hardware) 798 | 799 | # upload device 800 | if devicedata: 801 | if hardware and dev_type != 1504: 802 | devicedata.update({'hardware': hardware[:48]}) 803 | 804 | # set default type for racked devices 805 | if 'type' not in devicedata and d42_rack_id and floor: 806 | devicedata.update({'type': 'physical'}) 807 | 808 | rest.post_device(devicedata) 809 | 810 | # update ports 811 | if dev_type == 8 or dev_type == 4 or dev_type == 445 or dev_type == 1055: 812 | ports = self.get_ports_by_device(self.all_ports, dev_id) 813 | if ports: 814 | for item in ports: 815 | switchport_data = { 816 | 'port': item[0], 817 | 'switch': name, 818 | 'label': item[1] 819 | } 820 | 821 | get_links = self.get_links(item[3]) 822 | if get_links: 823 | device_name = self.get_device_by_port(get_links[0]) 824 | switchport_data.update({'device': device_name}) 825 | switchport_data.update({'remote_device': device_name}) 826 | # switchport_data.update({'remote_port': self.get_port_by_id(self.all_ports, get_links[0])}) 827 | 828 | rest.post_switchport(switchport_data) 829 | 830 | # reverse connection 831 | device_name = self.get_device_by_port(get_links[0]) 832 | switchport_data = { 833 | 'port': self.get_port_by_id(self.all_ports, get_links[0]), 834 | 'switch': device_name 835 | } 836 | 837 | switchport_data.update({'device': name}) 838 | switchport_data.update({'remote_device': name}) 839 | switchport_data.update({'remote_port': item[0]}) 840 | 841 | rest.post_switchport(switchport_data) 842 | else: 843 | rest.post_switchport(switchport_data) 844 | 845 | # if there is a device, we can try to mount it to the rack 846 | if dev_type != 1504 and d42_rack_id and floor: # rack_id is D42 rack id 847 | device2rack.update({'device': name}) 848 | if hardware: 849 | device2rack.update({'hw_model': hardware[:48]}) 850 | device2rack.update({'rack_id': d42_rack_id}) 851 | device2rack.update({'start_at': floor}) 852 | 853 | rest.post_device2rack(device2rack) 854 | else: 855 | if dev_type != 1504 and d42_rack_id is not None: 856 | msg = '\n-----------------------------------------------------------------------\ 857 | \n[!] INFO: Cannot mount device "%s" (RT id = %d) to the rack.\ 858 | \n\tFloor returned from "get_hardware_size" function was: %s' % (name, dev_id, str(floor)) 859 | logger.info(msg) 860 | else: 861 | msg = '\n-----------------------------------------------------------------------\ 862 | \n[!] INFO: Device %s (RT id = %d) cannot be uploaded. Data was: %s' % (name, dev_id, str(devicedata)) 863 | logger.info(msg) 864 | 865 | else: 866 | # device has no name thus it cannot be migrated 867 | msg = '\n-----------------------------------------------------------------------\ 868 | \n[!] INFO: Device with RT id=%d cannot be migrated because it has no name.' 
% dev_id 869 | logger.info(msg) 870 | 871 | def get_device_to_ip(self): 872 | if not self.con: 873 | self.connect() 874 | with self.con: 875 | # get hardware items (except PDU's) 876 | cur = self.con.cursor() 877 | q = """SELECT 878 | IPv4Allocation.ip,IPv4Allocation.name, 879 | Object.name as hostname 880 | FROM %s.`IPv4Allocation` 881 | LEFT JOIN Object ON Object.id = object_id""" % config['MySQL']['DB_NAME'] 882 | cur.execute(q) 883 | data = cur.fetchall() 884 | 885 | if config['Log']['DEBUG']: 886 | msg = ('Device to IP', str(data)) 887 | logger.debug(msg) 888 | 889 | for line in data: 890 | devmap = {} 891 | rawip, nic_name, hostname = line 892 | ip = self.convert_ip(rawip) 893 | devmap.update({'ipaddress': ip}) 894 | devmap.update({'device': hostname}) 895 | if nic_name: 896 | devmap.update({'tag': nic_name}) 897 | rest.post_ip(devmap) 898 | 899 | def get_pdus(self): 900 | if not self.con: 901 | self.connect() 902 | with self.con: 903 | cur = self.con.cursor() 904 | q = """SELECT 905 | Object.id,Object.name as Name, Object.asset_no as Asset, 906 | Object.comment as Comment, Dictionary.dict_value as Type, RackSpace.atom as Position, 907 | (SELECT Object.id FROM Object WHERE Object.id = RackSpace.rack_id) as RackID 908 | FROM Object 909 | LEFT JOIN AttributeValue ON Object.id = AttributeValue.object_id 910 | LEFT JOIN Attribute ON AttributeValue.attr_id = Attribute.id 911 | LEFT JOIN Dictionary ON Dictionary.dict_key = AttributeValue.uint_value 912 | LEFT JOIN RackSpace ON RackSpace.object_id = Object.id 913 | WHERE Object.objtype_id = 2 914 | """ 915 | cur.execute(q) 916 | data = cur.fetchall() 917 | 918 | if config['Log']['DEBUG']: 919 | msg = ('PDUs', str(data)) 920 | logger.debug(msg) 921 | 922 | rack_mounted = [] 923 | pdumap = {} 924 | pdumodels = [] 925 | pdu_rack_models = [] 926 | 927 | for line in data: 928 | pdumodel = {} 929 | pdudata = {} 930 | line = ['' if x is None else x for x in line] 931 | pdu_id, name, asset, comment, pdu_type, position, rack_id = line 932 | 933 | if '%GPASS%' in pdu_type: 934 | pdu_type = pdu_type.replace('%GPASS%', ' ') 935 | 936 | pdu_type = pdu_type[:64] 937 | pdudata.update({'name': name}) 938 | pdudata.update({'notes': comment}) 939 | pdudata.update({'pdu_model': pdu_type}) 940 | pdumodel.update({'name': pdu_type}) 941 | pdumodel.update({'pdu_model': pdu_type}) 942 | if rack_id: 943 | floor, height, depth, mount = self.get_hardware_size(pdu_id) 944 | pdumodel.update({'size': height}) 945 | pdumodel.update({'depth': depth}) 946 | 947 | # post pdu models 948 | if pdu_type and name not in pdumodels: 949 | rest.post_pdu_model(pdumodel) 950 | pdumodels.append(pdumodel) 951 | elif pdu_type and rack_id: 952 | if pdu_id not in pdu_rack_models: 953 | rest.post_pdu_model(pdumodel) 954 | pdu_rack_models.append(pdu_id) 955 | 956 | # post pdus 957 | if pdu_id not in pdumap: 958 | response = rest.post_pdu(pdudata) 959 | d42_pdu_id = response['msg'][1] 960 | pdumap.update({pdu_id: d42_pdu_id}) 961 | 962 | # mount to rack 963 | if position: 964 | if pdu_id not in rack_mounted: 965 | rack_mounted.append(pdu_id) 966 | floor, height, depth, mount = self.get_hardware_size(pdu_id) 967 | if floor is not None: 968 | floor = int(floor) + 1 969 | else: 970 | floor = 'auto' 971 | try: 972 | d42_rack_id = self.rack_id_map[rack_id] 973 | if floor: 974 | rdata = {} 975 | rdata.update({'pdu_id': pdumap[pdu_id]}) 976 | rdata.update({'rack_id': d42_rack_id}) 977 | rdata.update({'pdu_model': pdu_type}) 978 | rdata.update({'where': 'mounted'}) 979 | rdata.update({'start_at': 
floor}) 980 | rdata.update({'orientation': mount}) 981 | rest.post_pdu_to_rack(rdata, d42_rack_id) 982 | except TypeError: 983 | msg = '\n-----------------------------------------------------------------------\ 984 | \n[!] INFO: Cannot mount pdu "%s" (RT id = %d) to the rack.\ 985 | \n\tFloor returned from "get_hardware_size" function was: %s' % (name, pdu_id, str(floor)) 986 | logger.info(msg) 987 | except KeyError: 988 | msg = '\n-----------------------------------------------------------------------\ 989 | \n[!] INFO: Cannot mount pdu "%s" (RT id = %d) to the rack.\ 990 | \n\tWrong rack id map value: %s' % (name, pdu_id, str(rack_id)) 991 | logger.info(msg) 992 | # It's Zero-U then 993 | else: 994 | rack_id = self.get_rack_id_for_zero_us(pdu_id) 995 | if rack_id: 996 | try: 997 | d42_rack_id = self.rack_id_map[rack_id] 998 | except KeyError: 999 | msg = '\n-----------------------------------------------------------------------\ 1000 | \n[!] INFO: Cannot mount pdu "%s" (RT id = %d) to the rack.\ 1001 | \n\tWrong rack id map value: %s' % (name, pdu_id, str(rack_id)) 1002 | logger.info(msg) 1003 | if config['Misc']['PDU_MOUNT'].lower() in ('left', 'right', 'above', 'below'): 1004 | where = config['Misc']['PDU_MOUNT'].lower() 1005 | else: 1006 | where = 'left' 1007 | if config['Misc']['PDU_ORIENTATION'].lower() in ('front', 'back'): 1008 | mount = config['Misc']['PDU_ORIENTATION'].lower() 1009 | else: 1010 | mount = 'front' 1011 | rdata = {} 1012 | 1013 | try: 1014 | rdata.update({'pdu_id': pdumap[pdu_id]}) 1015 | rdata.update({'rack_id': d42_rack_id}) 1016 | rdata.update({'pdu_model': pdu_type}) 1017 | rdata.update({'where': where}) 1018 | rdata.update({'orientation': mount}) 1019 | rest.post_pdu_to_rack(rdata, d42_rack_id) 1020 | except UnboundLocalError: 1021 | msg = '\n-----------------------------------------------------------------------\ 1022 | \n[!] 
INFO: Cannot mount pdu "%s" (RT id = %d) to the rack.\ 1023 | \n\tWrong rack id: %s' % (name, pdu_id, str(rack_id)) 1024 | logger.info(msg) 1025 | 1026 | def get_patch_panels(self): 1027 | if not self.con: 1028 | self.connect() 1029 | with self.con: 1030 | cur = self.con.cursor() 1031 | q = """SELECT 1032 | id, 1033 | name, 1034 | AttributeValue.uint_value 1035 | FROM Object 1036 | LEFT JOIN AttributeValue ON AttributeValue.object_id = id AND AttributeValue.attr_id = 6 1037 | WHERE Object.objtype_id = 9 1038 | """ 1039 | cur.execute(q) 1040 | data = cur.fetchall() 1041 | 1042 | if config['Log']['DEBUG']: 1043 | msg = ('PDUs', str(data)) 1044 | logger.debug(msg) 1045 | 1046 | for item in data: 1047 | ports = self.get_ports_by_device(self.all_ports, item[0]) 1048 | patch_type = 'singular' 1049 | port_type = None 1050 | 1051 | if isinstance(ports, list) and len(ports) > 0: 1052 | if len(ports) > 1: 1053 | types = [] 1054 | 1055 | # check patch_type 1056 | for port in ports: 1057 | if port[2][:12] not in types: 1058 | types.append(port[2][:12]) 1059 | 1060 | if len(types) > 1: 1061 | patch_type = 'modular' 1062 | for port in ports: 1063 | rest.post_patch_panel_module_models({ 1064 | 'name': port[0], 1065 | 'port_type': port[2][:12], 1066 | 'number_of_ports': 1, 1067 | 'number_of_ports_in_row': 1 1068 | }) 1069 | 1070 | if patch_type == 'singular': 1071 | port_type = ports[0][2][:12] 1072 | 1073 | payload = { 1074 | 'name': item[1], 1075 | 'type': patch_type, 1076 | 'number_of_ports': item[2], 1077 | 'number_of_ports_in_row': item[2] 1078 | } 1079 | 1080 | if port_type is not None: 1081 | payload.update({'port_type': port_type}) 1082 | 1083 | rest.post_patch_panel(payload) 1084 | 1085 | def get_ports(self): 1086 | if not self.con: 1087 | self.connect() 1088 | with self.con: 1089 | cur = self.con.cursor() 1090 | q = """SELECT 1091 | name, 1092 | label, 1093 | PortOuterInterface.oif_name, 1094 | Port.id, 1095 | object_id 1096 | FROM Port 1097 | LEFT JOIN PortOuterInterface ON PortOuterInterface.id = type""" 1098 | cur.execute(q) 1099 | data = cur.fetchall() 1100 | 1101 | if data: 1102 | return data 1103 | else: 1104 | return False 1105 | 1106 | @staticmethod 1107 | def get_ports_by_device(ports, device_id): 1108 | device_ports = [] 1109 | for port in ports: 1110 | if port[4] == device_id: 1111 | device_ports.append(port) 1112 | 1113 | return device_ports 1114 | 1115 | @staticmethod 1116 | def get_port_by_id(ports, port_id): 1117 | for port in ports: 1118 | if port[3] == port_id: 1119 | return port[0] 1120 | 1121 | def get_device_by_port(self, port_id): 1122 | if not self.con: 1123 | self.connect() 1124 | with self.con: 1125 | cur = self.con.cursor() 1126 | q = """SELECT 1127 | name 1128 | FROM Object 1129 | WHERE id = ( SELECT object_id FROM Port WHERE id = %s )""" % port_id 1130 | cur.execute(q) 1131 | data = cur.fetchone() 1132 | if data: 1133 | return data[0] 1134 | else: 1135 | return False 1136 | 1137 | def get_links(self, port_id): 1138 | if not self.con: 1139 | self.connect() 1140 | with self.con: 1141 | cur = self.con.cursor() 1142 | q = """SELECT 1143 | porta, 1144 | portb 1145 | FROM Link 1146 | WHERE portb = %s""" % port_id 1147 | cur.execute(q) 1148 | data = cur.fetchall() 1149 | 1150 | if data: 1151 | return data[0] 1152 | else: 1153 | with self.con: 1154 | cur = self.con.cursor() 1155 | q = """SELECT 1156 | portb, 1157 | porta 1158 | FROM Link 1159 | WHERE porta = %s""" % port_id 1160 | cur.execute(q) 1161 | data = cur.fetchall() 1162 | 1163 | if data: 1164 | return data[0] 1165 | 
else: 1166 | return False 1167 | 1168 | def get_rack_id_for_zero_us(self, pdu_id): 1169 | if not self.con: 1170 | self.connect() 1171 | with self.con: 1172 | cur = self.con.cursor() 1173 | q = """SELECT 1174 | EntityLink.parent_entity_id 1175 | FROM EntityLink 1176 | WHERE EntityLink.child_entity_id = %s 1177 | AND EntityLink.parent_entity_type = 'rack'""" % pdu_id 1178 | cur.execute(q) 1179 | data = cur.fetchone() 1180 | if data: 1181 | return data[0] 1182 | 1183 | 1184 | if __name__ == '__main__': 1185 | # Import config 1186 | configfile = 'conf' 1187 | config = configparser.ConfigParser() 1188 | config.read(configfile) 1189 | 1190 | # Initialize Data pretty printer 1191 | pp = pprint.PrettyPrinter(indent=4) 1192 | 1193 | # Initialize logging platform 1194 | logger = logging.getLogger('racktables2netbox') 1195 | logger.setLevel(logging.DEBUG) 1196 | 1197 | # Log to file 1198 | fh = logging.FileHandler(config['Log']['LOGFILE']) 1199 | fh.setLevel(logging.DEBUG) 1200 | 1201 | # Log to stdout 1202 | ch = logging.StreamHandler() 1203 | ch.setLevel(logging.DEBUG) 1204 | 1205 | # Format log output 1206 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 1207 | fh.setFormatter(formatter) 1208 | ch.setFormatter(formatter) 1209 | 1210 | # Attach handlers to logger 1211 | logger.addHandler(fh) 1212 | logger.addHandler(ch) 1213 | 1214 | netbox = pynetbox.api(config['NetBox']['NETBOX_HOST'], token=config['NetBox']['NETBOX_TOKEN']) 1215 | 1216 | tenant_groups = netbox.tenancy.tenant_groups.all() 1217 | 1218 | print() 1219 | 1220 | 1221 | rest = REST() 1222 | racktables = DB() 1223 | #racktables.get_subnets() 1224 | racktables.get_ips() 1225 | #racktables.get_infrastructure() 1226 | # racktables.get_hardware() 1227 | # racktables.get_container_map() 1228 | # racktables.get_chassis() 1229 | # racktables.get_vmhosts() 1230 | # racktables.get_device_to_ip() 1231 | # racktables.get_pdus() 1232 | # racktables.get_patch_panels() 1233 | # racktables.get_devices() 1234 | 1235 | migrator = Migrator() 1236 | 1237 | logger.info('[!] Done!') 1238 | # sys.exit() 1239 | --------------------------------------------------------------------------------
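The `Migrator` class at the top of `racktables2netbox.py` is still a stub: `logger.info("Creating tenant {}").format(name)` calls `.format()` on the `None` returned by `logger.info()`, and the tenant group is assigned the entire `tenant_groups.all()` result. Below is a minimal sketch (not the author's final implementation) of how `create_tenant` could look with pynetbox, assuming the `netbox` handle created in the main block and the NetBox 2.x field name `group`:

```python
# Sketch only: a completed create_tenant() for the Migrator class.
# Assumes `netbox` is the pynetbox.api handle from the main block and that the
# tenant group field is called `group` (NetBox 2.x tenancy API).
def create_tenant(self, name, tenant_group=None):
    logger.info("Creating tenant {}".format(name))  # .format() belongs inside the call

    tenant = {
        'name': name,
        'slug': self.slugify(name),
    }

    if tenant_group:
        # Look up the named group instead of assigning the whole .all() result
        group = netbox.tenancy.tenant_groups.get(name=tenant_group)
        if group:
            tenant['group'] = group.id

    return netbox.tenancy.tenants.create(tenant)
```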
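One further detail: checks such as `if config['Log']['DEBUG']:` test a raw configparser string, which is non-empty and therefore truthy whether the option is set to `True` or `False`, and `conf.sample` uses inline `#` comments that configparser keeps by default. A small sketch of stricter parsing (an assumption about the intended behaviour, not what the script currently does):

```python
# Sketch: parse booleans from `conf` explicitly and strip inline comments.
# By default configparser keeps text after '#' on the same line, so
# config['Log']['DEBUG'] would be the string "True # write debug log" - always truthy.
import configparser

config = configparser.ConfigParser(inline_comment_prefixes=('#',))
config.read('conf')

debug_enabled = config.getboolean('Log', 'DEBUG')   # real booleans instead of strings
log_to_stdout = config.getboolean('Log', 'STDOUT')

if debug_enabled:
    print('debug logging requested')
```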