├── .coveragerc
├── .github
└── workflows
│ ├── main.yml
│ └── python-publish.yml
├── .gitignore
├── .pre-commit-config.yaml
├── LICENSE
├── MANIFEST.in
├── README.md
├── RELEASE-NOTES.md
├── VERSION
├── bin
└── netcfgbu
├── docs
├── QuickStart.md
├── TOC.md
├── commands.md
├── config-credentials.md
├── config-inventory-integrations.md
├── config-ospec.md
├── config-osspec-custom-connectors.md
├── config-ssh-jumphost.md
├── config-ssh-options.md
├── config-vcs-git.md
├── configuration-file.md
├── environment_variables.md
├── inventory.md
├── mkbadges.sh
├── motivation.md
├── plugins.md
├── troubleshooting.md
├── usage-filtering.md
├── usage-vcs.md
└── version.svg
├── inventory.csv
├── netbox
├── README.md
├── netbox_inventory.py
└── requirements.txt
├── netcfgbu.toml
├── netcfgbu
├── __init__.py
├── aiofut.py
├── cli
│ ├── __init__.py
│ ├── backup.py
│ ├── inventory.py
│ ├── lint.py
│ ├── login.py
│ ├── main.py
│ ├── probe.py
│ ├── report.py
│ ├── root.py
│ └── vcs.py
├── config.py
├── config_model.py
├── connectors
│ ├── __init__.py
│ ├── basic.py
│ └── ssh.py
├── consts.py
├── filetypes.py
├── filtering.py
├── inventory.py
├── jumphosts.py
├── linter.py
├── logger.py
├── os_specs.py
├── plugins.py
├── probe.py
└── vcs
│ ├── __init__.py
│ └── git.py
├── pyproject.toml
├── requirements-develop.txt
├── requirements.txt
├── setup.py
├── tasks.py
├── tests
├── __init__.py
├── conftest.py
├── files
│ ├── do-fail.sh
│ ├── do-nothing.sh
│ ├── fake-testkey
│ ├── mvp-netcfgbu.toml
│ ├── plugins
│ │ └── test-plugin.py
│ ├── test-config-jumphosts.toml
│ ├── test-config-logging.toml
│ ├── test-config-os-name-prompt-pattern.toml
│ ├── test-config-os_name.toml
│ ├── test-content-config.txt
│ ├── test-credentials.toml
│ ├── test-csv-withcomments.csv
│ ├── test-gitspec-badrepo.toml
│ ├── test-gitspec.toml
│ ├── test-inventory-fail.toml
│ ├── test-inventory-noscript.toml
│ ├── test-inventory-script-donothing.toml
│ ├── test-inventory-script-fails.toml
│ ├── test-inventory.toml
│ ├── test-just-defaults.toml
│ ├── test-linter-fail.toml
│ ├── test-linter.toml
│ ├── test-small-inventory.csv
│ └── test-vcs.toml
├── test_cli_inventory.py
├── test_cli_probe.py
├── test_cli_vcs.py
├── test_config.py
├── test_connectors.py
├── test_filetypes.py
├── test_filtering.py
├── test_inventory.py
├── test_jumphosts.py
├── test_linters.py
├── test_os_name.py
├── test_os_name_prompt_pattern.py
├── test_plugins.py
├── test_probe.py
└── test_vcs.py
└── tox.ini
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit =
3 | netcfgbu/cli/main.py
4 | netcfgbu/cli/backup.py
5 | netcfgbu/cli/login.py
6 | netcfgbu/logger*
7 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Build and test netcfgbu
5 |
6 | on:
7 | push:
8 | branches: [ master, develop ]
9 | pull_request:
10 | branches: [ master, develop ]
11 |
12 | jobs:
13 | build:
14 |
15 | runs-on: ubuntu-latest
16 |
17 | steps:
18 | - uses: actions/checkout@v2
19 | - name: Set up Python 3.8
20 | uses: actions/setup-python@v2
21 | with:
22 | python-version: 3.8
23 | - name: install tox and friends
24 | run: python -m pip install --upgrade pip setuptools tox virtualenv
25 | - name: run tox
26 | run: tox
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflows will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | name: Upload Python Package
5 |
6 | on:
7 | release:
8 | types: [created]
9 |
10 | jobs:
11 | deploy:
12 |
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Set up Python
18 | uses: actions/setup-python@v2
19 | with:
20 | python-version: '3.8'
21 | - name: Install dependencies
22 | run: |
23 | python -m pip install --upgrade pip
24 | pip install setuptools wheel twine
25 | - name: Build and publish
26 | env:
27 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
28 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
29 | run: |
30 | python setup.py sdist bdist_wheel
31 | twine upload dist/*
32 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 | .pytest_tmpdir/
50 |
51 | # Translations
52 | *.mo
53 | *.pot
54 |
55 | # Django stuff:
56 | *.log
57 | local_settings.py
58 | db.sqlite3
59 |
60 | # Flask stuff:
61 | instance/
62 | .webassets-cache
63 |
64 | # Scrapy stuff:
65 | .scrapy
66 |
67 | # Sphinx documentation
68 | docs/_build/
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Jupyter Notebook
74 | .ipynb_checkpoints
75 |
76 | # pyenv
77 | .python-version
78 |
79 | # celery beat schedule file
80 | celerybeat-schedule
81 |
82 | # SageMath parsed files
83 | *.sage.py
84 |
85 | # Environments
86 | .env
87 | .venv
88 | env/
89 | venv/
90 | ENV/
91 | env.bak/
92 | venv.bak/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 | .spyproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
101 | # mkdocs documentation
102 | /site
103 |
104 | # mypy
105 | .mypy_cache/
106 | failures.csv
107 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 | default_language_version:
4 | python: python3.8
5 |
6 | repos:
7 | - repo: https://github.com/pre-commit/pre-commit-hooks
8 | rev: v2.4.0
9 | hooks:
10 | - id: check-merge-conflict
11 | - id: trailing-whitespace
12 | - id: check-yaml
13 | - id: check-toml
14 | - id: check-added-large-files
15 | - id: flake8
16 |
17 | - repo: https://github.com/psf/black
18 | rev: stable
19 | hooks:
20 | - id: black
21 | args: ["."]
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE README.md VERSION tox.ini
2 | include requirements*.txt
3 | recursive-include tests *
4 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://www.python.org/downloads/release/python-380/)
2 | [](https://github.com/ambv/black)
3 |
4 | 
5 |
6 | # Network Configuration Backup
7 |
8 | As a network engineer I need to backup my network configuration files into a
9 | version control system, and I need a tool to automate this process. My primary
10 | means of accessing the devices is SSH.
11 |
12 | **Supported Devices**
13 | The `netcfgbu` tool was built specifically to backup network operating system (NOS)
14 | configurations that are monolithic in nature. That is to say the entire
15 | configuration can be captured using a command such as "show running-config". Any
16 | NOS that provides a monolithic configuration should be supported by `netcfgbu`.
17 |
18 | **Primary Considerations**
19 | * I have a multi-vendor environment. I need to account for the different commands
20 | that are used to obtain the running configuration and disable paging if
21 | required.
22 |
23 | * I want to provide my network inventory in a simple CSV format. I want to
24 | create this inventory dynamically from one or more sources, for example Netbox.
25 | I want the ability to filter this inventory with limit and exclude constraints.
26 |
27 | * I may need to try multiple SSH credentials. I must not store my passwords in
28 | any configuration file, so this tool must acquire passwords via environment
29 | variables.
30 |
31 | * I will have a large number of devices (>1000) so I want this tool to take
32 | advantage of any and all techniques that reduce the total amount of time.
33 |
34 | ---
35 |
36 |
37 | The general approach to `netcfgbu` is a configuration based methodology so as
38 | to not hardcode the tool to work with specific network device drivers
39 | and avoid the complexity and dependency of including a collection of 3rd-party
40 | libraries specific to network devices.
41 |
42 | Read the Documentation [here](docs/TOC.md).
43 | Read the Quick Start [here](docs/QuickStart.md)
44 | [Example netcfgbu.toml configuration](netcfgbu.toml)
45 |
46 | # Introduction
47 |
48 | Once you've setup the [configuration](docs/configuration-file.md) file and
49 | [inventory](docs/inventory.md) file you can backup all of your configurations
50 | using the command:
51 |
52 | ```shell script
53 | $ netcfgbu backup
54 | ```
55 |
56 | At the end of the run, you will see a report, for example:
57 |
58 | ```shell script
59 | # ------------------------------------------------------------------------------
60 | Summary: TOTAL=1482, OK=1482, FAIL=0
61 | START=2020-Jun-05 01:48:55 PM, STOP=2020-Jun-05 01:50:08 PM
62 | DURATION=72.566s
63 | # ------------------------------------------------------------------------------
64 | ```
65 |
66 | There are a number of other [commands](docs/commands.md) provided as shown via `--help`:
67 |
68 | ```text
69 | Usage: netcfgbu [OPTIONS] COMMAND [ARGS]...
70 |
71 | Options:
72 | --version Show the version and exit.
73 | --help Show this message and exit.
74 |
75 | Commands:
76 | backup Backup network configurations.
77 | inventory Inventory subcommands.
78 | login Verify SSH login to devices.
79 |   probe      Probe device for SSH reachability.
80 | vcs Version Control System subcommands.
81 | ```
82 |
83 | # Setup
84 |
85 | The `netcfgbu` tool does not require you to create a configuration file, but
86 | for practical purposes you will generally need one. The file is
87 | [TOML](https://github.com/toml-lang/toml) format. The default file is
88 | `netcfgbu.toml` and `netcfgbu` searches for it in the current working directory.
89 | You can override this location using the `-C <filepath>` option or using the
90 | environment variable `NETCFGBU_CONFIG`
91 |
92 | At a minimum you need to designate the [inventory](docs/inventory.md) CSV file and
93 | a default set of SSH login credentials. The network device configs will be
94 | stored in the current working directory, or as specified in the `defaults.configs_dir`
95 | option. The configuration-file supports the use of environment variables.
96 |
97 | Example:
98 | ```toml
99 | [defaults]
100 | inventory = "$PROJ_DIR/inventory.csv"
101 | configs_dir = "$PROJ_DIR/configs"
102 | credentials.username = "$NETWORK_USERNAME"
103 | credentials.password = "$NETWORK_PASSWORD"
104 | ```
105 |
106 | The `netcfgbu` defines the use of specific [Environment Variables](docs/environment_variables.md). Their
107 | use is the equivalent of the following configuration file. That is to say, if you did not provide `netcfgbu` a
108 | configuration file, this would be used:
109 |
110 | ```toml
111 | [defaults]
112 | inventory = "$NETCFGBU_INVENTORY"
113 | configs_dir = "$NETCFGBU_CONFIGSDIR"
114 | credentials.username = "$NETCFGBU_DEFAULT_USERNAME"
115 | credentials.password = "$NETCFGBU_DEFAULT_PASSWORD"
116 | ```
117 |
118 | ### System Requirements and Installation
119 |
120 | This tool requires the use of Python3.8.
121 | Installation available via PyPI:
122 |
123 | ```shell script
124 | $ pip install netcfgbu
125 | ```
126 |
127 | ### Questions or Suggestions?
128 |
129 | Please open a github issue if you have any questions or suggestions.
130 |
131 | Thank you!
--------------------------------------------------------------------------------
/RELEASE-NOTES.md:
--------------------------------------------------------------------------------
1 | # Release Notes
2 |
3 | #### v0.6.0 (2020-Aug-11)
4 | * Added support for User defined prompt patterns
5 | * Added examples to support Cumulus (@saparikh)
6 | #### v0.5.0 (2020-Jun-28)
7 | * BREAKING Change: removed support of non-CSV files for filtering "@". Only
8 | CSV files are currently supported.
9 | * Added support for SSH jumphost proxy, see [Using Jumphosts](docs/config-ssh-jumphost.md)
10 | * Added CI/CD tooling - tox & friends, github actions, pre-commit
11 | * Added unit-test coverage for full infrastructure components; that is
12 | everything but the CLI.
13 |
14 | #### v0.4.0 (2020-Jun-21)
15 | * BREAKING Change `[[github]]` to `[[git]]` in `netcfgbu.toml`
16 | * BREAKING Change subcommand `inventory ls` to `inventory list`
17 | * Added unit-test coverage for configuration file use-cases
18 | * Added Troubleshooting documentation
19 |
20 | #### v0.3.1 (2020-Jun-17)
21 | * Bugfix resulting in missing `os_name` config
22 |
23 | #### v0.3.0 (2020-Jun-12)
24 | * Add support for Github version control system
25 | * Add config file validation
26 | * Add support for user-defined inventory columns
27 | * Enhanced netbox integration script to work with >= version 2.7
28 |
29 | #### v0.2.0 (2020-Jun-09)
30 | * Add Quick-Start docs
31 | * Add Netbox Integration docs
32 | * Add --inventory, -i options for inventory file
33 | * Add NETCFGBU environment variables
34 | * Add --debug-ssh for debugging
35 | * Add support for SSH-config options
36 | * Add config-file validation
37 | * Refactored config-file options
38 |
39 | #### v0.1.0 (2020-Jun-05)
40 | Minimum viable package features that will backup configuration files from
41 | network devices and store the contents onto your server.
42 |
43 | This version does not support the following features:
44 | * Version Control System (VCS) integration with github
45 | * Removing config content that would result in false-positive changes
46 |
47 | These features will be included in the next release.
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 0.8.0
2 |
--------------------------------------------------------------------------------
/bin/netcfgbu:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | from netcfgbu.cli import main
4 |
5 | if __name__ == "__main__":
6 | main.run()
7 |
--------------------------------------------------------------------------------
/docs/QuickStart.md:
--------------------------------------------------------------------------------
1 | # Quick Start
2 |
3 | If you are a Netbox user you can dynamically build the inventory.csv file as
4 | described [here](../netbox/README.md).
5 |
6 | You can use the provided configuration file [netcfgbu.toml](../netcfgbu.toml) as
7 | a starting point and customize it as necessary. See [Configurations](TOC.md)
8 | for details.
9 |
10 | If you are storing your configuration backups in a Git Version Control System, refer to the [Git
11 | Configuration](config-vcs-git.md) and [Git Usage](usage-vcs.md) sections.
12 |
13 |
14 | ---
15 |
16 |
17 | If you want to run a probe test to verify SSH access to all your devices
18 | you can run:
19 |
20 | ```shell script
21 | $ netcfgbu probe
22 | ```
23 |
24 | When the command completes you will see a report similar:
25 | ```shell script
26 | # ------------------------------------------------------------------------------
27 | Summary: TOTAL=1463, OK=1463, FAIL=0
28 | START=2020-Jun-09 08:09:52 PM, STOP=2020-Jun-09 08:09:53 PM
29 | DURATION=0.358s
30 | # ------------------------------------------------------------------------------
31 | ```
32 |
33 | If you want to run a login test to verify that your configured credentials
34 | are working you can run:
35 |
36 | ```shell script
37 | $ netcfgbu login
38 | ```
39 |
40 | When the command completes you will see a report similar:
41 | ```shell script
42 | # ------------------------------------------------------------------------------
43 | Summary: TOTAL=1463, OK=1462, FAIL=1
44 | START=2020-Jun-09 08:10:40 PM, STOP=2020-Jun-09 08:11:52 PM
45 | DURATION=71.797s
46 |
47 |
48 | FAILURES: 1
49 | host os_name reason
50 | ----------------- --------- -------------------------------
51 | switch01 iosxe ConnectionLost: Connection lost
52 | # ------------------------------------------------------------------------------
53 | ```
54 |
55 | Any errors will be logged to a file called `failures.csv`, which you can then
56 | use to exclude on future commands.
57 |
58 |
59 | ---
60 |
61 |
62 | When you want to run a backup of your configs you can run:
63 |
64 | ```shell script
65 | $ netcfgbu backup
66 | ```
67 |
68 | Or to exclude any devices that failed the login test:
69 |
70 | ```shell script
71 | $ netcfgbu backup --exclude @failures.csv
72 | ```
73 |
74 | When the backup completes you will see a report similar:
75 |
76 | ```shell script
77 | # ------------------------------------------------------------------------------
78 | Summary: TOTAL=1462, OK=1462, FAIL=0
79 | START=2020-Jun-09 08:14:36 PM, STOP=2020-Jun-09 08:18:29 PM
80 | DURATION=80.672s
81 | # ------------------------------------------------------------------------------
82 | ```
83 |
--------------------------------------------------------------------------------
/docs/TOC.md:
--------------------------------------------------------------------------------
1 | # Network Config Backup Documentation
2 |
3 | * [Quick Start](QuickStart.md)
4 |
5 | * [Motivation](motivation.md)
6 |
7 | * Configuration
8 | * [General](configuration-file.md)
9 | * [OS Specific Backup Options](config-ospec.md)
10 | * [Credentials](config-credentials.md)
11 | * [Git](config-vcs-git.md)
12 | * [SSH Jumphost Support](config-ssh-jumphost.md)
13 | * [SSH Config Options](config-ssh-options.md)
14 |
15 | * [Inventory](inventory.md)
16 | * [File Definition](inventory.md#inventory-file)
17 | * [Integration Scripts](configuration-file.md#inventory-scripts)
18 |
19 | * Command Usages
20 | * [General Commands](commands.md)
21 | * [Filtering](usage-filtering.md)
22 |   * [Github Commands](usage-vcs.md)
23 |
24 | * [Troubleshooting](troubleshooting.md)
25 |
26 | * [Environment Variables](environment_variables.md)
27 |
28 | * [OS Custom Connectors](config-osspec-custom-connectors.md)
29 |
--------------------------------------------------------------------------------
/docs/commands.md:
--------------------------------------------------------------------------------
1 | # Commands
2 |
3 | This page presents an overview of the `netcfgbu` commands. For full command details use the
4 | CLI `--help` option.
5 |
6 | For any devices that fail during a command, the `netcfgbu` tool will generate a
7 | file called `failures.csv` You can use this file in future command to retry
8 | using the `--limit @failures.csv`. Or you can use this file to exclude these
9 | devices using `--exlcude @failures.csv`. For more details see
10 | [filtering](usage-filtering.md)
11 |
12 | **inventory**
13 | The `inventory list` command is used to list the contents of the current inventory file. This
14 | is useful for when you want to test your filtering expressions before you try to run a backup.
15 |
16 | Example:
17 | ```shell script
18 | $ netcfgbu inventory list --limit os_name=eos --brief
19 | ```
20 |
21 | The `inventory build` command is used to invoke your inventory script that will create the inventory
22 | file.
23 |
24 | Example:
25 | ```shell script
26 | $ netcfgbu inventory build --name netbox
27 | ```
28 |
29 | **probe**
30 | The `probe` command is used to determine if the SSH port is available on the target device. This
31 | is a useful first step before attempting to run a backup. This probe does **not** attempt to
32 | login / authenticate with SSH.
33 |
34 | ```shell script
35 | $ netcfgbu probe
36 | ```
37 |
38 | **login**
39 | The `login` command is used to determine if the `netcfgbu` is able to authenticate with the
40 | device SSH, and reports the credential username value that was used. This is useful to
41 | ensure that not only is the device reachable with SSH open, but also that `netcfgbu` is configured
42 | with the correct credentials to allow a connection.
43 |
44 | ```shell script
45 | $ netcfgbu login
46 | ```
47 |
48 | **backup**
49 | This `backup` command is used to login to the device via SSH, extract the
50 | running configuration, and save it to a file called $host.cfg, where $host is
51 | the value defined in the inventory item. For example if an inventory item has
52 | a host value of "myswitch1", then the file "myswitch1.cfg" is created upon
53 | successful backup. The backup files are stored in either the current working
54 | directory, or the directory designated by the `config_dir` value in the
55 | [configuration file](configuration-file.md#Changing-Storage-Directory)
56 |
57 | Example:
58 | ```shell script
59 | $ netcfgbu backup --exclude @failures.csv
60 | ```
--------------------------------------------------------------------------------
/docs/config-credentials.md:
--------------------------------------------------------------------------------
1 | # Credentials
2 |
3 | The `netcfgbu` tool requires that any credentials you use will have the
4 | necessary privilege to execute any of the `pre_get_config` and `get_config`
5 | commands without having to change the privilege level. The `netcfgbu` tool
6 | does not support the capability of changing privilege levels.
7 |
8 | The `netcfgbu` tool will attempt to login to each device using any of the
9 | following credentials **_in this order_**:
10 |
11 | 1. Host specific
12 | 2. OS-name specific
13 | 3. Default
14 | 4. Global
15 |
16 | One or more credentials can be defined in per OS-name and Global sections. You
17 | can define multiple credentials using TOML [Array of
18 | Tables](https://github.com/toml-lang/toml#user-content-array-of-tables) syntax.
19 | When multiple credentials are supplied in a given section `netcfgbu` will use
20 | these credentials in the order that they are defined.
21 |
22 | **Host specific credentials**
23 | Host specific credentials must be provided in the inventory file using the
24 | `username` and `password` field-columns. See the [inventory
25 | section](inventory.md) for details.
26 |
27 | **OS-name specific credentials**
28 | Example:
29 | ```toml
30 | [os_name.asa]
31 | disable_paging = 'terminal pager 0'
32 |
33 | [[os_name.asa.credentials]]
34 | username = 'superBadSecOps'
35 | password = '$SECOPS_PASSWORD'
36 | ```
37 |
38 | NOTE: The indentation here is only for human-eyeballs. If you were to add a
39 | variable after the credentials section it would **not** be part of the
40 | `[os_name.asa]` section, but rather a new global variable.
41 |
42 |
43 | **Default credentials**
44 | Defined in the `[defaults]` section.
45 |
46 | Example:
47 | ```toml
48 | [defaults]
49 | credentials.username = 'nwkautomaniac'
50 | credentials.password = "$NETWORK_PASSWORD"
51 | ```
52 |
53 | **Global credentials**
54 | `netcfgbu` will use these credentials in the order that they are defined in the
55 | configuration file.
56 |
57 | Example:
58 | ```toml
59 | [[credentials]]
60 | username = "superadmin"
61 | password = "$ENABLE_PASSWORD"
62 |
63 | [[credentials]]
64 | username = "superadmin"
65 | password = "$ENABLE_PASSWORD_1999"
66 | ```
67 |
--------------------------------------------------------------------------------
/docs/config-inventory-integrations.md:
--------------------------------------------------------------------------------
1 | # Inventory Integration
2 | You can use `netcfgbu` to invoke a script that is used to extract inventory from
3 | an external system and save it to the required inventory format. You can define one
4 | or more `[[inventory]]` sections for this purpose.
5 |
6 | Example:
7 | ```toml
8 | [[inventory]]
9 | script = "$PROJ_DIR/netcfgbu/examples/netbox_inventory.py --output inventory.csv"
10 | ```
11 | You can find the Netbox code example [here](../netbox/netbox_inventory.py).
12 |
13 | You invoke the script by:
14 | ```shell script
15 | $ netcfgbu inventory build
16 | ```
17 |
18 | You can define multiple inventory sections and invoke them by name.
19 |
20 | Example:
21 | ```toml
22 | [[inventory]]
23 | name = "cmdb"
24 | script = "/usr/local/bin/cmdb-inventory.py > inventory.csv"
25 |
26 | [[inventory]]
27 | name = "netbox"
28 | script = "$PROJ_DIR/netcfgbu/examples/netbox_inventory.py --output inventory.csv"
29 | ```
30 |
31 | ```shell script
32 | $ netcfgbu inventory build --name netbox
33 | ```
34 |
35 | If you do not provide a name `netcfgbu` will use the first configured inventory
36 | section by default.
37 |
--------------------------------------------------------------------------------
/docs/config-ospec.md:
--------------------------------------------------------------------------------
1 | ## OS Specifications
2 | Most devices will require you to disable paging before getting the running
3 | config. To account for this, you need to define OS specification sections. For
4 | each `[os_name.$name]` section you can configure the following variables:
5 |
6 | **`pre_get_config`**:
7 | The command(s) required to disable paging so that when running the command(s) to
8 | obtain the running config the SSH session is not blocked awaiting on a _More_ prompt.
9 |
10 | **`get_config`**:
11 | The command(s) required to obtain the running configuration.
12 |
13 | ***`timeout`***
14 | The time in seconds to await the collection of the configuration before
15 | declaring a timeout error. Default is 60 seconds.
16 |
17 | ***`linter`***
18 | Identifies the Linter specification to apply to the configuration once it
19 | has been retrieved. See [Linters](#Linters) in next section.
20 |
21 | **`prompt_pattern`**
22 | Allows the User to define a custom prompt match regular expression pattern.
23 | Please be careful to ensure any special characters such as dash (-) are escaped.
24 |
25 | Examples:
26 | ```toml
27 | [os_name.ios]
28 | pre_get_config = "terminal length 0"
29 | linter = "ios"
30 |
31 | [os_name.asa]
32 | timeout = 120
33 | pre_get_config = 'terminal pager 0'
34 |
35 | [os_name.nxos]
36 | get_config = 'show running-config | no-more'
37 |
38 | [os_name.cumulus]
39 | get_config = "( cat /etc/hostname; cat /etc/network/interfaces; cat /etc/cumulus/ports.conf; sudo cat /etc/frr/frr.conf)"
40 |
41 | # example Cumulus prompt value: cumulus@leaf01:mgmt-vrf:~$
42 | prompt_pattern = '[a-z0-9.\-@:~]{10,65}\s*[#$]'
43 | ```
44 |
45 | If you need to provide multiple commands, define a list of commands, as described
46 | [TOML Array](https://github.com/toml-lang/toml#user-content-array).
47 |
48 | ## Linters
49 | Linters post-process the configuration once it has been retrieved from the device.
50 | At present there are two variables you can define:
51 |
52 | **config_starts_after**
53 | A regular-expression or value that designates the line before
54 | the configuration file contents. Different operating systems have a
55 | different "starting line", for example:
56 |
57 | ```toml
58 | [linters.iosxr]
59 | config_starts_after = 'Building configuration'
60 |
61 | [linters.ios]
62 | config_starts_after = 'Current configuration'
63 | ```
64 |
65 | **config_ends_at**
66 | You can configure this value to identify a line of text that marks the end of
67 | the configuration.
68 |
69 | For example, for Palo Alto systems the command to get the configuration is
70 | `show` from within the OS configure mode. When that command completes the next
71 | prompt is `[edit]`; so we use this value to indicate the end of the
72 | configuration.
73 |
74 | ```toml
75 | [linters.panos]
76 | config_ends_at = "[edit]"
77 | ```
--------------------------------------------------------------------------------
/docs/config-osspec-custom-connectors.md:
--------------------------------------------------------------------------------
1 | # Custom Connections
2 | You may encounter network devices that require additional steps beyond the
3 | standard login process. For example, the Cisco WLC Aireos v8.5 requires the
4 | Username and Password values to be provided, even after the SSH connection was
5 | established using the same credentials.
6 |
7 | In such cases the goal of `netcfgbu` is to provide the necessary SSH connector,
8 | but in such a way that it is not specifically tied to a network vendor or OS.
9 | For example there may be other network devices that behave the same way as the
10 | WLC, and you could use the same connector for the WLC and this other device
11 | type.
12 |
13 | To use a custom connector type you can add the `connection` value in
14 | the OS section of your [configuration file](configuration-file.md).
15 |
16 | Example for Cisco WLC:
17 | ```toml
18 | [os_name.aireos]
19 | show_running = "show run-config commands"
20 | disable_paging = "config paging disable"
21 | connection = "netcfgbu.connectors.ssh.LoginPromptUserPass"
22 | ```
23 |
24 | The `connection` value identifies the python module location where this
25 | connector can be found. In the above example, you can find
26 | [LoginPromptUserPass here](../netcfgbu/connectors/ssh.py). This approach
27 | allows you to use a connector that is either packaged with `netcfgbu` or use
28 | one that can be found in another python package should that be necessary.
29 |
30 | If you have a need for a custom connector and would like it written for you,
31 | please [open an issue](https://github.com/jeremyschulman/netcfgbu/issues).
--------------------------------------------------------------------------------
/docs/config-ssh-jumphost.md:
--------------------------------------------------------------------------------
1 | # Configuration for Use with Jump Hosts
2 |
3 | *Added in 0.5.0*
4 |
5 | You can configure one or more jump host proxy servers. Each `[[jumphost]]`
6 | section supports the following fields:
7 |
8 | * `proxy` - Defines the jump host proxy destination. This string value can
9 | be in the form "[user@]host[:port]". If `user` is not provided, then `$USER`
10 | from the environment will be used
11 |
12 | * `include` - A list of [filter](usage-filtering.md) expressions that identify
13 | which inventory records will be matched to use this jump host
14 |
15 | * `exclude` - *(Optional)* A list of [filter](usage-filtering.md) expressions that identify
16 | which inventory records will be matched to use this jumphost
17 |
18 | * `timeout` - *(Optional)* A timeout in seconds when connecting to the jump host server. If
19 | not provided, will use the default connection timeout value (30s)
20 |
21 | Using jump hosts currently requires that you are also using an ssh-agent and have
22 | loaded any ssh-keys so that the `$SSH_AUTH_SOCK` environment variable exists
23 | and running `ssh-add -l` shows that your ssh keys have been loaded for use.
24 |
25 | ### Examples
26 |
27 | For any inventory item that matches the host with a suffix of ".dc1" use the jump server
28 | "dc1-jumpie.com" with login user-name "jeremy"
29 |
30 | ```toml
31 | [[jumphost]]
32 | proxy = "jeremy@dc1-jumpie.com"
33 | include = ['host=.*\.dc1']
34 | ```
35 |
36 | Exclude any devices that are role equal to "firewall"; this presumes that your
37 | inventory contains a field-column called role.
38 |
39 | ```toml
40 | [[jumphost]]
41 | proxy = "jeremy@dc1-jumpie.com"
42 | include = ['host=.*\.dc1']
43 | exclude = ['role=firewall']
44 | ```
45 |
46 | Assuming your inventory has a field-column site, use jump host with IP address
47 | 192.168.10.2 for any device with site equal to "datacenter1" or "office1".
48 |
49 | ```toml
50 | [[jumphost]]
51 | proxy = "192.168.10.2"
52 | include = ['site=datacenter1|office1']
53 | ```
54 |
--------------------------------------------------------------------------------
/docs/config-ssh-options.md:
--------------------------------------------------------------------------------
1 | # SSH Config Options
2 | You may need to provide SSH configuration options such as Key Exchange or
3 | Cipher options. The `netcfgbu` tool uses [AsyncSSH](https://github.com/ronf/asyncssh) as an underlying transport.
4 | You can provide any SSH Configuration option supported by AsyncSSH either at
5 | the global level or at the OS-spec level.
6 |
7 | For example at the global level:
8 | ```toml
9 | [ssh_configs]
10 | kex_algs = ["ecdh-sha2-nistp256", "diffie-hellman-group14-sha1"]
11 | encryption_algs = [
12 | "aes128-cbc,3des-cbc",
13 | "aes192-cbc,aes256-cbc",
14 | "aes256-ctr,aes192-ctr",
15 | "aes128-ctr"]
16 | ```
17 |
18 | Or at an OS-spec level:
19 | ```toml
20 | [os_name.aireos]
21 | ssh_configs.kex_algs = ["ecdh-sha2-nistp256", "diffie-hellman-group14-sha1"]
22 | ssh_configs.encryption_algs = ["aes128-cbc,3des-cbc"]
23 | ```
24 |
25 | If both global and OS-spec SSH configuration options are provided the OS-spec
26 | option will be used; i.e. overrides the specific option if it was present
27 | in the global options.
28 |
29 | For details on the specific SSH options, refer to the AsyncSSH option names, [here](https://asyncssh.readthedocs.io/en/stable/api.html#asyncssh.SSHClientConnectionOptions)
30 | and supported option values, [here](https://asyncssh.readthedocs.io/en/stable/api.html#supported-algorithms).
31 |
32 | *NOTE: A future version of AsyncSSH will support the use of ssh_config file(s)*
--------------------------------------------------------------------------------
/docs/config-vcs-git.md:
--------------------------------------------------------------------------------
1 | # Git VCS Configuration
2 | When you want to store your configuration files in a git based version control system you
3 | will need to define at least one `[[git]]` section in your configuration file.
4 | You must use one of the following git authentication methods:
5 |
6 | * Git Token
7 | * Git SSH deployment key without passphrase
8 | * Git SSH deployment key with passphrase
9 |
10 | ___
11 |
12 | :question: If you are not certain which method you want to use, refer to the document
13 | links in the [References](#References) below.
14 |
15 | ___
16 |
17 |
18 | You can define more than one `[[git]]` section in your configuration file
19 | so that you can use different repositories or credential methods.
20 |
21 | For information on using the `netcfgbu vcs` subcommands, see [Using VCS
22 | Subcommands](usage-vcs.md).
23 |
24 | ## Configuration Options
25 | Each `[[git]]` section supports the following options:
26 |
27 | **name**
28 | When your configuration file contains multiple `[[git]]` sections you can
29 | assign a name so that you can use the `--name` option when running the
30 | `netcfgbu vcs` subcommands.
31 |
32 | **repo**
33 | This is the git repository git URL that is found on the git repository
34 | page when you select "Clone or Download". The value will begin either with "https://"
35 | when using HTTPS mode or "git@" when using SSH mode.
36 |
37 | **username** (Optional)
38 | When provided this value will be used to indicate the user-name
39 | when files are stored into the git repo. If you do not configure
40 | this option then the environment `$USER` value is used. This
41 | option supports the use of Environment variables.
42 |
43 | **email** (Optional)
44 | When provided this value will be used to indicate the user email
45 | address when files are stored into the git repo. If you do not
46 | configure this option then the `username` value is used.
47 |
48 | **token** (Optional)
49 | If you configure this value then `netcfgbu` will use this token
50 | to access your git repository as the credential method. This
51 | option supports the use of Environment variables. Use this
52 | option if your `repo` field begins with "https://".
53 |
54 | **deploy_key** (Optional)
55 | This option indicates the filepath to the SSH private-key file. Use
56 | this option if your `repo` field begins with "git@". This
57 | option supports the use of Environment variables.
58 |
59 | **deploy_passphrase** (Optional)
60 | This option is required if your deployment key was created with a passphrase.
61 | This option supports the use of Environment variables.
62 |
63 | ## Examples
64 | ```toml
65 | [[git]]
66 | # the first entry does not require a name and it will be treated
67 | # as a default; i.e. when the --name option is omitted.
68 | repo = "https://github.mycorp.com/jschulman/test-network-configs.git"
69 | token = "$GIT_TOKEN"
70 |
71 | [[git]]
72 | # example of using a deployment key that does not use a passphrase
73 | name = "demo-key-no-pw"
74 | repo = "git@gitlab.com:jschulman/test-network-configs.git"
75 | deploy_key = "$HOME/test-config-backups"
76 |
77 | [[git]]
78 | # example of using a deployment key that uses a passphrase
79 | name = "demo-key-pw"
80 | repo = "git@github.mycorp.com:jschulman/test-network-configs.git"
81 | deploy_key = "$HOME/pwtest-backups"
82 | deploy_passphrase = "$GITKEY_PASSWORD"
83 | ```
84 |
85 | ## References
86 | For more information about the tradeoffs of using Tokens vs. Deployment Keys
87 | see [this document](https://developer.github.com/v3/guides/managing-deploy-keys/).
88 |
89 | For more information about using Github/Gitlab Tokens see [Github](https://help.github.com/en/github/authenticating-to-github/creating-a-personal-access-token-for-the-command-line)/[Gitlab](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html).
90 |
91 | For more information about using Github/Gitlab deployment keys see [Github](https://developer.github.com/v3/guides/managing-deploy-keys/#deploy-keys)/[Gitlab](https://docs.gitlab.com/ee/user/project/deploy_keys/).
--------------------------------------------------------------------------------
/docs/configuration-file.md:
--------------------------------------------------------------------------------
1 | # Configuration File
2 |
3 | `netcfgbu` requires you to setup a TOML configuration file. The default
4 | file name is `netcfgbu.toml` and is searched for in the current working directory.
5 | You can override this location using the -C option or setting your
6 | environment variable `NETCFGBU_CONFIG`.
7 |
8 | (See [example config file](../netcfgbu.toml))
9 |
10 | By default, without any OS specific configurations, when you run `netcfgbu
11 | backup` the tool will attempt to login to the devices provided in the inventory
12 | file using username/password credentials, execute the `show running-config`
13 | command, and save the contents to a file called _$host_.cfg into your current
14 | directory. For example, if you are using Arista EOS or Cisco IOS-XE devices
15 | `netcfgbu` will work out-of-the box without any OS specific configuration.
16 |
17 | Most devices, however, require you to disable paging. Some devices use
18 | different commands to obtain the running configuration. `netcfgbu` allows you
19 | to map the OS name values found in your inventory file to OS-name specific
20 | configuration sections. This approach allows you full choice and control of
21 | commands so that you can easily add new network OS devices without requiring
22 | code changes to `netcfgbu`.
23 |
24 | ## Defaults
25 |
26 | All of the default values support the use of environment variables as shown
27 | in the example below. All of these defaults also support the use
28 | of `NETCFGBU_` environment variables as described [here](environment_variables.md).
29 |
30 | **`inventory`**
31 | File path to the inventory CSV.
32 |
33 | **`credentials.username`**
34 | The default login user-name
35 |
36 | **`credentials.password`**
37 | The default login password. You should always use environment variables here,
38 | but you are not required to do so.
39 |
40 | Example:
41 | ```toml
42 | [defaults]
43 | inventory = "$PROJ_DIR/inventory.csv"
44 | credentials.username = 'nwkautomaniac'
45 | credentials.password = "$NETWORK_PASSWORD"
46 | ```
47 |
48 | ## Changing Storage Directory
49 | To change where the configuration files are stored you add the `configs_dir`
50 | variable to the defaults section, for example:
51 |
52 | ```toml
53 | [defaults]
54 | configs_dir = "$PROJ_DIR/configs"
55 | ```
56 |
57 | ## Plugins Directory
58 | To change where the plugin files are stored you add the `plugins_dir` variable to the defaults section, for example:
59 |
60 | ```toml
61 | [defaults]
62 | plugins_dir = "$PROJ_DIR/plugins"
63 | ```
64 |
65 | ## Logging
66 | To enable logging you can define the `[logging]` section in the configuration
67 | file. The format of this section is the standard Python logging module, as
68 | documented [here]( https://docs.python.org/3/library/logging.config.html).
69 |
70 | The logger name for `netcfgbu` is "netcfgbu".
71 | See the [sample netcfgbu.toml](../netcfgbu.toml) for further details.
72 |
--------------------------------------------------------------------------------
/docs/environment_variables.md:
--------------------------------------------------------------------------------
1 | # Environment Variables
2 |
3 | The following environment variables are used by `netcfgbu`
4 |
5 | **NETCFGBU_CONFIG**
6 | Path to the `netcfgbu` configuration file.
7 |
8 | **NETCFGBU_INVENTORY**
9 | Path to the `netcfgbu` inventory (CSV) file.
10 |
11 | **NETCFGBU_CONFIGSDIR**
12 | Directory where the network configuration backups will be stored.
13 |
14 | **NETCFGBU_PLUGINSDIR**
15 | Directory where `netcfgbu` plugins will be stored.
16 |
17 | **NETCFGBU_DEFAULT_USERNAME**
18 | The default credential login user name.
19 |
20 | **NETCFGBU_DEFAULT_PASSWORD**
21 | The default credential password.
22 |
--------------------------------------------------------------------------------
/docs/inventory.md:
--------------------------------------------------------------------------------
1 | # Inventory File
2 |
3 | The inventory file is a CSV file that MUST contain at a minimum two columns:
4 | `host` and `os_name`, for example:
5 |
6 | Example:
7 | ```csv
8 | host,os_name
9 | switch1,ios
10 | switch2,nxos
11 | fw1,asa
12 | ```
13 |
14 | **NOTE**: The values of `os_name` are entirely up to you as you will define OS
15 | specifications in the configuration file. The `netcfgbu` tool does
16 | not include any hardcoded OS names.
17 |
18 | If your host names cannot be resolved via DNS, then you MUST include the `ipaddr` column, for example:
19 |
20 | Example:
21 | ```csv
22 | host,os_name,ipaddr
23 | switch1,ios,10.1.123.1
24 | switch2,nxos,10.1.123.2
25 | fw1,asa,10.1.123.254
26 | ```
27 |
28 | If you need to provide host specific credentials, then you can add the columns `username` and `password`.
29 | Both of these columns support the use of environment variables.
30 |
31 | Example:
32 | ```csv
33 | host,os_name,ipaddr,username,password
34 | switch1,ios,10.1.123.1
35 | switch2,nxos,10.1.123.2
36 | fw1,asa,10.1.123.254,SecOpsAdmin,$SECOPS_PASSWORD
37 | ```
38 |
39 | You can add any additional columns, and use those column names for filtering purposes.
40 | See [Filtering Usage](usage-filtering.md) for additional information.
41 |
42 |
--------------------------------------------------------------------------------
/docs/mkbadges.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | #python -m pybadges \
4 | # --left-text="python" \
5 | # --right-text="3.8" \
6 | # --whole-link="https://www.python.org/" \
7 | # --embed-logo \
8 | # --logo='https://dev.w3.org/SVG/tools/svgweb/samples/svg-files/python.svg' > py38.svg
9 | #
10 | #GIT_BRANCH=$(git branch --show-current)
11 |
12 | # Build the version badge. The command substitution is quoted so that a
13 | # VERSION file containing whitespace cannot be word-split into extra args.
14 | python -m pybadges \
15 | --left-color="red" \
16 | --left-text="alpha" \
17 | --right-text="$(cat ../VERSION)" \
18 | --right-color="red" \
19 | --whole-link="https://www.python.org/" > version.svg
18 |
--------------------------------------------------------------------------------
/docs/motivation.md:
--------------------------------------------------------------------------------
1 | # Motivation for Netcfgbu
2 | I hope to share some of my design goals and considerations for creating
3 | `netcfgbu`. We are all on this network automation journey, and this is a bit
4 | of story-time-around-the-campfire. Please enjoy.
5 |
6 | ### Preface
7 | As the lead network automation software engineer responsible for building and
8 | maintaining tools and systems in support of a multi-vendor network, I need a
9 | reliable and simple to use tool to perform network configuration backups and
10 | store those configurations in a version control systems, in my case Github. I
11 | looked around the open-source ecosystem for existing tools and found
12 | [Oxidized](https://github.com/ytti/oxidized) as a more modern version of the
13 | legacy [Rancid](https://shrubbery.net/rancid/) system. I decided against
14 | spending time with Oxidized primarily because it was not written in Python. My
15 | experience with open-source projects is at some point I will need to dig
16 | through the code and either fix issues that I experience in my network or add
17 | features that I need. I have a very long career developing commercial and
18 | open-source products in many languages, including Perl (Rancid) and Ruby
19 | (Oxidized). That said, as a member of the network automation community in 2020
20 | I find myself working with Python as the de-facto programming language promoted
21 | and presented as "The Next Thing".
22 |
23 | ### Guiding Principles
24 | As a member of the network automation community every tool or system that I
25 | build is done with the following two principles in mind. First and foremost is
26 | to make decisions that will increase reliability so that Users will build trust
27 | and confidence in using the tool. Second is to make decisions that reduce
28 | operator friction in installing, configuring, and using the tool. Ideally a
29 | User should be able to install and start using the tool very quickly so they
30 | get that first "Ah ha!" experience with little to no effort.
31 |
32 | ### Influences
33 | There are many tools that I've used over the years that have influenced the design
34 | and implementation decisions put into `netcfgbu`. There are many experiences
35 | that I have had working with open-source projects that have been influences.
36 |
37 | #### Limit Dependencies
38 | I wanted to build `netcfgbu` with the minimum number of dependencies in terms
39 | of 3rd-party packages. There are many network device driver libraries out
40 | there, such as [Netmiko](https://github.com/ktbyers/netmiko),
41 | [NAPALM](https://github.com/napalm-automation/napalm), and
42 | [PyATS](https://github.com/CiscoTestAutomation/pyats) to name a few. These
43 | libraries are great in their own regards and can be used to build very
44 | sophisticated tools. That said, the `netcfgbu` tool does not need these
45 | libraries to perform the very simple function that it is defined to do. While
46 | I could have used an amalgamation of these libraries to quickly create
47 | `netcfgbu` it would mean that I would be "dragging along" a lot of dependencies
48 | that I did not need. My experience with open-source projects is that at
49 | some point you end up in a "dependency Hellscape" sorting out conflicting
50 | package requirements. As a means to reduce complexity and increase reliability
51 | the `netcfgbu` is built upon a single SSH library, [asyncssh](https://github.com/ronf/asyncssh).
52 |
53 | #### Simplifying Constraints
54 | A simplifying constraint is a "rule" that allows you to make an implementation
55 | decision that results in less complex code. For example, the `netcfgbu` tool
56 | requires that any credential you use **MUST** allow the necessary `get_config`
57 | and `pre_get_config` commands without having to execute any privilege enable
58 | commands. This simplifying constraint results in the fact that the `netcfgbu`
59 | tool does not need to account for running any privilege mode commands or
60 | dealing with the complexity associated with those mechanisms.
61 |
62 | I would further go so far as to submit that as a network automation best-practice
63 | you should create a specific login account to perform the config backup service, for example
64 | "backup-svcadmin", whose command privileges were limited to only those
65 | required by the `get_config` and `pre_get_config` commands to ensure that this
66 | account could do nothing more than perform network backups.
67 |
68 | #### Decouple the What from the How
69 | A tool like `netcfgbu` needs to process a list of devices. This inventory
70 | needs to originate from somewhere whether it is a CSV file or a source of truth
71 | system like Netbox. In any case a User's inventory system(s) is going to
72 | maintain specific fields for each device record, for example `hostname` or
73 | `name` or `os_family` or `platform` or `management_ipaddr`. A User could
74 | potentially have multiple "sources of truths" spreadsheets that need to all be
75 | considered for use. Regardless of the origin format and field names, the
76 | `netcfgbu` tool operates using a simple CSV file system. As a User of
77 | `netcfgbu` you are required to create an inventory CSV file based on your
78 | inventory sources and adhere to a few basic field-name requirements, as
79 | described in the [Inventory](inventory.md) docs. You could create this file by
80 | hand, or you could build it dynamically from a SoT such as Netbox. As a result
81 | of this decision, the `netcfgbu` provides the subcommand `inventory build` so
82 | that you can run a script that will generate your inventory CSV file on demand
83 | as needed. This approach was loosely inspired by the concept of the Ansible
84 | dynamic-inventory script mechanisms.
85 |
86 | Another very important decoupling decision was made with regard to notion of
87 | "supported network devices." I wanted to build a tool that did not have a list
88 | of "supported devices". All devices that adhere to `netcfgbu` design
89 | principles are supported; and you will not need to wait for your device to be
90 | supported. The way this decision was approached was through the use of the
91 | inventory field `os_name`. `os_name` is used to uniquely identify the device
92 | operating system name (and potentially version). The value that you use in the
93 | field *is completely up to you*. There is no hard-coded mapping of supported
94 | OS names because `netcfgbu` is not built following a supported OS model design.
95 | For example you can use the value "nxos" or "nexus" or "nx-os" as your
96 | `os_name` for Cisco NX-OS based devices **so long as** you create the necessary
97 | configuration using that same name.
98 |
99 | By way of example from the [netcfgbu.toml](../netcfgbu.toml) sample, the
100 | following section says that any inventory item with the `os_name` field equal
101 | to "nxos" will use the command `show running-config | no-more` as the command
102 | to obtain the running configuration file.
103 |
104 | ```toml
105 | [os_name.nxos]
106 | get_config = 'show running-config | no-more'
107 | ```
108 |
109 | But if instead my inventory file used the `os_name` equal to "nexus", then my
110 | configuration file would be:
111 |
112 | ```toml
113 | [os_name.nexus]
114 | get_config = 'show running-config | no-more'
115 | ```
116 |
117 | #### Filtering Choice and Control
118 | One of the features that I really like with Ansible is the `--limit` option
119 | that allows you to target specific devices in your inventory so that you have
120 | fine-grain control over which devices you want to apply the command. Using
121 | that idea the `netcfgbu` provides both `--limit` and `--exclude` options, as
122 | described in the [Filtering](usage-filtering.md) docs. These options apply to
123 | any field in your inventory CSV, including any that you define for your own purposes.
124 | For example, the example [netbox_inventory.py](../netbox/netbox_inventory.py) script
125 | will create an inventory CSV file with fields "site", "role", and "region". As a
126 | result you can then use those fields in your `--limit` and `--exclude` options,
127 | for example:
128 |
129 | ```shell script
130 | $ netcfgbu login --limit site=dc1 --exclude role=leaf
131 | ```
132 |
133 | The `netcfgbu` also supports the @ construct again borrowed from
134 | Ansible so that you can filter based on host names present in the file. The
135 | example of retry-last-failed devices would look like this:
136 |
137 | ```shell script
138 | $ netcfgbu login --limit @failures.csv
139 | ```
140 |
141 | #### Troubleshooting
142 | I wanted `netcfgbu` to assist in the troubleshooting processes to determine
143 | that a device SSH port was open (probe) and that the configured credentials
144 | (login) would work. These two subcommands `probe` and `login` are provided for
145 | such a purpose so that you can determine if there would be any issues prior to
146 | executing the `backup` subcommand. I found that some of my older network
147 | infrastructure was running code that uses legacy SSH config-options, and I needed
148 | to make adjustments to the [SSH config-options](config-ssh-options.md) to account
149 | for them accordingly. As such the `netcfgbu` includes a `--debug-ssh=[1-3]` option
150 | that will provide very detailed information about the SSH exchange so you can
151 | determine the root cause of any SSH login failure.
152 |
153 | #### Credentials
154 | One of the more esoteric issues with network login is having to deal with the
155 | potential of having to try different credentials for login. You may find
156 | yourself at times with devices that, for some reason or another, cannot access
157 | your AAA server and you need to use the "break-glass" locally configured
158 | credential. Or you may have specific types of devices, for example firewalls,
159 | that use a different set of credentials for access. The `netcfgbu` tool was
160 | designed so that you can configure many different credentials that will be
161 | attempted in a specific order; see [Credentials](config-credentials.md) for
162 | further details.
163 |
164 | #### Speed
165 | Execution speed was a factor in the design decision as applied to the goal
166 | of reducing User friction. There are automation use-cases that follow
167 | a general pattern:
168 |
169 | * step-1: take snapshot of network configs before making changes to network
170 | * step-2: make network config changes
171 | * step-3: validate network is working as expected
172 | * step-4: take snapshot of network configs
173 |
174 | And if step-3 should fail we can revert to the configs in step-1.
175 |
176 | The design goal for speed is to reduce the amount of time spent in step-1 so
177 | that we can get to the actual work of steps 2 and 3. Consider the difference
178 | in User experience it would mean if step-1 took 60 minutes vs. 60 seconds.
179 |
180 | With that design goal in mind I chose to take advantage of the modern Python
181 | 3.8 asyncio features rather than using a traditional threading approach. In my
182 | many years dealing with multi-threaded applications, my net-net from all of it
183 | is to avoid when possible :-) Python 3.8 asyncio feature maturity coupled with
184 | the maturity of the asyncssh package allowed me to implement `netcfgbu` to
185 | maximize speed, reduce user friction, and increase reliability by avoiding
186 | threading.
187 |
188 | ## Fin
189 | I do hope this document sheds some light on my motivations for creating
190 | `netcfgbu`. My purpose in building this tool is in no way to diminish the work
191 | of tools such as Rancid and Oxidized. If you are using those tools and they
192 | work for you that is great! We all learn and grow by [standing on the
193 | shoulders of
194 | giants](https://en.wikipedia.org/wiki/Standing_on_the_shoulders_of_giants). If
195 | you are looking for a network backup configuration tool please give `netcfgbu`
196 | a try. I'm here to help if you get stuck or need any assistance.
197 |
198 | Cheers,
199 | -- Jeremy Schulman
200 | Twitter: @nwkautomaniac
201 | Slack: @nwkautomaniac on networktocode.slack.com
202 |
--------------------------------------------------------------------------------
/docs/plugins.md:
--------------------------------------------------------------------------------
1 | # NETCFGBU Plugins
2 |
3 | This page shows how to hook into key `netcfgbu` lifecycle events to enable users to write arbitrary Python code to be executed when different `netcfgbu` commands are executed.
4 |
5 | An example of this in use could be to notify a slack channel of the status of device backups. This could also be used to kick off an Ansible workflow to validate the backed up configurations using [Batfish](https://github.com/batfish/batfish).
6 |
7 | ## Lifecycle Hooks
8 | The following table describes the possible lifecycle hooks:
9 |
10 | | Name | Method | Description | Arguments |
11 | | --- | --- | --- | --- |
12 | | Backup Success | `backup_success` | `backup_success` is executed for every device that has successfully backed up. | `record`, `result` |
13 | | Backup Failed | `backup_failed` | `backup_failed` is executed for every device that has failed to backup. | `record`, `result` |
14 | | Report | `report` | `report` is executed at the end of `netcfgbu backup` command. | `report` |
15 | | Git Report | `git_report` | `git_report` is executed at the end of `netcfgbu vcs save`. | `success`, `tag_name` |
16 |
17 |
18 | ## Implementing Plugins
19 | First, to use `netcfgbu` plugins, a `plugins` directory needs to be identified within the `netcfgbu` configuration file or by using the environment variables. Please see [environment_variables](environment_variables.md) and [configuration-file](configuration-file.md) for the specifics.
20 |
21 | Within the `plugins` directory Python files can be created which subclass the `netcfgbu` Plugin class like so...
22 |
23 | ```python
24 | from netcfgbu import Plugin
25 |
26 | class ScienceLogic(Plugin):
27 | name = "ScienceLogic"
28 |
29 | def backup_success(rec: dict, res: bool):
30 | print("Backup Successful")
31 |
32 | def backup_failed(rec: dict, exc: str):
33 | print("Backup Failed")
34 |
35 | def report(report):
36 | print("Backup Report")
37 | ```
38 |
39 | Any number of Python files and classes can be created and they will all be executed within `netcfgbu`. Please see the [table](#lifecycle-hooks) for the number of hooks that are available.
40 |
41 | ## Example Output
42 | The following is an example of the above plugin in action.
43 |
44 | ```bash
45 | (venv) $ netcfgbu backup -C netcfgbu.toml
46 | Backup Successful
47 | # ------------------------------------------------------------------------------
48 | Summary: TOTAL=1, OK=1, FAIL=0
49 | START=2021-Feb-20 01:29:47 AM, STOP=2021-Feb-20 01:29:55 AM
50 | DURATION=7.829s
51 | # ------------------------------------------------------------------------------
52 | Backup Report
53 | ```
54 |
55 |
--------------------------------------------------------------------------------
/docs/troubleshooting.md:
--------------------------------------------------------------------------------
1 | # Troubleshooting
2 |
3 | #### Too many open files (EMFILE)
4 |
5 | If you see an error that includes the word `EMFILE` it means that netcfgbu
6 | is attempting to open more files than your system is currently allowed. If you
7 | are on a Unix or MacOS based system, you can observe the maximum allowed open
8 | files using the `ulimit` command, as shown:
9 |
10 | ```shell script
11 | $ ulimit -a
12 | -t: cpu time (seconds) unlimited
13 | -f: file size (blocks) unlimited
14 | -d: data seg size (kbytes) unlimited
15 | -s: stack size (kbytes) 8192
16 | -c: core file size (blocks) 0
17 | -v: address space (kbytes) unlimited
18 | -l: locked-in-memory size (kbytes) unlimited
19 | -u: processes 1418
20 | -n: file descriptors 256
21 | ```
22 |
23 | Change the `file descriptors` value to a larger value, for example 4096:
24 |
25 | ```shell script
26 | $ ulimit -n 4096
27 | ```
28 |
29 | #### Unable to SSH due to mismatch SSH-configs
30 |
31 | You may encounter a failure to SSH login/backup a device due to the
32 | fact that the device requires the use of legacy SSH config settings, and modern
33 | SSH implementations are starting to remove weaker algorithms by default. This
34 | is a typical problem if you are running some older network operating systems that
35 | used an older version of SSH libraries in their products.
36 |
37 | To troubleshoot these issues use the CLI option `--debug-ssh=2` when running
38 | the login subcommand. You will observe the following logging information:
39 |
40 | ```shell script
41 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Received key exchange request
42 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Key exchange algs: curve25519-sha256@libssh.org,ecdh-sha2-nistp521,ecdh-sha2-nistp256,ecdh-sha2-nistp384,diffie-hellman-group14-sha1
43 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Host key algs: rsa-sha2-512,rsa-sha2-256,ssh-rsa,ecdsa-sha2-nistp521,ssh-ed25519
44 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Client to server:
45 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Encryption algs: aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
46 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] MAC algs: hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha1-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-sha1
47 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Compression algs: none,zlib@openssh.com
48 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Server to client:
49 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Encryption algs: aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr
50 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] MAC algs: hmac-sha2-512-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha1-etm@openssh.com,hmac-sha2-512,hmac-sha2-256,hmac-sha1
51 | 2020-06-21 14:11:00,954 DEBUG: [conn=0] Compression algs: none,zlib@openssh.com
52 | ```
53 |
54 | Using the information provided, update your configuration file to include the required
55 | exchange settings, either in the global `[ssh_configs]` section or in the `[os_name.$name.ssh_configs]` section.
56 | There is an example of such a configuration in the sample [netcfgbu.toml](../netcfgbu.toml).
57 |
58 | For additional information refer to the [SSH-config options](config-ssh-options.md) documentation page.
--------------------------------------------------------------------------------
/docs/usage-filtering.md:
--------------------------------------------------------------------------------
1 | # Filtering Inventory
2 |
3 | This page describes how you can filter the inventory file so that only the
4 | desired items are used in the execution of a command. There are two methods
5 | for filtering records: by field-name(s) and by file-contents.
6 |
7 | Use the `--limit` option to include only those items that match on the filter criteria.
8 | Use the `--exclude` option to exclude inventory items based on the filter criteria.
9 |
10 | You can provide multiple filter options on the command-line to mix and match how you
11 | want to filter the inventory.
12 |
13 | ## Filter by Field Names
14 |
15 | You can filter using the inventory field names: `host` and `os_name`. The
16 | filter values are [regular expressions](https://regex101.com/).
17 |
18 | Example: Select a single host called myswitch1
19 |
20 | ```shell script
21 | $ netcfgbu backup --limit host=myswitch1
22 | ```
23 |
24 | Example: Select all hosts that use "iosxe" or "nxos" as the network os_name:
25 | ```shell script
26 | $ netcfgbu backup --limit 'os_name=iosxe|nxos'
27 | ```
28 |
29 | Example: Select all hosts that do _not_ use "iosxe" or "nxos" as the network os_name:
30 | ```shell script
31 | $ netcfgbu backup --exclude 'os_name=iosxe|nxos'
32 | ```
33 |
34 | Example: Select all hosts that use "iosxe" or "nxos" **and** have a name suffix of "mycorp.com"
35 | ```shell script
36 | $ netcfgbu backup --limit 'os_name=iosxe|nxos' --limit 'host=.*mycorp.com'
37 | ```
38 |
39 | Example: Select a host with a specific IP addresses:
40 | ```shell script
41 | $ netcfgbu backup --limit "ipaddr=10.0.20.10"
42 | ```
43 |
44 | Example: Select all hosts with IP addresses in a given prefix:
45 | ```shell script
46 | $ netcfgbu backup --limit "ipaddr=2620:10:abcd::/64"
47 | ```
48 |
49 | Example: Select all hosts with IP addresses that match a regex:
50 | ```shell script
51 | $ netcfgbu backup --limit "ipaddr=10.(10|30).5.\d+"
52 | ```
53 |
54 |
55 |
56 |
57 | ## Filter by CSV File Contents
58 | If the filter expression begins with an at-symbol (@), then the contents of the
59 | file are used to filter the inventory. Any line that begins with a hash (#)
60 | will be ignored. The CSV file must contain the `host` column-field.
61 |
62 | Example:
63 | ```shell script
64 | $ netcfgbu backup --exclude @failures.csv
65 | ```
66 |
--------------------------------------------------------------------------------
/docs/usage-vcs.md:
--------------------------------------------------------------------------------
1 | # Using VCS Git Subcommands
2 |
3 | When you are using a Git Version Control System to store your configuration files you can use the
4 | `netcfgbu vcs` subcommands. Before using these commands ensure you have
5 | setup your configuration file as described [here](config-vcs-git.md).
6 |
7 | Each of the vcs subcommands support the `--name` option if your configuration
8 | file contains more than one `[[git]]` section.
9 |
10 | ## Preparing Your Configs Directory
11 |
12 | As a one-time initial step you will need to run the `prepare` subcommand so that the
13 | directory used for config backups (`configs_dir`) is initialized for use with your chosen platform.
14 |
15 | This command will run the necessary command to initialize the directory for
16 | git usage and fetch the current git repository files.
17 |
18 | ```shell script
19 | $ netcfgbu vcs prepare
20 | ```
21 |
22 | If you have more than one `[[git]]` configuration section defined, you can
23 | use the `--name` option.
24 |
25 | For example, if you have a configuration with `name = "firewalls"` defined you
26 | would run:
27 |
28 | ```shell script
29 | $ netcfgbu vcs prepare --name firewalls
30 | ```
31 |
32 | ## Saving to Git
33 |
34 | Once you have completed the backup process and you want to store your changes
35 | into the git repository you run the `save` command. By default `netcfgbu`
36 | will create a git tag (release) based on the current timestamp in the format
37 | `<YYYYMMDD>_<HHMMSS>`. For example, if
38 | you run the `save` command on June 12, 2020 at 1:35p the tag release name would
39 | be `20200612_133500`. If you want to explicitly set the tag-release name use the
40 | `--tag-name` option.
41 |
42 | ---
43 |
44 | :warning: If there are no actual changes to the files in `configs_dir`
45 | then the `save` command will not make any updates to git.
46 |
47 | ---
48 |
49 |
50 | ### Examples:
51 |
52 | Save using the first `[[git]]` configuration and the default tag-name
53 |
54 | ```shell script
55 | $ netcfgbu vcs save
56 | ```
57 |
58 | Save the configs using the tag-name "pre-change-ticket12345"
59 |
60 | ```shell script
61 | $ netcfgbu vcs save --tag-name pre-change-ticket12345
62 | ```
63 |
64 | Save using the git configuration named "firewalls"
65 |
66 | ```shell script
67 | $ netcfgbu vcs save --name firewalls
68 | ```
69 |
70 | ## Checking the Status of Changes before You Save
71 |
72 | If after running your backup process you want to see the status of changes that
73 | would be made to your git platform you can run the `status` command. The output of this
74 | command is the same as if you ran `git status` in the `configs_dir`.
75 |
76 | Example when no changes / differences in `configs_dir`:
77 | ```shell script
78 | $ netcfgbu vcs status
79 | 2020-06-12 11:32:22,722 INFO:
80 | VCS diffs git: https://github.mycorp.com/jschulman/test-network-configs.git
81 | dir: /home/jschulman/Projects/NetworkBackup/configs
82 |
83 | On branch master
84 | nothing to commit, working tree clean
85 | ```
86 |
87 | Example when changes in `configs_dir`
88 | ```shell script
89 | $ netcfgbu vcs status
90 | 2020-06-12 11:34:27,786 INFO:
91 | VCS diffs git: https://github.mycorp.com/jschulman/test-network-configs.git
92 | dir: /home/jschulman/Projects/NetworkBackup/configs
93 |
94 | On branch master
95 | Changes not staged for commit:
96 |   (use "git add <file>..." to update what will be committed)
97 |   (use "git restore <file>..." to discard changes in working directory)
98 | modified: switch01.cfg
99 | modified: switch02.cfg
100 |
101 | no changes added to commit (use "git add" and/or "git commit -a")
102 | ```
--------------------------------------------------------------------------------
/docs/version.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/inventory.csv:
--------------------------------------------------------------------------------
1 | #
2 | # NOTES:
3 | # - the first row MUST be the header for the csv file. Do not add a # in front of it, that will cause it to be ignored
4 | # - username and password are optional. you can specify them in the TOML configuration file if it is the same for all devices of a specific OS type
5 | #
6 | ipaddr,host,os_name,username,password
7 | # 1.1.1.1,test,ios,cisco,cisco
8 |
--------------------------------------------------------------------------------
/netbox/README.md:
--------------------------------------------------------------------------------
1 | # Netbox Integration
2 |
3 | As a User of Netbox I want to use the Devices as the inventory for my backup process. I want to dynamically
4 | create the `netcfgbu` inventory.csv file based on Netbox inventory filters such as site, region, or tags. I
5 | only want to include devices that are in the "Active" status and have a Primary IP address assigned.
6 |
7 | You can find the example Netbox script [netbox_inventory.py](netbox_inventory.py):
8 |
9 | ```shell script
10 | usage: netbox_inventory.py [-h] [--site SITE] [--region REGION] [--role ROLE] [--exclude-role EXCLUDE_ROLE]
11 | [--exclude-tag EXCLUDE_TAG] [--output OUTPUT]
12 |
13 | optional arguments:
14 | -h, --help show this help message and exit
15 | --site SITE limit devices to site
16 | --region REGION limit devices to region
17 | --role ROLE limit devices with role
18 | --exclude-role EXCLUDE_ROLE
19 | exclude devices with role
20 | --exclude-tag EXCLUDE_TAG
21 | exclude devices with tag
22 | --output OUTPUT
23 | ```
24 |
25 | ## Setup
26 |
27 | #### Environment
28 | To use the `netbox_inventory.py` script you will need to export two environment variables:
29 |
30 | **NETBOX_ADDR**
31 | The URL to your netbox server, for example: "https://netbox.mycorp.com"
32 |
33 | **NETBOX_TOKEN**
34 | The Netbox API token that has read access to the system.
35 |
36 | #### Configuration File
37 |
38 | Ensure your `netcfgbu.toml` file includes an `[inventory]` definition to execute the script to generate
39 | the inventory.csv file.
40 |
41 | The following example has the script located in /usr/local/bin, will exclude
42 | any device that has a tag "no-backup", and will save the contents to the file
43 | "inventory.csv"
44 |
45 | Example:
46 | ```toml
47 | [[inventory]]
48 | name = 'netbox'
49 | script = "/usr/local/bin/netbox_inventory.py --exclude-tag no-backup --output inventory.csv"
50 | ```
51 |
52 | ## Execution
53 |
54 | To build the inventory run the following command:
55 |
56 | ```shell script
57 | $ netcfgbu inventory build --name netbox
58 | ```
59 |
60 | As output you will see similar:
61 | ```shell script
62 | 2020-06-09 20:03:35,412 INFO: Executing script: [/usr/local/bin/netbox_inventory.py --exclude-tag no-backup --output inventory.csv]
63 | ```
64 |
65 | When the build completes you can get a summary of the inventory:
66 |
67 | ```shell script
68 | $ netcfgbu inventory ls --brief
69 | ```
70 |
71 | ## Limitations
72 |
73 | This netbox_inventory.py script is currently written to work with Netbox 2.6,
74 | but a near-term future release will include support for later releases; as
75 | Netbox 2.7 API changed.
--------------------------------------------------------------------------------
/netbox/netbox_inventory.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3.8
2 | #
3 | # This script is used to retrieve the device inventory from a Netbox system and
4 | # emit the CSV file to either stdout (default) or a filename provided
5 | #
6 | # The following Environment variables are REQUIRED:
7 | #
8 | # NETBOX_ADDR: the URL to the NetBox server
9 | # "https://my-netbox-server"
10 | #
11 | # NETBOX_TOKEN: the NetBox login token
12 | # "e0759aa0d6b4146-from-netbox-f744c4489adfec48f"
13 | #
14 | # The following Environment variables are OPTIONAL:
15 | #
16 | # NETBOX_INVENTORY_OPTIONS
17 | # Same as the options provided by "--help"
18 | #
19 |
20 | import sys
21 | import argparse
22 | import os
23 | import csv
24 | from functools import lru_cache
25 |
26 | import requests # noqa
27 | from urllib3 import disable_warnings # noqa
28 |
29 |
30 | CSV_FIELD_NAMES = ["host", "ipaddr", "os_name", "role", "site", "region"]
31 |
32 |
def rec_to_csv(rec):
    """Convert one Netbox device record into a CSV row (see CSV_FIELD_NAMES)."""
    platform = rec["platform"]
    site_slug = rec["site"]["slug"]

    return [
        rec["name"],
        # primary IP is stored with a prefix-length suffix; keep only the address
        rec["primary_ip"]["address"].split("/")[0],
        platform["slug"] if platform else "N/A",
        rec["device_role"]["slug"],
        site_slug,
        get_site(site_slug)["region"]["slug"],
    ]
43 |
44 |
def cli():
    """Build the CLI option parser and return the parsed User options.

    When the NETBOX_INVENTORY_OPTIONS environment variable is set, its
    semicolon-separated value is parsed instead of sys.argv.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--site", action="store", help="limit devices to site")
    parser.add_argument("--region", action="store", help="limit devices to region")
    parser.add_argument("--role", action="append", help="limit devices with role(s)")
    parser.add_argument(
        "--exclude-role", action="append", help="exclude devices with role(s)"
    )
    parser.add_argument(
        "--exclude-tag", action="append", help="exclude devices with tag(s)"
    )
    parser.add_argument(
        "--output",
        type=argparse.FileType("w+"),
        default=sys.stdout,
        help="save inventory to filename",
    )

    env_value = os.environ.get("NETBOX_INVENTORY_OPTIONS")
    return parser.parse_args(env_value.split(";") if env_value else None)
71 |
72 |
class NetBoxSession(requests.Session):
    """A requests.Session bound to one Netbox server.

    The session injects the API token header, disables TLS certificate
    verification, and prefixes every request URL with the server base URL
    so callers pass only API paths (e.g. "/api/dcim/devices/").
    """

    def __init__(self, url, token):
        super().__init__()
        self.url = url
        self.verify = False
        self.headers["authorization"] = "Token %s" % token

    def prepare_request(self, request):
        # prepend the server base URL so callers supply only the API path
        request.url = self.url + request.url
        return super().prepare_request(request)
83 |
84 |
85 | netbox: NetBoxSession = None
86 |
87 |
@lru_cache()
def get_site(site_slug):
    """Return the Netbox site object matching *site_slug* (cached per slug)."""
    response = netbox.get("/api/dcim/sites/", params={"slug": site_slug})
    response.raise_for_status()
    return response.json()["results"][0]
93 |
94 |
def create_csv_file(inventory_records, cli_opts):
    """Write the CSV header followed by one row per inventory record."""
    writer = csv.writer(cli_opts.output)
    writer.writerow(CSV_FIELD_NAMES)
    writer.writerows(rec_to_csv(rec) for rec in inventory_records)
101 |
102 |
def fetch_inventory(cli_opts):
    """Retrieve the device inventory from Netbox.

    Returns an iterator of device records, filtered per the User-provided
    CLI options. Requires the NETBOX_ADDR and NETBOX_TOKEN environment
    variables; exits the program with an error message if either is missing
    or if the device query fails.
    """
    global netbox

    try:
        nb_url = os.environ["NETBOX_ADDR"]
        nb_token = os.environ["NETBOX_TOKEN"]
    except KeyError as exc:
        # fixed typo: "envirnoment" -> "environment"
        sys.exit(f"ERROR: missing environment variable: {exc.args[0]}")

    netbox = NetBoxSession(url=nb_url, token=nb_token)

    # -------------------------------------------------------------------------
    # perform a GET on the API URL to obtain the Netbox version; the value is
    # stored in the response header. convert to tuple(int) for comparison
    # purposes. If the Netbox version is after 2.6 the API status/choice
    # changed from int -> str.
    # -------------------------------------------------------------------------

    res = netbox.get("/api")
    api_ver = tuple(map(int, res.headers["API-Version"].split(".")))

    # only devices in "Active" status with a primary IP are eligible; the
    # config_context payload is excluded to keep the response size down.
    params = dict(limit=0, status=1, has_primary_ip="true")
    params["exclude"] = "config_context"

    if api_ver > (2, 6):
        params["status"] = "active"

    if cli_opts.site:
        params["site"] = cli_opts.site

    if cli_opts.region:
        params["region"] = cli_opts.region

    res = netbox.get("/api/dcim/devices/", params=params)
    if not res.ok:
        sys.exit("FAIL: get inventory: " + res.text)

    body = res.json()
    device_list = body["results"]

    # -------------------------------------------------------------------------
    # User Filters
    # -------------------------------------------------------------------------

    # If Caller provided an explicit list of device-roles, then filter the
    # device list based on those roles before creating the inventory

    filter_functions = []

    if cli_opts.role:

        def filter_role(dev_dict):
            return dev_dict["device_role"]["slug"] in cli_opts.role

        filter_functions.append(filter_role)

    if cli_opts.exclude_role:

        def filter_ex_role(dev_dict):
            return dev_dict["device_role"]["slug"] not in cli_opts.exclude_role

        filter_functions.append(filter_ex_role)

    if cli_opts.exclude_tag:
        ex_tag_set = set(cli_opts.exclude_tag)

        def filter_ex_tag(dev_dict):
            # keep the device only when none of its tags are excluded
            return not set(dev_dict["tags"]) & ex_tag_set

        filter_functions.append(filter_ex_tag)

    def apply_filters():
        # lazily yield only the devices that pass every active filter
        for dev_dict in device_list:
            if all(fn(dev_dict) for fn in filter_functions):
                yield dev_dict

    return apply_filters() if filter_functions else iter(device_list)
179 |
180 |
def build_inventory():
    """Entry point: parse CLI options, fetch the Netbox devices, emit the CSV."""
    opts = cli()
    device_records = fetch_inventory(opts)
    create_csv_file(device_records, opts)
185 |
186 |
if __name__ == "__main__":
    # suppress urllib3 InsecureRequestWarning noise (TLS verification is
    # disabled on the NetBoxSession) before building the inventory CSV
    disable_warnings()
    build_inventory()
190 |
--------------------------------------------------------------------------------
/netbox/requirements.txt:
--------------------------------------------------------------------------------
1 | requests
--------------------------------------------------------------------------------
/netcfgbu.toml:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------------------------
2 | #
3 | # Network Configuration Backup
4 | # Configuration File
5 | # -----------------------------------------------------------------------------
6 |
7 | # -----------------------------------------------------------------------------
8 | # Default Settings
9 | # -----------------------------------------------------------------------------
10 |
11 | [defaults]
12 | inventory = "$PROJ_DIR/inventory.csv"
13 | configs_dir = "$PROJ_DIR/configs"
14 | credentials.username = "$NETWORK_USERNAME"
15 | credentials.password = "$NETWORK_PASSWORD"
16 |
17 | # -----------------------------------------------------------------------------
18 | #
19 | # Jumphosts
20 | #
21 | # -----------------------------------------------------------------------------
22 |
23 | #[[jumphost]]
24 | # NOTE: your local SSH config file is not used, so make sure you provide
25 | # either the IP address or FQDN of the jumphost
26 |
27 | # proxy = "jschulman@my-dc1.com"
28 | # include = ['host=.*\.dc1']
29 | # you MUST provide an include filter. For all devices, use
30 | # ['host=.*']
31 | # exclude = ['os_name=asa']
32 |
33 | # -----------------------------------------------------------------------------
34 | #
35 | # Version Control System(s)
36 | #
37 | # Currently only git systems are supported, but other types may be
38 | # supported in the future. If you have more than one vcs repository you can
39 | # include the `name` field so that you can identify the vcs-repo using the CLI
40 | # --name option.
41 | # -----------------------------------------------------------------------------
42 |
43 | [[git]]
44 | # the first entry does not require a name and it will be treated
45 | # as a default; i.e. when the --name option is omitted.
46 | repo = "https://github.mycorp.com/jschulman/test-network-configs.git"
47 | token = "$GIT_TOKEN"
48 |
49 | [[git]]
50 | # example of using a deployment key that does not use a passphrase
51 | name = "ssh"
52 | repo = "git@gitlab.com:jschulman/test-network-configs.git"
53 | deploy_key = "$HOME/test-config-backups"
54 |
55 | [[git]]
56 | # example of using a deployment key that uses a passphrase
57 | name = "ssh-pw"
58 | repo = "git@github.mlbam.net:jschulman/test-network-configs.git"
59 | deploy_key = "$HOME/pwtest-backups"
60 | deploy_passphrase = "$GITKEY_PASSWORD"
61 |
62 | # -----------------------------------------------------------------------------
63 | # Inventory Integrations
64 | # -----------------------------------------------------------------------------
65 |
66 | [[inventory]]
67 | name = 'netbox'
68 | script = '''$PROJ_DIR/netcfgbu/netbox/netbox_inventory.py \
69 | --exclude-tag no-backup --exclude-tag no-ssh \
70 | --output inventory.csv'''
71 |
72 | # -----------------------------------------------------------------------------
73 | # Global Credentials
74 | # -----------------------------------------------------------------------------
75 |
76 | #[[credentials]]
77 | # username = "superadmin"
78 | # password = "$ENABLE_PASSWORD"
79 |
80 | # -----------------------------------------------------------------------------
81 | # Global SSH Configs
82 | # -----------------------------------------------------------------------------
83 |
84 | [ssh_configs]
85 | kex_algs = [
86 | 'ecdh-sha2-nistp256',
87 | 'diffie-hellman-group14-sha1',
88 | 'diffie-hellman-group1-sha1'
89 | ]
90 |
91 | # -----------------------------------------------------------------------------
92 | #
93 | # Network OS Specifications
94 | #
95 | # -----------------------------------------------------------------------------
96 |
97 | # -----------------------------------------------------------------------------
98 | # Cisco switches, routers, and firewalls
99 | # -----------------------------------------------------------------------------
100 |
101 | [os_name.ios]
102 | pre_get_config = "terminal length 0"
103 | linter = "ios"
104 |
105 | [os_name.iosxe]
106 | linter = 'ios'
107 |
108 | [os_name.nxos]
109 | get_config = 'show running-config | no-more'
110 | linter = "nxos"
111 |
112 | [os_name.iosxr]
113 | linter = "iosxr"
114 |
115 | [os_name.asa]
116 | pre_get_config = 'terminal pager 0'
117 |
118 | # -----------------------------------------------------------------------------
119 | # Cisco WLC
120 | # -----------------------------------------------------------------------------
121 |
122 | [os_name.aireos]
123 | # support for 8.5 release requires a special connector to handle the User &
124 | # Password prompts
125 |
126 | get_config = "show run-config commands"
127 | pre_get_config = "config paging disable"
128 | connection = "netcfgbu.connectors.ssh.LoginPromptUserPass"
129 |
130 | [os_name.aireos8_10]
131 | # extending the timeout to 3min due to observations with the 8.10 release
132 | # resulting in sporadic delays in the output of the config.
133 |
134 | timeout = 300
135 |
136 | get_config = "show run-config commands"
137 | pre_get_config = "config paging disable"
138 |
139 | # need to explicitly set the Key Exchange algorithms to support the 8.10
140 | # SSH configured requirements; can be set here or in your ssh_config file.
141 |
142 | # ssh_configs.kex_algs = [
143 | # 'ecdh-sha2-nistp256',
144 | # 'diffie-hellman-group14-sha1'
145 | # ]
146 |
147 | # NOTE: if you do not have these devices in your inventory, delete this section
148 | # otherwise you will get an error if the environment variables are not defined
149 | [[os_name.aireos8_10.credentials]]
150 | username = "$WLC_USERNAME"
151 | password = "$WLC_PASSWORD"
152 |
153 | # -----------------------------------------------------------------------------
154 | # Palo Alto PANOS Firewalls
155 | # -----------------------------------------------------------------------------
156 |
157 | [os_name.panos]
158 | pre_get_config = [
159 | "set cli pager off",
160 | "configure"
161 | ]
162 | get_config = "show"
163 | linter = "panos"
164 |
165 | # -----------------------------------------------------------------------------
166 | # Cumulus Linux
167 | # -----------------------------------------------------------------------------
168 |
169 | [os_name.cumulus]
170 | # NOTE: make sure that the user has password-less sudo access, otherwise the
171 | # get_config execution will fail. There is no current workaround for this
172 | # requirement. Also pre_get_config does not work for Cumulus devices at this time.
173 | #
174 | # Do not change the order of the cat commands either. This ensures the final
175 | # file format is recognized by Batfish (https://github.com/batfish/batfish)
176 |
177 | get_config = "( cat /etc/hostname; cat /etc/network/interfaces; cat /etc/cumulus/ports.conf; sudo cat /etc/frr/frr.conf)"
178 |
179 | # example Cumulus prompt value: cumulus@leaf01:mgmt-vrf:~$
180 | prompt_pattern = '[a-z0-9.\-@:~]{10,65}\s*[#$]'
181 |
182 | # -----------------------------------------------------------------------------
183 | # Juniper JUNOS routers, switches and firewalls
184 | # -----------------------------------------------------------------------------
185 |
186 | [os_name.junos]
187 | # NOTE: Do not login as the root user. This will require you to enter CLI mode
188 | # prior to getting the configuration, which currently does not work for Juniper
189 | # devices.
190 |
191 | pre_get_config = [
192 | "set cli screen-length 0"
193 | ]
194 |
195 | get_config = "show configuration | display set"
196 | # you can return the configuration in hierarchical format by removing
197 | # `| display set` from the command above
198 |
199 | # -----------------------------------------------------------------------------
200 | # Linters
201 | # -----------------------------------------------------------------------------
202 |
203 | [linters.iosxr]
204 | config_starts_after = 'Building configuration'
205 |
206 | [linters.ios]
207 | config_starts_after = 'Current configuration'
208 |
209 | [linters.nxos]
210 | config_starts_after = '!Time:'
211 |
212 | [linters.panos]
213 | config_ends_at = "[edit]"
214 |
215 | # -----------------------------------------------------------------------------
216 | #
217 | # Version Control System(s)
218 | #
219 | # Currently only github systems are supported, but other types may be
220 | # supported in the future. If you have more than one vcs repository you can
221 | # include the `name` field so that you can identify the vcs-repo using the CLI
222 | # --name option.
223 | # -----------------------------------------------------------------------------
224 |
225 | [[vcs]]
226 | repo = "https://github.mycorp.com/jschulman/test-network-configs.git"
227 | token = "$GIT_TOKEN"
228 |
229 | # -----------------------------------------------------------------------------
230 | # Logging - follows Python format as described
231 | # https://docs.python.org/3/library/logging.config.html
232 | # -----------------------------------------------------------------------------
233 |
234 |
235 | [logging.loggers.netcfgbu]
236 | handlers = ["console", "file"]
237 | level = "INFO"
238 |
239 | [logging.loggers.asyncssh]
240 | # set the level to warning by default. If you want to enable debugging
241 | # use the '--debug-ssh' option to set the debug level from [1-3];
242 | # https://asyncssh.readthedocs.io/en/latest/api.html#asyncssh.set_debug_level
243 |
244 | handlers = ["console"]
245 | level = "WARNING"
246 |
247 | [logging.handlers.console]
248 | class = "logging.StreamHandler"
249 | formatter = "basic"
250 | stream = "ext://sys.stdout"
251 |
252 | [logging.handlers.file]
253 | class = "logging.FileHandler"
254 | formatter = "basic"
255 | filename = "netcfgbu.log"
256 |
257 | [logging.formatters.basic]
258 | format = "%(asctime)s %(levelname)s: %(message)s"
259 |
--------------------------------------------------------------------------------
/netcfgbu/__init__.py:
--------------------------------------------------------------------------------
1 | from netcfgbu.plugins import Plugin
2 |
3 | __all__ = ["Plugin"]
4 |
--------------------------------------------------------------------------------
/netcfgbu/aiofut.py:
--------------------------------------------------------------------------------
1 | from typing import AsyncIterable, Iterable, Coroutine, Optional
2 | import asyncio
3 | from asyncio import Task
4 |
5 | __all__ = ["as_completed"]
6 |
7 |
async def as_completed(
    aws: Iterable[Coroutine], timeout: Optional[int] = None
) -> AsyncIterable[Task]:
    """
    Async generator that mimics concurrent.futures.as_completed: it yields
    each task as it finishes, and - unlike the builtin
    asyncio.as_completed - the yielded Task still exposes its originating
    coroutine via Task.get_coro().

    The builtin asyncio.as_completed yields anonymous futures, so the
    Caller has no way to map a completed future back to the coroutine that
    produced it. To work around this the coroutines are double-wrapped in
    futures, as explained in this Stack Overflow: https://bit.ly/2AsPtJE

    Parameters
    ----------
    aws:
        An iterable of coroutines that will be wrapped into futures and
        executed through the builtin asyncio.as_completed.

    timeout: int
        (same as asyncio.as_completed):
        If provided an asyncio.TimeoutError will be raised if all of the
        coroutines have not completed within the timeout value.

    Yields
    ------
    asyncio.Task
        The completed task; call .get_coro() to recover the originating
        coroutine and .result() for its return value.

    Examples
    --------
    # create a dictionary of key=coroutine, value=dict, where the value
    # will be used later when the coroutine completes

    tasks = {
        probe(rec.get('ipaddr') or rec.get('host')): rec
        for rec in inventory
    }

    async for probe_task in as_completed(tasks):
        try:
            # map the completed task back to its inventory record via the
            # originating coroutine

            rec = tasks[probe_task.get_coro()]
            probe_ok = 'OK' if probe_task.result() else 'FAIL'
            report[probe_ok].append(rec)

        except OSError as exc:
            probe_ok = 'ERROR'
            report['ERROR'].append((rec, exc))

        print(f"{rec['host']}: {probe_ok}")
    """
    loop = asyncio.get_running_loop()

    # Wrap each coroutine as outer[inner[coroutine]]: the inner future runs
    # the coroutine; when it finishes, its done-callback pushes the
    # completed Task into the outer future via set_result(). The outer
    # future is what the builtin as_completed watches, and its result *is*
    # the finished Task - which still knows its originating coroutine.

    def _double_wrap(coro):
        inner = asyncio.ensure_future(coro)
        outer = loop.create_future()
        inner.add_done_callback(outer.set_result)
        return outer

    wrapped = [_double_wrap(coro) for coro in aws]

    for next_done in asyncio.as_completed(wrapped, timeout=timeout):
        yield await next_done
89 |
--------------------------------------------------------------------------------
/netcfgbu/cli/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jeremyschulman/netcfgbu/c2056f07aefa7c9e584fc9a34c9971100df7fa49/netcfgbu/cli/__init__.py
--------------------------------------------------------------------------------
/netcfgbu/cli/backup.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import click
4 |
5 | from netcfgbu.os_specs import make_host_connector
6 | from netcfgbu.logger import get_logger, stop_aiologging
7 | from netcfgbu.aiofut import as_completed
8 | from netcfgbu import jumphosts
9 | from netcfgbu.plugins import Plugin, load_plugins
10 |
11 | from .root import (
12 | cli,
13 | WithInventoryCommand,
14 | opt_config_file,
15 | opts_inventory,
16 | opt_batch,
17 | opt_debug_ssh,
18 | )
19 |
20 | from .report import Report
21 |
22 |
def exec_backup(app_cfg, inventory_recs):
    """Run the config-backup process across all inventory records.

    Creates one backup coroutine per inventory record, executes them
    concurrently (optionally through jumphosts), records per-host
    pass/fail results into a Report, and invokes the Plugin hooks for
    success, failure, and the final report.
    """
    log = get_logger()

    # map each backup coroutine to its inventory record so the record can
    # be recovered when the task completes (see aiofut.as_completed);
    # removed the dead `backup_tasks = dict()` pre-assignment
    backup_tasks = {
        make_host_connector(rec, app_cfg).backup_config(): rec for rec in inventory_recs
    }

    total = len(backup_tasks)
    report = Report()
    done = 0

    async def process_batch():
        nonlocal done

        if app_cfg.jumphost:
            await jumphosts.connect_jumphosts()

        async for task in as_completed(backup_tasks):
            done += 1
            coro = task.get_coro()
            rec = backup_tasks[coro]
            msg = f"DONE ({done}/{total}): {rec['host']} "

            try:
                res = task.result()
                ok = res is True
                report.task_results[ok].append((rec, res))
                Plugin.run_backup_success(rec, res)

            except (asyncio.TimeoutError, OSError) as exc:
                # expected connection/timeout failures: record without
                # extra error logging
                ok = False
                report.task_results[False].append((rec, exc))
                Plugin.run_backup_failed(rec, exc)

            except Exception as exc:
                # unexpected failure: log it in addition to recording it
                ok = False
                log.error(msg + f"FAILURE: {str(exc)}")
                report.task_results[False].append((rec, exc))
                Plugin.run_backup_failed(rec, exc)

            # fixed log label: was "FALSE", now matches the "PASS" wording
            log.info(msg + ("PASS" if ok else "FAIL"))

    loop = asyncio.get_event_loop()
    report.start_timing()
    loop.run_until_complete(process_batch())
    report.stop_timing()
    stop_aiologging()
    report.print_report()
    Plugin.run_report(report)
74 |
75 |
@cli.command(name="backup", cls=WithInventoryCommand)
@opt_config_file
@opts_inventory
@opt_debug_ssh
@opt_batch
@click.pass_context
def cli_backup(ctx, **_cli_opts):
    """
    Backup network configurations.
    """
    # WithInventoryCommand has already loaded the app config and inventory
    # records into ctx.obj; load any User plugins before running the backup.
    load_plugins(ctx.obj["app_cfg"].defaults.plugins_dir)
    exec_backup(app_cfg=ctx.obj["app_cfg"], inventory_recs=ctx.obj["inventory_recs"])
88 |
--------------------------------------------------------------------------------
/netcfgbu/cli/inventory.py:
--------------------------------------------------------------------------------
1 | from collections import Counter
2 | from operator import itemgetter
3 | from textwrap import indent
4 |
5 | import click
6 | from tabulate import tabulate
7 |
8 | from netcfgbu.config_model import AppConfig
9 | from netcfgbu.inventory import build
10 |
11 | from .root import (
12 | cli,
13 | get_spec_nameorfirst,
14 | WithInventoryCommand,
15 | WithConfigCommand,
16 | opt_config_file,
17 | opts_inventory,
18 | )
19 |
20 | from .report import LN_SEP, SPACES_4
21 |
22 |
23 | # -----------------------------------------------------------------------------
24 | # Inventory Commands
25 | # -----------------------------------------------------------------------------
26 |
27 |
@cli.group(name="inventory")
def cli_inventory():
    """
    Inventory subcommands.
    """
    # command-group container only; subcommands are attached below
    pass  # pragma: no cover
34 |
35 |
@cli_inventory.command("list", cls=WithInventoryCommand)
@opt_config_file
@opts_inventory
@click.option("--brief", "-b", is_flag=True)
@click.pass_context
def cli_inventory_list(ctx, **cli_opts):
    recs = ctx.obj["inventory_recs"]

    # tally records per os_name for the summary table, most common first
    os_counts = Counter(rec["os_name"] for rec in recs)
    summary_rows = sorted(os_counts.items(), key=itemgetter(1), reverse=True)

    os_name_table = indent(
        tabulate(headers=["os_name", "count"], tabular_data=summary_rows),
        SPACES_4,
    )

    print(LN_SEP)
    print(
        f"""
SUMMARY: TOTAL={len(recs)}

{os_name_table}
"""
    )

    if cli_opts["brief"] is True:
        return  # pragma: no cover

    # full listing: one row per inventory record, all columns
    field_names = recs[0].keys()
    print(
        tabulate(
            headers=field_names,
            tabular_data=[rec.values() for rec in recs],
        )
    )
73 |
74 |
@cli_inventory.command("build", cls=WithConfigCommand)
@opt_config_file
@click.option("--name", "-n", help="inventory name as defined in config file")
@click.option("--brief", is_flag=True)
@click.pass_context
def cli_inventory_build(ctx, **cli_opts):
    """
    Build the inventory file.

    If the netcfgbu configuration file contains inventory definitions then you
    can use this command to run the script to build the inventory.
    """

    app_cfg: AppConfig = ctx.obj["app_cfg"]

    # resolve the requested inventory spec; without --name the first
    # defined [[inventory]] section is used
    if not (spec := get_spec_nameorfirst(app_cfg.inventory, cli_opts["name"])):
        cfg_opt = ctx.params["config"]
        inv_name = cli_opts["name"]
        inv_name = f"'{inv_name}'" if inv_name else ""
        err_msg = (
            f"Inventory section {inv_name} not defined in configuration file: {cfg_opt.name}"
            if cfg_opt
            else "Configuration file required for use with build subcommand"
        )
        raise RuntimeError(err_msg)

    build(spec)
102 |
--------------------------------------------------------------------------------
/netcfgbu/cli/lint.py:
--------------------------------------------------------------------------------
1 | # def exec_lint(app_cfg, inventory):
2 | #
3 | # lint_hosts = [
4 | # (rec, os_spec["linter"])
5 | # for rec in inventory
6 | # if "linter" in (os_spec := get_os_spec(rec, app_cfg))
7 | # ]
8 | #
9 | # try:
10 | # configs_dir = Path(app_cfg["defaults"]["configs_dir"])
11 | # except IndexError:
12 | # configs_dir = Path().cwd()
13 | #
14 | # log = get_logger()
15 | #
16 | # report = Report()
17 | #
18 | # report.start_timing()
19 | # for rec, linter in lint_hosts:
20 | # lint_spec = app_cfg["linters"][linter]
21 | # config_fileobj = configs_dir.joinpath(rec["host"] + ".cfg")
22 | # if not config_fileobj.exists():
23 | # log.warning(f"File not found: {config_fileobj.name}, skipping.")
24 | # report.task_results[False].append(
25 | # (rec, FileNotFoundError(config_fileobj.name))
26 | # )
27 | # continue
28 | #
29 | # try:
30 | # lint_file(config_fileobj, lint_spec)
31 | # except RuntimeWarning as exc:
32 | # log.warning(exc.args[0])
33 | # # do not count as failure
34 | # report.task_results[True].append((rec, exc))
35 | #
36 | # log.info(f"LINTED: {config_fileobj.name}")
37 | # report.task_results[True].append((rec,))
38 | #
39 | # report.stop_timing()
40 | # stop_aiologging()
41 | # report.print_report()
42 |
43 | # -----------------------------------------------------------------------------
44 | # Lint Commands
45 | # -----------------------------------------------------------------------------
46 |
47 | # TODO: Not included as the linting process is automatically done as
48 | # part of the get-config process. That said, in the future we may
49 | # provide as --no-lint option for get-config and then provide
50 | # lint commands for User post-processing.
51 | # @cli.command(name="lint", cls=WithInventoryCommand)
52 | # @opt_config_file
53 | # @opts_inventory
54 | # @click.pass_context
55 | # def cli_lint(ctx, **_cli_opts):
56 | # """
57 | # Remove unwanted content from network config files.
58 | # """
59 | # exec_lint(
60 | # app_cfg=ctx.obj["app_cfg"], inventory=ctx.obj["inventory_recs"],
61 | # )
62 |
--------------------------------------------------------------------------------
/netcfgbu/cli/login.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import click
4 |
5 | from netcfgbu.logger import get_logger, stop_aiologging
6 | from netcfgbu.aiofut import as_completed
7 | from netcfgbu.os_specs import make_host_connector
8 | from netcfgbu.connectors import set_max_startups
9 |
10 |
11 | from .root import (
12 | cli,
13 | WithInventoryCommand,
14 | opt_config_file,
15 | opts_inventory,
16 | opt_batch,
17 | opt_debug_ssh,
18 | opt_timeout,
19 | )
20 |
21 | from .report import Report, err_reason
22 | from netcfgbu import jumphosts
23 | from netcfgbu.config_model import AppConfig
24 | from netcfgbu.consts import DEFAULT_LOGIN_TIMEOUT
25 |
26 |
def exec_test_login(app_cfg: AppConfig, inventory_recs, cli_opts):
    """
    Attempt an SSH login to every inventory device and report the results.
    """
    timeout = cli_opts["timeout"] or DEFAULT_LOGIN_TIMEOUT

    # one login-test coroutine per inventory record, keyed so the record
    # can be recovered when tasks finish out of order
    login_tasks = {}
    for rec in inventory_recs:
        connector = make_host_connector(rec, app_cfg)
        login_tasks[connector.test_login(timeout=timeout)] = rec

    batch_n = cli_opts["batch"]
    if batch_n is not None:
        set_max_startups(batch_n)

    total = len(login_tasks)
    report = Report()
    completed = 0
    log = get_logger()

    async def run_logins():
        nonlocal completed

        # establish jumphost connections first, if any are configured
        if app_cfg.jumphost:
            await jumphosts.connect_jumphosts()

        async for task in as_completed(login_tasks):
            completed += 1
            rec = login_tasks[task.get_coro()]
            msg = f"DONE ({completed}/{total}): {rec['host']} "

            try:
                login_user = task.result()
                if login_user:
                    log.info(msg + f"with user {login_user}")
                    report.task_results[True].append(rec)
                else:
                    # test_login returned falsy: no credential worked
                    reason = "all credentials failed"
                    log.warning(msg + reason)
                    report.task_results[False].append((rec, reason))

            except asyncio.TimeoutError as exc:
                log.warning(msg + "Timeout")
                report.task_results[False].append((rec, exc))

            except Exception as exc:
                report.task_results[False].append((rec, exc))
                log.error(msg + f": {err_reason(exc)}")

    loop = asyncio.get_event_loop()
    report.start_timing()
    loop.run_until_complete(run_logins())
    report.stop_timing()
    stop_aiologging()
    report.print_report()
80 |
81 |
@cli.command(name="login", cls=WithInventoryCommand)
@opt_config_file
@opts_inventory
@opt_timeout
@opt_batch
@opt_debug_ssh
@click.pass_context
def cli_login(ctx, **cli_opts):
    """
    Verify SSH login to devices.
    """
    # ctx.obj is populated by WithInventoryCommand before this runs
    obj = ctx.obj
    exec_test_login(obj["app_cfg"], obj["inventory_recs"], cli_opts)
95 |
--------------------------------------------------------------------------------
/netcfgbu/cli/main.py:
--------------------------------------------------------------------------------
1 | from .root import cli
2 |
3 | from .inventory import cli_inventory # noqa
4 | from .probe import cli_check # noqa
5 | from .login import cli_login # noqa
6 | from .backup import cli_backup # noqa
7 | from .vcs import cli_vcs # noqa
8 |
9 |
def run():
    """Console-script entry point: invoke the root CLI command group."""
    cli(obj=dict())
12 |
--------------------------------------------------------------------------------
/netcfgbu/cli/probe.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | import click
4 |
5 | from netcfgbu.logger import get_logger, stop_aiologging
6 | from netcfgbu.aiofut import as_completed
7 | from netcfgbu.probe import probe
8 |
9 | from .root import (
10 | cli,
11 | WithInventoryCommand,
12 | opt_config_file,
13 | opts_inventory,
14 | opt_timeout,
15 | )
16 |
17 | from .report import Report
18 | from netcfgbu.consts import DEFAULT_PROBE_TIMEOUT
19 |
20 |
def exec_probe(inventory, timeout=None):
    """
    Probe each inventory device for SSH (TCP) reachability and report results.

    Parameters
    ----------
    inventory:
        Inventory record dicts; each must contain a "host" key and may
        contain an "ipaddr" key (preferred as the probe target).
    timeout:
        Per-device probe timeout in seconds; defaults to DEFAULT_PROBE_TIMEOUT.
    """
    inv_n = len(inventory)
    log = get_logger()
    log.info(f"Checking SSH reachability on {inv_n} devices ...")
    timeout = timeout or DEFAULT_PROBE_TIMEOUT

    loop = asyncio.get_event_loop()

    # map each probe coroutine to its record; prefer the ipaddr field and
    # fall back to the host name
    tasks = {
        probe(
            rec.get("ipaddr") or rec.get("host"), timeout=timeout, raise_exc=True
        ): rec
        for rec in inventory
    }

    total = len(tasks)
    done = 0
    report = Report()

    async def process_checks():
        nonlocal done

        async for probe_task in as_completed(tasks):
            done += 1
            task_coro = probe_task.get_coro()
            rec = tasks[task_coro]
            msg = f"DONE ({done}/{total}): {rec['host']} "

            try:
                probe_ok = probe_task.result()
                report.task_results[probe_ok].append((rec, probe_ok))

            except (asyncio.TimeoutError, OSError) as exc:
                probe_ok = False
                report.task_results[False].append((rec, exc))

            except Exception as exc:
                probe_ok = False
                log.error(msg + f"FAILURE: {str(exc)}")
                # record the failure so it is counted in the report summary
                # and written to failures.csv
                report.task_results[False].append((rec, exc))

            log.info(msg + ("PASS" if probe_ok else "FAIL"))

    report.start_timing()
    loop.run_until_complete(process_checks())
    report.stop_timing()
    stop_aiologging()
    report.print_report()
68 |
69 |
@cli.command(name="probe", cls=WithInventoryCommand)
@opt_config_file
@opts_inventory
@opt_timeout
@click.pass_context
def cli_check(ctx, **cli_opts):
    """
    Probe device for SSH reachability.

    The probe check determines if the device is reachable and the SSH port
    is available to receive connections.
    """
    exec_probe(ctx.obj["inventory_recs"], timeout=cli_opts["timeout"])
83 |
--------------------------------------------------------------------------------
/netcfgbu/cli/report.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 | from collections import defaultdict
4 | from errno import errorcode
5 | import csv
6 | from time import monotonic
7 | from datetime import datetime
8 |
9 | from tabulate import tabulate
10 |
# horizontal separator line used to frame printed reports
LN_SEP = "# " + "-" * 78
# standard indent applied when nesting tables inside report output
SPACES_4 = " " * 4
13 |
14 |
def err_reason(exc):
    """
    Return a short human-readable reason string for a task failure.

    Dispatches on the exact class of `exc`: plain strings pass through,
    asyncio timeouts render as "TIMEOUT...", OSError renders the errno
    symbol, and anything else renders "ClassName: message".
    """
    exc_cls = exc.__class__

    if exc_cls is str:
        return exc

    if exc_cls is asyncio.TimeoutError:
        return "TIMEOUT%s" % (str(exc.args or ""))

    if exc_cls is OSError:
        return errorcode[exc.errno]

    return "%s: %s" % (str(exc_cls.__name__), str(exc))
21 |
22 |
class Report(object):
    """
    Collects per-task pass/fail results and wall-clock timing for a CLI
    run, then renders a summary; failing tasks are also written to a
    "failures.csv" file in the current directory.
    """

    # strftime layout used for the displayed start/stop times
    TIME_FORMAT = "%Y-%b-%d %I:%M:%S %p"

    def __init__(self):
        self.start_ts = None  # wall-clock datetime at start_timing()
        self.start_tm = 0  # monotonic clock at start (for duration)

        self.stop_ts = None
        self.stop_tm = 0

        # task_results[True] -> entries for passing tasks
        # task_results[False] -> (record, exception-or-reason) tuples
        self.task_results = defaultdict(list)

    def start_timing(self):
        self.start_ts = datetime.now()
        self.start_tm = monotonic()

    def stop_timing(self):
        self.stop_ts = datetime.now()
        self.stop_tm = monotonic()

    @property
    def start_time(self):
        """Formatted wall-clock start time."""
        return self.start_ts.strftime(self.TIME_FORMAT)

    @property
    def stop_time(self):
        """Formatted wall-clock stop time."""
        return self.stop_ts.strftime(self.TIME_FORMAT)

    @property
    def duration(self):
        """Elapsed seconds between start_timing() and stop_timing()."""
        return self.stop_tm - self.start_tm

    def print_report(self):
        if not self.stop_tm:
            self.stop_timing()  # pragma: no cover

        fail_n = len(self.task_results[False])
        ok_n = len(self.task_results[True])

        total_n = ok_n + fail_n

        print(LN_SEP)

        print(
            f"Summary: TOTAL={total_n}, OK={ok_n}, FAIL={fail_n}\n"
            f"   START={self.start_time}, STOP={self.stop_time}\n"
            f"   DURATION={self.duration:.3f}s"
        )

        headers = ["host", "os_name", "reason"]

        if not fail_n:
            print(LN_SEP)
            return

        # only build the failure table when there are failures to show
        failure_tabular_data = [
            [rec["host"], rec["os_name"], err_reason(exc)]
            for rec, exc in self.task_results[False]
        ]

        # newline="" is required by the csv module so the writer controls
        # line endings (avoids blank rows on \r\n platforms)
        with open("failures.csv", "w", newline="") as ofile:
            wr_csv = csv.writer(ofile)
            wr_csv.writerow(headers)
            wr_csv.writerows(failure_tabular_data)

        print(f"\n\nFAILURES: {fail_n}")
        print(tabulate(headers=headers, tabular_data=failure_tabular_data))
        print(LN_SEP)
91 |
--------------------------------------------------------------------------------
/netcfgbu/cli/root.py:
--------------------------------------------------------------------------------
1 | from importlib import metadata
2 | from pathlib import Path
3 |
4 | import click
5 | from functools import reduce
6 | from first import first
7 |
8 | import netcfgbu
9 | from netcfgbu import config as _config
10 | from netcfgbu import inventory as _inventory
11 | from netcfgbu import jumphosts
12 |
13 |
14 | VERSION = metadata.version(netcfgbu.__package__)
15 |
16 |
17 | # -----------------------------------------------------------------------------
18 | #
19 | # CLI Custom Click Commands
20 | #
21 | # -----------------------------------------------------------------------------
22 |
23 |
class WithConfigCommand(click.Command):
    """
    Click command that loads the netcfgbu configuration file into
    ctx.obj["app_cfg"] before invoking the command body.
    """

    def invoke(self, ctx):
        try:
            cfg_fileio = ctx.params["config"]
            ctx.obj["app_cfg"] = _config.load(fileio=cfg_fileio)
            super().invoke(ctx)

        except Exception as exc:
            # surface configuration problems as a clean CLI failure
            ctx.fail(str(exc))
32 |
33 |
class WithInventoryCommand(click.Command):
    """
    Click command that loads the configuration AND the inventory before
    invoking the command body; also enables SSH debugging and jumphost
    support when configured.
    """

    def invoke(self, ctx):
        try:
            app_cfg = ctx.obj["app_cfg"] = _config.load(fileio=ctx.params["config"])

            debug_ssh_lvl = ctx.params.get("debug_ssh")
            if debug_ssh_lvl:  # pragma: no cover
                import logging

                from asyncssh import logging as assh_lgr

                assh_lgr.set_log_level(logging.DEBUG)
                assh_lgr.set_debug_level(debug_ssh_lvl)

            # a CLI-provided inventory file overrides the configured default
            if ctx.params["inventory"]:
                ctx.obj["app_cfg"].defaults.inventory = ctx.params["inventory"]

            inventory_recs = ctx.obj["inventory_recs"] = _inventory.load(
                app_cfg=app_cfg,
                limits=ctx.params["limit"],
                excludes=ctx.params["exclude"],
            )

            if not inventory_recs:
                raise RuntimeError(
                    f"No inventory matching limits in: {app_cfg.defaults.inventory}"
                )

            # if there is a jumphost configuration then prepare it for later use
            if app_cfg.jumphost:
                jumphosts.init_jumphosts(
                    jumphost_specs=app_cfg.jumphost, inventory=inventory_recs
                )

            super().invoke(ctx)

        except Exception as exc:
            ctx.fail(str(exc))
69 |
70 |
71 | # -----------------------------------------------------------------------------
72 | #
73 | # CLI Options
74 | #
75 | # -----------------------------------------------------------------------------
76 |
77 |
def get_spec_nameorfirst(spec_list, spec_name=None):
    """
    Return the spec whose `name` attribute matches `spec_name`, or the
    first spec when no name is given.  Returns None when the list is
    empty/None or when no spec matches.
    """
    if not spec_list:
        return None

    if not spec_name:
        return first(spec_list)

    matching = (spec for spec in spec_list if getattr(spec, "name", "") == spec_name)
    return first(matching)
86 |
87 |
def check_for_default(ctx, opt, value):
    """
    Click callback for the -C/--config option.

    When the option was provided (click has already opened the file),
    return it unchanged.  Otherwise fall back to a "netcfgbu.toml" file
    in the current directory if one exists; return None when there is no
    configuration file at all.

    NOTE: the previous implementation returned `value` (None) in both
    branches, so the netcfgbu.toml existence check had no effect.
    """
    if value:
        return value

    default_cfg = Path("netcfgbu.toml")
    if default_cfg.exists():
        # mirror click.File() behavior by returning an open file object
        return default_cfg.open()

    return None
93 |
94 |
# -C/--config: the netcfgbu TOML configuration file; may also be supplied
# via the NETCFGBU_CONFIG environment variable.  The callback falls back
# to a default when the option is omitted.
opt_config_file = click.option(
    "-C",
    "--config",
    envvar="NETCFGBU_CONFIG",
    type=click.File(),
    callback=check_for_default
    # required=True,
    # default="netcfgbu.toml",
)
104 |
105 | # -----------------------------------------------------------------------------
106 | # Inventory Options
107 | # -----------------------------------------------------------------------------
108 |
# -i/--inventory: override the configured inventory file
opt_inventory = click.option(
    "--inventory", "-i", help="Inventory file-name", envvar="NETCFGBU_INVENTORY"
)

# -l/--limit/--include: restrict the inventory to matching records
opt_limits = click.option(
    "--limit",
    "-l",
    "--include",
    multiple=True,
    help="limit/include in inventory",
)

# -e/--exclude: remove matching records from the inventory
opt_excludes = click.option(
    "--exclude",
    "-e",
    multiple=True,
    help="exclude from inventory",
)
127 |
128 |
def opts_inventory(in_fn_deco):
    """
    Decorator that applies the standard inventory options
    (-i/--inventory, -l/--limit, -e/--exclude) to a click command.
    """
    decorated = in_fn_deco
    for option_deco in (opt_inventory, opt_limits, opt_excludes):
        decorated = option_deco(decorated)
    return decorated
133 |
134 |
# -b/--batch: cap on concurrently-processed inventory records
opt_batch = click.option(
    "--batch",
    "-b",
    type=click.IntRange(1, 500),
    help="inventory record processing batch size",
)

# -t/--timeout: per-device operation timeout, seconds
opt_timeout = click.option(
    "--timeout", "-t", help="timeout(s)", type=click.IntRange(0, 5 * 60)
)

# --debug-ssh: asyncssh debug verbosity level (1-3)
opt_debug_ssh = click.option(
    "--debug-ssh", help="enable SSH debugging", type=click.IntRange(1, 3)
)
149 |
150 |
@click.group()
@click.version_option(version=VERSION)
def cli():
    # top-level click command group; subcommands are attached by the
    # modules in netcfgbu.cli.* at import time
    pass  # pragma: no cover
155 |
--------------------------------------------------------------------------------
/netcfgbu/cli/vcs.py:
--------------------------------------------------------------------------------
1 | import click
2 |
3 | from netcfgbu import config as _config
4 | from netcfgbu.vcs import git
5 | from netcfgbu.logger import stop_aiologging
6 | from netcfgbu.plugins import load_plugins
7 |
8 | from .root import cli, get_spec_nameorfirst, opt_config_file
9 |
10 | opt_vcs_name = click.option("--name", help="vcs name as defined in config file")
11 |
12 |
@cli.group(name="vcs")
def cli_vcs():
    """
    Version Control System subcommands.
    """
    # command-group container only; subcommands are attached below
    pass  # pragma: no cover
19 |
20 |
class VCSCommand(click.Command):
    """
    Click command that requires a VCS (git) configuration section.

    Loads the application configuration, resolves the requested (or
    first) git spec into ctx.obj["vcs_spec"], and fails the command with
    a helpful message when no usable configuration exists.
    """

    def invoke(self, ctx):
        cfg_fileopt = ctx.params["config"]

        try:
            app_cfgs = ctx.obj["app_cfg"] = _config.load(fileio=cfg_fileopt)
            if not (spec := get_spec_nameorfirst(app_cfgs.git, ctx.params["name"])):
                err_msg = (
                    "No configuration file provided, required for vcs support"
                    if not cfg_fileopt
                    else f"No vcs config section found in configuration file: {cfg_fileopt.name}"
                )
                raise RuntimeError(err_msg)

            ctx.obj["vcs_spec"] = spec
            super().invoke(ctx)
            stop_aiologging()

        except Exception as exc:
            # use str(exc) rather than exc.args[0]: args may be empty for
            # some exceptions, and indexing it would raise IndexError and
            # mask the original error
            ctx.fail(str(exc))
41 |
42 |
@cli_vcs.command(name="prepare", cls=VCSCommand)
@opt_config_file
@opt_vcs_name
@click.pass_context
def cli_vcs_prepare(ctx, **_cli_opts):
    """
    Prepare your system with the VCS repo.

    This command is used to setup your `configs_dir` as the VCS repository
    so that when you execute the backup process the resulting backup files
    can be stored in the VCS system.
    """
    # VCSCommand has already resolved the vcs spec into ctx.obj
    obj = ctx.obj
    git.vcs_prepare(spec=obj["vcs_spec"], repo_dir=obj["app_cfg"].defaults.configs_dir)
59 |
60 |
@cli_vcs.command(name="save", cls=VCSCommand)
@opt_config_file
@opt_vcs_name
@click.option("--tag-name", help="tag-release name")
@click.pass_context
def cli_vcs_save(ctx, **cli_opts):
    """
    Save changes into VCS repository.

    After you have run the config backup process you will need to push those
    changes into the VCS repository. This command performs the necessary
    steps to add changes to the repository and set a release tag. The release
    tag by default is a timestamp in the form of "<date>_<time>".
    """
    load_plugins(ctx.obj["app_cfg"].defaults.plugins_dir)
    git.vcs_save(
        ctx.obj["vcs_spec"],
        repo_dir=ctx.obj["app_cfg"].defaults.configs_dir,
        tag_name=cli_opts["tag_name"],
    )
82 |
83 |
@cli_vcs.command(name="status", cls=VCSCommand)
@opt_config_file
@opt_vcs_name
@click.pass_context
def cli_vcs_status(ctx, **_cli_opts):
    """
    Show VCS repository status.

    This command will show the status of the `configs_dir` contents so that you
    will know what will be changed before you run the `vcs save` command.
    """
    spec = ctx.obj["vcs_spec"]
    repo_dir = ctx.obj["app_cfg"].defaults.configs_dir
    print(git.vcs_status(spec=spec, repo_dir=repo_dir))
99 |
--------------------------------------------------------------------------------
/netcfgbu/config.py:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------------------------
2 | # System Imports
3 | # -----------------------------------------------------------------------------
4 |
5 | from pathlib import Path
6 |
7 | # -----------------------------------------------------------------------------
8 | # Public Imports
9 | # -----------------------------------------------------------------------------
10 |
11 | import toml
12 |
13 | # -----------------------------------------------------------------------------
14 | # Private Imports
15 | # -----------------------------------------------------------------------------
16 |
17 | from .logger import setup_logging
18 | from .config_model import AppConfig
19 | from pydantic import ValidationError
20 |
21 | __all__ = ["load"]
22 |
23 |
def validation_errors(filepath, errors):
    """
    Format pydantic validation errors into a human-readable, indented
    multi-line message naming the file and each failing config section.
    """
    pad = " " * 4
    lines = ["Configuration errors", f"{pad}File:[{filepath}]"]

    for err in errors:
        section = ".".join(str(part) for part in err["loc"])
        lines.append(f"{pad}Section: [{section}]: {err['msg']}")

    return "\n".join(lines)
33 |
34 |
def load(*, filepath=None, fileio=None) -> AppConfig:
    """
    Load the netcfgbu configuration and return a validated AppConfig.

    Parameters
    ----------
    filepath:
        Optional path to a TOML configuration file; opened when given.
    fileio:
        Optional already-open file object containing TOML content.  When
        neither is given an environment-driven default config is used.

    Raises
    ------
    RuntimeError
        When the configuration fails schema validation; the message
        describes each error in human-readable form.
    """
    app_cfg = dict()

    if filepath:
        app_cfg_file = Path(filepath)
        fileio = app_cfg_file.open()

    if fileio:
        app_cfg = toml.load(fileio)

    setup_logging(app_cfg)

    # ensure a "defaults" section exists so that environment-variable
    # driven defaults in the pydantic model can apply
    if not app_cfg.get("defaults"):
        app_cfg["defaults"] = dict(credentials={})

    try:
        cfg_obj = AppConfig.parse_obj(app_cfg)
    except ValidationError as exc:
        filepath = fileio.name if fileio else ""
        # chain the original ValidationError for debuggability
        raise RuntimeError(
            validation_errors(filepath=filepath, errors=exc.errors())
        ) from exc

    # create the working directories on first use; exist_ok guards against
    # a concurrent run creating them between the check and mkdir
    configs_dir: Path = cfg_obj.defaults.configs_dir
    if not configs_dir.is_dir():
        configs_dir.mkdir(exist_ok=True)

    plugins_dir: Path = cfg_obj.defaults.plugins_dir
    if not plugins_dir.is_dir():
        plugins_dir.mkdir(exist_ok=True)

    return cfg_obj
66 |
--------------------------------------------------------------------------------
/netcfgbu/config_model.py:
--------------------------------------------------------------------------------
1 | import re
2 | import os
3 | from typing import Optional, Union, List, Dict
4 | from os.path import expandvars
5 | from itertools import chain
6 | from pathlib import Path
7 |
8 | from pydantic import (
9 | BaseModel,
10 | SecretStr,
11 | BaseSettings,
12 | PositiveInt,
13 | FilePath,
14 | Field,
15 | validator,
16 | root_validator,
17 | )
18 |
19 |
20 | from . import consts
21 |
22 | __all__ = [
23 | "AppConfig",
24 | "Credential",
25 | "InventorySpec",
26 | "OSNameSpec",
27 | "LinterSpec",
28 | "GitSpec",
29 | "JumphostSpec",
30 | ]
31 |
32 | _var_re = re.compile(
33 | r"\${(?P[a-z0-9_]+)}" r"|" r"\$(?P[^{][a-z_0-9]+)", flags=re.IGNORECASE
34 | )
35 |
36 |
class NoExtraBaseModel(BaseModel):
    """Base model that rejects any fields not declared on the model."""

    class Config:
        extra = "forbid"
40 |
41 |
class EnvExpand(str):
    """
    When a string value contains a reference to an environment variable, use
    this type to expand the contents of the variable using os.path.expandvars.

    For example like:
        password = "$MY_PASSWORD"
        foo_password = "${MY_PASSWORD}_foo"

    will be expanded, given MY_PASSWORD is set to 'boo!' in the environment:
        password -> "boo!"
        foo_password -> "boo!_foo"
    """

    @classmethod
    def __get_validators__(cls):
        yield cls.validate

    @classmethod
    def validate(cls, v):
        # findall yields one tuple per match (one entry per alternative
        # group); flatten and drop the empty captures to get just the
        # referenced variable names
        referenced = [name for groups in _var_re.findall(v) for name in groups if name]

        if not referenced:
            return v

        # every referenced variable must be both present and non-empty
        for name in referenced:
            env_val = os.getenv(name)
            if env_val is None:
                raise ValueError(f'Environment variable "{name}" missing.')

            if not len(env_val):
                raise ValueError(f'Environment variable "{name}" empty.')

        return expandvars(v)
73 |
74 |
class EnvSecretStr(EnvExpand, SecretStr):
    """Secret string whose value is env-var expanded before being wrapped."""

    @classmethod
    def validate(cls, v):
        expanded = EnvExpand.validate(v)
        return SecretStr.validate(expanded)
79 |
80 |
class Credential(NoExtraBaseModel):
    """A username/password pair; both values support env-var expansion."""

    username: EnvExpand
    password: EnvSecretStr
84 |
85 |
class DefaultCredential(Credential, BaseSettings):
    """Default credential, sourced from NETCFGBU_DEFAULT_* env vars."""

    username: EnvExpand = Field(..., env="NETCFGBU_DEFAULT_USERNAME")
    password: EnvSecretStr = Field(..., env="NETCFGBU_DEFAULT_PASSWORD")
89 |
90 |
class Defaults(NoExtraBaseModel, BaseSettings):
    """
    Application-wide default settings; each field can also be supplied via
    the environment variable(s) named in its Field(env=...) declaration.
    """

    # directory where backed-up configs are written (falls back to $PWD)
    configs_dir: Optional[EnvExpand] = Field(..., env=("NETCFGBU_CONFIGSDIR", "PWD"))
    # directory containing user plugin modules (falls back to $PWD)
    plugins_dir: Optional[EnvExpand] = Field(..., env=("NETCFGBU_PLUGINSDIR", "PWD"))
    # path to the inventory CSV file; required and must be non-empty
    inventory: EnvExpand = Field(..., env="NETCFGBU_INVENTORY")
    credentials: DefaultCredential

    @validator("inventory")
    def _inventory_provided(cls, value):  # noqa
        # reject an explicitly-empty inventory setting
        if not len(value):
            raise ValueError("inventory empty value not allowed")
        return value

    @validator("configs_dir")
    def _configs_dir(cls, value):  # noqa
        # normalize to an absolute Path
        return Path(value).absolute()

    @validator("plugins_dir")
    def _plugins_dir(cls, value):  # noqa
        # when defaulted from $PWD, use a "plugins" subdirectory rather
        # than the working directory itself
        if value == os.getenv("PWD") and "/plugins" not in value:
            value = value + "/plugins"
        return Path(value).absolute()
112 |
113 |
class FilePathEnvExpand(FilePath):
    """A FilePath field whose value can be interpolated from env vars."""

    @classmethod
    def __get_validators__(cls):
        # env-var expansion runs first, then the standard FilePath checks
        yield from EnvExpand.__get_validators__()
        yield from FilePath.__get_validators__()
121 |
122 |
class GitSpec(NoExtraBaseModel):
    """
    Git VCS configuration section.  Exactly one authentication method
    must be provided: token, deploy_key, or password.
    """

    name: Optional[str]
    repo: EnvExpand  # HTTPS or SSH (git@) repository URL
    email: Optional[str]
    username: Optional[EnvExpand]
    password: Optional[EnvExpand]
    token: Optional[EnvSecretStr]
    deploy_key: Optional[FilePathEnvExpand]
    deploy_passphrase: Optional[EnvSecretStr]

    @validator("repo")
    def validate_repo(cls, repo):  # noqa
        # only HTTPS and SSH-style repository URLs are supported
        expected = ("https:", "git@")
        if not repo.startswith(expected):
            raise ValueError(
                f"Bad repo URL [{repo}]: expected to start with {expected}."
            )
        return repo

    @root_validator
    def ensure_proper_auth(cls, values):
        """Validate that exactly one authentication method is configured."""
        req = ("token", "deploy_key", "password")
        auth_vals = list(filter(None, (values.get(auth) for auth in req)))
        auth_c = len(auth_vals)
        if not auth_c:
            raise ValueError(
                f'Missing one of required auth method fields: {"|".join(req)}'
            )

        if auth_c > 1:
            raise ValueError(f'Only one of {"|".join(req)} allowed')

        if values.get("deploy_passphrase") and not values.get("deploy_key"):
            raise ValueError("deploy_key required when using deploy_passphrase")

        return values
159 |
160 |
class OSNameSpec(NoExtraBaseModel):
    """Per-OS settings controlling how a device's config is retrieved."""

    # credentials tried (in order) for devices of this OS
    credentials: Optional[List[Credential]]
    # command(s) issued before the get_config command (e.g. disable paging)
    pre_get_config: Optional[Union[str, List[str]]]
    # command used to retrieve the device configuration
    get_config: Optional[str]
    # dotted "module.ClassName" of a custom connector class
    connection: Optional[str]
    # name of a [linters.<name>] section applied to retrieved configs
    linter: Optional[str]
    timeout: PositiveInt = Field(consts.DEFAULT_GETCONFIG_TIMEOUT)
    ssh_configs: Optional[Dict]
    prompt_pattern: Optional[str]
170 |
171 |
class LinterSpec(NoExtraBaseModel):
    """Markers used to trim unwanted leading/trailing config content."""

    # content before (and including) this marker line is removed
    config_starts_after: Optional[str]
    # content at and after this marker line is removed
    config_ends_at: Optional[str]
175 |
176 |
class InventorySpec(NoExtraBaseModel):
    """An inventory-building script definition from the config file."""

    name: Optional[str]
    script: EnvExpand

    @validator("script")
    def validate_script(cls, script_exec):  # noqa
        # the first whitespace-separated token is the executable; the
        # remainder (if any) are its arguments
        script_bin = script_exec.split()[0]

        if not os.path.isfile(script_bin):
            raise ValueError(f"File not found: {script_bin}")

        if not os.access(script_bin, os.X_OK):
            raise ValueError(f"{script_bin} is not executable")

        return script_exec
191 |
192 |
class JumphostSpec(NoExtraBaseModel):
    """SSH jumphost (proxy) definition with optional host filtering."""

    # SSH proxy target, e.g. "user@host:port"
    proxy: str
    name: Optional[str]
    # limit/exclude filters selecting which inventory hosts use this proxy
    include: Optional[List[str]]
    exclude: Optional[List[str]]
    timeout: PositiveInt = Field(consts.DEFAULT_LOGIN_TIMEOUT)

    @validator("name", always=True)
    def _default_name(cls, value, values):  # noqa
        # default the display name to the proxy string when not given
        return values["proxy"] if not value else value
203 |
204 |
class AppConfig(NoExtraBaseModel):
    """
    Top-level model for the netcfgbu TOML configuration file.  Unknown
    sections are rejected (extra = "forbid" via NoExtraBaseModel).
    """

    defaults: Defaults
    credentials: Optional[List[Credential]]  # global fallback credentials
    linters: Optional[Dict[str, LinterSpec]]
    os_name: Optional[Dict[str, OSNameSpec]]
    inventory: Optional[List[InventorySpec]]
    logging: Optional[Dict]
    ssh_configs: Optional[Dict]
    git: Optional[List[GitSpec]]
    jumphost: Optional[List[JumphostSpec]]

    @validator("os_name")
    def _linters(cls, v, values):  # noqa
        # every linter referenced by an os_name spec must be defined in
        # the [linters] section (validated earlier, available via values)
        linters = values.get("linters") or {}
        for os_name, os_spec in v.items():
            if os_spec.linter and os_spec.linter not in linters:
                raise ValueError(
                    f'OS spec "{os_name}" using undefined linter "{os_spec.linter}"'
                )
        return v
225 |
--------------------------------------------------------------------------------
/netcfgbu/connectors/__init__.py:
--------------------------------------------------------------------------------
1 | from functools import lru_cache
2 | from importlib import import_module
3 |
4 | from .basic import BasicSSHConnector
5 | from .basic import set_max_startups # noqa
6 |
7 |
@lru_cache()
def get_connector_class(mod_cls_name=None):
    """
    Resolve a connector class from a dotted "module.ClassName" string;
    return the default BasicSSHConnector when no name is given.  Results
    are cached since lookups repeat per inventory record.
    """
    if not mod_cls_name:
        return BasicSSHConnector

    module_path, _, class_name = mod_cls_name.rpartition(".")
    return getattr(import_module(module_path), class_name)
16 |
--------------------------------------------------------------------------------
/netcfgbu/connectors/ssh.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from netcfgbu.connectors.basic import BasicSSHConnector
3 |
4 |
class LoginPromptUserPass(BasicSSHConnector):
    """
    Connector for devices that present interactive "User:" / "Password:"
    prompts on the session channel after the SSH connection is made.
    """

    async def login(self):
        # establish the SSH session/process via the base connector first
        await super(LoginPromptUserPass, self).login()

        # wait for the device's username prompt, then send the username
        await asyncio.wait_for(self.process.stdout.readuntil(b"User:"), timeout=10)

        username = (self.conn_args["username"] + "\n").encode("utf-8")
        self.process.stdin.write(username)

        # wait for the password prompt, then send the password
        await asyncio.wait_for(self.process.stdout.readuntil(b"Password:"), timeout=10)

        password = (self.conn_args["password"] + "\n").encode("utf-8")
        self.process.stdin.write(password)

        return self.conn
20 |
--------------------------------------------------------------------------------
/netcfgbu/consts.py:
--------------------------------------------------------------------------------
# maximum number of simultaneous SSH connection startups
DEFAULT_MAX_STARTUPS = 100
# default SSH login timeout, seconds
DEFAULT_LOGIN_TIMEOUT = 30
# default timeout for retrieving a device configuration, seconds
DEFAULT_GETCONFIG_TIMEOUT = 60
# default timeout for the TCP reachability probe, seconds
DEFAULT_PROBE_TIMEOUT = 10

# DEFAULT_CONFIG_STARTS_AFTER = "Current configuration"
# DEFAULT_CONFIG_ENDS_WITH = "end"

# character class considered valid within a device CLI prompt
PROMPT_VALID_CHARS = r"a-z0-9.\-_@()/:~"
# maximum expected prompt length, characters
PROMPT_MAX_CHARS = 65

# column names recognized in the inventory CSV file
INVENTORY_FIELDNAMES = ["host", "ipaddr", "os_name", "username", "password"]
13 |
--------------------------------------------------------------------------------
/netcfgbu/filetypes.py:
--------------------------------------------------------------------------------
1 | import csv
2 |
3 |
class CommentedCsvReader(csv.DictReader):
    """
    csv.DictReader that skips comment rows: any row whose first-column
    value begins with "#" is silently discarded.
    """

    def __next__(self):
        # iterate (rather than recurse per skipped row) so that a long run
        # of consecutive comment lines cannot exhaust the recursion limit
        while True:
            value = super().__next__()

            if not value[self.fieldnames[0]].startswith("#"):
                return value
12 |
13 |
14 | # TODO: not in use just yet.
15 | # class TextFileReader(object):
16 | # wordsep_re = re.compile(r"\s+|,")
17 | #
18 | # def __init__(self, fileio, index=0):
19 | # self._index = index
20 | # self._lines = fileio.readlines()
21 | #
22 | # def __iter__(self):
23 | # return self
24 | #
25 | # def __next__(self):
26 | # try:
27 | # line_item = self._lines.pop(0)
28 | # except IndexError:
29 | # raise StopIteration
30 | #
31 | # if line_item.startswith("#"):
32 | # return self.__next__()
33 | #
34 | # try:
35 | # return self.wordsep_re.split(line_item)[self._index]
36 | # except IndexError:
37 | # pass
38 | #
39 | # return self.__next__()
40 |
--------------------------------------------------------------------------------
/netcfgbu/filtering.py:
--------------------------------------------------------------------------------
1 | """
2 | This file contains the filtering functions that are using to process the
3 | '--include' and '--exclude' command line options. The code in this module is
4 | not specific to the netcfgbu inventory column names, can could be re-used for
5 | other CSV related tools and use-cases.
6 | """
7 | import ipaddress
8 | import operator
9 | import re
10 | from abc import ABC, abstractmethod
11 | from pathlib import Path
12 | from typing import List, AnyStr, Optional, Callable, Dict
13 |
14 | from .filetypes import CommentedCsvReader
15 |
16 | __all__ = ["create_filter"]
17 |
18 |
# Regex fragments used to parse filter expressions:
#   value_pattern captures the right-hand side of a "<field-name>=<value>"
#   constraint; file_reg matches the "@<filename>" form used to filter from
#   a CSV file; wordsep_re splits on whitespace or commas.
# The named groups are required: create_filter() consumes them via
# mo.group(...) / mo.groupdict().
value_pattern = r"(?P<value>\S+)$"
file_reg = re.compile(r"@(?P<filename>.+)$")
wordsep_re = re.compile(r"\s+|,")
22 |
23 |
class Filter(ABC):
    """Callable predicate applied to a single inventory record.

    Concrete subclasses capture the record fieldname to examine and the
    filter expression to apply.  Invoking an instance with a record dict
    yields True when the record satisfies the filter, False otherwise.
    """

    @abstractmethod
    def __call__(self, record: Dict[str, AnyStr]) -> bool:
        pass
38 |
39 |
class RegexFilter(Filter):
    """Match an inventory record field against a case-insensitive regex."""

    def __init__(self, fieldname: str, expr: str) -> None:
        self.fieldname = fieldname
        try:
            self.re = re.compile(f"^{expr}$", re.IGNORECASE)
        except re.error as exc:
            raise ValueError(
                f"Invalid filter regular-expression: {expr!r}: {exc}"
            ) from None

        # expose a human-readable identity for logging/debugging
        ident = f"limit_{fieldname}({self.re.pattern})"
        self.__doc__ = self.__name__ = self.__qualname__ = ident

    def __call__(self, record: Dict[str, AnyStr]) -> bool:
        return bool(self.re.match(record[self.fieldname]))

    def __repr__(self) -> str:
        return f"RegexFilter(fieldname={self.fieldname!r}, expr={self.re})"
61 |
62 |
class IPFilter(Filter):
    """Filter an inventory record field based on IP address

    When the specified filter ip address is a prefix (E.g 192.168.0.0/28), will
    check that the record IP is within the prefix range
    Will interpret single IP addresses (E.g. 2620:abcd:10::10) as an absolute match

    Raises
    ------
    ValueError (from ipaddress) when `ip` is not a valid address or network.
    """

    def __init__(self, fieldname: str, ip: str) -> None:
        self.fieldname = fieldname
        self.ip = ipaddress.ip_network(ip)
        # expose a human-readable identity for logging/debugging
        self.__doc__ = f"limit_{fieldname}({self.ip})"
        self.__name__ = self.__doc__
        self.__qualname__ = self.__doc__

    def __call__(self, record: Dict[str, AnyStr]) -> bool:
        return ipaddress.ip_address(record[self.fieldname]) in self.ip

    def __repr__(self) -> str:
        # use the actual class name ("IPFilter"); the previous hard-coded
        # "IpFilter" did not match the class
        return f"{type(self).__name__}(fieldname={self.fieldname!r}, ip='{self.ip}')"
83 |
84 |
def create_filter_function(op_filters, optest_fn):
    """Compose individual field filters into a single record predicate.

    Applies each filter in `op_filters` to the record; `optest_fn`
    (operator.not_ for include, operator.truth for exclude) decides whether
    a filter result disqualifies the record.  The composed function returns
    True when no filter disqualifies the record.
    """

    def filter_fn(rec):
        return not any(optest_fn(op_fn(rec)) for op_fn in op_filters)

    return filter_fn
94 |
95 |
def mk_file_filter(filepath, key):
    """Build a filter matching records whose `key` value is listed in a CSV file.

    Parameters
    ----------
    filepath:
        Path to the CSV file; only ".csv" files are supported.

    key:
        The inventory field (and CSV column) to match on, e.g. "host".

    Raises
    ------
    ValueError when the file is not a CSV file.
    KeyError when the CSV file does not contain the `key` column.
    """
    # validate the extension before touching the filesystem
    if not filepath.endswith(".csv"):
        raise ValueError(
            f"File '{filepath}' not a CSV file. Only CSV files are supported at this time"
        )

    # read the values up-front inside a context manager so the file handle
    # is closed promptly rather than leaked for the life of the filter.
    with open(filepath) as infile:
        filter_hostnames = [rec[key] for rec in CommentedCsvReader(infile)]

    def op_filter(rec):
        return rec[key] in filter_hostnames

    op_filter.hostnames = filter_hostnames
    op_filter.__doc__ = f"file: {filepath})"
    op_filter.__name__ = op_filter.__doc__
    op_filter.__qualname__ = op_filter.__doc__

    return op_filter
114 |
115 |
def create_filter(
    constraints: List[AnyStr], field_names: List[AnyStr], include: Optional[bool] = True
) -> Callable[[Dict], bool]:
    """
    This function returns a function that is used to filter inventory records.

    Parameters
    ----------
    constraints:
        A list of constraint expressions that are in the form
        "<field-name>=<value>", or "@<filename>" to filter against the host
        values found in a CSV file.

    field_names:
        A list of known field names

    include:
        When True, the filter function will match when the constraint is true,
        for example if the constraint is "os_name=eos", then it would match
        records that have the os_name field equal to "eos".

        When False, the filter function will match when the constraint is not
        true. For example if the constraint is "os_name=eos", then the filter
        function would match records that have os_name fields not equal to
        "eos".

    Returns
    -------
    The returned filter function expects an inventory record as the single
    input parameter, and the function returns True/False on match.

    Raises
    ------
    ValueError
        When a constraint expression is malformed, or a referenced file does
        not contain the expected content.
    FileNotFoundError
        When an "@<filename>" constraint names a file that does not exist.
    """
    # match "<field-name>=<value>" where <field-name> is a known field; the
    # group names are consumed below via mo.group(...)
    fieldn_pattern = "^(?P<keyword>" + "|".join(fieldn for fieldn in field_names) + ")"
    field_value_reg = re.compile(fieldn_pattern + "=" + r"(?P<value>\S+)$")

    op_filters: List[Filter] = []
    for filter_expr in constraints:

        # check for the '@' filtering use-case first.

        if mo := file_reg.match(filter_expr):
            filepath = mo.group(1)
            if not Path(filepath).exists():
                raise FileNotFoundError(filepath)

            try:
                op_filters.append(mk_file_filter(filepath, key="host"))
                continue

            except KeyError:
                raise ValueError(
                    f"File '{filepath}' does not contain host content as expected"
                ) from None

        # next check for keyword=value filtering use-case

        if (mo := field_value_reg.match(filter_expr)) is None:
            raise ValueError(f"Invalid filter expression: {filter_expr}")

        fieldn, value = mo.group("keyword"), mo.group("value")

        if fieldn.casefold() == "ipaddr":
            # prefer an exact/prefix IP match; fall back to regex when the
            # value is not a parsable IP address or network
            try:
                value_filter = IPFilter(fieldn, value)
            except ValueError:
                value_filter = RegexFilter(fieldn, value)
        else:
            value_filter = RegexFilter(fieldn, value)

        op_filters.append(value_filter)

    # include -> a constraint that does NOT match disqualifies the record;
    # exclude -> a constraint that DOES match disqualifies the record.
    optest_fn = operator.not_ if include else operator.truth
    filter_fn = create_filter_function(op_filters, optest_fn)
    filter_fn.op_filters = op_filters
    filter_fn.constraints = constraints

    return filter_fn
190 |
--------------------------------------------------------------------------------
/netcfgbu/inventory.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import os
3 |
4 |
5 | from .logger import get_logger
6 | from .filtering import create_filter
7 | from .filetypes import CommentedCsvReader
8 | from .config_model import AppConfig, InventorySpec
9 |
10 |
def load(app_cfg: AppConfig, limits=None, excludes=None):
    """Load the inventory CSV file and return its records as a list of dicts.

    Parameters
    ----------
    app_cfg:
        The application configuration; provides the inventory file path.

    limits:
        Optional list of '--limit' constraint expressions; when given, only
        matching records are returned.

    excludes:
        Optional list of '--exclude' constraint expressions; when given,
        matching records are removed.

    Raises
    ------
    FileNotFoundError when the configured inventory file does not exist.
    """
    inventory_file = Path(app_cfg.defaults.inventory)
    if not inventory_file.exists():
        raise FileNotFoundError(
            f"Inventory file does not exist: {inventory_file.absolute()}"
        )

    # read all records within a context manager so the file handle is closed
    # promptly rather than leaked.
    with inventory_file.open() as infile:
        iter_recs = CommentedCsvReader(infile)
        field_names = iter_recs.fieldnames

        if limits:
            filter_fn = create_filter(constraints=limits, field_names=field_names)
            iter_recs = filter(filter_fn, iter_recs)

        if excludes:
            filter_fn = create_filter(
                constraints=excludes, field_names=field_names, include=False
            )
            iter_recs = filter(filter_fn, iter_recs)

        return list(iter_recs)
33 |
34 |
def build(inv_def: InventorySpec) -> int:
    """Execute the configured inventory-build script; return its exit code.

    The `script` field is required, so config-load validation guarantees it
    is present by the time this function runs.
    """
    log = get_logger()

    script = inv_def.script
    log.info(f"Executing script: [{script}]")

    # os.system() reports the script's pass/fail only through its return
    # code and raises no exceptions; switch to subprocess.call if exception
    # handling is ever needed.
    rc = os.system(script)
    if rc != 0:
        log.warning(f"inventory script returned non-zero return code: {rc}")

    return rc
54 |
--------------------------------------------------------------------------------
/netcfgbu/jumphosts.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains the code providing jump host feature functionality
3 | so that any device in inventory that requires a proxy server can use
4 | the netcfgbu tool.
5 | """
6 |
7 | # -----------------------------------------------------------------------------
8 | # System Imports
9 | # -----------------------------------------------------------------------------
10 |
11 | from typing import Optional, List, Dict, AnyStr
12 | import asyncio
13 | from urllib.parse import urlparse
14 |
15 | # -----------------------------------------------------------------------------
16 | # Public Imports
17 | # -----------------------------------------------------------------------------
18 |
19 | import asyncssh
20 | from first import first
21 |
22 | # -----------------------------------------------------------------------------
23 | # Private Imports
24 | # -----------------------------------------------------------------------------
25 |
26 | from .config_model import JumphostSpec
27 | from .filtering import create_filter
28 | from .logger import get_logger
29 |
30 |
class JumpHost(object):
    """
    Provides an SSH tunnel ("jump host" / proxy) connection for any device
    in the inventory that is configured to require one.
    """

    # class-level registry of the jump host instances required by the
    # current run; populated by init_jumphosts()
    available = list()

    def __init__(self, spec: JumphostSpec, field_names: List[AnyStr]):
        """
        Prepare a jump host instance for potential use.  This method does
        not connect to the proxy system.

        Parameters
        ----------
        spec:
            The jumphost configuration

        field_names:
            List of inventory field names that are used to prepare any
            necessary filtering functionality
        """
        self._spec = spec
        self.filters = list()
        self._conn = None
        self._init_filters(field_names)

    @property
    def tunnel(self):
        """
        The SSH client connection of the jump-host, for use as `tunnel`
        when connecting to a target device.  Raises RuntimeError when the
        jump host is not connected.
        """
        if not self.is_active:
            raise RuntimeError(
                f"Attempting to use JumpHost {self.name}, but not connected"
            )
        return self._conn

    @property
    def name(self):
        """The string-name of the jump host."""
        return self._spec.name

    @property
    def is_active(self):
        """True when the jumphost SSH connection is established."""
        return bool(self._conn)

    def _init_filters(self, field_names):
        """Build the include/exclude filter functions (called from __init__)."""
        for constraints, as_include in (
            (self._spec.include, True),
            (self._spec.exclude, False),
        ):
            if constraints:
                self.filters.append(
                    create_filter(
                        constraints=constraints,
                        field_names=field_names,
                        include=as_include,
                    )
                )

    async def connect(self):
        """
        Connect to the jumphost system so that it can be used later as the
        tunnel to connect to other devices.  Raises asyncio.TimeoutError if
        the connection is not established within the configured timeout.
        """
        proxy_parts = urlparse("ssh://" + self._spec.proxy)

        conn_args = dict(host=proxy_parts.hostname, known_hosts=None)
        if proxy_parts.username:
            conn_args["username"] = proxy_parts.username
        if proxy_parts.port:
            conn_args["port"] = proxy_parts.port

        async def connect_to_jh():
            # obtain the SSH client connection
            self._conn = await asyncssh.connect(**conn_args)

        await asyncio.wait_for(connect_to_jh(), timeout=self._spec.timeout)

    def filter(self, inv_rec):
        """
        Return True when this jump host is required to support the given
        inventory record; False otherwise.
        """
        return any(_f(inv_rec) for _f in self.filters)
123 |
124 |
125 | # -----------------------------------------------------------------------------
126 | #
127 | # CODE BEGINS
128 | #
129 | # -----------------------------------------------------------------------------
130 |
131 |
def init_jumphosts(jumphost_specs: List[JumphostSpec], inventory: List[Dict]):
    """
    Initialize the required set of Jump Host instances so that they can be used
    when netcfgbu attempts to access devices that require the use of jump
    hosts.

    Parameters
    ----------
    jumphost_specs:
        List of jump host specs from the app config instance

    inventory:
        List of inventory records; these are used to determine which, if any,
        of the configured jump hosts are actually required for use given any
        provided inventory filtering.
    """
    # guard: with no inventory records there is nothing to connect to, so no
    # jump hosts are required (previously this raised IndexError).
    if not inventory:
        JumpHost.available = []
        return

    field_names = inventory[0].keys()

    # create a list of jump host instances so that we can determine which, if
    # any, will be used during the execution of the command.

    jh_list = [JumpHost(spec, field_names=field_names) for spec in jumphost_specs]

    req_jh = {
        use_jh
        for rec in inventory
        if (use_jh := first(jh for jh in jh_list if jh.filter(rec)))
    }

    JumpHost.available = list(req_jh)
162 |
163 |
async def connect_jumphosts():
    """
    This coroutine is used to connect to all of the required jump host servers. This
    should be called before attempting to run any of the SSH device tasks, such as
    login or backup.

    Returns
    -------
    True if all required jump host servers are connected.
    False otherwise; check log errors for details.
    """
    log = get_logger()
    ok = True

    for jh in JumpHost.available:
        try:
            await jh.connect()
            log.info(f"JUMPHOST: connected to {jh.name}")

        except (asyncio.TimeoutError, asyncssh.Error) as exc:
            # some exceptions stringify to "", so fall back to the class name
            errmsg = str(exc) or exc.__class__.__name__
            log.error(f"JUMPHOST: connect to {jh.name} failed: {errmsg}")
            # keep attempting the remaining jump hosts; report overall failure
            ok = False

    return ok
189 |
190 |
def get_jumphost(inv_rec: dict) -> Optional[JumpHost]:
    """
    Return the jumphost instance used to tunnel the connection for the
    given inventory record, or None when the record does not require a
    jumphost.
    """
    return next((jh for jh in JumpHost.available if jh.filter(inv_rec)), None)
198 |
--------------------------------------------------------------------------------
/netcfgbu/linter.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | import re
3 |
4 | from .logger import get_logger
5 | from .config_model import LinterSpec
6 |
7 |
8 | log = get_logger()
9 |
10 |
def lint_content(config_content, lint_spec: LinterSpec):
    """Trim configuration content according to the lint spec.

    Removes everything up to and including the line matching
    `config_starts_after`, and everything from the last occurrence of
    `config_ends_at` onward.  Returns the (possibly) trimmed content.
    """
    start_offset = 0
    end_offset = None

    # NOTE: the previous "if not start_offset and ..." guard was redundant;
    # start_offset is always 0 at this point.
    if lint_spec.config_starts_after:
        if start_mo := re.search(
            f"^{lint_spec.config_starts_after}.*$", config_content, re.MULTILINE
        ):
            # skip past the matched line and its trailing newline
            start_offset = start_mo.end() + 1

    if lint_spec.config_ends_at:
        # if not found, rfind returns -1 to indicate; therefore need to make
        # this check
        if (found := config_content.rfind("\n" + lint_spec.config_ends_at)) > 0:
            end_offset = found

    config_content = config_content[start_offset:end_offset]

    # if remove_lines := lint_spec.remove_lines:
    #     remove_lines_reg = "|".join(remove_lines)
    #     config_content = re.sub(remove_lines_reg, "", config_content, flags=re.M)

    return config_content
34 |
35 |
def lint_file(fileobj: Path, lint_spec) -> bool:
    """
    Perform the linting function on the content in the given file.
    Returns True if the content was changed, False otherwise.
    """
    original = fileobj.read_text()
    linted = lint_content(original, lint_spec)

    if linted == original:
        log.debug(f"LINT no change on {fileobj.name}")
        return False

    # keep the pre-lint content alongside as "<name>.orig", then write the
    # linted content in place
    fileobj.rename(str(fileobj.absolute()) + ".orig")
    fileobj.write_text(linted)
    return True
51 |
--------------------------------------------------------------------------------
/netcfgbu/logger.py:
--------------------------------------------------------------------------------
1 | """
2 |
3 | References
4 | ----------
5 |
6 | Logging in asyncio applications
7 | https://bit.ly/36WWgrf
8 | """
9 | from typing import Set
10 | import sys
11 | import asyncio
12 | from queue import SimpleQueue as Queue
13 |
14 | from logging.config import dictConfig
15 | from logging import getLogger
16 | import logging
17 | import logging.handlers
18 |
19 |
20 | __all__ = ["setup_logging", "get_logger", "stop_aiologging"]
21 |
22 |
23 | _g_quelgr_listener: logging.handlers.QueueListener
24 |
25 |
class LocalQueueHandler(logging.handlers.QueueHandler):
    """QueueHandler variant for asyncio applications.

    Skips the base-class prepare() step (records are consumed in-process)
    and lets asyncio task cancellation propagate instead of reporting it as
    a logging error.
    """

    def emit(self, record: logging.LogRecord) -> None:
        try:
            self.enqueue(record)
        except asyncio.CancelledError:
            # never swallow task cancellation
            raise
        except asyncio.QueueFull:
            self.handleError(record)
37 |
38 |
def setup_logging_queue(logger_names) -> None:
    """
    Move log handlers to a separate thread.

    Replace all configured handlers with a LocalQueueHandler, and start a
    logging.QueueListener holding the original handlers.
    """
    global _g_quelgr_listener

    log_queue = Queue()
    que_handler = LocalQueueHandler(log_queue)
    moved_handlers: Set[logging.Handler] = set()

    for name in logger_names:
        logger = logging.getLogger(name)
        logger.addHandler(que_handler)
        # detach every pre-existing handler; the listener thread takes them over
        for handler in list(logger.handlers):
            if handler is not que_handler:
                logger.removeHandler(handler)
                moved_handlers.add(handler)

    _g_quelgr_listener = logging.handlers.QueueListener(
        log_queue, *moved_handlers, respect_handler_level=True
    )
    _g_quelgr_listener.start()
63 |
64 |
def setup_logging(app_cfg):
    """Configure logging from the app-config "logging" section and route
    all configured loggers through the background queue thread."""
    log_cfg = app_cfg.get("logging") or {}
    log_cfg.update(version=1)

    dictConfig(log_cfg)
    logger_names = log_cfg.get("loggers") or []
    setup_logging_queue(logger_names)
71 |
72 |
def stop_aiologging():
    # Stop the QueueListener thread (its stop() drains any queued records to
    # the original handlers), then flush stdout so output is complete at exit.
    _g_quelgr_listener.stop()
    sys.stdout.flush()
76 |
77 |
def get_logger():
    """Return the package-level logger shared by all netcfgbu modules."""
    return getLogger(__package__)
80 |
--------------------------------------------------------------------------------
/netcfgbu/os_specs.py:
--------------------------------------------------------------------------------
1 | from netcfgbu.connectors import get_connector_class
2 | from netcfgbu.config_model import AppConfig, OSNameSpec
3 |
4 |
def get_os_spec(rec, app_cfg: AppConfig):
    """Return the OSNameSpec configured for the record's os_name, or a
    default OSNameSpec when no matching configuration exists."""
    os_name_specs = app_cfg.os_name or {}
    return os_name_specs.get(rec["os_name"]) or OSNameSpec()
9 |
10 |
def make_host_connector(rec, app_cfg: AppConfig):
    """Construct the connector instance for the given inventory record,
    using the connector class named in the record's OS spec."""
    os_spec = get_os_spec(rec, app_cfg)
    connector_cls = get_connector_class(os_spec.connection)
    return connector_cls(host_cfg=rec, os_spec=os_spec, app_cfg=app_cfg)
15 |
--------------------------------------------------------------------------------
/netcfgbu/plugins.py:
--------------------------------------------------------------------------------
1 | from collections import defaultdict
2 |
# Registry of discovered plugin classes; load_plugins() stores the list of
# Plugin subclasses under the _PLUGIN_NAME key.
_registered_plugins = defaultdict(dict)

_PLUGIN_NAME = "hooks"
6 |
7 |
def load_plugins(plugins_dir):
    """Import every .py file in *plugins_dir* and register Plugin subclasses.

    Does nothing when the directory does not exist.  After importing, every
    subclass of Plugin (however it was defined) is recorded in the registry.
    """
    if not plugins_dir.is_dir():
        return

    import importlib.util

    for py_file in plugins_dir.glob("*.py"):
        mod_name = py_file.stem
        # Loader.load_module() is deprecated (removed in Python 3.12); use
        # the spec / exec_module machinery instead.
        spec = importlib.util.spec_from_file_location(mod_name, py_file)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

    _registered_plugins[_PLUGIN_NAME] = list(Plugin.__subclasses__())
23 |
24 |
class Plugin(object):
    """Base class for user plugins.

    Subclasses placed in the configured plugins directory are discovered by
    load_plugins(); their hook methods are invoked (on the class, not on an
    instance — note the deliberate absence of `self`) at the corresponding
    points of the netcfgbu workflow.
    """

    name = None

    # ------------------------------------------------------------------
    # Hook methods — subclasses override the ones they care about.  The
    # base implementations are no-ops.
    # ------------------------------------------------------------------

    def report(report):
        """Hook invoked with the task results once the backup run completes."""
        pass

    def backup_success(rec: dict, res: bool):
        """Hook invoked for each device whose backup succeeded."""
        pass

    def backup_failed(rec: dict, exc: str):
        """Hook invoked for each device whose backup failed."""
        pass

    def git_report(success: bool, tag_name: str):
        """Hook invoked after the netcfgbu vcs save process completes."""
        pass

    # ------------------------------------------------------------------
    # Dispatch entry points used by netcfgbu itself.
    # ------------------------------------------------------------------

    def _dispatch(hook_name, *args):
        """Invoke *hook_name* on every registered plugin class, or on the
        no-op Plugin base when no plugins are registered."""
        tasks = _registered_plugins[_PLUGIN_NAME] or Plugin
        if isinstance(tasks, list):
            for task in tasks:
                getattr(task, hook_name)(*args)
        else:
            getattr(tasks, hook_name)(*args)

    def run_backup_failed(rec: dict, exc: str):
        """Run the backup_failed hook of all registered plugins."""
        Plugin._dispatch("backup_failed", rec, exc)

    def run_backup_success(rec: dict, res: str):
        """Run the backup_success hook of all registered plugins."""
        Plugin._dispatch("backup_success", rec, res)

    def run_report(task_results):
        """Run the report hook of all registered plugins."""
        Plugin._dispatch("report", task_results)

    def run_git_report(success: bool, tag_name: str) -> None:
        """Run the git_report hook of all registered plugins."""
        Plugin._dispatch("git_report", success, tag_name)
85 |
--------------------------------------------------------------------------------
/netcfgbu/probe.py:
--------------------------------------------------------------------------------
1 | """
2 | This module contains the probe coroutine used to validate that a target device
3 | has a given port open.
4 | """
5 |
6 | # -----------------------------------------------------------------------------
7 | # System Imports
8 | # -----------------------------------------------------------------------------
9 |
10 | import asyncio
11 |
12 | __all__ = ["probe"]
13 |
14 |
15 | # -----------------------------------------------------------------------------
16 | #
17 | # CODE BEGINS
18 | #
19 | # -----------------------------------------------------------------------------
20 |
21 |
async def probe(host, timeout: int, port=22, raise_exc=False) -> bool:
    """
    Coroutine used to determine if a host port is online and available.

    Parameters
    ----------
    host: str
        The host name or IP address

    timeout: int
        The connect timeout in seconds. If the probe does not connect
        within this timeout then the probe returns False

    port: int
        The port to check, defaults to SSH(22)

    raise_exc: bool
        When the probe times out:
        When True the asyncio.TimeoutError will be raised
        When False, return False

    Returns
    -------
    True when the TCP connection was established within the timeout.
    """
    loop = asyncio.get_running_loop()
    coro = loop.create_connection(asyncio.BaseProtocol, host=host, port=port)

    try:
        transport, _protocol = await asyncio.wait_for(coro, timeout=timeout)
    except asyncio.TimeoutError:
        if raise_exc:
            raise
        return False

    # close the probe connection immediately; leaving the transport open
    # would leak a socket for every successful probe.
    transport.close()

    # NOTE(review): other OSErrors (e.g. connection-refused) propagate to the
    # caller, matching the original behavior.
    return True
56 |
--------------------------------------------------------------------------------
/netcfgbu/vcs/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/jeremyschulman/netcfgbu/c2056f07aefa7c9e584fc9a34c9971100df7fa49/netcfgbu/vcs/__init__.py
--------------------------------------------------------------------------------
/netcfgbu/vcs/git.py:
--------------------------------------------------------------------------------
1 | """
2 | This file contains the Version Control System (VCS) integration
3 | using Git as the backend. The following functions are exported
4 | for use:
5 |
6 | * vcs_prepare:
7 | Used to prepare the repo directory for VCS use.
8 |
9 | * vcs_save:
10 | Used to save files in the repo directory into VCS and tag the collection
11 | with a release tag.
12 |
13 | * vcs_status:
14 | Used to show the current target status of file changes.
15 |
16 | """
17 | # -----------------------------------------------------------------------------
18 | # System Imports
19 | # -----------------------------------------------------------------------------
20 |
21 | from typing import Optional
22 | import os
23 | from urllib.parse import urlsplit
24 | from pathlib import Path
25 | from datetime import datetime
26 |
27 | # -----------------------------------------------------------------------------
28 | # Public Imports
29 | # -----------------------------------------------------------------------------
30 |
31 | import pexpect
32 |
33 | # -----------------------------------------------------------------------------
34 | # Private Imports
35 | # -----------------------------------------------------------------------------
36 |
37 | from netcfgbu.logger import get_logger
38 | from netcfgbu.config_model import GitSpec
39 | from netcfgbu.plugins import Plugin
40 |
41 | git_bin = "git"
42 |
43 |
def tag_name_timestamp() -> str:
    """
    Build a release-tag name from the current local time, in the form
    YYYYMMDD_HHMMSS (24-hour clock).
    """
    now = datetime.now()
    return now.strftime("%Y%m%d_%H%M%S")  # pragma: no cover
50 |
51 |
52 | # -----------------------------------------------------------------------------
53 | #
54 | # Git VCS Entrypoints
55 | #
56 | # -----------------------------------------------------------------------------
57 |
58 |
def vcs_save(gh_cfg: GitSpec, repo_dir: Path, tag_name: Optional[str] = None) -> bool:
    """Commit, tag, and push all repo changes.

    Returns True when changes were saved; False when there was nothing to
    commit.  The git_report plugin hook is invoked either way.
    """
    log = get_logger()
    log.info(f"VCS update git: {gh_cfg.repo}")

    runner = git_runner(gh_cfg, repo_dir)
    tag_name = tag_name or tag_name_timestamp()

    if "nothing to commit" in runner.run("status"):
        log.info("VCS no changes, skipping")
        Plugin.run_git_report(success=False, tag_name=tag_name)
        return False

    log.info(f"VCS saving changes, tag={tag_name}")

    # (command, requires-authentication) pairs, executed in order
    for cmd, requires_auth in (
        ("add -A", False),
        (f"commit -m {tag_name}", False),
        ("push", True),
        (f"tag -a {tag_name} -m {tag_name}", False),
        ("push --tags", True),
    ):
        runner.run(cmd, requires_auth)

    Plugin.run_git_report(success=True, tag_name=tag_name)
    return True
88 |
89 |
def vcs_prepare(spec: GitSpec, repo_dir: Path):
    """Prepare the repo directory for VCS use (git init + pull)."""
    log = get_logger()
    log.info(f"VCS prepare git: {spec.repo}")

    runner = git_runner(spec, repo_dir)
    runner.git_init()
    runner.git_pull()
97 |
98 |
def vcs_status(spec: GitSpec, repo_dir: Path):
    """
    Log the repo and directory being examined, then return the output of
    `git status` for the repo directory.
    """
    logr = get_logger()
    logr.info(
        f"""
VCS diffs git: {spec.repo}
           dir: {str(repo_dir)}
"""
    )

    ghr = git_runner(spec, repo_dir)
    return ghr.run("status")
110 |
111 |
112 | # -----------------------------------------------------------------------------
113 | #
114 | # Git Runners to perform commands
115 | #
116 | # -----------------------------------------------------------------------------
117 |
118 |
class GitRunner(object):
    """
    Executes the specific `git` command operations (via pexpect) requested
    for the VCS use cases.  This base class covers commands that need no
    authentication; subclasses supply the authenticated variants.
    """

    def __init__(self, config: GitSpec, repo_dir):
        self.user = config.username or os.environ["USER"]
        self.config = config
        self.repo_dir = repo_dir
        self.git_file = repo_dir.joinpath(".git", "config")

        parsed = urlsplit(config.repo)
        if parsed.scheme == "https":
            # embed the username so https operations only prompt for the secret
            self.repo_url = f"https://{self.user}@{parsed.netloc}{parsed.path}"
        else:
            self.repo_url = config.repo

    @property
    def repo_exists(self):
        """True when the directory already contains an initialized git repo."""
        return self.git_file.exists()

    @property
    def is_dir_empty(self):
        """True when the repo directory contains no entries at all."""
        return not any(self.repo_dir.iterdir())

    def run_noauth(self, cmd: str):
        """
        Run the git command that does not require any user authentication
        """
        output, rc = pexpect.run(
            command=f"{git_bin} {cmd}",
            withexitstatus=True,
            cwd=self.repo_dir,
            encoding="utf-8",
        )
        if rc != 0:
            raise RuntimeError(f"git {cmd} failed: %s" % output)
        return output

    # run with auth is an alias to be created by subclass if needed
    run_auth = run_noauth

    def run(self, cmd: str, authreq=False):
        """Dispatch to the authenticated or unauthenticated runner."""
        runner = self.run_auth if authreq else self.run_noauth
        return runner(cmd)

    def git_init(self):
        """Initialize the repo and add the remote origin when not already set."""
        output = self.run("remote -v") if self.repo_exists else ""
        if self.repo_url not in output:
            for cmd, req_auth in (
                ("init", False),
                (f"remote add origin {self.repo_url}", False),
            ):
                self.run(cmd, req_auth)
            self.git_config()

    def git_pull(self):
        """Pull the latest from origin/master (authenticated)."""
        self.run("pull origin master", authreq=True)

    def git_config(self):
        """Apply the local git config: identity and push behavior."""
        for cfg_opt, cfg_val in (
            ("user.email", self.config.email or self.user),
            ("user.name", self.user),
            ("push.default", "matching"),
        ):
            self.run(f"config --local {cfg_opt} {cfg_val}")

    def git_clone(self):
        """Clone the remote repo into the repo directory (authenticated)."""
        self.run(f"clone {self.repo_url} {str(self.repo_dir)}", authreq=True)
        self.git_config()
175 | self.git_config()
176 |
177 | def git_pull(self):
178 | self.run("pull origin master", authreq=True)
179 |
180 | def git_config(self):
181 | config = self.config
182 |
183 | config_opts = (
184 | ("user.email", config.email or self.user),
185 | ("user.name", self.user),
186 | ("push.default", "matching"),
187 | )
188 |
189 | for cfg_opt, cfg_val in config_opts:
190 | self.run(f"config --local {cfg_opt} {cfg_val}")
191 |
192 | def git_clone(self):
193 | self.run(f"clone {self.repo_url} {str(self.repo_dir)}", authreq=True)
194 | self.git_config()
195 |
196 |
class GitAuthRunner(GitRunner):
    """
    Git Runner that is used for either User/Password or Token cases
    """

    PASSWORD_PROMPT = "Password for"

    def _get_secret(self):
        """The secret sent in response to the password prompt."""
        return self.config.token.get_secret_value()

    def run_auth(self, cmd):
        """Run a git command, answering the password prompt with the secret."""
        output, rc = pexpect.run(
            command=f"{git_bin} {cmd}",
            cwd=self.repo_dir,
            withexitstatus=True,
            encoding="utf-8",
            events={self.PASSWORD_PROMPT: self._get_secret() + "\n"},
        )
        if rc != 0:
            raise RuntimeError(output)
        return output
220 |
221 |
class GitTokenRunner(GitAuthRunner):
    """Auth runner for access-token authentication; the inherited default
    password prompt is used as-is."""
225 |
226 |
class GitDeployKeyRunner(GitRunner):
    """
    Git Runner used with deployment keys without passphrase
    """

    def git_config(self):
        """Extend the base config with an sshCommand using the deploy key."""
        super().git_config()
        key_path = str(Path(self.config.deploy_key).absolute())
        self.run(
            f"config --local core.sshCommand 'ssh -i {key_path} -o StrictHostKeyChecking=no'"
        )
238 |
239 |
class GitSecuredDeployKeyRunner(GitDeployKeyRunner, GitAuthRunner):
    """
    Git Runner used when deployment key has passphrase configured
    """

    PASSWORD_PROMPT = "Enter passphrase for key"

    def _get_secret(self):
        """The deploy-key passphrase."""
        return self.config.deploy_passphrase.get_secret_value()
249 |
250 |
def git_runner(gh_cfg: GitSpec, repo_dir: Path) -> GitRunner:
    """
    Select and construct the Git Runner matching the configuration
    file settings.
    """
    if gh_cfg.token:
        return GitTokenRunner(gh_cfg, repo_dir)

    if gh_cfg.deploy_key:
        # choose the secured variant only when a passphrase is configured
        runner_cls = (
            GitSecuredDeployKeyRunner
            if gh_cfg.deploy_passphrase
            else GitDeployKeyRunner
        )
        return runner_cls(gh_cfg, repo_dir)

    # Note: this is unreachable code since the config-model validation should
    # have ensured the proper fields exist in the spec.

    raise RuntimeError("Git config missing authentication settings")  # pragma: no cover
269 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | target-version = ['py38']
3 |
4 | [tool.interrogate]
5 | fail-under = 0
6 | verbose = 1
7 | color = true
8 | ignore-module = true
9 | exclude = ["setup.py", "tasks.py", "bin"]
--------------------------------------------------------------------------------
/requirements-develop.txt:
--------------------------------------------------------------------------------
1 | requests
2 | invoke
3 | black
4 | flake8
5 | flake8-breakpoint
6 | pytest
7 | pytest-cov
8 | pytest-asyncio
9 | asynctest
10 | tox
11 | pre-commit
12 | interrogate
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiofiles~=0.5.0
2 | asyncssh~=2.2.1
3 | toml~=0.10.1
4 | invoke~=1.4.1
5 | pydantic~=1.5.1
6 | tabulate~=0.8.7
7 | click~=7.1.2
8 | first~=2.0.2
9 | pexpect~=4.8.0
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages

package_name = "netcfgbu"

# Read the package version; use a context manager so the file handle is
# closed deterministically (the original left it open).
with open("VERSION", encoding="utf-8") as fh:
    package_version = fh.read().strip()


def requirements(filename="requirements.txt"):
    """Return the non-empty requirement lines from *filename*."""
    with open(filename.strip(), encoding="utf-8") as req_fh:
        return [line.strip() for line in req_fh if line.strip()]


with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()


setup(
    name=package_name,
    version=package_version,
    description="Network Configuration Backup",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Jeremy Schulman",
    packages=find_packages(),
    include_package_data=True,
    install_requires=requirements(),
    scripts=["bin/netcfgbu"],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: MacOS :: MacOS X",
        "Operating System :: POSIX",
        "Programming Language :: Python :: 3.8",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: System :: Networking",
    ],
)
37 |
--------------------------------------------------------------------------------
/tasks.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | #
3 | # For use with the invoke tool, see: http://www.pyinvoke.org/
4 | #
5 | # References
6 | # ----------
7 | #
8 | # Black:
9 | # Flake8: https://flake8.pycqa.org/en/latest/user/configuration.html
10 |
11 |
12 | from invoke import task
13 |
14 |
@task
def precheck(ctx):
    """Run formatting, lint, pre-commit, and docstring-coverage checks."""
    ctx.run("black .")
    ctx.run("flake8 .")
    ctx.run("pre-commit run -a")
    ctx.run("interrogate -c pyproject.toml --exclude=build --exclude tests", pty=True)
21 |
22 |
@task
def clean(ctx):
    """Remove build artifacts and test/coverage caches."""
    cleanup_commands = (
        "python setup.py clean",
        "rm -rf netcfgbu.egg-info",
        "rm -rf .pytest_cache .pytest_tmpdir .coverage",
        "rm -rf htmlcov",
    )
    for command in cleanup_commands:
        ctx.run(command)
29 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # DO NOT DELETE THIS FILE
2 | # This file exists so that test coverage runs as expected on Github Actions + tox
3 |
--------------------------------------------------------------------------------
/tests/conftest.py:
--------------------------------------------------------------------------------
1 | # pytest configuration file
2 |
3 | import pytest
4 | import logging
5 | from pathlib import Path
6 |
7 |
@pytest.fixture()
def fake_inventory_file(tmpdir):
    """Fixture: path (str) to a non-existent inventory CSV inside a temp dir."""
    yield str(tmpdir.join("inventory.csv"))
11 |
12 |
@pytest.fixture()
def netcfgbu_envars(monkeypatch):
    """Fixture: set the NETCFGBU_ environment variables required by config load."""
    monkeypatch.setenv("NETCFGBU_DEFAULT_USERNAME", "dummy-username")
    monkeypatch.setenv("NETCFGBU_DEFAULT_PASSWORD", "dummy-password")
    monkeypatch.setenv("NETCFGBU_INVENTORY", "/tmp/inventory.csv")
18 |
19 |
class RecordsCollector(logging.Handler):
    """Logging handler that captures every emitted record into a list."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # captured log records, in emission order
        self.records = []

    def emit(self, log_record):
        """Store the record for later inspection by tests."""
        self.records.append(log_record)
27 |
28 |
@pytest.fixture()
def log_vcr():
    """Fixture: root logger whose first handler is replaced by a RecordsCollector.

    NOTE(review): replaces handlers[0] in place — assumes the root logger
    already has at least one handler installed; confirm before reuse.
    """
    lgr = logging.getLogger()
    lgr.setLevel(logging.DEBUG)
    lgr_vcr = RecordsCollector()
    lgr.handlers[0] = lgr_vcr
    return lgr
36 |
37 |
38 | @pytest.fixture(scope="module")
39 | def files_dir(request):
40 | return Path(request.module.__file__).parent / "files"
41 |
--------------------------------------------------------------------------------
/tests/files/do-fail.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Test helper: print a message and exit non-zero to simulate a failing
# inventory script.
echo "Doing something that failed, ieeeeeeee!"
exit 1
4 |
--------------------------------------------------------------------------------
/tests/files/do-nothing.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Test helper: print a message and exit zero to simulate a successful
# inventory script.
echo "Doing nothing"
exit 0
4 |
--------------------------------------------------------------------------------
/tests/files/fake-testkey:
--------------------------------------------------------------------------------
1 | # fake deployment ssh-key; file must exist to pass the config-model check.
2 |
--------------------------------------------------------------------------------
/tests/files/mvp-netcfgbu.toml:
--------------------------------------------------------------------------------
1 | # -----------------------------------------------------------------------------
2 | #
3 | # Network Configuration Backup
4 | # Configuration File
5 | # -----------------------------------------------------------------------------
6 |
7 | # -----------------------------------------------------------------------------
8 | # Default Settings
9 | # -----------------------------------------------------------------------------
10 |
11 | [defaults]
12 | inventory = "/tmp/inventory.csv"
13 | configs_dir = "/tmp/configs"
14 | credentials.username = "my-user-login-name"
15 | credentials.password = "my-user-login-password"
16 |
17 |
18 |
19 |
20 | # -----------------------------------------------------------------------------
21 | #
22 | # Network OS Specifications
23 | #
24 | # -----------------------------------------------------------------------------
25 |
26 | # -----------------------------------------------------------------------------
27 | # Cisco switches, routers, and firewalls
28 | # -----------------------------------------------------------------------------
29 |
30 | [os_name.ios]
31 | pre_get_config = "terminal length 0"
32 | linter = "ios"
33 |
34 | [os_name.iosxe]
35 | linter = 'ios'
36 |
37 | [os_name.nxos]
38 | get_config = 'show running-config | no-more'
39 | linter = "nxos"
40 |
41 | [os_name.iosxr]
42 | linter = "iosxr"
43 |
44 | [os_name.asa]
45 | pre_get_config = 'terminal pager 0'
46 |
47 | # -----------------------------------------------------------------------------
48 | # Cisco WLC
49 | # -----------------------------------------------------------------------------
50 |
51 | [os_name.aireos]
52 | # support for 8.5 release requires a special connector to handle the User &
53 | # Password prompts
54 |
55 | get_config = "show run-config commands"
56 | pre_get_config = "config paging disable"
57 | connection = "netcfgbu.connectors.ssh.LoginPromptUserPass"
58 |
59 | [os_name.aireos8_10]
60 | # extending the timeout to 3min due to observations with the 8.10 release
61 | # resulting in sporadic delays in the output of the config.
62 |
63 | timeout = 300
64 |
65 | get_config = "show run-config commands"
66 | pre_get_config = "config paging disable"
67 |
68 | # need to explicitly set the Key Exchange algorithms to support the 8.10
69 | # SSH configured requirements; can be set here or in your ssh_config file.
70 |
71 | # ssh_configs.kex_algs = [
72 | # 'ecdh-sha2-nistp256',
73 | # 'diffie-hellman-group14-sha1'
74 | # ]
75 |
76 | [[os_name.aireos8_10.credentials]]
77 | username = "$WLC_USERNAME"
78 | password = "$WLC_PASSWORD"
79 |
80 | # -----------------------------------------------------------------------------
81 | # Palo Alto PANOS Firewalls
82 | # -----------------------------------------------------------------------------
83 |
84 | [os_name.panos]
85 | pre_get_config = [
86 | "set cli pager off",
87 | "configure"
88 | ]
89 | get_config = "show"
90 | linter = "panos"
91 |
92 | # -----------------------------------------------------------------------------
93 | # Linters
94 | # -----------------------------------------------------------------------------
95 |
96 | [linters.iosxr]
97 | config_starts_after = 'Building configuration'
98 |
99 | [linters.ios]
100 | config_starts_after = 'Current configuration'
101 |
102 | [linters.nxos]
103 | config_starts_after = '!Time:'
104 |
105 | [linters.panos]
106 | config_ends_at = "[edit]"
107 |
108 | # -----------------------------------------------------------------------------
109 | #
110 | # Version Control System(s)
111 | #
112 | # Currently only github systems are supported, but others types may be
113 | # supported in the future. If you have more than one vcs repository you can
114 | # include the `name` field so that you can identify the vcs-repo using the CLI
115 | # --name option.
116 | # -----------------------------------------------------------------------------
117 |
118 | [[vcs]]
119 | repo = "https://github.mycorp.com/jschulman/test-network-configs.git"
120 | token = "$GIT_TOKEN"
121 |
122 | # -----------------------------------------------------------------------------
123 | # Logging - follows Python format as described
124 | # https://docs.python.org/3/library/logging.config.html
125 | # -----------------------------------------------------------------------------
126 |
127 |
128 | [logging.loggers.netcfgbu]
129 | handlers = ["console", "file"]
130 | level = "INFO"
131 |
132 | [logging.loggers.asyncssh]
133 | # set the level to warning by default. If you want to enable debugging
134 | # use the '--debug-ssh' option to set the debug level from [1-3];
135 | # https://asyncssh.readthedocs.io/en/latest/api.html#asyncssh.set_debug_level
136 |
137 | handlers = ["console"]
138 | level = "WARNING"
139 |
140 | [logging.handlers.console]
141 | class = "logging.StreamHandler"
142 | formatter = "basic"
143 | stream = "ext://sys.stdout"
144 |
145 | [logging.handlers.file]
146 | class = "logging.FileHandler"
147 | formatter = "basic"
148 | filename = "netcfgbu.log"
149 |
150 | [logging.formatters.basic]
151 | format = "%(asctime)s %(levelname)s: %(message)s"
152 |
--------------------------------------------------------------------------------
/tests/files/plugins/test-plugin.py:
--------------------------------------------------------------------------------
1 | from netcfgbu.plugins import Plugin
2 |
3 |
class TestPlugin(Plugin):
    """Plugin subclass used to exercise plugin hook dispatch in tests."""

    # NOTE(review): the hooks below are defined without `self`; presumably the
    # Plugin framework invokes them as class-level callables — confirm against
    # netcfgbu.plugins before "fixing" the signatures.
    def backup_success(rec: dict, res: bool):
        return (rec, res)

    def backup_failed(rec: dict, res: bool):
        return (rec, res)
10 |
--------------------------------------------------------------------------------
/tests/files/test-config-jumphosts.toml:
--------------------------------------------------------------------------------
1 | [[jumphost]]
2 | proxy = "1.2.3.4"
3 |
4 | [[jumphost]]
5 | proxy = "11.22.33.44"
6 | name = 'foobaz'
--------------------------------------------------------------------------------
/tests/files/test-config-logging.toml:
--------------------------------------------------------------------------------
1 | [logging.loggers.netcfgbu]
2 | handlers = ["console", "file"]
3 | level = "INFO"
4 |
5 | [logging.handlers.console]
6 | class = "logging.StreamHandler"
7 | formatter = "basic"
8 | stream = "ext://sys.stdout"
9 |
10 | [logging.handlers.file]
11 | class = "logging.FileHandler"
12 | formatter = "basic"
13 | filename = "/tmp/netcfgbu.log"
14 |
15 | [logging.formatters.basic]
16 | format = "%(asctime)s %(levelname)s: %(message)s"
17 |
--------------------------------------------------------------------------------
/tests/files/test-config-os-name-prompt-pattern.toml:
--------------------------------------------------------------------------------
1 | [os_name.cumulus]
2 | # NOTE: make sure that the user has password-less sudo access, otherwise the
3 | # get_config execution will fail. There is no current workaround for this
4 | # requirement. Also pre_get_config does not work for Cumulus devices at this time.
5 | #
6 | # Do not change the order of the cat commands either. This ensures the final
7 | # file format is recognized by Batfish (https://github.com/batfish/batfish)
8 |
9 | get_config = "( cat /etc/hostname; cat /etc/network/interfaces; cat /etc/cumulus/ports.conf; sudo cat /etc/frr/frr.conf)"
10 |
11 | # example Cumulus prompt value: cumulus@leaf01:mgmt-vrf:~$
12 | prompt_pattern = '[a-z0-9.\-@:~]{10,65}\s*[#$]'
13 |
--------------------------------------------------------------------------------
/tests/files/test-config-os_name.toml:
--------------------------------------------------------------------------------
1 | [os_name.ios]
2 | get_config = 'fake show running-config'
3 |
--------------------------------------------------------------------------------
/tests/files/test-content-config.txt:
--------------------------------------------------------------------------------
1 | version 7.1(4)N1(1)
2 | hostname switch1
3 |
4 | no feature telnet
5 | feature scp-server
6 | feature tacacs+
7 | cfs eth distribute
8 | feature bgp
9 | feature pim
10 | feature udld
11 |
12 |
--------------------------------------------------------------------------------
/tests/files/test-credentials.toml:
--------------------------------------------------------------------------------
1 | [[credentials]]
2 | username = "superadmin"
3 | password = "$ENABLE_PASSWORD"
4 |
5 |
--------------------------------------------------------------------------------
/tests/files/test-csv-withcomments.csv:
--------------------------------------------------------------------------------
1 | host,os_name
2 | switch1,eos
3 | switch2,eos
4 | # swittch3,ios
5 | # swiitch4,ios
6 |
--------------------------------------------------------------------------------
/tests/files/test-gitspec-badrepo.toml:
--------------------------------------------------------------------------------
1 | [[git]]
2 | name = "ssh"
3 | repo = "gibberish@gitlab.com:jschulman/test-network-configs.git"
4 | deploy_key = "$HOME/test-config-backups"
5 |
--------------------------------------------------------------------------------
/tests/files/test-gitspec.toml:
--------------------------------------------------------------------------------
1 | [[git]]
2 | # the first entry does not require a name and it will be treated
3 | # as a default; i.e. when the --name option is omitted.
4 | repo = "https://github.mycorp.com/jschulman/test-network-configs.git"
5 | token = "$GIT_TOKEN"
6 |
7 | [[git]]
8 | # example of using a deployment key that does not use a passphrase
9 | name = "ssh"
10 | repo = "git@gitlab.com:jschulman/test-network-configs.git"
11 | deploy_key = "$GITKEY_DIR/fake-testkey"
12 |
13 | [[git]]
14 | # example of using a deployment key that uses a passphrase
15 | name = "ssh-pw"
16 | repo = "git@github.mlbam.net:jschulman/test-network-configs.git"
17 | deploy_key = "$GITKEY_DIR/fake-testkey"
18 | deploy_passphrase = "$GITKEY_PASSWORD"
19 |
--------------------------------------------------------------------------------
/tests/files/test-inventory-fail.toml:
--------------------------------------------------------------------------------
1 | [[inventory]]
2 | name = 'netbox'
3 | script = '''/tmp/netbox_inventory.py \
4 | --exclude-tag no-backup --exclude-tag no-ssh \
5 | --output inventory.csv'''
6 |
--------------------------------------------------------------------------------
/tests/files/test-inventory-noscript.toml:
--------------------------------------------------------------------------------
1 | [[inventory]]
2 | name = 'do-nothing'
3 |
--------------------------------------------------------------------------------
/tests/files/test-inventory-script-donothing.toml:
--------------------------------------------------------------------------------
1 | [[jumphost]]
2 | proxy = "1.2.3.4"
3 | include = ['os_name=.*']
4 |
5 | [[inventory]]
6 | script = "${SCRIPT_DIR}/do-nothing.sh"
7 |
8 | [[inventory]]
9 | name = 'dummy'
10 | script = "${SCRIPT_DIR}/do-nothing.sh"
11 |
--------------------------------------------------------------------------------
/tests/files/test-inventory-script-fails.toml:
--------------------------------------------------------------------------------
1 | [[inventory]]
2 | script = "${SCRIPT_DIR}/do-fail.sh"
3 |
4 | [logging.loggers.netcfgbu]
5 | handlers = ["console", "file"]
6 | level = "INFO"
7 |
8 | [logging.handlers.console]
9 | class = "logging.StreamHandler"
10 | formatter = "basic"
11 | stream = "ext://sys.stdout"
12 |
13 | [logging.handlers.file]
14 | class = "logging.FileHandler"
15 | formatter = "basic"
16 | filename = "/tmp/netcfgbu.log"
17 |
18 | [logging.formatters.basic]
19 | format = "%(asctime)s %(levelname)s: %(message)s"
20 |
--------------------------------------------------------------------------------
/tests/files/test-inventory.toml:
--------------------------------------------------------------------------------
1 | [[inventory]]
2 | name = 'netbox'
3 | script = "${SCRIPT_DIR}/do-nothing.sh"
4 |
--------------------------------------------------------------------------------
/tests/files/test-just-defaults.toml:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory = "/tmp/inventory.csv"
3 | configs_dir = "/tmp/configs"
4 | credentials.username = "my-user-login-name"
5 | credentials.password = "my-user-login-password"
6 |
7 |
8 |
--------------------------------------------------------------------------------
/tests/files/test-linter-fail.toml:
--------------------------------------------------------------------------------
1 | [os_name.ios]
2 | pre_get_config = "terminal length 0"
3 | linter = "ios"
4 |
5 | # missing linter causes failure
6 |
--------------------------------------------------------------------------------
/tests/files/test-linter.toml:
--------------------------------------------------------------------------------
1 | [os_name.ios]
2 | pre_get_config = "terminal length 0"
3 | linter = "ios"
4 |
5 | [linters.ios]
6 | config_starts_after = 'Current configuration'
7 |
--------------------------------------------------------------------------------
/tests/files/test-small-inventory.csv:
--------------------------------------------------------------------------------
1 | host,os_name
2 | switch1,eos
3 | switch2,eos
4 | switch3,ios
5 | switch4,nxos
6 | switch5,nxos
7 | switch6,nxos
8 |
--------------------------------------------------------------------------------
/tests/files/test-vcs.toml:
--------------------------------------------------------------------------------
1 | [[git]]
2 | repo = 'git@dummy.git'
3 | token = "$GIT_TOKEN"
4 |
5 | #[[git]]
6 | # repo = 'git@dummy-keyfile.git'
7 | # deploy_key = "$GIT_DEPLOY_KEYFILE"
8 |
--------------------------------------------------------------------------------
/tests/test_cli_inventory.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from click.testing import CliRunner
3 | from unittest.mock import Mock
4 |
5 | from netcfgbu.cli import inventory
6 |
7 |
@pytest.fixture(autouse=True)
def _always(netcfgbu_envars):
    """Autouse fixture: ensure the NETCFGBU_ env vars exist for every test."""
    pass
11 |
12 |
@pytest.fixture()
def mock_build(monkeypatch):
    """Fixture: replace the inventory build function with a Mock and return it."""
    monkeypatch.setattr(inventory, "build", Mock())
    return inventory.build
17 |
18 |
def test_cli_inventory_fail_noinventoryfile():
    """`inventory list` fails when the inventory file does not exist."""
    runner = CliRunner()

    # isolate the file system so it doesn't accidentally pickup the sample
    # "netcfgbu.toml" in the project directory.

    with runner.isolated_filesystem():
        res = runner.invoke(inventory.cli_inventory_list, obj={})

    assert res.exit_code != 0
    assert "Inventory file does not exist" in res.output
30 |
31 |
def test_cli_inventory_pass(files_dir, monkeypatch):
    """`inventory list` succeeds with a valid config and inventory file."""
    test_cfg = files_dir.joinpath("test-inventory-script-donothing.toml")
    test_inv = files_dir.joinpath("test-small-inventory.csv")

    monkeypatch.setenv("NETCFGBU_INVENTORY", str(test_inv))
    monkeypatch.setenv("SCRIPT_DIR", str(files_dir))
    monkeypatch.setenv("NETCFGBU_CONFIG", str(test_cfg))

    runner = CliRunner()
    res = runner.invoke(inventory.cli_inventory_list, obj={})
    assert res.exit_code == 0
43 |
44 |
def test_cli_inventory_fail_limits_zero(files_dir, monkeypatch):
    """`inventory list` fails when filtering excludes every record."""
    test_inv = files_dir.joinpath("test-small-inventory.csv")
    monkeypatch.setenv("NETCFGBU_INVENTORY", str(test_inv))

    runner = CliRunner()
    res = runner.invoke(
        inventory.cli_inventory_list, obj={}, args=["--exclude", "os_name=.*"]
    )

    assert res.exit_code != 0
    assert "No inventory matching limits" in res.output
56 |
57 |
def test_cli_inventory_fail_limits_invalid(files_dir, monkeypatch):
    """`inventory list` fails when a --limit expression uses an unknown field."""
    test_inv = files_dir.joinpath("test-small-inventory.csv")
    monkeypatch.setenv("NETCFGBU_INVENTORY", str(test_inv))

    runner = CliRunner()
    res = runner.invoke(
        inventory.cli_inventory_list, obj={}, args=["--limit", "foo=bar"]
    )

    assert res.exit_code != 0
    assert "Invalid filter expression" in res.output
69 |
70 |
def test_cli_inventory_fail_build():
    """`inventory build` fails when no configuration file is provided."""
    runner = CliRunner()
    res = runner.invoke(inventory.cli_inventory_build, obj={})
    assert res.exit_code != 0
    assert "Configuration file required for use with build subcommand" in res.output
76 |
77 |
def test_cli_inventory_pass_build(files_dir, mock_build: Mock, monkeypatch):
    """`inventory build` invokes build() with the default inventory spec."""
    test_cfg = files_dir.joinpath("test-inventory-script-donothing.toml")

    monkeypatch.setenv("SCRIPT_DIR", str(files_dir))
    monkeypatch.setenv("NETCFGBU_CONFIG", str(test_cfg))

    runner = CliRunner()
    res = runner.invoke(inventory.cli_inventory_build, obj={})

    assert res.exit_code == 0
    assert mock_build.called is True
    # first positional argument to build() is the selected inventory spec
    inv_spec = mock_build.mock_calls[0].args[0]
    assert inv_spec.script.endswith("do-nothing.sh")
91 |
92 |
def test_cli_inventory_pass_build_name(files_dir, mock_build: Mock, monkeypatch):
    """`inventory build --name` selects the named inventory spec."""
    test_cfg = files_dir.joinpath("test-inventory-script-donothing.toml")

    monkeypatch.setenv("SCRIPT_DIR", str(files_dir))
    monkeypatch.setenv("NETCFGBU_CONFIG", str(test_cfg))

    runner = CliRunner()
    res = runner.invoke(inventory.cli_inventory_build, obj={}, args=["--name=dummy"])
    assert res.exit_code == 0
    assert mock_build.called is True
    inv_spec = mock_build.mock_calls[0].args[0]
    assert inv_spec.name == "dummy"
105 |
106 |
def test_cli_inventory_fail_build_badname(files_dir, monkeypatch):
    """`inventory build --name` fails when the named section is not configured."""
    test_cfg = files_dir.joinpath("test-inventory-script-donothing.toml")

    monkeypatch.setenv("SCRIPT_DIR", str(files_dir))
    monkeypatch.setenv("NETCFGBU_CONFIG", str(test_cfg))

    runner = CliRunner()
    res = runner.invoke(inventory.cli_inventory_build, obj={}, args=["--name=noexists"])
    assert res.exit_code != 0
    assert (
        "Inventory section 'noexists' not defined in configuration file" in res.output
    )
119 |
--------------------------------------------------------------------------------
/tests/test_cli_probe.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 |
3 |
4 | import pytest
5 | from click.testing import CliRunner
6 | from unittest.mock import Mock
7 | from asynctest import CoroutineMock
8 | from netcfgbu.cli import probe
9 |
10 |
@pytest.fixture(autouse=True)
def _always(netcfgbu_envars, files_dir, monkeypatch):
    """Autouse fixture: point the inventory env var at the small test inventory."""
    test_inv = files_dir.joinpath("test-small-inventory.csv")
    monkeypatch.setenv("NETCFGBU_INVENTORY", str(test_inv))
15 |
16 |
def test_cli_probe_pass(monkeypatch):
    """`probe` dispatches exec_probe with all inventory records."""
    mock_probe = Mock()
    monkeypatch.setattr(probe, "exec_probe", mock_probe)

    runner = CliRunner()
    res = runner.invoke(probe.cli_check, obj={})

    assert res.exit_code == 0
    assert mock_probe.called
    call_args = mock_probe.mock_calls[0].args
    inv_rec = call_args[0]
    # the small test inventory contains six records
    assert len(inv_rec) == 6
29 |
30 |
def test_cli_probe_pass_exec(monkeypatch, log_vcr):
    """A successful probe logs a PASS entry for every device."""
    mock_probe = CoroutineMock()
    monkeypatch.setattr(probe, "probe", mock_probe)
    monkeypatch.setattr(probe, "get_logger", Mock(return_value=log_vcr))

    runner = CliRunner()
    res = runner.invoke(probe.cli_check, obj={})
    assert res.exit_code == 0
    # skip the first record (startup banner); remaining are per-device results
    logs = log_vcr.handlers[0].records[1:]
    assert all("PASS" in log.msg for log in logs)
41 |
42 |
def test_cli_probe_fail_exec(monkeypatch, log_vcr):
    """A probe timeout logs a FAIL entry for every device."""
    mock_probe = CoroutineMock()
    mock_probe.side_effect = asyncio.TimeoutError
    monkeypatch.setattr(probe, "probe", mock_probe)
    monkeypatch.setattr(probe, "get_logger", Mock(return_value=log_vcr))

    runner = CliRunner()
    res = runner.invoke(probe.cli_check, obj={})
    assert res.exit_code == 0
    # skip the first record (startup banner); remaining are per-device results
    logs = log_vcr.handlers[0].records[1:]
    assert all("FAIL" in log.msg for log in logs)
54 |
--------------------------------------------------------------------------------
/tests/test_cli_vcs.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 | from unittest.mock import Mock
3 | from operator import itemgetter
4 |
5 | import pytest
6 | from click.testing import CliRunner
7 |
8 | from netcfgbu.cli import vcs
9 |
10 |
11 | @pytest.fixture(scope="module")
12 | def files_dir(request):
13 | return Path(request.module.__file__).parent.joinpath("files")
14 |
15 |
16 | @pytest.fixture(scope="module")
17 | def config_file(files_dir):
18 | return files_dir.joinpath("test-vcs.toml")
19 |
20 |
@pytest.fixture(autouse=True)
def _vcs_each_test(monkeypatch, netcfgbu_envars):
    """Autouse fixture: set the git token and silence aiologging for each test."""
    # need to monkeypatch the logging to avoid a conflict with the Click test
    # runner also trying to stdout.
    monkeypatch.setenv("GIT_TOKEN", "dummy-token")
    monkeypatch.setattr(vcs, "stop_aiologging", Mock())
27 |
28 |
@pytest.fixture()
def mock_git(monkeypatch):
    """Fixture: replace the git module with a spec'd Mock and return it."""
    # monkeypatch the git module so we can check the called parameters.
    monkeypatch.setattr(vcs, "git", Mock(spec=vcs.git))
    return vcs.git
34 |
35 |
def test_cli_vcs_fail_missingconfig_file():
    """`vcs status` fails when no configuration file is provided."""
    runner = CliRunner()

    # isolate the file system so it doesn't accidentally pickup the sample
    # "netcfgbu.toml" in the project directory.

    with runner.isolated_filesystem():
        res = runner.invoke(vcs.cli_vcs_status, obj={})

    assert res.exit_code != 0
    assert "No configuration file provided" in res.output
47 |
48 |
def test_cli_vcs_fail_missingconfig_section(files_dir, monkeypatch):
    """`vcs status` fails when the config file lacks a vcs section."""

    # select a test inventory file that does not contain any vcs configuration
    cfg_file = files_dir.joinpath("test-config-logging.toml")

    runner = CliRunner()

    # isolate the file system so it doesn't accidentally pickup the sample
    # "netcfgbu.toml" in the project directory.

    with runner.isolated_filesystem():
        res = runner.invoke(vcs.cli_vcs_status, obj={}, args=["-C", str(cfg_file)])

    assert res.exit_code != 0
    assert "No vcs config section found" in res.output
64 |
65 |
def test_cli_vcs_pass_status(mock_git: Mock, config_file, monkeypatch):
    """`vcs status` calls git.vcs_status with the spec from the config file."""

    runner = CliRunner()
    res = runner.invoke(vcs.cli_vcs_status, obj={}, args=["-C", str(config_file)])

    assert res.exit_code == 0
    assert mock_git.vcs_status.called
    kwargs = mock_git.vcs_status.mock_calls[0].kwargs
    git_spec = kwargs["spec"]
    assert git_spec.repo == "git@dummy.git"
    assert git_spec.token.get_secret_value() == "dummy-token"
76 | assert git_spec.token.get_secret_value() == "dummy-token"
77 |
78 |
def test_cli_vcs_pass_prepare(mock_git: Mock, config_file, monkeypatch):
    """`vcs prepare` calls git.vcs_prepare with the configured repo directory."""
    # monkeypatch the git module so we can check the called parameters.
    monkeypatch.setenv("NETCFGBU_CONFIGSDIR", "/tmp/configs")

    runner = CliRunner()
    res = runner.invoke(vcs.cli_vcs_prepare, obj={}, args=["-C", str(config_file)])

    assert res.exit_code == 0
    assert mock_git.vcs_prepare.called
    kwargs = mock_git.vcs_prepare.mock_calls[0].kwargs
    git_spec, repo_dir = kwargs["spec"], kwargs["repo_dir"]
    assert git_spec.repo == "git@dummy.git"
    assert str(repo_dir) == "/tmp/configs"
92 |
93 |
def test_cli_vcs_pass_save_tag_notgiven(mock_git: Mock, config_file, monkeypatch):
    """`vcs save` without --tag passes tag_name=None to git.vcs_save."""
    monkeypatch.setenv("NETCFGBU_CONFIGSDIR", "/tmp/configs")
    runner = CliRunner()
    res = runner.invoke(vcs.cli_vcs_save, obj={}, args=["-C", str(config_file)])
    assert res.exit_code == 0
    assert mock_git.vcs_save.called
    repo_dir, tag_name = itemgetter("repo_dir", "tag_name")(
        mock_git.vcs_save.mock_calls[0].kwargs
    )
    assert str(repo_dir) == "/tmp/configs"
    assert tag_name is None
105 |
--------------------------------------------------------------------------------
/tests/test_config.py:
--------------------------------------------------------------------------------
1 | from os import getenv
2 | from io import StringIO
3 | from pathlib import Path
4 |
5 | import pytest # noqa
6 | from pydantic import ValidationError
7 | from first import first
8 | import toml
9 |
10 | from netcfgbu.config import load
11 | from netcfgbu import config_model
12 |
13 |
def test_config_onlyenvars_pass(monkeypatch, netcfgbu_envars):
    """
    Execute a test where there is no configuration file. In this
    case the NETCFGBU_ environment variables must exist.
    """
    app_cfg = load()

    expected_username = getenv("NETCFGBU_DEFAULT_USERNAME")
    expected_password = getenv("NETCFGBU_DEFAULT_PASSWORD")

    assert app_cfg.defaults.inventory == getenv("NETCFGBU_INVENTORY")
    assert app_cfg.defaults.credentials.username == expected_username
    assert app_cfg.defaults.credentials.password.get_secret_value() == expected_password
26 |
27 |
def test_config_onlyenvars_fail_missing():
    """
    Execute a test where there is no configuration file. Omit the default
    environment variables and ensure that an exception is raised as expected.
    """

    with pytest.raises(RuntimeError) as excinfo:
        load()

    exc_errmsg = excinfo.value.args[0]

    # all three required default fields should be reported as missing
    assert "defaults.inventory" in exc_errmsg
    assert "defaults.credentials.username" in exc_errmsg
    assert "defaults.credentials.password" in exc_errmsg
42 |
43 |
def test_config_onlyenvars_fail_bad_noinventory(monkeypatch):
    """
    Test the case where NETCFGBU_INVENTORY is set but empty, but the file does
    not exist; which would generate an exception message.
    """
    monkeypatch.setenv("NETCFGBU_INVENTORY", "")

    with pytest.raises(RuntimeError) as excinfo:
        load()

    # locate the inventory-specific line within the multi-line error message
    exc_errmsgs = excinfo.value.args[0].splitlines()
    found = first([line for line in exc_errmsgs if "defaults.inventory" in line])
    assert found
    assert "inventory empty value not allowed" in found
58 |
59 |
def test_config_credentials_fail_missingvar(request, monkeypatch, fake_inventory_file):
    """
    Test the case where the [[credentials]] section is provided that uses
    an environment variable, and that environment variable is missing.
    """
    # context manager closes the config file handle even when load()
    # raises; the previous bare open() leaked the handle (ResourceWarning)
    with open(f"{request.fspath.dirname}/files/test-credentials.toml") as fileio:
        with pytest.raises(RuntimeError) as excinfo:
            load(fileio=fileio)

    exc_errmsgs = excinfo.value.args[0].splitlines()
    found = first([line for line in exc_errmsgs if "credentials.0.password" in line])
    assert found
    assert 'Environment variable "ENABLE_PASSWORD" missing' in found
74 |
75 |
def test_config_credentials_fail_empytvar(request, monkeypatch, netcfgbu_envars):
    """
    Test the case where the [[credentials]] section is provided that uses an
    environment variable, and that environment variable exists, but it is the
    empty-string.
    """
    monkeypatch.setenv("ENABLE_PASSWORD", "")

    # context manager closes the config file handle even when load()
    # raises; the previous bare open() leaked the handle
    with open(f"{request.fspath.dirname}/files/test-credentials.toml") as fileio:
        with pytest.raises(RuntimeError) as excinfo:
            load(fileio=fileio)

    exc_errmsgs = excinfo.value.args[0].splitlines()
    found = first([line for line in exc_errmsgs if "credentials.0.password" in line])
    assert found
    assert 'Environment variable "ENABLE_PASSWORD" empty' in found
93 |
94 |
def test_config_credentials_pass_usesvar(request, monkeypatch, netcfgbu_envars):
    """
    Test the case where the [[credentials]] section is provided that uses an
    environment variable, and that environment variable exists, and it is set
    to a non-empty value.
    """
    monkeypatch.setenv("ENABLE_PASSWORD", "foobaz")

    # context manager closes the config file handle (previously leaked)
    with open(f"{request.fspath.dirname}/files/test-credentials.toml") as fileio:
        app_cfg = load(fileio=fileio)

    assert app_cfg.credentials[0].password.get_secret_value() == "foobaz"
106 |
107 |
def test_config_git_pass(request, netcfgbu_envars, monkeypatch):
    """
    Test the case where a [[git]] section is properly configured.
    """
    files_dir = Path(request.fspath.dirname).joinpath("files")
    monkeypatch.setenv("GIT_TOKEN", "fake-token")
    monkeypatch.setenv("GITKEY_PASSWORD", "fake-password")
    monkeypatch.setenv("GITKEY_DIR", str(files_dir.absolute()))

    # context manager closes the config file handle (previously leaked)
    with files_dir.joinpath("test-gitspec.toml").open() as fileio:
        app_cfg = load(fileio=fileio)

    assert app_cfg.git[0].token.get_secret_value() == "fake-token"
    assert app_cfg.git[2].deploy_passphrase.get_secret_value() == "fake-password"
121 |
122 |
def test_config_git_fail_badrepo(request, netcfgbu_envars, monkeypatch):
    """
    Test the case where a [[git]] section has an improper GIT URL.
    """
    # context manager closes the config file handle even when load()
    # raises; the previous bare open() leaked the handle
    with open(f"{request.fspath.dirname}/files/test-gitspec-badrepo.toml") as fileio:
        with pytest.raises(RuntimeError) as excinfo:
            load(fileio=fileio)

    exc_errmsgs = excinfo.value.args[0].splitlines()
    found = first([line for line in exc_errmsgs if "git.0.repo" in line])
    assert found
    assert "Bad repo URL" in found
135 |
136 |
def test_config_inventory_pass(request, monkeypatch, netcfgbu_envars):
    """
    Test the case where an [[inventory]] section is properly configured.
    """
    files_dir = request.fspath.dirname + "/files"
    monkeypatch.setenv("SCRIPT_DIR", files_dir)

    # context manager closes the config file handle (previously leaked)
    with open(f"{files_dir}/test-inventory.toml") as fileio:
        load(fileio=fileio)
145 |
146 |
def test_config_inventory_fail_noscript(request, netcfgbu_envars):
    """
    Test the case where an [[inventory]] section defined a script, but the
    script does not actually exist.
    """
    # context manager closes the config file handle even when load()
    # raises; the previous bare open() leaked the handle
    with open(f"{request.fspath.dirname}/files/test-inventory-fail.toml") as fileio:
        with pytest.raises(RuntimeError) as excinfo:
            load(fileio=fileio)

    exc_errmsgs = excinfo.value.args[0].splitlines()
    found = first([line for line in exc_errmsgs if "inventory.0.script" in line])
    assert found
    assert "File not found:" in found
160 |
161 |
def test_config_inventory_fail_script_noexec(netcfgbu_envars, tmpdir):
    """
    An [[inventory]] script that exists on disk but lacks the executable
    bit must cause a configuration error.
    """
    dummy_script = tmpdir.join("dummy-script.sh")
    dummy_script.ensure()

    cfg_stream = StringIO()
    cfg_stream.name = "fake-file"
    toml.dump({"inventory": [{"name": "foo", "script": str(dummy_script)}]}, cfg_stream)
    cfg_stream.seek(0)

    with pytest.raises(RuntimeError) as excinfo:
        load(fileio=cfg_stream)

    found = first(
        line
        for line in excinfo.value.args[0].splitlines()
        if "inventory.0.script" in line
    )
    assert found
    assert "is not executable" in found
184 |
185 |
def test_config_linter_pass(netcfgbu_envars, request):
    """
    Test the case where an [os_name] section defines a linter, and that
    linter exists; no errors expected.
    """
    # context manager closes the config file handle (previously leaked)
    with open(f"{request.fspath.dirname}/files/test-linter.toml") as fileio:
        app_cfg = load(fileio=fileio)

    assert app_cfg.os_name["ios"]
    assert app_cfg.os_name["ios"].linter == "ios"
    assert app_cfg.linters["ios"]
197 |
198 |
def test_config_linter_fail(netcfgbu_envars, request):
    """
    Test the case where an [os_name] section defines a linter, but that
    linter is not defined in the configuration.
    """
    # context manager closes the config file handle even when load()
    # raises; the previous bare open() leaked the handle
    with open(f"{request.fspath.dirname}/files/test-linter-fail.toml") as fileio:
        with pytest.raises(RuntimeError) as excinfo:
            load(fileio=fileio)

    exc_errmsgs = excinfo.value.args[0].splitlines()
    found = first([line for line in exc_errmsgs if "os_name" in line])
    # guard first: without it a missing message raises TypeError
    # ("in None") instead of a clean assertion failure
    assert found
    assert 'OS spec "ios" using undefined linter "ios"' in found
212 |
213 |
def test_config_pass_noexistdir(tmpdir, netcfgbu_envars, monkeypatch):
    """
    When NETCFGBU_CONFIGSDIR points at a directory that does not yet
    exist, loading the configuration creates it.
    """
    dirpath = tmpdir.join("dummy-dir")
    monkeypatch.setenv("NETCFGBU_CONFIGSDIR", str(dirpath))

    configs_dir: Path = load().defaults.configs_dir

    assert configs_dir == dirpath
    assert configs_dir.exists()
227 |
228 |
def test_plugins_pass_noexistdir(tmpdir, netcfgbu_envars, monkeypatch):
    """
    Test use-case where the provided plugins-dir directory does not
    exist in the configuration; as a result the plugins-dir is created.

    (Docstring previously described configs-dir; this test exercises
    NETCFGBU_PLUGINSDIR / defaults.plugins_dir.)
    """
    dirpath = tmpdir.join("dummy-dir")
    monkeypatch.setenv("NETCFGBU_PLUGINSDIR", str(dirpath))
    app_cfg = load()

    plugins_dir: Path = app_cfg.defaults.plugins_dir
    assert plugins_dir == dirpath
    assert plugins_dir.exists()
242 |
243 |
def test_config_pass_asfilepath(request):
    """
    Loading the configuration by filepath succeeds when the file exists.
    """
    load(filepath=f"{request.fspath.dirname}/files/test-just-defaults.toml")
250 |
251 |
def test_config_fail_asfilepath(tmpdir):
    """
    Loading the configuration by filepath raises FileNotFoundError when
    the file does not exist, reporting the offending filename.
    """
    missing_filepath = str(tmpdir.join("noexist"))

    with pytest.raises(FileNotFoundError) as excinfo:
        load(filepath=missing_filepath)

    assert excinfo.value.filename == missing_filepath
263 |
264 |
def test_config_jumphost_name(netcfgbu_envars, request):
    """
    A jumphost's name defaults to its proxy value unless a name is given
    explicitly in the configuration.
    """
    app_cfg = load(
        filepath=request.fspath.dirname + "/files/test-config-jumphosts.toml"
    )

    first_jh, second_jh = app_cfg.jumphost[0], app_cfg.jumphost[1]
    assert first_jh.name == first_jh.proxy
    assert second_jh.name != second_jh.proxy
273 |
274 |
def test_vcs_fail_config(tmpdir):
    """
    GitSpec validation must reject inconsistent auth-method combinations.
    """
    key_path = tmpdir.join("fake-key")
    key_path.ensure()
    fake_key = str(key_path)

    # deploy_passphrase given without deploy_key
    with pytest.raises(ValidationError) as excinfo:
        config_model.GitSpec(
            repo="git@dummy.git", password="fooer", deploy_passphrase="foobaz"
        )
    assert (
        excinfo.value.errors()[0]["msg"]
        == "deploy_key required when using deploy_passphrase"
    )

    # no auth method supplied at all
    with pytest.raises(ValidationError) as excinfo:
        config_model.GitSpec(repo="git@dummy.git")
    assert excinfo.value.errors()[0]["msg"].startswith(
        "Missing one of required auth method fields"
    )

    # more than one auth method supplied
    with pytest.raises(ValidationError) as excinfo:
        config_model.GitSpec(repo="git@dummy.git", token="token", deploy_key=fake_key)
    assert excinfo.value.errors()[0]["msg"].startswith("Only one of")
300 |
--------------------------------------------------------------------------------
/tests/test_connectors.py:
--------------------------------------------------------------------------------
1 | import pytest # noqa
2 |
3 | from netcfgbu import connectors
4 |
5 |
def test_connectors_pass():
    """The default connector class is BasicSSHConnector."""
    assert connectors.get_connector_class() == connectors.BasicSSHConnector
9 |
10 |
def test_connectors_pass_named():
    """A dotted module path resolves to the named connector class."""
    from netcfgbu.connectors.ssh import LoginPromptUserPass

    dotted_name = "netcfgbu.connectors.ssh.LoginPromptUserPass"
    assert connectors.get_connector_class(dotted_name) == LoginPromptUserPass
17 |
18 |
def test_connectors_fail_named(tmpdir):
    """A name that is not an importable module path raises ModuleNotFoundError."""
    with pytest.raises(ModuleNotFoundError):
        connectors.get_connector_class(str(tmpdir))
22 |
--------------------------------------------------------------------------------
/tests/test_filetypes.py:
--------------------------------------------------------------------------------
1 | from netcfgbu.filetypes import CommentedCsvReader
2 |
3 |
def test_filetypes_csv_hascomments(request):
    """
    CommentedCsvReader must yield the uncommented records and skip the
    commented-out ones.
    """
    filepath = f"{request.fspath.dirname}/files/test-csv-withcomments.csv"
    # context manager closes the CSV file handle (previously leaked)
    with open(filepath) as infile:
        csv_data = [rec["host"] for rec in CommentedCsvReader(infile)]
    assert "switch1" in csv_data
    assert "switch2" in csv_data
    assert "switch3" not in csv_data
    assert "switch4" not in csv_data
11 |
--------------------------------------------------------------------------------
/tests/test_inventory.py:
--------------------------------------------------------------------------------
1 | import pytest # noqa
2 | from first import first
3 |
4 | from netcfgbu import inventory
5 | from netcfgbu import config
6 |
7 |
def test_inventory_pass(request, monkeypatch, netcfgbu_envars):
    """
    Load a small, properly formatted inventory file: first the complete
    inventory, then a limited/excluded subset.
    """
    inventory_fpath = f"{request.fspath.dirname}/files/test-small-inventory.csv"
    monkeypatch.setenv("NETCFGBU_INVENTORY", inventory_fpath)
    app_cfg = config.load()

    # complete inventory
    assert len(inventory.load(app_cfg)) == 6

    # filtered inventory: only EOS devices, minus switch1
    filtered = inventory.load(
        app_cfg, limits=["os_name=eos"], excludes=["host=switch1"]
    )
    assert len(filtered) == 1
    assert filtered[0]["host"] == "switch2"
28 |
29 |
def test_inventory_fail_nofilegiven(tmpdir, netcfgbu_envars):
    """
    The inventory is named in the configuration but the file does not
    exist on disk; loading it must raise FileNotFoundError.
    """
    app_cfg = config.load()

    with pytest.raises(FileNotFoundError) as excinfo:
        inventory.load(app_cfg)

    assert "Inventory file does not exist" in excinfo.value.args[0]
42 |
43 |
def test_inventory_pass_build(request, monkeypatch, netcfgbu_envars):
    """
    The configuration contains an inventory build script that exists and
    exits successfully (return code 0).
    """
    files_dir = request.fspath.dirname + "/files"
    monkeypatch.setenv("SCRIPT_DIR", files_dir)
    app_cfg = config.load(
        filepath=files_dir + "/test-inventory-script-donothing.toml"
    )

    assert inventory.build(app_cfg.inventory[0]) == 0
56 |
57 |
def test_inventory_fail_build_exitnozero(request, monkeypatch, netcfgbu_envars):
    """
    The configuration contains an inventory build script that exists but
    exits with a non-zero return code.
    """
    files_dir = request.fspath.dirname + "/files"
    monkeypatch.setenv("SCRIPT_DIR", files_dir)
    app_cfg = config.load(filepath=files_dir + "/test-inventory-script-fails.toml")

    assert inventory.build(app_cfg.inventory[0]) != 0
72 |
73 |
def test_inventory_fail_build_noscript(request, netcfgbu_envars):
    """
    Test the use-case where the configuration contains an [[inventory]]
    section that omits the required script field, so loading the
    configuration raises a validation error.

    (Docstring previously copy-pasted from the passing build test; this
    test is specifically about the missing script.)
    """
    config_fpath = f"{request.fspath.dirname}/files/test-inventory-noscript.toml"
    with pytest.raises(RuntimeError) as excinfo:
        config.load(filepath=config_fpath)

    exc_errmsgs = excinfo.value.args[0].splitlines()
    found = first([line for line in exc_errmsgs if "inventory.0.script" in line])
    assert found
    assert "field required" in found
87 |
--------------------------------------------------------------------------------
/tests/test_jumphosts.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from pathlib import Path
3 | from collections import Counter
4 | from unittest.mock import Mock
5 |
6 | import pytest # noqa
7 | from asynctest import CoroutineMock # noqa
8 |
9 | import asyncssh
10 |
11 | from netcfgbu import config_model
12 | from netcfgbu import jumphosts
13 | from netcfgbu.filetypes import CommentedCsvReader
14 |
15 |
@pytest.fixture(scope="module", autouse=True)
def inventory(request):
    """Load the small test inventory CSV once for the whole module."""
    inv_fp = Path(request.module.__file__).parent / "files/test-small-inventory.csv"
    return list(CommentedCsvReader(inv_fp.open()))
21 |
22 |
@pytest.fixture()
def mock_asyncssh_connect(monkeypatch):
    """Replace jumphosts.asyncssh with a mock whose connect() is awaitable."""
    monkeypatch.setattr(jumphosts, "asyncssh", Mock())
    # keep the real Error type so except-clauses in jumphosts still match
    jumphosts.asyncssh.Error = asyncssh.Error
    jumphosts.asyncssh.connect = CoroutineMock()
    return jumphosts.asyncssh.connect
30 |
31 |
def test_jumphosts_pass_noused(inventory):
    """
    A jump host configured with neither include nor exclude filters is
    never used, so it is not registered as available.
    TODO: consider making this an error in config-model validation.
    """
    jh_spec = config_model.JumphostSpec(proxy="1.2.3.4")

    jumphosts.init_jumphosts(jumphost_specs=[jh_spec], inventory=inventory)
    assert len(jumphosts.JumpHost.available) == 0
41 |
42 |
def test_jumphosts_pass_incused(inventory):
    """
    Including only EOS devices registers the jump host, and the EOS
    records in the inventory resolve to it.
    """
    jh_spec = config_model.JumphostSpec(proxy="1.2.3.4", include=["os_name=eos"])

    jumphosts.init_jumphosts(jumphost_specs=[jh_spec], inventory=inventory)
    assert len(jumphosts.JumpHost.available) == 1

    usage = Counter(
        getattr(jumphosts.get_jumphost(rec), "name", None) for rec in inventory
    )

    # two records in the small inventory are EOS devices
    assert usage["1.2.3.4"] == 2
57 |
58 |
def test_jumphosts_pass_exlused(inventory):
    """
    Excluding EOS devices registers the jump host for every non-EOS
    record in the inventory.
    """
    jh_spec = config_model.JumphostSpec(proxy="1.2.3.4", exclude=["os_name=eos"])

    jumphosts.init_jumphosts(jumphost_specs=[jh_spec], inventory=inventory)
    assert len(jumphosts.JumpHost.available) == 1

    usage = Counter(
        getattr(jumphosts.get_jumphost(rec), "name", None) for rec in inventory
    )

    # four records in the small inventory are not EOS devices
    assert usage["1.2.3.4"] == 4
73 |
74 |
def test_jumphosts_pass_exlallused(inventory):
    """Excluding every os_name leaves no jump hosts registered."""
    jh_spec = config_model.JumphostSpec(proxy="1.2.3.4", exclude=["os_name=.*"])

    jumphosts.init_jumphosts(jumphost_specs=[jh_spec], inventory=inventory)
    assert len(jumphosts.JumpHost.available) == 0
82 |
83 |
@pytest.mark.asyncio
async def test_jumphosts_pass_connect(inventory, mock_asyncssh_connect, monkeypatch):
    """
    Connecting to a configured jump host parses user, host, and port from
    the proxy spec and establishes the tunnel.
    """
    jh_spec = config_model.JumphostSpec(
        proxy="dummy-user@1.2.3.4:8022", exclude=["os_name=eos"]
    )
    jumphosts.init_jumphosts(jumphost_specs=[jh_spec], inventory=inventory)

    assert await jumphosts.connect_jumphosts()
    assert mock_asyncssh_connect.called
    assert mock_asyncssh_connect.call_count == 1

    call_kwargs = mock_asyncssh_connect.mock_calls[0].kwargs
    assert call_kwargs["host"] == "1.2.3.4"
    assert call_kwargs["username"] == "dummy-user"
    assert call_kwargs["port"] == 8022

    jh: jumphosts.JumpHost = jumphosts.JumpHost.available[0]
    assert jh.tunnel is not None
104 |
105 |
@pytest.mark.asyncio
async def test_jumphosts_fail_connect(
    netcfgbu_envars, log_vcr, inventory, mock_asyncssh_connect, monkeypatch
):
    """
    Failed jump-host connections (timeout, SSH error) must return False,
    accessing the tunnel afterwards must raise, and the SSH failure must
    be logged.
    """
    monkeypatch.setattr(jumphosts, "get_logger", Mock(return_value=log_vcr))

    jh_spec = config_model.JumphostSpec(
        proxy="dummy-user@1.2.3.4:8022", exclude=["os_name=eos"]
    )
    jumphosts.init_jumphosts(jumphost_specs=[jh_spec], inventory=inventory)

    # connection attempt times out
    mock_asyncssh_connect.side_effect = asyncio.TimeoutError()
    assert await jumphosts.connect_jumphosts() is False

    # connection attempt fails with an SSH protocol error
    mock_asyncssh_connect.side_effect = asyncssh.Error(code=10, reason="nooooope")
    assert await jumphosts.connect_jumphosts() is False

    jh: jumphosts.JumpHost = jumphosts.JumpHost.available[0]
    with pytest.raises(RuntimeError) as excinfo:
        _ = jh.tunnel
    assert "not connected" in excinfo.value.args[0]

    last_log = log_vcr.handlers[0].records[-1]
    assert (
        last_log.msg
        == "JUMPHOST: connect to dummy-user@1.2.3.4:8022 failed: nooooope"
    )
140 |
--------------------------------------------------------------------------------
/tests/test_linters.py:
--------------------------------------------------------------------------------
1 | from pathlib import Path
2 |
3 | from netcfgbu import config_model
4 | from netcfgbu import linter
5 |
6 |
def test_linters_pass_content(files_dir):
    """
    lint_content strips everything before config_starts_after and from
    config_ends_at onward, leaving only the configuration body.
    """
    good_content = files_dir.joinpath("test-content-config.txt").read_text()
    lint_spec = config_model.LinterSpec(
        config_starts_after="!Time:", config_ends_at="! end-test-marker"
    )

    header = "!Command: show running-config\n!Time: Sat Jun 27 17:54:17 2020\n"
    trailer = "\n! end-test-marker"
    lint_content = header + good_content + trailer

    result = linter.lint_content(lint_spec=lint_spec, config_content=lint_content)
    assert result == good_content
25 |
26 |
def test_liners_pass_file(files_dir, tmpdir):
    """
    lint_file rewrites the file in place, leaving only the configuration
    body between the start and end markers.
    """
    exp_content = files_dir.joinpath("test-content-config.txt").read_text()
    lint_spec = config_model.LinterSpec(
        config_starts_after="!Time:", config_ends_at="! end-test-marker"
    )

    header = "!Command: show running-config\n!Time: Sat Jun 27 17:54:17 2020\n"
    tmp_file = Path(tmpdir.join("content"))
    tmp_file.write_text(header + exp_content + "\n! end-test-marker")

    linter.lint_file(tmp_file, lint_spec=lint_spec)
    assert tmp_file.read_text() == exp_content
47 |
48 |
def test_liners_pass_nochange(files_dir, tmpdir, log_vcr, monkeypatch):
    """
    lint_file on already-linted content reports no change, leaves the
    file untouched, and logs a "no change" message.
    """
    exp_content = files_dir.joinpath("test-content-config.txt").read_text()
    lint_spec = config_model.LinterSpec(
        config_starts_after="!Time:", config_ends_at="! end-test-marker"
    )

    tmp_file = Path(tmpdir.join("content"))
    tmp_file.write_text(exp_content)

    monkeypatch.setattr(linter, "log", log_vcr)

    assert linter.lint_file(tmp_file, lint_spec=lint_spec) is False
    assert tmp_file.read_text() == exp_content
    assert "LINT no change on content" in log_vcr.handlers[0].records[-1].msg
67 |
--------------------------------------------------------------------------------
/tests/test_os_name.py:
--------------------------------------------------------------------------------
1 | from netcfgbu import os_specs
2 | from netcfgbu import config
3 | from netcfgbu.connectors import BasicSSHConnector
4 |
5 |
def test_os_name_pass(netcfgbu_envars):
    """
    With only environment-variable defaults, make_host_connector returns
    a BasicSSHConnector carrying the default credentials.
    """
    app_cfg = config.load()
    conn = os_specs.make_host_connector(
        {"host": "dummy", "os_name": "dummy"}, app_cfg
    )

    assert isinstance(conn, BasicSSHConnector)
    assert conn.name == "dummy"

    primary_creds = conn.creds[0]
    assert primary_creds.username == "dummy-username"
    assert primary_creds.password.get_secret_value() == "dummy-password"
17 |
18 |
def test_os_name_pass_namefound(netcfgbu_envars, request):
    """
    An [os_name.$name] section matching the record's os_name supplies the
    connector's os_spec settings.
    """
    app_cfg = config.load(
        filepath=request.fspath.dirname + "/files/test-config-os_name.toml"
    )
    conn = os_specs.make_host_connector({"host": "dummy", "os_name": "ios"}, app_cfg)
    assert conn.os_spec.get_config == "fake show running-config"
27 |
--------------------------------------------------------------------------------
/tests/test_os_name_prompt_pattern.py:
--------------------------------------------------------------------------------
1 | import re
2 | from netcfgbu import os_specs
3 | from netcfgbu.connectors import BasicSSHConnector
4 | from netcfgbu.config import load
5 |
6 |
def test_config_os_name_prompt_pattern(netcfgbu_envars, request):  # noqa
    """
    A user-provided prompt_pattern in the [os_name.$name] configuration
    section must become the SSH connector's PROMPT_PATTERN (wrapped in
    the connector's anchored capture group).
    """
    abs_filepath = (
        request.fspath.dirname + "/files/test-config-os-name-prompt-pattern.toml"
    )
    app_cfg = load(filepath=abs_filepath)
    conn = os_specs.make_host_connector(
        {"host": "dummy", "os_name": "cumulus"}, app_cfg
    )

    # pattern copied from the test data toml file; if the data file
    # changes, this expected pattern must change with it
    user_pattern = r"[a-z0-9.\-@:~]{10,65}\s*[#$]"

    # the connector adds an anchored capture group around the user pattern
    expected_re = re.compile((r"^\r?(" + user_pattern + r")\s*$").encode("utf-8"))

    assert isinstance(conn, BasicSSHConnector)
    assert conn.PROMPT_PATTERN.pattern == expected_re.pattern
    # sanity: the pattern matches a realistic Cumulus prompt
    assert expected_re.match("cumulus@leaf01:mgmt-vrf:~$".encode("utf-8"))
34 |
--------------------------------------------------------------------------------
/tests/test_plugins.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from netcfgbu.config import load
3 | from netcfgbu.plugins import Plugin, _registered_plugins, load_plugins
4 |
5 |
@pytest.fixture()
def pytest_load_plugins(request, monkeypatch, netcfgbu_envars):
    """Point NETCFGBU_PLUGINSDIR at the test plugins directory and load them."""
    plugins_dir = f"{request.fspath.dirname}/files/plugins"
    monkeypatch.setenv("NETCFGBU_PLUGINSDIR", str(plugins_dir))
    load_plugins(load().defaults.plugins_dir)
13 |
14 |
def test_loading_plugins(pytest_load_plugins):
    """Loaded plugin hooks must be subclasses of Plugin."""
    hook_cls = _registered_plugins["hooks"][0]
    assert issubclass(hook_cls, Plugin)
17 |
18 |
def test_plugin_backup_success(pytest_load_plugins):
    """The backup_success hook receives and returns the record and result."""
    rec = {"host": "switch1", "os_name": "junos"}
    res = True

    outcome = _registered_plugins["hooks"][0].backup_success(rec, res)

    assert outcome[0] == rec
    assert outcome[1] == res
27 |
28 |
def test_plugin_backup_failed(pytest_load_plugins):
    """The backup_failed hook receives and returns the record and result."""
    rec = {"host": "switch1", "os_name": "junos"}
    res = False

    outcome = _registered_plugins["hooks"][0].backup_failed(rec, res)

    assert outcome[0] == rec
    assert outcome[1] == res
37 |
--------------------------------------------------------------------------------
/tests/test_probe.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | from unittest.mock import Mock
3 |
4 | from asynctest import CoroutineMock # noqa
5 | import pytest # noqa
6 |
7 | from netcfgbu import probe
8 | from netcfgbu.consts import DEFAULT_PROBE_TIMEOUT
9 |
10 |
@pytest.mark.asyncio
async def test_probe_pass(monkeypatch):
    """probe() returns True when the connection completes within the timeout."""
    mock_asyncio = Mock()
    mock_asyncio.TimeoutError = asyncio.TimeoutError
    mock_asyncio.wait_for = CoroutineMock()
    monkeypatch.setattr(probe, "asyncio", mock_asyncio)

    assert await probe.probe(host="1.2.3.4", timeout=DEFAULT_PROBE_TIMEOUT) is True
22 |
23 |
@pytest.mark.asyncio
async def test_probe_pass_timeout(monkeypatch):
    """probe() returns False when the connection attempt times out."""
    mock_asyncio = Mock()
    mock_asyncio.TimeoutError = asyncio.TimeoutError

    def raises_timeout(coro, timeout):  # noqa
        raise asyncio.TimeoutError

    mock_asyncio.wait_for = Mock(side_effect=raises_timeout)
    monkeypatch.setattr(probe, "asyncio", mock_asyncio)

    assert await probe.probe(host="1.2.3.4", timeout=DEFAULT_PROBE_TIMEOUT) is False
40 |
41 |
@pytest.mark.asyncio
async def test_probe_pass_raises_timeout(monkeypatch):
    """probe(raise_exc=True) propagates TimeoutError instead of returning False."""
    mock_asyncio = Mock()
    mock_asyncio.TimeoutError = asyncio.TimeoutError

    def raises_timeout(coro, timeout):  # noqa
        raise asyncio.TimeoutError

    mock_asyncio.wait_for = Mock(side_effect=raises_timeout)
    monkeypatch.setattr(probe, "asyncio", mock_asyncio)

    with pytest.raises(asyncio.TimeoutError):
        await probe.probe(host="1.2.3.4", timeout=DEFAULT_PROBE_TIMEOUT, raise_exc=True)
58 |
--------------------------------------------------------------------------------
/tests/test_vcs.py:
--------------------------------------------------------------------------------
1 | """
2 | This file contains the pytest test cases for the vcs.git module
3 | """
4 |
5 | from pathlib import Path
6 | from unittest.mock import Mock
7 | import pytest # noqa
8 |
9 | from netcfgbu.vcs import git
10 | from netcfgbu import config_model
11 |
12 |
@pytest.fixture()
def mock_pexpect(monkeypatch):
    """Replace git.pexpect with a mock whose run() reports success."""
    pexpect_mock = Mock()
    # the (output, exit-status) return of pexpect.run is not inspected here
    pexpect_mock.run = Mock(return_value=("", 0))
    monkeypatch.setattr(git, "pexpect", pexpect_mock)
    return pexpect_mock
23 |
24 |
def test_vcs_pass_prepare_token(mock_pexpect, tmpdir, monkeypatch):
    """vcs_prepare with token auth issues the expected git command sequence."""
    monkeypatch.setenv("USER", "dummy-user")
    git_cfg = config_model.GitSpec(repo="git@dummy.git", token="dummy-token")

    git.vcs_prepare(spec=git_cfg, repo_dir=Path(tmpdir.join("repo")))

    mock_run = mock_pexpect.run
    assert mock_run.called

    expected_commands = [
        "git init",
        "git remote add origin git@dummy.git",
        "git config --local user.email dummy-user",
        "git config --local user.name dummy-user",
        "git config --local push.default matching",
        "git pull origin master",
    ]
    actual_commands = [call.kwargs["command"] for call in mock_run.mock_calls]
    assert actual_commands == expected_commands
48 |
49 |
def test_vcs_pass_prepare_deploykey(mock_pexpect, tmpdir, monkeypatch):
    """vcs_prepare with a deploy key also configures core.sshCommand."""
    monkeypatch.setenv("USER", "dummy-user")

    key_file = tmpdir.join("dummy-keyfile")
    key_file.ensure()
    git_cfg = config_model.GitSpec(repo="git@dummy.git", deploy_key=str(key_file))

    git.vcs_prepare(spec=git_cfg, repo_dir=Path(tmpdir.join("repo")))

    mock_run = mock_pexpect.run
    assert mock_run.called

    expected_commands = [
        "git init",
        "git remote add origin git@dummy.git",
        "git config --local user.email dummy-user",
        "git config --local user.name dummy-user",
        "git config --local push.default matching",
        f"git config --local core.sshCommand 'ssh -i {key_file} -o StrictHostKeyChecking=no'",
        "git pull origin master",
    ]
    actual_commands = [call.kwargs["command"] for call in mock_run.mock_calls]
    assert actual_commands == expected_commands
78 |
79 |
def test_vcs_pass_prepare_deploykey_passphrase(mock_pexpect, tmpdir, monkeypatch):
    """vcs_prepare with deploy key + passphrase issues the same command set."""
    monkeypatch.setenv("USER", "dummy-user")

    key_path = tmpdir.join("dummy-keyfile")
    key_path.ensure()
    key_file = str(key_path)

    git_cfg = config_model.GitSpec(
        repo="git@dummy.git",
        deploy_key=key_file,
        deploy_passphrase="dummy-key-passphrase",
    )

    git.vcs_prepare(spec=git_cfg, repo_dir=Path(tmpdir.join("repo")))

    mock_run = mock_pexpect.run
    assert mock_run.called

    expected_commands = [
        "git init",
        "git remote add origin git@dummy.git",
        "git config --local user.email dummy-user",
        "git config --local user.name dummy-user",
        "git config --local push.default matching",
        f"git config --local core.sshCommand 'ssh -i {key_file} -o StrictHostKeyChecking=no'",
        "git pull origin master",
    ]
    actual_commands = [call.kwargs["command"] for call in mock_run.mock_calls]
    assert actual_commands == expected_commands
114 |
115 |
def test_vcs_pass_save(mock_pexpect, tmpdir, monkeypatch):
    """vcs_save commits, pushes, and tags using the timestamp tag name."""
    monkeypatch.setenv("USER", "dummy-user")
    git_cfg = config_model.GitSpec(repo="git@dummy.git", token="dummy-token")

    monkeypatch.setattr(
        git, "tag_name_timestamp", Mock(return_value="dummy-timestamp")
    )

    git.vcs_save(gh_cfg=git_cfg, repo_dir=Path(tmpdir.join("repo")))

    mock_run = mock_pexpect.run
    assert mock_run.called

    expected_commands = [
        "git status",
        "git add -A",
        "git commit -m dummy-timestamp",
        "git push",
        "git tag -a dummy-timestamp -m dummy-timestamp",
        "git push --tags",
    ]
    actual_commands = [call.kwargs["command"] for call in mock_run.mock_calls]
    assert actual_commands == expected_commands
145 |
146 |
def test_vcs_pass_save_nochange(monkeypatch, tmpdir, mock_pexpect):
    """When git status reports nothing to commit, vcs_save stops after status."""
    monkeypatch.setenv("USER", "dummy-user")
    git_cfg = config_model.GitSpec(repo="git@dummy.git", token="dummy-token")

    mock_pexpect.run.return_value = ("nothing to commit", 0)
    git.vcs_save(gh_cfg=git_cfg, repo_dir=Path(tmpdir.join("repo")))

    mock_run = mock_pexpect.run
    assert mock_run.called

    actual_commands = [call.kwargs["command"] for call in mock_run.mock_calls]
    assert actual_commands == ["git status"]
166 |
167 |
def test_vcs_pass_status(monkeypatch, tmpdir, mock_pexpect):
    """vcs_status returns the raw output of the underlying 'git status' run."""
    monkeypatch.setenv("USER", "dummy-user")
    spec = config_model.GitSpec(repo="git@dummy.git", token="dummy-token")

    mock_pexpect.run.return_value = ("nothing to commit", 0)
    output = git.vcs_status(spec=spec, repo_dir=Path(tmpdir.join("repo")))
    assert output == "nothing to commit"
176 |
177 |
def test_vcs_pass_run_auth(monkeypatch, tmpdir, mock_pexpect):
    """git_clone issues clone followed by the local user/push config commands."""
    monkeypatch.setenv("USER", "dummy-user")
    spec = config_model.GitSpec(repo="git@dummy.git", token="dummy-token")
    repo_dir = Path(tmpdir.join("repo"))

    runner = git.git_runner(spec, repo_dir)
    # Intercept runner.run so we can inspect the git sub-commands it issues.
    spy = Mock()
    runner.run = spy

    mock_pexpect.run.return_value = ("yipiee!", 0)
    runner.git_clone()

    expected = [
        f"clone git@dummy.git {repo_dir}",
        "config --local user.email dummy-user",
        "config --local user.name dummy-user",
        "config --local push.default matching",
    ]

    actual = spy.mock_calls
    assert len(actual) == len(expected)
    for call, command in zip(actual, expected):
        assert call.args[0] == command
201 |
202 |
def test_vcs_fail_run_auth(monkeypatch, tmpdir, mock_pexpect):
    """git_clone raises RuntimeError carrying the command output when git fails."""
    monkeypatch.setenv("USER", "dummy-user")
    spec = config_model.GitSpec(repo="git@dummy.git", token="dummy-token")

    runner = git.git_runner(spec, Path(tmpdir.join("repo")))

    # Non-zero exit status from pexpect.run marks the git command as failed.
    mock_pexpect.run.return_value = ("fake-failure", 1)
    with pytest.raises(RuntimeError) as excinfo:
        runner.git_clone()

    assert excinfo.value.args[0] == "fake-failure"
216 |
217 |
def test_vcs_pass_git_config(monkeypatch, tmpdir, mock_pexpect):
    """git_config sets local user.email, user.name, and push.default in the repo."""
    monkeypatch.setenv("USER", "dummy-user")
    spec = config_model.GitSpec(repo="https://github@dummy.git", token="dummy-token")
    repo_dir = Path(tmpdir.join("repo"))
    repo_dir.mkdir()

    runner = git.git_runner(spec, repo_dir)

    # HTTPS repos get the $USER injected into the URL; a freshly made dir is empty.
    assert runner.repo_url == "https://dummy-user@github@dummy.git"
    assert runner.is_dir_empty is True

    runner.git_config()

    expected = [
        "git config --local user.email dummy-user",
        "git config --local user.name dummy-user",
        "git config --local push.default matching",
    ]

    actual = mock_pexpect.run.mock_calls
    assert len(actual) == len(expected)
    for call, command in zip(actual, expected):
        assert call.kwargs["command"] == command
242 |
243 |
def test_vcs_fail_run_noauth(monkeypatch, tmpdir, mock_pexpect):
    """runner.run wraps a failed git command's output in a descriptive RuntimeError."""
    monkeypatch.setenv("USER", "dummy-user")
    spec = config_model.GitSpec(repo="https://github@dummy.git", token="dummy-token")

    runner = git.git_runner(spec, Path(tmpdir.join("repo")))

    # Any non-zero exit status should surface as a RuntimeError with context.
    mock_pexpect.run.return_value = ("fake-failure", 1)
    with pytest.raises(RuntimeError) as excinfo:
        runner.run("status")

    assert excinfo.value.args[0] == "git status failed: fake-failure"
257 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py38,lint,test
3 |
4 | [testenv]
5 | envdir={toxworkdir}/.env
6 | recreate = false
7 | deps = -rrequirements-develop.txt
8 |
9 | passenv =
10 | PWD
11 | HOME
12 |
13 | commands =
14 | lint: flake8
15 | lint: black .
16 | lint: pre-commit run --all-files
17 | lint: interrogate -c pyproject.toml
18 | test: pytest --cov-report=term
19 |
20 | [flake8]
21 | ignore = E501,E203,W503
22 |
23 |
24 | [pytest]
25 | testpaths = tests
26 | addopts =
27 | -v
28 | --basetemp=.pytest_tmpdir
29 | --tb=short
30 | --cov=netcfgbu
31 | --cov-append
32 | --cov-report=html
33 | -p no:warnings
34 |
--------------------------------------------------------------------------------