├── .gitignore
├── README.md
├── backup_configs
├── ansible.cfg
├── backup_with_cmd_mod.yml
├── backup_with_napalm.yml
├── group_vars
│ ├── all.yml
│ ├── asa.yml
│ ├── bigip.yml
│ ├── ios.yml
│ └── nxos.yml
├── hosts.yml
└── playbooks
│ ├── create_dir.yml
│ └── git.yml
├── build_fabric
├── PLAN
├── README.md
├── __pycache__
│ └── validate.cpython-36.pyc
├── ansible.cfg
├── custom_val_builder
│ ├── README
│ ├── ansible.cfg
│ ├── files
│ │ ├── compliance_report.json
│ │ ├── desired_state.yml
│ │ └── file_input.json
│ ├── filter_plugins
│ │ ├── __pycache__
│ │ │ ├── format_data_model.cpython-36.pyc
│ │ │ ├── format_dm.cpython-36.pyc
│ │ │ └── val_builder.cpython-36.pyc
│ │ ├── format_dm.py
│ │ └── val_builder.py
│ ├── inv_from_vars_cfg.yml
│ ├── inventory_plugins
│ │ ├── __pycache__
│ │ │ └── inv_from_vars.cpython-36.pyc
│ │ └── inv_from_vars.py
│ ├── playbook.yml
│ ├── templates
│ │ ├── inventory_plugins
│ │ │ ├── __pycache__
│ │ │ │ └── inv_from_vars.cpython-36.pyc
│ │ │ └── inv_from_vars.py
│ │ └── val_tmpl.j2
│ └── vars
│ │ ├── ansible.yml
│ │ ├── base.yml
│ │ ├── fabric.yml
│ │ ├── my_vars.yml
│ │ ├── services_interface.yml
│ │ └── services_tenant.yml
├── filter_plugins
│ ├── __pycache__
│ │ ├── comp_report.cpython-36.pyc
│ │ ├── custom_validate.cpython-36.pyc
│ │ ├── format_data_model.cpython-36.pyc
│ │ ├── input_svc_validate.cpython-36.pyc
│ │ ├── input_validate.cpython-36.pyc
│ │ └── validate.cpython-36.pyc
│ └── input_validate.py
├── inv_from_vars_cfg.yml
├── inventory_plugins
│ ├── __pycache__
│ │ └── inv_from_vars.cpython-36.pyc
│ └── inv_from_vars.py
├── playbook.yml
├── roles
│ ├── base
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── nxos
│ │ │ └── bse_tmpl.j2
│ ├── fabric
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── nxos
│ │ │ └── fbc_tmpl.j2
│ ├── intf_cleanup
│ │ ├── filter_plugins
│ │ │ ├── __pycache__
│ │ │ │ ├── get_intf.cpython-36.pyc
│ │ │ │ └── intf_cleanup.cpython-36.pyc
│ │ │ └── get_intf.py
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── nxos
│ │ │ └── dflt_intf_tmpl.j2
│ ├── services
│ │ ├── filter_plugins
│ │ │ ├── __pycache__
│ │ │ │ ├── format_data_model.cpython-36.pyc
│ │ │ │ └── format_dm.cpython-36.pyc
│ │ │ └── format_dm.py
│ │ ├── tasks
│ │ │ ├── svc_intf.yml
│ │ │ ├── svc_rtr.yml
│ │ │ └── svc_tnt.yml
│ │ └── templates
│ │ │ └── nxos
│ │ │ ├── svc_intf_tmpl.j2
│ │ │ ├── svc_rtr_tmpl.j2
│ │ │ └── svc_tnt_tmpl.j2
│ └── validate
│ │ ├── filter_plugins
│ │ ├── __pycache__
│ │ │ └── custom_validate.cpython-36.pyc
│ │ └── custom_validate.py
│ │ ├── tasks
│ │ ├── cus_val.yml
│ │ └── nap_val.yml
│ │ └── templates
│ │ ├── napalm
│ │ ├── bse_fbc_val_tmpl.j2
│ │ └── svc_tnt_val_tmpl.j2
│ │ └── nxos
│ │ ├── bse_fbc_val_tmpl.j2
│ │ ├── svc_intf_val_tmpl.j2
│ │ └── svc_tnt_val_tmpl.j2
├── ssh_keys
│ ├── ssh_hosts
│ └── ssh_key_add.yml
├── templates
│ ├── input.yml
│ ├── render_jinja.py
│ ├── services_template.j2
│ └── template.j2
├── test_playbook.yml
├── unit_test
│ ├── base.yml
│ ├── fabric.yml
│ ├── service_interface.yml
│ └── service_tenant.yml
└── vars
│ ├── ansible.yml
│ ├── base.yml
│ ├── fabric.yml
│ ├── service_interface.yml
│ ├── service_routing.yml
│ └── service_tenant.yml
├── configurations
├── dc1-asav-xfw1
├── dc1-csr-xnet1
├── dc1-ltm-lb1.ucs
├── dc1-n9k-border1
├── dc1-n9k-leaf1
├── dc1-n9k-leaf2
├── dc1-n9k-spine1
├── dc1-n9k-spine2
└── dc1-vios-sw1
├── data_model
├── README.md
├── ansible.cfg
├── dyn_inv_script.py
├── filter_plugins
│ ├── __pycache__
│ │ └── format_data_model.cpython-36.pyc
│ └── format_data_model.py
├── inv_from_vars_cfg.yml
├── inventory_plugins
│ ├── __pycache__
│ │ └── inv_from_vars.cpython-36.pyc
│ └── inv_from_vars.py
├── playbook.yml
├── templates
│ ├── base_template.j2
│ ├── fabric_template.j2
│ ├── input.yml
│ ├── render_jinja.py
│ ├── services_template.j2
│ └── template.j2
└── vars
│ ├── ansible.yml
│ ├── base.yml
│ ├── fabric.yml
│ └── services.yml
├── network_state_report
├── README.md
├── ansible.cfg
├── group_vars
│ ├── all.yml
│ ├── asa.yml
│ ├── bigip.yml
│ ├── border_leaf.yml
│ ├── ios.yml
│ ├── iosxe.yml
│ ├── leaf.yml
│ ├── nxos.yml
│ ├── spine.yml
│ └── test.yml
├── hosts.yml
├── playbook_main.yml
└── roles
│ ├── bgp
│ ├── filter_plugins
│ │ └── bgp_filter.py
│ └── tasks
│ │ └── main.yml
│ ├── edge
│ ├── filter_plugins
│ │ └── edge_filter.py
│ └── tasks
│ │ └── main.yml
│ ├── interfaces
│ ├── filter_plugins
│ │ └── interfaces_filter.py
│ └── tasks
│ │ └── main.yml
│ ├── l2_tables
│ ├── filter_plugins
│ │ └── l2_tables_filter.py
│ └── tasks
│ │ └── main.yml
│ ├── l3_tables
│ ├── filter_plugins
│ │ └── l3_tables_filter.py
│ └── tasks
│ │ └── main.yml
│ ├── ospf
│ ├── filter_plugins
│ │ └── ospf_filter.py
│ └── tasks
│ │ └── main.yml
│ ├── report
│ ├── filter_plugins
│ │ └── report_filter.py
│ └── tasks
│ │ └── main.yml
│ └── vips
│ ├── filter_plugins
│ └── vip_filter.py
│ └── tasks
│ └── main.yml
└── stesworld_network_topology.pdf
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # stesworld.com - Making dreams for the dreamers
2 |
3 | Initial lab from the ipspace automation course.
4 |
5 | At present the company has one DC built with a Leaf and Spine EVPN/VXLAN topology. It has one Internet breakout via GTT and a dedicated Express Route circuit to Azure.
6 |
7 | The plan in the future is to either expand to a second DC or provide Internet redundancy in the current DC.
8 |
Option1: *Build second DC that will operate as active/active with the current DC with Internet redundancy provided via the opposite DC*
9 |
Option2: *Expand current DC by adding more leaf switches as well as adding another border leaf and internet breakout for redundancy.*
10 |
11 | They will also be adding a Direct Connect link to AWS to provide cloud provider redundancy, although how this is incorporated will depend on the above option taken.
12 |
13 | The main goal of the company's IT department is to grasp orchestration/automation and "infrastructure/network as code". Currently they are predominantly a Cisco shop but are keen to try other vendors which may be better designed and more flexible to this ethos.
14 |
15 | They are currently in the process of incorporating orchestration/automation workflows into their current day to day operations with the vision in the future that 99% of BAU or maintenance work will be done in this manner.
16 |
17 | Although the current network was built in the traditional hands on method, any expansion MUST be done via orchestration/automation tools.
18 |
19 | ### Configuration backups
20 |
21 | *backup_configs* will backup all device configs upload them to this repo in the *configurations* directory.
22 |
23 | ### Reporting
24 |
25 | *network_state_report* creates custom reports of network elements within the fabric.
26 |
27 | ### Data Models
28 |
29 | *data_model* creates config snippets using declarative data models.
30 |
--------------------------------------------------------------------------------
/backup_configs/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | library = /home/ste/.local/lib/python3.7/site-packages/napalm_ansible/modules
3 | action_plugins = /home/ste/.local/lib/python3.7/site-packages/napalm_ansible/plugins/action
4 |
5 | stdout_callback = selective
6 |
7 | gathering = explicit
8 | retry_files_enabled = False
9 | inventory = hosts.yml
10 | transport = network_cli
11 |
12 |
--------------------------------------------------------------------------------
/backup_configs/backup_with_cmd_mod.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook backs up the device configurations to a remote git repo
3 | - name: "Creating file structure"
4 | hosts: localhost
5 | tasks:
6 | # Create a date fact (variable) to be used in the name stored config file
7 | - name: SYS >> Get ansible date/time facts
8 | setup:
9 | filter: "ansible_date_time"
10 | - name: SYS >> Store Date in a Fact
11 | set_fact:
12 | DATE: "{{ ansible_date_time.date }}"
13 |
14 |   # Clone a remote repository to save the config output in
15 | - name: GIT >> Clone private repo into /tmp
16 | git:
17 | repo: git@github.com:sjhloco/ip_auto_lab.git
18 | version: master
19 | dest: /tmp/ip_auto_lab
20 |
21 | # Create copy of Cisco running config and save to file in /tmp
22 | - name: "Backing up Devices"
23 | hosts: cisco
24 | # hosts: cisco1 # For testing locally
25 | tasks:
26 | - name: "NET >> Backup Cisco running configs"
27 | ios_command:
28 | commands: "{{ commands }}"
29 | register: CLI_OUTPUT
30 |
31 | - name: SYS >> Save output to backup dir
32 | copy:
33 | content: "{{ CLI_OUTPUT.stdout[0] }}"
34 | dest: "/tmp/ip_auto_lab/configurations/{{ inventory_hostname }}_{{hostvars.localhost.DATE}}"
35 |
36 | # Create copy of ASA running config and save to file in /tmp
37 | - name: "Backing up Devices"
38 | hosts: firewall
39 | tasks:
40 | - name: "NET >> Backup ASA running configs"
41 | asa_command:
42 | commands: "{{ commands }}"
43 | register: CLI_OUTPUT
44 |
45 | - name: SYS >> Save output to backup dir
46 | copy:
47 | content: "{{ CLI_OUTPUT.stdout[0] }}"
48 | dest: "/tmp/ip_auto_lab/configurations/{{ inventory_hostname }}_{{hostvars.localhost.DATE}}"
49 |
50 | # Create copy of F5 config, download and save to file in /tmp
51 | # Still to do, cant get bigip_config to work
52 |
53 | # Commit and push changes to it and cleanup file directory
54 | - name: "Cleaning up directory"
55 | hosts: localhost
56 | tasks:
57 | - name: GIT >> Commit and push changes
58 | shell: |
59 | cd /tmp/ip_auto_lab
60 | git add .
61 | git commit -m "Config backups by Ansible {{hostvars.localhost.DATE}}"
62 | git push
63 |
64 | - name: "SYS >> Delete the directory"
65 | file:
66 | path: "/tmp/ip_auto_lab"
67 | state: absent
--------------------------------------------------------------------------------
/backup_configs/backup_with_napalm.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook backs up the device configurations to a remote git repo
3 |
4 | - name: "Creating file structure"
5 | hosts: localhost
6 | tasks:
7 |     # Create the environment to save files including the git repo
8 | - name: SYS >> Cleanup old files
9 | file: path="{{ repo_path }}" state=absent
10 | - name: GIT >> Clone private repo into /tmp
11 | git:
12 | repo: "{{ git_repo }}"
13 | version: master
14 | dest: "{{ repo_path }}"
15 | changed_when: False
16 | - name: GIT >> Check git status
17 | shell: git status
18 | args:
19 | chdir: "{{ repo_path }}" # changes to this dir before running the cmd
20 | register: git_status
21 | changed_when: False
22 |
23 | # Create copy of Cisco running configs
24 | - name: "Backing up Devices with NAPALM"
25 | hosts: router:switch:firewall
26 | connection: local
27 | tasks:
28 | - name: "NET >> Backup Cisco running configs"
29 | napalm_get_facts:
30 | provider: "{{ creds_all }}"
31 | dev_os: "{{ os }}"
32 | filter:
33 | - "config"
34 | tags: [print_action]
35 | # Save the configs to file in /tmp
36 | - name: SYS >> Save output to backup dir
37 | copy:
38 | content: "{{ napalm_config.running }}" # nested ansible fact
39 | dest: "{{ repo_path }}/configurations/{{ inventory_hostname }}"
40 | changed_when: False
41 |
42 | # Create a F5 UCS backup
43 | - name: Backup BIGIP Devices with BIGIP module
44 | hosts: bigip
45 | tasks:
46 | - name: Create and download a UCS
47 | bigip_ucs_fetch:
48 | src: "{{ inventory_hostname }}.ucs"
49 | dest: "{{ repo_path }}/configurations/{{ inventory_hostname }}.ucs"
50 | provider: "{{ creds_big_ip }}"
51 | backup: yes # Will backup if doesnt already exist
52 | delegate_to: localhost
53 | tags: [print_action]
54 |
55 | # Commit and push changes to it and cleanup file directory
56 | - name: "Push configs to GIT and cleanup up"
57 | hosts: localhost
58 | tasks:
59 | # Create a date fact (variable) to be used in the name of the config file
60 | - name: SYS >> Get ansible date/time facts
61 | setup:
62 | filter: "ansible_date_time"
63 | - name: SYS >> Store ansible date/time facts
64 | set_fact:
65 | DATE: "{{ ansible_date_time.date }}"
66 | # Commit and push changes to remote git repo
67 | - name: GIT >> Commit and push changes
68 | shell: |
69 | git add .
70 | git commit -m "Config backups by Ansible {{hostvars.localhost.DATE}}"
71 | git push
72 | args:
73 | chdir: "{{ repo_path }}"
74 | when: not("working directory clean" in git_status.stdout)
75 | changed_when: False
76 | - name: "SYS >> Delete the directory"
77 | file:
78 | path: "{{ repo_path }}"
79 | state: absent
80 | changed_when: False
--------------------------------------------------------------------------------
/backup_configs/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | git_repo: git@github.com:sjhloco/ip_auto_lab.git
3 | repo_path: /tmp/ip_auto_lab
4 | ansible_python_interpreter: "/usr/bin/python3.7"
5 |
6 | creds_all:
7 | hostname: "{{ ansible_host|default(inventory_hostname) }}"
8 | username: admin
9 | password: ansible
--------------------------------------------------------------------------------
/backup_configs/group_vars/asa.yml:
--------------------------------------------------------------------------------
1 | ---
2 | os: asa
--------------------------------------------------------------------------------
/backup_configs/group_vars/bigip.yml:
--------------------------------------------------------------------------------
1 | ---
2 | creds_big_ip:
3 | server: "{{ ansible_host|default(inventory_hostname) }}"
4 | user: admin
5 | password: ansible
6 | validate_certs: no
7 |
--------------------------------------------------------------------------------
/backup_configs/group_vars/ios.yml:
--------------------------------------------------------------------------------
1 | ---
2 | os: ios
3 |
--------------------------------------------------------------------------------
/backup_configs/group_vars/nxos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | os: nxos
3 |
--------------------------------------------------------------------------------
/backup_configs/hosts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | all:
3 | children:
4 | router:
5 | children:
6 | ios:
7 | hosts:
8 | dc1-csr-xnet1:
9 | ansible_host: 10.10.108.17
10 | switch:
11 | children:
12 | spine:
13 | children:
14 | nxos:
15 | hosts:
16 | dc1-n9k-spine1:
17 | ansible_host: 10.10.108.11
18 | dc1-n9k-spine2:
19 | ansible_host: 10.10.108.12
20 | leaf:
21 | children:
22 | nxos:
23 | hosts:
24 | dc1-n9k-leaf1:
25 | ansible_host: 10.10.108.13
26 | dc1-n9k-leaf2:
27 | ansible_host: 10.10.108.14
28 | border_leaf:
29 | children:
30 | nxos:
31 | hosts:
32 | dc1-n9k-border1:
33 | ansible_host: 10.10.108.15
34 | dmz:
35 | children:
36 | ios:
37 | hosts:
38 | dc1-vios-sw1:
39 | ansible_host: 10.10.108.18
40 | firewall:
41 | children:
42 | asa:
43 | hosts:
44 | dc1-asav-xfw1:
45 | ansible_host: 10.10.108.16
46 | bigip:
47 | children:
48 | ltm:
49 | hosts:
50 | dc1-ltm-lb1:
51 | ansible_host: 10.10.108.19
52 |
--------------------------------------------------------------------------------
/backup_configs/playbooks/create_dir.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # To create and delete a local directory on the ansible server
3 | - name: "Create file structure"
4 | hosts: 127.0.0.1
5 | connection: local
6 | gather_facts: yes
7 | tasks:
8 | - name: "SYS >> Create the directory with a timestamp"
9 | file:
10 | path: "/tmp/config_backups_{{ansible_date_time.date}}"
11 | state: directory
12 |
13 | # Delete the directory
14 | - name: "Cleanup"
15 | hosts: 127.0.0.1
16 | connection: local
17 | gather_facts: yes
18 | tasks:
19 | - name: "SYS >> Delete the directory"
20 | file:
21 | path: "/tmp/config_backups_{{ansible_date_time.date}}"
22 | state: absent
23 |
--------------------------------------------------------------------------------
/backup_configs/playbooks/git.yml:
--------------------------------------------------------------------------------
1 | # Clone a remote repository to save the config output in
2 | - name: GIT >> Clone private repo into /tmp
3 | git:
4 | repo: git@github.com:sjhloco/test_git.git
5 | version: master
6 | dest: /tmp/test_git
7 |
8 |
9 |
10 | # Commit and push changes to it and cleanup file directory
11 | - hosts: localhost
12 | tasks:
13 | - name: GIT >> Commit and push changes
14 | shell: |
15 | cd /tmp/test_git
16 | git add .
17 | git commit -m "Config backups by Ansible {{hostvars.localhost.DATE}}"
18 | git push
19 |
--------------------------------------------------------------------------------
/build_fabric/PLAN:
--------------------------------------------------------------------------------
1 | I change ec_fmt in fabric.yml from Port-channel to port-channel, need to keep inmind incase thigns start failing
2 |
3 |
4 | INTERFACES: need to be able to create loopbacks - JUST NEED TO INCLUDE IF IN DEFAULT VRF
5 |
6 | Loopback must be /32
7 | reserved range is 11 to 20
8 | If not teannt set for any needs to be put in default
9 |
10 |
11 | TRY -> on nexus, should be waht using for redist
12 |
13 | +POST_VAL to fix=
14 | post-val for new loopback interface variabel setup (bse)
15 | New lp_fmt to be tested for
16 | Make sure that interface ranges don't conflict with fabric LOOPBACKS OR interfaces
17 | itnerface tenant can be nothing and then is default
18 | interfaces in service interface are now a list
19 | loopback ip must be /32
20 |
21 | RTR post-val
22 | no duplicate group or peer names
23 | cant have same prefix in a prefix-list (duplicate preofx in same prefix-list)
24 | BGP tenant has to be s single device, or a list of multiple devices (cant be muliple devices not in a list)
25 | (no duplicate group or peer names allowed)
26 | 'as_path' and 'med' can be applied to outbound advertisements and 'weight' and local preference applied to those received inbound
27 | prefix always must be a list,even if only 1 element.
28 | BGP deny does not accept the default keywork, all others do
29 | Cant have host address (/32) on a normal intefrcaes
30 | OSPF interfaces in same OSPF process
31 |
32 | RTR-
33 | TEST: If minimun values for BGP peer are correct
34 | BFD_routing: Whetehr is default dicated by whether virtual supports it
35 |
36 |
37 | Need to check what did with allow, dney and default and update info in variabel file
38 | -Do all take them? Did I used deny?
39 |
40 |
41 | Add to notes about removing dict elements if doen exist (use pop rahter than del)
42 | https://stackoverflow.com/questions/11277432/how-to-remove-a-key-from-a-python-dictionary
43 |
44 |
45 |
46 |
47 | pre-checks -
48 |
49 |
50 |
51 | Nice to have
52 | 1. Add playbook for netbox
53 | 2. Add EOS, thin kcan use this to get the config
54 | https://overlaid.net/2019/02/19/arista-bgp-evpn-ansible-lab/
55 | https://github.com/varnumd/ansible-arista-evpn-lab
56 |
57 |
58 | NXOS caveats
59 | route-map RM_OSPF98>>BGP65001 permit 10`
60 | match ip address prefix-list PL_OSPF98>>BGP65001_ME50`
61 | cant take prefix list containing '>'
62 |
63 | Error: CLI DN creation failed substituting values. Path sys/rpm/rtmap-[RM_OSPF98
64 | BGP65001]/ent-10/mrtdst/rsrtDstAtt-[sys/rpm/pfxlistv4-[PL_OSPF98>>BGP65001_ME50]
65 |
66 |
67 |
68 |
69 |
70 | ADD TO LIST?DICT NOTES
71 | POP dictionary - use when want to delte a key, but may not exist (similart to how used get() and setdefault())
72 |
73 |
74 | If specified key is present in the dictionary, it remove and return its value.
75 | If the specified key is not present, it throws an error KeyError.
76 |
77 | TO get round this use this. If the key is is not present, the specified value (None) value is returned.
78 | intf.pop('name', None)
79 |
80 | To delete multiple dicts in one line:
81 | del grp['switch'], grp['peer'], grp['tenant']
--------------------------------------------------------------------------------
/build_fabric/__pycache__/validate.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/__pycache__/validate.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | library = /home/ste/virt/ansible_2.8.4/lib/python3.6/site-packages/napalm_ansible/modules
3 | action_plugins = /home/ste/virt/ansible_2.8.4/lib/python3.6/site-packages/napalm_ansible/plugins/action
4 |
5 | forks = 20
6 | #jinja2_extensions = jinja2.ext.do
7 |
8 | #stdout_callback = selective
9 |
10 | gathering = explicit
11 | retry_files_enabled = False
12 | inventory = hosts.yml
13 | transport = network_cli
14 |
15 | # callback_plugins = callback_plugins
16 | # stdout_callback = selective
17 | host_key_checking = False
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | library = /home/ste/virt/ansible_2.8.4/lib/python3.6/site-packages/napalm_ansible/modules
3 | action_plugins = /home/ste/virt/ansible_2.8.4/lib/python3.6/site-packages/napalm_ansible/plugins/action
4 |
5 | gathering = explicit
6 | retry_files_enabled = False
7 | inventory = hosts.yml
8 | transport = network_cli
9 |
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/files/compliance_report.json:
--------------------------------------------------------------------------------
1 | {"show vpc": {"complies": false, "present": {}, "missing": ["peer-link_po", "peer-link_vlans", "vpc_peer_keepalive_status", "vpc_peer_status", "Po44", "Po13", "Po14"], "extra": []}, "skipped": [], "complies": false}
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/files/desired_state.yml:
--------------------------------------------------------------------------------
1 | cmds:
2 | - show vpc:
3 | peer-link_po: Po1
4 | peer-link_vlans: "1-2,10,20,24,30,40,110,120,210,220,3001-3002"
5 | vpc_peer_keepalive_status: peer-alive
6 | vpc_peer_status: peer-ok
7 | Po44:
8 | consistency_status: SUCCESS
9 | port_status: "1"
10 | vpc_num: "44"
11 | active_vlans: 20
12 | Po13:
13 | consistency_status: SUCCESS
14 | port_status: "1"
15 | vpc_num: "13"
16 | active_vlans: 30,40
17 | Po14:
18 | consistency_status: SUCCESS
19 | port_status: "1"
20 | vpc_num: "14"
21 | active_vlans: 10,20,24,30
22 |
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/files/file_input.json:
--------------------------------------------------------------------------------
1 | {"192.168.100.11": {"state": "FULL"}, "192.168.100.12": {"state": "FULL"},"192.168.100.22": {"state": "FULL"}}
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/filter_plugins/__pycache__/format_data_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/custom_val_builder/filter_plugins/__pycache__/format_data_model.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/filter_plugins/__pycache__/format_dm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/custom_val_builder/filter_plugins/__pycache__/format_dm.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/filter_plugins/__pycache__/val_builder.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/custom_val_builder/filter_plugins/__pycache__/val_builder.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/filter_plugins/val_builder.py:
--------------------------------------------------------------------------------
1 | """ Used to test creating compliance reports to be used with custom_validate in playbook
2 |  Uses napalm_validate with an input file of the actual state rather than napalm_validate gathering the state
3 |
4 | Can take an input of a static dictionary file of the expected output or the actual device output.
5 | If using the input file it is stored in files/file_input.json
6 |
7 | ansible-playbook playbook.yml -i hosts --tag tmpl
8 | ansible-playbook playbook.yml -i hosts --tag dev_val
9 | ansible-playbook playbook.yml -i hosts --tag file_val
10 |
11 | Unhash desired_state, actual_state or result dependent on what you want returned to screen.
12 | The desired state is saved to files/desired_state.yml and the report to files/compliance_report.json
13 |
14 | """
15 |
16 | from napalm.base import validate
17 | from napalm.base.exceptions import ValidationException
18 | import json
19 | from collections import defaultdict
20 | import os
21 | import re
22 |
class FilterModule(object):
    """Ansible filter plugin for building custom napalm_validate compliance reports.

    The 'custom_validate' filter takes a desired state (loaded from
    files/desired_state.yml) and the device output (live, or from a static
    file), feeds them through napalm_validate's compare engine and saves the
    resulting report to files/compliance_report.json.
    """

    def filters(self):
        # Filters this plugin exposes to Ansible/Jinja2
        return {
            'custom_validate': self.custom_validate,
        }

############################################ Method to run napalm_validate ############################################
# REPORT: Uses napalm_validate on custom data fed into it (still supports '_mode: strict') to validate and create reports

    def compliance_report(self, desired_state, actual_state):
        """Compare the desired state against the actual state, one command at a time.

        Args:
            desired_state: Dict keyed by command, value is the expected output.
            actual_state: Data model built from the real device output.
        Returns:
            The compliance report dict, which is also written to
            files/compliance_report.json for later inspection.
        """
        report = {}
        for cmd, desired_results in desired_state.items():
            try:                    # Feeds each command's states into napalm_validate
                report[cmd] = validate.compare(desired_results, actual_state)
            except NotImplementedError:
                report[cmd] = {"skipped": True, "reason": "NotImplemented"}

        # Overall compliance requires every per-command comparison to comply
        complies = all(e.get("complies", True) for e in report.values())
        report["skipped"] = [k for k, v in report.items() if v.get("skipped", False)]
        report["complies"] = complies

        # Create a new compliance report
        with open('files/compliance_report.json', 'w') as file_content:
            json.dump(report, file_content)
        return report

############################################ Engine for custom_validate ############################################
# ENGINE: Runs method to get data model, puts it through napalm_validate and then responds to Ansible

    def custom_validate(self, desired_state, output, input_method):
        """Entry point for the filter: build the actual state and report on it.

        Args:
            desired_state: Dict of expected per-command output.
            output: Device output as serialised JSON (a long string).
            input_method: 'file' for a static JSON input, 'device' for live output.
        Returns:
            Whichever of desired_state / actual_state / result is unhashed below.
        """
        json_output = json.loads(output)  # Output comes in as serialised json (long string), needs making into json

        # Runs against a static input file of the device output (in json)
        if input_method == 'file':
            actual_state = json_output
        # Feeds the device input into the device_dm method to create the Data Model
        elif input_method == 'device':
            actual_state = self.device_dm(desired_state, json_output, actual_state = {})

        # Feeds the validation file (desired state) and new data model through the reporting function
        result = self.compliance_report(desired_state, actual_state)

        # Unhash what you want to display on screen
        return desired_state
        # return actual_state
        # return result

############################################ Device data-model generators ############################################
# Creates the data model from the output returned by the device

    def device_dm(self, desired_state, json_output, actual_state):
        """Build the actual-state data model from 'show vpc' JSON output.

        Args:
            desired_state: Unused here; kept so all data-model builders share a signature.
            json_output: Parsed 'show vpc | json' output from the device.
            actual_state: Ignored; a fresh defaultdict is always built and returned.
        Returns:
            defaultdict keyed by vPC attribute name and per-vPC port-channel name.
        """
        actual_state = defaultdict(dict)
        actual_state['peer-link_po'] = json_output['TABLE_peerlink']['ROW_peerlink']['peerlink-ifindex']
        actual_state['peer-link_vlans'] = json_output['TABLE_peerlink']['ROW_peerlink']['peer-up-vlan-bitset']
        actual_state['vpc_peer_keepalive_status'] = json_output['vpc-peer-keepalive-status']
        actual_state['vpc_peer_status'] = json_output['vpc-peer-status']
        # NOTE(review): assumes ROW_vpc is a list; NX-OS JSON can return a dict when only one vPC exists - TODO confirm
        for vpc in json_output['TABLE_vpc']['ROW_vpc']:
            actual_state[vpc['vpc-ifindex']]['consistency_status'] = vpc['vpc-consistency-status']
            actual_state[vpc['vpc-ifindex']]['port_status'] = vpc['vpc-port-state']
            actual_state[vpc['vpc-ifindex']]['vpc_num'] = vpc['vpc-id']
            actual_state[vpc['vpc-ifindex']]['active_vlans'] = vpc['up-vlan-bitset']

        return actual_state
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/inv_from_vars_cfg.yml:
--------------------------------------------------------------------------------
1 | ### The inventory config file that is referenced in playbook runs to run the inventory plugin ###
2 |
3 | # To just run the inventory plugin to generate the host_vars and group_vars that are printed to screen:
4 | # ansible-inventory --playbook-dir=$(pwd) -i inv_from_vars_cfg.yml --list
5 |
6 | # Name of the inventory plugin (stored in the inventory_plugins directory)
7 | plugin: inv_from_vars
8 |
9 | # Data-model in Ansible vars directory where dictionaries will be imported from
10 | var_files:
11 | - ansible.yml
12 | - base.yml
13 | - fabric.yml
14 |
15 | # Dictionaries that will be imported from the variable files in the vars directory
16 | var_dicts:
17 | ansible:
18 | - device_type
19 | base:
20 | - device_name
21 | - addr
22 | fabric:
23 | - network_size
24 | - num_intf
25 | - bse_intf
26 | - lp
27 | - mlag
28 | - addr_incre
29 |
30 |
31 |
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/inventory_plugins/__pycache__/inv_from_vars.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/custom_val_builder/inventory_plugins/__pycache__/inv_from_vars.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Create a Custom Validation"
3 | # hosts: all
4 | hosts: DC1-N9K-LEAF01
5 | # hosts: DC1-N9K-BORDER01
6 | # hosts: DC1-N9K-SPINE01
7 | connection: local
8 | vars_files:
9 | - vars/fabric.yml
10 | - vars/base.yml
11 | - vars/my_vars.yml
12 | - vars/ansible.yml
13 | - vars/services_tenant.yml
14 | - vars/services_interface.yml
15 |
16 |
17 | tasks:
18 | # TEMPLATE: Creates validation file of expected desired state from the input data
19 | - name: "SYS >> Creating the validation files"
20 | block:
21 | - set_fact:
22 | flt_svc_intf: "{{ svc_intf.intf |create_svc_intf_dm(inventory_hostname, svc_intf.adv, fbc.adv.bse_intf) }}"
23 | check_mode: False # These tasks still make changes when in check mode
24 | run_once: true # Only has to run once to create the new data-models
25 | - template:
26 | src: "val_tmpl.j2"
27 | dest: "files/desired_state.yml"
28 | changed_when: False
29 | tags: [tmpl, dev_val, file_val, flt]
30 | # - debug: var=flt_svc_intf
31 | # tags: [tmpl, dev_val, file_val, flt]
32 |
33 | # DEVICE_OUTPUT: napalm_cli gets the actual state and feeds it into custom_validate
34 | - name: "Create compliance report from dynamic device output"
35 | block:
36 | - include_vars: "files/desired_state.yml"
37 | - name: "NET >> Getting cmd output from device"
38 | napalm_cli:
39 | provider: "{{ ans.creds_all }}"
40 | dev_os: "{{ ansible_network_os }}"
41 | args:
42 | commands:
43 | - "{{ item.keys() | list | first }} | json"
44 | register: output
45 | loop: "{{ cmds }}"
46 | loop_control:
47 | label: "{{ item.keys() | list | first }}"
48 | - name: "SYS >> Creating compliance report"
49 | set_fact:
50 | result: "{{ cmds[lp_idx] | custom_validate(item.cli_results.values() | list | first, 'device') }}"
51 | loop: "{{ output.results }}"
52 | loop_control:
53 | label: "{{ item.cli_results.keys() | list | first }}"
54 | index_var: lp_idx
55 | - debug: var=result
56 | tags: [dev_val]
57 |
58 | # FILE_INPUT: Runs custom_validate using a static input of the device output
59 | - name: "SYS >> Creating compliance report"
60 | block:
61 | - include_vars: "files/desired_state.yml"
62 | - set_fact:
63 | result: "{{ cmds[0] | custom_validate(lookup('file','files/file_input.json'), 'file') }}"
64 | - debug: var=result
65 | tags: [file_val]
66 |
67 |
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/templates/inventory_plugins/__pycache__/inv_from_vars.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/custom_val_builder/templates/inventory_plugins/__pycache__/inv_from_vars.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/templates/val_tmpl.j2:
--------------------------------------------------------------------------------
1 | cmds:
2 | - show vpc:
3 | peer-link_po: {{ fbc.adv.bse_intf.ec_short + fbc.adv.mlag.peer_po |string }}
4 | peer-link_vlans: "1-2,10,20,24,30,40,110,120,210,220,3001-3002"
5 | vpc_peer_keepalive_status: peer-alive
6 | vpc_peer_status: peer-ok
7 | {% for intf in flt_svc_intf %}
8 | {% if intf.vpc_num is defined %}
9 | {# Changes long name (port-channel) to short name (Po) #}
10 | {{ intf.intf_num | replace(fbc.adv.bse_intf.ec_fmt,fbc.adv.bse_intf.ec_short) }}:
11 | consistency_status: SUCCESS
12 | port_status: "1"
13 | vpc_num: "{{ intf.vpc_num }}"
14 | active_vlans: {{ intf.ip_vlan }}
15 | {% endif %}{% endfor %}
16 |
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/vars/ansible.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Login and Ansible settings (normally would be in all.yml) ################
3 |
4 | # Python location on the Ansible host (operating system specific)
5 | ansible_python_interpreter: "/usr/bin/env python"
6 | # Username and password used by Ansible modules
7 | ansible_user: "{{ ans.creds_all.username }}"
8 | ansible_ssh_pass: "{{ ans.creds_all.password }}"
9 | ansible_network_os: nxos
10 |
11 | # Non-ansible core variables used in playbook, so ones I can suffix ans. before
12 | ans:
13 | # Base directory Location to store the generated configuration snippets
14 | dir_path: ~/device_configs
15 |
16 | # Connection Variables
17 | creds_all: # Napalm
18 | hostname: "{{ ansible_host|default(inventory_hostname) }}"
19 | username: admin
20 | password: ansible
21 |
22 | # Operating system type
23 | device_type:
24 | spine_os: nxos
25 | border_os: nxos
26 | leaf_os: nxos
27 |
28 |
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/vars/base.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Variables used to create core elements of the device configs ################
3 |
4 | # The naming structure that is added before the automatically generated node number (0x). Groups are created based on the name (i.e spine, border, leaf)
5 | bse:
6 | device_name: # Must contain - and characters after - must be either letters, digits or underscore as this is used as the group name
7 | spine: 'DC1-N9K-SPINE'
8 | border: 'DC1-N9K-BORDER'
9 | leaf: 'DC1-N9K-LEAF'
10 | # Ranges from which device addresses are created from. Must have the mask in prefix format (/)
11 | addr:
12 | lp_net: '192.168.100.0/32' # Routing (OSPF/BGP), VTEP and VPC addresses. By default will use .11 to .59
13 | mgmt_net: '10.10.108.0/24' # Needs to be at least /27 to cover max spine (4), leafs (10) and borders (4)
14 | mlag_net: '10.255.255.0/28' # VPC peer link addresses. Needs to be at least /28 to cover max leafs (10) and borders (4)
15 | srv_ospf_net: '10.255.255.16/28' # Non-core OSPF process peerings between border switches (4 addresses per OSPF process)
16 |
17 | users:
18 | - username: admin
19 | password: $5$ugYwyCgs$CSnUuaIxlxXHRw/Nq3hKU9gfkA8Y2fYHiTZeDFSXik3 # Passwords must be entered encrypted type-5
20 | role: network-admin
21 |
22 | # Details for all the services that the switches consume
23 | services:
24 | domain: 'stesworld.com'
25 | src_int: loopback1 # Used for any control plane functions
26 | dns:
27 | prim: 10.10.10.41
28 | sec: 10.10.10.42
29 | tacacs:
30 | grp_name: ISE_TACACS
31 | key: vagzjefjq # Must be entered encrypted type-6
32 | servers:
33 | - 10.10.10.51
34 | - 10.10.10.52
35 | - 10.10.10.53
36 | snmp:
37 | host: 10.10.10.43
38 | comm: 5NMPC0MMUN1TY
39 | ntp:
40 | server:
41 | - 10.10.10.45
42 | - 10.10.20.46
43 | log:
44 | server:
45 | - 10.10.10.47
46 | - 10.10.20.48
47 |
48 | # Management Access-lists
49 | mgmt_acl:
50 | - acl_name: SNMP_ACCESS
51 | source: [10.10.20.43/24, 10.10.10.43/24]
52 | port: [udp, snmp]
53 | - acl_name: SSH_ACCESS
54 | source: [10.10.10.0/24, 10.255.254.0/24, 10.10.108.0/24, 192.168.255.0/24]
55 | port: [tcp, 22]
56 |
57 | # Advanced base configuration that is less likely to be changed
58 | adv:
59 | image: nxos.9.2.4.bin
60 | image_name: 9.2(4) # See caveats in README, if not correct checkpoint config_repalce will fail on NXOS
61 | exec_timeout:
62 | console: 0
63 | vty: 15
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/vars/fabric.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Variables used to decide how the fabric will look ################
3 |
4 | # This Only scales to 4 spines, 10 leafs, 4 borders. By default the following ports are used:
5 | # SPINE-to-LEAF = Eth1/1 - 1/10 SPINE-to-Border = Eth1/11 - 1/15
6 | # LEAF-to-SPINE = Eth1/1 - 1/5 BORDER-to-SPINE: = Eth1/1 - 1/5
7 | # MLAG Peer-link = Eth1/127 - 128 MLAG keepalive = mgmt
8 |
9 | # How big the network is, so the number of each switch. border/leaf must be in increments of 2 as in MLAG pair
10 | fbc:
11 | network_size:
12 | num_spines: 2 # Can be 1 to 4
13 | num_borders: 2 # Can be 0, 2 or 4
14 | num_leafs: 2 # Can be 2, 4, 6, 8 or 10
15 |
16 | # Number of interfaces on the device (first and last interface). Is needed to make interfaces declarative and default all interfaces not used
17 | num_intf:
18 | spine: 1,128
19 | border: 1,128
20 | leaf: 1,128
21 |
22 | # To change Fabric routing protocol settings
23 | route:
24 | ospf:
25 | pro: 'underlay' # Can be numbered or named
26 | area: 0.0.0.0 # Must be in dotted decimal format
27 | bgp:
28 | as_num: 65001
29 |
30 | acast_gw_mac: 0000.2222.3333 # Must be in the format xxxx.xxxx.xxxx
31 |
32 | ################ Advanced settings to further customize the fabric ################
33 |
34 | adv:
35 | # Seed interfaces used to create the fabric. These are the first interfaces used, the inventory plugin increments them
36 | bse_intf:
37 | intf_fmt: Ethernet1/ # Switch interface naming format
38 | intf_short: Eth1/ # Used in descritions of interfaces
39 | ec_fmt: port-channel # LAG interface naming format
40 | ec_short: Po # Used in descritions of LAG interfaces
41 | sp_to_lf: 1 # First interface used for SPINE to LEAF links (1 to 10)
42 | sp_to_bdr: 11 # First interface used for SPINE to BORDER links (11 to 14)
43 | lf_to_sp: 1 # First interface used LEAF to SPINE links (1 to 4)
44 | bdr_to_sp: 1 # First interface used BORDER to SPINE links (1 to 4)
45 | mlag_peer: 11-12 # Interfaces used for the MLAG peer Link (will be in the MLAG LAG)
46 |
47 | # Loopback interfaces to be used, numbers and descriptions can be changed. As per best practise one per function.
48 | lp:
49 | rtr:
50 | loopback1: LP > Routing protocol RID and peerings
51 | vtep:
52 | loopback2: LP > VTEP Tunnels (PIP) and MLAG (VIP)
53 | bgw:
54 | loopback3: LP > BGW anycast address # Added now incase add multisite to the script
55 |
56 | # All MLAG specific settings except for peer Link interfaces (bse.adv.base_intf.mlag_peer) and subnet (bse.addr.mlag_net)
57 | mlag:
58 | domain: 1 # MLAG Domain number
59 | peer_po: 1 # Port-channel used for Peer Link
60 | peer_vlan: 2 # VLAN used for Peer Link and OSPF peering
61 |
62 | # The increment that is added to the subnet and device hostname number to generate the unique last octet of the IP addresses
63 | addr_incre:
64 | spine_ip: 11 # SPINE mgmt IP and routing loopback addresses will be from .11 to .14
65 | border_ip: 16 # BORDER mgmt IP and routing loopback addresses will be from .16 to .19
66 | leaf_ip: 21 # LEAF mgmt IP and routing loopback addresses will be from .21 to .30
67 | border_vtep_lp: 36 # BORDER VTEP loopback addresses will be from .36 to .39
68 | leaf_vtep_lp: 41 # LEAF VTEP loopback addresses will be from .41 to .50
69 | border_mlag_lp: 56 # Pair of BORDER MLAG shared loopback addresses (VIP) will be from .56 to .57
70 | leaf_mlag_lp: 51 # Pair of LEAF MLAG shared loopback addresses (VIP) will be from .51 to .55
71 | border_bgw_lp: 58 # Pair of BORDER BGW shared anycast loopback addresses will be from .58 to .59
72 | mlag_leaf_ip: 0 # Start IP for LEAF Peer Links, so LEAF1 is .0, LEAF2 .1, LEAF3 .2, etc
73 | mlag_border_ip: 10 # Start IP for BORDER Peer Links, so BORDER1 is .10, BORDER2 .11, etc
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/vars/my_vars.yml:
--------------------------------------------------------------------------------
1 | ---
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/vars/services_interface.yml:
--------------------------------------------------------------------------------
1 | ###################### Service: Device Ports ######################
2 | # By default all interfaces are dual-homed with an LACP state of 'active'. Only the odd numbered switch needs to be specified in the variable file.
3 | # The VPC number can not be changed, it will always be the same as the port-channel number.
4 | # Interfaces and POs can be assigned from a pool or specified manually.
5 |
6 | # Are 5 types of interface that can be specified:
7 | # -access: A L2 single VLAN access port. STP is set to 'edge'
8 | # -stp_trunk: A L2 trunk port going to a device that supports STP. STP is set to 'network' so the other device must support Bridge Assurance
9 | # -stp_trunk_non_ba: Same as stp_trunk but sets STP will be set to 'normal' for devices that don't support BA
10 | # -non_stp_trunk: A L2 trunk port going to a device that doesn't support BPDU. STP set to 'edge' and BPDU Guard enabled
11 | # -layer3: A non-switchport L3 interface with an IP address
12 |
13 | # Interfaces are defined as a dictionary value for the single_homed or dual-homed key.
14 | # At a minimum the following settings need to be configured:
15 | # - single_homed: or dual-homed:
16 | # - descr: string
17 | # - type: access, stp_trunk, stp_trunk_non_ba, non_stp_trunk or layer3
18 | # - ip_vlan: vlan or ip Depends on the type, either ip/prefix, vlan or multiple vlans separated by ,
19 | # - switch: name Name of switch to create on. If dual-homed needs to be odd switch number from MLAG pair
20 | # - tenant: name Layer3 interfaces only, is the VRF the interface will be in*
21 |
22 | # To statically assign the interface and/or port-channel number (default is dynamically from a range) add either of these 2 extra dictionaries to the interface.
23 | # The values used can overlap with the dynamic interface range however for simplicity would advise to use a separate range for dynamic and static assignments.
24 | # - intf_num: integer Only specify the number, name is got from the fbc.adv.bse_intf.intf_fmt variable
25 | # - po_num: integer Only specify the number, name is got from the fbc.adv.bse_intf.ec_fmt variable
26 |
27 |
28 | # Add info about po_mode, can be active, passive or on
29 |
30 | #### Base variables ####
31 | # If not using single-homed or dual-homed interfaces make sure the dictionary (*single_homed* or *dual_homed*) is hashed out.
32 |
33 | svc_intf:
34 | intf:
35 | single_homed:
36 | - descr: L3 > DC1-ASAv-XFW01 eth1
37 | type: layer3
38 | tenant: RED
39 | ip_vlan: 10.255.99.1/30
40 | switch: DC1-N9K-BORDER01
41 | intf_num: 41
42 | - descr: L3 > DC1-ASAv-XFW02 eth1
43 | type: layer3
44 | tenant: RED
45 | ip_vlan: 10.255.99.5/30
46 | switch: DC1-N9K-BORDER02
47 | - descr: L3 > DC1-SRV-MON01 nic1
48 | type: layer3
49 | tenant: BLU
50 | ip_vlan: 10.100.100.21/30
51 | switch: DC1-N9K-LEAF01
52 | - descr: ACCESS > DC1-SRV-APP01 eth1
53 | type: access
54 | ip_vlan: 10
55 | switch: DC1-N9K-LEAF02
56 | intf_num: 29
57 | - descr: UPLINK > DC1-VIOS-SW2
58 | type: stp_trunk
59 | ip_vlan: 110,120
60 | switch: DC1-N9K-LEAF01
61 |
62 | dual_homed:
63 | - descr: ACCESS > DC1-SRV-PRD01 eth1
64 | type: access
65 | ip_vlan: 20
66 | switch: DC1-N9K-LEAF01
67 | intf_num: 45
68 | po_num: 44
69 | - descr: UPLINK > DC1-LTM-ESX1
70 | type: non_stp_trunk
71 | ip_vlan: 10,20,24,30
72 | switch: DC1-N9K-LEAF01
73 | - descr: UPLINK > DC1-VIOS-SW1
74 | type: stp_trunk_non_ba
75 | ip_vlan: 110
76 | switch: DC1-N9K-BORDER01
77 | - descr: ACCESS > DC1-LTM-LB01
78 | type: non_stp_trunk
79 | ip_vlan: 30,40
80 | switch: DC1-N9K-LEAF01
81 | intf_num: 25
82 | - descr: UPLINK > DC1-VIOS-DMZ01
83 | type: stp_trunk_non_ba
84 | ip_vlan: 210,220
85 | switch: DC1-N9K-BORDER01
86 |
87 | #### Advanced variables ####
88 | # Reserved interface ranges that server ports can be automatically assigned from (applies to all leaf and border switches)
89 |
90 | adv:
91 | single_homed: # Used only for single-homed devices
92 | first_intf: 33 # First interface
93 | last_intf: 40 # Last interface
94 | dual_homed: # Used only for dual-homed devices
95 | first_intf: 13 # First interface
96 | last_intf: 32 # Last interface
97 | first_po: 13 # First PortChannel used
98 | last_po: 32 # last PortChannel used
99 |
100 |
101 |
--------------------------------------------------------------------------------
/build_fabric/custom_val_builder/vars/services_tenant.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###################### Service: Tenant ######################
3 | # VLANs will only be Layer 2 unless an ip address is assigned to them.
4 | # VRFs will only be created on a border or leaf if a VLAN within that VRF is to be created on that device type
5 | # Even if it is not a L3_tenant still creates VRF and reserves the L3VNI/VLAN (stops renumbering if made L3_tenant in the future)
6 |
7 | # At a minimun per-tenant you need:
8 | # - tenant_name: Name Name of the VRF
9 | # l3_tenant: True or False Does it need SVIs or is routing done on a device external to the L&S fabric (i.e router)
10 | # vlans: VLANs within this tenant
11 | # - num: Number VLAN number
12 | # name: Name VLAN Name
13 |
14 | # These settings are optional and need not be set. Setting them overides the explicit default values
15 | # ip_addr: x.x.x.x/24 Adding an IP address makes it a L3 VLAN (default L2 only)
16 | # ipv4_bgp_redist: True or False Whether the SVI is redistributed into IPv4 BGP addr-fam (default True unless no ip_addr in which case is False)
17 | # create_on_leaf: True or False Whether this VLAN is created on the leafs (default True)
18 | # create_on_border: True or False Whether this VLAN is created on the borders (default False)
19 |
20 | #### Base variables ####
21 | svc_tnt:
22 | tnt:
23 | - tenant_name: BLU
24 | l3_tenant: True
25 | vlans:
26 | - num: 10
27 | name: data
28 | ip_addr: 10.10.10.1/24
29 | - num: 20
30 | name: dev
31 | ip_addr: 10.10.12.1/24
32 | - num: 24
33 | name: servers
34 | ip_addr: 10.10.11.1/24
35 | - num: 30
36 | name: lb_vips
37 | ip_addr: 10.10.20.1/24
38 | ipv4_bgp_redist: False
39 | - num: 40
40 | name: ha_keepalive
41 |
42 | - tenant_name: GRN
43 | l3_tenant: True
44 | vlans:
45 | - num: 110
46 | name: grn-web
47 | ip_addr: 10.250.110.1/24
48 | create_on_border: True
49 | ipv4_bgp_redist: False
50 | - num: 120
51 | name: grn-mail
52 | ip_addr: 10.250.120.1/24
53 | create_on_border: True
54 | ipv4_bgp_redist: False
55 |
56 | - tenant_name: AMB
57 | l3_tenant: False
58 | vlans:
59 | - num: 210
60 | name: amb-web
61 | create_on_border: True
62 | - num: 220
63 | name: amb-mail
64 | create_on_border: True
65 |
66 | - tenant_name: RED
67 | l3_tenant: False
68 | vlans:
69 | - num: 90
70 | name: red-ctt1
71 | create_on_border: True
72 | create_on_leaf: False
73 | - num: 91
74 | name: red-ctt2
75 | create_on_border: True
76 | create_on_leaf: False
77 |
78 | #### Advanced variables ####
79 | # Each L3 Tenant requires a VLAN and VNI. These are automatically generated by incrementing the L3VNI base_vlan and base_vni
80 | # Each tenant vlan requires a VNI. These are formed of an increment of 10000 per-tenant with each VLAN number added to this
81 |
82 | adv:
83 | bse_vni:
84 | tnt_vlan: 3001 # Starting VLAN number for transit L3VNI
85 | l3vni: 3001 # Starting VNI number for transit L3VNI
86 | l2vni: 10000 # Start L2VNI and the range to add to each tenants vlan.
87 | vni_incre:
88 | tnt_vlan: 1 # Value by which to increase transit L3VNI VLAN number for each tenant
89 | l3vni: 1 # Value by which to increase transit L3VNI VNI number for each tenant
90 | l2vni: 10000 # Value by which to increase the L2VNI range used (range + vlan) for each tenant
91 |
92 | bgp:
93 | ipv4_redist_rm_name: rm_CONN_vrf>>BGPas # Can change route-map name, but it MUST still include 'vrf' and 'as' in the text
--------------------------------------------------------------------------------
/build_fabric/filter_plugins/__pycache__/comp_report.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/filter_plugins/__pycache__/comp_report.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/filter_plugins/__pycache__/custom_validate.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/filter_plugins/__pycache__/custom_validate.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/filter_plugins/__pycache__/format_data_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/filter_plugins/__pycache__/format_data_model.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/filter_plugins/__pycache__/input_svc_validate.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/filter_plugins/__pycache__/input_svc_validate.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/filter_plugins/__pycache__/input_validate.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/filter_plugins/__pycache__/input_validate.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/filter_plugins/__pycache__/validate.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/filter_plugins/__pycache__/validate.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/inv_from_vars_cfg.yml:
--------------------------------------------------------------------------------
1 | ### The inventory config file that is referenced in playbook runs to run the inventory plugin ###
2 |
3 | # To just run the inventory pluggin to generate the host_vars and group_vars that are printed to screen:
4 | # ansible-inventory --playbook-dir=$(pwd) -i inv_from_vars_cfg.yml --list
5 |
6 | # Name of the inventory plugin (stored in the inventory_plugins directory)
7 | plugin: inv_from_vars
8 |
9 | # Data-model in Ansible vars directory where dictionaries will be imported from
10 | var_files:
11 | - ansible.yml
12 | - base.yml
13 | - fabric.yml
14 |
15 | # Dictionaries that will be imported from the variable files in the vars directory
16 | var_dicts:
17 | ansible:
18 | - device_type
19 | base:
20 | - device_name
21 | - addr
22 | fabric:
23 | - network_size
24 | - num_intf
25 | - bse_intf
26 | - lp
27 | - mlag
28 | - addr_incre
29 |
30 |
31 |
--------------------------------------------------------------------------------
/build_fabric/inventory_plugins/__pycache__/inv_from_vars.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/inventory_plugins/__pycache__/inv_from_vars.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/roles/base/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ### Uses template to build the base configuration (mainly non-fabric) using mostly the variables from base.yml) ###
3 |
4 | - name: "SYS >> Generating base config snippets"
5 | template:
6 | src: "{{ ansible_network_os }}/bse_tmpl.j2"
7 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/config/base.conf"
8 | changed_when: False # Stops it reporting changes in playbook summary
9 | check_mode: False # These tasks still make changes when in check mode
10 | # tags: [bse, bse_fbc, full]
--------------------------------------------------------------------------------
/build_fabric/roles/fabric/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ### Uses template to build the fabric configuration using mostly the variables from fabric.yml) ###
3 |
4 | - name: "SYS >> Generating fabric config snippets"
5 | template:
6 | src: "{{ ansible_network_os }}/fbc_tmpl.j2"
7 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/config/fabric.conf"
8 | changed_when: False # Stops it reporting changes in playbook summary
9 | check_mode: False # These tasks still make changes when in check mode
10 | # tags: [fbc, bse_fbc, full]
--------------------------------------------------------------------------------
/build_fabric/roles/fabric/templates/nxos/fbc_tmpl.j2:
--------------------------------------------------------------------------------
1 | {####### Loopback interfaces #######}
2 | {% for lp in intf_lp %}
3 | interface {{ lp.name }}
4 | description {{ lp.descr }}
5 | ip address {{ lp.ip }}
6 | {% if lp |length == 4 %}
7 | ip address {{ lp.mlag_lp_addr }} secondary
8 | {% endif %}
9 | ip router ospf {{ fbc.route.ospf.pro }} area {{ fbc.route.ospf.area }}
10 | no shutdown
11 | {% endfor %}
12 | {# Not in base template as is dependent on loopback existing #}
13 | logging source-interface {{ bse.services.src_int }}
14 |
15 | {####### Fabric interfaces #######}
16 | {% for intf, descr in intf_fbc.items() %}
17 | interface {{ intf }}
18 | description {{ descr }}
19 | no switchport
20 | medium p2p
21 | ip unnumbered {{ intf_lp[0]['name'] }}
22 | ip router ospf {{ fbc.route.ospf.pro }} area {{ fbc.route.ospf.area }}
23 | no shutdown
24 | {% endfor %}
25 |
26 | {####### VPC Configuration #######}
27 | {% if bse.device_name.spine not in inventory_hostname %}
28 | vlan {{ fbc.adv.mlag.peer_vlan }}
29 | name special_svi_over_peer-link
30 | interface Vlan {{ fbc.adv.mlag.peer_vlan }}
31 | description special_svi_over_peer-link
32 | no shutdown
33 | medium p2p
34 | no ip redirects
35 | ip address {{ mlag_peer_ip }}
36 | no ipv6 redirects
37 | ip router ospf {{ fbc.route.ospf.pro }} area {{ fbc.route.ospf.area }}
38 |
39 | vpc domain {{ fbc.adv.mlag.domain }}
40 | peer-switch
41 | {# Devices with odd hostname get lowest VPC priority #}
42 | {% if inventory_hostname[-2:]|int is odd %}
43 | role priority 8192
44 | {# Keepalive peer uses mgmt interface with IP worked out based on odd/even name (add/minus +1) #}
45 | peer-keepalive destination {{ ansible_host | ipmath(1) }} source {{ ansible_host }}
46 | {% else %}
47 | role priority 16384
48 | peer-keepalive destination {{ ansible_host | ipmath(-1) }} source {{ ansible_host }}
49 | {% endif %}
50 | peer-gateway
51 | auto-recovery
52 | ip arp synchronize
53 |
54 | {####### VPC Interfaces #######}
55 | {% for intf, descr in intf_mlag.items() %}
56 | interface {{ intf }}
57 | description {{ descr }}
58 | switchport
59 | spanning-tree port type network
60 | {% if fbc.adv.bse_intf.ec_fmt in intf %}
61 | switchport mode trunk
62 | switchport trunk allowed vlan 1-4094
63 | vpc peer-link
64 | {% else %}
65 | channel-group {{ fbc.adv.mlag.peer_po }} force mode active
66 | {% endif %}
67 | no shutdown
68 | {% endfor %}
69 |
70 | {####### VXLAN Configuration #######}
71 | interface nve1
72 | advertise virtual-rmac
73 | no shutdown
74 | host-reachability protocol bgp
75 | source-interface {{ intf_lp[1]['name'] }}
76 | {# Only adds the leafs as they are the only devices with SVIs #}
77 | fabric forwarding anycast-gateway-mac {{ fbc.acast_gw_mac }}
78 | {% endif %}
79 |
80 | {####### Routing Configuration #######}
81 | router ospf {{ fbc.route.ospf.pro }}
82 | router-id {{ intf_lp[0]['ip'].split('/') | first }}
83 |
84 | router bgp {{ fbc.route.bgp.as_num }}
85 | router-id {{ intf_lp[0]['ip'].split('/') | first }}
86 | address-family l2vpn evpn
87 | {% if bse.device_name.spine not in inventory_hostname %}
88 | advertise-pip
89 | {% endif %}
90 | retain route-target all
91 | template peer FABRIC
92 | remote-as {{ fbc.route.bgp.as_num }}
93 | update-source {{ intf_lp[0]['name'] }}
94 | timers 3 9
95 | address-family ipv4 unicast
96 | send-community
97 | send-community extended
98 | soft-reconfiguration inbound
99 | {# Spines need to be Route Reflectors #}
100 | {% if bse.device_name.spine in inventory_hostname %}
101 | route-reflector-client
102 | {% endif %}
103 | address-family l2vpn evpn
104 | send-community
105 | send-community extended
106 | {% if bse.device_name.spine in inventory_hostname %}
107 | route-reflector-client
108 | {% endif %}
109 | {% if bse.device_name.spine in inventory_hostname %}
110 | {# loop through the leaf and border groups and get loopback hostvar #}
111 | {% for dvc in groups[bse.device_name.leaf.split('-')[-1].lower()] + groups[bse.device_name.border.split('-')[-1].lower()] %}
112 | neighbor {{ hostvars[dvc]['intf_lp'][0]['ip'] |ipaddr('address') }}
113 | description {{ dvc }}
114 | inherit peer FABRIC
115 | {% endfor %} {% else %}
116 | {% for sp in groups[bse.device_name.spine.split('-')[-1].lower()] %}
117 | neighbor {{ hostvars[sp]['intf_lp'][0]['ip'] |ipaddr('address') }}
118 | description {{ sp }}
119 | inherit peer FABRIC
120 | {% endfor %} {% endif %}
--------------------------------------------------------------------------------
/build_fabric/roles/intf_cleanup/filter_plugins/__pycache__/get_intf.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/roles/intf_cleanup/filter_plugins/__pycache__/get_intf.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/roles/intf_cleanup/filter_plugins/__pycache__/intf_cleanup.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/roles/intf_cleanup/filter_plugins/__pycache__/intf_cleanup.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/roles/intf_cleanup/filter_plugins/get_intf.py:
--------------------------------------------------------------------------------
1 | '''
2 | Declaratively cleans up all interfaces not used by ensuring config is the default.
3 | '''
4 |
class FilterModule(object):
    """Ansible filter plugin that works out which switch interfaces are unused
    so they can be declaratively reset to their default configuration."""

    def filters(self):
        # Maps the filter name used in playbooks to the method that implements it
        return {
            'get_intf': self.get_intf
        }

    def get_intf(self, hostvar, bse_intf, svc_intf):
        """Return a sorted list of physical interfaces that exist on the device
        but are not used by the fabric, MLAG or service interfaces.

        Args:
            hostvar: The device's hostvars. Must hold 'num_intf' ("first,last")
                     and 'intf_fbc'; 'intf_mlag' is optional (spines have none).
            bse_intf: Fabric base-interface settings; only 'intf_fmt' is used
                      (the physical interface name prefix, e.g. "Ethernet1/").
            svc_intf: Per-device service-interface data-model (list of dicts
                      holding 'intf_num') from create_svc_intf_dm, or None.
        Returns:
            List of unused physical interface names in ascending numeric order.
        """
        intf_fmt = bse_intf['intf_fmt']
        used_intf, total_intf = [], []

        # ACTUAL_INTF: fbc.num_intf holds "first,last"; +1 as range() excludes the end
        first_intf = int(hostvar['num_intf'].split(',')[0])
        last_intf = int(hostvar['num_intf'].split(',')[1]) + 1

        # Creates a list of every possible physical interface on the device
        for intf in range(first_intf, last_intf):
            total_intf.append(intf_fmt + str(intf))

        # INTF_FBC: Fabric interfaces (from the inventory); keep only physical ports
        for intf in hostvar['intf_fbc'].keys():
            if intf_fmt in intf:
                used_intf.append(intf)
        # INTF_MLAG: MLAG interfaces (from the inventory). get() needed as spines have no intf_mlag
        if hostvar.get('intf_mlag') is not None:
            for intf in hostvar['intf_mlag'].keys():
                if intf_fmt in intf:
                    used_intf.append(intf)
        # SVC_INTF: Physical service ports (not PO or LP), built by the
        # *create_svc_intf_dm* method in the *format_dm.py* custom filter plugin
        if svc_intf is not None:
            for intf in svc_intf:
                if intf_fmt in intf['intf_num']:
                    used_intf.append(intf['intf_num'])

        # COMPARE: set difference (not symmetric difference ^) so a used interface
        # outside the declared num_intf range can never leak into the "unused" list
        left_intf = list(set(total_intf) - set(used_intf))
        # Sort on the port number; a plain sort() is lexicographic and would
        # order Ethernet1/10 before Ethernet1/2
        left_intf.sort(key=lambda intf: int(intf.replace(intf_fmt, '')))

        return left_intf
--------------------------------------------------------------------------------
/build_fabric/roles/intf_cleanup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ### Uses template to build the default config for any unused interfaces removing those used with host_vars and service_interfaces.yml ###
3 | - name: "Getting interface list"
4 | block:
5 | - name: "SYS >> Getting list of unused interfaces"
6 | set_fact:
7 | flt_dflt_intf: "{{ hostvars[inventory_hostname] |get_intf(fbc.adv.bse_intf, flt_svc_intf |default(None)) }}"
8 |
9 | - name: "SYS >> Generating default interface config snippet"
10 | template:
11 | src: "{{ ansible_network_os }}/dflt_intf_tmpl.j2"
12 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/config/dflt_intf.conf"
13 | changed_when: False # Stops it reporting changes in playbook summary
14 | check_mode: False # These tasks still make changes when in check mode
--------------------------------------------------------------------------------
/build_fabric/roles/intf_cleanup/templates/nxos/dflt_intf_tmpl.j2:
--------------------------------------------------------------------------------
1 | {###### Resets any unused interface to its default settings; without this the build is not declarative, as stale interface config would never be removed ######}
2 | {% for intf in flt_dflt_intf%}
3 | interface {{ intf }}
4 | !#shutdown
5 | !#switchport
6 | switchport mode access
7 | !#switchport trunk allowed vlan 1-4094
8 | {% endfor %}
--------------------------------------------------------------------------------
/build_fabric/roles/services/filter_plugins/__pycache__/format_data_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/roles/services/filter_plugins/__pycache__/format_data_model.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/roles/services/filter_plugins/__pycache__/format_dm.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/roles/services/filter_plugins/__pycache__/format_dm.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/roles/services/tasks/svc_intf.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ### Uses template to build the interface configuration, so defines the port type (L3, trunk, access, etc) and port-channel membership from service_interface.yml ###
3 |
4 | - name: "Create the interface configuration snippets"
5 | block:
6 | - name: "SYS >> Creating per-device service_interface data-models"
7 | set_fact:
8 | flt_svc_intf: "{{ svc_intf.intf |create_svc_intf_dm(inventory_hostname, svc_intf.adv, fbc.adv.bse_intf) }}"
9 | changed_when: False # Stops it reporting changes in playbook summary
10 | check_mode: False # These tasks still make changes when in check mode
11 |
12 | - name: "SYS >> Generating service_interface config snippets"
13 | template:
14 | src: "{{ ansible_network_os }}/svc_intf_tmpl.j2"
15 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/config/svc_intf.conf"
16 | changed_when: False # Stops it reporting changes in playbook summary
17 | check_mode: False # These tasks still make changes when in check mode
18 | when: bse.device_name.spine not in inventory_hostname # Skipped on spines as they hold no service interfaces
--------------------------------------------------------------------------------
/build_fabric/roles/services/tasks/svc_rtr.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ### Uses template to build the tenant router configuration, so defines BGP and OSPF using variables from service_routing.yml ###
3 |
4 | - name: "Create the tenant routing configuration snippets"
5 | block:
6 | - name: "SYS >> Creating per-device service_routing data-models"
7 | set_fact:
8 | flt_svc_rtr: "{{ inventory_hostname |create_svc_rtr_dm(svc_rtr.bgp.group |default (), svc_rtr.bgp.tenant |default (),
9 | svc_rtr.ospf |default (), svc_rtr.static_route |default (), svc_rtr.adv, fbc) }}" # default() makes each routing element optional in the input file
10 | changed_when: False # Stops it reporting changes in playbook summary
11 | check_mode: False # These tasks still make changes when in check mode
12 |
13 | - name: "SYS >> Generating the tenant routing configuration snippets"
14 | template:
15 | src: "{{ ansible_network_os }}/svc_rtr_tmpl.j2"
16 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/config/svc_rtr.conf"
17 | changed_when: False # Stops it reporting changes in playbook summary
18 | check_mode: False # These tasks still make changes when in check mode
19 | when: bse.device_name.spine not in inventory_hostname # Skipped on spines as they hold no tenant routing
--------------------------------------------------------------------------------
/build_fabric/roles/services/tasks/svc_tnt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ### Uses template to build the tenant configuration, so VRFs, SVIs, VXLANs and VLANs using variables from service_tenant.yml ###
3 |
4 | - name: "Create the tenant configuration snippets"
5 | block:
6 | - name: "SYS >> Creating per-device-role service_tenant data-models"
7 | set_fact:
8 | flt_svc_tnt: "{{ svc_tnt.tnt |create_svc_tnt_dm(svc_tnt.adv.bse_vni, svc_tnt.adv.vni_incre, fbc.adv.mlag.peer_vlan,
9 | svc_rtr.adv.redist.rm_name |default(svc_tnt.adv.redist.rm_name), fbc.route.bgp.as_num) }}" # Completes data-model with VNI
10 | changed_when: False # Stops it reporting changes in playbook summary
11 | check_mode: False # These tasks still make changes when in check mode
12 | run_once: true # Only has to run once to create the new data-models as is not per-device
13 |
14 | - name: "SYS >> Generating service_tenant config snippets"
15 | template:
16 | src: "{{ ansible_network_os }}/svc_tnt_tmpl.j2"
17 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/config/svc_tnt.conf"
18 | changed_when: False # Stops it reporting changes in playbook summary
19 | check_mode: False # These tasks still make changes when in check mode
20 | when: bse.device_name.spine not in inventory_hostname # Skipped on spines as they hold no tenant config
21 |
--------------------------------------------------------------------------------
/build_fabric/roles/services/templates/nxos/svc_intf_tmpl.j2:
--------------------------------------------------------------------------------
1 | {################################################# Service: Device Ports #################################################}
2 | {% for intf in flt_svc_intf %}
3 | interface {{ intf.intf_num }}
4 | description {{ intf.descr }}
5 | {% if intf.type == 'layer3' %}
6 | no switchport
7 | {% endif %}
8 | {# Common config for all Layer3 and loopback ports #}
9 | {% if intf.type == 'layer3' or intf.type == 'loopback' %}{% if intf.tenant is defined %}
10 | vrf member {{ intf.tenant }}
11 | {% endif %}
12 | ip address {{ intf.ip_vlan }}
13 | {% else %}
14 | {# Common config for all Layer2 ports #}
15 | spanning-tree port type {{ intf.stp }}
16 | !#switchport
17 | {# Layer2 Access port config #}
18 | {% if intf.type == 'access' %}
19 | switchport access vlan {{ intf.ip_vlan }}
20 | {# Layer2 single-homed access port config #}
21 | {% if intf.dual_homed is sameas false %}
22 | !#switchport trunk allowed vlan 1-4094
23 | switchport mode access
24 | {# Layer2 port-channel access port config #}
25 | {% endif %}{% if fbc.adv.bse_intf.ec_fmt in intf.intf_num %}
26 | !#switchport trunk allowed vlan 1-4094
27 | switchport mode access
28 | vpc {{ intf.vpc_num }}
29 | {# Layer2 Trunk port config #}
30 | {% endif %}{% elif 'trunk' in intf.type %}
31 | {% if intf.type == 'non_stp_trunk' %}
32 | spanning-tree bpduguard enable
33 | {# Layer2 single-homed trunk port config #}
34 | {% endif %}{% if intf.dual_homed is sameas false %}
35 | switchport trunk allowed vlan {{ intf.ip_vlan }}
36 | switchport mode trunk
37 | {# Layer2 port-channel trunk port config #}
38 | {% endif %}{% if fbc.adv.bse_intf.ec_fmt in intf.intf_num %}
39 | switchport trunk allowed vlan {{ intf.ip_vlan }}
40 | switchport mode trunk
41 | vpc {{ intf.vpc_num }}
42 | {# All Layer2 dual-homed ports #}
43 | {% endif %}{% endif %}{% if intf.dual_homed is sameas true %}
44 | {% if intf.po_mode == 'on' %}
45 | channel-group {{ intf.po_num }} force
46 | {% else %}
47 | channel-group {{ intf.po_num }} force mode {{ intf.po_mode }}
48 | {% endif %}{% endif %}{% endif %}
49 | no shutdown
50 | {% endfor %}
51 |
--------------------------------------------------------------------------------
/build_fabric/roles/services/templates/nxos/svc_rtr_tmpl.j2:
--------------------------------------------------------------------------------
1 | {################################################# Service: Tenant Routing #################################################}
2 |
3 |
4 | {### Create all the Prefix-lists ###}
5 | ip prefix-list {{ svc_rtr.adv.dflt_pl.pl_allow }} seq 5 permit 0.0.0.0/0 le 32
6 | ip prefix-list {{ svc_rtr.adv.dflt_pl.pl_default }} seq 5 permit 0.0.0.0/0
7 | ip prefix-list {{ svc_rtr.adv.dflt_pl.pl_deny }} seq 5 deny 0.0.0.0/0 le 32
8 | {% for pfx in flt_svc_rtr[0] %}
9 | ip prefix-list {{ pfx[0] }} seq {{ pfx[1] }} {{ pfx[2] }} {{ pfx[3] }}
10 | {% endfor %}
11 |
12 | {### Create all the Route-maps ###}
13 | {% for rm in flt_svc_rtr[1] %}
14 | route-map {{ rm[0] }} permit {{ rm[1] }}
15 | {% if 'conn' in rm[0] %}
16 | match interface {{ rm[2] }}
17 | {% elif rm[2] != None %}
18 | match ip address prefix-list {{ rm[2] }}
19 | {% endif %}{% if rm[3][0] == 'weight' %}
20 | set weight {{ rm[3][1] }}
21 | {% elif rm[3][0] == 'pref' %}
22 | set local-preference {{ rm[3][1] }}
23 | {% elif rm[3][0] == 'med' or rm[3][0] == 'metric' %}
24 | set metric {{ rm[3][1] }}
25 | {% elif rm[3][0] == 'as_prepend' %}
26 | set as-path prepend {{ rm[3][1] * ((fbc.route.bgp.as_num | string) + ' ') }}
27 | {% endif %}{% endfor %}
28 |
29 |
30 | router bgp {{ fbc.route.bgp.as_num }}
31 | {### Create all the peer templates ###}
32 | {% for grp in flt_svc_rtr[2].values() %}
33 | template peer {{ grp.name}}
34 | {% if grp.remote_as is defined %}
35 | remote-as {{ grp.remote_as }}
36 | {% endif %}{% if grp.timers is defined %}
37 | timers {{ grp.timers[0] }} {{ grp.timers[1] }}
38 | {% endif %}{% if grp.bfd is defined %}
39 | bfd
40 | {% endif %}{% if grp.ebgp_multihop is defined %}
41 | ebgp-multihop {{ grp.ebgp_multihop }}
42 | {% endif %}{% if grp.password is defined %}
43 | password {{ grp.password }}
44 | {% endif %}{% if grp.update_source is defined %}
45 | update-source {{ grp.update_source }}
46 | {% endif %}
47 | address-family ipv4 unicast
48 | send-community
49 | send-community extended
50 | soft-reconfiguration inbound
51 | {% if grp.default is defined %}
52 | default-originate
53 | {% endif %}{% if grp.inbound_rm is defined %}
54 | route-map {{ grp.inbound_rm }} in
55 | {% endif %}{% if grp.outbound_rm is defined %}
56 | route-map {{ grp.outbound_rm }} out
57 | {% endif %}{% if grp.next_hop_self is defined %}
58 | next-hop-self
59 | {% endif %}{% endfor %}
60 |
61 | {### Create all the peers ###}
62 | {% for vrf, peer in flt_svc_rtr[3].items() %}
63 | vrf {{ vrf }}
64 | {% for pr in peer %}
65 | neighbor {{ pr.peer_ip }}
66 | inherit peer {{ pr.grp }}
67 | description {{ pr.description }}
68 | {% if pr.remote_as is defined %}
69 | remote-as {{ pr.remote_as }}
70 | {% endif %}{% if pr.timers is defined %}
71 | timers {{ pr.timers[0] }} {{ pr.timers[1] }}
72 | {% endif %}{% if pr.bfd is defined %}
73 | bfd
74 | {% endif %}{% if pr.ebgp_multihop is defined %}
75 | ebgp-multihop {{pr.ebgp_multihop }}
76 | {% endif %}{% if pr.password is defined %}
77 | password {{ pr.password }}
78 | {% endif %}{% if pr.update_source is defined %}
79 | update-source {{ pr.update_source }}
80 | {% endif %}
81 | address-family ipv4 unicast
82 | {% if pr.default is defined %}
83 | default-originate
84 | {% endif %}{% if pr.inbound_rm is defined %}
85 | route-map {{ pr.inbound_rm }} in
86 | {% endif %}{% if pr.outbound_rm is defined %}
87 | route-map {{ pr.outbound_rm }} out
88 | {% endif %}{% if pr.next_hop_self is defined %}
89 | next-hop-self
90 | {% endif %}{% endfor %}{% endfor %}
91 |
92 | {### Create network, summary and redistribution per vrf ###}
93 |
94 | {% for vrf, cfg in flt_svc_rtr[4].items() %}
95 | vrf {{ vrf }}
96 | address-family ipv4 unicast
97 | {% if cfg.network is defined %}{% for pfx in cfg.network %}
98 | network {{ pfx }}
99 | {% endfor %}{% endif %}
100 | {% if cfg.summary is defined %}{% for pfx, attr in cfg.summary.items() %}
101 | aggregate-address {{ pfx }} {{ attr }}
102 | {% endfor %}{% endif %}
103 | {% if cfg.redist is defined %}{% for each_redist in cfg.redist %}
104 | redistribute {{ each_redist.type | replace('connected', 'direct') }} route-map {{ each_redist.rm_name }}
105 | {% endfor %}{% endif %}
106 | {% endfor %}
107 |
108 |
109 | {### Create the OSPF process ###}
110 | {% for ospf_proc, ospf_cfg in flt_svc_rtr[5].items() %}
111 | router ospf {{ ospf_proc }}
112 | {% if ospf_cfg.rid is defined %}
113 | router-id {{ ospf_cfg.rid }}
114 | {% endif %}{% if ospf_cfg.bfd is sameas true %}
115 | bfd
116 | {% endif %}{% if ospf_cfg.area_type is defined %}{% for area, type in ospf_cfg.area_type.items() %}
117 | area {{ area }} {{ type }}
118 | {% endfor %}{% endif %}{% if ospf_cfg.default_orig is defined %}
119 | default-information originate {{ ospf_cfg.default_orig }}
120 | {% endif %}{% if ospf_cfg.summary is defined %}
121 | {% for each_smry in ospf_cfg.summary %}{% if each_smry.area is defined %}
122 | {% for pfx, attr in each_smry.prefix.items() %}
123 | area {{ each_smry.area }} range {{ pfx }} {{ attr }}
124 | {% endfor %}{% else %}{% for pfx, attr in each_smry.prefix.items() %}
125 | summary-address {{ pfx }} {{ attr }}
126 | {% endfor %}{% endif %}{% endfor %}
127 | {% endif %}{% if ospf_cfg.auth is defined %}{% for each_area in ospf_cfg.auth %}
128 | area {{ each_area }} authentication message-digest
129 | {% endfor %}{% endif %}
130 | passive-interface default
131 | vrf {{ ospf_cfg.tenant }}
132 | {% if ospf_cfg.redist is defined %}{% for each_redist in ospf_cfg.redist %}
133 | redistribute {{ each_redist.type | replace('connected', 'direct') }} route-map {{ each_redist.rm_name }}
134 | {% endfor %}{% endif %}
135 | {% endfor %}
136 |
137 | {### Create the OSPF interfaces config ###}
138 | {% for intf, ospf_cfg in flt_svc_rtr[6].items() %}
139 | interface {{ intf }}
140 | {% if ospf_cfg.type is defined %}
141 | medium p2p
142 | {% endif %}{% if ospf_cfg.authentication is defined %}
143 | ip ospf message-digest-key 1 md5 3 {{ ospf_cfg.authentication }}
144 | {% endif %}{% if ospf_cfg.cost is defined %}
145 | ip ospf cost {{ ospf_cfg.cost }}
146 | {% endif %}{% if ospf_cfg.timers is defined %}
147 | ip ospf bfd disable
148 | ip ospf hello-interval {{ ospf_cfg.timers[0] }}
149 | ip ospf dead-interval {{ ospf_cfg.timers[1] }}
150 | {% endif %}{% if ospf_cfg.passive is not defined %}
151 | no ip ospf passive-interface
152 | {% endif %}
153 | ip router ospf {{ ospf_cfg.proc }} area {{ ospf_cfg.area }}
154 | {% endfor %}
155 |
156 | {### Create the static routes ###}
157 | {% for vrf, route in flt_svc_rtr[7].items() %}
158 | vrf context {{ vrf }}
159 | {% for each_rte in route %}{% for each_pfx in each_rte.prefix %}
160 | {% if each_rte.next_hop_vrf is defined %}
161 | ip route {{ each_pfx }} {{ each_rte.interface }} {{ each_rte.gateway }} vrf {{ each_rte.next_hop_vrf }} {{ each_rte.ad }}
162 | {% else %}
163 | ip route {{ each_pfx }} {{ each_rte.interface }} {{ each_rte.gateway }} {{ each_rte.ad }}
164 | {% endif %}{% endfor %}{% endfor %}{% endfor %}
--------------------------------------------------------------------------------
/build_fabric/roles/services/templates/nxos/svc_tnt_tmpl.j2:
--------------------------------------------------------------------------------
1 | {#### Logic to decide which set of variables to render dependent on device-role ####}
2 | {% if bse.device_name.leaf in inventory_hostname %}{% set flt_vars = flt_svc_tnt[0] %}
3 | {% elif bse.device_name.border in inventory_hostname %}{% set flt_vars = flt_svc_tnt[1] %}
4 | {% endif %}
5 |
6 | {#### VRF: All VRFs are created even if it is not a L3_tenant ####}
7 | {% for flt_tnt in flt_vars %}
8 | vrf context {{ flt_tnt.tnt_name }}
9 | vni {{ flt_tnt.l3vni }}
10 | rd auto
11 | address-family ipv4 unicast
12 | route-target both auto
13 | route-target both auto evpn
14 | {% endfor %}
15 |
16 | {#### VLANs: If it is a L3_tenant creates all L2 and L3 VLANs, if not it doesn't create the L3VNI VLAN ####}
17 | spanning-tree vlan 1-3967 priority 0
18 | {% for flt_tnt in flt_vars %}{% for vl in flt_tnt.vlans %}
19 | {% if flt_tnt.l3_tnt is sameas true %}
20 | vlan {{ vl.num }}
21 | name {{ vl.name }}
22 | vn-segment {{ vl.vni }}
23 | {% elif vl.vni != flt_tnt.l3vni %}
24 | vlan {{ vl.num }}
25 | name {{ vl.name }}
26 | vn-segment {{ vl.vni }}
27 | {% endif %}
28 | {% endfor %}{% endfor %}
29 |
30 | {#### SVIs - IP assigned depends whether it is a L3VNI, SVI or redistributed SVI (uses default (L3VNI) or custom tag ####}
31 | {% for flt_tnt in flt_vars %}{% if flt_tnt.l3_tnt is sameas true %}
32 | {% for vl in flt_tnt.vlans %}{% if vl.ip_addr != None %}
33 | interface vlan{{ vl.num }}
34 | no shutdown
35 | vrf member {{ flt_tnt.tnt_name }}
36 | no ip redirects
37 | no ipv6 redirects
38 | {% if vl.ip_addr == 'l3_vni' %}
39 | ip forward
40 | {% elif vl.ipv4_bgp_redist is sameas true %}
41 | ip address {{ vl.ip_addr }} tag {{ flt_tnt.bgp_redist_tag }}
42 | fabric forwarding mode anycast-gateway
43 | {% else %}
44 | ip address {{ vl.ip_addr }}
45 | fabric forwarding mode anycast-gateway
46 | {% endif %}
47 | {% endif %}{% endfor %}
48 | {% endif %}{% endfor %}
49 |
50 | {#### VXLAN: Add L2VNI (if vlan number is not L3VNI vlan) or associate L3VNI (if L3_tenant) to NVE interface ####}
51 | interface nve1
52 | global suppress-arp
53 | global ingress-replication protocol bgp
54 | {% for flt_tnt in flt_vars %}{% if flt_tnt.l3_tnt is sameas true %}
55 | member vni {{ flt_tnt.l3vni }} associate-vrf
56 | {% endif %}
57 | {% for vl in flt_tnt.vlans %}{% if vl.num != flt_tnt.tnt_vlan %}
58 | member vni {{ vl.vni }}
59 | {% endif %}{% endfor %}
60 | {% endfor %}
61 |
62 | {#### EVPN: Create for all vlans except the L3VNI vlan ####}
63 | evpn
64 | {% for flt_tnt in flt_vars %}{% for vl in flt_tnt.vlans %}
65 | {% if vl.num != flt_tnt.tnt_vlan %}
66 | vni {{ vl.vni }} l2
67 | rd auto
68 | route-target import auto
69 | route-target export auto
70 | {% endif %}
71 | {% endfor %}{% endfor %}
72 |
73 | {#### BGP_REDIST: Only applies if it is a L3_tenant. RM always created and applied, however the match statement is dependent on ipv4_bgp_redist ####}
74 | {% for flt_tnt in flt_vars %}{% if flt_tnt.l3_tnt is sameas true %}
75 | route-map {{ flt_tnt.rm_name }} permit 10
76 | {% if flt_tnt.tnt_redist is sameas true %}
77 | match tag {{ flt_tnt.bgp_redist_tag }}
78 | {% endif %}
79 | {% endif %}{% endfor %}
80 |
81 | router bgp 65001
82 | {% for flt_tnt in flt_vars %}{% if flt_tnt.l3_tnt is sameas true %}
83 | vrf {{ flt_tnt.tnt_name }}
84 | address-family ipv4 unicast
85 | advertise l2vpn evpn
86 | redistribute direct route-map {{ flt_tnt.rm_name }}
87 | {% endif %}{% endfor %}
--------------------------------------------------------------------------------
/build_fabric/roles/validate/filter_plugins/__pycache__/custom_validate.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/build_fabric/roles/validate/filter_plugins/__pycache__/custom_validate.cpython-36.pyc
--------------------------------------------------------------------------------
/build_fabric/roles/validate/tasks/cus_val.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ### Uses custom_validate plugin to verify that the actual state of OSPF, LAG and MLAG matches the desired state ###
3 | # 4a. TEMPLATE: Creates validation file of expected desired state from the input data
4 | - name: "SYS >> Creating {{ ansible_network_os }} bse_fbc custom_validate validation file"
5 | template:
6 | src: "{{ ansible_network_os }}/bse_fbc_val_tmpl.j2"
7 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/validate/{{ ansible_network_os }}_desired_state.yml"
8 | changed_when: False
9 | tags: [cus_val_fbc_bse]
10 |
11 | - name: "BLK >> bse_fbc and svc_tnt custom_validate validation files"
12 | block:
13 | - set_fact:
14 | flt_svc_tnt: "{{ svc_tnt.tnt |create_svc_tnt_dm(svc_tnt.adv.bse_vni, svc_tnt.adv.vni_incre, fbc.adv.mlag.peer_vlan) }}"
15 | - name: "SYS >> Creating {{ ansible_network_os }} bse_fbc and svc_tnt custom_validate validation file"
16 | template:
17 | src: "{{ ansible_network_os }}/svc_tnt_val_tmpl.j2"
18 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/validate/{{ ansible_network_os }}_desired_state.yml"
19 | changed_when: False
20 | tags: [cus_val_tnt, cus_val_svc]
21 |
22 | - name: "BLK >> bse_fbc, svc_tnt and svc_intf custom_validate validation files"
23 | block:
24 | - set_fact:
25 | flt_svc_tnt: "{{ svc_tnt.tnt |create_svc_tnt_dm(svc_tnt.adv.bse_vni, svc_tnt.adv.vni_incre, fbc.adv.mlag.peer_vlan) }}"
26 | - set_fact:
27 | flt_svc_intf: "{{ svc_intf.intf |create_svc_intf_dm(inventory_hostname, svc_intf.adv, fbc.adv.bse_intf) }}"
28 | - name: "SYS >> Creating {{ ansible_network_os }} bse_fbc, svc_tnt and svc_intf custom_validate validation file"
29 | template:
30 | src: "{{ ansible_network_os }}/svc_intf_val_tmpl.j2"
31 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/validate/{{ ansible_network_os }}_desired_state.yml"
32 | changed_when: False
33 | tags: [cus_val_intf, cus_val_svc, cus_val, post_val, full]
34 |
35 | # 4b. CUSTOM: napalm_cli gets the actual state and a customised version of napalm_validate is used to compare and report
36 | - name: "Create {{ ansible_network_os }} compliance report"
37 | block:
38 | # Loads the variable file
39 | - include_vars: "{{ ans.dir_path }}/{{ inventory_hostname }}/validate/{{ ansible_network_os }}_desired_state.yml"
40 | - name: "NET >> Validating OSPF, Port-channels, VPC, VTEPs, VNIs and interfaces"
41 | # Generates actual_state using napalm_cli
42 | napalm_cli:
43 | provider: "{{ ans.creds_all }}"
44 | dev_os: "{{ ansible_network_os }}"
45 | args:
46 | commands:
47 | - "{{ item.keys() | list | first }} | json"
48 | register: output
49 | # Loops through the cmds to run and uses label to print the cmd run each loop iteration
50 | loop: "{{ cmds }}"
51 | loop_control:
52 | label: "{{ item.keys() | list | first }}"
53 |
54 | # 4c. REPORT: Output is parsed into a data model and then passed through the custom_validate plugin to compare states and generate a report
55 | - assert:
56 | # Only True or False is returned to Assert module. The compliance report is added to the napalm_validate generated report
57 | that: "{{ cmds[lp_idx] | custom_validate(item.cli_results.values() | list | first, ans.dir_path,
58 | inventory_hostname, ansible_network_os) }} == 'custom_validate passed'"
59 | quiet: yes
60 | fail_msg: "Non-compliant state encountered. Refer to the full report {{ ans.dir_path }}/reports/{{ inventory_hostname }}_compliance_report.json"
61 | loop: "{{ output.results }}"
62 | loop_control:
63 | label: "{{ item.cli_results.keys() | list | first }}"
64 | index_var: lp_idx
65 | when: not ansible_check_mode # Dont want to run validation in check_mode
66 | tags: [cus_val_fbc_bse, cus_val_tnt, cus_val_intf, cus_val_svc, cus_val, post_val, full]
67 |
68 | # Use to test validation outputs
69 | # - name: "Print output to screen instead of assert"
70 | # block:
71 | # - set_fact:
72 | # # Only True or False is returned to Assert module. The compliance report is added to the napalm_validate generated report
73 | # test: "{{ cmds[lp_idx] | custom_validate(item.cli_results.values() | list | first, ans.dir_path, inventory_hostname, ansible_network_os) }}"
74 | # loop: "{{ output.results }}"
75 | # loop_control:
76 | # label: "{{ item.cli_results.keys() | list | first }}"
77 | # index_var: lp_idx
78 | # - debug:
79 | # var=test
80 |
81 |
82 |
83 |
--------------------------------------------------------------------------------
/build_fabric/roles/validate/tasks/nap_val.yml:
--------------------------------------------------------------------------------
1 | ### Uses napalm_validate to verify that the actual state of LLDP, BGP and ping matches the desired state ###
2 |
3 | ---
4 | # 4a. TEMPLATE: Creates validation file of expected desired state from the input data
5 | - name: "SYS >> Creating bse_fbc napalm_validate validation file"
6 | template:
7 | src: napalm/bse_fbc_val_tmpl.j2
8 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/validate/napalm_desired_state.yml"
9 | changed_when: False
10 | tags: [nap_val_fbc_bse]
11 |
12 | - name: "SYS >> Creating bse_fbc and svc_tnt napalm_validate validation file"
13 | template:
14 | src: napalm/svc_tnt_val_tmpl.j2
15 | dest: "{{ ans.dir_path }}/{{ inventory_hostname }}/validate/napalm_desired_state.yml"
16 | changed_when: False
17 | tags: [nap_val_tnt,nap_val_svc, nap_val, post_val, full]
18 |
19 |
20 | # 4b. NAPALM: Napalm_validate used to gather and compare actual and desired state on all device types
21 | - name: "Create napalm compliance report"
22 | block:
23 | - name: "NET >> Validating LLDP Connections, BGP and Reachability"
24 | # Generates actual_state and compares against the desired state
25 | napalm_validate:
26 | provider: "{{ ans.creds_all }}"
27 | dev_os: "{{ ansible_network_os }}"
28 | # Have to use filter plugin as napalm_validate won't take ~/ so filter gets full path
29 | validation_file: "{{ ans.dir_path | fix_home_path() }}/{{ inventory_hostname }}/validate/napalm_desired_state.yml"
30 | register: nap_val
31 | ignore_errors: yes # Needed so continues to the FAIL play
32 |
33 | # 4c. REPORT: The compliance Report is saved to file no matter what the outcome. Output to screen is not verbose unless non-compliant.
34 | - name: "SYS >> Saving compliance reports to {{ ans.dir_path }}/reports/"
35 | copy: content="{{ nap_val.compliance_report }}" dest={{ ans.dir_path }}/reports/{{ inventory_hostname }}_compliance_report.json
36 | changed_when: False
37 | ignore_errors: yes # Needed so continues to the FAIL play
38 | - name: "FAIL >> Compliance check failed"
39 | fail:
40 | msg: "Non-compliant state encountered. Refer to the full report in {{ ans.dir_path }}/reports/{{ inventory_hostname }}_compliance_report.json"
41 | when: not nap_val.compliance_report.complies
42 | when: not ansible_check_mode # Dont want to run validation in check_mode
43 | tags: [nap_val_fbc_bse, nap_val_tnt,nap_val_svc, nap_val, post_val, full]
--------------------------------------------------------------------------------
/build_fabric/roles/validate/templates/napalm/bse_fbc_val_tmpl.j2:
--------------------------------------------------------------------------------
1 | {##### From the input variables generates a validation file (expected state) used by napalm_validate to validate actual state #####}
2 | {####### Validates the fabric, so the configuration as applied from fabric.yml by fabric role ######}
3 | {########## Block statements inherit cmd validation info from the svc_tnt_val_tmpl.j2 template #########}
4 |
5 | {# Macro used as need to use the same block more than once #}
6 | {%- macro macro_get_bgp_neighbors() -%}
7 | {% block get_bgp_neighbors %}{% endblock %}
8 | {%- endmacro -%}
9 |
10 |
11 | {### get_facts ###}
12 | - get_facts:
13 | hostname: {{ inventory_hostname }}
14 |
15 | {### get_lldp_neighbors ###}
16 | - get_lldp_neighbors:
17 | {% for intf, descr in intf_fbc.items() %}
18 | {# {% for intf, descr in hostvars['DC1-N9K-BORDER02'].intf_fbc.items() %} #} {#For testing in python #}
19 | {{ intf }}:
20 | - hostname: {{ descr.split(' ')[2] + '.' + bse.services.domain }}
21 | port: {{ fbc.adv.bse_intf.intf_fmt + (descr.split(' ')[3] | replace(fbc.adv.bse_intf.intf_short, '')) }}
22 | {% endfor %}
23 |
24 | {### get_bgp_neighbors ###}
25 | - get_bgp_neighbors:
26 | global:
27 | router_id: {{ intf_lp[0].ip |ipaddr('address') }}
28 | peers:
29 | _mode: strict
30 | {% if bse.device_name.spine in inventory_hostname %}
31 | {% for x in groups[bse.device_name.leaf.split('-')[-1].lower()] + groups[bse.device_name.border.split('-')[-1].lower()] %}
32 | {{ hostvars[x].intf_lp[0].ip |ipaddr('address') }}:
33 | is_enabled: true
34 | is_up: true
35 | {{ macro_get_bgp_neighbors() }}
36 | {% endfor %} {% else %}
37 | {% for x in groups[bse.device_name.spine.split('-')[-1].lower()] %}
38 | {{ hostvars[x].intf_lp[0].ip |ipaddr('address') }}:
39 | is_enabled: true
40 | is_up: true
41 | {{ macro_get_bgp_neighbors() }}
42 | {% endfor %} {% endif %}
43 |
44 | {### ping: Gets all loopback addresses from host_vars. Disabled as takes too long ###}
45 | {# {% for x in groups['all'] %}
46 | - ping:
47 | _name: ping RTR loopback {{ x }}
48 | _kwargs:
49 | destination: {{ hostvars[x].intf_lp[0].ip |ipaddr('address') }}
50 | count: 3
51 | success:
52 | packet_loss: 0
53 | _mode: strict
54 | {% endfor %}
55 | {% for x in groups[bse.device_name.leaf.split('-')[-1].lower()] + groups[bse.device_name.border.split('-')[-1].lower()] %}
56 | - ping:
57 | _name: ping VTEP loopback {{ x }}
58 | _kwargs:
59 | destination: {{ hostvars[x].intf_lp[1].ip |ipaddr('address') }}
60 | count: 3
61 | success:
62 | packet_loss: 0
63 | _mode: strict
64 | - ping:
65 | _name: ping MLAG loopback {{ x }}
66 | _kwargs:
67 | destination: {{ hostvars[x].intf_lp[1].mlag_lp_addr |ipaddr('address') }}
68 | count: 3
69 | success:
70 | packet_loss: 0
71 | _mode: strict
72 | {% endfor %}
73 | {% for x in groups['border'] %}
74 | - ping:
75 | _name: ping BGW loopback: {{ x }}
76 | _kwargs:
77 | destination: {{ hostvars[x].intf_lp[2].ip |ipaddr('address') }}
78 | count: 3
79 | success:
80 | packet_loss: 0
81 | _mode: strict
82 | {% endfor %} #}
83 |
--------------------------------------------------------------------------------
/build_fabric/roles/validate/templates/napalm/svc_tnt_val_tmpl.j2:
--------------------------------------------------------------------------------
1 | {##### From the input variables generates a validation file (expected state) used by napalm_validate to validate actual state #####}
2 | {####### Validates the tenant information, so the configuration as applied from services_tenant.yml by the svc_tnt role ######}
3 |
4 | {# Adds the results of this rendered file into the bse_fbc_val_tmpl file #}
5 | {% extends "napalm/bse_fbc_val_tmpl.j2" %}
6 |
7 |
8 | {### get_bgp_neighbors: adds the l2vpn address-family check under each BGP peer rendered by the base template ###}
9 | {% block get_bgp_neighbors %}
10 | address_family:
11 | l2vpn:
12 | received_prefixes: '>=1'
13 | {% endblock %}
--------------------------------------------------------------------------------
/build_fabric/roles/validate/templates/nxos/bse_fbc_val_tmpl.j2:
--------------------------------------------------------------------------------
1 | {##### From the input variables generates a validation file (expected state) used by custom_validate to validate actual state #####}
2 | {####### Validates the fabric, so the configuration as applied from fabric.yml by fabric role ######}
3 | {########## Block statements inherit cmd validation info from the svc_tnt_val_tmpl.j2 template #########}
4 | cmds:
5 | {### show ip ospf neighbors detail ###}
6 | - show ip ospf neighbors detail:
7 | {% if bse.device_name.spine in inventory_hostname %}
8 | {% for x in groups[bse.device_name.leaf.split('-')[-1].lower()] + groups[bse.device_name.border.split('-')[-1].lower()] %}
9 | {{ hostvars[x].intf_lp[0].ip |ipaddr('address') }}:
10 | state: FULL
11 | {% endfor %}{% else %}
12 | {% for x in groups[bse.device_name.spine.split('-')[-1].lower()] %}
13 | {{ hostvars[x].intf_lp[0].ip |ipaddr('address') }}:
14 | state: FULL
15 | {% endfor %}
16 | {# MLAG peering for leaf and border got based on odd/even hostname and +/- 1 #}
17 | {% if inventory_hostname[-2:]|int is odd %}
18 | {{ intf_lp[0].ip |ipaddr('address') | ipmath(+1) }}:
19 | state: FULL
20 | {% else %}
21 | {{ intf_lp[0].ip |ipaddr('address') | ipmath(-1) }}:
22 | state: FULL
23 | {% endif %}
24 |
25 |
26 | {### show port-channel summary ###}
27 | - show port-channel summary:
28 | {{ fbc.adv.bse_intf.ec_fmt + fbc.adv.mlag.peer_po |string }}:
29 | oper_status: U
30 | protocol: LACP
31 | members:
32 | _mode: strict
33 | {{ fbc.adv.bse_intf.intf_fmt + fbc.adv.bse_intf.mlag_peer.split('-')[0] }}:
34 | mbr_status: P
35 | {{ fbc.adv.bse_intf.intf_fmt + fbc.adv.bse_intf.mlag_peer.split('-')[1] }}:
36 | mbr_status: P
37 | {% block show_port_channel_summary %}
38 | {% endblock %}
39 |
40 |
41 | {### show vpc ###}
42 | - show vpc:
43 | vpc_peer_keepalive_status: peer-alive
44 | vpc_peer_status: peer-ok
45 | peer-link_po: {{ fbc.adv.bse_intf.ec_short + fbc.adv.mlag.peer_po |string }}
46 | {% block show_vpc %}{% if fbc.adv.mlag.peer_vlan == '2' %}
47 | peer-link_vlans: "1-2"
48 | {% else %}
49 | peer-link_vlans: "1,{{ fbc.adv.mlag.peer_vlan }}"
50 | {% endif %}{% endblock %}
51 |
52 |
53 | {### show interfaces_trunk ###}
54 | - show interface trunk:
55 | {{ fbc.adv.bse_intf.ec_fmt + fbc.adv.mlag.peer_po |string }}:
56 | allowed_vlans: 1-4094
57 | {% block show_int_trunk %}{% if fbc.adv.mlag.peer_vlan == '2' %}
58 | stpfwd_vlans: "1-2"
59 | {% else %}
60 | stpfwd_vlans: "1,{{ fbc.adv.mlag.peer_vlan }}"
61 | {% endif %}{% endblock %}
62 | {% endif %}
63 |
64 |
65 | {### show ip int brief include-secondary vrf all ###}
66 | - show ip int brief include-secondary vrf all:
67 | mgmt0:
68 | tenant: management
69 | prefix: {{ ansible_host }}
70 | proto-state: up
71 | link-state: up
72 | admin-state: up
73 | {# Creates template for all loopbacks (including secondary mlag ip) and interfaces in default VRF #}
74 | {%for intf in intf_lp + intf_fbc.keys() | list %}{% if intf.ip is defined %}
75 | {{ intf.name | replace('loopback','Lo') }}:
76 | {% if intf.mlag_lp_addr is defined %}
77 | prefix: {{ intf.ip |ipaddr('address'), intf.mlag_lp_addr |ipaddr('address') }}
78 | {% else %}
79 | prefix: {{ intf.ip |ipaddr('address') }}
80 | {% endif %}{% else %}
81 | {{ intf | replace('Ethernet','Eth') }}:
82 | prefix: None
83 | {% endif %}
84 | tenant: default
85 | proto-state: up
86 | link-state: up
87 | admin-state: up
88 | {% endfor %}
89 | {# The rest of the actions are ONLY performed on the leaf and border switches #}
90 | {% if bse.device_name.spine not in inventory_hostname %}
91 | Vlan{{ fbc.adv.mlag.peer_vlan }}:
92 | tenant: default
93 | prefix: {{ mlag_peer_ip |ipaddr('address') }}
94 | proto-state: up
95 | link-state: up
96 | admin-state: up
97 | {% endif %}
98 | {% block show_ip_int_brief %}
99 | {% endblock %}
100 |
101 |
102 | {### show nve peers ###}
103 | {% block show_nve_peers %}
104 | {% endblock %}
105 |
106 |
107 | {### show nve vni ###}
108 | {% block show_nve_vni %}
109 | {% endblock %}
110 |
111 |
112 | {### show interface status ###}
113 | {% block show_int_status%}
114 | {% endblock %}
--------------------------------------------------------------------------------
/build_fabric/roles/validate/templates/nxos/svc_intf_val_tmpl.j2:
--------------------------------------------------------------------------------
1 | {##### From the input variables generates a validation file (expected state) used by custom_validate to validate actual state #####}
2 | {####### Validates the interface information, so the configuration as applied from services_interface.yml by the svc_intf role ######}
3 | {########## Block statements inherit info from the svc_tnt_val_tmpl.j2 template and pass it on to bse_fbc_val_tmpl #########}
4 |
5 | {# Adds the results of this rendered file into the svc_tnt_val_tmpl file #}
6 | {% extends ansible_network_os + "/svc_tnt_val_tmpl.j2" %}
7 |
8 | cmds:
9 | {### show port-channel summary ###}
10 | {% block show_port_channel_summary %}{% for intf in flt_svc_intf %}{% if intf['po_num'] is defined %}
11 | {{ fbc.adv.bse_intf.ec_fmt + intf.po_num |string }}:
12 | oper_status: U
13 | {% if intf['po_mode'] == 'on' %}
14 | protocol: NONE
15 | {% else %}
16 | protocol: LACP
17 | {% endif %}
18 | members:
19 | _mode: strict
20 | {{ intf.intf_num }}:
21 | mbr_status: P
22 | {% endif %}{% endfor %}{% endblock %}
23 |
24 |
25 | {### show vpc ###}
26 | {% block show_vpc %}
27 | {{ super() }}
28 | {% for intf in flt_svc_intf %}{% if intf.vpc_num is defined %}
29 | {{ intf.intf_num | replace(fbc.adv.bse_intf.ec_fmt,fbc.adv.bse_intf.ec_short) }}:
30 | consistency_status: SUCCESS
31 | port_status: "1"
32 | vpc_num: "{{ intf.vpc_num }}"
33 | active_vlans: {{ intf.ip_vlan }}
34 | {% endif %}{% endfor %}{% endblock %}
35 |
36 |
37 | {### show interfaces_trunk ###}
38 | {% block show_int_trunk %}
39 | {{ super() }}
40 | {% for intf in flt_svc_intf %}{% if 'trunk' in intf.type %}
41 | {{ intf.intf_num }}:
42 | allowed_vlans: {{ intf.ip_vlan }}
43 | {% if intf.po_num is not defined %}
44 | stpfwd_vlans: {{ intf.ip_vlan }}
45 | {% else %}
46 | stpfwd_vlans: none
47 | {% endif %}{% endif %}{% endfor %}
48 | {% endblock %}
49 |
50 |
51 | {### show ip int brief include-secondary vrf all ###}
52 | {% block show_ip_int_brief %}
53 | {{ super() }}
54 | {% for intf in flt_svc_intf %}{% if intf.type == "layer3" %}
55 | {{ intf.intf_num | replace('Ethernet','Eth') }}:
56 | prefix: {{ intf.ip_vlan |ipaddr('address') }}
57 | tenant: {{ intf.tenant }}
58 | proto-state: up
59 | link-state: up
60 | admin-state: up
61 | {% endif %}{% endfor %}
62 | {% endblock %}
63 |
64 |
65 | {### show interface status ###}
66 | {% block show_int_status%}{% if bse.device_name.spine not in inventory_hostname %}
67 | - show interface status:
68 | {% for intf in flt_svc_intf %}
69 | {% if 'Port-channel' in intf.intf_num %}
70 | {{ intf.intf_num | replace('Port-channel','port-channel') }}:
71 | {% else %}
72 | {{ intf.intf_num }}:
73 | {% endif %}
74 | name: {{ intf.descr }}
75 | state: connected
76 | {% if intf.type == "layer3" %}
77 | vlan: routed
78 | {% elif intf.type == "access" %}
79 | vlan: "{{ intf.ip_vlan }}"
80 | {% else %}
81 | vlan: trunk
82 | {% endif %}{% endfor %}
83 | {% endif %}{% endblock %}
--------------------------------------------------------------------------------
/build_fabric/roles/validate/templates/nxos/svc_tnt_val_tmpl.j2:
--------------------------------------------------------------------------------
1 | {##### From the input variables generates a validation file (expected state) used by custom_validate to validate actual state #####}
2 | {####### Validates the tenant information, so the configuration as applied from services_tenant.yml by the svc_tnt role ######}
3 | {########## Block statements inherit info from svc_intf_val_tmpl.j2 template and pass it to bse_fbc_val_tmpl #########}
4 |
5 | {#### Logic to decide which set of variables in flt_svc_tnt to render dependant on device-role ####}
6 | {% if bse.device_name.leaf in inventory_hostname %}
7 | {% set flt_vars = flt_svc_tnt[0] %}{% set stp_fwd_vlans = flt_svc_tnt[2] %}
8 | {% elif bse.device_name.border in inventory_hostname %}
9 | {% set flt_vars = flt_svc_tnt[1] %}{% set stp_fwd_vlans = flt_svc_tnt[3] %}
10 | {% endif %}
11 |
12 | {# Adds the results of this rendered file into the bse_fbc_val_tmpl file #}
13 | {% extends ansible_network_os + "/bse_fbc_val_tmpl.j2" %}
14 |
15 |
16 | {### show port-channel summary ###}
17 | {% block show_port_channel_summary %}
18 | {% endblock %}
19 |
20 |
21 | {### show vpc ###}
22 | {% block show_vpc %}
23 | peer-link_vlans: {{ stp_fwd_vlans }}
24 | {% endblock %}
25 |
26 |
27 | {### show interfaces_trunk ###}
28 | {% block show_int_trunk %}
29 | stpfwd_vlans: {{ stp_fwd_vlans }}
30 | {% endblock %}
31 |
32 |
33 | {### show ip int brief include-secondary vrf all ###}
34 | {% block show_ip_int_brief %}{% if bse.device_name.spine not in inventory_hostname %}
35 | {% for flt_tnt in flt_vars %}{% if flt_tnt.l3_tnt is sameas true %}
36 | {% for vl in flt_tnt.vlans %}
37 | {% if vl.ip_addr == 'l3_vni' or vl.ip_addr |ipaddr('address') != False %}
38 | Vlan{{ vl.num }}:
39 | {% if vl.ip_addr != 'l3_vni' %}
40 | prefix: {{ vl.ip_addr |ipaddr('address') }}
41 | {% endif %}
42 | tenant: {{ flt_tnt.tnt_name }}
43 | proto-state: up
44 | link-state: up
45 | admin-state: up
46 | {% endif %}{% endfor %}{% endif %}{% endfor %}
47 | {% endif %}{% endblock %}
48 |
49 |
50 | {### show nve peers ###}
51 | {% block show_nve_peers %}{% if bse.device_name.spine not in inventory_hostname %}
52 | - show nve peers:
53 | {% for host in groups[bse.device_name.leaf.split('-')[-1].lower()] + groups[bse.device_name.border.split('-')[-1].lower()] %}
54 | {# Gets VTEP loopbacks for all devices except own #}
55 | {% if host != inventory_hostname %}
56 | {{ hostvars[host].intf_lp[1].ip |ipaddr('address') }}:
57 | peer-state: Up
58 | {# Secondary IP is shared so is only needed from one of each VPC pair (the odd numbered device), excluding this device's own pair #}
59 | {% if host[-2:]|int is odd %}{% if hostvars[host].intf_lp[1].mlag_lp_addr != hostvars[inventory_hostname].intf_lp[1].mlag_lp_addr %}
60 | {{ hostvars[host].intf_lp[1].mlag_lp_addr |ipaddr('address') }}:
61 | peer-state: Up
62 | {% endif %}{% endif %}
63 | {% endif %}{% endfor %}
64 | {% endif %}{% endblock %}
65 |
66 |
67 | {### show nve vni ###}
68 | {% block show_nve_vni %}{% if bse.device_name.spine not in inventory_hostname %}
69 | - show nve vni:
70 | _mode: strict
71 | {% for flt_tnt in flt_vars %}{% for vl in flt_tnt.vlans %}
72 | {% if vl.ip_addr != 'l3_vni' %}
73 | "{{ vl.vni }}":
74 | type: L2 [{{ vl.num }}]
75 | state: Up
76 | {% endif %}{% if flt_tnt.l3_tnt is sameas true and vl.ip_addr == 'l3_vni' %}
77 | "{{ vl.vni }}":
78 | type: L3 [{{ flt_tnt.tnt_name }}]
79 | state: Up
80 | {% endif %}{% endfor %}{% endfor %}
81 | {% endif %}{% endblock %}
82 |
83 |
84 | {### show interface status ###}
85 | {% block show_int_status%}
86 | {% endblock %}
87 |
--------------------------------------------------------------------------------
/build_fabric/ssh_keys/ssh_hosts:
--------------------------------------------------------------------------------
1 | [all]
2 | 10.10.108.11
3 | 10.10.108.12
4 | 10.10.108.21
5 | 10.10.108.22
6 | 10.10.108.16
7 | 10.10.108.17
--------------------------------------------------------------------------------
/build_fabric/ssh_keys/ssh_key_add.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Collects SSH keys from all hosts and adds them to the known_hosts file so you don't have to accept fingerprints
3 |
4 | - name: Collect and add all SSH keys to known_hosts
5 | hosts: all
6 | connection: local
7 | tasks:
8 | - name: SYS >> Collecting all SSH keys
9 | # Using the command module to run ssh-keyscan locally (ssh-keyscan is a Linux program)
10 | command: "ssh-keyscan {{ ansible_host|default(inventory_hostname) }}"
11 | register: "host_keys"
12 | changed_when: false
13 | # To make sure that we get a ssh_key back from each device
14 | - assert:
15 | that: host_keys.stdout
16 | msg: "We did not get SSH key for {{ inventory_hostname }}"
17 |
18 | # SSH keys are added to the file surrounded by the text from marker
19 | - blockinfile:
20 | dest: "~/.ssh/known_hosts"
21 | marker: "##### {mark} This part managed by Ansible #####"
22 | # Has to be inline Jinja rather than a template as blockinfile doesn't support templates
23 | block: |
24 | {% for h in groups['all'] if hostvars[h].host_keys is defined and hostvars[h].host_keys.stdout is defined %}
25 | {{ hostvars[h].host_keys.stdout }}
26 | {% endfor %}
27 | # delegate_to: localhost
--------------------------------------------------------------------------------
/build_fabric/templates/input.yml:
--------------------------------------------------------------------------------
1 | # For testing
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | # fbc:
10 | # adv:
11 | # # Seed interfaces used to create the fabric. These are the first interfaces used, the inventory pluggin increments them
12 | # bse_intf:
13 | # intf_fmt: Ethernet1/ # Switch interface naming format
14 | # intf_short: Eth1/ # Used in descritions of interfaces
15 | # ec_fmt: Port-channel # LAG interface naming format
16 | # ec_short: Po # Used in descritions of LAG interfaces
17 | # sp_to_lf: 1 # First interface used for SPINE to LEAF links (1 to 10)
18 | # sp_to_bdr: 11 # First interface used for SPINE to BORDER links (11 to 44)
19 | # lf_to_sp: 1 # First interface used LEAF to SPINE links (1 to 4)
20 | # bdr_to_sp: 1 # First interface used BORDER to SPINE links (1 to 4)
21 | # mlag_peer: 14-15
22 |
23 | # mlag:
24 | # domain: 1 # MLAG Domain number
25 | # peer_po: 1 # Port-channel used for Peer Link
26 | # peer_vlan: 2
27 |
28 |
29 |
30 |
31 |
32 |
33 | # bse:
34 | # services:
35 | # domain: 'stesworld.com'
36 |
37 | # fbc:
38 | # adv:
39 | # bse_intf:
40 | # intf_fmt: Ethernet1/
41 | # intf_short: Eth1/
42 |
43 | # hostvars:
44 | # DC1-N9K-BORDER01:
45 | # ansible_host: "10.10.108.16"
46 | # ansible_network_os: "nxos"
47 | # intf_fbc:
48 | # Ethernet1/1: "UPLINK > DC1-N9K-SPINE01 Eth1/11"
49 | # Ethernet1/2: "UPLINK > DC1-N9K-SPINE02 Eth1/11"
50 | # intf_lp:
51 | # - descr: "LP > Routing protocol RID and peerings"
52 | # ip: "192.168.100.16/32"
53 | # name: "loopback1"
54 | # - descr: "LP > VTEP Tunnels (PIP) and MLAG (VIP)"
55 | # ip: "192.168.100.36/32"
56 | # mlag_lp_addr: "192.168.100.56/32"
57 | # name: "loopback2"
58 | # - descr: "LP > BGW anycast address"
59 | # ip: "192.168.100.58/32"
60 | # name: "loopback3"
61 | # intf_mlag:
62 | # Ethernet1/14: "MLAG peer-link > DC1-N9K-BORDER02 Eth1/14"
63 | # Ethernet1/15: "MLAG peer-link > DC1-N9K-BORDER02 Eth1/15"
64 | # Port-channel1: "MLAG peer-link > DC1-N9K-BORDER02 Po1"
65 | # mlag_peer_ip: "10.255.255.11/31"
66 | # DC1-N9K-BORDER02:
67 | # ansible_host: "10.10.108.17"
68 | # ansible_network_os: "nxos"
69 | # intf_fbc:
70 | # Ethernet1/1: "UPLINK > DC1-N9K-SPINE01 Eth1/12"
71 | # Ethernet1/2: "UPLINK > DC1-N9K-SPINE02 Eth1/12"
72 | # intf_lp:
73 | # - descr: "LP > Routing protocol RID and peerings"
74 | # ip: "192.168.100.17/32"
75 | # name: "loopback1"
76 | # - descr: "LP > VTEP Tunnels (PIP) and MLAG (VIP)"
77 | # ip: "192.168.100.37/32"
78 | # mlag_lp_addr: "192.168.100.56/32"
79 | # name: "loopback2"
80 | # - descr: "LP > BGW anycast address"
81 | # ip: "192.168.100.58/32"
82 | # name: "loopback3"
83 | # intf_mlag:
84 | # Ethernet1/14: "MLAG peer-link > DC1-N9K-BORDER01 Eth1/14"
85 | # Ethernet1/15: "MLAG peer-link > DC1-N9K-BORDER01 Eth1/15"
86 | # Port-channel1: "MLAG peer-link > DC1-N9K-BORDER01 Po1"
87 | # mlag_peer_ip: "10.255.255.12/31"
88 | # DC1-N9K-LEAF01:
89 | # ansible_host: "10.10.108.21"
90 | # ansible_network_os: "nxos"
91 | # intf_fbc:
92 | # Ethernet1/1: "UPLINK > DC1-N9K-SPINE01 Eth1/1"
93 | # Ethernet1/2: "UPLINK > DC1-N9K-SPINE02 Eth1/1"
94 | # intf_lp:
95 | # - descr: "LP > Routing protocol RID and peerings"
96 | # ip: "192.168.100.21/32"
97 | # name: "loopback1"
98 | # - descr: "LP > VTEP Tunnels (PIP) and MLAG (VIP)"
99 | # ip: "192.168.100.41/32"
100 | # mlag_lp_addr: "192.168.100.51/32"
101 | # name: "loopback2"
102 | # intf_mlag:
103 | # Ethernet1/14: "MLAG peer-link > DC1-N9K-LEAF02 Eth1/14"
104 | # Ethernet1/15: "MLAG peer-link > DC1-N9K-LEAF02 Eth1/15"
105 | # Port-channel1: "MLAG peer-link > DC1-N9K-LEAF02 Po1"
106 | # mlag_peer_ip: "10.255.255.1/31"
107 | # DC1-N9K-LEAF02:
108 | # ansible_host: "10.10.108.22"
109 | # ansible_network_os: "nxos"
110 | # intf_fbc:
111 | # Ethernet1/1: "UPLINK > DC1-N9K-SPINE01 Eth1/2"
112 | # Ethernet1/2: "UPLINK > DC1-N9K-SPINE02 Eth1/2"
113 | # intf_lp:
114 | # - descr: "LP > Routing protocol RID and peerings"
115 | # ip: "192.168.100.22/32"
116 | # name: "loopback1"
117 | # - descr: "LP > VTEP Tunnels (PIP) and MLAG (VIP)"
118 | # ip: "192.168.100.42/32"
119 | # mlag_lp_addr: "192.168.100.51/32"
120 | # name: "loopback2"
121 | # intf_mlag:
122 | # Ethernet1/14: "MLAG peer-link > DC1-N9K-LEAF01 Eth1/14"
123 | # Ethernet1/15: "MLAG peer-link > DC1-N9K-LEAF01 Eth1/15"
124 | # Port-channel1: "MLAG peer-link > DC1-N9K-LEAF01 Po1"
125 | # mlag_peer_ip: "10.255.255.2/31"
126 | # DC1-N9K-SPINE01:
127 | # ansible_host: "10.10.108.11"
128 | # ansible_network_os: "nxos"
129 | # intf_fbc:
130 | # Ethernet1/1: "UPLINK > DC1-N9K-LEAF01 Eth1/1"
131 | # Ethernet1/11: "UPLINK > DC1-N9K-BORDER01 Eth1/1"
132 | # Ethernet1/12: "UPLINK > DC1-N9K-BORDER02 Eth1/1"
133 | # Ethernet1/2: "UPLINK > DC1-N9K-LEAF02 Eth1/1"
134 | # intf_lp:
135 | # - descr: "LP > Routing protocol RID and peerings"
136 | # ip: "192.168.100.11/32"
137 | # name: "loopback1"
138 | # DC1-N9K-SPINE02:
139 | # ansible_host: "10.10.108.12"
140 | # ansible_network_os: "nxos"
141 | # intf_fbc:
142 | # Ethernet1/1: "UPLINK > DC1-N9K-LEAF01 Eth1/2"
143 | # Ethernet1/11: "UPLINK > DC1-N9K-BORDER01 Eth1/2"
144 | # Ethernet1/12: "UPLINK > DC1-N9K-BORDER02 Eth1/2"
145 | # Ethernet1/2: "UPLINK > DC1-N9K-LEAF02 Eth1/2"
146 | # intf_lp:
147 | # - descr: "LP > Routing protocol RID and peerings"
148 | # ip: "192.168.100.12/32"
149 | # name: "loopback1"
150 |
151 | # all:
152 | # children:
153 | # - "border"
154 | # - "leaf"
155 | # - "spine"
156 | # - "ungrouped"
157 | # border:
158 | # hosts:
159 | # - "DC1-N9K-BORDER01"
160 | # - "DC1-N9K-BORDER02"
161 | # leaf:
162 | # hosts:
163 | # - "DC1-N9K-LEAF01"
164 | # - "DC1-N9K-LEAF02"
165 | # spine:
166 | # hosts:
167 | # - "DC1-N9K-SPINE01"
168 | # - "DC1-N9K-SPINE02"
--------------------------------------------------------------------------------
/build_fabric/templates/render_jinja.py:
--------------------------------------------------------------------------------
1 | # From http://networkbit.ch/python-jinja-template/
2 | # Used to render a yaml file with a jinja2 template and print the output - good for testing Ansible
3 | # Run the script using "python render_jinja.py input.yml template.j2"
4 |
5 | from sys import argv #Imports argv so that can enter values when run the script
6 | from jinja2 import Environment, FileSystemLoader #Imports from Jinja2
7 | import yaml #Import YAML from PyYAML
8 |
9 | #Variables created when the script is run
10 | script, yaml_input, jinja_template = argv
11 |
12 | #Loads data from YAML file into Python dictionary
13 | # config = yaml.load(open(yaml_input))
14 | config = yaml.load(open(yaml_input), Loader=yaml.FullLoader)
15 |
16 | #Loads the Jinja2 template
17 | env = Environment(loader=FileSystemLoader('./'), trim_blocks=True, lstrip_blocks=True)
18 | template = env.get_template(jinja_template)
19 |
20 | #Render template using data and prints the output to screen
21 | print(template.render(config))
--------------------------------------------------------------------------------
/build_fabric/templates/template.j2:
--------------------------------------------------------------------------------
1 | {# For testing #}
--------------------------------------------------------------------------------
/build_fabric/unit_test/base.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Variables used to create core elements of the device configs ################
3 |
4 | # The naming structure that is added before the automatically generated node number (0x). Groups are created based on the name (i.e spine, border, leaf)
5 | bse:
6 | device_name: # Must contain - and characters after - must be either letters, digits or underscore as this is used as the group name
7 | spine: 'DC1-N9K-h!'
8 | border: 'DC1-N9K-BORDER'
9 | leaf: 'DC1-N9K-LEAF'
10 | # Ranges from which device addresses are created from. Must have the mask in prefix format (/)
11 | addr:
12 | lp_net: '192.168.100.0/35' # Routing (OSPF/BGP), VTEP and VPC addresses. By default will use .11 to .59
13 | mgmt_net: '10.10.108.1/24' # Needs to be at least /27 to cover max spine (4), leafs (10) and borders (4)
14 | mlag_net: '10.255.300.0/28' # VPC peer link addresses. Needs to be at least /28 to cover max leafs (10) and borders (4)
15 | srv_ospf_net: 'J.255.255.16/28' # Non-core OSPF process peerings between border switches (4 addresses per OSPF process)
16 |
17 | users:
18 | - username: admin
19 | password: testing # Passwords must be entered encrypted type-5
20 | role: network-admin
21 | - username:
22 | password: 123456789ten111213141516171819twenty
23 | role: network-admin
24 |
25 | # Details for all the services that the switches consume
26 | services:
27 | domain: 'stesworld.com'
28 | src_int: loopback1 # Used for any control plane functions
29 | dns:
30 | prim: 10.10.10.41
31 | sec: 10.10.10.42
32 | tacacs:
33 | grp_name: ISE_TACACS
34 | key: vagzjefjq # Must be entered encrypted type-6
35 | servers:
36 | - 10.10.10.51
37 | - 10.10.10.52
38 | - 10.10.10.53
39 | snmp:
40 | host: 10.10.10.43
41 | comm: 5NMPC0MMUN1TY
42 | ntp:
43 | server:
44 | - 10.10.10.45
45 | - 10.10.20.46
46 | log:
47 | server:
48 | - 10.10.10.47
49 | - 10.10.20.48
50 |
51 | # Managament Access-lists
52 | mgmt_acl:
53 | - acl_name: SNMP_ACCESS
54 | source: [10.10.20.43/24, 10.10.10.43/24]
55 | port: [udp, snmp]
56 | - acl_name: SSH_ACCESS
57 | source: [10.10.10.0/24, 10.255.254.0/24, 10.10.108.0/24, 192.168.255.0/24]
58 | port: [tcp, 22]
59 |
60 | # Advanced base configuration that is less likely to be changed
61 | adv:
62 | image: nxos.9.2.4.bin
63 | image_name: 9.2(4) # See caveats in README, if not correct checkpoint config_repalce will fail on NXOS
64 | exec_timeout:
65 | console: 0
66 | vty: 15
--------------------------------------------------------------------------------
/build_fabric/unit_test/fabric.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Variables used to decide how the fabric will look ################
3 |
4 | # This Only scales to 4 spines, 10 leafs, 4 borders. By default the following ports are used:
5 | # SPINE-to-LEAF = Eth1/1 - 1/10 SPINE-to-Border = Eth1/11 - 1/15
6 | # LEAF-to-SPINE = Eth1/1 - 1/5 BORDER-to-SPINE: = Eth1/1 - 1/5
7 | # MLAG Peer-link = Eth1/127 - 128 MLAG keepalive = mgmt
8 |
9 | # How big the network is, so the number of each switch. border/leaf must be in increments of 2 as in MLAG pair
10 | fbc:
11 | network_size:
12 | num_spine: 5 # Can be 1 to 4
13 | num_border: 1 # Can be 0, 2 or 4
14 | num_leaf: "2" # Can be 2, 4, 6, 8 or 10
15 |
16 | # Number of interfaces on the device (first and last interface). Is needed to make interfaces declarative and default all interfaces not used
17 | num_intf:
18 | spine: 1,h
19 | border: 44,128
20 | leaf: 1,1289
21 |
22 | # To change Fabric routing protocol settings
23 | route:
24 | ospf:
25 | pro: # Can be numbered or named
26 | area: 0.0.0.267 # Must be in dotted decimal format
27 | bgp:
28 | as_num:
29 |
30 | acast_gw_mac: 000.2222:33z3 # Must be in the format xxxx.xxxx.xxxx
31 |
32 | ################ Advanced settings to further customize the fabric ################
33 |
34 | adv:
35 | # Seed interfaces used to create the fabric. These are the first interfaces used, the inventory plugin increments them
36 | bse_intf:
37 | intf_fmt: Ethernet1/ # Switch interface naming format
38 | intf_short: Eth1/ # Used in descritions of interfaces
39 | ec_fmt: Port-channel # LAG interface naming format
40 | ec_short: Po # Used in descritions of LAG interfaces
41 | sp_to_lf: 1 # First interface used for SPINE to LEAF links (1 to 10)
42 | sp_to_bdr: ee # First interface used for SPINE to BORDER links (11 to 14)
43 | lf_to_sp: 1 # First interface used LEAF to SPINE links (1 to 4)
44 | bdr_to_sp: # First interface used BORDER to SPINE links (1 to 4)
45 | mlag_peer: 11d12 # Interfaces used for the MLAG peer Link (will be in the MLAG LAG)
46 |
47 | # Loopback interfaces to be used, numbers and descriptions can be changed. As per best practise one per function.
48 | lp:
49 | rtr:
50 | loopback1: LP > Routing protocol RID and peerings
51 | vtep:
52 | loopback2: LP > VTEP Tunnels (PIP) and MLAG (VIP)
53 | bgw:
54 | loopback1: LP > BGW anycast address # Added now incase add multisite to the script
55 |
56 | # All MLAG specific settings except for peer Link interfaces (bse.adv.base_intf.mlag_peer) and subnet (bse.addr.mlag_net)
57 | mlag:
58 | domain: 1 # MLAG Domain number
59 | peer_po: "2" # Port-channel used for Peer Link
60 | peer_vlan: data # VLAN used for Peer Link and OSPF peering
61 |
62 | # The increment that is added to the subnet and device hostname number to generate the unique last octet of the IP addresses
63 | addr_incre:
64 | spine_ip: 11 # SPINE mgmt IP and routing loopback addresses will be from .11 to .14
65 | border_ip: "16" # BORDER mgmt IP and routing loopback addresses will be from .16 to .19
66 | leaf_ip: # LEAF mgmt IP and routing loopback addresses will be from .21 to .30
67 | border_vtep_lp: 41 # BORDER VTEP loopback addresses will be from .36 to .39
68 | leaf_vtep_lp: 41 # LEAF VTEP loopback addresses will be from .41 to .50
69 | border_mlag_lp: five # Pair of BORDER MLAG shared loopback addresses (VIP) will be from .56 to .57
70 | leaf_mlag_lp: 51 # Pair of LEAF MLAG shared loopback addresses (VIP) will be from .51 to .55
71 | border_bgw_lp: 51 # Pair of BORDER BGW shared anycast loopback addresses will be from .58 to .59
72 | mlag_leaf_ip: 0 # Start IP for LEAF Peer Links, so LEAF1 is .0, LEAF2 .1, LEAF3 .2, etc
73 | mlag_border_ip: 10 # Start IP for BORDER Peer Links, so BORDER1 is .10, BORDER2 .11, etc
--------------------------------------------------------------------------------
/build_fabric/unit_test/service_interface.yml:
--------------------------------------------------------------------------------
1 | ###################### Service: Device Ports ######################
2 |
3 | #### Base variables ####
4 | # If not using single-homed or dual-homed interfaces make sure the dictionary (*single_homed* or *dual_homed*) is hashed out.
5 |
6 | svc_intf:
7 | intf:
8 | single_homed:
9 | - descr: L3 > DC1-ASAv-XFW01 eth1
10 | type: layer3
11 | tenant: RED
12 | ip_vlan: 10.255.99.1/30
13 | switch: DC1-N9K-BORDER01
14 | intf_num: one
15 | - descr: L3 > DC1-ASAv-XFW02 eth1
16 | type: layer3
17 | tenant: BLU
18 | ip_vlan: 10.255.99.256/30
19 | switch: DC1-N9K-BORDER02
20 | - descr: L3 > DC1-SRV-MON01 nic1
21 | type: layer3
22 | tenant: BLU
23 | ip_vlan: 10.100.100.21/33
24 | switch: DC1-N9K-LEAF01
25 | - descr: L3 > DC1-SRV-MON05 nic1
26 | type: layer3
27 | tenant: RED
28 | ip_vlan: 10.100.100.21/33
29 | switch: DC1-N9K-LEAF01
30 | - descr: ACCESS > DC1-SRV-APP01 eth1
31 | type: access
32 | ip_vlan: 10
33 | switch: DC1-N9K-LEAF02
34 | - descr: UPLINK > DC1-VIOS-SW3
35 | type: stp_trunk
36 | ip_vlan: 110,one
37 | switch: DC1-N9K-LEF01
38 | - descr: UPLINK > DC1-VIOS-SW4
39 | type: stp_trunk_non_ba
40 | ip_vlan: "30"
41 | switch: DC1-N9K-LEAF01
42 | - descr: ACCESS >DC1-LTM-LB02
43 | type: non_stp_trunk
44 | ip_vlan: 30
45 | switch: DC1-N9K-LEAF02
46 |
47 | dual_homed:
48 | - descr: ACCESS >DC1-SRV-APP01 eth1
49 | type: access
50 | ip_vlan: Ten
51 | switch: DC1-N9K-LEAF01
52 | - descr: ACCESS >DC1-SRV-PRD01 eth1
53 | type: layer3
54 | ip_vlan: 20
55 | switch: DC1-N9K-LEAF01
56 | intf_num: 45
57 | po_num: four
58 | po_mode: act
59 | - descr: UPLINK > DC1-VIOS-SW1
60 | type: stp_trunk
61 | ip_vlan: 110, 120
62 | switch: DC1-N9K-LEAF01
63 | - descr: UPLINK > DC1-VIOS-SW2
64 | type: stp_trunk_non_ba
65 | ip_vlan: 30
66 | switch: DC1-N9K-LEAF01
67 | intf_num: 15
68 | - descr: ACCESS >DC1-LTM-LB01
69 | type: non_stp_trunk
70 | ip_vlan: 30
71 | switch: DC1-N9K-LEAF01
72 | intf_num: 25
73 | - descr: UPLINK > DC1-LTM-ESX1
74 | type: non_stp_trunk
75 | ip_vlan: 10,20,24,30
76 | switch: DC1-N9K-LEAF01
77 | po_num: 66
78 | - descr: UPLINK > DC1-VIOS-DMZ01
79 | type: stp_trunk_non_ba
80 | ip_vlan: 110-112,120,10
81 | switch: DC1-N9K-BORDER02
82 | - descr: UPLINK > DC1-VIOS-DMZ01
83 | type: stp_trunk_non_ba
84 | ip_vlan: 110-112,120,110
85 | switch: DC1-N9K-LEAF01
86 |
87 | #### Advanced variables ####
88 | # Reserved interface ranges that server ports can be automatically assigned from (applies to all leaf and border switches)
89 |
90 | adv:
91 | single_homed: # Used only for single-homed devices
92 | first_intf: 33 # First interface
93 | last_intf: 34 # Last interface
94 | dual_homed: # Used only for dual-homed devices
95 | first_intf: 13 # First interface
96 | last_intf: 15 # Last interface
97 | first_po: 31 # First PortChannel used
98 | last_po: 32 # last PortChannel used
99 |
100 |
101 |
--------------------------------------------------------------------------------
/build_fabric/unit_test/service_tenant.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###################### Service: Tenant ######################
3 | # VLANs will only be Layer 2 unless an ip address is assigned to them.
4 | # VRFs will only be created on a border or leaf if a VLAN within that VRF is to be created on that device type
5 | # Even if it is not a L3_tenant still creates VRF and reserves the L3VNI/VLAN (stops renumbering if made L3_tenant in the future)
6 |
7 | # At a minimum per-tenant you need:
8 | # - tenant_name: Name Name of the VRF
9 | # l3_tenant: True or False Does it need SVIs or is routing done on a device external to the L&S fabric (i.e router)
10 | # vlans: VLANs within this tenant
11 | # - num: Number VLAN number
12 | # name: Name VLAN Name
13 |
14 | # These settings are optional and need not be set. Setting them overrides the explicit default values
15 | # ip_addr: x.x.x.x/24 Adding an IP address makes it a L3 VLAN (default L2 only)
16 | # ipv4_bgp_redist: True or False Whether the SVI is redistributed into IPv4 BGP addr-fam (default True unless no ip_addr in which case is False)
17 | # create_on_leaf: True or False Whether this VLAN is created on the leafs (default True)
18 | # create_on_border: True or False Whether this VLAN is created on the borders (default False)
19 |
20 | #### Base variables ####
21 | svc_tnt:
22 | tnt:
23 | - tenant_name: BLU
24 | l3_tenant: True
25 | vlans:
26 | - num: 10
27 | name: data
28 | ip_addr: 10.10.10.1/24
29 | - num: dev
30 | name: amb-mail
31 | ip_addr: 10.10.12.1/24
32 | - num: 24
33 | name: servers
34 | ip_addr: 10.10.11.1/24
35 | - num: 40
36 | name: lb_vips
37 | ip_addr: 10.10.256.1/24
38 | ipv4_bgp_redist: Fals
39 | - num: 40
40 | name: ha_keepalive
41 | create_on_leaf: False
42 | - num: 110
43 | name: grn-web
44 |
45 | - tenant_name:
46 | l3_tenant: Tru
47 | vlans:
48 | - num: 110
49 | name: grn-web
50 | ip_addr: 10.250.110.1/24
51 | create_on_border: True
52 | ipv4_bgp_redist: False
53 | - num: 120
54 | name: grn-mail
55 | ip_addr: 10.250.120.1/35
56 | create_on_border: Tru
57 | ipv4_bgp_redist: False
58 |
59 | - tenant_name: AMB
60 | l3_tenant: False
61 | vlans:
62 | - num: 210
63 | name: amb-web
64 | create_on_border: True
65 | - num: 210
66 | name: amb-mail
67 | create_on_border: True
68 |
69 | - tenant_name: RED
70 | l3_tenant: "False"
71 | vlans:
72 | - num: 90
73 | name: red-ctt1
74 | create_on_border: True
75 | create_on_leaf: False
76 | - num: 91
77 | name:
78 | create_on_border: True
79 | create_on_leaf: Fals
80 |
81 | #### Advanced variables ####
82 | # Each L3 Tenant requires a VLAN and VNI. These are automatically generated by incrementing the L3VNI base_vlan and base_vni
83 | # Each tenant vlan requires a VNI. These are formed of an increment of 10000 per-tenant with each VLAN number added to this
84 |
85 | adv:
86 | bse_vni:
87 | tnt_vlan: test # Starting VLAN number for transit L3VNI
88 | l3vni: 3001 # Starting VNI number for transit L3VNI
89 | l2vni: "10000" # Start L2VNI and the range to add to each tenants vlan.
90 | vni_incre:
91 | tnt_vlan: 1 # Value by which to increase transit L3VNI VLAN number for each tenant
92 | l3vni: 1 # Value by which to increase transit L3VNI VNI number for each tenant
93 | l2vni: 10000 # Value by which to increase the L2VNI range used (range + vlan) for each tenant
94 |
95 | bgp:
96 | ipv4_redist_rm_name: rm_CONN_vrf>>BGPa # Can change route-map name, but it MUST still include 'vrf' and 'as' in the text
--------------------------------------------------------------------------------
/build_fabric/vars/ansible.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Login and Ansible settings (normally would be in all.yml) ################
3 |
4 | # Python location on the Ansible host (operating system specific)
5 | ansible_python_interpreter: "/usr/bin/env python"
6 | # Username and password used by Ansible modules
7 | ansible_user: "{{ ans.creds_all.username }}"
8 | ansible_ssh_pass: "{{ ans.creds_all.password }}"
9 |
10 | # Non-Ansible core variables used in the playbook, so ones that are prefixed with ans.
11 | ans:
12 | # Base directory Location to store the generated configuration snippets
13 | dir_path: ~/device_configs
14 |
15 | # Connection Variables
16 | creds_all: # Napalm
17 | hostname: "{{ ansible_host|default(inventory_hostname) }}"
18 | username: admin
19 | password: ansible
20 |
21 | # Operating system type
22 | device_type:
23 | spine_os: nxos
24 | border_os: nxos
25 | leaf_os: nxos
26 |
27 |
--------------------------------------------------------------------------------
/build_fabric/vars/base.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Variables used to create core elements of the device configs ################
3 |
4 | # The naming structure that is added before the automatically generated node number (0x). Groups are created based on the name (i.e spine, border, leaf)
5 | bse:
6 | device_name: # Must contain - and characters after - must be either letters, digits or underscore as this is used as the group name
7 | spine: 'DC1-N9K-SPINE'
8 | border: 'DC1-N9K-BORDER'
9 | leaf: 'DC1-N9K-LEAF'
10 | # Ranges from which device addresses are created from. Must have the mask in prefix format (/)
11 | addr:
12 | lp_net: '192.168.100.0/32' # Routing (OSPF/BGP), VTEP and VPC addresses. By default will use .11 to .59
13 | mgmt_net: '10.10.108.0/24' # Needs to be at least /27 to cover max spine (4), leafs (10) and borders (4)
14 | mlag_net: '10.255.255.0/28' # VPC peer link addresses. Needs to be at least /28 to cover max leafs (10) and borders (4)
15 | srv_ospf_net: '10.255.255.16/28' # Non-core OSPF process peerings between border switches (4 addresses per OSPF process)
16 |
17 | users:
18 | - username: admin
19 | password: $5$ugYwyCgs$CSnUuaIxlxXHRw/Nq3hKU9gfkA8Y2fYHiTZeDFSXik3 # Passwords must be entered encrypted type-5
20 | role: network-admin
21 |
22 | # Details for all the services that the switches consume
23 | services:
24 | domain: 'stesworld.com'
25 | src_int: loopback1 # Used for any control plane functions
26 | dns:
27 | prim: 10.10.10.41
28 | sec: 10.10.10.42
29 | tacacs:
30 | grp_name: ISE_TACACS
31 | key: vagzjefjq # Must be entered encrypted type-6
32 | servers:
33 | - 10.10.10.51
34 | - 10.10.10.52
35 | - 10.10.10.53
36 | snmp:
37 | host: 10.10.10.43
38 | comm: 5NMPC0MMUN1TY
39 | ntp:
40 | server:
41 | - 10.10.10.45
42 | - 10.10.20.46
43 | log:
44 | server:
45 | - 10.10.10.47
46 | - 10.10.20.48
47 |
48 | # Management Access-lists
49 | mgmt_acl:
50 | - acl_name: SNMP_ACCESS
51 | source: [10.10.20.43/24, 10.10.10.43/24]
52 | port: [udp, snmp]
53 | - acl_name: SSH_ACCESS
54 | source: [10.10.10.0/24, 10.255.254.0/24, 10.10.108.0/24, 192.168.255.0/24]
55 | port: [tcp, 22]
56 |
57 | # Advanced base configuration that is less likely to be changed
58 | adv:
59 | image: nxos.9.2.4.bin
60 | image_name: 9.2(4) # See caveats in README, if not correct checkpoint config_replace will fail on NXOS
61 | exec_timeout:
62 | console: 0
63 | vty: 15
--------------------------------------------------------------------------------
/build_fabric/vars/fabric.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Variables used to decide how the fabric will look ################
3 |
4 | # This Only scales to 4 spines, 10 leafs, 4 borders. By default the following ports are used:
5 | # SPINE-to-LEAF = Eth1/1 - 1/10 SPINE-to-Border = Eth1/11 - 1/15
6 | # LEAF-to-SPINE = Eth1/1 - 1/5 BORDER-to-SPINE: = Eth1/1 - 1/5
7 | # MLAG Peer-link = Eth1/127 - 128 MLAG keepalive = mgmt
8 |
9 | # How big the network is, so the number of each switch. border/leaf must be in increments of 2 as in MLAG pair
10 | fbc:
11 | network_size:
12 | num_spine: 2 # Can be 1 to 4
13 | num_border: 2 # Can be 0, 2 or 4
14 | num_leaf: 2 # Can be 2, 4, 6, 8 or 10
15 |
16 | # Number of interfaces on the device (first and last interface). Is needed to make interfaces declarative and default all interfaces not used
17 | num_intf:
18 | spine: 1,128
19 | border: 1,128
20 | leaf: 1,128
21 |
22 | # To change Fabric routing protocol settings
23 | route:
24 | ospf:
25 | pro: 'underlay' # Can be numbered or named
26 | area: 0.0.0.0 # Must be in dotted decimal format
27 | bgp:
28 | as_num: 65001
29 |
30 | acast_gw_mac: 0000.2222.3333 # Must be in the format xxxx.xxxx.xxxx
31 |
32 | ################ Advanced settings to further customize the fabric ################
33 |
34 | adv:
35 | # Seed interfaces used to create the fabric. These are the first interfaces used, the inventory plugin increments them
36 | bse_intf:
37 | intf_fmt: Ethernet1/ # Switch interface naming format
38 | intf_short: Eth1/ # Used in descriptions of interfaces
39 | ec_fmt: port-channel # LAG interface naming format
40 | ec_short: Po # Used in descriptions of LAG interfaces
41 | lp_fmt: loopback # Loopback interface naming format
42 | sp_to_lf: 1 # First interface used for SPINE to LEAF links (1 to 10)
43 | sp_to_bdr: 11 # First interface used for SPINE to BORDER links (11 to 14)
44 | lf_to_sp: 1 # First interface used LEAF to SPINE links (1 to 4)
45 | bdr_to_sp: 1 # First interface used BORDER to SPINE links (1 to 4)
46 | mlag_peer: 11-12 # Interfaces used for the MLAG peer Link (will be in the MLAG LAG)
47 |
48 | # Loopback interfaces to be used by the fabric, numbers and descriptions can be changed. As per best practice one per function.
49 | lp:
50 | rtr:
51 | num: 1 # The loopback number, is added to fbc.adv.lp_fmt so would be loopback1
52 | descr: LP > Routing protocol RID and peerings
53 | vtep:
54 | num: 2
55 | descr: LP > VTEP Tunnels (PIP) and MLAG (VIP)
56 | bgw:
57 | num: 3 # Added now in case multisite is added to the script
58 | descr: LP > BGW anycast address
59 |
60 | # All MLAG specific settings except for peer Link interfaces (bse.adv.base_intf.mlag_peer) and subnet (bse.addr.mlag_net)
61 | mlag:
62 | domain: 1 # MLAG Domain number
63 | peer_po: 1 # Port-channel used for Peer Link
64 | peer_vlan: 2 # VLAN used for Peer Link and OSPF peering
65 |
66 | # The increment that is added to the subnet and device hostname number to generate the unique last octet of the IP addresses
67 | addr_incre:
68 | spine_ip: 11 # SPINE mgmt IP and routing loopback addresses will be from .11 to .14
69 | border_ip: 16 # BORDER mgmt IP and routing loopback addresses will be from .16 to .19
70 | leaf_ip: 21 # LEAF mgmt IP and routing loopback addresses will be from .21 to .30
71 | border_vtep_lp: 36 # BORDER VTEP loopback addresses will be from .36 to .39
72 | leaf_vtep_lp: 41 # LEAF VTEP loopback addresses will be from .41 to .50
73 | border_mlag_lp: 56 # Pair of BORDER MLAG shared loopback addresses (VIP) will be from .56 to .57
74 | leaf_mlag_lp: 51 # Pair of LEAF MLAG shared loopback addresses (VIP) will be from .51 to .55
75 | border_bgw_lp: 58 # Pair of BORDER BGW shared anycast loopback addresses will be from .58 to .59
76 | mlag_leaf_ip: 0 # Start IP for LEAF Peer Links, so LEAF1 is .0, LEAF2 .1, LEAF3 .2, etc
77 | mlag_border_ip: 10 # Start IP for BORDER Peer Links, so BORDER1 is .10, BORDER2 .11, etc
--------------------------------------------------------------------------------
/build_fabric/vars/service_interface.yml:
--------------------------------------------------------------------------------
1 | ###################### Service: Device Ports ######################
2 | # By default all interfaces are dual-homed with an LACP state of 'active'. Only the odd numbered switch needs to be specified in the variable file.
3 | # The VPC number can not be changed, it will always be the same as the port-channel number.
4 | # Interfaces and POs can be assigned from a pool or specified manually.
5 |
6 | # Are 5 types of interface that can be specified:
7 | # -access: A L2 single VLAN access port. STP is set to 'edge'
8 | # -stp_trunk: A L2 trunk port going to a device that supports STP. STP is set to 'network' so the other device must support Bridge Assurance
9 | # -stp_trunk_non_ba: Same as stp_trunk but STP is set to 'normal' for devices that don't support BA
10 | # -non_stp_trunk: A L2 trunk port going to a device that doesnt support BPDU. STP set to 'edge' and BPDU Guard enabled
11 | # -layer3: A non-switchport L3 interface with an IP address
12 | # -loopback: A loopback interface with an IP address, defined in the same way as layer3
13 |
14 | # Interfaces are defined as a dictionary value for the single_homed or dual-homed key.
15 | # At a minimum the following settings need to be configured:
16 | # single_homed: or dual-homed:
17 | # - descr: string
18 | # type: access, stp_trunk, stp_trunk_non_ba, non_stp_trunk or layer3
19 | # ip_vlan: vlan or ip Depends on the type, either ip/prefix, vlan or multiple vlans separated by ,
20 | # switch: name Name of switch to create on. If dual-homed needs to be odd switch number from MLAG pair
21 | # tenant: name Layer3 interfaces only, is the VRF the interface will be in*
22 |
23 | # To statically assign the interface and/or port-channel number (default is dynamically from a range) add either of these 2 extra dictionaries to the interface.
24 | # The values used can overlap with the dynamic interface range, however for simplicity it is advised to use a separate range for dynamic and static assignments.
25 | # - intf_num: integer Only specify the number, the name is taken from the fbc.adv.bse_intf.intf_fmt variable
26 | # - po_num: integer Only specify the number, the name is taken from the fbc.adv.bse_intf.ec_fmt variable
27 | # - po_mode: string Can optionally specify the Port-channel mode, allowed values are 'on', 'passive' or 'active'. By default if nothing is entered uses 'active'
28 |
29 | #### Base variables ####
30 | # If not using single-homed or dual-homed interfaces make sure the dictionary (*single_homed* or *dual_homed*) is hashed out.
31 |
32 | svc_intf:
33 | intf:
34 | single_homed:
35 | - descr: L3 > DC1-ASAv-XFW01 eth1
36 | type: layer3
37 | tenant: RED
38 | ip_vlan: 10.255.99.1/30
39 | switch: [DC1-N9K-BORDER01]
40 | intf_num: 41
41 | - descr: L3 > DC1-ASAv-XFW02 eth1
42 | type: layer3
43 | tenant: RED
44 | ip_vlan: 10.255.99.5/30
45 | switch: [DC1-N9K-BORDER02]
46 | - descr: L3 > DC1-SRV-MON01 nic1
47 | type: layer3
48 | tenant: BLU
49 | ip_vlan: 10.100.100.21/30
50 | switch: [DC1-N9K-LEAF01]
51 | - descr: ACCESS > DC1-SRV-APP01 eth1
52 | type: access
53 | ip_vlan: 10
54 | switch: [DC1-N9K-LEAF02]
55 | intf_num: 29
56 | - descr: UPLINK > DC1-VIOS-SW2
57 | type: stp_trunk
58 | ip_vlan: 110,120
59 | switch: [DC1-N9K-LEAF01, DC1-N9K-LEAF04, DC1-N9K-BORDER01]
60 | - descr: L3 > DC1-SRV-MON01 nic1
61 | type: layer3
62 | tenant: BLU
63 | ip_vlan: 10.100.100.25/30
64 | switch: [DC1-N9K-LEAF01]
65 | intf_num: 42
66 | - descr: LP > MCAST RP
67 | type: loopback
68 | tenant: BLU
69 | ip_vlan: 5.5.5.5/32
70 | switch: [DC1-N9K-LEAF01]
71 | intf_num: 42
72 | - descr: LP > MCAST RP
73 | type: loopback
74 | tenant: BLU
75 | ip_vlan: 6.6.6.6/32
76 | switch: [DC1-N9K-LEAF01]
77 | intf_num: 11
78 | - descr: LP > MCAST PIM
79 | type: loopback
80 | ip_vlan: 7.7.7.7/32
81 | switch: [DC1-N9K-LEAF01]
82 |
83 | dual_homed:
84 | - descr: ACCESS > DC1-SRV-PRD01 eth1
85 | type: access
86 | ip_vlan: 20
87 | switch: [DC1-N9K-LEAF01]
88 | intf_num: 45
89 | po_num: 44
90 | po_mode: on
91 | - descr: UPLINK > DC1-LTM-ESX1
92 | type: non_stp_trunk
93 | ip_vlan: 10,11,12,13,20,24,30
94 | switch: [DC1-N9K-LEAF01]
95 | - descr: UPLINK > DC1-VIOS-SW1
96 | type: stp_trunk_non_ba
97 | ip_vlan: 110
98 | switch: [DC1-N9K-BORDER01]
99 | - descr: ACCESS > DC1-LTM-LB01
100 | type: non_stp_trunk
101 | ip_vlan: 30,40
102 | switch: [DC1-N9K-LEAF01, DC1-N9K-LEAF03]
103 | intf_num: 25
104 | - descr: UPLINK > DC1-VIOS-DMZ01
105 | type: stp_trunk_non_ba
106 | ip_vlan: 210,220
107 | switch: [DC1-N9K-BORDER01]
108 | - descr: UPLINK > DC1-VIOS-SW5
109 | type: stp_trunk
110 | ip_vlan: 110-112,120
111 | switch: [DC1-N9K-LEAF01]
112 |
113 | #### Advanced variables ####
114 | # Reserved interface ranges that server ports can be automatically assigned from (applies to all leaf and border switches)
115 |
116 | adv:
117 | single_homed: # Used only for single-homed devices
118 | first_intf: 33 # First interface
119 | last_intf: 40 # Last interface
120 | first_lp: 11 # First loopback interface
121 | last_lp: 20 # Last loopback interface
122 | dual_homed: # Used only for dual-homed devices
123 | first_intf: 13 # First interface
124 | last_intf: 32 # Last interface
125 | first_po: 13 # First PortChannel used
126 | last_po: 32 # last PortChannel used
127 |
128 |
129 |
--------------------------------------------------------------------------------
/build_fabric/vars/service_tenant.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###################### Service: Tenant ######################
3 | # VLANs will only be Layer 2 unless an ip address is assigned to them.
4 | # VRFs will only be created on a border or leaf if a VLAN within that VRF is to be created on that device type
5 | # Even if it is not a L3_tenant still creates VRF and reserves the L3VNI/VLAN (stops renumbering if made L3_tenant in the future)
6 |
7 | # At a minimum per-tenant you need:
8 | # - tenant_name: Name Name of the VRF
9 | # l3_tenant: True or False Does it need SVIs or is routing done on a device external to the L&S fabric (i.e router)
10 | # vlans: VLANs within this tenant
11 | # - num: Number VLAN number
12 | # name: Name VLAN Name
13 |
14 | # These settings are optional and need not be set. Setting them overides the explicit default values
15 | # ip_addr: x.x.x.x/24 Adding an IP address makes it a L3 VLAN (default L2 only)
16 | # ipv4_bgp_redist: True or False Whether the SVI is redistributed into IPv4 BGP addr-fam (default True unless no ip_addr in which case is False)
17 | # create_on_leaf: True or False Whether this VLAN is created on the leafs (default True)
18 | # create_on_border: True or False Whether this VLAN is created on the borders (default False)
19 |
20 | #### Base variables ####
21 | svc_tnt:
22 | tnt:
23 | - tenant_name: BLU
24 | l3_tenant: True
25 | vlans:
26 | - num: 10
27 | name: data
28 | ip_addr: 10.10.10.1/24
29 | - num: 20
30 | name: dev
31 | ip_addr: 10.10.20.1/24
32 | - num: 24
33 | name: servers
34 | ip_addr: 10.10.24.1/24
35 | - num: 30
36 | name: lb_vips
37 | ip_addr: 10.10.30.1/24
38 | ipv4_bgp_redist: False
39 | - num: 40
40 | name: ha_keepalive
41 | - num: 12
42 | name: storage
43 | ip_addr: 10.10.12.1/24
44 | - num: 11
45 | name: apps
46 | ip_addr: 10.10.11.1/24
47 | - num: 13
48 | name: compute
49 | ip_addr: 10.10.13.1/24
50 |
51 | - tenant_name: GRN
52 | l3_tenant: True
53 | vlans:
54 | - num: 110
55 | name: grn-web
56 | ip_addr: 10.250.110.1/24
57 | create_on_border: True
58 | ipv4_bgp_redist: False
59 | - num: 111
60 | name: grn-svc
61 | - num: 112
62 | name: grn-mgmt
63 | - num: 120
64 | name: grn-mail
65 | ip_addr: 10.250.120.1/24
66 | create_on_border: True
67 | ipv4_bgp_redist: False
68 |
69 | - tenant_name: AMB
70 | l3_tenant: False
71 | vlans:
72 | - num: 210
73 | name: amb-web
74 | create_on_border: True
75 | - num: 220
76 | name: amb-mail
77 | create_on_border: True
78 |
79 | - tenant_name: RED
80 | l3_tenant: False
81 | vlans:
82 | - num: 90
83 | name: red-ctt1
84 | create_on_border: True
85 | create_on_leaf: False
86 | - num: 91
87 | name: red-ctt2
88 | create_on_border: True
89 | create_on_leaf: False
90 |
91 | #### Advanced variables ####
92 | # Each L3 Tenant requires a VLAN and VNI. These are automatically generated by incrementing the L3VNI base_vlan and base_vni
93 | # Each tenant vlan requires a VNI. These are formed of an increment of 10000 per-tenant with each VLAN number added to this
94 |
95 | adv:
96 | bse_vni:
97 | tnt_vlan: 3001 # Starting VLAN number for transit L3VNI
98 | l3vni: 3001 # Starting VNI number for transit L3VNI
99 | l2vni: 10000 # Start L2VNI and the range to add to each tenants vlan.
100 | vni_incre:
101 | tnt_vlan: 1 # Value by which to increase transit L3VNI VLAN number for each tenant
102 | l3vni: 1 # Value by which to increase transit L3VNI VNI number for each tenant
103 | l2vni: 10000 # Value by which to increase the L2VNI range used (range + vlan) for each tenant
104 |
105 | # service_routing.yml (svc_rtr.adv.redist) takes precedence, this is only used if it is not defined in service_routing.yml
106 | redist:
107 | rm_name: RM_src_to_dst # Name can be anything but must include src and dst as they are replaced with 'CONN' and 'BGPxx_VRF'
--------------------------------------------------------------------------------
/configurations/dc1-ltm-lb1.ucs:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/configurations/dc1-ltm-lb1.ucs
--------------------------------------------------------------------------------
/configurations/dc1-n9k-spine1:
--------------------------------------------------------------------------------
1 |
2 | !Command: show running-config
3 | !Running configuration last done at: Fri Jun 7 07:59:55 2019
4 | !Time: Sat Jun 8 13:47:05 2019
5 |
6 | version 7.0(3)I7(6) Bios:version
7 | hostname DC1-N9K-SPINE1
8 | vdc DC1-N9K-SPINE1 id 1
9 | limit-resource vlan minimum 16 maximum 4094
10 | limit-resource vrf minimum 2 maximum 4096
11 | limit-resource port-channel minimum 0 maximum 511
12 | limit-resource u4route-mem minimum 248 maximum 248
13 | limit-resource u6route-mem minimum 96 maximum 96
14 | limit-resource m4route-mem minimum 58 maximum 58
15 | limit-resource m6route-mem minimum 8 maximum 8
16 |
17 | feature nxapi
18 | feature scp-server
19 | nv overlay evpn
20 | feature ospf
21 | feature bgp
22 | feature fabric forwarding
23 | feature interface-vlan
24 | feature vn-segment-vlan-based
25 | feature nv overlay
26 |
27 | no password strength-check
28 | username admin password 5 $5$AsOtwa2X$xt9938IV2b.nAo6P5dX9U/cIl3ioeUsy8TELN4b76p8 role network-admin
29 | ip domain-lookup
30 | ip domain-name stesworld
31 | crypto key param rsa label DC1-N9K-SPINE1.stesworld modulus 2048
32 | copp profile strict
33 | snmp-server user admin network-admin auth md5 0x20d8bb249f331b2046bea66458c73309 priv 0x20d8bb249f331b2046bea66458c73309 localizedkey
34 | rmon event 1 description FATAL(1) owner PMON@FATAL
35 | rmon event 2 description CRITICAL(2) owner PMON@CRITICAL
36 | rmon event 3 description ERROR(3) owner PMON@ERROR
37 | rmon event 4 description WARNING(4) owner PMON@WARNING
38 | rmon event 5 description INFORMATION(5) owner PMON@INFO
39 |
40 | vlan 1
41 |
42 | route-map SPINE-OUTBOUND permit 10
43 | set ip next-hop unchanged
44 | vrf context management
45 |
46 |
47 | interface Vlan1
48 |
49 | interface Ethernet1/1
50 | no switchport
51 | medium p2p
52 | ip unnumbered loopback0
53 | ip router ospf 1 area 0.0.0.0
54 | no shutdown
55 |
56 | interface Ethernet1/2
57 | no switchport
58 | medium p2p
59 | ip unnumbered loopback0
60 | ip router ospf 1 area 0.0.0.0
61 | no shutdown
62 |
63 | interface Ethernet1/3
64 | no switchport
65 | medium p2p
66 | ip unnumbered loopback0
67 | ip router ospf 1 area 0.0.0.0
68 | no shutdown
69 |
70 | interface Ethernet1/4
71 |
72 | interface Ethernet1/5
73 |
74 | interface Ethernet1/6
75 |
76 | interface Ethernet1/7
77 |
78 | interface Ethernet1/8
79 |
80 | interface Ethernet1/9
81 |
82 | interface Ethernet1/10
83 |
84 | interface Ethernet1/11
85 |
86 | interface Ethernet1/12
87 |
88 | interface Ethernet1/13
89 |
90 | interface Ethernet1/14
91 |
92 | interface Ethernet1/15
93 |
94 | interface Ethernet1/16
95 |
96 | interface Ethernet1/17
97 |
98 | interface Ethernet1/18
99 |
100 | interface Ethernet1/19
101 |
102 | interface Ethernet1/20
103 |
104 | interface Ethernet1/21
105 |
106 | interface Ethernet1/22
107 |
108 | interface Ethernet1/23
109 |
110 | interface Ethernet1/24
111 |
112 | interface Ethernet1/25
113 |
114 | interface Ethernet1/26
115 |
116 | interface Ethernet1/27
117 |
118 | interface Ethernet1/28
119 |
120 | interface Ethernet1/29
121 |
122 | interface Ethernet1/30
123 |
124 | interface Ethernet1/31
125 |
126 | interface Ethernet1/32
127 |
128 | interface Ethernet1/33
129 |
130 | interface Ethernet1/34
131 |
132 | interface Ethernet1/35
133 |
134 | interface Ethernet1/36
135 |
136 | interface Ethernet1/37
137 |
138 | interface Ethernet1/38
139 |
140 | interface Ethernet1/39
141 |
142 | interface Ethernet1/40
143 |
144 | interface Ethernet1/41
145 |
146 | interface Ethernet1/42
147 |
148 | interface Ethernet1/43
149 |
150 | interface Ethernet1/44
151 |
152 | interface Ethernet1/45
153 |
154 | interface Ethernet1/46
155 |
156 | interface Ethernet1/47
157 |
158 | interface Ethernet1/48
159 |
160 | interface Ethernet1/49
161 |
162 | interface Ethernet1/50
163 |
164 | interface Ethernet1/51
165 |
166 | interface Ethernet1/52
167 |
168 | interface Ethernet1/53
169 |
170 | interface Ethernet1/54
171 |
172 | interface Ethernet1/55
173 |
174 | interface Ethernet1/56
175 |
176 | interface Ethernet1/57
177 |
178 | interface Ethernet1/58
179 |
180 | interface Ethernet1/59
181 |
182 | interface Ethernet1/60
183 |
184 | interface Ethernet1/61
185 |
186 | interface Ethernet1/62
187 |
188 | interface Ethernet1/63
189 |
190 | interface Ethernet1/64
191 |
192 | interface Ethernet1/65
193 |
194 | interface Ethernet1/66
195 |
196 | interface Ethernet1/67
197 |
198 | interface Ethernet1/68
199 |
200 | interface Ethernet1/69
201 |
202 | interface Ethernet1/70
203 |
204 | interface Ethernet1/71
205 |
206 | interface Ethernet1/72
207 |
208 | interface Ethernet1/73
209 |
210 | interface Ethernet1/74
211 |
212 | interface Ethernet1/75
213 |
214 | interface Ethernet1/76
215 |
216 | interface Ethernet1/77
217 |
218 | interface Ethernet1/78
219 |
220 | interface Ethernet1/79
221 |
222 | interface Ethernet1/80
223 |
224 | interface Ethernet1/81
225 |
226 | interface Ethernet1/82
227 |
228 | interface Ethernet1/83
229 |
230 | interface Ethernet1/84
231 |
232 | interface Ethernet1/85
233 |
234 | interface Ethernet1/86
235 |
236 | interface Ethernet1/87
237 |
238 | interface Ethernet1/88
239 |
240 | interface Ethernet1/89
241 |
242 | interface Ethernet1/90
243 |
244 | interface Ethernet1/91
245 |
246 | interface Ethernet1/92
247 |
248 | interface Ethernet1/93
249 |
250 | interface Ethernet1/94
251 |
252 | interface Ethernet1/95
253 |
254 | interface Ethernet1/96
255 |
256 | interface Ethernet1/97
257 |
258 | interface Ethernet1/98
259 |
260 | interface Ethernet1/99
261 |
262 | interface Ethernet1/100
263 |
264 | interface Ethernet1/101
265 |
266 | interface Ethernet1/102
267 |
268 | interface Ethernet1/103
269 |
270 | interface Ethernet1/104
271 |
272 | interface Ethernet1/105
273 |
274 | interface Ethernet1/106
275 |
276 | interface Ethernet1/107
277 |
278 | interface Ethernet1/108
279 |
280 | interface Ethernet1/109
281 |
282 | interface Ethernet1/110
283 |
284 | interface Ethernet1/111
285 |
286 | interface Ethernet1/112
287 |
288 | interface Ethernet1/113
289 |
290 | interface Ethernet1/114
291 |
292 | interface Ethernet1/115
293 |
294 | interface Ethernet1/116
295 |
296 | interface Ethernet1/117
297 |
298 | interface Ethernet1/118
299 |
300 | interface Ethernet1/119
301 |
302 | interface Ethernet1/120
303 |
304 | interface Ethernet1/121
305 |
306 | interface Ethernet1/122
307 |
308 | interface Ethernet1/123
309 |
310 | interface Ethernet1/124
311 |
312 | interface Ethernet1/125
313 |
314 | interface Ethernet1/126
315 |
316 | interface Ethernet1/127
317 |
318 | interface Ethernet1/128
319 |
320 | interface mgmt0
321 | vrf member management
322 | ip address 10.10.108.11/24
323 |
324 | interface loopback0
325 | ip address 192.168.100.1/32
326 | ip router ospf 1 area 0.0.0.0
327 | line console
328 | exec-timeout 0
329 | line vty
330 | exec-timeout 15
331 | boot nxos bootflash:/nxos.7.0.3.I7.6.bin
332 | router ospf 1
333 | router-id 192.168.100.1
334 | router bgp 65001
335 | router-id 192.168.100.1
336 | address-family ipv4 unicast
337 | address-family l2vpn evpn
338 | retain route-target all
339 | template peer VXLAN-LEAF
340 | remote-as 65001
341 | update-source loopback0
342 | timers 3 9
343 | address-family ipv4 unicast
344 | send-community
345 | send-community extended
346 | soft-reconfiguration inbound
347 | address-family l2vpn evpn
348 | send-community
349 | send-community extended
350 | route-reflector-client
351 | neighbor 192.168.100.3
352 | inherit peer VXLAN-LEAF
353 | neighbor 192.168.100.4
354 | inherit peer VXLAN-LEAF
355 | neighbor 192.168.100.5
356 | inherit peer VXLAN-LEAF
357 |
358 |
359 |
360 |
--------------------------------------------------------------------------------
/configurations/dc1-n9k-spine2:
--------------------------------------------------------------------------------
1 |
2 | !Command: show running-config
3 | !Running configuration last done at: Fri Jun 7 07:44:15 2019
4 | !Time: Sat Jun 8 13:46:21 2019
5 |
6 | version 7.0(3)I7(6) Bios:version
7 | hostname DC1-N9K-SPINE2
8 | vdc DC1-N9K-SPINE2 id 1
9 | limit-resource vlan minimum 16 maximum 4094
10 | limit-resource vrf minimum 2 maximum 4096
11 | limit-resource port-channel minimum 0 maximum 511
12 | limit-resource u4route-mem minimum 248 maximum 248
13 | limit-resource u6route-mem minimum 96 maximum 96
14 | limit-resource m4route-mem minimum 58 maximum 58
15 | limit-resource m6route-mem minimum 8 maximum 8
16 |
17 | feature nxapi
18 | feature scp-server
19 | nv overlay evpn
20 | feature ospf
21 | feature bgp
22 | feature fabric forwarding
23 | feature interface-vlan
24 | feature vn-segment-vlan-based
25 | feature nv overlay
26 |
27 | no password strength-check
28 | username admin password 5 $5$149DIPAT$oR0LDhE2zmnxT07TMwDIHTLfGzrXAZ21LkfbcB1liO5 role network-admin
29 | ip domain-lookup
30 | ip domain-name stesworld
31 | crypto key param rsa label DC1-N9K-SPINE2.stesworld modulus 2048
32 | copp profile strict
33 | snmp-server user admin network-admin auth md5 0xf596f4d29840a02b62890fd6d122313d priv 0xf596f4d29840a02b62890fd6d122313d localizedkey
34 | rmon event 1 description FATAL(1) owner PMON@FATAL
35 | rmon event 2 description CRITICAL(2) owner PMON@CRITICAL
36 | rmon event 3 description ERROR(3) owner PMON@ERROR
37 | rmon event 4 description WARNING(4) owner PMON@WARNING
38 | rmon event 5 description INFORMATION(5) owner PMON@INFO
39 |
40 | vlan 1
41 |
42 | route-map SPINE-OUTBOUND permit 10
43 | set ip next-hop unchanged
44 | vrf context management
45 |
46 |
47 | interface Vlan1
48 |
49 | interface Ethernet1/1
50 | no switchport
51 | medium p2p
52 | ip unnumbered loopback0
53 | ip router ospf 1 area 0.0.0.0
54 | no shutdown
55 |
56 | interface Ethernet1/2
57 | no switchport
58 | medium p2p
59 | ip unnumbered loopback0
60 | ip router ospf 1 area 0.0.0.0
61 | no shutdown
62 |
63 | interface Ethernet1/3
64 | no switchport
65 | medium p2p
66 | ip unnumbered loopback0
67 | ip router ospf 1 area 0.0.0.0
68 | no shutdown
69 |
70 | interface Ethernet1/4
71 |
72 | interface Ethernet1/5
73 |
74 | interface Ethernet1/6
75 |
76 | interface Ethernet1/7
77 |
78 | interface Ethernet1/8
79 |
80 | interface Ethernet1/9
81 |
82 | interface Ethernet1/10
83 |
84 | interface Ethernet1/11
85 |
86 | interface Ethernet1/12
87 |
88 | interface Ethernet1/13
89 |
90 | interface Ethernet1/14
91 |
92 | interface Ethernet1/15
93 |
94 | interface Ethernet1/16
95 |
96 | interface Ethernet1/17
97 |
98 | interface Ethernet1/18
99 |
100 | interface Ethernet1/19
101 |
102 | interface Ethernet1/20
103 |
104 | interface Ethernet1/21
105 |
106 | interface Ethernet1/22
107 |
108 | interface Ethernet1/23
109 |
110 | interface Ethernet1/24
111 |
112 | interface Ethernet1/25
113 |
114 | interface Ethernet1/26
115 |
116 | interface Ethernet1/27
117 |
118 | interface Ethernet1/28
119 |
120 | interface Ethernet1/29
121 |
122 | interface Ethernet1/30
123 |
124 | interface Ethernet1/31
125 |
126 | interface Ethernet1/32
127 |
128 | interface Ethernet1/33
129 |
130 | interface Ethernet1/34
131 |
132 | interface Ethernet1/35
133 |
134 | interface Ethernet1/36
135 |
136 | interface Ethernet1/37
137 |
138 | interface Ethernet1/38
139 |
140 | interface Ethernet1/39
141 |
142 | interface Ethernet1/40
143 |
144 | interface Ethernet1/41
145 |
146 | interface Ethernet1/42
147 |
148 | interface Ethernet1/43
149 |
150 | interface Ethernet1/44
151 |
152 | interface Ethernet1/45
153 |
154 | interface Ethernet1/46
155 |
156 | interface Ethernet1/47
157 |
158 | interface Ethernet1/48
159 |
160 | interface Ethernet1/49
161 |
162 | interface Ethernet1/50
163 |
164 | interface Ethernet1/51
165 |
166 | interface Ethernet1/52
167 |
168 | interface Ethernet1/53
169 |
170 | interface Ethernet1/54
171 |
172 | interface Ethernet1/55
173 |
174 | interface Ethernet1/56
175 |
176 | interface Ethernet1/57
177 |
178 | interface Ethernet1/58
179 |
180 | interface Ethernet1/59
181 |
182 | interface Ethernet1/60
183 |
184 | interface Ethernet1/61
185 |
186 | interface Ethernet1/62
187 |
188 | interface Ethernet1/63
189 |
190 | interface Ethernet1/64
191 |
192 | interface Ethernet1/65
193 |
194 | interface Ethernet1/66
195 |
196 | interface Ethernet1/67
197 |
198 | interface Ethernet1/68
199 |
200 | interface Ethernet1/69
201 |
202 | interface Ethernet1/70
203 |
204 | interface Ethernet1/71
205 |
206 | interface Ethernet1/72
207 |
208 | interface Ethernet1/73
209 |
210 | interface Ethernet1/74
211 |
212 | interface Ethernet1/75
213 |
214 | interface Ethernet1/76
215 |
216 | interface Ethernet1/77
217 |
218 | interface Ethernet1/78
219 |
220 | interface Ethernet1/79
221 |
222 | interface Ethernet1/80
223 |
224 | interface Ethernet1/81
225 |
226 | interface Ethernet1/82
227 |
228 | interface Ethernet1/83
229 |
230 | interface Ethernet1/84
231 |
232 | interface Ethernet1/85
233 |
234 | interface Ethernet1/86
235 |
236 | interface Ethernet1/87
237 |
238 | interface Ethernet1/88
239 |
240 | interface Ethernet1/89
241 |
242 | interface Ethernet1/90
243 |
244 | interface Ethernet1/91
245 |
246 | interface Ethernet1/92
247 |
248 | interface Ethernet1/93
249 |
250 | interface Ethernet1/94
251 |
252 | interface Ethernet1/95
253 |
254 | interface Ethernet1/96
255 |
256 | interface Ethernet1/97
257 |
258 | interface Ethernet1/98
259 |
260 | interface Ethernet1/99
261 |
262 | interface Ethernet1/100
263 |
264 | interface Ethernet1/101
265 |
266 | interface Ethernet1/102
267 |
268 | interface Ethernet1/103
269 |
270 | interface Ethernet1/104
271 |
272 | interface Ethernet1/105
273 |
274 | interface Ethernet1/106
275 |
276 | interface Ethernet1/107
277 |
278 | interface Ethernet1/108
279 |
280 | interface Ethernet1/109
281 |
282 | interface Ethernet1/110
283 |
284 | interface Ethernet1/111
285 |
286 | interface Ethernet1/112
287 |
288 | interface Ethernet1/113
289 |
290 | interface Ethernet1/114
291 |
292 | interface Ethernet1/115
293 |
294 | interface Ethernet1/116
295 |
296 | interface Ethernet1/117
297 |
298 | interface Ethernet1/118
299 |
300 | interface Ethernet1/119
301 |
302 | interface Ethernet1/120
303 |
304 | interface Ethernet1/121
305 |
306 | interface Ethernet1/122
307 |
308 | interface Ethernet1/123
309 |
310 | interface Ethernet1/124
311 |
312 | interface Ethernet1/125
313 |
314 | interface Ethernet1/126
315 |
316 | interface Ethernet1/127
317 |
318 | interface Ethernet1/128
319 |
320 | interface mgmt0
321 | vrf member management
322 | ip address 10.10.108.12/24
323 |
324 | interface loopback0
325 | ip address 192.168.100.2/32
326 | ip router ospf 1 area 0.0.0.0
327 | line console
328 | exec-timeout 0
329 | line vty
330 | exec-timeout 15
331 | boot nxos bootflash:/nxos.7.0.3.I7.6.bin
332 | router ospf 1
333 | router-id 192.168.100.2
334 | router bgp 65001
335 | router-id 192.168.100.2
336 | address-family l2vpn evpn
337 | retain route-target all
338 | template peer VXLAN-LEAF
339 | remote-as 65001
340 | update-source loopback0
341 | timers 3 9
342 | address-family ipv4 unicast
343 | send-community
344 | send-community extended
345 | soft-reconfiguration inbound
346 | address-family l2vpn evpn
347 | send-community
348 | send-community extended
349 | route-reflector-client
350 | neighbor 192.168.100.3
351 | inherit peer VXLAN-LEAF
352 | neighbor 192.168.100.4
353 | inherit peer VXLAN-LEAF
354 | neighbor 192.168.100.5
355 | inherit peer VXLAN-LEAF
356 |
357 |
358 |
359 |
--------------------------------------------------------------------------------
/configurations/dc1-vios-sw1:
--------------------------------------------------------------------------------
1 | Building configuration...
2 |
3 | Current configuration : 4317 bytes
4 | !
5 | ! Last configuration change at 13:42:47 UTC Fri Apr 26 2019 by admin
6 | !
7 | version 15.2
8 | service timestamps debug datetime msec
9 | service timestamps log datetime msec
10 | service password-encryption
11 | service compress-config
12 | !
13 | hostname DC1-VIOS-SW1
14 | !
15 | boot-start-marker
16 | boot-end-marker
17 | !
18 | !
19 | vrf definition MGMT
20 | rd 65001:65001
21 | !
22 | address-family ipv4
23 | exit-address-family
24 | !
25 | enable secret 5 $1$4C5r$TsoRXTsv5.smEpbDLmp0C0
26 | !
27 | username admin privilege 15 password 7 02070A4802040324
28 | no aaa new-model
29 | !
30 | !
31 | !
32 | !
33 | !
34 | vtp mode transparent
35 | !
36 | !
37 | !
38 | ip domain-name stesworld.com
39 | ip cef
40 | login on-success log
41 | no ipv6 cef
42 | !
43 | !
44 | !
45 | spanning-tree mode pvst
46 | spanning-tree extend system-id
47 | !
48 | vlan internal allocation policy ascending
49 | !
50 | vlan 50
51 | name dmz-web
52 | !
53 | vlan 51
54 | name dmz-mail
55 | !
56 | !
57 | !
58 | !
59 | !
60 | !
61 | !
62 | !
63 | !
64 | !
65 | !
66 | !
67 | !
68 | !
69 | interface Port-channel1
70 | switchport trunk allowed vlan 50,51
71 | switchport trunk encapsulation dot1q
72 | switchport mode trunk
73 | switchport nonegotiate
74 | !
75 | interface GigabitEthernet0/0
76 | description UPLINK > dc1-9k-leaf1
77 | switchport trunk allowed vlan 50,51
78 | switchport trunk encapsulation dot1q
79 | switchport mode trunk
80 | switchport nonegotiate
81 | media-type rj45
82 | negotiation auto
83 | channel-group 1 mode active
84 | spanning-tree portfast edge trunk
85 | spanning-tree bpduguard enable
86 | !
87 | interface GigabitEthernet0/1
88 | description UPLINK > dc1-9k-leaf2
89 | switchport trunk allowed vlan 50,51
90 | switchport trunk encapsulation dot1q
91 | switchport mode trunk
92 | switchport nonegotiate
93 | media-type rj45
94 | negotiation auto
95 | channel-group 1 mode active
96 | spanning-tree portfast edge trunk
97 | spanning-tree bpduguard enable
98 | !
99 | interface GigabitEthernet0/2
100 | switchport access vlan 50
101 | switchport mode access
102 | media-type rj45
103 | negotiation auto
104 | !
105 | interface GigabitEthernet0/3
106 | media-type rj45
107 | negotiation auto
108 | !
109 | interface GigabitEthernet1/0
110 | media-type rj45
111 | negotiation auto
112 | !
113 | interface GigabitEthernet1/1
114 | media-type rj45
115 | negotiation auto
116 | !
117 | interface GigabitEthernet1/2
118 | media-type rj45
119 | negotiation auto
120 | !
121 | interface GigabitEthernet1/3
122 | no switchport
123 | ip address 10.10.108.18 255.255.255.0
124 | negotiation auto
125 | !
126 | ip forward-protocol nd
127 | !
128 | no ip http server
129 | no ip http secure-server
130 | !
131 | !
132 | !
133 | !
134 | !
135 | !
136 | control-plane
137 | !
138 | banner exec ^C
139 | **************************************************************************
140 | * IOSv is strictly limited to use for evaluation, demonstration and IOS *
141 | * education. IOSv is provided as-is and is not supported by Cisco's *
142 | * Technical Advisory Center. Any use or disclosure, in whole or in part, *
143 | * of the IOSv Software or Documentation to any third party for any *
144 | * purposes is expressly prohibited except as otherwise authorized by *
145 | * Cisco in writing. *
146 | **************************************************************************^C
147 | banner incoming ^C
148 | **************************************************************************
149 | * IOSv is strictly limited to use for evaluation, demonstration and IOS *
150 | * education. IOSv is provided as-is and is not supported by Cisco's *
151 | * Technical Advisory Center. Any use or disclosure, in whole or in part, *
152 | * of the IOSv Software or Documentation to any third party for any *
153 | * purposes is expressly prohibited except as otherwise authorized by *
154 | * Cisco in writing. *
155 | **************************************************************************^C
156 | banner login ^C
157 | **************************************************************************
158 | * IOSv is strictly limited to use for evaluation, demonstration and IOS *
159 | * education. IOSv is provided as-is and is not supported by Cisco's *
160 | * Technical Advisory Center. Any use or disclosure, in whole or in part, *
161 | * of the IOSv Software or Documentation to any third party for any *
162 | * purposes is expressly prohibited except as otherwise authorized by *
163 | * Cisco in writing. *
164 | **************************************************************************^C
165 | !
166 | line con 0
167 | exec-timeout 0 0
168 | privilege level 15
169 | login local
170 | line aux 0
171 | line vty 0 4
172 | exec-timeout 15 0
173 | privilege level 15
174 | login local
175 | transport input all
176 | !
177 | event manager applet CLIlog
178 | event cli pattern ".*" sync no skip no
179 | action 1.0 syslog priority informational msg "$_cli_msg"
180 | action 2.0 set _exit_status "1"
181 | !
182 | end
--------------------------------------------------------------------------------
/data_model/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | library = /home/ste/virt/ansible_2.8.4/lib/python3.6/site-packages/napalm_ansible/modules
3 | action_plugins = /home/ste/virt/ansible_2.8.4/lib/python3.6/site-packages/napalm_ansible/plugins/action
4 |
5 | forks = 20
6 | #jinja2_extensions = jinja2.ext.do
7 |
8 | #stdout_callback = selective
9 |
10 | gathering = explicit
11 | retry_files_enabled = False
12 | inventory = hosts.yml
13 | transport = network_cli
14 |
--------------------------------------------------------------------------------
/data_model/filter_plugins/__pycache__/format_data_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/data_model/filter_plugins/__pycache__/format_data_model.cpython-36.pyc
--------------------------------------------------------------------------------
/data_model/filter_plugins/format_data_model.py:
--------------------------------------------------------------------------------
1 |
2 | from collections import Counter
3 | from ipaddress import ip_network
4 |
class FilterModule(object):
    """Ansible filter plugins that expand the raw service-model variables
    (tenants and ports) into the full data-model consumed by the templates."""

    def filters(self):
        # Maps the filter names usable in playbooks to their implementations
        return {
            'srv_tenants_dm': self.srv_tenants_dm,
            'srv_ports_dm': self.srv_ports_dm
        }

    # Create a new tenants data-model including VNI info
    def srv_tenants_dm(self, srv_tenants, base_vni):
        """Add the transit VLAN/L3VNI to each L3 tenant and a L2VNI to every VLAN.

        srv_tenants: list of tenant dicts; mutated in place and also returned.
        base_vni: dict holding the starting 'l3vni', 'tn_vlan' and 'l2vni' numbers.
        Returns the same srv_tenants list with the extra keys added.
        """
        # Starting numbers from which the per-tenant values are incremented
        l3vni = base_vni['l3vni']
        tn_vlan = base_vni['tn_vlan']
        l2vni = base_vni['l2vni']

        for tn in srv_tenants:
            # If a tenant is L3 (has a VRF) add the transit VLAN and L3VNI to its dict
            if tn['l3_tenant'] is True:
                tn['l3vni'] = l3vni
                tn['tn_vlan'] = tn_vlan
                # Each L3 tenant uses the next VLAN and L3VNI number
                l3vni = l3vni + 1
                tn_vlan = tn_vlan + 1
            # Every VLAN gets a L2VNI formed of the tenant base VNI plus the VLAN number
            for vl in tn['vlans']:
                vl['l2vni'] = l2vni + vl['num']
            # Each tenant's L2VNI range is 10000 higher than the previous tenant's
            l2vni = l2vni + 10000

        # If any SVIs are redistributed into BGP, flag the tenant with a 'redist' key
        for tn in srv_tenants:
            if tn['l3_tenant'] is True:
                for vl in tn['vlans']:
                    # NOTE(review): raises KeyError if 'ipv4_bgp_redist' is absent -
                    # assumes an earlier validation/default step guarantees the key exists
                    if vl['ipv4_bgp_redist'] == True:
                        tn['redist'] = True

        # Dictionary returned back to Ansible
        return srv_tenants

    # Create a new ports data-model including interface, port-channel and VPC info
    def srv_ports_dm(self, srv_ports, srv_ports_adv, srv_tenants):
        """Assign interface numbers (plus PO/VPC numbers for dual-homed ports).

        srv_ports: dict with 'single_homed' and 'dual_homed' lists of port dicts,
                   each holding a 'switch' key; the dicts are mutated in place.
        srv_ports_adv: per-type 'first_int'/'last_int' interface ranges and the
                       starting 'po' and 'vpc' numbers for dual-homed ports.
        srv_tenants: unused; kept so the filter signature stays backward compatible.
        Returns {'sh_ports': [...], 'dh_ports': [...]} on success, or an error
        string if a switch has run out of ports (fail-fast for the playbook).
        """
        ### 1. Build separate lists for single and dual homed so the loop index can increment the interface number ###
        sh_ports = []
        dh_ports = []
        # Split the first interface into port type and number, e.g. ['Ethernet1', '1']
        sh_first = srv_ports_adv['single_homed']['first_int'].split('/')
        dh_first = srv_ports_adv['dual_homed']['first_int'].split('/')

        # Create single-homed interfaces: the iteration index is added to the start port number
        for index, port in enumerate(srv_ports['single_homed']):
            int_num = str(int(sh_first[1]) + index)
            port['interface'] = sh_first[0] + '/' + int_num
            sh_ports.append(port)

        # Create dual-homed interfaces, port-channels and VPCs
        for index, port in enumerate(srv_ports['dual_homed']):
            int_num = str(int(dh_first[1]) + index)
            port['interface'] = dh_first[0] + '/' + int_num
            port.update({'vpc': srv_ports_adv['dual_homed']['vpc'] + index,
                         'po': srv_ports_adv['dual_homed']['po'] + index})
            dh_ports.append(port)

        ### 2. FAIL-FAST: only return the new dictionaries if the interface limit has not been reached ###
        # Maximum number of interfaces available per switch for each port type
        sh_limit = int(srv_ports_adv['single_homed']['last_int'].split('/')[1]) - int(sh_first[1]) + 1
        dh_limit = int(srv_ports_adv['dual_homed']['last_int'].split('/')[1]) - int(dh_first[1]) + 1

        # Switch name of every port, used to count the ports per switch
        num_sh = [sh_swi['switch'] for sh_swi in sh_ports]
        num_dh = [dh_swi['switch'] for dh_swi in dh_ports]

        # Count the ports on each switch and return an error if above the limit
        for sw_name, sw_count in Counter(num_sh).items():
            if sw_count > sh_limit:
                return 'Error: No single-homed ports left on ' + sw_name
        for sw_name, sw_count in Counter(num_dh).items():
            if sw_count > dh_limit:
                return 'Error: No dual-homed ports left on ' + sw_name

        ### 3. Return a dictionary containing both port lists ###
        return {'sh_ports': sh_ports, 'dh_ports': dh_ports}
--------------------------------------------------------------------------------
/data_model/inv_from_vars_cfg.yml:
--------------------------------------------------------------------------------
1 | # inv_from_vars_cfg.yml file in YAML format
2 | # Example command line: ANSIBLE_INVENTORY_PLUGINS=$(pwd inventory_plugins) ansible-inventory -i inv_from_vars_cfg.yml --list
3 | plugin: inv_from_vars
4 |
5 | # Data-model in Ansible vars directory where dictionaries will be imported from
6 | var_files:
7 | - ansible.yml
8 | - base.yml
9 | - fabric.yml
10 |
11 | # Dictionaries that will be imported from the variable files in the vars directory
12 | var_dicts:
13 | ansible:
14 | - device_type
15 | base:
16 | - device_name
17 | - addressing
18 | fabric:
19 | - address_incre
20 | - network_size
21 |
22 |
23 |
--------------------------------------------------------------------------------
/data_model/inventory_plugins/__pycache__/inv_from_vars.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/data_model/inventory_plugins/__pycache__/inv_from_vars.cpython-36.pyc
--------------------------------------------------------------------------------
/data_model/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook is used to generate the different configuration snippets
3 |
4 | # 1. Create the file structure
5 | - name: "Create file structure"
6 | hosts: all
7 | connection: local
8 | vars_files:
9 | - vars/ansible.yml
10 | tasks:
11 | - block:
12 | - name: "SYS >> Delete the directory"
13 | file: path="{{ dir_path }}" state=absent
14 | changed_when: False
15 | - name: "Creating file structure"
16 | file: path="{{ dir_path }}/{{ inventory_hostname }}" state=directory
17 | changed_when: False
18 | check_mode: False
19 | tags: dir
20 |
21 | # 2. Create the base config snippets using basetemplate.j2
22 | - name: "SYS >> Create base config snippets"
23 | hosts: all
24 | connection: local
25 | vars_files:
26 | - vars/ansible.yml
27 | - vars/base.yml
28 | tasks:
29 | - name: "SYS >> Create config snippets"
30 | template:
31 | src: base_template.j2
32 | dest: "{{ dir_path }}/{{ inventory_hostname }}/base.cfg"
33 | tags: base
34 |
35 | # 3. Create the fabric config snippets using fabric_template.j2
36 | - name: "SYS >> Create fabric config snippets"
37 | hosts: all
38 | connection: local
39 | vars_files:
40 | - vars/ansible.yml
41 | - vars/base.yml
42 | - vars/fabric.yml
43 | tasks:
44 | - name: "SYS >> Create config snippets"
45 | template:
46 | src: fabric_template.j2
47 | dest: "{{ dir_path }}/{{ inventory_hostname }}/fabric.cfg"
48 | tags: fabric
49 |
50 | # 4. Create the service config snippets using service_template.j2
51 | - name: "SYS >> Create services config snippets"
52 | hosts: all
53 | connection: local
54 | vars_files:
55 | - vars/ansible.yml
56 | - vars/base.yml
57 | - vars/fabric.yml
58 | - vars/services.yml
59 | tasks:
60 | - block:
61 | - name: "SYS >> Use Python to create complete data-model"
62 | set_fact:
63 | srv_tenants_dm: "{{ srv_tenants |srv_tenants_dm(srv_tenants_adv.base_vni) }}" # Completes data-model with VNI
64 | srv_ports_dm: "{{ srv_ports |srv_ports_dm(srv_ports_adv, srv_tenants) }}" # Completes data-model with interface & PO/VPC
65 | changed_when: false
66 | run_once: true
67 | - name: "SYS >> Create config snippets"
68 | template:
69 | src: services_template.j2
70 | dest: "{{ dir_path }}/{{ inventory_hostname }}/services.cfg"
71 | changed_when: false
72 | # - debug:
73 | # var: hostvars
74 | tags: services
75 |
76 |
--------------------------------------------------------------------------------
/data_model/templates/base_template.j2:
--------------------------------------------------------------------------------
1 | hostname {{ inventory_hostname }}
2 | vdc {{ inventory_hostname }} id 1
3 | limit-resource vlan minimum 16 maximum 4094
4 | limit-resource vrf minimum 2 maximum 4096
5 | limit-resource port-channel minimum 0 maximum 511
6 | limit-resource u4route-mem minimum 248 maximum 248
7 | limit-resource u6route-mem minimum 96 maximum 96
8 | limit-resource m4route-mem minimum 58 maximum 58
9 | limit-resource m6route-mem minimum 8 maximum 8
10 |
11 | feature nxapi
12 | feature scp-server
13 | feature tacacs+
14 | cfs eth distribute
15 | nv overlay evpn
16 | feature ospf
17 | feature bgp
18 | feature fabric forwarding
19 | feature interface-vlan
20 | feature vn-segment-vlan-based
21 | feature lacp
22 | feature vpc
23 | feature lldp
24 | feature bfd
25 | clock timezone GMT 0 0
26 | clock summer-time BST 5 sun mar 02:00 5 sun oct 02:00 60
27 | feature nv overlay
28 | {# Username and authentication based settings #}
29 | no password strength-check
30 | {# Loops through the list of users #}
31 | {% for usr in bse.users %}
32 | username {{ usr.username }} password 5 {{ usr.password }} role {{ usr.role }}
33 | {% endfor %}
34 | ip domain-lookup
35 | ip domain-name {{ bse_services.domain }}
36 | ip name-server {{ bse_services.dns.prim }} {{ bse_services.dns.sec }}
37 | tacacs-server key 7 {{ bse_services.tacacs.key }}
38 | {# Loops through the list of tacacs servers #}
39 | {% for srv in bse_services.tacacs.servers %}
40 | tacacs-server host {{ srv }}
41 | {% endfor %}
42 | aaa group server tacacs+ {{ bse_services.tacacs.grp_name }}
43 | {% for srv in bse_services.tacacs.servers %}
44 | server {{ srv }}
45 | {% endfor %}
46 | source-interface {{ bse_services.src_int }}
47 | crypto key param rsa label {{ inventory_hostname }}.{{ bse_services.domain }} modulus 2048
48 | system default switchport shutdown
49 | logging message interface type ethernet description
50 | {# Managment access-lists #}
51 | ip access-list {{ bse_acl.snmp.name }}
52 | {# The start sequence number that has 10 added to it with each loop iteration #}
53 | {% set seq = namespace(cnt=10) %}
54 | {% for host in bse_acl.snmp.source %}
55 | {{ seq.cnt }} permit {{ bse_acl.snmp.port[0] }} {{ host }} any
56 | {% set seq.cnt= seq.cnt + 10 %}
57 | {% endfor %}
58 | {{ seq.cnt }} deny ip any any log
59 | ip access-list {{ bse_acl.ssh.name }}
60 | {% set seq = namespace(cnt=10) %}
61 | {% for host in bse_acl.ssh.source %}
62 | {{ seq.cnt }} permit {{ bse_acl.ssh.port[0] }} {{ host }} any
63 | {% set seq.cnt= seq.cnt + 10 %}
64 | {% endfor %}
65 | {{ seq.cnt }} deny ip any any log
66 | copp profile strict
67 | snmp-server source-interface traps {{ bse_services.src_int }}
68 | snmp-server source-interface informs {{ bse_services.src_int }}
69 | snmp-server user admin network-admin auth md5 0x99187947008c0a20401eac07e9fb58c3 priv 0x99187947008c0a20401eac07e9fb58c3 localizedkey
70 | snmp-server host {{ bse_services.snmp.host }} traps version 2c {{ bse_services.snmp.comm }}
71 | rmon event 1 description FATAL(1) owner PMON@FATAL
72 | rmon event 2 description CRITICAL(2) owner PMON@CRITICAL
73 | rmon event 3 description ERROR(3) owner PMON@ERROR
74 | rmon event 4 description WARNING(4) owner PMON@WARNING
75 | rmon event 5 description INFORMATION(5) owner PMON@INFO
76 | snmp-server community {{ bse_services.snmp.comm }} group network-operator
77 | snmp-server community {{ bse_services.snmp.comm }} use-ipv4acl {{ bse_acl.snmp.name }}
78 | {% for ntp in bse_services.ntp.server %}
79 | ntp server {{ ntp }} use-vrf default
80 | {% endfor %}
81 | ntp source-interface {{ bse_services.src_int }}
82 | aaa authentication login default group {{ bse_services.tacacs.grp_name }}
83 | aaa authentication login console local
84 | aaa authorization commands default group {{ bse_services.tacacs.grp_name }} local
85 | aaa accounting default group {{ bse_services.tacacs.grp_name }}
86 | aaa authentication login error-enable
87 |
88 | {# Only adds the leafs as they are the only devices with SVIs #}
89 | {% if device_name.leaf_name in inventory_hostname %}
90 | fabric forwarding anycast-gateway-mac {{ bse_adv.acast_gw_mac }}
91 | {% endif %}
92 |
93 | {# Management and console access #}
94 | interface mgmt0
95 | vrf member management
96 | ip address {{ ansible_host }}/{{ addressing.mgmt_ip_subnet.split('/')[1] }}
97 | ip access-group {{ bse_acl.ssh.name }} in
98 | cli alias name wr copy running-config startup-config
99 | line console
100 | exec-timeout {{ bse_adv.exec_timeout.console }}
101 | line vty
102 | exec-timeout {{ bse_adv.exec_timeout.vty }}
103 | ip access-group {{ bse_acl.ssh.name }} in
104 | boot nxos bootflash:/{{ bse_adv.image }}
105 |
106 | {% for log in bse_services.log.server %}
107 | logging server {{ log }}
108 | {% endfor %}
109 | logging source-interface {{ bse_services.src_int }}
--------------------------------------------------------------------------------
/data_model/templates/input.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###################### Service: Tenant ######################
3 | # VLANs will only be Layer 2 unless an ip address is assigned to them.
4 | # VRFs will only be created on a border or leaf if a VLAN within that VRF is to be created on that device type
5 | # Even if it is not a L3_tenant still creates VRF and reserves the L3VNI/VLAN (stops renumbering if made L3_tenant in the future)
6 |
7 | # At a minimum per-tenant you need:
8 | # - tenant_name: Name Name of the VRF
9 | # l3_tenant: True or False Does it need SVIs or is routing done on a device external to the L&S fabric (i.e router)
10 | # vlans: VLANs within this tenant
11 | # - num: Number VLAN number
12 | # name: Name VLAN Name
13 |
14 | # These settings are optional and need not be set. Setting them overrides the explicit default values
15 |
16 | # bgp_redist_tag: xxx Per-tenant tag used when redistributing SVIs into IPv4 BGP addr-fam (default L3VNI)
17 | # ip_addr: x.x.x.x/24 Adding an IP address makes it a L3 VLAN (default L2 only)
18 | # ipv4_bgp_redist: True or False Whether the SVI is redistributed into IPv4 BGP addr-fam (default True unless no ip_addr in which case is False)
19 | # create_on_leaf: True or False Whether this VLAN is created on the leafs (default True)
20 | # create_on_border: True or False Whether this VLAN is created on the borders (default False)
21 |
22 | #### Base variables ####
23 | svc_tnt:
24 | tnt:
25 | - tenant_name: BLU
26 | l3_tenant: True
27 | vlans:
28 | - num: 10
29 | name: data
30 | ip_addr: 10.10.10.1/24
31 | - num: 20
32 | name: dev
33 | ip_addr: 10.10.12.1/24
34 | - num: 24
35 | name: servers
36 | ip_addr: 10.10.11.1/24
37 | - num: 30
38 | name: lb_vips
39 | ip_addr: 10.10.20.1/24
40 | ipv4_bgp_redist: False
41 | - num: 40
42 | name: ha_keepalive
43 |
44 | - tenant_name: GRN
45 | l3_tenant: True
46 | vlans:
47 | - num: 110
48 | name: grn-web
49 | ip_addr: 10.250.110.1/24
50 | create_on_border: True
51 | ipv4_bgp_redist: False
52 | - num: 120
53 | name: grn-mail
54 | ip_addr: 10.250.120.1/24
55 | create_on_border: True
56 | ipv4_bgp_redist: False
57 |
58 | - tenant_name: AMB
59 | l3_tenant: False
60 | vlans:
61 | - num: 210
62 | name: amb-web
63 | create_on_border: True
64 | - num: 220
65 | name: amb-mail
66 | create_on_border: True
67 |
68 | - tenant_name: RED
69 | l3_tenant: False
70 | vlans:
71 | - num: 90
72 | name: red-ctt1
73 | create_on_border: True
74 | create_on_leaf: False
75 | - num: 91
76 | name: red-ctt2
77 | create_on_border: True
78 | create_on_leaf: False
79 |
80 | #### Advanced variables ####
81 | # Each L3 Tenant requires a VLAN and VNI. These are automatically generated by incrementing the L3VNI base_vlan and base_vni
82 | # Each tenant vlan requires a VNI. These are formed of an increment of 10000 per-tenant with each VLAN number added to this
83 |
84 |
85 | adv:
86 | bse_vni:
87 | tnt_vlan: 3001 # Transit L3VNI start VLAN number. Incre by 1 for each L3 tenant (VRF)
88 | l3vni: 3001 # Transit L3VNI start VNI number. Incre by 1 for each L3 tenant (VRF)
89 | l2vni: 10000 # Start L2VNI and the range to add to each tenants vlan. Increments by 10000 for each tenant
90 | bgp:
91 | ipv4_redist_rm_name: rm_CONN_vrf>>BGPas # Can change route-map name, but it MUST still include 'vrf' and 'as' in the text
92 |
93 |
--------------------------------------------------------------------------------
/data_model/templates/render_jinja.py:
--------------------------------------------------------------------------------
# From http://networkbit.ch/python-jinja-template/
# Used to render a YAML file with a Jinja2 template and print the output - good for testing Ansible
# Run the script using "python3 render_jinja.py input.yml template.j2"

from sys import argv                                 # Imports argv so values can be entered when running the script
from jinja2 import Environment, FileSystemLoader     # Imports from Jinja2
import yaml                                          # Import YAML from PyYAML

# Variables created from the command-line arguments when the script is run
script, yaml_input, jinja_template = argv

# Loads data from the YAML file into a Python dictionary.
# safe_load is used rather than the deprecated loader-less load() so a malicious
# YAML file cannot execute arbitrary code; the with-block closes the file handle.
with open(yaml_input) as yaml_file:
    config = yaml.safe_load(yaml_file)

# Loads the Jinja2 template from the current directory
env = Environment(loader=FileSystemLoader('./'), trim_blocks=True, lstrip_blocks=True)
template = env.get_template(jinja_template)

# Renders the template using the data and prints the output to screen
print(template.render(config))
--------------------------------------------------------------------------------
/data_model/templates/template.j2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/data_model/templates/template.j2
--------------------------------------------------------------------------------
/data_model/vars/ansible.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Login and Ansible settings (normally would be in all.yml) ################
3 |
4 | # Python location on the Ansible host (operating system specific)
5 | ansible_python_interpreter: "/usr/bin/env python" # Ubuntu
6 |
7 | # Base directory Location to store the generated configuration snippets
8 | dir_path: ~/device_configs
9 |
10 | # Connection Variables
11 | creds_all: # Napalm
12 | hostname: "{{ ansible_host|default(inventory_hostname) }}"
13 | username: admin
14 | password: ansible
15 |
16 | # Operating system type
17 | device_type:
18 | spine_os: nxos
19 | border_os: nxos
20 | leaf_os: nxos
--------------------------------------------------------------------------------
/data_model/vars/base.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Variables used to create core elements of the device configs ################
3 |
4 | # The naming structure that is added before the automatically generated node number (0x). Groups are created based on the name (i.e spine, border, leaf)
5 | device_name: # The start (DC1-N9K) can be changed, however the rest must NOT be changed as it is used in the scripting logic
6 | spine_name: 'DC1-N9K-SPINE'
7 | border_name: 'DC1-N9K-BORDER'
8 | leaf_name: 'DC1-N9K-LEAF'
9 |
10 | # Ranges from which device addresses are created from. Must have the mask in prefix format (/)
11 | addressing:
12 | lp_ip_subnet: '192.168.100.0/32' # Core OSPF and BGP peerings. By default will use .10 to .37
13 | mgmt_ip_subnet: '10.10.108.0/24' # Needs to be at least /27 to cover max spine (4), leafs (10) and borders (4)
14 | vpc_peer_subnet: '10.255.255.0/28' # VPC peer link addresses. Needs to be at least /28 to cover max leafs (10) and borders (4)
15 | srv_ospf_subnet: '10.255.255.16/28' # Non-core OSPF process peerings between border switches (4 addresses per OSPF process)
16 |
17 | bse:
18 | users:
19 | - username: admin
20 | password: $5$zuqcgFp4$62a5Mbxu1uFu7Udszc9boXrR9knz.Rhqbi.xstWTud/
21 | role: network-admin
22 |
23 | # Details for all the services that the switches consume
24 | bse_services:
25 | domain: 'stesworld.com'
26 | src_int: loopback0
27 | dns:
28 | prim: 10.10.10.41
29 | sec: 10.10.10.42
30 | tacacs:
31 | grp_name: ISE_TACACS
32 | key: securekey
33 | servers:
34 | - 10.10.10.51
35 | - 10.10.10.52
36 | - 10.10.10.53
37 | snmp:
38 | host: 10.10.10.43
39 | comm: 5NMPC0MMUN1TY
40 | ntp:
41 | server: [10.10.10.45, 10.10.20.46]
42 | log:
43 | server: [10.10.10.47, 10.10.20.48]
44 |
45 | # Management and control plane Access-lists
46 | bse_acl:
47 | snmp:
48 | name: SNMP_ACCESS
49 | source: [10.10.20.43/24, 10.10.10.43/24]
50 | port: [udp, 161]
51 | ssh:
52 | name: SSH_ACCESS
53 | source: [10.10.10.0/24, 10.255.254.0/24]
54 | port: [tcp, 22]
55 |
56 | # Advanced base configuration that is less likely to be changed
57 | bse_adv:
58 | acast_gw_mac: 0000.2222.3333
59 | image: nxos.7.0.3.I7.6.bin
60 | exec_timeout:
61 | console: 0
62 | vty: 15
--------------------------------------------------------------------------------
/data_model/vars/fabric.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################ Variables used to decide how the fabric will look ################
3 | # This Only scales to 4 spines, 10 leafs, 4 borders. By default the following ports are used:
4 | # SPINE-to-LEAF = Eth1/1 - 1/10 SPINE-to-Border = Eth1/11 - 1/15
5 | # LEAF-to-SPINE = Eth1/1 - 1/5 BORDER-to-SPINE: = Eth1/1 - 1/5
6 | # VPC Peer-link = Eth1/127 - 128 VPC keepalive = mgmt
7 |
8 | # How big the network is, so the number of each switch. border/leaf must be in increments of 2 as in VPC pair
9 | network_size:
10 | num_spines: 2
11 | num_borders: 2
12 | num_leafs: 2
13 |
14 | # To change Fabric routing protocol settings
15 | fbc:
16 | ospf:
17 | ospf_pro: 1
18 | ospf_area: 0.0.0.0
19 | bgp:
20 | as_num: 65001
21 |
22 | ################ Advanced settings to further customize the fabric ################
23 |
24 | fbc_adv:
25 | # To change the interfaces used to create the fabric. These are the first interfaces used, the playbook increments these
26 | base_int:
27 | int_format: Ethernet1/ # Switch interface, must be same on all switches
28 | int_short: Eth1/ # Switch interface used in interface descriptions
29 | spine_to_leaf: 1 # First interface used for SPINE to LEAF links (1 to 10)
30 | spine_to_border: 11 # First interface used for SPINE to BORDER links (1 to 4)
31 | leaf_to_spine: 1 # First interface used LEAF to SPINE links (1 to 4)
32 | border_to_spine: 1 # First interface used BORDER to SPINE links (1 to 4)
33 | vpc_peer: 127-128 # Interfaces used for the VPC peer Link
34 | # All VPC specific settings except for peer Link interfaces (fabric.yml - base_interface) and subnet (base.yml - addressing)
35 | vpc:
36 | domain: 1 # VPC Domain number
37 | peer_po: 1 # Port-channel used for Peer Link
38 | peer_vlan: 2 # VLAN used for Peer Link and OSPF peering
39 |
40 | # The increment that is added to the subnet and device hostname number to generate the unique last octet of the IP addresses
41 | address_incre:
42 | spine_ip: 10 # SPINE IP addresses will be from .11 to .14
43 | border_ip: 15 # BORDER IP addresses will be from .16 to .19
44 | leaf_ip: 20 # LEAF IP addresses will be from .21 to .30
45 | sec_leaf_lp: 30 # Pair of LEAF secondary loopback IP addresses will be from .31 to .35
46 | sec_border_lp: 35 # Pair of BORDER secondary loopback addresses will be from .36 to .37
47 | vpc_leaf_ip: 0 # Start IP for LEAF Peer Links, so LEAF1 is .1, LEAF2 .2, LEAF3 .3, etc
48 | vpc_border_ip: 10 # Start IP for BORDER Peer Links, so BORDER1 is .11, BORDER2 .12, etc
49 |
--------------------------------------------------------------------------------
/network_state_report/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | library = /home/ste/virt/ansible_2.8.4/lib/python3.6/site-packages/napalm_ansible/modules
3 | action_plugins = /home/ste/virt/ansible_2.8.4/lib/python3.6/site-packages/napalm_ansible/plugins/action
4 |
5 | forks = 10
6 | jinja2_extensions = jinja2.ext.do
7 |
8 | #stdout_callback = selective
9 |
10 | gathering = explicit
11 | retry_files_enabled = False
12 | inventory = hosts.yml
13 | transport = network_cli
14 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ansible_python_interpreter: "/usr/bin/env python" # Ubuntu
3 |
4 | # File structure variables
5 | tmp_path: ~/network_state/tmp_tables
6 | dir_path: ~/network_state
7 |
8 | # Connection Variables
9 | creds_all: # Napalm
10 | hostname: "{{ ansible_host|default(inventory_hostname) }}"
11 | username: admin
12 | password: ansible
13 |
14 | ansible_user: admin # Ansible
15 | ansible_ssh_pass: ansible
16 |
17 | # Commands used in the various roles
18 | itf_cmds: # Interfaces role
19 | - interfaces
20 | - lldp_neighbors
21 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/asa.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Connection Variables
3 | os: asa # Napalm
4 | ansible_network_os: asa # Ansible
5 |
6 | # Commands used in the various roles
7 | edge_cmds: # edge role
8 | - show xlate count
9 | - show vpn-sessiondb l2l
10 | itf_cmds: # Interfaces role
11 | - interfaces
12 | l3_cmds: # Layer3 role
13 | - show arp statistics | in ASA
14 | - show route | in via|directly
15 | ospf_cmds: # OSPF role
16 | - show ospf neighbor | include FULL
17 | - show ospf database database-summary | in Total
18 |
19 | # Network Topology Information
20 | ospf_neigh: 2
21 | vpn_peer: 0
22 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/bigip.yml:
--------------------------------------------------------------------------------
1 | ---
2 | creds_big_ip:
3 | server: "{{ ansible_host|default(inventory_hostname) }}"
4 | user: admin
5 | password: ansible
6 | validate_certs: no
7 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/border_leaf.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Network Topology Information
3 | bgp_neigh: 4
4 | ospf_neigh: 3
5 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/ios.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Connection Variables
3 | os: ios # Napalm
4 | ansible_network_os: ios # Ansible
5 |
# Commands used in the various roles
7 | l2_cmds: # Layer2 role
8 | - show vlan
9 | - show etherchannel summary
10 | - show mac address-table
11 |
12 | # Network Topology Information
13 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/iosxe.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Connection Variables
3 | os: ios # Napalm
4 | ansible_network_os: ios # Ansible
5 |
# Commands used in the various roles
7 | edge_cmds: # edge role
8 | - show ip nat translations total
9 | - show crypto session
10 | l3_cmds: # Layer3 role
11 | - show ip arp summary
12 | - show ip route vrf * | in via|directly
13 | ospf_cmds: # OSPF role
14 | - show ip ospf neighbor | include FULL
15 | - show ip ospf database database-summary | in Total
16 |
17 | # Network Topology Information
18 | ospf_neigh: 1
19 | bgp_neigh: 4
20 | vpn_peer: 0
21 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/leaf.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Network Topology Information
3 | bgp_neigh: 2
4 | ospf_neigh: 3 # border and leaf
5 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/nxos.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Connection Variables
3 | os: nxos # Napalm
4 | ansible_network_os: nxos # Ansible
5 |
# Commands used in the various roles
7 | l2_cmds: # Layer2 role
8 | - show vlan
9 | - show port-channel summary
10 | - show mac address-table
11 |
12 | l3_cmds: # Layer3 role
13 | - show ip arp summary vrf all | in Total
14 | - show ip route vrf all | in ubest|mbest
15 | ospf_cmds: # OSPF role
16 | - show ip ospf neighbor vrf all | include FULL
17 | - show ip ospf database database-summary vrf all | in Total
18 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/spine.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Network Topology Information
3 | bgp_neigh: 3 # border and leaf
4 | ospf_neigh: 3 # border and leaf
5 |
--------------------------------------------------------------------------------
/network_state_report/group_vars/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Connection Variables
3 | os: ios # Napalm
4 | ansible_network_os: ios # Ansible
5 |
6 | # Connection Credentials
7 | creds_all:
8 | hostname: "{{ ansible_host|default(inventory_hostname) }}"
9 | username: ste
10 | password: testing123
11 |
# Commands used in the various roles
13 |
14 | # Network Topology Information
15 |
--------------------------------------------------------------------------------
/network_state_report/hosts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | all:
3 | children:
4 | router:
5 | children:
6 | iosxe:
7 | children:
8 | edge:
9 | hosts:
10 | dc1-csr-xnet1:
11 | ansible_host: 10.10.108.17
12 | switch:
13 | children:
14 | nxos:
15 | children:
16 | spine:
17 | hosts:
18 | dc1-n9k-spine1:
19 | ansible_host: 10.10.108.11
20 | dc1-n9k-spine2:
21 | ansible_host: 10.10.108.12
22 | leaf:
23 | hosts:
24 | dc1-n9k-leaf1:
25 | ansible_host: 10.10.108.13
26 | dc1-n9k-leaf2:
27 | ansible_host: 10.10.108.14
28 | border_leaf:
29 | hosts:
30 | dc1-n9k-border1:
31 | ansible_host: 10.10.108.15
32 | ios:
33 | children:
34 | dmz:
35 | hosts:
36 | dc1-vios-sw1:
37 | ansible_host: 10.10.108.18
38 | firewall:
39 | children:
40 | asa:
41 | hosts:
42 | dc1-asav-xfw1:
43 | ansible_host: 10.10.108.16
44 | bigip:
45 | children:
46 | ltm:
47 | hosts:
48 | dc1-ltm-lb1:
49 | ansible_host: 10.10.108.19
50 |
--------------------------------------------------------------------------------
/network_state_report/playbook_main.yml:
--------------------------------------------------------------------------------
---
# This playbook creates tables of the state of different elements within the network

- name: "Create file structure"
  hosts: localhost
  tasks:
  # 1. Create the environment to save dynamically created files to
  - block:
    - name: SYS >> Deleting old temporary table directory
      file: path="{{ tmp_path }}" state=absent
      changed_when: False
    - name: SYS >> Creating temporary table directory
      file:
        path: "{{ tmp_path }}"
        state: directory
      changed_when: False
      check_mode: False
    tags: report

# 2. Run all the plays to gather output
- name: "Gather BGP output"
  hosts: nxos:router
  connection: local
  roles:
    - bgp

- name: "Gather Edge output"
  hosts: router:firewall
  connection: local
  roles:
    - edge

- name: "Gather interface outputs"
  hosts: switch:router:firewall
  connection: local
  roles:
    - interfaces

- name: "Gather L2 table outputs"
  hosts: dmz:leaf
  connection: local
  roles:
    - l2_tables

# NOTE(review): these two roles use cli_command, hence the network_cli connection
- name: "Gather L3 tables and OSPF outputs"
  hosts: router:firewall:nxos
  connection: network_cli
  roles:
    - l3_tables
    - ospf

- name: "Gather VIP outputs"
  hosts: ltm
  connection: local
  roles:
    - vips

# 3. Create tables from all the gathered output
- name: "Create Tables"
  hosts: localhost
  connection: local
  roles:
    - report

  # 4. Create a date fact (variable) and join tables in a time-stamped report
  tasks:
  - name: SYS >> Creating report from generated tables
    assemble:
      src: "{{ tmp_path }}"
      dest: "{{ dir_path }}/network_state_{{ lookup('pipe','date +%Y-%m-%d_%H%M') }}.txt"
    changed_when: False
    check_mode: False
    tags: report
--------------------------------------------------------------------------------
/network_state_report/roles/bgp/filter_plugins/bgp_filter.py:
--------------------------------------------------------------------------------
1 |
2 |
class FilterModule(object):
    """Ansible filters that condense BGP state output into per-device summary rows."""

    def filters(self):
        return {
            'bgp_filter': self.neigh_filter,
            'csr_bgp_filter': self.csr_neigh_filter
        }

    # Summarise NAPALM get_bgp_neighbors facts into a single row:
    # [hostname, expected_neighbors, enabled_neighbors, up_neighbors, received_prefixes]
    def neigh_filter(self, bgp_output, inventory_hostname, bgp_neigh):
        enabled_bgp_neigh = 0
        up_bgp_neigh = 0
        bgp_pfxrcd = 0
        for vrf in bgp_output['napalm_bgp_neighbors'].values():
            for peer in vrf['peers'].values():
                # Disabled peers contribute nothing to any of the counters
                if peer['is_enabled'] is not True:
                    continue
                enabled_bgp_neigh += 1
                if peer['is_up'] is True:
                    up_bgp_neigh += 1
                # Sum received prefixes across every address-family of the peer
                for af in peer['address_family'].values():
                    bgp_pfxrcd += int(af['received_prefixes'])
        return [inventory_hostname, bgp_neigh, enabled_bgp_neigh, up_bgp_neigh, bgp_pfxrcd]

    # Summarise Genie-parsed 'show bgp all summary' output into a single row
    # (same row layout as neigh_filter, used for csr1000v where NAPALM fails)
    def csr_neigh_filter(self, bgp_output, inventory_hostname, bgp_neigh):
        enabled_bgp_neigh = 0
        up_bgp_neigh = 0
        bgp_pfxrcd = 0
        for vrf in bgp_output['vrf'].values():
            for peer in vrf['neighbor'].values():
                enabled_bgp_neigh += 1
                for af in peer['address_family'].values():
                    # A purely numeric State/PfxRcd column means the session is Established
                    if (af['state_pfxrcd']).isdigit() is True:
                        up_bgp_neigh += 1
                        bgp_pfxrcd += int(af['state_pfxrcd'])
        return [inventory_hostname, bgp_neigh, enabled_bgp_neigh, up_bgp_neigh, bgp_pfxrcd]
43 |
--------------------------------------------------------------------------------
/network_state_report/roles/bgp/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Gathers BGP neighbor state, condenses it into a per-device summary list
# (bgp_table), then collects every host's list into a single localhost fact.

- block:
  - name: "NET >> Gather NXOS output"
    napalm_get_facts:
      provider: "{{ creds_all }}"
      dev_os: "{{ os }}"
      filter:
        - "bgp_neighbors"
    register: bgp_output

  - name: "SYS >> Pass through Python Filter"
    set_fact:       # Passes output through the filter with the bgp_neigh variable
      bgp_table: "{{ bgp_output.ansible_facts | bgp_filter(inventory_hostname, bgp_neigh) }}"
    changed_when: False
  tags: [bgp]
  when: os == 'nxos'

# NAPALM get_bgp_neighbors doesn't work with csr1000v so use an alternative method
- block:
  - name: "NET >> Get IOSXE output"
    napalm_cli:
      provider: "{{ creds_all }}"
      dev_os: "{{ os }}"
      args:
        commands:
          - show bgp all summary
    register: bgp_output

  - name: SYS >> Read in parse_genie role
    include_role:
      name: clay584.parse_genie

  - name: SYS >> Parse BGP
    set_fact:
      genie_bgp: "{{ bgp_output.cli_results['show bgp all summary'] | parse_genie(command='show bgp all summary', os=os) }}"

  - name: "SYS >> Pass through Python Filter"
    set_fact:       # Passes output through the filter with the bgp_neigh variable
      bgp_table: "{{ genie_bgp | csr_bgp_filter(inventory_hostname, bgp_neigh) }}"
    changed_when: False
  tags: [bgp]
  when: os == 'ios'

# Creates a localhost fact that is a list of all the other hosts' facts (lists)
- name: "SYS >> Creating nested list of all outputs"
  set_fact:         # The if statement stops errors when there is only one device, as a list cannot be created
    list_bgp_table: |
      {% if bgp_table | length >= 2 %}
      {% set x = [] %}
      {% for host in ansible_play_hosts %}
      {% do x.append(hostvars[host].bgp_table) %}
      {% endfor %}
      {{ x|join(',') }}
      {% endif %}
  run_once: True    # Fixed: was 'once', which is not a valid boolean for run_once
  delegate_to: localhost
  delegate_facts: True    # What makes it a localhost_fact
  tags: [bgp]
59 |
--------------------------------------------------------------------------------
/network_state_report/roles/edge/filter_plugins/edge_filter.py:
--------------------------------------------------------------------------------
class FilterModule(object):
    """Ansible filter that summarises edge (NAT/VPN) command output."""

    def filters(self):
        return {
            'edge_filter': self.edge_filter,
        }

    # Build [hostname, expected_vpn_peers, up_vpns, nat_translations] from CLI output.
    # edge_output is the napalm_cli 'cli_results' dict keyed by command string.
    def edge_filter(self, edge_output, inventory_hostname, os, vpn_peer):
        vpn_cnt = 0
        nat_cnt = 0  # Default so an unrecognised OS cannot raise NameError
        # Filter commands run on ASA outputs
        if os == 'asa':
            # 'show xlate count' output starts with the in-use translation count
            nat_cnt = int(edge_output['show xlate count'].split(' ')[0])
            # Each established L2L tunnel prints a 'Connection' line
            for line in edge_output['show vpn-sessiondb l2l'].splitlines():
                if 'Connection' in line:
                    vpn_cnt += 1
        # Filter commands run on CSR outputs
        elif os == 'ios':
            # 'show ip nat translations total' output ends with the total count
            nat_cnt = int(edge_output['show ip nat translations total'].split(' ')[-1])
            for line in edge_output['show crypto session'].splitlines():
                if 'UP-ACTIVE' in line:
                    vpn_cnt += 1
        return [inventory_hostname, vpn_peer, vpn_cnt, nat_cnt]
24 |
--------------------------------------------------------------------------------
/network_state_report/roles/edge/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Gathers NAT/VPN edge state, condenses it into a per-device summary list
# (edge_table), then collects every host's list into a single localhost fact.

- block:
  - name: "NET >> Gathering all outputs"
    napalm_cli:
      provider: "{{ creds_all }}"
      dev_os: "{{ os }}"
      args:
        commands: "{{ edge_cmds }}"    # Commands come from the group_vars
    register: edge_output

  - name: "SYS >> Passing through Python Filter"
    set_fact:
      edge_table: "{{ edge_output.cli_results | edge_filter(inventory_hostname, os, vpn_peer) }}"
    changed_when: false
  tags: [edge]

# Creates a localhost fact that is a list of all the other hosts' facts (lists)
- name: "SYS >> Creating nested list of all outputs"
  set_fact:         # The if statement stops errors when there is only one device, as a list cannot be created
    list_edge_table: |
      {% if edge_table | length >= 2 %}
      {% set x = [] %}
      {% for host in ansible_play_hosts %}
      {% do x.append(hostvars[host].edge_table) %}
      {% endfor %}
      {{ x|join(',') }}
      {% endif %}
  run_once: True    # Fixed: was 'once', which is not a valid boolean for run_once
  delegate_to: localhost
  delegate_facts: True    # What makes it a localhost_fact
  tags: [edge]
32 |
--------------------------------------------------------------------------------
/network_state_report/roles/interfaces/filter_plugins/interfaces_filter.py:
--------------------------------------------------------------------------------
class FilterModule(object):
    """Ansible filter that summarises NAPALM interface and LLDP facts."""

    def filters(self):
        return {
            'itf_filter': self.itf_filter,
        }

    # Build [hostname, enabled_ints, up_ints, lldp_neighbors] from the looped
    # napalm_get_facts 'results' list (interfaces first, lldp_neighbors second).
    def itf_filter(self, itf_output, inventory_hostname):
        enabled_int = 0
        up_int = 0
        # ASA output has no LLDP result, so default to 0; previously any result
        # list longer than two elements left lldp_neigh undefined (NameError)
        lldp_neigh = 0
        if len(itf_output) >= 2:
            lldp_neigh = len(itf_output[1]['ansible_facts']['napalm_lldp_neighbors'])
        # Counting enabled and operationally up interfaces
        for intf in itf_output[0]['ansible_facts']['napalm_interfaces'].values():
            if intf['is_enabled'] is True:
                enabled_int += 1
            if intf['is_up'] is True:
                up_int += 1
        return [inventory_hostname, enabled_int, up_int, lldp_neigh]
24 |
--------------------------------------------------------------------------------
/network_state_report/roles/interfaces/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Gathers interface and LLDP facts, condenses them into a per-device summary
# list (interface_table), then collects every host's list into a localhost fact.

- block:
  - name: "NET >> Gathering all outputs"
    napalm_get_facts:
      provider: "{{ creds_all }}"
      dev_os: "{{ os }}"
      filter: "{{ item }}"
    register: itf_output
    loop: "{{ itf_cmds }}"

  # A loop is used because the ASA cannot take the lldp filter
  - name: "SYS >> Passing through Python Filter"
    set_fact:       # Passes output through the filter
      interface_table: "{{ itf_output.results | itf_filter(inventory_hostname) }}"
    changed_when: False
  tags: [itf]

# Creates a localhost fact that is a list of all the other hosts' facts (lists)
- name: "SYS >> Creating nested list of all outputs"
  set_fact:         # The if statement stops errors when there is only one device, as a list cannot be created
    list_interface_table: |
      {% if interface_table | length >= 2 %}
      {% set x = [] %}
      {% for host in ansible_play_hosts %}
      {% do x.append(hostvars[host].interface_table) %}
      {% endfor %}
      {{ x|join(',') }}
      {% endif %}
  run_once: True    # Fixed: was 'once', which is not a valid boolean for run_once
  delegate_to: localhost
  delegate_facts: True    # What makes it a localhost_fact
  tags: [itf]
34 |
--------------------------------------------------------------------------------
/network_state_report/roles/l2_tables/filter_plugins/l2_tables_filter.py:
--------------------------------------------------------------------------------
class FilterModule(object):
    """Ansible filter that summarises Genie-parsed layer 2 tables."""

    def filters(self):
        return {
            'l2_filter': self.l2_filter,
        }

    # Build [hostname, vlans, macs, cfg_pc, up_pc, pc_members, up_pc_members]
    # from the Genie-parsed vlan, mac-table and port-channel data structures.
    def l2_filter(self, genie_vlan, inventory_hostname, genie_mac, genie_pc):
        vlans = len(genie_vlan['vlans'])
        # Total learnt MAC addresses across every VLAN
        macs = sum(len(v['mac_addresses'])
                   for v in genie_mac['mac_table']['vlans'].values())
        # Port-channel counts: configured, operationally up, member ports, up members
        configured_pc = len(genie_pc['interfaces'])
        up_pc = 0
        pc_members = 0
        up_pc_memb = 0
        for po in genie_pc['interfaces'].values():
            if po['oper_status'] == 'up':
                up_pc += 1
            for member in po['members'].values():
                pc_members += 1
                if member['flags'] == 'P':  # 'P' flag marks a bundled (up) member port
                    up_pc_memb += 1
        return [inventory_hostname, vlans, macs, configured_pc, up_pc, pc_members, up_pc_memb]
31 |
--------------------------------------------------------------------------------
/network_state_report/roles/l2_tables/tasks/main.yml:
--------------------------------------------------------------------------------
---
# This role uses Genie parsers to turn the raw CLI output into structured data,
# builds a per-device summary list (l2_table), then collects every host's list
# into a single localhost fact.

- block:
  - name: "NET >> Gathering all outputs"
    napalm_cli:
      provider: "{{ creds_all }}"
      dev_os: "{{ os }}"
      args:
        commands: "{{ l2_cmds }}"    # Commands come from the group_vars
    register: l2_output

  - name: SYS >> Read in parse_genie role
    include_role:
      name: clay584.parse_genie

  # Uses Cisco Genie to parse the data into a proper data structure
  - name: SYS >> Parse VLAN and MAC table
    set_fact:
      genie_vlan: "{{ l2_output.cli_results['show vlan'] | parse_genie(command='show vlan', os=os) }}"
      genie_mac: "{{ l2_output.cli_results['show mac address-table'] | parse_genie(command='show mac address-table', os=os) }}"

  # The port-channel command differs between IOS and NXOS
  - name: SYS >> Parse Port-Channel table (IOS)
    set_fact:
      genie_pc: "{{ l2_output.cli_results['show etherchannel summary'] | parse_genie(command='show etherchannel summary', os=os) }}"
    when: os == 'ios'
  - name: SYS >> Parse PC table (NXOS)
    set_fact:
      genie_pc: "{{ l2_output.cli_results['show port-channel summary'] | parse_genie(command='show port-channel summary', os=os) }}"
    when: os == 'nxos'

  - name: "SYS >> Passing through Python Filter"
    set_fact:       # Passes the parsed tables through the filter
      l2_table: "{{ genie_vlan | l2_filter(inventory_hostname, genie_mac, genie_pc) }}"
    changed_when: False
  tags: [l2]

# Creates a localhost fact that is a list of all the other hosts' facts (lists)
- name: "SYS >> Creating nested list of all outputs"
  set_fact:         # The if statement stops errors when there is only one device, as a list cannot be created
    list_l2_table: |
      {% if l2_table | length >= 2 %}
      {% set x = [] %}
      {% for host in ansible_play_hosts %}
      {% do x.append(hostvars[host].l2_table) %}
      {% endfor %}
      {{ x|join(',') }}
      {% endif %}
  run_once: True    # Fixed: was 'once', which is not a valid boolean for run_once
  delegate_to: localhost
  delegate_facts: True    # What makes it a localhost_fact
  tags: [l2]
52 |
--------------------------------------------------------------------------------
/network_state_report/roles/l3_tables/filter_plugins/l3_tables_filter.py:
--------------------------------------------------------------------------------
class FilterModule(object):
    """Ansible filter that summarises ARP and routing table command output."""

    def filters(self):
        return {
            'l3_filter': self.l3_filter,
        }

    # Build [hostname, arp_entries, route_entries] from the cli_command loop results
    def l3_filter(self, l3_output, inventory_hostname):
        arp_result = l3_output[0]
        if arp_result['item'] == "show ip arp summary":
            # CSR output: the entry count is the first word
            arps = int(arp_result['stdout'].split(' ')[0].strip())
        else:
            # All other devices: the count follows a colon-separated label
            arps = int(arp_result['stdout'].split(':')[1].strip())
        # Output was pre-filtered, so each line is one routing table entry
        routes = len(l3_output[1]['stdout'].splitlines())
        return [inventory_hostname, arps, routes]
17 |
--------------------------------------------------------------------------------
/network_state_report/roles/l3_tables/tasks/main.yml:
--------------------------------------------------------------------------------
---
# The Ansible command module is used because '| include' cannot be used with
# NAPALM API based connections.

- block:
  - name: "NET >> Gathering all outputs"
    cli_command:
      command: "{{ item }}"    # Commands come from the group_vars
    register: l3_table_output
    loop: "{{ l3_cmds }}"

  - name: "SYS >> Passing through Python Filter"
    set_fact:       # Passes output through the filter
      l3_table: "{{ l3_table_output['results'] | l3_filter(inventory_hostname) }}"
    changed_when: False
  tags: [l3]

# Creates a localhost fact that is a list of all the other hosts' facts (lists)
- name: "SYS >> Creating nested list of all outputs"
  set_fact:         # The if statement stops errors when there is only one device, as a list cannot be created
    list_l3_table: |
      {% if l3_table | length >= 2 %}
      {% set x = [] %}
      {% for host in ansible_play_hosts %}
      {% do x.append(hostvars[host].l3_table) %}
      {% endfor %}
      {{ x|join(',') }}
      {% endif %}
  run_once: True    # Fixed: was 'once', which is not a valid boolean for run_once
  delegate_to: localhost
  delegate_facts: True    # What makes it a localhost_fact
  tags: [l3]
31 |
--------------------------------------------------------------------------------
/network_state_report/roles/ospf/filter_plugins/ospf_filter.py:
--------------------------------------------------------------------------------
class FilterModule(object):
    """Ansible filter that summarises OSPF neighbor and database output."""

    def filters(self):
        return {
            'ospf_filter': self.ospf_filter,
        }

    # Build [hostname, expected_neighbors, up_neighbors, total_lsas] from the
    # cli_command loop results (neighbors first, database summary second).
    def ospf_filter(self, ospf_output, inventory_hostname, ospf_neigh):
        # Output was pre-filtered to FULL adjacencies, so each line is one up neighbor
        up_ospf_neigh = len(ospf_output[0]['stdout'].splitlines())
        # Sum the per-process LSA counts from the 'Total <n>' lines
        ospf_lsa = 0
        for line in ospf_output[1]['stdout'].splitlines():
            ospf_lsa += int(line.split('Total')[1].strip().split(' ')[0])
        return [inventory_hostname, ospf_neigh, up_ospf_neigh, ospf_lsa]
17 |
--------------------------------------------------------------------------------
/network_state_report/roles/ospf/tasks/main.yml:
--------------------------------------------------------------------------------
---
# The Ansible command module is used because '| include' cannot be used with
# NAPALM API based connections.

- block:
  - name: "NET >> Gathering all outputs"
    cli_command:
      command: "{{ item }}"    # Commands come from the group_vars
    register: ospf_output
    loop: "{{ ospf_cmds }}"

  - name: "SYS >> Passing through Python Filter"
    set_fact:       # Passes output through the filter with the ospf_neigh variable
      ospf_table: "{{ ospf_output['results'] | ospf_filter(inventory_hostname, ospf_neigh) }}"
    changed_when: False
  tags: [ospf]

# Creates a localhost fact that is a list of all the other hosts' facts (lists)
- name: "SYS >> Creating nested list of all outputs"
  set_fact:         # The if statement stops errors when there is only one device, as a list cannot be created
    list_ospf_table: |
      {% if ospf_table | length >= 2 %}
      {% set x = [] %}
      {% for host in ansible_play_hosts %}
      {% do x.append(hostvars[host].ospf_table) %}
      {% endfor %}
      {{ x|join(',') }}
      {% endif %}
  run_once: True    # Fixed: was 'once', which is not a valid boolean for run_once
  delegate_to: localhost
  delegate_facts: True    # What makes it a localhost_fact
  tags: [ospf]
31 |
--------------------------------------------------------------------------------
/network_state_report/roles/report/filter_plugins/report_filter.py:
--------------------------------------------------------------------------------
1 | from prettytable import PrettyTable
2 |
class FilterModule(object):
    """Ansible filters that render the gathered per-device state lists as
    titled PrettyTable text blocks for the final report."""

    def filters(self):
        return {
            'bgp_table': self.bgp_table,
            'edge_table': self.edge_table,
            'interface_table': self.interface_table,
            'l2_table': self.l2_table,
            'l3_table': self.l3_table,
            'ospf_table': self.ospf_table,
            'vip_table': self.vip_table
        }

    # Shared renderer for every table: 'rows' is either a single flat row
    # (one device) or a list of rows (many devices). Returns the table as a
    # string framed by a '===== <title> =====' banner.
    def _build_table(self, title, field_names, rows):
        table = PrettyTable()
        table.field_names = field_names
        # NOTE(review): 'Information' is not a field in any of these tables, so
        # this align call is a no-op; kept to match previous output exactly
        table.align['Information'] = 'l'
        if type(rows[0]) is list:  # If is a list of lists iterate
            for row in rows:
                table.add_row(row)
        else:
            table.add_row(rows)  # If single list add to table
        return '=' * 5 + ' {} '.format(title) + '=' * 5 + '\n\n{}\n\n'.format(table)

    def bgp_table(self, list_bgp_table):
        return self._build_table(
            'BGP Table',
            ['Device', 'Expected Peers', 'Enabled Peers', 'UP Peers', 'pfxrcd'],
            list_bgp_table)

    def edge_table(self, list_edge_table):
        return self._build_table(
            'XNET Edge Table',
            ['Device', 'Total L2L VPNs', 'Up L2L VPNs', 'Total NAT Translations'],
            list_edge_table)

    def l2_table(self, list_l2_table):
        return self._build_table(
            'Layer2 Table',
            ['Device', 'VLANs', 'MACs', 'cfg POs', 'UP POs', 'PO member ports', 'UP PO member ports'],
            list_l2_table)

    def l3_table(self, list_l3_table):
        return self._build_table(
            'Layer3 Table',
            ['Device', 'ARP Table', 'Routing Table'],
            list_l3_table)

    def interface_table(self, list_interface_table):
        return self._build_table(
            'Interface Table',
            ['Device', 'Enabled Interfaces', 'UP Interfaces', 'LLDP Neighbors'],
            list_interface_table)

    def ospf_table(self, list_ospf_table):
        # Fixed header typo: was 'Expected Neighors'
        return self._build_table(
            'OSPF Table',
            ['Device', 'Expected Neighbors', 'UP Neighbors', 'Total LSAs'],
            list_ospf_table)

    def vip_table(self, list_vip_table):
        return self._build_table(
            'VIP Table',
            ['Device', 'Enabled VIPs', 'UP VIPs', 'Total Nodes', 'UP Nodes'],
            list_vip_table)
98 |
99 | ####################################### Used for testing a table #######################################
100 | # def def table(self, list_interface_table):
101 | # return
102 |
--------------------------------------------------------------------------------
/network_state_report/roles/report/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Renders each gathered localhost fact (a list of per-device rows) into a
# PrettyTable text file under tmp_path; the files are later assembled into the
# final time-stamped report by the main playbook.

- name: "SYS >> Creating BGP Table"
  copy:
    dest: "{{ tmp_path }}/bgp_table.txt"
    content: "{{ list_bgp_table | bgp_table }}"
  tags: [bgp]

- name: "SYS >> Creating Edge Table"
  copy:
    dest: "{{ tmp_path }}/edge_table.txt"
    content: "{{ list_edge_table | edge_table }}"
  tags: [edge]

- name: "SYS >> Creating Interface Table"
  copy:
    dest: "{{ tmp_path }}/interface_table.txt"
    content: "{{ list_interface_table | interface_table }}"
  tags: [itf]

- name: "SYS >> Creating L2 Table"
  copy:
    dest: "{{ tmp_path }}/l2_table.txt"
    content: "{{ list_l2_table | l2_table }}"
  tags: [l2]

- name: "SYS >> Creating L3 Table"
  copy:
    dest: "{{ tmp_path }}/l3_table.txt"
    content: "{{ list_l3_table | l3_table }}"
  tags: [l3]

- name: "SYS >> Creating OSPF Table"
  copy:
    dest: "{{ tmp_path }}/ospf_table.txt"
    content: "{{ list_ospf_table | ospf_table }}"
  tags: [ospf]

- name: "SYS >> Creating VIP Table"
  copy:
    dest: "{{ tmp_path }}/vip_table.txt"
    content: "{{ list_vip_table | vip_table }}"
  tags: [vip]
43 |
--------------------------------------------------------------------------------
/network_state_report/roles/vips/filter_plugins/vip_filter.py:
--------------------------------------------------------------------------------
1 | class FilterModule(object):
2 | def filters(self):
3 | return {
4 | 'vip_filter': self.vip_filter,
5 | }
6 |
7 | # Create a list of OSPF facts from the ospf output
8 | def vip_filter(self, vip_output, inventory_hostname):
9 | enabled_vip = 0
10 | up_vip = 0
11 | total_node = 0
12 | up_node = 0
13 |
14 | # Count total and up VIPs
15 | for a in vip_output['virtual_servers']:
16 | if a['enabled'] == 'yes':
17 | enabled_vip += 1
18 | if a['availability_status'] == 'available':
19 | up_vip += 1
20 |
21 | # Count total and up Nodes
22 | for b in vip_output['ltm_pools']:
23 | total_node = total_node + int(b['member_count'])
24 | for b in vip_output['ltm_pools']:
25 | up_node = up_node + int(b['active_member_count'])
26 |
27 | vip_table = [inventory_hostname, enabled_vip, up_vip, total_node, up_node]
28 | return vip_table
29 |
--------------------------------------------------------------------------------
/network_state_report/roles/vips/tasks/main.yml:
--------------------------------------------------------------------------------
---
# Gathers BIG-IP virtual-server and pool facts, condenses them into a
# per-device summary list (vip_table), then collects every host's list into a
# single localhost fact.

- block:
  - name: NET >> Collecting BIG-IP facts
    bigip_device_facts:
      gather_subset:
        - virtual-servers
        - ltm-pools
      provider: "{{ creds_big_ip }}"
    register: vip_output
    delegate_to: localhost

  - name: "SYS >> Passing through Python Filter"
    set_fact:       # Passes output through the filter
      vip_table: "{{ vip_output | vip_filter(inventory_hostname) }}"
    changed_when: False
  tags: [vip]

# Creates a localhost fact that is a list of all the other hosts' facts (lists)
- name: "SYS >> Creating nested list of all outputs"
  set_fact:         # The if statement stops errors when there is only one device, as a list cannot be created
    list_vip_table: |
      {% if vip_table | length >= 2 %}
      {% set x = [] %}
      {% for host in ansible_play_hosts %}
      {% do x.append(hostvars[host].vip_table) %}
      {% endfor %}
      {{ x|join(',') }}
      {% endif %}
  run_once: True    # Fixed: was 'once', which is not a valid boolean for run_once
  delegate_to: localhost
  delegate_facts: True    # What makes it a localhost_fact
  tags: [vip]
33 |
--------------------------------------------------------------------------------
/stesworld_network_topology.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sjhloco/ip_auto_lab/a0c1a850157d2e7357fcd602ebd1b9910002da0d/stesworld_network_topology.pdf
--------------------------------------------------------------------------------