├── Data-Models
├── hosts
├── .gitignore
├── Transformation
│ ├── .gitignore
│ ├── inventory.j2
│ ├── Makefile
│ ├── network.yml
│ ├── create-configs.yml
│ ├── ios-config.j2
│ ├── transform-data.yml
│ └── device-data.j2
├── Validation
│ ├── host_vars
│ │ ├── S2.yml
│ │ └── S1.yml
│ ├── network.yml
│ ├── validate.sh
│ ├── hosts.schema.json
│ └── network.schema.json
├── Network_Dict
│ ├── host_vars
│ │ ├── S1.yml
│ │ └── S2.yml
│ ├── configs.yml
│ ├── network.yml
│ └── config.j2
├── Network_Prefix
│ ├── host_vars
│ │ ├── S1.yml
│ │ └── S2.yml
│ ├── config.j2
│ ├── configs.yml
│ ├── network.yml
│ ├── interfaces.j2
│ └── bgp.j2
├── Network
│ ├── host_vars
│ │ ├── S2.yml
│ │ └── S1.yml
│ ├── configs.yml
│ ├── network.yml
│ └── config.j2
├── Network_Macro
│ ├── host_vars
│ │ ├── S2.yml
│ │ └── S1.yml
│ ├── configs.yml
│ ├── network.yml
│ └── config.j2
├── BGP_AS
│ ├── configs.yml
│ ├── host_vars
│ │ ├── S1.yml
│ │ └── S2.yml
│ └── config.j2
├── BGP_IF
│ ├── configs.yml
│ ├── host_vars
│ │ ├── S1.yml
│ │ └── S2.yml
│ └── config.j2
├── Initial
│ ├── configs.yml
│ ├── host_vars
│ │ ├── S2.yml
│ │ └── S1.yml
│ └── config.j2
├── setup.sh
├── ansible.cfg
└── README.md
├── Collect-Printouts
├── dummy.j2
├── ansible.cfg
├── printouts.yml
├── gitcommit.sh
└── collect.yml
├── VIRL2Inventory
├── .gitignore
├── README.md
└── VIRL2Inventory.py
├── Git-to-Candidate
├── .gitignore
├── ansible.cfg
├── helpers
│ ├── banners.yml
│ └── enable_scp.yml
├── banners.yml
├── hosts
├── copy_candidate.yml
└── git_checkout.yml
├── Summary-Reports
├── .gitignore
├── framework
│ ├── template
│ │ ├── text.j2
│ │ ├── csv.j2
│ │ └── html.j2
│ ├── read
│ │ ├── vars.yml
│ │ ├── snmp.yml
│ │ └── facts.yml
│ ├── report
│ │ ├── save.yml
│ │ ├── text.yml
│ │ └── template.yml
│ ├── config
│ │ └── enable-snmp.yml
│ └── framework.yml
├── inventory
│ ├── csv.j2
│ ├── html.j2
│ └── report.yml
├── hosts-file
│ ├── cleanup.yml
│ ├── hosts.j2
│ ├── zone.j2
│ └── hosts.yml
└── README.md
├── Sample-Summary-Report
├── .gitignore
├── configlets
│ └── snmp.cfg
├── ansible.cfg
├── uptime
│ ├── uptime-text.j2
│ ├── read-vars.yml
│ ├── read-device.yml
│ ├── report-save.yml
│ ├── report-text.yml
│ ├── report-text-j2.yml
│ ├── html.j2
│ └── report-template.yml
├── hosts
├── uptime.yml
├── uptime-data.yml
├── uptime-data-yaml.yml
├── uptime-data-snmp.yml
├── uptime-text-fromvars.yml
├── deployConfig.yml
├── README.md
└── uptime-text.yml
├── DHCP-Pools
├── .gitignore
├── hosts
├── cleanup.j2
├── host_vars
│ └── gw.yml
├── ansible.cfg
├── private-setup.sh
├── pools.yml
├── cleanup.yml
├── check.yml
├── extract.j2
├── extract.yml
├── configure.yml
├── include
│ └── getPools.yml
├── pools.j2
└── README.md
├── Sample-Compliance-Check
├── .gitignore
├── group_vars
│ └── all.yml
├── ansible.cfg
├── run-checks.yml
├── reports
│ ├── json-debug.j2
│ └── json-simple.j2
├── hosts
├── tests
│ ├── snmp-server.yml
│ ├── snmp-community.yml
│ └── syslog-server.yml
├── do-checks.sh
├── report-results.yml
├── README.md
└── break-config.yml
├── DMVPN
├── .gitignore
├── check-facts.yaml
├── roles
│ ├── routing
│ │ ├── templates
│ │ │ ├── ibgp-LAN-interface.j2
│ │ │ ├── 30-ibgp-spoke.j2
│ │ │ └── 30-ibgp-hub.j2
│ │ └── tasks
│ │ │ └── main.yml
│ ├── dmvpn
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── 20-dmvpn.j2
│ ├── virl
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── 90-virl.j2
│ ├── libvirt
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── 90-libvirt.j2
│ └── base
│ │ ├── templates
│ │ ├── 99-common.j2
│ │ ├── 00-common.j2
│ │ └── 10-interfaces.j2
│ │ └── tasks
│ │ └── main.yml
├── group_vars
│ ├── spokes.yml
│ └── all.yml
├── deploy_setup.cfg
├── ansible.cfg
├── host_vars
│ ├── C1.yml
│ ├── C2.yml
│ ├── R1A.yml
│ ├── R1B.yml
│ ├── R3.yml
│ └── R2.yml
├── enable_scp.yml
├── libvirt
│ ├── ansible.cfg
│ ├── hosts-libvirt.yml
│ ├── vagrant-libvirt.xml
│ ├── README.md
│ └── Vagrantfile
├── setup-libvirt.sh
├── hosts
├── build.yml
└── deploy_scp.yml
├── Description-to-Links
├── .gitignore
├── ansible.cfg
├── hosts
├── config-enable-snmp.yml
├── links.j2
├── group_vars
│ └── all.yml
├── graph.j2
├── extract-links.yml
└── README.md
├── Compare-State-Snapshots
├── .gitignore
├── tests
│ ├── hosts
│ └── test_remove_keys.yml
├── fix
│ └── deploy_lldp.yml
├── savers
│ ├── yaml.j2
│ └── yaml.yml
├── ansible.cfg
├── getters
│ ├── ios
│ │ ├── ospf_interface.yml
│ │ ├── ospf_neighbor.yml
│ │ ├── parse_ospf_neighbor.filter
│ │ └── parse_ospf_interfaces.filter
│ └── napalm
│ │ └── get-facts.yml
├── hosts
├── get-state.yml
└── Script.md
├── Config-to-Git
├── .gitignore
├── tests
│ ├── ansible.cfg
│ ├── show_run.expected
│ ├── clean.yml
│ └── show_run.original
├── group_vars
│ └── all.yml
├── getandcommit.yml
├── filter_plugins
│ └── clean_config.py
├── gc_show.yml
├── gc_napalm.yml
├── git_commit.yml
├── README.md
├── gi_napalm.yml
└── gc_scp.yml
├── Trace-Executed-Commands
├── configLogging.cfg
├── disableLogging.cfg
├── ansible.cfg
├── enableLogging.cfg
├── sshKeys.cfg
├── trace.yml
└── README.md
├── .gitignore
├── LLDP-to-Graph-pyeznc
├── hosts
├── roles
│ └── README.md
├── graph-eznc.j2
├── LLDP-to-Graph-eznc.yml
└── README.md
├── OSPF-Deployment
├── .gitignore
├── lldp
│ ├── facts.yml
│ ├── wait.yml
│ ├── napalm_lldp_facts.yml
│ └── validate.yml
├── ios
│ ├── deploy_lldp.yml
│ ├── interface-config.j2
│ ├── deploy_ospf.yml
│ ├── deploy_interfaces.yml
│ ├── ospf-config-from-fabric.j2
│ ├── ospf-config.j2
│ └── verify_ospf.yml
├── ansible.cfg
├── tools
│ ├── clean.yml
│ ├── create_config_dir.yml
│ ├── log_changes_init.yml
│ └── log_changes.yml
├── model
│ ├── fabric-to-hosts.j2
│ ├── fabric-to-nodes.yml
│ ├── fabric-to-vars.j2
│ ├── fabric-to-vars.yml
│ └── fabric-to-nodes.j2
├── common
│ ├── config_ospf.yml
│ └── config_interfaces.yml
├── hosts.fqdn
├── addressing.yml
├── deploy.yml
├── validate-fabric.yml
├── README.md
├── fabric.yml
└── tests
│ ├── fabric-wrong-if.yml
│ └── fabric-wrong-node.yml
├── LLDP-to-Graph
├── .gitignore
├── ansible.cfg
├── neighbors.j2
├── format.j2
├── links.j2
├── simple-graph.j2
├── LLDP-test.yml
├── hosts
├── links-fix.j2
├── graph-simple.j2
├── LLDP-to-Graph.yml
├── graph.j2
├── disable-LLDP-on-edge.yml
└── README.md
├── AWS
├── ansible.cfg
├── instances.yml
├── params.yml
├── vpc-facts.yml
├── security-groups.yml
├── spinup.yml
├── instances.j2
├── cleanup.yml
├── subnets.yml
├── create-instances.yml
└── json2txt
├── Description-to-Fabric
├── ansible.cfg
├── hosts
├── get-connectivity
│ ├── remote-interface.yml
│ ├── internal-links.yml
│ └── interas-links.yml
├── config-enable-snmp.yml
├── create-fabric-model.yml
├── fabric.yml.j2
└── fabric.yml
├── Send-Message
├── message.yml
├── ansible.cfg
└── README.md
├── 3-router-setup
├── hosts.yml
├── ansible.cfg
├── setup.sh
└── README.md
├── 6-router-setup
├── ansible.cfg
├── libvirt
│ ├── ansible.cfg
│ ├── hosts-libvirt.yml
│ ├── vagrant-libvirt.xml
│ └── Vagrantfile
├── setup.sh
├── setup-libvirt.sh
├── hosts.yml
└── README.md
├── .gitmodules
├── tools
└── include
│ └── snapshot.yml
├── backup-multicontext-asa-configurations.yml
└── Plugins
└── filter
└── list.py
/Data-Models/hosts:
--------------------------------------------------------------------------------
1 | S1
2 | S2
3 |
--------------------------------------------------------------------------------
/Collect-Printouts/dummy.j2:
--------------------------------------------------------------------------------
1 | {{ item.1 }}
--------------------------------------------------------------------------------
/Data-Models/.gitignore:
--------------------------------------------------------------------------------
1 | */*.cfg
2 |
--------------------------------------------------------------------------------
/VIRL2Inventory/.gitignore:
--------------------------------------------------------------------------------
1 | *.virl
2 |
--------------------------------------------------------------------------------
/Git-to-Candidate/.gitignore:
--------------------------------------------------------------------------------
1 | Candidate
2 |
--------------------------------------------------------------------------------
/Summary-Reports/.gitignore:
--------------------------------------------------------------------------------
1 | /results
2 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/.gitignore:
--------------------------------------------------------------------------------
1 | /results
2 |
--------------------------------------------------------------------------------
/DHCP-Pools/.gitignore:
--------------------------------------------------------------------------------
1 | /configs
2 | /printouts
3 | /pools
4 |
--------------------------------------------------------------------------------
/Sample-Compliance-Check/.gitignore:
--------------------------------------------------------------------------------
1 | /results
2 | /configs
3 |
--------------------------------------------------------------------------------
/DHCP-Pools/hosts:
--------------------------------------------------------------------------------
1 | gw ansible_user=cisco ansible_ssh_pass=cisco
2 |
--------------------------------------------------------------------------------
/DMVPN/.gitignore:
--------------------------------------------------------------------------------
1 | ansible.log
2 | compiled
3 | configs
4 | goscp
5 |
--------------------------------------------------------------------------------
/Description-to-Links/.gitignore:
--------------------------------------------------------------------------------
1 | working
2 | links.txt
3 | *.png
4 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/.gitignore:
--------------------------------------------------------------------------------
1 | snap_*
2 | snapshot
3 | *.retry
4 |
--------------------------------------------------------------------------------
/Config-to-Git/.gitignore:
--------------------------------------------------------------------------------
1 | *.retry
2 | ansible.log
3 | napalm-ansible
4 |
--------------------------------------------------------------------------------
/Data-Models/Transformation/.gitignore:
--------------------------------------------------------------------------------
1 | hosts.yml
2 | host_vars
3 | configs
4 |
--------------------------------------------------------------------------------
/DMVPN/check-facts.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - ios_facts:
5 |
--------------------------------------------------------------------------------
/Data-Models/Validation/host_vars/S2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S2
3 | bgp_as: 65002
4 |
--------------------------------------------------------------------------------
/Data-Models/Network_Dict/host_vars/S1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S1
3 | bgp_as: 65001
4 |
--------------------------------------------------------------------------------
/Data-Models/Network_Dict/host_vars/S2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S2
3 | bgp_as: 65002
4 |
--------------------------------------------------------------------------------
/Data-Models/Network_Prefix/host_vars/S1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S1
3 | bgp_as: 65001
4 |
--------------------------------------------------------------------------------
/Data-Models/Network_Prefix/host_vars/S2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S2
3 | bgp_as: 65002
4 |
--------------------------------------------------------------------------------
/Trace-Executed-Commands/configLogging.cfg:
--------------------------------------------------------------------------------
1 | archive
2 | log config
3 | notify syslog
4 | !
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | Private/
2 | *.retry
3 | ansible.log
4 | sftp-config.json
5 | *.pyc
6 | .DS_Store
7 |
--------------------------------------------------------------------------------
/LLDP-to-Graph-pyeznc/hosts:
--------------------------------------------------------------------------------
1 | [your_tag_if_any]
2 | router1.example.com
3 | router2.example.com
4 |
--------------------------------------------------------------------------------
/OSPF-Deployment/.gitignore:
--------------------------------------------------------------------------------
1 | hosts
2 | nodes.yml
3 | WAN
4 | /configs
5 | /printouts
6 | napalm-ansible
7 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/tests/hosts:
--------------------------------------------------------------------------------
1 | E1 ansible_host=172.16.1.110 ansible_user=cisco ansible_ssh_pass=cisco
2 |
--------------------------------------------------------------------------------
/Data-Models/Validation/host_vars/S1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S1
3 | bgp_as: 65001
4 | description: Unexpected
5 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/.gitignore:
--------------------------------------------------------------------------------
1 | *.retry
2 | ansible.log
3 | napalm-ansible
4 | snap_data
5 | network.*
6 | *.txt
7 |
--------------------------------------------------------------------------------
/OSPF-Deployment/lldp/facts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - include_tasks: napalm_lldp_facts.yml
5 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/configlets/snmp.cfg:
--------------------------------------------------------------------------------
1 | snmp-server community cisco RO
2 | snmp-server contact admin@lab.local
--------------------------------------------------------------------------------
/DHCP-Pools/cleanup.j2:
--------------------------------------------------------------------------------
1 | # Extra pools to remove
2 | {% for pool in extraPools %}
3 | no ip dhcp pool {{pool}}
4 | {% endfor %}
--------------------------------------------------------------------------------
/DMVPN/roles/routing/templates/ibgp-LAN-interface.j2:
--------------------------------------------------------------------------------
1 | {# set OSPF area on interface #}
2 | ip ospf 1 area 0
3 | !
4 | {# #}
--------------------------------------------------------------------------------
/Trace-Executed-Commands/disableLogging.cfg:
--------------------------------------------------------------------------------
1 | no event manager applet CLIlog
2 | archive
3 | log config
4 | no notify syslog
5 |
--------------------------------------------------------------------------------
/AWS/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | gathering=explicit
3 | retry_files_enabled=false
4 | transport=local
5 | force_color=1
6 |
7 |
--------------------------------------------------------------------------------
/DHCP-Pools/host_vars/gw.yml:
--------------------------------------------------------------------------------
1 | ---
2 | interfaces:
3 | Vlan1:
4 | ip: 10.217.233.1/24
5 | dhcp: enabled
6 | domain: nil.si
--------------------------------------------------------------------------------
/DMVPN/group_vars/spokes.yml:
--------------------------------------------------------------------------------
1 | ---
2 | uplink_vrf:
3 | 0: { name: "ISP_A", id: "65000:1" }
4 | 1: { name: "ISP_B", id: "65000:2" }
--------------------------------------------------------------------------------
/Sample-Compliance-Check/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | snmp_server: 10.0.0.1
3 | snmp_community: myPass
4 | syslog_server: 10.0.0.1
5 |
--------------------------------------------------------------------------------
/DHCP-Pools/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 |
--------------------------------------------------------------------------------
/Data-Models/Network/host_vars/S2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S2
3 | bgp_as: 65002
4 | interfaces:
5 | Vlan101:
6 | ip: 192.168.2.1/24
7 |
--------------------------------------------------------------------------------
/Data-Models/Network_Prefix/config.j2:
--------------------------------------------------------------------------------
1 | hostname {{ hostname }}
2 | !
3 | {% include 'interfaces.j2' %}
4 | !
5 | {% include 'bgp.j2' %}
6 |
--------------------------------------------------------------------------------
/Config-to-Git/tests/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | gathering=explicit
3 | retry_files_enabled=false
4 | filter_plugins=../filter_plugins
5 |
--------------------------------------------------------------------------------
/Data-Models/Network/host_vars/S1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S1
3 | bgp_as: 65001
4 |
5 | interfaces:
6 | Vlan101:
7 | ip: 192.168.1.1/24
8 |
--------------------------------------------------------------------------------
/Data-Models/Network_Macro/host_vars/S2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S2
3 | bgp_as: 65002
4 | interfaces:
5 | Vlan101:
6 | ip: 192.168.2.1/24
7 |
--------------------------------------------------------------------------------
/Git-to-Candidate/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 |
--------------------------------------------------------------------------------
/Collect-Printouts/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 |
--------------------------------------------------------------------------------
/Data-Models/Network_Macro/host_vars/S1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S1
3 | bgp_as: 65001
4 |
5 | interfaces:
6 | Vlan101:
7 | ip: 192.168.1.1/24
8 |
--------------------------------------------------------------------------------
/Description-to-Fabric/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 |
--------------------------------------------------------------------------------
/Description-to-Links/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | transport=local
5 | retry_files_enabled=false
6 |
--------------------------------------------------------------------------------
/Sample-Compliance-Check/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | transport=local
5 | retry_files_enabled=false
6 |
--------------------------------------------------------------------------------
/Trace-Executed-Commands/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 |
--------------------------------------------------------------------------------
/Data-Models/BGP_AS/configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - template:
5 | src: config.j2
6 | dest: "{{inventory_hostname}}.cfg"
7 |
--------------------------------------------------------------------------------
/Data-Models/BGP_IF/configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - template:
5 | src: config.j2
6 | dest: "{{inventory_hostname}}.cfg"
7 |
--------------------------------------------------------------------------------
/Data-Models/Initial/configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - template:
5 | src: config.j2
6 | dest: "{{inventory_hostname}}.cfg"
7 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/fix/deploy_lldp.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | name: Enable LLDP on IOS devices
4 | tasks:
5 | - ios_config: commands="lldp run"
6 |
--------------------------------------------------------------------------------
/Collect-Printouts/printouts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | printouts:
3 | - command: show running
4 | save: running
5 | - command: show startup
6 | save: startup
7 |
--------------------------------------------------------------------------------
/Data-Models/setup.sh:
--------------------------------------------------------------------------------
1 | #
2 | # shell script to set environment variables
3 | #
4 | echo "Will use ansible.cfg from `pwd`"
5 | export ANSIBLE_CONFIG=`pwd`/ansible.cfg
6 |
--------------------------------------------------------------------------------
/LLDP-to-Graph-pyeznc/roles/README.md:
--------------------------------------------------------------------------------
1 | ## Directory contents
2 | Put ansible-junos-stdlib from https://github.com/Juniper/ansible-junos-stdlib inside this directory
3 |
--------------------------------------------------------------------------------
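The README above asks you to place ansible-junos-stdlib inside this roles directory. A minimal sketch of one way to do that (assuming git is available and that the playbooks pick the role up from this directory by name):

$ cd LLDP-to-Graph-pyeznc/roles
$ git clone https://github.com/Juniper/ansible-junos-stdlib.git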
/Send-Message/message.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Send message to all TTYs
3 | hosts: all
4 | tasks:
5 | - ios_command:
6 | commands: "send *\nTesting...\n\x1A\n\n"
7 |
--------------------------------------------------------------------------------
/AWS/instances.yml:
--------------------------------------------------------------------------------
1 | ---
2 | vm:
3 | - name: i1
4 | ip: 172.31.16.16
5 | - name: i2
6 | ip: 172.31.16.18
7 | - name: db1
8 | ip: 172.31.131.31
9 | subnet: db
10 |
--------------------------------------------------------------------------------
/OSPF-Deployment/ios/deploy_lldp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Enable LLDP on fabric links
3 | #
4 | ---
5 | - name: Enable LLDP on fabric links
6 | ios_config:
7 | commands:
8 | - lldp run
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime/uptime-text.j2:
--------------------------------------------------------------------------------
1 | {% for host,facts in allhosts|dictsort %}
2 | {{'%-20s %8s'|format(facts.inventory_hostname,facts.ansible_sysuptime)}}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/template/text.j2:
--------------------------------------------------------------------------------
1 | {% for host,facts in hostvars|dictsort %}
2 | {{'%-20s %8s'|format(facts.inventory_hostname,facts.ansible_sysuptime)}}
3 | {% endfor %}
4 |
--------------------------------------------------------------------------------
/DMVPN/deploy_setup.cfg:
--------------------------------------------------------------------------------
1 | hostname {{ inventory_hostname }}
2 | ip scp server enable
3 | enable secret {{ ansible_become_password|default(ansible_ssh_pass) }}
4 | archive
5 | path flash:
6 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/template/csv.j2:
--------------------------------------------------------------------------------
1 | host,uptime
2 | {% for host,facts in hostvars|dictsort %}
3 | {{ facts.inventory_hostname }},{{ facts.ansible_sysuptime }}
4 | {% endfor %}
5 |
--------------------------------------------------------------------------------
/Data-Models/Network/configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - include_vars: network.yml
5 | - template:
6 | src: config.j2
7 | dest: "{{inventory_hostname}}.cfg"
8 |
--------------------------------------------------------------------------------
/Data-Models/Network_Dict/configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - include_vars: network.yml
5 | - template:
6 | src: config.j2
7 | dest: "{{inventory_hostname}}.cfg"
8 |
--------------------------------------------------------------------------------
/Data-Models/Network_Macro/configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - include_vars: network.yml
5 | - template:
6 | src: config.j2
7 | dest: "{{inventory_hostname}}.cfg"
8 |
--------------------------------------------------------------------------------
/Data-Models/Network_Prefix/configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - include_vars: network.yml
5 | - template:
6 | src: config.j2
7 | dest: "{{inventory_hostname}}.cfg"
8 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 | library=/usr/local/lib/python2.7/dist-packages/napalm_ansible
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime/read-vars.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Reads device facts from a YAML file instead of gathering them from the device
3 | #
4 | ---
5 | - include_vars: "{{results}}/{{inventory_hostname}}.yml"
6 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/read/vars.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Reads device facts from a YAML file instead of gathering them from the device
3 | #
4 | ---
5 | - include_vars: "{{results}}/{{inventory_hostname}}.yml"
6 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/savers/yaml.j2:
--------------------------------------------------------------------------------
1 | {% for k,v in hostvars[inventory_hostname].items() if 'state_' in k %}
2 | {{ k.replace('state_','') }}:
3 | {{ v|to_nice_yaml(indent=2)|indent(2,true) }}
4 | {% endfor %}
--------------------------------------------------------------------------------
/Data-Models/Transformation/inventory.j2:
--------------------------------------------------------------------------------
1 | # Ansible inventory generated from network data model
2 | #
3 | ---
4 | all:
5 | hosts:
6 | {% for hostname in nodes.keys() %}
7 | {{ hostname }}:
8 | {% endfor %}
9 |
--------------------------------------------------------------------------------
/Trace-Executed-Commands/enableLogging.cfg:
--------------------------------------------------------------------------------
1 | event manager applet CLIlog
2 | event cli pattern ".*" sync no skip no
3 | action 1.0 syslog priority informational msg "$_cli_msg"
4 | action 2.0 set _exit_status "1"
--------------------------------------------------------------------------------
/Config-to-Git/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | git_repository: ../Private/ConfigRepo
3 | git_branch: master
4 | clean_patterns:
5 | - "\\A[^!]*"
6 | - "ntp clock-period..."
7 | - "! Last configuration change..."
8 |
9 |
--------------------------------------------------------------------------------
/OSPF-Deployment/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 | library=/usr/local/lib/python2.7/dist-packages/napalm_ansible
7 | forks=10
8 |
--------------------------------------------------------------------------------
/DMVPN/roles/dmvpn/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: DMVPN and IPsec configuration
3 | template: src=20-dmvpn.j2 dest={{ build_dir }}/{{inventory_hostname}}/20-dmvpn.conf
4 | check_mode: no
5 | changed_when: false
6 |
--------------------------------------------------------------------------------
/DMVPN/roles/virl/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Build VIRL-specific configuration
3 | template: src=90-virl.j2 dest={{ build_dir }}/{{inventory_hostname}}/90-virl.conf
4 | check_mode: no
5 | changed_when: false
6 |
--------------------------------------------------------------------------------
/Data-Models/Network/network.yml:
--------------------------------------------------------------------------------
1 | links:
2 | - left_node: S1
3 | left_interface: GigabitEthernet0/1
4 | left_ip: 172.16.0.1/30
5 | right_node: S2
6 | right_interface: GigabitEthernet0/1
7 | right_ip: 172.16.0.2/30
8 |
--------------------------------------------------------------------------------
/DMVPN/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | transport=network_cli
5 | retry_files_enabled=false
6 | roles_path=.
7 | forks=10
8 |
9 | [persistent_connection]
10 | command_timeout=60
11 |
--------------------------------------------------------------------------------
/DMVPN/roles/libvirt/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Build libvirt-specific configuration
3 | template: src=90-libvirt.j2 dest={{ build_dir }}/{{inventory_hostname}}/90-libvirt.conf
4 | check_mode: no
5 | changed_when: false
6 |
--------------------------------------------------------------------------------
/Data-Models/Network_Macro/network.yml:
--------------------------------------------------------------------------------
1 | links:
2 | - left_node: S1
3 | left_interface: GigabitEthernet0/1
4 | left_ip: 172.16.0.1/30
5 | right_node: S2
6 | right_interface: GigabitEthernet0/1
7 | right_ip: 172.16.0.2/30
8 |
--------------------------------------------------------------------------------
/Data-Models/Transformation/Makefile:
--------------------------------------------------------------------------------
1 | configs/%.cfg: host_vars/%.yml hosts.yml
2 | ansible-playbook -i hosts.yml create-configs.yml
3 | touch $@
4 |
5 | hosts.yml: network.yml
6 | ansible-playbook transform-data.yml
7 | touch $@
8 |
--------------------------------------------------------------------------------
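A usage sketch for the Makefile above, run from the Transformation directory (node names S1/S2 taken from Data-Models/hosts; the host_vars and configs content is produced by the two playbooks, so the exact per-node targets depend on what transform-data.yml generates):

$ cd Data-Models/Transformation
$ make hosts.yml        # regenerate the inventory via transform-data.yml when network.yml changes
$ make configs/S1.cfg   # rebuild S1's configuration via create-configs.yml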
/Config-to-Git/tests/show_run.expected:
--------------------------------------------------------------------------------
1 | !
2 | !
3 | version 15.6
4 | service timestamps debug datetime msec
5 | service timestamps log datetime msec
6 | no service password-encryption
7 | !
8 | hostname R1
9 | !
10 | !
11 | end
--------------------------------------------------------------------------------
/Git-to-Candidate/helpers/banners.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Fix banners that trip IOS config replace
3 | #
4 | ---
5 | - hosts: all
6 | tasks:
7 | - name: Fix banners that trip IOS config replace
8 | ios_config:
9 | match: none
10 | src: banners.cfg
--------------------------------------------------------------------------------
/Data-Models/BGP_AS/host_vars/S1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S1
3 | bgp_as: 65001
4 | interfaces:
5 | - name: GigabitEthernet0/1
6 | ip: 172.16.0.1/30
7 | - name: Vlan101
8 | ip: 192.168.1.1/24
9 |
10 | neighbors:
11 | - ip: 172.16.0.2
12 | name: S2
13 |
--------------------------------------------------------------------------------
/Data-Models/BGP_AS/host_vars/S2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S2
3 | bgp_as: 65002
4 | interfaces:
5 | - name: GigabitEthernet0/1
6 | ip: 172.16.0.2/30
7 | - name: Vlan101
8 | ip: 192.168.2.1/24
9 |
10 | neighbors:
11 | - ip: 172.16.0.1
12 | name: S1
13 |
--------------------------------------------------------------------------------
/Data-Models/Initial/host_vars/S2.yml:
--------------------------------------------------------------------------------
1 | hostname: S2
2 | bgp_as: 65002
3 | interfaces:
4 | - name: GigabitEthernet0/1
5 | ip: 172.16.0.2/30
6 | - name: Vlan101
7 | ip: 192.168.2.1/24
8 |
9 | neighbors:
10 | - ip: 172.16.0.1
11 | bgp_as: 65001
12 |
--------------------------------------------------------------------------------
/OSPF-Deployment/lldp/wait.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Wait for LLDP to start after enabling LLDP on fabric devices
3 | #
4 | ---
5 | - name: Wait for LLDP to start
6 | pause: seconds=15 prompt="Waiting for LLDP to start"
7 | tags: [ validate ]
8 | when: wait_flag is defined
9 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/report/save.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Saves all Ansible host variables into a YAML file
3 | #
4 | ---
5 | - copy:
6 | content: |
7 | {{hostvars[inventory_hostname]|to_nice_yaml(indent=4)}}
8 | dest: "{{results}}/{{inventory_hostname}}.yml"
--------------------------------------------------------------------------------
/Data-Models/BGP_IF/host_vars/S1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S1
3 | bgp_as: 65001
4 | interfaces:
5 | GigabitEthernet0/1:
6 | ip: 172.16.0.1/30
7 | Vlan101:
8 | ip: 192.168.1.1/24
9 |
10 | neighbors:
11 | - name: S2
12 | interface: GigabitEthernet0/1
13 |
--------------------------------------------------------------------------------
/Data-Models/BGP_IF/host_vars/S2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S2
3 | bgp_as: 65002
4 | interfaces:
5 | GigabitEthernet0/1:
6 | ip: 172.16.0.2/30
7 | Vlan101:
8 | ip: 192.168.2.1/24
9 |
10 | neighbors:
11 | - name: S1
12 | interface: GigabitEthernet0/1
13 |
--------------------------------------------------------------------------------
/Data-Models/Initial/host_vars/S1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hostname: S1
3 | bgp_as: 65001
4 | interfaces:
5 | - name: GigabitEthernet0/1
6 | ip: 172.16.0.1/30
7 | - name: Vlan101
8 | ip: 192.168.1.1/24
9 |
10 | neighbors:
11 | - ip: 172.16.0.2
12 | bgp_as: 65002
13 |
--------------------------------------------------------------------------------
/Description-to-Links/hosts:
--------------------------------------------------------------------------------
1 | # Sample inventory file
2 | # Change it
3 | E1 ansible_host=172.16.1.110
4 | E2 ansible_host=172.16.1.111
5 | PE1 ansible_host=172.16.1.112
6 | E3 ansible_host=172.16.1.120
7 | E4 ansible_host=172.16.1.121
8 | PE2 ansible_host=172.16.1.122
--------------------------------------------------------------------------------
/AWS/params.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ami_id: ami-14c5486b
3 | instance_type: t2.micro
4 | zone: us-east-1a
5 | region: us-east-1
6 | key_name: aws_testing
7 |
8 | subnets:
9 | - name: db
10 | cidr: 172.31.131.0/24
11 | zone: us-east-1a
12 | ipv6: yes
13 | route: local
14 |
--------------------------------------------------------------------------------
/Sample-Compliance-Check/run-checks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Run all compliance checks from the tests directory. Report failed checks
3 | hosts: all
4 | tasks:
5 | - include_tasks: "{{item}}"
6 | with_fileglob: [ "tests/*.yml" ]
7 | ignore_errors: true
8 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/read/snmp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Reads SNMP facts from a networking device
3 | #
4 | # Include it in an Ansible play
5 | #
6 | ---
7 | - snmp_facts:
8 | host: "{{ansible_host}}"
9 | version: v2
10 | community: cisco
11 |
--------------------------------------------------------------------------------
/Git-to-Candidate/helpers/enable_scp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Enable SCP on Cisco IOS
3 | #
4 | ---
5 | - hosts: all
6 | tasks:
7 | - name: Enable SCP server on Cisco IOS devices
8 | ios_config:
9 | lines:
10 | - ip scp server enable
11 | authorize: yes
12 |
--------------------------------------------------------------------------------
/Summary-Reports/inventory/csv.j2:
--------------------------------------------------------------------------------
1 | Host,FQDN,IP address,Serial number,Software version
2 | {% for host,facts in hostvars|dictsort %}
3 | {{ host }},{{ facts.napalm_fqdn }},{{ facts.ansible_host|default('') }},{{ facts.napalm_serial_number }},"{{ facts.napalm_os_version }}"
4 | {% endfor %}
5 |
--------------------------------------------------------------------------------
/3-router-setup/hosts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | all:
3 | hosts:
4 | R1: { ansible_host: 172.16.1.101 }
5 | R2: { ansible_host: 172.16.1.102 }
6 | R3: { ansible_host: 172.16.1.103 }
7 | vars:
8 | ansible_user: cisco
9 | ansible_ssh_pass: cisco
10 | ansible_os: ios
11 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 | filter_plugins=../Plugins/filter
7 | callback_plugins=../Plugins/callback
8 | library=/usr/local/lib/python2.7/dist-packages/napalm_ansible
--------------------------------------------------------------------------------
/LLDP-to-Graph/neighbors.j2:
--------------------------------------------------------------------------------
1 | {% for local in play_hosts %}
2 | {% for ifname,lldp in hostvars[local].napalm_lldp_neighbors|dictsort if lldp|length > 0 %}
3 | {% for n in lldp %}
4 | {{local}}:{{ifname}} -- {{n.hostname}}:{{n.port}}
5 | {% endfor %}
6 | {% endfor %}
7 | {% endfor %}
8 |
--------------------------------------------------------------------------------
/Sample-Compliance-Check/reports/json-debug.j2:
--------------------------------------------------------------------------------
1 | {% for host in groups['all'] %}
2 | {% for t_result in results %}
3 | {% set h_result = t_result.hosts[host]|default({}) %}
4 | {{ host }} - {{ h_result.failed|default('*') }} - {{ t_result.task.name }}
5 | {% endfor %}
6 | {% endfor %}
--------------------------------------------------------------------------------
/Trace-Executed-Commands/sshKeys.cfg:
--------------------------------------------------------------------------------
1 | event manager applet ssh-keys
2 | event syslog occurs 1 pattern "%SYS-5-RESTART: System restarted"
3 | action 1.0 cli command "enable"
4 | action 2.0 cli command "configure terminal"
5 | action 3.0 cli command "crypto key generate rsa modulus 1024"
6 |
--------------------------------------------------------------------------------
/DHCP-Pools/private-setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # This script sets environment variables that point to private (non-repo) versions of inventory file and DHCP pools
4 | #
5 | export PS1="\W $ "
6 | export ANSIBLE_INVENTORY="../Private/gw-hosts"
7 | export EXTRA_POOLS="../Private/pools.yml"
8 |
--------------------------------------------------------------------------------
/Data-Models/Validation/network.yml:
--------------------------------------------------------------------------------
1 | links:
2 | #
3 | # Core links
4 | #
5 | - prefix: 172.16.0.0/30
6 | S1: GigabitEthernet0/1
7 | S2: GigabitEthernet0/1
8 |
9 | #
10 | # Edge links
11 | #
12 | - S1:
13 | Vlan101: 192.168.1.1/24
14 | - S2:
15 | Vlan101: 192.168.2.1/24
16 |
--------------------------------------------------------------------------------
/OSPF-Deployment/tools/clean.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Remove the generated configuration directory
3 | #
4 | ---
5 | - name: Clean configuration directory
6 | local_action: file path={{configs}} state=absent
7 | run_once: true
8 | check_mode: no
9 | changed_when: no
10 | tags: [ clean ]
11 |
--------------------------------------------------------------------------------
/OSPF-Deployment/tools/create_config_dir.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Create configuration directory if it doesn't exist
3 | #
4 | ---
5 | - name: Create configuration directory
6 | local_action: file path={{configs}} state=directory
7 | run_once: true
8 | check_mode: no
9 | changed_when: no
10 |
--------------------------------------------------------------------------------
/3-router-setup/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | transport=local
5 | retry_files_enabled=false
6 | forks=10
7 |
8 | # Change the next line to match napalm-ansible installation on your system
9 | library=/usr/local/lib/python2.7/dist-packages/napalm_ansible
--------------------------------------------------------------------------------
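The comment in the ansible.cfg above (and in the other ansible.cfg files that set library=) points at a napalm-ansible install path that differs from system to system. One way to locate it, assuming napalm-ansible is installed in the active Python environment (the exact subdirectory to reference depends on the napalm-ansible version):

$ python -c "import napalm_ansible, os; print(os.path.dirname(napalm_ansible.__file__))"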
/6-router-setup/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | transport=local
5 | retry_files_enabled=false
6 | forks=10
7 |
8 | # Change the next line to match napalm-ansible installation on your system
9 | library=/usr/local/lib/python2.7/dist-packages/napalm_ansible
--------------------------------------------------------------------------------
/Config-to-Git/getandcommit.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Get configuration from managed hosts (change the included
3 | # playbook to switch between various retrieval methods)
4 | # and store it into Git repo in branch Actual
5 | #
6 | ---
7 | - import_playbook: gc_napalm.yml
8 | - import_playbook: git_commit.yml
9 |
--------------------------------------------------------------------------------
/DMVPN/host_vars/C1.yml:
--------------------------------------------------------------------------------
1 | ---
2 | loopback: { ip: 10.0.1.1 }
3 | LAN:
4 | interface: 'GigabitEthernet0/1'
5 | ip: 172.16.0.1
6 | WAN:
7 | 0:
8 | interface: 'GigabitEthernet0/2'
9 | ip: 10.0.7.17
10 | subnet: 255.255.0.0
11 | DMVPN: {
12 | tunnel0: { ip: 192.168.0.1 }
13 | }
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime/read-device.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Reads SNMP and vendor-specific facts from a networking device
3 | #
4 | # Include it in an Ansible play
5 | #
6 | ---
7 | - snmp_facts:
8 | host: "{{ansible_host}}"
9 | version: v2
10 | community: cisco
11 | - ios_facts:
12 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime/report-save.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Saves all Ansible host variables into a YAML file
3 | #
4 | ---
5 | - file: path={{results}} state=directory
6 | run_once: true
7 | - copy: content="{{hostvars[inventory_hostname]|to_nice_yaml(indent=4)}}" dest={{results}}/{{inventory_hostname}}.yml
--------------------------------------------------------------------------------
/DMVPN/host_vars/C2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | loopback: { ip: 10.0.1.2 }
3 | LAN:
4 | interface: 'GigabitEthernet0/1'
5 | ip: 172.16.0.2
6 | WAN:
7 | 0:
8 | interface: 'GigabitEthernet0/2'
9 | ip: 10.0.7.13
10 | subnet: 255.255.0.0
11 | DMVPN: {
12 | tunnel1: { ip: 192.168.1.1 }
13 | }
14 |
--------------------------------------------------------------------------------
/Data-Models/Network_Prefix/network.yml:
--------------------------------------------------------------------------------
1 | links:
2 | #
3 | # Core links
4 | #
5 | - prefix: 172.16.0.0/30
6 | S1: GigabitEthernet0/1
7 | S2: GigabitEthernet0/1
8 |
9 | #
10 | # Edge links
11 | #
12 | - S1:
13 | Vlan101: 192.168.1.1/24
14 | - S2:
15 | Vlan101: 192.168.2.1/24
16 |
17 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime/report-text.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - file: path="{{results}}/uptime.log" state=touch
3 | run_once: true
4 | - lineinfile:
5 | dest: "{{results}}/uptime.log"
6 | regexp: "{{inventory_hostname}}"
7 | line: "{{'%-20s %8s'|format(inventory_hostname,ansible_sysuptime)}}"
8 |
--------------------------------------------------------------------------------
/DMVPN/enable_scp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Enable SCP on Cisco IOS
3 | #
4 | ---
5 | - hosts: all
6 | tasks:
7 | - name: Enable SCP server on Cisco IOS devices
8 | ios_config:
9 | lines:
10 | - ip scp server enable
11 | - alias exec replace configure replace
12 | authorize: yes
13 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/hosts:
--------------------------------------------------------------------------------
1 | E1 ansible_host=172.16.1.110
2 | E2 ansible_host=172.16.1.111
3 | PE1 ansible_host=172.16.1.112
4 | E3 ansible_host=172.16.1.120
5 | E4 ansible_host=172.16.1.121
6 | PE2 ansible_host=172.16.1.122
7 |
8 | [all:vars]
9 | ansible_user=cisco
10 | ansible_ssh_pass=cisco
11 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/report/text.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - file: path="{{results}}/uptime.log" state=touch
3 | changed_when: false
4 | - lineinfile:
5 | dest: "{{results}}/uptime.log"
6 | regexp: "\\A{{inventory_hostname}} "
7 | line: "{{'%-20s %8s'|format(inventory_hostname,ansible_sysuptime)}}"
8 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/format.j2:
--------------------------------------------------------------------------------
1 | {% macro ifname(name) %}{{
2 | name.replace('GigabitEthernet','Gi')
3 | }}{% endmacro %}
4 | {% macro shortname(name) %}{{
5 | name.partition('.')[0]
6 | }}{% endmacro %}
7 | {% macro hostname(name) %}{{
8 | name.partition('.')[0] if no_domain is defined else name
9 | }}{% endmacro %}
--------------------------------------------------------------------------------
/OSPF-Deployment/tools/log_changes_init.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Create directory for change logging
3 | #
4 | ---
5 | - name: Create configuration changes directory
6 | file: path={{configs}}/node.changes state=directory
7 | delegate_to: localhost
8 | run_once: true
9 | check_mode: no
10 | changed_when: no
11 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Collect SNMP Facts from devices. Save them to YAML files
3 | hosts: all
4 | vars:
5 | - results: "{{inventory_dir}}/results"
6 | tasks:
7 | - include: "uptime/read-{{src|default('device')}}.yml"
8 | - include: "uptime/report-{{dst|default('text')}}.yml"
--------------------------------------------------------------------------------
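The play above selects its data source and report format through the src and dst variables (defaulting to device and text), which map onto the read-*.yml and report-*.yml files under uptime/. A usage sketch (reusing saved facts with src=vars assumes they were previously written to the results directory, for example via dst=save):

$ ansible-playbook uptime.yml                              # gather facts from devices, append to text report
$ ansible-playbook uptime.yml -e src=vars -e dst=text-j2   # reuse saved YAML facts, render report from Jinja2 template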
/Sample-Summary-Report/uptime/report-text-j2.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This module creates uptime.log text file from a Jinja2 template based on information collected with snmp_facts
3 | #
4 | ---
5 | - template: src="uptime-text.j2" dest="{{results}}/uptime.log"
6 | vars:
7 | allhosts: "{{hostvars}}"
8 | run_once: true
9 |
--------------------------------------------------------------------------------
/DMVPN/roles/base/templates/99-common.j2:
--------------------------------------------------------------------------------
1 | !
2 | no ip http server
3 | no ip http secure-server
4 | ip scp server enable
5 | !
6 | line con 0
7 | password {{ansible_ssh_pass}}
8 | line aux 0
9 | line vty 0 4
10 | exec-timeout 720 0
11 | login authentication default
12 | transport input telnet ssh
13 | !
14 | end
--------------------------------------------------------------------------------
/Data-Models/Network_Dict/network.yml:
--------------------------------------------------------------------------------
1 | links:
2 | #
3 | # Core links
4 | #
5 | - S1:
6 | GigabitEthernet0/1: 172.16.0.1/30
7 | S2:
8 | GigabitEthernet0/1: 172.16.0.2/30
9 |
10 | #
11 | # Edge links
12 | #
13 | - S1:
14 | Vlan101: 192.168.1.1/24
15 | - S2:
16 | Vlan101: 192.168.2.1/24
17 |
18 |
--------------------------------------------------------------------------------
/Git-to-Candidate/banners.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Fix banners that trip IOS config replace
3 | #
4 | ---
5 | - hosts: all
6 | tasks:
7 | - name: Fix banners that trip IOS config replace
8 | ios_config:
9 | lines:
10 | - no banner incoming
11 | - no banner login
12 | - no banner exec
13 | match: none
14 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/config/enable-snmp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Enable SNMP on Cisco IOS devices
3 | #
4 | ---
5 | - name: Enable SNMP on Cisco IOS devices
6 | hosts: all
7 | tasks:
8 | - ios_config:
9 | lines:
10 | - snmp-server community cisco RO
11 | - snmp-server contact admin@lab.local
12 |
--------------------------------------------------------------------------------
/Data-Models/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./hosts
3 | gathering=explicit
4 | transport=local
5 | retry_files_enabled=false
6 | stdout_callback=yaml
7 |
8 | # Change the next line to match napalm-ansible installation on your system
9 | library=/usr/local/lib/python2.7/dist-packages/napalm_ansible
10 |
--------------------------------------------------------------------------------
/AWS/vpc-facts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | name: Get VPC facts
4 | vars_files:
5 | - params.yml
6 | tasks:
7 | - ec2_vpc_net_facts:
8 | region: "{{region}}"
9 | register: ec2_vpc_facts
10 | - set_fact:
11 | default_vpc_id: "{{ ec2_vpc_facts|json_query('vpcs[?is_default].id')|first }}"
12 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/read/facts.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Reads vendor-specific facts from a networking device (IOS, NX-OS, or EOS)
3 | #
4 | # Include it in an Ansible play
5 | #
6 | ---
7 | - ios_facts:
8 | when: ansible_os == 'ios'
9 | - nxos_facts:
10 | when: ansible_os == 'nxos'
11 | - eos_facts:
12 | when: ansible_os == 'eos'
13 |
--------------------------------------------------------------------------------
/AWS/security-groups.yml:
--------------------------------------------------------------------------------
1 | - ec2_group:
2 | name: ssh
3 | description: SSH access to EC2 instances
4 | rules:
5 | - proto: tcp
6 | ports: [ 22 ]
7 | cidr_ip: 0.0.0.0/0
8 | - proto: tcp
9 | ports: [ 80 ]
10 | cidr_ip: 0.0.0.0/0
11 | rules_egress:
12 | region: "{{region}}"
13 |
--------------------------------------------------------------------------------
/DMVPN/libvirt/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | gathering=explicit
3 | transport=network_cli
4 | retry_files_enabled=false
5 | roles_path=.
6 | forks=10
7 | interpreter_python=auto_silent
8 |
9 | [persistent_connection]
10 | command_timeout=60
11 |
12 | [paramiko_connection]
13 | host_key_auto_add=no
14 | host_key_checking=no
15 |
--------------------------------------------------------------------------------
/Sample-Compliance-Check/hosts:
--------------------------------------------------------------------------------
1 | E1 ansible_host=172.16.1.110
2 | E2 ansible_host=172.16.1.111
3 | PE1 ansible_host=172.16.1.112
4 | E3 ansible_host=172.16.1.120
5 | E4 ansible_host=172.16.1.121
6 | PE2 ansible_host=172.16.1.122
7 |
8 | [all:vars]
9 | ansible_user=cisco
10 | ansible_ssh_pass=cisco
11 | ansible_os=ios
12 |
--------------------------------------------------------------------------------
/Summary-Reports/hosts-file/cleanup.yml:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env ansible-playbook
2 | #
3 | ---
4 | - hosts: all
5 | name: Cleanup /etc/hosts
6 | tasks:
7 | - blockinfile:
8 | path: /etc/hosts
9 | marker: ""
10 | delegate_to: localhost
11 | run_once: true
12 | become: yes
--------------------------------------------------------------------------------
/3-router-setup/setup.sh:
--------------------------------------------------------------------------------
1 | #
2 | # shell script to set environment variables
3 | #
4 | echo "Will use ansible.cfg from `pwd`"
5 | export ANSIBLE_CONFIG=`pwd`/ansible.cfg
6 | echo "Ansible inventory set to `pwd`/hosts.yml"
7 | export ANSIBLE_INVENTORY=`pwd`/hosts.yml
8 | echo "Output set to /tmp/output"
9 | export OUTPUT=/tmp/output
10 |
--------------------------------------------------------------------------------
/6-router-setup/libvirt/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | gathering=explicit
3 | transport=network_cli
4 | retry_files_enabled=false
5 | roles_path=.
6 | forks=10
7 | interpreter_python=auto_silent
8 |
9 | [persistent_connection]
10 | command_timeout=60
11 |
12 | [paramiko_connection]
13 | host_key_auto_add=no
14 | host_key_checking=no
15 |
--------------------------------------------------------------------------------
/6-router-setup/setup.sh:
--------------------------------------------------------------------------------
1 | #
2 | # shell script to set environment variables
3 | #
4 | echo "Will use ansible.cfg from `pwd`"
5 | export ANSIBLE_CONFIG=`pwd`/ansible.cfg
6 | echo "Ansible inventory set to `pwd`/hosts.yml"
7 | export ANSIBLE_INVENTORY=`pwd`/hosts.yml
8 | echo "Output set to /tmp/output"
9 | export OUTPUT=/tmp/output
10 |
--------------------------------------------------------------------------------
/Description-to-Links/config-enable-snmp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This playbook enables SNMP on all Cisco IOS routers in the inventory
3 | #
4 | ---
5 | - hosts: all
6 | tasks:
7 | - name: "Configure SNMP on IOS devices"
8 | ios_config:
9 | lines:
10 | - "snmp-server community {{snmp_community}} RO"
11 | when: os == 'ios'
12 |
--------------------------------------------------------------------------------
/6-router-setup/setup-libvirt.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export ANSIBLE_CONFIG=$(pwd)/libvirt/ansible.cfg
3 | export ANSIBLE_INVENTORY=$(pwd)/libvirt/hosts-libvirt.yml
4 | echo "Ansible setup for remote libvirt environment"
5 | echo "============================================"
6 | echo "inventory: $ANSIBLE_INVENTORY"
7 | echo "config: $ANSIBLE_CONFIG"
8 |
--------------------------------------------------------------------------------
/Description-to-Fabric/hosts:
--------------------------------------------------------------------------------
1 | [AS64500]
2 | E1 ip=172.16.1.110
3 | E2 ip=172.16.1.111
4 | PE1 ip=172.16.1.112
5 |
6 | [AS64501]
7 | E3 ip=172.16.1.120
8 | E4 ip=172.16.1.121
9 | PE2 ip=172.16.1.122
10 |
11 | [all:vars]
12 | ansible_user=cisco
13 | ansible_connection=local
14 | ansible_ssh_pass=cisco
15 | snmp_community=cisco
16 | os=ios
--------------------------------------------------------------------------------
/Sample-Compliance-Check/tests/snmp-server.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Check whether the SNMP server is configured as a trap host on devices
3 | #
4 | ---
5 | - name: "Check: {{snmp_server}} is not configured as SNMP trap host"
6 | ios_command:
7 | commands: "show snmp host"
8 | register: result
9 | failed_when: "not('host: '~snmp_server in result.stdout[0])"
10 |
--------------------------------------------------------------------------------
/DMVPN/setup-libvirt.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export ANSIBLE_CONFIG=$(pwd)/libvirt/ansible.cfg
3 | export ANSIBLE_INVENTORY=$(pwd)/hosts,$(pwd)/libvirt/hosts-libvirt.yml
4 | echo "Ansible setup for remote libvirt environment"
5 | echo "============================================"
6 | echo "inventory: $ANSIBLE_INVENTORY"
7 | echo "config: $ANSIBLE_CONFIG"
8 |
--------------------------------------------------------------------------------
/DHCP-Pools/pools.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Sample host pools
3 | #
4 | ---
5 | hostPools:
6 | h1.example.com: { id: ff6d.4040.4142.4344.4546.47, ip: 192.168.200.194 }
7 | h2.example.com: { id: 0100.aa00.1111.22, ip: 192.168.200.195 }
8 | h3.example.com: { id: 0100.aa00.e9fb.b6, ip: 192.168.200.196 }
9 | extra.example.com: { id: 1234.abcd.ffff, ip: 10.0.0.1 }
--------------------------------------------------------------------------------
/Sample-Compliance-Check/tests/snmp-community.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Check whether SNMP community is configured on devices
3 | #
4 | ---
5 | - name: "Check: SNMP community {{snmp_community}} is not defined"
6 | ios_command:
7 | commands: "show snmp community"
8 | register: result
9 | failed_when: "not('name: '~snmp_community in result.stdout[0])"
10 |
--------------------------------------------------------------------------------
/Send-Message/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | library=/usr/local/lib/python2.7/dist-packages/napalm_ansible
3 | gathering=explicit
4 | retry_files_enabled=false
5 | transport=local
6 | force_color=1
7 | host_key_checking=False
8 |
9 | [paramiko_connection]
10 | look_for_keys=False
11 | host_key_auto_add=False
12 | record_host_keys=False
13 |
14 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/tests/test_remove_keys.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Test list handling plugins
3 | #
4 | # Run from parent directory with ansible-playbook -i tests tests/test_remove_keys.yml
5 | #
6 | ---
7 | - hosts: E1
8 | tasks:
9 | - debug:
10 | msg: >
11 | {{ napalm_bgp_neighbors.global.peers|
12 | remove_keys('uptime',recurse=true) }}
--------------------------------------------------------------------------------
/Sample-Compliance-Check/tests/syslog-server.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Check whether SYSLOG server is configured on devices
3 | #
4 | ---
5 | - name: "Check: {{syslog_server}} is not configured as syslog server"
6 | ios_command:
7 | commands: "show run | include logging host"
8 | register: result
9 | failed_when: |
10 | not(syslog_server in result.stdout[0])
11 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | [submodule "VLAN-Service"]
2 | path = VLAN-Service
3 | url = https://github.com/ipspace/VLAN-service.git
4 | [submodule "Routing-Deployment"]
5 | path = Routing-Deployment
6 | url = https://github.com/ipspace/MPLS-infrastructure
7 | [submodule "Private/ConfigRepo"]
8 | path = Private/ConfigRepo
9 | url = https://gitlab.com/ipspace/ConfigRepo
10 |
--------------------------------------------------------------------------------
/OSPF-Deployment/ios/interface-config.j2:
--------------------------------------------------------------------------------
1 | {% set node = nodes[inventory_hostname] %}
2 | hostname {{ inventory_hostname }}
3 | !
4 | lldp run
5 | !
6 | {% if node.links is defined %}
7 | {% for intf,data in node.links|dictsort %}
8 | !
9 | interface {{intf}}
10 | no shutdown
11 | ip address {{ data.ip }} 255.255.255.252
12 | {% endfor %}
13 | {% endif %}
14 |
--------------------------------------------------------------------------------
/Data-Models/Transformation/network.yml:
--------------------------------------------------------------------------------
1 | nodes:
2 | S1:
3 | bgp_as: 65001
4 | S2:
5 | bgp_as: 65002
6 |
7 | links:
8 | #
9 | # Core links
10 | #
11 | - prefix: 172.16.0.0/30
12 | S1: GigabitEthernet0/1
13 | S2: GigabitEthernet0/1
14 |
15 | #
16 | # Edge links
17 | #
18 | - S1:
19 | Vlan101: 192.168.1.1/24
20 | - S2:
21 | Vlan101: 192.168.2.1/24
22 |
23 |
--------------------------------------------------------------------------------
/Description-to-Links/links.j2:
--------------------------------------------------------------------------------
1 | {% for local in play_hosts %}
2 | {% for intf in hostvars[local].ansible_interfaces.values() if intf.description.find('to ') >= 0%}
3 | {% set remote = intf.description.partition('to ')[2] %}
4 | {% if remote and (remote > local or (not remote in play_hosts)) %}
5 | {{local}} <--> {{remote}}
6 | {% endif %}
7 | {% endfor %}
8 | {% endfor %}
9 |
--------------------------------------------------------------------------------
/OSPF-Deployment/model/fabric-to-hosts.j2:
--------------------------------------------------------------------------------
1 | #
2 | # Auto-generated inventory file
3 | #
4 | {% for node in nodes %}
5 | {{ node.name }} ansible_host={{ node.mgmt }} ansible_os={{node.os|default(common.os)
6 | }} ansible_user={{node.username|default(common.username)|default("missing")
7 | }} ansible_ssh_pass={{node.password|default(common.password)|default("missing")}}
8 | {% endfor %}
9 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime-data.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Collect IOS Facts from devices. Save them to files
3 | hosts: all
4 | vars:
5 | - results: results
6 | tasks:
7 | - file: path={{results}} state=directory
8 | run_once: true
9 | - ios_facts:
10 | register: result
11 | - copy: content="{{result.ansible_facts}}" dest={{results}}/{{inventory_hostname}}.json
--------------------------------------------------------------------------------
/AWS/spinup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - import_playbook: vpc-facts.yml
3 | tags: [ always ]
4 |
5 | - hosts: localhost
6 | vars_files:
7 | - params.yml
8 | - instances.yml
9 | tasks:
10 | - include_tasks: subnets.yml
11 | tags: [ routing ]
12 | - include_tasks: security-groups.yml
13 | tags: [ security ]
14 | - include_tasks: create-instances.yml
15 | tags: [ vm ]
16 |
--------------------------------------------------------------------------------
/DMVPN/host_vars/R1A.yml:
--------------------------------------------------------------------------------
1 | ---
2 | loopback: { ip: 10.0.1.3 }
3 | LAN:
4 | interface: 'GigabitEthernet0/1'
5 | ip: 172.16.10.1
6 | WAN:
7 | 0:
8 | interface: 'GigabitEthernet0/2'
9 | ip: 10.0.7.22
10 | subnet: 255.255.0.0
11 | # 1: { interface: 'FastEthernet1/0', ip: DHCP }
12 | DMVPN: {
13 | tunnel0: { ip: 192.168.0.3 },
14 | tunnel1: { ip: 192.168.1.3 }
15 | }
16 |
--------------------------------------------------------------------------------
/DMVPN/host_vars/R1B.yml:
--------------------------------------------------------------------------------
1 | ---
2 | loopback: { ip: 10.0.1.4 }
3 | LAN:
4 | interface: 'GigabitEthernet0/1'
5 | ip: 172.16.10.2
6 | WAN:
7 | 0:
8 | interface: 'GigabitEthernet0/2'
9 | ip: 10.0.7.26
10 | subnet: 255.255.0.0
11 | # 1: { interface: 'FastEthernet1/0', ip: DHCP }
12 | DMVPN: {
13 | tunnel0: { ip: 192.168.0.4 },
14 | tunnel1: { ip: 192.168.1.4 }
15 | }
16 |
--------------------------------------------------------------------------------
/DMVPN/host_vars/R3.yml:
--------------------------------------------------------------------------------
1 | ---
2 | loopback: { ip: 10.0.1.6 }
3 | LAN:
4 | interface: 'GigabitEthernet0/1'
5 | ip: 172.16.12.1
6 | WAN:
7 | 0:
8 | interface: 'GigabitEthernet0/2'
9 | ip: 10.0.7.5
10 | subnet: 255.255.0.0
11 | # 1: { interface: 'FastEthernet1/0', ip: DHCP }
12 | DMVPN: {
13 | tunnel0: { ip: 192.168.0.6 },
14 | tunnel1: { ip: 192.168.1.6 }
15 | }
16 |
--------------------------------------------------------------------------------
/DMVPN/host_vars/R2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | loopback: { ip: 10.0.1.5 }
3 | LAN:
4 | interface: 'GigabitEthernet0/1'
5 | ip: 172.16.11.1
6 | WAN:
7 | 0:
8 | interface: 'GigabitEthernet0/2'
9 | ip: 10.0.7.9
10 | subnet: 255.255.255.252
11 | # 1: { interface: 'FastEthernet1/0', ip: DHCP }
12 | DMVPN: {
13 | tunnel0: { ip: 192.168.0.5 },
14 | tunnel1: { ip: 192.168.1.5}
15 | }
16 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/getters/ios/ospf_interface.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Retrieve Cisco IOS OSPF interface information
3 | ---
4 | - ios_command:
5 | commands: [ show ip ospf interface ]
6 | register: ospf_printout
7 |
8 | - set_fact:
9 | state_ospf_interfaces: >
10 | {{ ospf_printout.stdout[0] |
11 | parse_cli(filter_dir|default(playbook_dir)+"/parse_ospf_interfaces.filter") }}
--------------------------------------------------------------------------------
/Description-to-Links/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | #
3 | # Default username and SNMP community
4 | #
5 | # Unless you're using VIRL you should probably change them
6 | #
7 | ansible_user: cisco
8 | ansible_ssh_pass: cisco
9 | snmp_community: cisco
10 |
11 | #
12 | # Default device OS. Overwrite with group or host variables
13 | # or specify in the inventory file
14 | #
15 | os: ios
16 |
--------------------------------------------------------------------------------
/Git-to-Candidate/hosts:
--------------------------------------------------------------------------------
1 | E1 ansible_host=172.16.1.110
2 | E2 ansible_host=172.16.1.111
3 | PE1 ansible_host=172.16.1.112
4 | E3 ansible_host=172.16.1.120
5 | E4 ansible_host=172.16.1.121
6 | PE2 ansible_host=172.16.1.122
7 |
8 | [all:vars]
9 | ansible_user=cisco
10 | ansible_ssh_pass=cisco
11 | repository=git@gitlab.com:ipspace/ConfigRepo.git
12 | branch=Candidate
13 | filesystem=flash:
14 |
--------------------------------------------------------------------------------
/Data-Models/Transformation/create-configs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create directory structure
3 | hosts: localhost
4 | tasks:
5 | - file:
6 | name: configs
7 | state: directory
8 |
9 | - name: Create device configurations
10 | hosts: all
11 | tasks:
12 | - template:
13 | src: ios-config.j2
14 | dest: "configs/{{inventory_hostname}}.cfg"
15 | delegate_to: localhost
16 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/report/template.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Create a report from a Jinja2 template
3 | #
4 | # Variables
5 | # results - output directory
6 | # fmt - desired template
7 | # output - destination file name
8 | #
9 | ---
10 | - template:
11 | src: "template/{{fmt|default('text')}}.j2"
12 | dest: "{{results}}/{{output|default('uptime.log')}}"
13 | run_once: true
14 |
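15 | #
16 | # Typical use (a sketch only, not copied from framework.yml; the variable values are examples):
17 | #
18 | #    - include_tasks: report/template.yml
19 | #      vars: { fmt: html, output: uptime.html }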
--------------------------------------------------------------------------------
/6-router-setup/hosts.yml:
--------------------------------------------------------------------------------
1 | all:
2 | hosts:
3 | E1: { ansible_host: 172.16.1.110 }
4 | E2: { ansible_host: 172.16.1.111 }
5 | PE1: { ansible_host: 172.16.1.112 }
6 | E3: { ansible_host: 172.16.1.120 }
7 | E4: { ansible_host: 172.16.1.121 }
8 | PE2: { ansible_host: 172.16.1.122 }
9 | vars:
10 | ansible_user: cisco
11 | ansible_ssh_pass: cisco
12 | ansible_os: ios
13 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime-data-yaml.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Collect IOS facts from devices. Save them to YAML files
3 | hosts: all
4 | vars:
5 | - results: results
6 | tasks:
7 | - file: path={{results}} state=directory
8 | run_once: true
9 | - ios_facts:
10 | register: result
11 | - copy: content="{{result.ansible_facts|to_nice_yaml(indent=4)}}" dest={{results}}/{{inventory_hostname}}.yml
--------------------------------------------------------------------------------
/Description-to-Links/graph.j2:
--------------------------------------------------------------------------------
1 | graph network {
2 | {% for local in play_hosts %}
3 | {% for intf in hostvars[local].ansible_interfaces.values() if intf.description.find('to ') >= 0%}
4 | {% set remote = intf.description.partition('to ')[2] %}
5 | {% if remote and (remote > local or (not remote in play_hosts)) %}
6 | "{{local}}" -- "{{remote}}"
7 | {% endif %}
8 | {% endfor %}
9 | {% endfor %}
10 | }
--------------------------------------------------------------------------------
/LLDP-to-Graph/links.j2:
--------------------------------------------------------------------------------
1 | {% from 'format.j2' import hostname,ifname with context %}
2 | {% for local in play_hosts %}
3 | {% for intf,lldp in hostvars[local].napalm_lldp_neighbors|dictsort if lldp|length > 0 %}
4 | {% for n in lldp if local < n.hostname %}
5 | {{ hostname(hostvars[local].napalm_fqdn)}}:{{ifname(intf)}} -- {{hostname(n.hostname)}}:{{ifname(n.port)}}
6 | {% endfor %}
7 | {% endfor %}
8 | {% endfor %}
9 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime/html.j2:
--------------------------------------------------------------------------------
1 | <html>
2 | <head>
3 | <title>Device uptime report</title>
4 | </head>
5 | <body>
6 | <h1>Device uptime report</h1>
7 | <table>
8 | <tr><th>Device</th><th>Uptime in seconds</th></tr>
9 | {% for host,facts in allhosts|dictsort %}
10 | <tr><td>{{facts.inventory_hostname}}</td><td>{{facts.ansible_sysuptime}}</td></tr>
11 | {% endfor %}
12 | </table>
13 | </body>
14 | </html>
--------------------------------------------------------------------------------
/Description-to-Links/extract-links.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - name: Collect SNMP facts
5 | snmp_facts:
6 | host: "{{ansible_host|default(inventory_hostname)}}"
7 | version: v2
8 | community: cisco
9 | tags: [ facts ]
10 |
11 | - name: Generate links
12 | template: src={{template|default('links.j2')}} dest=./{{output|default('links.txt')}}
13 | run_once: true
14 |
--------------------------------------------------------------------------------
/Sample-Compliance-Check/do-checks.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | set -e
4 | echo "Performing compliance checks..."
5 | ANSIBLE_STDOUT_CALLBACK=json \
6 | ansible-playbook run-checks.yml >/tmp/$$.json
7 | echo "... done"
8 | echo
9 | echo "Generating reports from $$.json"
10 | ANSIBLE_STDOUT_CALLBACK=dense \
11 | ansible-playbook report-results.yml \
12 | -e input=/tmp/$$.json
13 | echo
14 | echo "Cleanup"
15 | rm /tmp/$$.json
--------------------------------------------------------------------------------
/Summary-Reports/framework/template/html.j2:
--------------------------------------------------------------------------------
1 | <html>
2 | <head>
3 | <title>Device uptime report</title>
4 | </head>
5 | <body>
6 | <h1>Device uptime report</h1>
7 | <table>
8 | <tr><th>Device</th><th>Uptime in milliseconds</th></tr>
9 | {% for host,facts in hostvars|dictsort %}
10 | <tr><td>{{facts.inventory_hostname}}</td><td>{{facts.ansible_sysuptime}}</td></tr>
11 | {% endfor %}
12 | </table>
13 | </body>
14 | </html>
--------------------------------------------------------------------------------
/DMVPN/group_vars/all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | log_dir: logs
3 | build_dir: compiled
4 | config_dir: configs
5 | domain_name: lab.ipspace.net
6 | routing: ibgp
7 |
8 | as: 65000
9 | tunnel:
10 | mtu: 1400
11 | mss: 1360
12 | tunnel0:
13 | auth: WanExamp
14 | hub_router: C1
15 | nhrp_id: 12345
16 | gre: 12345
17 | tunnel1:
18 | auth: WanExamp
19 | hub_router: C2
20 | nhrp_id: 12346
21 | gre: 12346
22 |
--------------------------------------------------------------------------------
/AWS/instances.j2:
--------------------------------------------------------------------------------
1 | {{ "%-10s %-30s %15s"|format('tag','Private DNS','Private IP') }}
2 | =================================================================
3 | {% for r in Reservations %}
4 | {% for i in r.Instances %}
5 | {{ "%-10s"|format(i.Tags[0].Value|default('')) }}{{ " %-30s %15s"|format(i.PrivateDnsName | default (''),i.PrivateIpAddress)
6 | }}{{ " %15s"|format(i.PublicIpAddress|default('')) }}
7 | {% endfor %}
8 | {% endfor %}
9 |
--------------------------------------------------------------------------------
/Config-to-Git/tests/clean.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Test clean-config plugin
3 | #
4 | ---
5 | - name: Test plugin
6 | hosts: localhost
7 | vars:
8 | config: "{{ lookup('file','show_run.original') }}"
9 | patterns:
10 | - "\\A[^!]*"
11 | - "ntp clock-period..."
12 | - "! Last configuration change..."
13 | tasks:
14 | - copy:
15 | content: "{{ config|clean_config(patterns) }}"
16 | dest: "./show_run.expected"
--------------------------------------------------------------------------------
/Config-to-Git/tests/show_run.original:
--------------------------------------------------------------------------------
1 |
2 | Building configuration...
3 |
4 |
5 | Current configuration : 2331 bytes
6 | !
7 | ! Last configuration change at 05:37:16 UTC Mon Apr 9 2018 by cisco
8 | !
9 | version 15.6
10 | service timestamps debug datetime msec
11 | service timestamps log datetime msec
12 | no service password-encryption
13 | !
14 | hostname R1
15 | !
16 | ntp clock-period 1234567
17 | !
18 | end
19 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/simple-graph.j2:
--------------------------------------------------------------------------------
1 | graph network {
2 | {% for local in play_hosts %}
3 | "{{local}}" [shape=box]
4 | {% endfor %}
5 | {% for local in play_hosts %}
6 | {% for ifname,lldp in hostvars[local].napalm_lldp_neighbors|dictsort if lldp|length > 0 %}
7 | {% for n in lldp if local < n.hostname or n.hostname not in play_hosts %}
8 | "{{local}}" -- "{{n.hostname}}";
9 | {% endfor %}
10 | {% endfor %}
11 | {% endfor %}
12 | }
13 |
--------------------------------------------------------------------------------
/OSPF-Deployment/common/config_ospf.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Deploy OSPF routing in a WAN fabric
3 | #
4 | ---
5 | - name: Create configuration directory
6 | local_action: file path={{configs}} state=directory
7 | run_once: true
8 | check_mode: no
9 | changed_when: no
10 |
11 | - name: Create configurations
12 | template: src=../{{ansible_network_os}}/ospf-config.j2 dest={{configs}}/{{inventory_hostname}}.ospf.cfg
13 | check_mode: no
14 |
--------------------------------------------------------------------------------
/OSPF-Deployment/ios/deploy_ospf.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Deploy OSPF routing in a WAN fabric
3 | #
4 | ---
5 | - include_tasks: ../tools/log_changes_init.yml
6 |
7 | - name: Deploy configurations
8 | ios_config:
9 | src: "{{configs}}/{{inventory_hostname}}.ospf.cfg"
10 | register: changes
11 | tags: [ print_action ]
12 |
13 | - include_tasks: ../tools/log_changes.yml
14 | args:
15 | apply:
16 | vars:
17 | component: OSPF
18 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime/report-template.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Create a report from a Jinja2 template
3 | #
4 | # Variables
5 | # results - output directory
6 | # fmt - desired template
7 | # output - destination file name
8 | #
9 | ---
10 | - template:
11 | src: "{{fmt|default('uptime-text.j2')}}"
12 | dest: "{{results}}/{{output|default('uptime.log')}}"
13 | vars:
14 | allhosts: "{{hostvars}}"
15 | run_once: true
16 |
--------------------------------------------------------------------------------
/Git-to-Candidate/copy_candidate.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Copy files from Candidate configuration branch into devices
3 | #
4 | ---
5 | - hosts: all
6 | tasks:
7 | - name: Copy candidate configuration into routers
8 | local_action: >
9 | command /usr/bin/sshpass -p {{ansible_ssh_pass}}
10 | /usr/bin/scp Candidate/{{inventory_hostname}}.cfg
11 | {{ansible_user}}@{{ansible_host|default(inventory_hostname)}}:{{filesystem}}candidate.cfg
12 |
13 |
--------------------------------------------------------------------------------
/Send-Message/README.md:
--------------------------------------------------------------------------------
1 | # Send a complex command to Cisco IOS device
2 |
3 | The _message.yml_ playbook uses the Cisco IOS **send** command to send messages to all other lines (VTY + CON) on the same device.
4 |
5 | The trick:
6 |
7 | * Figure out the whole sequence of characters that needs to be sent to the device (until you get the router prompt back)
8 | * Replace "return" with `\n` and control characters with `\xCC` (where _CC_ is the control character's ASCII code in hex), as shown in the sketch below.
9 |
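10 | A minimal sketch of such a task (not a copy of _message.yml_; the message text and the Ctrl-Z terminator are assumptions) could look like this:
11 |
12 | ```yaml
13 | - name: Send a message to all terminal lines
14 |   ios_command:
15 |     commands:
16 |       # the whole dialog as one string: "send *" + Enter, message body + Enter,
17 |       # Ctrl-Z (\x1a) to end the message, Enter to confirm the send prompt
18 |       - "send *\nLab reload in 5 minutes\n\x1a\n"
19 | ```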
--------------------------------------------------------------------------------
/Compare-State-Snapshots/getters/ios/ospf_neighbor.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Retrieve Cisco IOS OSPF neighbor information
3 | ---
4 | - ios_command:
5 | commands: [ show ip ospf neighbor ]
6 | register: ospf_printout
7 |
8 | - set_fact:
9 | parse_ospf_neighbors: >
10 | {{ ospf_printout.stdout[0] |
11 | parse_cli(filter_dir|default(playbook_dir)+"/parse_ospf_neighbor.filter") }}
12 | - set_fact: state_ospf_neighbors={{ parse_ospf_neighbors.neighbors }}
13 |
--------------------------------------------------------------------------------
/Description-to-Fabric/get-connectivity/remote-interface.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - set_fact:
3 | remote: "{{intf.description.replace('to ','')}}"
4 | - set_fact:
5 | remintf: |
6 | {% set riflist = hostvars[remote].ansible_interfaces|default({}) %}
7 | {% set local = "to "+inventory_hostname %}
8 | {% set remIfList = riflist.values()|selectattr('description','equalto',local)|list %}
9 | {{ remIfList[0]|default({}) }}
10 | - set_fact: link={}
11 |
--------------------------------------------------------------------------------
/OSPF-Deployment/lldp/napalm_lldp_facts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get interfaces and LLDP neighbors
3 | napalm_get_facts:
4 | hostname: "{{ansible_host|default(inventory_hostname)}}"
5 | username: "{{ansible_user}}"
6 | password: "{{ansible_ssh_pass}}"
7 | dev_os: "{{ansible_os}}"
8 | optional_args:
9 | port: "{{api_port|default(ansible_port)|default(22)}}"
10 | filter:
11 | - lldp_neighbors
12 | - interfaces
13 | tags: [ validate ]
--------------------------------------------------------------------------------
/OSPF-Deployment/common/config_interfaces.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Create interface IP configuration, change hostname, enable LLDP
3 | #
4 | ---
5 | - name: Create configuration directory
6 | local_action: file path={{configs}} state=directory
7 | run_once: true
8 | check_mode: no
9 | changed_when: no
10 |
11 | - name: Create configurations
12 | template: src=../{{ansible_network_os}}/interface-config.j2 dest={{configs}}/{{inventory_hostname}}.if.cfg
13 | check_mode: no
14 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/savers/yaml.yml:
--------------------------------------------------------------------------------
1 | # Snapshot: create a snapshot of inventory file and host facts in specified folder
2 | #
3 | # Variables:
4 | # - snapshot: directory (relative to inventory directory)
5 | #
6 | ---
7 | - name: Create snapshot directory
8 | file:
9 | path: "{{output}}"
10 | state: directory
11 | run_once: true
12 |
13 | - name: Create host variable files
14 | template:
15 | src: "yaml.j2"
16 | dest: "{{output}}/{{inventory_hostname}}.yml"
17 |
--------------------------------------------------------------------------------
/Description-to-Fabric/config-enable-snmp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This playbook enables SNMP on all Cisco IOS routers in the inventory
3 | #
4 | ---
5 | - hosts: all
6 | tasks:
7 | - name: "Configure SNMP on IOS devices"
8 | ios_config:
9 | username: "{{ansible_user}}"
10 | password: "{{ansible_password}}"
11 | host: "{{ip|default(inventory_hostname)}}"
12 | lines:
13 | - "snmp-server community {{snmp_community}} RO"
14 | when: "'{{os}}' == 'ios'"
15 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime-data-snmp.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Collect SNMP and IOS facts from devices. Save them to YAML files
3 | hosts: all
4 | vars:
5 | - results: results
6 | tasks:
7 | - file: path={{results}} state=directory
8 | run_once: true
9 | - snmp_facts:
10 | host: "{{ansible_host}}"
11 | version: v2
12 | community: cisco
13 | - ios_facts:
14 | - copy: content="{{hostvars[inventory_hostname]|to_nice_yaml(indent=4)}}" dest={{results}}/{{inventory_hostname}}.yml
--------------------------------------------------------------------------------
/Data-Models/Initial/config.j2:
--------------------------------------------------------------------------------
1 | hostname {{ hostname }}
2 | {% for intf in interfaces %}
3 | !
4 | interface {{ intf.name }}
5 | ip address {{ intf.ip|ipaddr('address') }} {{ intf.ip|ipaddr('netmask') }}
6 | {% endfor %}
7 | !
8 | router bgp {{ bgp_as }}
9 | {% for n in neighbors %}
10 | neighbor {{ n.ip }} remote-as {{ n.bgp_as }}
11 | {% endfor %}
12 | {% for intf in interfaces if 'Vlan' in intf.name %}
13 | network {{ intf.ip|ipaddr('network') }} {{ intf.ip|ipaddr('netmask') }}
14 | {% endfor %}
15 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/LLDP-test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - name: Get LLDP neighbors
5 | napalm_get_facts:
6 | hostname: "{{ansible_host|default(inventory_hostname)}}"
7 | username: "{{ansible_user}}"
8 | password: "{{ansible_ssh_pass}}"
9 | dev_os: "{{ansible_os}}"
10 | optional_args:
11 | port: "{{api_port|default(ansible_port)|default(22)}}"
12 | filter:
13 | - lldp_neighbors
14 |
15 | - debug: var=hostvars[inventory_hostname]
16 |
--------------------------------------------------------------------------------
/DMVPN/roles/routing/templates/30-ibgp-spoke.j2:
--------------------------------------------------------------------------------
1 | router bgp {{as}}
2 | bgp log-neighbor-changes
3 | redistribute ospf 1
4 | {% for ifnum,intf in DMVPN|default({})|dictsort %}
5 | {% set peerip = hostvars[tunnel[ifnum].hub_router].DMVPN[ifnum].ip %}
6 | neighbor {{peerip}} remote-as {{as}}
7 | neighbor {{peerip}} description Hub {{tunnel[ifnum].hub_router}}
8 | neighbor {{peerip}} update-source {{ifnum|capitalize}}
9 | {% endfor %}
10 | !
11 | router ospf 1
12 | default-information originate
13 | !
14 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/hosts:
--------------------------------------------------------------------------------
1 | spine-1 ansible_host=10.0.2.2 ansible_port=20001 api_port=21001 ansible_os=eos ansible_user=admin ansible_ssh_pass=admin
2 | spine-2 ansible_host=10.0.2.2 ansible_port=20002 api_port=21002 ansible_os=eos ansible_user=admin ansible_ssh_pass=admin
3 | leaf-1 ansible_host=10.0.2.2 ansible_port=20003 api_port=21003 ansible_os=eos ansible_user=admin ansible_ssh_pass=admin
4 | leaf-2 ansible_host=10.0.2.2 ansible_port=20004 api_port=21004 ansible_os=eos ansible_user=admin ansible_ssh_pass=admin
5 |
--------------------------------------------------------------------------------
/OSPF-Deployment/ios/deploy_interfaces.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Enable interfaces and LLDP, deploy IP addressing in a WAN fabric
3 | #
4 | ---
5 | - include_tasks: ../tools/log_changes_init.yml
6 |
7 | - name: Deploy configurations
8 | ios_config:
9 | src: "{{configs}}/{{inventory_hostname}}.if.cfg"
10 | save_when: changed
11 | register: changes
12 | tags: [ print_action ]
13 |
14 | - include_tasks: ../tools/log_changes.yml
15 | args:
16 | apply:
17 | vars:
18 | component: interface
19 |
--------------------------------------------------------------------------------
/DHCP-Pools/cleanup.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This playbook removes extra DHCP pools from all Cisco IOS routers in the inventory
3 | #
4 | ---
5 | - hosts: all
6 | name: Remove extra DHCP pools configured on the device
7 | vars_files:
8 | - "{{ lookup('env','EXTRA_POOLS')|default('pools.yml') }}"
9 | tasks:
10 | - include: include/getPools.yml
11 | - ios_config:
12 | username: "{{ansible_user}}"
13 | password: "{{ansible_ssh_pass}}"
14 | host: "{{ip|default(inventory_hostname)}}"
15 | src: cleanup.j2
--------------------------------------------------------------------------------
/Data-Models/Transformation/ios-config.j2:
--------------------------------------------------------------------------------
1 | hostname {{ hostname }}
2 | {% for intf in interfaces %}
3 | !
4 | interface {{ intf.name }}
5 | ip address {{ intf.ip|ipaddr('address') }} {{ intf.ip|ipaddr('netmask') }}
6 | {% endfor %}
7 | !
8 | router bgp {{ bgp_as }}
9 | {% for n in neighbors %}
10 | neighbor {{ n.ip }} remote-as {{ n.bgp_as }}
11 | {% endfor %}
12 | {% for intf in interfaces if 'Vlan' in intf.name %}
13 | network {{ intf.ip|ipaddr('network') }} {{ intf.ip|ipaddr('netmask') }}
14 | {% endfor %}
15 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime-text-fromvars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Read device data from saved .YML files. Generate text report.
3 | hosts: all
4 | vars:
5 | - results: results
6 | tasks:
7 | - include_vars: "{{results}}/{{inventory_hostname}}.yml"
8 | - file: path="{{results}}/uptime.log" state=touch
9 | run_once: true
10 | - lineinfile:
11 | dest: "{{results}}/uptime.log"
12 | regexp: "{{inventory_hostname}}"
13 | line: "{{'%-20s %8s'|format(inventory_hostname,ansible_sysuptime)}}"
--------------------------------------------------------------------------------
/LLDP-to-Graph/links-fix.j2:
--------------------------------------------------------------------------------
1 | {% from 'format.j2' import hostname,ifname with context %}
2 | {% for local in play_hosts %}
3 | {% for intf,lldp in hostvars[local].napalm_lldp_neighbors|dictsort if lldp|length > 0 %}
4 | {% for n in lldp
5 | if local < n.hostname or
6 | (n.hostname not in play_hosts and shortname(n.hostname) not in play_hosts) %}
7 | {{ hostname(hostvars[local].napalm_fqdn)}}:{{ifname(intf)}} -- {{hostname(n.hostname)}}:{{ifname(n.port)}}
8 | {% endfor %}
9 | {% endfor %}
10 | {% endfor %}
11 |
--------------------------------------------------------------------------------
/Sample-Compliance-Check/reports/json-simple.j2:
--------------------------------------------------------------------------------
1 | {% for host in groups['all']|sort %}
2 | {% for t_result in results
3 | if t_result.hosts[host] is defined and (
4 | t_result.hosts[host].failed_when_result|default(false)
5 | or t_result.hosts[host].failed|default(false)) %}
6 | {% if loop.first %}
7 | {{ host }}
8 | ========================
9 | {% endif %}
10 | -{{ t_result.task.name.replace("Check:","") }}
11 | {% if loop.last %}
12 |
13 | {% endif %}
14 | {% endfor %}
15 | {% endfor %}
16 |
--------------------------------------------------------------------------------
/AWS/cleanup.yml:
--------------------------------------------------------------------------------
1 | - hosts: localhost
2 | connection: local
3 | gather_facts: False
4 | vars_files:
5 | - params.yml
6 | - instances.yml
7 | tasks:
8 | - ec2_instance_facts:
9 | region: "{{region}}"
10 | register: ec2_facts
11 |
12 | - ec2:
13 | instance_id: "{{ item.instance_id }}"
14 | state: absent
15 | region: "{{ region }}"
16 | with_items: "{{ ec2_facts.instances }}"
17 | # private_ip: "{{ item.ip }}"
18 | # region: "{{region}}"
19 | # zone: "{{zone}}"
20 | # wait: yes
21 |
--------------------------------------------------------------------------------
/DMVPN/hosts:
--------------------------------------------------------------------------------
1 | [hubs]
2 | C1 ansible_host=172.16.1.110
3 | C2 ansible_host=172.16.1.111
4 |
5 | [spokes:children]
6 | spoke_SOHO
7 | spoke_medium
8 | spoke_redundant
9 |
10 | [spoke_SOHO]
11 | R2 ansible_host=172.16.1.122
12 |
13 | [spoke_medium]
14 | R3 ansible_host=172.16.1.123
15 |
16 | [spoke_redundant]
17 | R1A ansible_host=172.16.1.120
18 | R1B ansible_host=172.16.1.121
19 |
20 | [all:vars]
21 | ansible_user=cisco
22 | ansible_ssh_pass=cisco
23 | ansible_network_os=ios
24 | ansible_become_method=enable
25 | ansible_become_password=cisco
26 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/deployConfig.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Deploy a configuration snippet on Cisco IOS routers
3 | #
4 | ---
5 | - name: Deploy a configuration snippet on Cisco IOS routers
6 | hosts: all
7 | tasks:
8 | - assert:
9 | that: src is defined
10 | msg: "Must specify configuration snippet filename with --extra-vars"
11 | run_once: 1
12 |
13 | - ios_config:
14 | src: "{{src}}"
15 | host: "{{ansible_host}}"
16 | username: "{{ansible_user}}"
17 | password: "{{ansible_ssh_pass}}"
18 |
--------------------------------------------------------------------------------
/Data-Models/BGP_AS/config.j2:
--------------------------------------------------------------------------------
1 | hostname {{ hostname }}
2 | {% for intf in interfaces %}
3 | !
4 | interface {{ intf.name }}
5 | ip address {{ intf.ip|ipaddr('address') }} {{ intf.ip|ipaddr('netmask') }}
6 | {% endfor %}
7 | !
8 | router bgp {{ bgp_as }}
9 | {% for n in neighbors %}
10 | neighbor {{ n.ip }} remote-as {{ hostvars[n.name].bgp_as }}
11 | neighbor {{ n.ip }} description {{ n.name }}
12 | {% endfor %}
13 | {% for intf in interfaces if 'Vlan' in intf.name %}
14 | network {{ intf.ip|ipaddr('network') }} {{ intf.ip|ipaddr('netmask') }}
15 | {% endfor %}
16 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/graph-simple.j2:
--------------------------------------------------------------------------------
1 | {% from 'format.j2' import hostname,shortname,ifname with context %}
2 | graph network {
3 | {% for local in play_hosts %}
4 | {% for intf,lldp in hostvars[local].napalm_lldp_neighbors|dictsort if lldp|length > 0 %}
5 | {% for n in lldp
6 | if local < n.hostname or
7 | (n.hostname not in play_hosts and shortname(n.hostname) not in play_hosts) %}
8 | {% set lname = hostvars[local].napalm_fqdn %}
9 | "{{hostname(lname)}}" -- "{{hostname(n.hostname)}}";
10 | {% endfor %}
11 | {% endfor %}
12 | {% endfor %}
13 | }
14 |
--------------------------------------------------------------------------------
/OSPF-Deployment/ios/ospf-config-from-fabric.j2:
--------------------------------------------------------------------------------
1 | {% macro internal_link(name) %}
2 | interface {{ name }}
3 | ip ospf 1 area 0
4 | {% endmacro %}
5 |
6 | {% set node = nodes|selectattr('name','equalto',inventory_hostname)|first %}
7 |
8 | default router ospf 1
9 | router ospf 1
10 | router-id {{ node.rid }}
11 |
12 | {% for link in fabric %}
13 | {% if inventory_hostname == link.left %}
14 | {{ internal_link(link.left_port) }}
15 | {% elif inventory_hostname == link.right %}
16 | {{ internal_link(link.right_port) }}
17 | {% endif %}
18 | {% endfor %}
--------------------------------------------------------------------------------
/6-router-setup/README.md:
--------------------------------------------------------------------------------
1 | # Ansible configuration files used with Inter-AS VIRL topology
2 |
3 | This directory contains an Ansible inventory file (in YAML format) and an Ansible configuration file matching the [Inter-AS VIRL topology](https://github.com/ipspace/VIRL) heavily used in the [Building Network Automation Solutions](http://www.ipspace.net/Building_Network_Automation_Solutions) and [Ansible for Networking Engineers](http://www.ipspace.net/Ansible_for_Networking_Engineers) online courses.
4 |
5 | To use these files with your playbooks, run `source setup.sh`, which sets the Ansible environment variables.
--------------------------------------------------------------------------------
/DHCP-Pools/check.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This playbook checks for extra DHCP pools on managed routers and fails if there are any
3 | #
4 | # You could change the ASSERT action into something more appropriate, for example sending an email (see the sketch at the end of this file)
5 | #
6 | ---
7 | - hosts: all
8 | name: Check DHCP pools configured on the device
9 | vars_files:
10 | - "{{ lookup('env','EXTRA_POOLS')|default('pools.yml') }}"
11 | tasks:
12 | - include: include/getPools.yml
13 | - assert:
14 | that: "extraPools|length == 0"
15 | msg: "Extra DHCP pools have to be removed first: {{extraPools|join(', ')}}"
16 |
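17 | #
18 | # A minimal sketch of the e-mail alternative mentioned above. The SMTP relay,
19 | # recipient and subject are made-up values; adjust them before replacing the
20 | # assert task with something like this:
21 | #
22 | #    - mail:
23 | #        host: smtp.example.com
24 | #        to: noc@example.com
25 | #        subject: "Extra DHCP pools found on {{ inventory_hostname }}"
26 | #        body: "Remove these pools first: {{ extraPools|join(', ') }}"
27 | #      delegate_to: localhost
28 | #      when: extraPools|length > 0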
--------------------------------------------------------------------------------
/DMVPN/roles/base/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Baseline configuration - start
3 | template: src=00-common.j2 dest={{ build_dir }}/{{inventory_hostname}}/00-common.conf
4 | check_mode: no
5 | changed_when: false
6 | - name: Baseline configuration - IP interfaces
7 | template: src=10-interfaces.j2 dest={{ build_dir }}/{{inventory_hostname}}/10-interfaces.conf
8 | check_mode: no
9 | changed_when: false
10 | - name: Baseline configuration - end
11 | template: src=99-common.j2 dest={{ build_dir }}/{{inventory_hostname}}/99-common.conf
12 | check_mode: no
13 | changed_when: false
14 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/getters/ios/parse_ospf_neighbor.filter:
--------------------------------------------------------------------------------
1 | # CLI filter for "show ip ospf neighbor" Cisco IOS printout
2 | #
3 | ---
4 | vars:
5 | neighbor:
6 | key: "{{ item.id }}"
7 | values:
8 | state: "{{ item.state }}"
9 | up: "{{ item.state == 'FULL' }}"
10 | address: "{{ item.address}}"
11 | interface: "{{ item.interface }}"
12 |
13 | keys:
14 | neighbors:
15 | type: list
16 | value: "{{ neighbor }}"
17 | items: "^(?P<id>\\d+\\.\\d+\\.\\d+\\.\\d+)[ 0-9]+(?P<state>[A-Z0-9-]+).*?(?P<address>\\d+\\.\\d+\\.\\d+\\.\\d+)\\s+(?P<interface>\\S+)"
18 |
--------------------------------------------------------------------------------
/Sample-Summary-Report/README.md:
--------------------------------------------------------------------------------
1 | # Create a simple device uptime report
2 |
3 | This directory contains the source code for the _Creating Reports_ video in the [Creating Summary and Compliance Reports](https://my.ipspace.net/bin/list?id=NetAutSol&module=2#M2S2) part of [Building Network Automation Solutions](http://www.ipspace.net/Building_Network_Automation_Solutions) online course.
4 |
5 | The sources have been modified to work with Ansible 2.4:
6 |
7 | * Removed `provider` parameter from `ios_facts` module
8 | * Removed `group_vars` directory (no longer needed because we don't use the *provider* parameter)
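9 |
10 | For reference, a minimal sketch of the post-2.4 task style used in these playbooks (connection details come from the inventory instead of a `provider` dictionary):
11 |
12 | ```yaml
13 | - name: Collect facts without a provider dictionary
14 |   hosts: all
15 |   tasks:
16 |     - ios_facts:   # credentials are taken from ansible_user / ansible_ssh_pass in the inventory
17 | ```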
--------------------------------------------------------------------------------
/OSPF-Deployment/ios/ospf-config.j2:
--------------------------------------------------------------------------------
1 | {% set node = nodes[inventory_hostname] %}
2 | {% if node.links is defined %}
3 | router ospf 1
4 | router-id {{ node.rid }}
5 | !
6 | interface Loopback0
7 | ip address {{ node.rid }} 255.255.255.255
8 | ip ospf 1 area 0
9 | !
10 | {% for intf,data in node.links|dictsort %}
11 | interface {{intf}}
12 | ip ospf 1 area 0
13 | ip ospf hello 3
14 | ip ospf dead 10
15 | ip ospf network point-to-multipoint
16 | {% if data.cost is defined %}
17 | ip ospf cost {{data.cost}}
18 | {% endif %}
19 | !
20 | {% endfor %}
21 | {% endif %}
22 |
--------------------------------------------------------------------------------
/Data-Models/Transformation/transform-data.yml:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env ansible-playbook
2 | #
3 | ---
4 | - name: Transform network data model into device data model(s)
5 | hosts: localhost
6 | tasks:
7 | - name: Read network data model
8 | include_vars: network.yml
9 | - name: Create per-device data model(s)
10 | template:
11 | src: device-data.j2
12 | dest: host_vars/{{ hostname }}.yml
13 | loop: "{{ nodes.keys()|list }}"
14 | loop_control:
15 | loop_var: hostname
16 | - name: Create Ansible inventory
17 | template:
18 | src: inventory.j2
19 | dest: hosts.yml
20 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/hosts:
--------------------------------------------------------------------------------
1 | #
2 | # Auto-generated inventory file
3 | #
4 | E1 ansible_host=172.16.1.110 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
5 | E2 ansible_host=172.16.1.111 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
6 | E3 ansible_host=172.16.1.120 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
7 | E4 ansible_host=172.16.1.121 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
8 | PE1 ansible_host=172.16.1.112 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
9 | PE2 ansible_host=172.16.1.122 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
10 |
--------------------------------------------------------------------------------
/Git-to-Candidate/git_checkout.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Check out candidate configurations from Git repository
3 | #
4 | ---
5 | - hosts: localhost
6 | name: Checkout candidate configuration
7 | tasks:
8 | - file: dest=Candidate state=absent
9 | - shell: "git clone {{repository}} Candidate"
10 | name: Clone configuration repository
11 | - block:
12 | - shell: "git checkout Candidate"
13 | name: Check out Candidate branch
14 | args:
15 | chdir: Candidate
16 | rescue:
17 | - file: dest=Candidate state=absent
18 | - fail: msg="Cannot check out the candidate branch '{{branch}}'"
19 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/LLDP-to-Graph.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - name: Get LLDP neighbors
5 | napalm_get_facts:
6 | hostname: "{{ansible_host|default(inventory_hostname)}}"
7 | username: "{{ansible_user}}"
8 | password: "{{ansible_ssh_pass}}"
9 | dev_os: "{{ansible_os}}"
10 | optional_args:
11 | port: "{{api_port|default(ansible_port)|default(22)}}"
12 | filter:
13 | - facts
14 | - lldp_neighbors
15 |
16 | - name: Generate graph description file
17 | template: src={{template|default('graph.j2')}} dest=./{{output|default('network.dot')}}
18 | run_once: true
19 |
--------------------------------------------------------------------------------
/Summary-Reports/hosts-file/hosts.j2:
--------------------------------------------------------------------------------
1 | {% for host,facts in hostvars|dictsort
2 | if facts.napalm_interfaces_ip is defined %}
3 | {% for ifname,ifdata in facts.napalm_interfaces_ip.items()
4 | if ifdata.ipv4 is defined %}
5 | {% for ip,prefix in ifdata.ipv4.items() %}
6 | {{'%-15s %s'|format(ip,facts.napalm_fqdn) }}
7 | {% endfor %}
8 | {% endfor %}
9 | {% for ifname,ifdata in facts.napalm_interfaces_ip.items()
10 | if ifdata.ipv6 is defined %}
11 | {% for ip,prefix in ifdata.ipv6.items() %}
12 | {{'%-30s %s'|format(ip,facts.napalm_fqdn) }}
13 | {% endfor %}
14 | {% endfor %}
15 | {% endfor %}
16 |
--------------------------------------------------------------------------------
/DMVPN/roles/virl/templates/90-virl.j2:
--------------------------------------------------------------------------------
1 | vrf definition Mgmt-intf
2 | !
3 | address-family ipv4
4 | exit-address-family
5 | !
6 | address-family ipv6
7 | exit-address-family
8 | !
9 | interface GigabitEthernet0/0
10 | description OOB Management
11 | vrf forwarding Mgmt-intf
12 | ip address {{ansible_host}} 255.255.255.0
13 | duplex full
14 | speed auto
15 | media-type rj45
16 | !
17 | event manager applet ssh-keys
18 | event syslog occurs 1 pattern "%SYS-5-RESTART: System restarted"
19 | action 1.0 cli command "enable"
20 | action 2.0 cli command "configure terminal"
21 | action 3.0 cli command "crypto key generate rsa modulus 1024"
--------------------------------------------------------------------------------
/6-router-setup/libvirt/hosts-libvirt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | all:
3 | hosts:
4 | E1: { ansible_host: 192.168.121.103 }
5 | E2: { ansible_host: 192.168.121.104 }
6 | PE1: { ansible_host: 192.168.121.101 }
7 | E3: { ansible_host: 192.168.121.105 }
8 | E4: { ansible_host: 192.168.121.106 }
9 | PE2: { ansible_host: 192.168.121.102 }
10 | vars:
11 | ansible_user: vagrant
12 | ansible_ssh_private_key_file: ~/.vagrant.d/insecure_private_key
13 | ansible_network_os: ios
14 | ansible_connection: network_cli
15 | ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q brick.local"'
16 | simulation: libvirt
17 |
--------------------------------------------------------------------------------
/OSPF-Deployment/ios/verify_ospf.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Deploy OSPF routing in a WAN fabric
3 | #
4 | ---
5 | - name: Wait for OSPF to start
6 | pause: seconds=15 prompt="Waiting for OSPF to start"
7 | when: wait_flag is defined
8 |
9 | - name: Collect OSPF neighbors
10 | ios_command:
11 | commands:
12 | - "show ip ospf neighbor | include ^[1-9]"
13 | register: ospf_neighbors
14 |
15 | - name: Verify OSPF is running on all internal interfaces
16 | assert:
17 | that: "'{{ item.key }}' in ospf_neighbors.stdout[0]"
18 | msg: "OSPF session on interface {{item.key}} is missing"
19 | with_dict: "{{ nodes[inventory_hostname].links }}"
20 |
--------------------------------------------------------------------------------
/DMVPN/roles/routing/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Routing configuration - spokes
3 | template: src=30-{{routing|default('ibgp')}}-spoke.j2 dest={{ build_dir }}/{{inventory_hostname}}/30-routing.conf
4 | check_mode: no
5 | changed_when: false
6 | when: "'spokes' in group_names"
7 |
8 | - name: Routing configuration - hubs
9 | template: src=30-{{routing|default('ibgp')}}-hub.j2 dest={{ build_dir }}/{{inventory_hostname}}/30-routing.conf
10 | check_mode: no
11 | changed_when: false
12 | when: "'hubs' in group_names"
13 |
14 | #- set_fact: routing_includes={{role_path}}/templates/
15 | - set_fact: routing_includes=roles/routing/templates/
--------------------------------------------------------------------------------
/Sample-Summary-Report/uptime-text.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Collect SNMP and vendor-specific facts from devices. Create a text report
3 | hosts: all
4 | vars:
5 | - results: results
6 | tasks:
7 | - file: path={{results}} state=directory
8 | run_once: true
9 | - file: path={{results}}/uptime.log state=touch
10 | run_once: true
11 | - snmp_facts:
12 | host: "{{ansible_host}}"
13 | version: v2
14 | community: cisco
15 | - ios_facts:
16 | - lineinfile:
17 | dest: "{{results}}/uptime.log"
18 | regexp: "{{inventory_hostname}}"
19 | line: "{{'%20s %8s'|format(inventory_hostname,ansible_sysuptime)}}"
--------------------------------------------------------------------------------
/Compare-State-Snapshots/get-state.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Collect state from network devices. Execute all playbooks in getters/{{os}}
3 | #
4 | ---
5 | - hosts: all
6 | vars:
7 | output: "{{inventory_dir}}/snapshot"
8 | format: yaml
9 | tasks:
10 | - include_tasks: "{{item}}"
11 | vars:
12 | filter_dir: getters/{{ansible_os}}
13 | with_fileglob: [ "getters/{{ansible_os}}/*.yml" ]
14 |
15 | - include_tasks: "{{item}}"
16 | with_fileglob: [ "getters/napalm/*.yml" ]
17 |
18 | - include_tasks: "../tools/include/snapshot.yml"
19 | when: snapshot is defined
20 |
21 | - name: Save
22 | include_tasks: "{{ 'savers/'+format+'.yml' }}"
23 |
--------------------------------------------------------------------------------
/Config-to-Git/filter_plugins/clean_config.py:
--------------------------------------------------------------------------------
1 | #
2 | # Configuration cleanup filter: remove config lines matching the supplied patterns
3 | #
4 | from __future__ import (absolute_import, division, print_function)
5 | __metaclass__ = type
6 |
7 | from jinja2 import TemplateError
8 | import re
9 |
10 | def clean_config(l,*argv):
11 | for element in argv:
12 | if type(element) is list:
13 | for value in element:
14 | l = clean_config(l,value)
15 | else:
16 | regex = re.sub(r'\.\.\.', r'.*?\n', element)
17 | l = re.sub(regex,'',l)
18 | return l
19 |
20 |
21 | class FilterModule(object):
22 |
23 | def filters(self):
24 | return {
25 | 'clean_config': clean_config
26 | }
--------------------------------------------------------------------------------
/OSPF-Deployment/hosts.fqdn:
--------------------------------------------------------------------------------
1 | #
2 | # Auto-generated inventory file
3 | #
4 | E1.virl.info ansible_host=172.16.1.110 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
5 | E2.virl.info ansible_host=172.16.1.111 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
6 | E3.virl.info ansible_host=172.16.1.120 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
7 | E4.virl.info ansible_host=172.16.1.121 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
8 | PE1.virl.info ansible_host=172.16.1.112 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
9 | PE2.virl.info ansible_host=172.16.1.122 ansible_os=ios ansible_user=cisco ansible_ssh_pass=cisco
10 |
--------------------------------------------------------------------------------
/Summary-Reports/hosts-file/zone.j2:
--------------------------------------------------------------------------------
1 | {% for host,facts in hostvars|dictsort
2 | if facts.napalm_interfaces_ip is defined %}
3 | {% for ifname,ifdata in facts.napalm_interfaces_ip.items()
4 | if ifdata.ipv4 is defined %}
5 | {% for ip,prefix in ifdata.ipv4.items() %}
6 | {{'%-30s %s'|format(ip|ipaddr('revdns'),facts.napalm_fqdn) }}
7 | {% endfor %}
8 | {% endfor %}
9 | {% for ifname,ifdata in facts.napalm_interfaces_ip.items()
10 | if ifdata.ipv6 is defined %}
11 | {% for ip,prefix in ifdata.ipv6.items() %}
12 | {{'%-60s %s'|format(ip|ipaddr('revdns'),facts.napalm_fqdn) }}
13 | {% endfor %}
14 | {% endfor %}
15 | {% endfor %}
16 |
--------------------------------------------------------------------------------
/Data-Models/BGP_IF/config.j2:
--------------------------------------------------------------------------------
1 | hostname {{ hostname }}
2 | {% for ifname,ifdata in interfaces|dictsort %}
3 | !
4 | interface {{ ifname }}
5 | ip address {{ ifdata.ip|ipaddr('address') }} {{ ifdata.ip|ipaddr('netmask') }}
6 | {% endfor %}
7 | !
8 | router bgp {{ bgp_as }}
9 | {% for n in neighbors %}
10 | {% set n_ip = hostvars[n.name].interfaces[n.interface].ip|ipaddr('address') %}
11 | neighbor {{ n_ip }} remote-as {{ hostvars[n.name].bgp_as }}
12 | neighbor {{ n_ip }} description {{ n.name }}
13 | {% endfor %}
14 | {% for ifname,ifdata in interfaces|dictsort if 'Vlan' in ifname %}
15 | network {{ ifdata.ip|ipaddr('network') }} {{ ifdata.ip|ipaddr('netmask') }}
16 | {% endfor %}
17 |
--------------------------------------------------------------------------------
/AWS/subnets.yml:
--------------------------------------------------------------------------------
1 | - ec2_vpc_subnet:
2 | region: "{{ region }}"
3 | vpc_id: "{{ item.vpc | default(default_vpc_id) }}"
4 | az: "{{ item.zone | default(zone) }}"
5 | cidr: "{{ item.cidr }}"
6 | resource_tags:
7 | name: "{{ item.name }}"
8 | # assign_instances_ipv6: "{{ item.ipv6 | default('no') }}"
9 | with_items: "{{ subnets }}"
10 | when: subnets is defined
11 |
12 | - ec2_vpc_route_table:
13 | region: "{{ region }}"
14 | vpc_id: "{{ default_vpc_id }}"
15 | subnets: "{{ subnets | json_query ('[?route == `local`].cidr') }}"
16 | tags:
17 | name: local
18 | when: (subnets is defined) and (subnets | json_query ('[?route == `local`].cidr') | first)
19 |
20 |
--------------------------------------------------------------------------------
/OSPF-Deployment/model/fabric-to-nodes.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: no
4 | tasks:
5 | - include_vars: "{{ item }}"
6 | with_first_found:
7 | - "{{dir}}/{{fabric|default('fabric.yml')}}"
8 | - "{{inventory_dir}}/{{fabric|default('fabric.yml')}}"
9 | - "{{fabric|default('fabric.yml')}}"
10 | tags: [ hosts, nodes ]
11 | - name: Create inventory file from fabric data model
12 | template: src=fabric-to-hosts.j2 dest={{dir|default(inventory_dir)}}/hosts
13 | tags: [ hosts ]
14 | - name: Create per-node data model from fabric data model
15 | template: src=fabric-to-nodes.j2 dest={{dir|default(inventory_dir)}}/nodes.yml
16 | tags: [ nodes ]
17 |
--------------------------------------------------------------------------------
/Summary-Reports/inventory/html.j2:
--------------------------------------------------------------------------------
 1 | <html>
 2 | <head>
 3 | <title>Device uptime report</title>
 4 | </head>
 5 | <body>
 6 | <h1>Device inventory report</h1>
 7 | <table>
 8 |   <tr>
 9 |     <th>Device name</th>
10 |     <th>FQDN</th>
11 |     <th>Management IP</th>
12 |     <th>Serial number</th>
13 |     <th>Software version</th>
14 |   </tr>
15 | {% for host,facts in hostvars|dictsort %}
16 |   <tr>
17 |     <td>{{ host }}</td>
18 |     <td>{{ facts.napalm_fqdn }}</td>
19 |     <td>{{ ansible_host|default('') }}</td>
20 |     <td>{{ facts.napalm_serial_number }}</td>
21 |     <td>{{ facts.napalm_os_version }}</td>
22 |   </tr>
23 | {% endfor %}
24 | </table>
25 | </body>
26 | </html>
--------------------------------------------------------------------------------
/Compare-State-Snapshots/getters/napalm/get-facts.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Get device state using NAPALM
3 | #
4 | - napalm_get_facts:
5 | hostname={{ansible_host|default(inventory_hostname)}}
6 | username={{ansible_user}}
7 | password={{ansible_ssh_pass}}
8 | dev_os={{ansible_os}}
9 | filter='bgp_neighbors,interfaces,interfaces_ip,lldp_neighbors'
10 | - set_fact:
11 | state_bgp_peers: >
12 | {{ napalm_bgp_neighbors.global.peers |
13 | remove_keys('uptime',true) }}
14 | state_interfaces: >
15 | {{ napalm_interfaces |
16 | combine(napalm_interfaces_ip,recursive=True) |
17 | remove_keys('last_flapped',true) }}
18 | state_lldp_neighbors: "{{napalm_lldp_neighbors}}"
19 |
20 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/getters/ios/parse_ospf_interfaces.filter:
--------------------------------------------------------------------------------
1 | # CLI filter for "show ip ospf neighbor" Cisco IOS printout
2 | #
3 | ---
4 | vars:
5 | interface:
6 | key: "{{ item[0].ifname }}"
7 | values:
8 | admin_state: "{{ item[0].admin_state }}"
9 | op_state: "{{ item[0].op_state }}"
10 | address: "{{ item[1].addr }}"
11 | area: "{{ item[1].area }}"
12 |
13 | keys:
14 | interfaces:
15 | start_block: ".*line protocol is"
16 | end_block: "^$"
17 | type: list
18 | value: "{{ interface }}"
19 | items:
20 |       - "^(?P<ifname>\\S+) is (?P<admin_state>[^,]+), line protocol is (?P<op_state>\\S+)"
21 |       - ".*Internet Address (?P<addr>[0-9./]+), Area (?P<area>[0-9.]+)"
22 |
23 |
--------------------------------------------------------------------------------
/Data-Models/Network_Prefix/interfaces.j2:
--------------------------------------------------------------------------------
1 | {#
2 | Interface macro
3 | #}
4 | {% macro interface(name,addr) -%}
5 | !
6 | interface {{ name }}
7 | ip address {{ addr|ipaddr('address') }} {{ addr|ipaddr('netmask') }}
8 | {%- endmacro %}
9 | {#
10 | Interfaces
11 | #}
12 | {% for link in links %}
13 | {% for node,iflist in link|dictsort if node != 'prefix' %}
14 | {% if node == inventory_hostname %}
15 | {% if link.prefix is defined %}
16 | {{ interface(iflist,link.prefix|ipaddr(loop.index)) }}
17 | {% else %}
18 | {% for ifname,ip in iflist|dictsort %}
19 | {{ interface(ifname,ip) }}
20 | {% endfor %}
21 | {% endif %}
22 | {% endif %}
23 | {% endfor %}
24 | {% endfor %}
25 |
--------------------------------------------------------------------------------
/LLDP-to-Graph-pyeznc/graph-eznc.j2:
--------------------------------------------------------------------------------
1 | graph network {
2 | {% for local in play_hosts %}
3 | "{{local}}" [shape=record,
4 | label="{{local}}|{ {%
5 | for key in hostvars[local].lldp_results.resource|sort(attribute='local_int')
6 | %}<{{- key['local_int'] -}}>{{- key['local_int'] -}}{% if not(loop.last) %}|{% endif %}{%
7 | endfor %} }"];
8 | {% endfor %}
9 | {% for local in play_hosts %}
10 | {% for x in hostvars[local].lldp_results.resource|sort(attribute='local_int') if x|length > 0 %}
11 | {% if local < x.remote_sysname or x.remote_sysname not in play_hosts %}
12 | "{{local}}":"{{ x.local_int }}" -- "{{x.remote_sysname}}":"{{x.remote_port_id}}";
13 | {% endif %}
14 | {% endfor %}
15 | {% endfor %}
16 | }
17 |
--------------------------------------------------------------------------------
/3-router-setup/README.md:
--------------------------------------------------------------------------------
1 | # Ansible configuration files used with Inter-AS VIRL topology
2 |
3 | This directory contains Ansible and VIRL files used in a 3-router topology:
4 |
5 | * `hosts.yml` - Ansible inventory file in YAML format
6 | * `ansible.cfg` - Ansible configuration file (sets connection and NAPALM plugin directory)
7 | * `3-router.virl` - VIRL topology file
8 | * `setup.sh` - sets environment variables to select Ansible inventory and configuration files.
9 |
10 | This topology was used in _Managing network device configurations with Git_ section of [Building Network Automation Solutions](http://www.ipspace.net/Building_Network_Automation_Solutions) online course.
11 |
12 | To use these files with your playbooks run `source setup.sh`.
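For illustration, a `setup.sh` along these lines (a sketch only, using the standard `ANSIBLE_CONFIG` and `ANSIBLE_INVENTORY` environment variables; the actual script in this directory may differ) is all that's needed:

```
# point Ansible at the configuration and inventory files in this directory
export ANSIBLE_CONFIG="$PWD/ansible.cfg"
export ANSIBLE_INVENTORY="$PWD/hosts.yml"
```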
--------------------------------------------------------------------------------
/Data-Models/Validation/validate.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # Validate prefix-based network model (see Network_Prefix directory for details)
4 | #
5 |
6 | # Exit on first failure - also ensures the exit code will be set correctly
7 | set -e
8 |
9 | # Convert host variables from Ansible inventory into a JSON file
10 | # Use hosts JSON schema to validate the data
11 | #
12 | ansible-inventory -i ../hosts --list | jq ._meta.hostvars >/tmp/$$.hosts.json
13 | jsonschema -i /tmp/$$.hosts.json hosts.schema.json
14 |
15 | # Convert network YAML file into a JSON file and validate it with network JSON schema
16 | #
17 | yq . <network.yml >/tmp/$$.network.json
18 | jsonschema -i /tmp/$$.network.json network.schema.json
19 |
20 | # Cleanup
21 | rm /tmp/$$.*.json
22 |
--------------------------------------------------------------------------------
/DMVPN/roles/libvirt/templates/90-libvirt.j2:
--------------------------------------------------------------------------------
1 | vrf definition MGMT
2 | description Management interface
3 | !
4 | address-family ipv4
5 | exit-address-family
6 | !
7 | interface GigabitEthernet0/0
8 | description vagrant-management
9 | vrf forwarding MGMT
10 | ip address dhcp
11 | duplex full
12 | speed auto
13 | media-type rj45
14 | !
15 | ip ssh version 2
16 | ip ssh pubkey-chain
17 | username vagrant
18 | key-hash ssh-rsa DD3BB82E850406E9ABFFA80AC0046ED6
19 | !
20 | event manager applet ENABLE-MGMT
21 | event syslog pattern "SYS-5-RESTART"
22 | action 0 cli command "enable"
23 | action 1 cli command "conf t"
24 | action 3 cli command "interface GigabitEthernet0/0"
25 | action 4 cli command "no shutdown"
26 | action 5 cli command "exit"
27 |
--------------------------------------------------------------------------------
/DMVPN/roles/routing/templates/30-ibgp-hub.j2:
--------------------------------------------------------------------------------
1 | router bgp {{as}}
2 | bgp log-neighbor-changes
3 | redistribute ospf 1
4 | neighbor spokes peer-group
5 | neighbor spokes remote-as {{as}}
6 | neighbor spokes route-reflector-client
7 | neighbor spokes default-originate
8 | neighbor spokes send-community
9 | {% for ifnum,intf in DMVPN|default({})|dictsort %}
10 | {% set ifip = intf.ip+"/24" %}
11 | bgp listen range {{ifip|ipaddr(0)}} peer-group spokes
12 | {% endfor %}
13 | {% for hub in groups['hubs'] %}
14 | {% if hub != inventory_hostname %}
15 | {% set peerip = hostvars[hub].loopback.ip %}
16 | neighbor {{peerip}} remote-as {{as}}
17 | neighbor {{peerip}} next-hop-self all
18 | {% endif %}
19 | {% endfor %}
20 | !
21 | router ospf 1
22 | !
23 |
--------------------------------------------------------------------------------
/Summary-Reports/framework/framework.yml:
--------------------------------------------------------------------------------
1 | #
2 | # The playbook collects facts from managed devices
3 | # and saves them to one or more files
4 | # in results directory
5 | #
6 | # The playbook behavior is controlled with extra variables
7 | #
 8 | #  src: data source (default: snmp)
9 | # dst: report generator (default: Jinja2)
10 | # fmt: report template (when using Jinja2 report generator)
11 | #
12 | ---
13 | - name: Collect facts from devices and write a report
14 | hosts: all
15 | tasks:
16 |   - set_fact: results={{ lookup('env','OUTPUT') | default('results', true) }}
17 | - include_tasks: "read/{{src|default('snmp')}}.yml"
18 | - file: path={{results}} state=directory
19 | run_once: true
20 | - include_tasks: "report/{{dst|default('template')}}.yml"
21 |
--------------------------------------------------------------------------------
/LLDP-to-Graph-pyeznc/LLDP-to-Graph-eznc.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Fetch LLDP information from network elements and produce physical topology
4 | hosts: all
5 | gather_facts: no
6 | connection: local
7 |
8 | tasks:
9 |
10 | - name: Get LLDP neighbors using table/views
11 | junos_get_table:
12 | table=LLDPNeighborTable
13 | file=lldp.yml
14 | host={{ inventory_hostname }}
15 | port=22
16 | register: lldp_results
17 |
18 | - name: print
19 | debug: msg="{{ lldp_results }}"
20 |
21 | - name: Generate graph description file
22 | template: src={{template|default('graph-eznc.j2')}} dest=./{{output|default('network.dot')}}
23 | run_once: true
24 |
25 | roles:
26 | - ansible-junos-stdlib
27 |
--------------------------------------------------------------------------------
/OSPF-Deployment/addressing.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Deploy IP interface addressing in a WAN fabric
3 | #
4 | ---
5 | - name: Enable WAN interfaces, apply IP addresses
6 | hosts: all
7 | vars:
8 | configs: "configs"
9 | tasks:
10 | - include_vars: "{{ item }}"
11 | with_first_found:
12 | - "{{ inventory_dir }}/nodes.yml"
13 | - nodes.yml
14 | tags: [ configs,verify,validate ]
15 |
16 | - include_tasks: tools/clean.yml
17 | tags: [ clean ]
18 |
19 | - include_tasks: common/config_interfaces.yml
20 | tags: [ configs ]
21 | args:
22 | apply:
23 | tags: [ configs ]
24 |
25 | - include_tasks: "{{ansible_network_os}}/deploy_interfaces.yml"
26 | tags: [ deploy ]
27 | args:
28 | apply:
29 | tags: [ deploy ]
30 |
--------------------------------------------------------------------------------
/OSPF-Deployment/deploy.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Deploy OSPF routing in a WAN fabric
3 | #
4 | ---
5 | - name: Create and deploy OSPF configurations
6 | hosts: all
7 | vars:
8 | configs: "configs"
9 | tasks:
10 | - include_vars: "{{ item }}"
11 | with_first_found:
12 | - "{{ inventory_dir }}/nodes.yml"
13 | - nodes.yml
14 | tags: [ configs,verify,validate ]
15 |
16 | - include_tasks: tools/clean.yml
17 | tags: [ clean ]
18 |
19 | - include_tasks: common/config_ospf.yml
20 | tags: [ configs ]
21 |
22 | - block:
23 | - include_tasks: "{{ansible_network_os}}/deploy_ospf.yml"
24 | - set_fact: wait_flag=1
25 | tags: [ deploy ]
26 |
27 | - include_tasks: "{{ansible_network_os}}/verify_ospf.yml"
28 | tags: [ verify,validate ]
29 |
--------------------------------------------------------------------------------
/OSPF-Deployment/model/fabric-to-vars.j2:
--------------------------------------------------------------------------------
1 | #
2 | # Ansible host variables created from the fabric data model
3 | #
4 | {% macro internal_link(name,ip,cost,remote) %}
5 | {{ name }}:
6 | ip: {{ip}}
7 | remote: {{remote}}
8 | {% if cost %}cost: {{cost}}{% endif %}{% endmacro %}
9 |
10 | ---
11 | {% for node in nodes if node.name == inventory_hostname %}
12 | mgmt: {{ node.mgmt }}
13 | rid: {{ node.rid }}
14 | links:
15 | {% for link in fabric %}
16 | {% if link.left == node.name %}
17 | {{ internal_link(link.left_port,link.left_ip,link.cost|default(''),link.right) }}
18 | {% elif link.right == node.name %}
19 | {{ internal_link(link.right_port,link.right_ip, link.cost|default(''),link.left) }}
20 | {% endif %}
21 | {% endfor %}
22 | {% endfor %}
23 |
--------------------------------------------------------------------------------
/OSPF-Deployment/model/fabric-to-vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: no
4 | tasks:
5 | - include_vars: "{{ input }}"
6 | - file: dest={{output}} state=directory
7 | - file: dest={{output}}/host_vars state=directory
8 | - name: Create inventory file from fabric data model
9 | template: src=fabric-to-hosts.j2 dest={{output}}/hosts
10 | - name: Create dynamic hosts
11 | add_host:
12 | name: "{{item.name}}"
13 | group: devices
14 | with_items: "{{nodes}}"
15 |
16 | - hosts: devices
17 | gather_facts: no
18 | connection: local
19 | tasks:
20 | - include_vars: "{{ input }}"
21 | - name: Create per-node vars files from fabric data model
22 | template: src=fabric-to-vars.j2 dest={{output}}/host_vars/{{inventory_hostname}}.yml
23 |
--------------------------------------------------------------------------------
/DHCP-Pools/extract.j2:
--------------------------------------------------------------------------------
1 | {% macro WritePool(host,mac,ip) %}
2 | {{ host }}: { id: '{{mac|default('')}}', ip: '{{ip|default('')}}' }{% endmacro %}
3 | ---
4 | {% for line in pools.stdout_lines[0] %}
5 | {% set poolname = line|regex_findall('^ip dhcp pool (\S+)$') %}
6 | {% set ip = line|regex_findall('host ([0-9.]+)') %}
7 | {% set mac = line|regex_findall('(client-identifier|hardware-address)\s+([0-9a-f.]+)') %}
8 | {% if poolname[0] is defined %}
9 | {% if not (host is defined) %}
10 | hostPools:
11 | {% endif %}
12 | {% set host = poolname[0] %}
13 | {% set host_ip = "" %}
14 | {% endif %}
15 | {% set host_ip = ip[0] |default(host_ip)|default("") %}
16 | {% if mac[0] is defined %}
17 | {{ WritePool(host,mac[0][1],host_ip) }}
18 | {% endif %}
19 | {% endfor %}
--------------------------------------------------------------------------------
/Config-to-Git/gc_show.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Get configuration from managed hosts using "show running" command
3 | #
4 | ---
5 | - name: Get device configuration
6 | hosts: all
7 | tasks:
8 | - name: Make sure we're in the right branch
9 | shell: git checkout {{branch|default('master')}}
10 | args:
11 | chdir: "{{repository}}"
12 | delegate_to: localhost
13 | run_once: true
14 | changed_when: no
15 |
16 | - name: Grab configuration
17 | ios_command:
18 | commands: show running
19 | register: cfg
20 | - set_fact: config={{ cfg.stdout[0]|regex_replace("\A[^!]*","") }}
21 |
22 | - name: Save configuration to local file
23 | copy:
24 | content: "{{config}}"
25 | dest: "{{repository}}/{{inventory_hostname}}.cfg"
26 | delegate_to: localhost
27 |
--------------------------------------------------------------------------------
/Description-to-Fabric/get-connectivity/internal-links.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: remote-interface.yml
3 | - set_fact:
4 | link: |
5 | { 'left': '{{inventory_hostname}}',
6 | 'left_port': '{{intf.name}}',
7 | 'left_ip': '{{intf.ipv4[0].address}}',
8 | 'right': '{{remote}}',
9 | 'right_port': '{{remintf.name}}',
10 | 'right_ip': '{{remintf.ipv4[0].address}}'
11 | }
12 | when: |
13 | {{
14 | remintf.ipv4 is defined and
15 | remintf.ipv4[0].address > intf.ipv4[0].address and
16 | group_names | intersect(hostvars[remote].group_names) | length > 0
17 | }}
18 | - set_fact:
19 | internal_links: "{% set x = internal_links.append(link) %}{{internal_links}}"
20 | when: "{{ link.left is defined }}"
21 |
--------------------------------------------------------------------------------
/AWS/create-instances.yml:
--------------------------------------------------------------------------------
1 | - ec2_vpc_subnet_facts:
2 | region: "{{ region }}"
3 | register: vpc_subnets
4 |
5 | - ec2_instance_facts:
6 | region: "{{ region }}"
7 | filters:
8 | "instance-state-name": "running"
9 | register: ec2_instances
10 |
11 | - ec2:
12 | private_ip: "{{ item.ip | default('') }}"
13 | region: "{{ region }}"
14 | zone: "{{ zone }}"
15 | image: "{{ ami_id }}"
16 | instance_type: "{{ instance_type }}"
17 | group: ssh
18 | vpc_subnet_id: "{{ vpc_subnets|json_query('subnets[?tags.name == `'+item.subnet+'`].id')|first if item.subnet is defined else '' }}"
19 | instance_tags:
20 | Name: "{{ item.name }}"
21 | id: "{{ item.name }}"
22 | count: 1
23 | key_name: "{{key_name}}"
24 | wait: yes
25 | with_items: "{{ vm }}"
26 |
--------------------------------------------------------------------------------
/DMVPN/libvirt/hosts-libvirt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | all:
3 | vars:
4 | ansible_user: vagrant
5 | ansible_ssh_pass: vagrant
6 | ansible_become_method: enable
7 | ansible_become_password: vagrant
8 |
9 | #
10 | # Replace brick.local with FQDN of your libvirt host. Remove this line if you're running
11 | # Ansible on the same host as IOSv virtual machines
12 | #
13 | ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q brick.local"'
14 | simulation: libvirt
15 | hosts:
16 | C1:
17 | ansible_host: 192.168.121.101
18 | C2:
19 | ansible_host: 192.168.121.102
20 | R1A:
21 | ansible_host: 192.168.121.103
22 | R1B:
23 | ansible_host: 192.168.121.104
24 | R2:
25 | ansible_host: 192.168.121.105
26 | R3:
27 | ansible_host: 192.168.121.106
28 |
--------------------------------------------------------------------------------
/Summary-Reports/README.md:
--------------------------------------------------------------------------------
1 | # Create simple summary reports
2 |
3 | This directory contains several simple summary reports discussed in the _Easy Wins_ part of [Building Network Automation Solutions](http://www.ipspace.net/Building_Network_Automation_Solutions) online course:
4 |
5 | * **inventory** - Simple inventory report listing device name, software version and serial number in text, CSV and HTML format;
6 | * **framework** - An extensible report framework generating device uptime reports in various formats
7 | * **hosts-file** - Creates `/etc/hosts` file and `in-addr.arpa` DNS zone file from IP addresses collected from network devices.
8 |
 9 | Reports are stored in the directory specified in the $OUTPUT environment variable, or in the **results** subdirectory of the current directory.
10 |
11 | The playbooks have been tested with Ansible 2.4 and napalm-ansible 0.7.0.
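For example (a sketch, assuming your inventory and device credentials are already set up, and that `html` is one of the templates under `framework/template`):

```
# inventory report, stored in /tmp/reports instead of the default ./results
OUTPUT=/tmp/reports ansible-playbook inventory/report.yml

# uptime report through the framework: src selects the data source, fmt the Jinja2 template
OUTPUT=/tmp/reports ansible-playbook framework/framework.yml -e src=facts -e fmt=html
```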
--------------------------------------------------------------------------------
/DMVPN/build.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Prepare for configuration build
3 | hosts: localhost
4 | tasks:
5 | - shell: |
6 | mkdir -p {{config_dir}}
7 | rm -fr {{build_dir}}
8 | mkdir -p {{build_dir}}
9 | cd {{build_dir}}
10 | mkdir {{groups['all']|join(' ')}}
11 | args:
12 | warn: false
13 | changed_when: false
14 |
15 | - name: Generate configs
16 | hosts: all
17 | connection: local
18 | roles:
19 | - routing
20 | - base
21 | - dmvpn
22 | tasks:
23 | - include_role:
24 | name: "{{ simulation|default('virl') }}"
25 |
26 | - name: Assemble configurations
27 | hosts: all
28 | connection: local
29 | tasks:
30 | - assemble:
31 | src: "{{build_dir}}/{{inventory_hostname}}"
32 | dest: "{{config_dir}}/{{inventory_hostname}}.conf"
33 | ignore_hidden: yes
34 | check_mode: no
35 |
--------------------------------------------------------------------------------
/Data-Models/Validation/hosts.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "http://json-schema.org/draft-07/schema#",
3 | "$id": "https://www.ipSpace.net/hosts.schema.json",
4 | "title": "Ansible inventory data",
5 | "description": "A quick check to verify that the Ansible inventory contains the expected values",
6 | "definitions": {
7 | "router" : {
8 | "type" : "object",
9 | "properties": {
10 | "bgp_as": {
11 | "type": "number",
12 | "minimum": 1,
13 | "maximum": 65535
14 | },
15 | "hostname": {
16 | "type": "string"
17 | }
18 | },
19 | "required": [ "bgp_as","hostname" ],
20 | "additionalProperties": false
21 | }
22 | },
23 | "type": "object",
24 | "patternProperties": {
25 | ".*" : { "$ref" : "#/definitions/router" }
26 | },
27 | "minProperties": 1
28 | }
--------------------------------------------------------------------------------
/Sample-Compliance-Check/report-results.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Read JSON file generated by the "compliance checks" Ansible
3 | # playbook and generate a summary report
4 | #
5 | # Inputs (specified as extra-vars)
6 | #
7 | # - input: file name
8 | # - format: Jinja2 template name (in reports directory)
 9 | # - output: Output file name
10 | #
11 | # Defaults:
12 | # - format = json-simple.j2
13 | # - output = errors.txt
14 | #
15 | ---
16 | - hosts: localhost
17 | tasks:
18 | - assert:
19 | that: input is defined
20 | msg: Have to specify INPUT extra variable
21 | - set_fact:
22 | json_data: "{{ lookup('file',input)|from_json }}"
23 | - set_fact:
24 | results: "{{ json_data|json_query('plays[].tasks[]') }}"
25 | - template:
26 | src: "reports/{{format|default('json-simple')}}.j2"
27 | dest: "results/{{output|default('errors.txt')}}"
28 |
--------------------------------------------------------------------------------
/OSPF-Deployment/model/fabric-to-nodes.j2:
--------------------------------------------------------------------------------
1 | #
2 | # Nodes in the network
3 | #
4 | {% macro internal_link(name,ip,cost,remote) %}
5 | {{ name }}: { ip: {{ip}}, remote: {{remote}} {% if cost %}, cost: {{cost}}{% endif %} }{% endmacro %}
6 |
7 | ---
8 | common:
9 | domain: {{common.domain|default("")}}
10 | suffix: {% if common.domain is defined %}.{{common.domain}}{% endif %}
11 |
12 | nodes:
13 | {% for node in nodes %}
14 |
15 | {{ node.name }}:
16 | mgmt: {{ node.mgmt }}
17 | rid: {{ node.rid }}
18 | links:
19 | {% for link in fabric %}
20 | {% if link.left == node.name %}
21 | {{ internal_link(link.left_port,link.left_ip,link.cost|default(''),link.right) }}
22 | {% elif link.right == node.name %}
23 | {{ internal_link(link.right_port,link.right_ip, link.cost|default(''),link.left) }}
24 | {% endif %}
25 | {% endfor %}
26 | {% endfor %}
27 |
--------------------------------------------------------------------------------
/Trace-Executed-Commands/trace.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Enable or disable logging of commands executed on Cisco IOS devices
3 | #
4 | ---
5 | - name: Enable or disable command logging on Cisco IOS
6 | hosts: all
7 | vars:
8 | configlets: { enabled: enableLogging.cfg, disabled: disableLogging.cfg, config: configLogging.cfg, keys: sshKeys.cfg }
9 | tasks:
10 | - set_fact: logging={{log|default('enabled')}}
11 | - set_fact: src={{configlets[logging]|default("")}}
12 | - assert:
13 | that: src != ""
14 | msg: "Unknown logging type {{logging}}"
15 |
16 | - debug:
17 | msg: Target logging state is {{logging}} configuring from {{src}}
18 |
19 | - name: Enable/Disable command logging
20 | ios_config:
21 | src: "{{src}}"
22 | host: "{{ip|default(ansible_host)}}"
23 | username: "{{ansible_user}}"
24 | password: "{{ansible_ssh_pass}}"
25 |
--------------------------------------------------------------------------------
/DMVPN/libvirt/vagrant-libvirt.xml:
--------------------------------------------------------------------------------
1 |
2 | vagrant-libvirt
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
--------------------------------------------------------------------------------
/Trace-Executed-Commands/README.md:
--------------------------------------------------------------------------------
1 | # Log executed commands on Cisco IOS
2 |
3 | The playbook in this directory enables or disables command logging (via EEM applet or config logger)
4 | on Cisco IOS devices.
5 |
6 | ## Use
7 | **ansible-playbook -i _inventory_ --extra-vars log=[enabled|disabled|config] trace.yml**
8 |
9 | ## Behind the scenes
10 |
11 | Using the **log** variable (default value: enabled) the playbook selects a configuration snippet and pushes
12 | it to all devices in the inventory file using the **ios_config** module.
13 |
14 | ## Build-your-own ideas
15 |
16 | You can use the playbook as a generic configure-something-on-IOS solution. Change the **configlets** dictionary (embedded in the playbook) to have more (or fewer) configuration snippets.
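For example, after adding a hypothetical `banner: banner.cfg` entry to the **configlets** dictionary (and creating that configuration snippet), you could push it with the same mechanism (replace `inventory` with your inventory file):

```
ansible-playbook -i inventory --extra-vars log=banner trace.yml
```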
17 |
18 | And when you discover you want to know more, [register for the Building Network Automation Solutions online course](http://ipspace.net/NetAutSol).
19 |
--------------------------------------------------------------------------------
/DMVPN/roles/base/templates/00-common.j2:
--------------------------------------------------------------------------------
1 | version 15.6
2 | service timestamps debug datetime msec
3 | service timestamps log datetime msec
4 | no service password-encryption
5 | !
6 | hostname {{inventory_hostname}}
7 | !
8 | boot-start-marker
9 | boot-end-marker
10 | !
11 | logging buffered 4096
12 | !
13 | enable password {{ ansible_become_password|default(ansible_ssh_pass) }}
14 | !
15 | aaa new-model
16 | !
17 | aaa authentication login default local
18 | aaa authorization exec default local
19 | !
20 | !
21 | no ip source-route
22 | ip cef
23 | ipv6 unicast-routing
24 | !
25 | no ip domain lookup
26 | {% if domain_name is defined %}
27 | ip domain name {{domain_name}}
28 | {% endif %}
29 | !
30 | !
31 | username {{ansible_user}} privilege 15 secret {{ansible_ssh_pass}}
32 | !
33 | redundancy
34 | !
35 | archive
36 | path flash:
37 | log config
38 | hidekeys
39 | !
40 | alias exec replace configure replace
41 | !
42 |
--------------------------------------------------------------------------------
/DHCP-Pools/extract.yml:
--------------------------------------------------------------------------------
1 | #
 2 | # This playbook extracts the DHCP pools configured on managed routers and saves them
 3 | # as per-device YAML files (rendered with extract.j2) into the directory specified
 4 | # with the extract_pools variable (default: pools)
5 | #
6 | ---
7 | - hosts: all
8 | name: Extract DHCP pools from existing device configurations
9 | vars:
10 | extract_path: "{{extract_pools|default('pools')}}"
11 | tasks:
12 | - name: Create pools directory
13 | file: path={{extract_path}} state=directory
14 | run_once: yes
15 |
16 | - ios_command:
17 | username: "{{ansible_user}}"
18 | password: "{{ansible_ssh_pass}}"
19 | host: "{{ip|default(inventory_hostname)}}"
20 | commands:
21 | - "show running | section ip dhcp pool"
22 | register: pools
23 |
24 | - debug: var=pools.stdout_lines
25 |
26 | - local_action: template src=extract.j2 dest={{extract_path}}/{{inventory_hostname}}.pools.yml
--------------------------------------------------------------------------------
/Sample-Compliance-Check/README.md:
--------------------------------------------------------------------------------
1 | # Execute a battery of compliance checks and create a report
2 |
3 | This directory contains the source code for the _Compliance Checks_ video in the [Creating Summary and Compliance Reports](https://my.ipspace.net/bin/list?id=NetAutSol&module=2#M2S2) part of [Building Network Automation Solutions](http://www.ipspace.net/Building_Network_Automation_Solutions) online course.
4 |
5 | The code has been refactored to produce a compliance error report based on Ansible playbook results collected with JSON callback (described in the _Putting It All Together_ part of the course).
6 |
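For example (file names below are placeholders), once the compliance checks have been run with the JSON stdout callback and the callback output saved to `checks.json`, a summary report can be produced with:

```
ansible-playbook report-results.yml -e input=checks.json -e format=json-simple -e output=errors.txt
```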
7 | ## Getting older version of the code
8 |
9 | To check out the original sources modified to work with Ansible 2.4 use
10 |
11 | git checkout Sample-Compliance-Check-v1.1
12 |
13 | To check out the original sources as shown in the online course videos use
14 |
15 | git checkout Sample-Compliance-Check-v1.0
--------------------------------------------------------------------------------
/LLDP-to-Graph/graph.j2:
--------------------------------------------------------------------------------
1 | {% from 'format.j2' import hostname,shortname,ifname with context %}
2 | graph network {
3 | {% for local in play_hosts %}
4 | {% set lname = hostvars[local].napalm_fqdn %}
5 | "{{hostname(lname)}}" [shape=record,
6 | label="{{hostname(lname)}}|{ {%
7 | for intf,lldp in hostvars[local].napalm_lldp_neighbors|dictsort
8 | %}<{{- ifname(intf) -}}>{{- ifname(intf) -}}{% if not(loop.last) %}|{% endif %}{%
9 | endfor %} }"];
10 | {% endfor %}
11 | {% for local in play_hosts %}
12 | {% for intf,lldp in hostvars[local].napalm_lldp_neighbors|dictsort if lldp|length > 0 %}
13 | {% for n in lldp
14 | if local < n.hostname or
15 | (n.hostname not in play_hosts and shortname(n.hostname) not in play_hosts) %}
16 | {% set lname = hostvars[local].napalm_fqdn %}
17 | "{{hostname(lname)}}":"{{ifname(intf)}}" -- "{{hostname(n.hostname)}}":"{{ifname(n.port)}}";
18 | {% endfor %}
19 | {% endfor %}
20 | {% endfor %}
21 | }
22 |
--------------------------------------------------------------------------------
/Collect-Printouts/gitcommit.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | base_dir=/opt/netprod/configs
4 | cd $base_dir
5 | git_directories=`ls | grep -v update-git.sh`
6 | timestamp=`date "+%Y-%m-%d %H:%M"`
7 |
8 | for git_dir in $git_directories ; do
9 | i=0
10 | echo $git_dir
11 | cd $base_dir/$git_dir
12 | added_devices=`git status -s | awk '{ print $1 " " $2 }' | grep -E '^\?\? ' | awk '{ print $2 }'`
13 | modified_devices=`git status -s | awk '{ print $1 " " $2 }' | grep -E '^M ' | awk '{ print $2 }'`
14 | for device in $added_devices ; do
15 | #echo "Added $device $timestamp"
16 | git add $device
17 | git commit -m "Added $device $timestamp"
18 | let i=($i+1)
19 | done
20 | for device in $modified_devices ; do
21 | #echo "Updated $device $timestamp"
22 | git add $device
23 | git commit -m "Updated $device $timestamp"
24 | let i=($i+1)
25 | done
26 | if [ "$i" -gt "0" ]
27 | then
28 | git push -u origin master
29 | fi
30 | done
--------------------------------------------------------------------------------
/Config-to-Git/gc_napalm.yml:
--------------------------------------------------------------------------------
1 | #
 2 | # Get configuration from managed hosts with NAPALM (napalm_get_facts)
3 | #
4 | ---
5 | - name: Get device configuration
6 | hosts: all
7 | connection: local
8 | tasks:
9 | - name: Make sure we're in the right branch
10 | shell: git checkout {{git_branch|default('master')}}
11 | args:
12 | chdir: "{{git_repository}}"
13 | run_once: true
14 | changed_when: no
15 |
16 | - name: Get configuration with NAPALM
17 | napalm_get_facts:
18 | hostname: "{{ansible_host|default(inventory_hostname)}}"
19 | username: "{{ansible_user}}"
20 | password: "{{ansible_ssh_pass}}"
21 | dev_os: ios
22 | filter: [ config ]
23 |
24 | - set_fact: clean_config={{ napalm_config.running|clean_config(clean_patterns) }}
25 |
26 | - name: Save configuration to local file
27 | copy:
28 | content: "{{clean_config}}"
29 | dest: "{{git_repository}}/{{inventory_hostname}}.cfg"
30 | delegate_to: localhost
31 |
--------------------------------------------------------------------------------
/Description-to-Fabric/get-connectivity/interas-links.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include: remote-interface.yml
3 | - set_fact:
4 | link: |
5 | { 'left': '{{inventory_hostname}}',
6 | 'left_port': '{{intf.name}}',
7 | 'left_ip': '{{intf.ipv4[0].address}}',
8 | 'left_as': '{% set as = group_names|select("match","AS")|first %}{{ as.replace("AS","") }}',
9 | 'right': '{{remote}}',
10 | 'right_port': '{{remintf.name}}',
11 | 'right_ip': '{{remintf.ipv4[0].address}}',
12 | 'right_as': '{% set as = hostvars[remote].group_names|select("match","AS")|first %}{{ as.replace("AS","") }}'
13 | }
14 | when: |
15 | {{
16 | remintf.ipv4 is defined and
17 | remintf.ipv4[0].address > intf.ipv4[0].address and
18 | group_names|intersect(hostvars[remote].group_names)|length == 0
19 | }}
20 | - set_fact:
21 | interas_links: "{% set x = interas_links.append(link) %}{{interas_links}}"
22 | when: "{{ link.left is defined }}"
23 |
--------------------------------------------------------------------------------
/Collect-Printouts/collect.yml:
--------------------------------------------------------------------------------
1 | #
 2 | # Collect configuration printouts from Cisco IOS devices and save them into per-device files
3 | #
4 | ---
 5 | - name: Collect configurations from Cisco IOS devices
6 | hosts: all
7 | vars:
8 | dir: "{{inventory_dir}}/printouts"
9 |
10 | tasks:
11 | - local_action: file path={{dir}} state=directory
12 | run_once: true
13 |
14 | - include_vars: "{{ item }}"
15 | with_first_found:
16 | - "{{ inventory_dir }}/printouts.yml"
17 | - printouts.yml
18 |
19 | - name: Collect printouts
20 | ios_command:
21 | host: "{{ip|default(ansible_host)}}"
22 | username: "{{ansible_user}}"
23 | password: "{{ansible_ssh_pass}}"
24 | commands: "{{ printouts | map(attribute='command') | list }}"
25 | register: results
26 |
27 | - name: Save printouts into files
28 | template: src=dummy.j2 dest="{{dir}}/{{inventory_hostname}}.{{item.0}}.txt"
29 | when: item.1 is defined
30 | with_together:
31 | - "{{ printouts | map(attribute='save') | list }}"
32 | - "{{ results.stdout }}"
--------------------------------------------------------------------------------
/6-router-setup/libvirt/vagrant-libvirt.xml:
--------------------------------------------------------------------------------
1 |
2 | vagrant-libvirt
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
--------------------------------------------------------------------------------
/Config-to-Git/git_commit.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Commit configuration changes to Git
3 | #
4 | ---
5 | - name: Commit configuration changes
6 | hosts: localhost
7 | tasks:
8 | - name: Get timestamp
9 | shell: date
10 | register: timestamp
11 |
12 | - name: Make sure we're in the right branch
13 | shell: git checkout {{git_branch|default('master')}}
14 | args:
15 | chdir: "{{git_repository}}"
16 |
17 | - name: Pull remote changes
18 | shell: git pull
19 | args:
20 | chdir: "{{git_repository}}"
21 |
22 | - name: Check the directory status
23 | shell: git status
24 | args:
25 | chdir: "{{git_repository}}"
26 | register: changes
27 |
28 | - name: "Add files to Git staging area, commit and push"
29 | shell: |
30 | git add .
31 | git commit -m "Configuration changed on {{timestamp.stdout}}"
32 | git push --set-upstream origin {{git_branch|default('master')}}
33 | args:
34 | chdir: "{{git_repository}}"
35 | when: not("working directory clean" in changes.stdout)
36 |
--------------------------------------------------------------------------------
/VIRL2Inventory/README.md:
--------------------------------------------------------------------------------
1 | # Convert VIRL topology file into Ansible inventory
2 |
 3 | The VIRL2Inventory.py script reads a VIRL topology file (in XML format)
 4 | and produces a list of hosts in simple Ansible inventory format.
5 |
6 | ## Usage
7 | Usage: VIRL2Inventory -i file [-jva]
8 |
9 | -i: specify input file (default: topology.virl)
10 | -j: output JSON
11 | -a: use ansible_host instead of ip variable
12 | -v: verbose
13 |
14 | ## Behavior
15 | For every <node> object in the XML file the script extracts:
16 |
17 | * node name (**name** attribute)
18 | * operating system (**subtype** attribute)
19 | * management IP address (**ipv4** attribute)
20 |
21 | The management IP address is replaced with the extensions entry with
22 | key=**static_ip** when you have configured an external management network with
23 | static IP addresses in VIRL (highly recommended).
24 | ## Output
25 | One line per VIRL object in format
26 |
27 | *name* ip=*ip* os=*os*
28 |
29 | Value **ip** is replaced with **ansible_host** when the -a option is specified.
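For example (assuming a Python interpreter on the path; adjust the invocation to your environment), converting the default topology file and emitting **ansible_host** instead of **ip**:

```
python VIRL2Inventory.py -i topology.virl -a
```

The script prints one *name* ansible_host=*ip* os=*os* line per node, ready to be pasted into an Ansible inventory file.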
--------------------------------------------------------------------------------
/Data-Models/Network_Prefix/bgp.j2:
--------------------------------------------------------------------------------
1 | {#
2 | BGP neighbor macro
3 | #}
4 | {% macro neighbor(name,ip) -%}
5 | {% set n_ip = ip|ipaddr('address') %}
6 | neighbor {{ n_ip }} remote-as {{ hostvars[name].bgp_as }}
7 | neighbor {{ n_ip }} description {{ name }}
8 | {%- endmacro %}
9 | {#
10 | BGP routing protocol configuration
11 | #}
12 | router bgp {{ bgp_as }}
13 | {#
14 | BGP neighbors - find links that contain local nodename and create
15 | neighbors for all other nodenames
16 | #}
17 | {% for link in links if link|length > 1 and inventory_hostname in link.keys() %}
18 | {% for node,ifname in link|dictsort if node != 'prefix' %}
19 | {% if node != inventory_hostname %}
20 | {{ neighbor(node,link.prefix|ipaddr(loop.index)) }}
21 | {% endif %}
22 | {% endfor %}
23 | {% endfor %}
24 | !
25 | {% for link in links if link|length == 1 %}
26 | {% for node,iflist in link|dictsort if node == inventory_hostname %}
27 | {% for ifname,ip in iflist|dictsort %}
28 | network {{ ip|ipaddr('network') }} {{ ip|ipaddr('netmask') }}
29 | {% endfor %}
30 | {% endfor %}
31 | {% endfor %}
32 |
--------------------------------------------------------------------------------
/LLDP-to-Graph/disable-LLDP-on-edge.yml:
--------------------------------------------------------------------------------
1 | # created by abaretta@falco-networks.com
2 | ---
3 | - hosts: all
4 | connection: network_cli
5 | gather_facts: no
6 |
7 | tasks:
8 | - name: Get IOS STP edge ports
9 | ios_command:
10 | commands:
11 | - "sh span | inc Edge"
12 | register: spanedgeoutput
13 |
14 | - name: Create list of IOS STP edge ports
15 | set_fact:
16 | stp_edge: |
17 | {{ spanedgeoutput.stdout_lines[0] |
18 |            map('regex_replace','(?P<intf>^\S*)\s+.*$','\g<intf>') |
19 | map('join') | list }}
20 |
21 | - debug: var=stp_edge
22 |
23 | - name: disable lldp on access ports
24 | ios_config:
25 | lines:
26 | - no lldp transmit
27 | - no lldp receive
28 | parents: "interface {{ edge_interface }}"
29 | save_when: modified
30 | loop: "{{ stp_edge }}"
31 | loop_control:
32 | loop_var: edge_interface
33 | when: edge_interface | length > 0
34 |
35 | - name: enable lldp
36 | ios_config:
37 | lines:
38 | - lldp run
39 | save_when: modified
40 |
--------------------------------------------------------------------------------
/Data-Models/README.md:
--------------------------------------------------------------------------------
1 | This set of directories illustrates the data model deduplication
2 | article published on ipSpace.net during spring/summer 2019.
3 | URLs will be inserted at the time individual parts of the article
4 | are published.
5 |
6 | To use the examples:
7 |
8 | * Clone the repository
9 | * Execute `. setup.sh` in current directory to set Ansible environment
10 | variables
11 | * Within a subdirectory execute `ansible-playbook configs.yml`
12 | * Inspect .cfg files within the subdirectory
13 |
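For example, following the steps above to build and inspect the configurations for the **BGP_AS** data model:

```
. setup.sh
cd BGP_AS
ansible-playbook configs.yml
cat *.cfg
```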
14 | The subdirectories contain these steps in data model evolution:
15 |
16 | * **Initial**: initial box-specific data model
17 | * **BGP_AS**: data model replacing BGP AS number with name of BGP neighbor
18 | * **BGP_IF**: data model replacing peer IP address with name of BGP neighbor's interface
19 | * **Network**: data model describing network nodes and links
20 | * **Network_Macro**: Refactored Jinja2 configuration template for the *Network* data model
21 | * **Network_Dict**: Refactored network data model supporting stub interfaces and multi-access links
22 | * **Network_Prefix**: Replace per-node IP addresses with link prefixes
23 |
--------------------------------------------------------------------------------
/Config-to-Git/README.md:
--------------------------------------------------------------------------------
1 | # Collect Cisco IOS configurations and store them to Git repository
2 |
3 | The playbooks in this directory collect Cisco IOS configurations
4 | and store them in a Git repository.
5 |
6 | ## Setup
7 |
8 | Ansible variables required for proper operation:
9 |
10 | * **git_repository** (**repository** in *gc_show.yml*): directory already set up as a local Git repository
11 | * **git_branch** (**branch** in *gc_show.yml*): Git branch used for actual device configurations. The branch has to exist before the *git_commit.yml* playbook is run
12 |
13 | Optional:
14 | * Install *NAPALM* library (if required) using [NAPALM installation instructions](https://napalm.readthedocs.io/en/latest/installation/index.html)
15 | * Install *napalm-ansible* library with ```git clone https://github.com/napalm-automation/napalm-ansible```
16 |
17 | ## Usage
18 |
19 | Collect device configurations with one of these methods:
20 |
21 | * SCP (*gc_scp.yml*) - requires "ip scp server enable" configured on the device
22 | * **show running** (*gc_show.yml*)
23 | * *NAPALM* (*gc_napalm.yml*) - needs NAPALM library and napalm-ansible module
24 |
25 | Commit the changes in device configurations to the Git repository with *git_commit.yml*
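For example (the repository path is a placeholder, and inventory selection is assumed to come from `ansible.cfg` or `-i`), collect configurations with NAPALM and commit the changes with:

```
ansible-playbook gc_napalm.yml -e git_repository=/path/to/config-repo
ansible-playbook git_commit.yml -e git_repository=/path/to/config-repo -e git_branch=master
```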
--------------------------------------------------------------------------------
/Config-to-Git/gi_napalm.yml:
--------------------------------------------------------------------------------
1 | #
 2 | # Install device configurations from the Git repository onto managed hosts with NAPALM
3 | #
4 | ---
5 | - name: Prepare local copy of device configurations
6 | hosts: localhost
7 | tasks:
8 | - name: Check out the target Git branch
9 | shell: git checkout {{git_branch|default('master')}}
10 | args:
11 | chdir: "{{git_repository}}"
12 | changed_when: no
13 | check_mode: no
14 | - name: Pull changes from Git repository
15 | shell: git pull
16 | args:
17 | chdir: "{{git_repository}}"
18 | changed_when: no
19 | check_mode: no
20 |
21 | - name: Install device configuration from Git
22 | hosts: all
23 | tasks:
24 | - name: Push configurations with NAPALM
25 | napalm_install_config:
26 | hostname: "{{ansible_host|default(inventory_hostname)}}"
27 | username: "{{ansible_user}}"
28 | password: "{{ansible_ssh_pass}}"
29 | dev_os: ios
30 | config_file: "{{git_repository}}/{{inventory_hostname}}.cfg"
31 | replace_config: true
32 | commit_changes: "{{ not ansible_check_mode }}"
33 | get_diffs: true
34 | tags: [ print_action ]
35 |
36 |
--------------------------------------------------------------------------------
/Summary-Reports/inventory/report.yml:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env ansible-playbook
2 | #
3 | ---
4 | - hosts: all
5 | tasks:
 6 |   - set_fact: results={{ lookup('env','OUTPUT') | default('results', true) }}
7 | - file: path={{results}} state=directory
8 | run_once: true
9 |
10 | - name: Get device facts
11 | napalm_get_facts:
12 | hostname: "{{ansible_host|default(inventory_hostname)}}"
13 | username: "{{ansible_user}}"
14 | password: "{{ansible_ssh_pass}}"
15 | dev_os: "{{ansible_os}}"
16 | optional_args:
17 | port: "{{api_port|default(ansible_port)|default(22)}}"
18 | filter:
19 | - facts
20 |
21 | - name: Save structured data in YAML files
22 | copy:
23 | content: |
24 | {{hostvars[inventory_hostname]|to_nice_yaml(indent=4)}}
25 | dest: "{{results}}/{{inventory_hostname}}.yml"
26 |
27 | - name: Create CSV report
28 | template:
29 | src: "csv.j2"
30 | dest: "{{results}}/inventory.csv"
31 | run_once: true
32 |
33 | - name: Create HTML report
34 | template:
35 | src: "html.j2"
36 | dest: "{{results}}/inventory.html"
37 | run_once: true
38 |
--------------------------------------------------------------------------------
/Compare-State-Snapshots/Script.md:
--------------------------------------------------------------------------------
1 | # Demo script for "Compare State Snapshots" case study
2 |
3 | ## Prepare for the demo
4 |
5 | Collect SSH keys with `get-keys.yml`
6 |
7 | Turn on LLDP with
8 | ```
9 | ansible-playbook fix/deploy_lldp.yml
10 | ```
11 | Check out the initial branch with time-dependent state
12 | ```
13 | git checkout Compare-State-Initial
14 | ```
15 |
16 | ## Step 1: Time-dependent state
17 |
18 | Collect the state
19 | ```
20 | ansible-playbook get-state.yml -e output=snapshot
21 | more snapshot/E1.yml
22 | ```
23 |
24 | ## Step 2: Fixed state gathering
25 |
26 | Check out the final branch
27 | ```
28 | git checkout Work
29 | ```
30 | Collect state
31 | ```
32 | ansible-playbook get-state.yml -e output=snap_before
33 | colordiff -au snapshot snap_before|less -r
34 | ```
35 | Log into one of the routers, turn off an interface
36 | ```
37 | sshpass -p cisco ssh cisco@172.16.1.111
38 | config terminal
39 | interface gig 0/2
40 | shutdown
41 | ```
42 | Repeat state gathering
43 | ```
44 | ansible-playbook get-state.yml -e output=snap_after
45 | ```
46 | Compare the state
47 | ```
48 | colordiff -au snap_before snap_after|less -r
49 | ```
50 |
--------------------------------------------------------------------------------
/Config-to-Git/gc_scp.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Get configuration from managed hosts using scp from system:running-config
3 | #
4 | ---
5 | - name: Get device configuration
6 | hosts: all
7 | connection: local
8 | vars:
9 | actual_config: "{{git_repository}}/{{inventory_hostname}}.actual"
10 | saved_config: "{{git_repository}}/{{inventory_hostname}}.cfg"
11 | tasks:
12 | - name: Make sure we're in the right branch
13 | shell: git checkout {{git_branch|default('master')}}
14 | args:
15 | chdir: "{{git_repository}}"
16 | run_once: true
17 | changed_when: no
18 |
19 | - set_fact:
20 | scp_cmd: >
21 | sshpass -p {{ansible_ssh_pass}}
22 | scp {{ansible_user}}@{{ansible_host|default(inventory_hostname)}}:system:running-config
23 | {{ actual_config }}
24 |
25 | - name: Grab configuration
26 | command: "{{scp_cmd}}"
27 |
28 | - name: Save cleaned-up configuration
29 | copy:
30 | content: "{{ lookup('file',actual_config)|clean_config(clean_patterns) }}"
31 | dest: "{{ saved_config }}"
32 |
33 | - name: Remove actual configuration file
34 | file: name={{actual_config}} state=absent
35 |
--------------------------------------------------------------------------------
/DHCP-Pools/configure.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This playbook configures DHCP pools on managed devices
3 | #
4 | ---
5 | - hosts: all
6 | name: Configure DHCP pools on managed devices
7 | vars:
8 | config_path: "{{configs|default('configs')}}"
9 | vars_files:
10 | - "{{ lookup('env','EXTRA_POOLS')|default('pools.yml') }}"
11 |
12 | tasks:
13 | - fail: msg="Please run this playbook with tags set to create and/or install"
14 |
15 | - name: Create config directory
16 | file: path={{config_path}} state=directory
17 | run_once: yes
18 | tags: [ create, install ]
19 |
20 |   - name: Get existing DHCP pools
21 | include: include/getPools.yml
22 | tags: [ install ]
23 |
24 | - name: Verify that there are no extra pools
25 | assert:
26 | that: "extraPools|length == 0"
27 | msg: "Extra DHCP pools have to be removed first: {{extraPools|join(', ')}}"
28 | tags: [ install ]
29 |
30 | - name: Create pool configuration
31 | template: src=pools.j2 dest={{config_path}}/{{inventory_hostname}}.cfg
32 | tags: [ create ]
33 |
34 | - name: Configure DHCP pools
35 | ios_config:
36 | src: pools.j2
37 | tags: [ install ]
--------------------------------------------------------------------------------
/Description-to-Fabric/create-fabric-model.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | tasks:
4 | - name: Check that the device belongs to exactly one AS
5 | assert: that="{{ group_names|select('match','AS')|list|length == 1 }}"
6 | - snmp_facts:
7 | host: "{{ip}}"
8 | version: v2
9 | community: cisco
10 | tags: [ facts,internal,interas ]
11 |
12 | - set_fact:
13 | internal_links: []
14 | interas_links: []
15 |
16 | - name: Generate Internal links
17 | include: get-connectivity/internal-links.yml intf="{{item}}"
18 | with_items: "{{ansible_interfaces.values()}}"
19 | when: "{{intf.description.find('to ') >= 0}}"
20 | tags: [ internal ]
21 |
22 | - name: Generate Inter-AS links
23 | include: get-connectivity/interas-links.yml intf="{{item}}"
24 | with_items: "{{ansible_interfaces.values()}}"
25 | when: "{{intf.description.find('to ') >= 0}}"
26 | tags: [ interas ]
27 |
28 | - name: Generate fabric.yml file
29 | template: src=fabric.yml.j2 dest=./fabric.yml
30 | vars:
31 | allhosts: "{{hostvars}}"
32 | run_once: true
33 | connection: local
34 | tags: [ internal,interas,nodes ]
--------------------------------------------------------------------------------
/Description-to-Fabric/fabric.yml.j2:
--------------------------------------------------------------------------------
1 | #
2 | # Fabric definition
3 | #
4 | ---
5 | fabric:
6 | {% for host,facts in allhosts|dictsort %}
7 | {% if facts.internal_links is defined and facts.internal_links|length > 0 %}
8 | {{ facts.internal_links|to_yaml|indent(2,true) }}
9 | {% endif %}
10 | {% endfor %}
11 |
12 | interas:
13 | {% for host,facts in allhosts|dictsort %}
14 | {% if facts.interas_links is defined and facts.interas_links|length > 0 %}
15 | {{ facts.interas_links|to_yaml|indent(2,true) }}
16 | {% endif %}
17 | {% endfor %}
18 |
19 | nodes:
20 | {% for host,facts in allhosts|dictsort %}
21 | - name: {{facts.inventory_hostname}}
22 | {% if facts.ansible_interfaces is defined %}
23 | {% for idx,intf in facts.ansible_interfaces|dictsort %}
24 | {# dump: {{ intf|to_nice_json }} -- debugging #}
25 | {% if intf.description.find("OOB") >= 0 and intf.ipv4 is defined and intf.ipv4[0] is defined %}
26 | mgmt: {{intf.ipv4[0].address}}
27 | {% endif %}
28 | {% if intf.name == "Loopback0" and intf.ipv4 is defined and intf.ipv4[0] is defined %}
29 | rid: {{intf.ipv4[0].address}}
30 | {% endif %}
31 | {% endfor %}
32 | {% endif %}
33 | {% endfor %}
34 |
--------------------------------------------------------------------------------
/DHCP-Pools/include/getPools.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This set of tasks will get interface and pool list from an IOS router
3 | #
4 | ---
5 | - name: "Get interface and pool list"
6 | ios_command:
7 | # username: "{{ansible_user}}"
8 | # password: "{{ansible_ssh_pass}}"
9 | # host: "{{ip|default(inventory_hostname)}}"
10 | commands:
11 | - "show ip interface brief | inc \\.[0-9]+[ ]+YES"
12 | - "show ip dhcp pool | inc ^Pool"
13 | register: printout
14 |
15 | #
16 | # Parse printouts to get a list of interfaces and existing DHCP pools on the device
17 | #
18 | - set_fact:
19 | intf: "{{printout.stdout_lines[0] | map('regex_findall','^([A-Za-z]+[0-9./]+)') | map('join') | list }}"
20 |     pool: "{{printout.stdout_lines[1] | map('regex_search','Pool\\s+(?P<name>.+)\\s+:','\\g<name>') | map('join') | list }}"
21 |
22 | #
23 | # Pools that are not interface pools, host pools, or default pools should not be on the box.
24 | #
25 | # Get the list of extraPools, the parent playbook will figure out what to do with them.
26 | #
27 | - set_fact:
28 | extraPools: |
29 | {{ pool | difference(intf)
30 | | difference(hostPools)
31 | | difference(defaultPools | default(['DHCP']))
32 | }}
33 |
--------------------------------------------------------------------------------
/OSPF-Deployment/validate-fabric.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | name: Create and read node data model
4 | tasks:
5 | - include_vars: "{{item}}"
6 | run_once: true
7 | with_first_found:
8 | - "{{inventory_dir}}/{{model|default('fabric.yml')}}"
9 | - "fabric.yml"
10 | tags: [ model ]
11 | - name: Create per-node data model from fabric data model
12 | template: src=model/fabric-to-nodes.j2 dest=nodes.yml
13 | run_once: true
14 | tags: [ model ]
15 | - include_vars: "nodes.yml"
16 | tags: [ always ]
17 |
18 | - hosts: all
19 | name: Configure LLDP on fabric devices
20 | tags: [ config ]
21 | tasks:
22 | - include_tasks: "{{ansible_network_os}}/deploy_lldp.yml"
23 | - name: Set 'wait for LLDP flag'
24 | set_fact: wait_flag=1
25 |
26 | - hosts: all
27 | name: Validate fabric connectivity using LLDP neighbors
28 | tags: [ validate ]
29 | tasks:
30 | - name: Wait for LLDP to start
31 | include_tasks: lldp/wait.yml
32 | - name: Gather LLDP facts
33 | include_tasks: "{{item}}"
34 | with_first_found:
35 | - "{{ansible_network_os}}/lldp_facts.yml"
36 | - "lldp/napalm_lldp_facts.yml"
37 | - name: Validate fabric connectivity using LLDP neighbors
38 | include_tasks: lldp/validate.yml
39 |
--------------------------------------------------------------------------------
/tools/include/snapshot.yml:
--------------------------------------------------------------------------------
1 | # Snapshot: create a snapshot of inventory file and host facts in specified folder
2 | #
3 | # Variables:
4 | # - snapshot: directory (relative to inventory directory)
5 | #
6 | ---
7 | - set_fact: snap_path={{inventory_dir}}/{{snapshot}}
8 | when: snap_path is not defined
9 |
10 | - name: Create snapshot directory
11 | file:
12 | path: "{{snap_path}}"
13 | state: directory
14 | run_once: true
15 | check_mode: false
16 |
17 | - name: Create inventory file in snapshot directory
18 | copy:
19 | dest: "{{snap_path}}/hosts"
20 | content: |
21 | {% for h in play_hosts %}
22 |       {{h}} ansible_host={{hostvars[h].ansible_host}} ansible_user={{hostvars[h].ansible_user}} ansible_ssh_pass={{hostvars[h].ansible_ssh_pass}}
23 | {% endfor %}
24 | run_once: true
25 | delegate_to: localhost
26 | check_mode: false
27 |
28 | - name: Create host_vars directory in snapshot directory
29 | file:
30 | path: "{{snap_path}}/host_vars"
31 | state: directory
32 | run_once: true
33 | check_mode: false
34 |
35 | - name: Create host variable files
36 | copy:
37 | dest: "{{snap_path}}/host_vars/{{inventory_hostname}}.yml"
38 | content: |
39 | {{hostvars[inventory_hostname]|to_nice_yaml(indent=2)}}
40 | check_mode: false
41 |
42 |
--------------------------------------------------------------------------------
/Description-to-Links/README.md:
--------------------------------------------------------------------------------
1 | # Extract network topology from interface descriptions
2 |
 3 | The *extract-links* Ansible playbook uses interface descriptions to generate a list of links within the network. It assumes that all links (node-to-node and node-to-LAN) have the description "to *remotenode*" (as generated by VIRL). If you use a different naming convention, adjust the `set remote=...` statement in the Jinja2 templates.
4 |
5 | The playbook uses snmp_facts to get interface descriptions.
6 |
7 | ## Multi-vendor support (on your own)
8 |
 9 | The *extract-links* playbook works on any device that supports SNMP MIB-II. The *config-enable-snmp.yml* playbook works with Cisco IOS, but feel free to extend it to any other operating system.
10 |
11 | ## Usage
12 |
13 | * Create your inventory file (**hosts** in the current directory)
14 | * Change usernames and SNMP community in group_vars/all.yml. Even better, use SSH keys instead of hard-coded usernames and passwords.
15 |
16 | * Configure SNMP community on Cisco IOS devices if needed:
17 | ```
18 | ansible-playbook config-enable-snmp.yml
19 | ```
20 | * Check that SNMP works as expected
21 | ```
22 | ansible-playbook extract-links.yml -t facts
23 | ```
24 | * Generate the links file with
25 | ```
26 | ansible-playbook extract-links.yml
27 | ```
28 | * Enjoy, modify and submit a pull request when you add something awesome
29 |
--------------------------------------------------------------------------------
/Data-Models/Transformation/device-data.j2:
--------------------------------------------------------------------------------
1 | {#
2 | Jinja2 macros to create interface- and BGP neighbor data structure.
3 | Be very careful about proper indentation - it's YAML after all.
4 | #}
5 | {% macro interface(name,addr) -%}
6 | - name: {{ name }}
7 | ip: {{ addr }}
8 | {%- endmacro %}
9 | {% macro neighbor(name,ip) -%}
10 | - bgp_as: {{ nodes[name].bgp_as }}
11 | ip: {{ ip|ipaddr('address') }}
12 | {%- endmacro %}
13 | #
14 | # host_vars data for {{ hostname }} generated from network device data model
15 | #
16 | ---
17 | hostname: {{ hostname }}
18 | bgp_as: {{ nodes[hostname].bgp_as }}
19 | interfaces:
20 | {% for link in links %}
21 | {% for node,iflist in link|dictsort if node != 'prefix' %}
22 | {% if node == hostname %}
23 | {% if link.prefix is defined %}
24 | {{ interface(iflist,link.prefix|ipaddr(loop.index)) }}
25 | {% else %}
26 | {% for ifname,ip in iflist|dictsort %}
27 | {{ interface(ifname,ip) }}
28 | {% endfor %}
29 | {% endif %}
30 | {% endif %}
31 | {% endfor %}
32 | {% endfor %}
33 | neighbors:
34 | {% for link in links if link|length > 1 and hostname in link.keys() %}
35 | {% for node,ifname in link|dictsort if node != 'prefix' %}
36 | {% if node != hostname %}
37 | {{ neighbor(node,link.prefix|ipaddr(loop.index)) }}
38 | {% endif %}
39 | {% endfor %}
40 | {% endfor %}
41 |
--------------------------------------------------------------------------------
/OSPF-Deployment/tools/log_changes.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Create files documenting changes made to individual devices
3 | #
4 | ---
5 | - name: Cleanup old changes file
6 | local_action: file path={{configs}}/node.changes/{{inventory_hostname}}.{{component}}.changes state=absent
7 | check_mode: no
8 | changed_when: no
9 |
10 | - name: Document changes
11 | copy:
12 | content: |
13 | ****************************************************
14 | {{component}} changes on {{inventory_hostname}}
15 | {% if ansible_check_mode %}
16 | Running in check mode, configuration was not changed
17 | {% endif %}
18 | ****************************************************
19 | {% for line in changes.commands %}
20 | {{ line }}
21 | {% endfor %}
22 |
23 | dest: "{{configs}}/node.changes/{{inventory_hostname}}.{{component}}.changes"
24 | delegate_to: localhost
25 | check_mode: no
26 | when: changes.commands|default([])|length
27 |
28 | - assemble: src={{configs}}/node.changes dest={{configs}}/changes.txt
29 | check_mode: no
30 | changed_when: no
31 | delegate_to: localhost
32 | run_once: "{{allnodes|default(true)}}"
33 | when: true
34 |
35 | - name: Cleanup changes files
36 | local_action: file path={{configs}}/node.changes/{{inventory_hostname}}.{{component}}.changes state=absent
37 | check_mode: no
38 | changed_when: no
39 |
--------------------------------------------------------------------------------
/Data-Models/Validation/network.schema.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "http://json-schema.org/draft-07/schema#",
3 | "$id": "https://www.ipSpace.net/network.schema.json",
4 | "title": "Network Infrastructure",
5 | "description": "An optimized data model describing a network",
6 | "definitions": {
7 | "core-link" : {
8 | "type" : "object",
9 | "minProperties": 3,
10 | "maxProperties": 3,
11 | "properties": {
12 | "prefix": {
13 | "type": "string",
14 | "format": "ipv4"
15 | }
16 | },
17 | "additionalProperties": {
18 | "type" : "string"
19 | },
20 | "required": [ "prefix" ]
21 | },
22 | "edge-link" : {
23 | "type" : "object",
24 | "minProperties": 1,
25 | "maxProperties": 1,
26 | "additionalProperties": {
27 | "type": "object",
28 | "patternProperties": {
29 | "^Vlan" : { "type" : "string" }
30 | },
31 | "additionalProperties": false
32 | }
33 | }
34 | },
35 | "type": "object",
36 | "properties": {
37 | "links": {
38 | "description": "Core and edge links",
39 | "type": "array",
40 | "items" : {
41 | "anyOf": [
42 | { "$ref" : "#/definitions/core-link" },
43 | { "$ref" : "#/definitions/edge-link" }
44 | ]
45 | }
46 | }
47 | },
48 | "required": [ "links" ]
49 | }
--------------------------------------------------------------------------------
/AWS/json2txt:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import sys,json
4 | import getopt
5 | import pprint
6 | from jinja2 import Environment, FileSystemLoader, Undefined, StrictUndefined, make_logging_undefined
7 |
8 | def getOptions():
9 | try:
10 |         options, args = getopt.getopt(sys.argv[1:], "nsw", ["notrim", "strict", "warning"])
11 | except getopt.GetoptError as err:
12 | # print help information and exit:
13 | print str(err) # will print something like "option -a not recognized"
14 | sys.exit(2)
15 |
16 | global yamlfile,jinjafile,trim,undefined
17 | trim = True
18 | opts = 0
19 |
20 | for opt,arg in options:
21 | opts = opts + 1
22 |       if opt in ("-n","--notrim"):
23 |         trim = False
24 |       elif opt in ("-w","--warning"):
25 |         undefined = make_logging_undefined (base = Undefined)
26 |       elif opt in ("-s","--strict"):
27 |         undefined = make_logging_undefined (base = StrictUndefined)
28 |
29 |     return args   # non-option arguments (Jinja2 template name)
30 |
31 | trim = True
32 | undefined = Undefined
33 |
34 | args = getOptions()
35 | ENV = Environment(loader=FileSystemLoader('.'),trim_blocks=trim,lstrip_blocks=trim,undefined=undefined)
36 |
37 | if (len(args) < 1):
38 | print "Usage: json2txt j2-filter-spec"
39 | sys.exit()
40 |
41 | jinjafile = args[0]
42 | if (jinjafile.find('.') == -1):
43 | jinjafile = jinjafile + '.j2'
44 |
45 | data = json.load(sys.stdin)
46 |
47 | # pprint.pprint(data)
48 | template = ENV.get_template(jinjafile)
49 | print(template.render(**data))
50 |
--------------------------------------------------------------------------------
/OSPF-Deployment/README.md:
--------------------------------------------------------------------------------
1 | # Deploy OSPF in a network fabric
2 |
3 | This directory contains a set of playbooks that deploy OSPF in
4 | a network fabric defined in the *fabric.yml* data model:
5 |
6 | * **validate-fabric.yml** starts LLDP on all network devices and validates interface status and fabric connectivity using LLDP
7 | * **deploy.yml** configures OSPF and validates OSPF adjacencies
8 |
9 | You'll find a detailed description of these playbooks in the [Ansible for Networking Engineers](http://www.ipspace.net/Ansible_for_Networking_Engineers) online course.
10 |
11 | Note: to get the sources used in the online course do `git checkout OSPF-Deployment-v1.0`
12 |
13 | ## Installation guide
14 |
15 | The playbooks were tested with these versions of Ansible and NAPALM:
16 |
17 | * Ansible 2.4
18 | * napalm 1.2.0 (or greater)
19 | * napalm-ansible 0.7.0 (or greater)
20 |
21 | Notes:
22 |
23 | * The playbooks have been updated to work with Ansible 2.4 and will not work with previous versions of Ansible
24 | * Run `napalm-ansible` to find the path to your NAPALM installation and update **ansible.cfg** accordingly
25 |
26 | ## More information
27 |
28 | * [Ansible for Networking Engineers](http://www.ipspace.net/Ansible_for_Networking_Engineers) online course ([contents](https://my.ipspace.net/bin/list?id=AnsibleOC))
29 | * [Building Network Automation Solutions](http://www.ipspace.net/Building_Network_Automation_Solutions) online course ([contents](https://my.ipspace.net/bin/list?id=NetAutSol))
30 |
--------------------------------------------------------------------------------
/Summary-Reports/hosts-file/hosts.yml:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env ansible-playbook
2 | #
3 | ---
4 | - hosts: all
5 | tasks:
6 | - block:
7 |     - set_fact: results={{ lookup('env','OUTPUT') | default('results', true) }}
8 | - file: path={{results}} state=directory
9 | run_once: true
10 | tags: [ always ]
11 |
12 | - name: Get device facts
13 | napalm_get_facts:
14 | hostname: "{{ansible_host|default(inventory_hostname)}}"
15 | username: "{{ansible_user}}"
16 | password: "{{ansible_ssh_pass}}"
17 | dev_os: "{{ansible_os}}"
18 | optional_args:
19 | port: "{{api_port|default(ansible_port)|default(22)}}"
20 | filter:
21 | - facts
22 | - interfaces_ip
23 | tags: [ dump,create ]
24 |
25 | - name: Save structured data in YAML files
26 | copy:
27 | content: |
28 | {{hostvars[inventory_hostname]|to_nice_yaml(indent=4)}}
29 | dest: "{{results}}/{{inventory_hostname}}.yml"
30 | tags: [ dump ]
31 |
32 | - name: Create host file entries
33 | template: src=hosts.j2 dest={{results}}/hosts
34 | tags: [ create, install ]
35 |
36 | - name: Create zone file entries
37 | template: src=zone.j2 dest={{results}}/inaddr.arpa
38 | tags: [ create, install ]
39 |
40 | - blockinfile:
41 | path: /etc/hosts
42 | block: "{{ lookup('file', results ~ '/hosts') }}"
43 | marker: ""
44 | delegate_to: localhost
45 | run_once: true
46 | become: yes
47 | tags: [ install ]
48 |
--------------------------------------------------------------------------------
/Data-Models/Network_Dict/config.j2:
--------------------------------------------------------------------------------
1 | hostname {{ hostname }}
2 | {#
3 | Interface macro
4 | #}
5 | {% macro interface(name,addr) -%}
6 | !
7 | interface {{ name }}
8 | ip address {{ addr|ipaddr('address') }} {{ addr|ipaddr('netmask') }}
9 | {%- endmacro %}
10 | {#
11 | Interfaces
12 | #}
13 | {% for link in links %}
14 | {% for node,iflist in link|dictsort if node == inventory_hostname %}
15 | {% for ifname,ip in iflist|dictsort %}
16 | {{ interface(ifname,ip) }}
17 | {% endfor %}
18 | {% endfor %}
19 | {% endfor %}
20 | !
21 | {#
22 | BGP neighbor macro
23 | #}
24 | {% macro neighbor(name,ip) -%}
25 | {% set n_ip = ip|ipaddr('address') %}
26 | neighbor {{ n_ip }} remote-as {{ hostvars[name].bgp_as }}
27 | neighbor {{ n_ip }} description {{ name }}
28 | {%- endmacro %}
29 | {#
30 | BGP routing protocol configuration
31 | #}
32 | router bgp {{ bgp_as }}
33 | {#
34 | BGP neighbors - find links that contain local nodename and create
35 | neighbors for all other nodenames
36 | #}
37 | {% for link in links if link|length > 1 and inventory_hostname in link.keys() %}
38 | {% for node,iflist in link|dictsort if node != inventory_hostname %}
39 | {% for ifname,ip in iflist|dictsort %}
40 | {{ neighbor(node,ip) }}
41 | {% endfor %}
42 | {% endfor %}
43 | {% endfor %}
44 | !
45 | {% for link in links if link|length == 1 %}
46 | {% for node,iflist in link|dictsort if node == inventory_hostname %}
47 | {% for ifname,ip in iflist|dictsort %}
48 | network {{ ip|ipaddr('network') }} {{ ip|ipaddr('netmask') }}
49 | {% endfor %}
50 | {% endfor %}
51 | {% endfor %}
52 |
--------------------------------------------------------------------------------
/LLDP-to-Graph-pyeznc/README.md:
--------------------------------------------------------------------------------
1 | # Generate network topology graph from LLDP neighbors using junos-eznc
2 |
3 | The *LLDP-to-Graph-pyeznc* Ansible playbook uses LLDP neighbor data collected
4 | with Juniper's ansible-junos-stdlib module to generate a network diagram in *Graphviz* .dot file format.
5 |
6 | ## Installation guide
7 |
8 | The playbooks were tested with these versions of Ansible, junos-eznc and ansible-junos-stdlib:
9 |
10 | * Ansible 2.4
11 | * junos-eznc 2.1.7
12 | * ansible-junos-stdlib 2.0.0
13 |
14 | Notes:
15 |
16 | * The playbook collects data only from Juniper equipment using the default junos-eznc and ansible-junos-stdlib libraries.
17 |
18 |
19 | ## Usage
20 |
21 | * Create your inventory file named ***hosts***
22 | * Install ***junos-eznc*** using the pip tool inside your virtualenv
23 | * Install the ***ansible-junos-stdlib*** from https://github.com/Juniper/ansible-junos-stdlib in the roles subdirectory
24 | * Install *graphviz*
25 | * Generate the network topology file with
26 | ```
27 | ansible-playbook LLDP-to-Graph-pyeznc.yml
28 | ```
29 | * Generate the network diagram (in PNG format) with
30 | ```
31 | dot -Tpng network.dot >network.png
32 | ```
33 | * Enjoy, modify and submit a pull request when you add something awesome
34 |
35 | ## More information
36 |
37 | * [Ansible for Networking Engineers](http://www.ipspace.net/Ansible_for_Networking_Engineers) online course ([contents](https://my.ipspace.net/bin/list?id=AnsibleOC))
38 | * [Building Network Automation Solutions](http://www.ipspace.net/Building_Network_Automation_Solutions) online course ([contents](https://my.ipspace.net/bin/list?id=NetAutSol))
39 |
--------------------------------------------------------------------------------
/DHCP-Pools/pools.j2:
--------------------------------------------------------------------------------
1 | {% for ifname,intf in interfaces|dictsort %}
2 | ip dhcp pool {% if intf.pool is defined %}{{intf.pool}}{% else %}{{ifname}}{% endif %}
3 |
4 | network {{intf.ip|ipaddr('network')}} {{intf.ip|ipaddr('netmask')}}
5 | {% if intf.domain %}
6 | domain-name {{intf.domain}}
7 | {% endif %}
8 | !
9 | {% endfor %}
10 | {% for name,data in hostPools|dictsort %}
11 | {% for intf in interfaces.values() %}
12 | {% if data.ip|ipaddr(intf.ip) and data.id is defined %}
13 | {#
14 | Found the interface/subnet the client IP belongs to -->
15 | it makes sense to create the host DHCP pool
16 | #}
17 | ip dhcp pool {{name}}
18 | {#
19 | If the client ID matches MAC address format generate hardware-address
20 | otherwise use client-identifier
21 | #}
22 | {% if data.id is match('\A([0-9a-f]{4}\.){2}[0-9a-f]{4}\Z') %}
23 | hardware-address {{data.id}}
24 | {% else %}
25 | client-identifier {{data.id}}
26 | {% endif %}
27 | host {{data.ip}} {{intf.ip|ipaddr('netmask')}}
28 | {% if name.find('.') > 0 %}{# is client name FQDN? #}
29 | client-name {{name.partition('.')[0]}}
30 | {% if name.partition('.')[2] != intf.domain|default('') %}
31 | {# client domain is not equal to the interface domain #}
32 | domain-name {{name.partition('.')[2]}}
33 | {% endif %}
34 | {% else %}{# name has no dot #}
35 | client-name {{name}}
36 | {% endif %}
37 | {% endif %}
38 | {% endfor %}
39 | {% endfor %}
40 | !
41 | {% for name,data in hostPools|dictsort %}
42 | {% if data.ip is defined %}
43 | ip host {{name}} {{data.ip}}
44 | {% endif %}
45 | {% endfor %}
46 |
--------------------------------------------------------------------------------
/Data-Models/Network_Macro/config.j2:
--------------------------------------------------------------------------------
1 | hostname {{ hostname }}
2 | {#
3 | Interface macro
4 | #}
5 | {% macro interface(name,addr) -%}
6 | !
7 | interface {{ name }}
8 | ip address {{ addr|ipaddr('address') }} {{ addr|ipaddr('netmask') }}
9 | {%- endmacro %}
10 | {#
11 | Stub interfaces
12 | #}
13 | {% for ifname,ifdata in interfaces|dictsort %}
14 | {{ interface(ifname,ifdata.ip) }}
15 | {% endfor %}
16 | {#
17 | Nodes on the left side of links
18 | #}
19 | {% for link in links if link.left_node == inventory_hostname %}
20 | {{ interface(link.left_interface,link.left_ip) }}
21 | {% endfor %}
22 | {#
23 | Nodes on the right side of links
24 | #}
25 | {% for link in links if link.right_node == inventory_hostname %}
26 | {{ interface(link.right_interface,link.right_ip) }}
27 | {% endfor %}
28 | !
29 | {#
30 | BGP neighbor macro
31 | #}
32 | {% macro neighbor(name,ip) -%}
33 | {% set n_ip = ip|ipaddr('address') %}
34 | neighbor {{ n_ip }} remote-as {{ hostvars[name].bgp_as }}
35 | neighbor {{ n_ip }} description {{ name }}
36 | {%- endmacro %}
37 | {#
38 | BGP routing protocol configuration
39 | #}
40 | router bgp {{ bgp_as }}
41 | {#
42 | Nodes on the left side of links
43 | #}
44 | {% for link in links if link.left_node == inventory_hostname %}
45 | {{ neighbor(link.right_node,link.right_ip) }}
46 | {% endfor %}
47 | {#
48 | Nodes on the right side of links
49 | #}
50 | {% for link in links if link.right_node == inventory_hostname %}
51 | {{ neighbor(link.left_node,link.left_ip) }}
52 | {% endfor %}
53 | !
54 | {% for ifname,ifdata in interfaces|dictsort %}
55 | network {{ ifdata.ip|ipaddr('network') }} {{ ifdata.ip|ipaddr('netmask') }}
56 | {% endfor %}
57 |
--------------------------------------------------------------------------------
/Data-Models/Network/config.j2:
--------------------------------------------------------------------------------
1 | hostname {{ hostname }}
2 | {# stub interfaces #}
3 | {% for ifname,ifdata in interfaces|dictsort %}
4 | !
5 | interface {{ ifname }}
6 | ip address {{ ifdata.ip|ipaddr('address') }} {{ ifdata.ip|ipaddr('netmask') }}
7 | {% endfor %}
8 | {# nodes on the left side of a link #}
9 | {% for link in links if link.left_node == inventory_hostname %}
10 | !
11 | interface {{ link.left_interface }}
12 | ip address {{ link.left_ip|ipaddr('address') }} {{ link.left_ip|ipaddr('netmask') }}
13 | {% endfor %}
14 | {# nodes on the right side of a link #}
15 | {% for link in links if link.right_node == inventory_hostname %}
16 | !
17 | interface {{ link.right_interface }}
18 | ip address {{ link.right_ip|ipaddr('address') }} {{ link.right_ip|ipaddr('netmask') }}
19 | {% endfor %}
20 | !
21 | router bgp {{ bgp_as }}
22 | {# nodes on the left side of a link #}
23 | {% for link in links if link.left_node == inventory_hostname %}
24 | {% set neighbor = link.right_node %}
25 | {% set n_ip = link.right_ip|ipaddr('address') %}
26 | neighbor {{ n_ip }} remote-as {{ hostvars[neighbor].bgp_as }}
27 | neighbor {{ n_ip }} description {{ neighbor }}
28 | {% endfor %}
29 | {# nodes on the right side of a link #}
30 | {% for link in links if link.right_node == inventory_hostname %}
31 | {% set neighbor = link.left_node %}
32 | {% set n_ip = link.left_ip|ipaddr('address') %}
33 | neighbor {{ n_ip }} remote-as {{ hostvars[neighbor].bgp_as }}
34 | neighbor {{ n_ip }} description {{ neighbor }}
35 | {% endfor %}
36 | !
37 | {# stub interfaces #}
38 | {% for ifname,ifdata in interfaces|dictsort %}
39 | network {{ ifdata.ip|ipaddr('network') }} {{ ifdata.ip|ipaddr('netmask') }}
40 | {% endfor %}
41 |
--------------------------------------------------------------------------------
/OSPF-Deployment/lldp/validate.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Validate fabric connectivity using LLDP
3 | #
4 | ---
5 | - set_fact: node={{nodes[inventory_hostname]}}
6 | - block:
7 | - set_fact: fabric_intf={{node.links.keys()}}
8 | - set_fact: device_intf={{napalm_interfaces.keys()}}
9 | - assert:
10 | that: fabric_intf|difference(device_intf)|length == 0
11 | msg: >
12 | Some interfaces used in fabric definition are not present
13 | on {{inventory_hostname}}
14 | - assert:
15 | that: napalm_interfaces[item.key].is_enabled
16 | msg: |
17 | Interface {{item.key}} is not enabled on {{inventory_hostname}}
18 | with_dict: "{{node.links}}"
19 | - assert:
20 | that: napalm_interfaces[item.key].is_up
21 | msg: >
22 | Interface {{item.key}} is down on {{inventory_hostname}}
23 | with_dict: "{{node.links}}"
24 |
25 | - set_fact: lldp_intf={{napalm_lldp_neighbors.keys()}}
26 | - assert:
27 | that: fabric_intf|difference(lldp_intf)|length == 0
28 | msg: >
29 | LLDP is not enabled on some fabric interfaces on {{inventory_hostname}}
30 | - assert:
31 | that: >
32 | napalm_lldp_neighbors[item.key]|map(attribute='hostname')|list|
33 | intersect(item.value.remote+common.suffix)|length > 0
34 | msg: "Neighbor {{item.value.remote}} is not present on {{item.key}}"
35 | with_dict: "{{node.links}}"
36 | - assert:
37 | that: >
38 | napalm_lldp_neighbors[item.key]|map(attribute='hostname')|list|
39 | difference(item.value.remote+common.suffix)|length == 0
40 | msg: "Extra LLDP neighbors are present on {{item.key}}"
41 | with_dict: "{{node.links}}"
42 | when: node.links is defined
43 |
--------------------------------------------------------------------------------
/Description-to-Fabric/fabric.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Fabric definition
3 | #
4 | ---
5 | fabric:
6 | - {left: E1, left_ip: 10.0.0.21, left_port: GigabitEthernet0/2, right: E2, right_ip: 10.0.0.22,
7 | right_port: GigabitEthernet0/2}
8 | - {left: E1, left_ip: 10.0.0.13, left_port: GigabitEthernet0/1, right: PE1, right_ip: 10.0.0.14,
9 | right_port: GigabitEthernet0/1}
10 | - {left: E2, left_ip: 10.0.0.17, left_port: GigabitEthernet0/1, right: PE1, right_ip: 10.0.0.18,
11 | right_port: GigabitEthernet0/2}
12 | - {left: E3, left_ip: 10.0.0.41, left_port: GigabitEthernet0/2, right: E4, right_ip: 10.0.0.42,
13 | right_port: GigabitEthernet0/2}
14 | - {left: E3, left_ip: 10.0.0.29, left_port: GigabitEthernet0/3, right: PE2, right_ip: 10.0.0.30,
15 | right_port: GigabitEthernet0/1}
16 | - {left: E4, left_ip: 10.0.0.37, left_port: GigabitEthernet0/3, right: PE2, right_ip: 10.0.0.38,
17 | right_port: GigabitEthernet0/2}
18 |
19 | interas:
20 | - {left: E1, left_as: '64500', left_ip: 10.0.0.25, left_port: GigabitEthernet0/3,
21 | right: E3, right_as: '64501', right_ip: 10.0.0.26, right_port: GigabitEthernet0/1}
22 | - {left: E2, left_as: '64500', left_ip: 10.0.0.33, left_port: GigabitEthernet0/3,
23 | right: E4, right_as: '64501', right_ip: 10.0.0.34, right_port: GigabitEthernet0/1}
24 |
25 | nodes:
26 | - name: E1
27 | mgmt: 172.16.1.110
28 | rid: 192.168.0.2
29 | - name: E2
30 | mgmt: 172.16.1.111
31 | rid: 192.168.0.4
32 | - name: E3
33 | mgmt: 172.16.1.120
34 | rid: 192.168.0.5
35 | - name: E4
36 | mgmt: 172.16.1.121
37 | rid: 192.168.0.6
38 | - name: PE1
39 | mgmt: 172.16.1.112
40 | rid: 192.168.0.1
41 | - name: PE2
42 | mgmt: 172.16.1.122
43 | rid: 192.168.0.3
44 |
--------------------------------------------------------------------------------
/DMVPN/roles/base/templates/10-interfaces.j2:
--------------------------------------------------------------------------------
1 | !
2 | {% if loopback.ip is defined %}
3 | interface Loopback0
4 | ip address {{loopback.ip}} 255.255.255.255
5 | {% include [ routing_includes+routing+'-loopback-interface.j2',routing_includes+routing+'-LAN-interface.j2'] ignore missing %}
6 | {% endif %}
7 | !
8 | {% if LAN is defined %}
9 | interface {{LAN.interface}}
10 | description LAN interface
11 | ip address {{LAN.ip}} {{LAN.mask|default('255.255.255.0')}}
12 | {% include routing_includes+routing+'-LAN-interface.j2' ignore missing %}
13 | {% if 'Gigabit' in LAN.interface %}
14 | duplex auto
15 | speed auto
16 | {% endif %}
17 | !
18 | {% endif %}
19 | !
20 | {% for ifnum,intf in WAN|default({})|dictsort %}
21 | {% if uplink_vrf is defined %}
22 | {% if uplink_vrf[ifnum] is defined %}
23 | {% set vrf = uplink_vrf[ifnum] %}
24 | {% endif %}
25 | {% endif %}
26 | {% if vrf is defined %}
27 | ip vrf {{vrf.name|default('Internet')}}
28 | rd {{vrf.id|default('65000:1')}}
29 | !
30 | {% endif %}
31 | interface {{intf.interface}}
32 | {% if vrf is defined %}
33 | description Uplink to {{vrf.name|default('Internet')}}
34 | {% else %}
35 | description WAN uplink
36 | {% endif %}
37 | {% if vrf is defined %}
38 | ip vrf forwarding {{vrf.name|default('Internet')}}
39 | {% endif %}
40 | {% if intf.ip == 'DHCP' %}
41 | ip address dhcp
42 | {% else %}
43 | ip address {{intf.ip}} {{intf.mask|default("255.255.255.0")}}
44 | {% endif %}
45 | {% if 'Gigabit' in intf.interface %}
46 | duplex auto
47 | speed auto
48 | {% endif %}
49 | !
50 | {% if vrf is defined and intf.gw is defined %}
51 | ip route vrf {{vrf.name|default('Internet')}} 0.0.0.0 0.0.0.0 {{intf.interface}} {{intf.gw}}
52 | {% endif %}
53 | !
54 | {% endfor %}
--------------------------------------------------------------------------------
/backup-multicontext-asa-configurations.yml:
--------------------------------------------------------------------------------
1 | - name: Backup ASA configs
2 | connection: network_cli
3 | gather_facts: false
4 | hosts: ASA
5 | tasks:
6 | - name: set fact date-time
7 | set_fact:
8 | date: "{{ lookup('pipe','date +%Y%m%d-%H%M%S') }}"
9 |
10 | - name: show run config url
11 | become_method: enable
12 | become: yes
13 | asa_command:
14 | commands:
15 | - "terminal pager 0"
16 | - "show run context | i config-url"
17 | context: system
18 | register: confurl
19 |
20 | - name: configurl = regex confurl
21 | vars:
22 | regexp: "(?<=\\/)\\S+\\.cfg"
23 |         # regex above: (?<=\/) is a positive lookbehind (match text that follows a "/"); \S+ greedily matches non-whitespace ending in ".cfg"
24 | set_fact:
25 | configurl: "{{ confurl.stdout[1] | regex_findall(regexp) }}"
26 |
27 | - name: register all configs
28 | loop: "{{ configurl }}"
29 | asa_command:
30 | commands:
31 | - "more {{ item }}"
32 | context: system
33 | register: configs
34 |
35 | - name: create directory with date
36 | local_action: file path={{ homedir }}{{ inventory_hostname }}/{{ date }} state=directory
37 |
38 | - name: store files
39 | loop: "{{ configs.results }}"
40 | local_action: "copy content={{ item.stdout[0] }} dest={{ homedir }}{{ inventory_hostname }}/{{ date }}/{{ item.item }}"
41 |
42 | - name: register system running config
43 | asa_command:
44 | commands:
45 | - "more system:running-config"
46 | context: system
47 | register: runconf
48 |
49 | - name: store system running conf files
50 | local_action: "copy content={{ runconf.stdout[0] }} dest={{ homedir }}{{ inventory_hostname }}/{{ date }}/running-conf"
51 |
--------------------------------------------------------------------------------
/OSPF-Deployment/fabric.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Fabric definition
3 | #
4 | ---
5 | common:
6 | username: cisco
7 | password: cisco
8 | os: ios
9 | domain: virl.info
10 |
11 | nodes:
12 | - name: E1
13 | mgmt: 172.16.1.110
14 | rid: 192.168.0.2
15 | - name: E2
16 | mgmt: 172.16.1.111
17 | rid: 192.168.0.4
18 | - name: E3
19 | mgmt: 172.16.1.120
20 | rid: 192.168.0.5
21 | - name: E4
22 | mgmt: 172.16.1.121
23 | rid: 192.168.0.6
24 | - name: PE1
25 | mgmt: 172.16.1.112
26 | rid: 192.168.0.1
27 | - name: PE2
28 | mgmt: 172.16.1.122
29 | rid: 192.168.0.3
30 |
31 | fabric:
32 | - {left: E1, left_ip: 10.0.0.21, left_port: GigabitEthernet0/2,
33 | right: E2, right_ip: 10.0.0.22, right_port: GigabitEthernet0/2,
34 | cost: 5 }
35 | - {left: E1, left_ip: 10.0.0.13, left_port: GigabitEthernet0/1,
36 | right: PE1, right_ip: 10.0.0.14, right_port: GigabitEthernet0/1,
37 | cost: 10 }
38 | - {left: E2, left_ip: 10.0.0.17, left_port: GigabitEthernet0/1,
39 | right: PE1, right_ip: 10.0.0.18, right_port: GigabitEthernet0/2,
40 | cost: 1 }
41 | - {left: E3, left_ip: 10.0.0.41, left_port: GigabitEthernet0/2,
42 | right: E4, right_ip: 10.0.0.42, right_port: GigabitEthernet0/2 }
43 | - {left: E3, left_ip: 10.0.0.29, left_port: GigabitEthernet0/3,
44 | right: PE2, right_ip: 10.0.0.30, right_port: GigabitEthernet0/1 }
45 | - {left: E4, left_ip: 10.0.0.37, left_port: GigabitEthernet0/3,
46 | right: PE2, right_ip: 10.0.0.38, right_port: GigabitEthernet0/2 }
47 | - {left: E1, left_ip: 10.0.0.25, left_port: GigabitEthernet0/3,
48 | right: E3, right_ip: 10.0.0.26, right_port: GigabitEthernet0/1}
49 | - {left: E2, left_ip: 10.0.0.33, left_port: GigabitEthernet0/3,
50 | right: E4, right_ip: 10.0.0.34, right_port: GigabitEthernet0/1}
51 |
52 |
--------------------------------------------------------------------------------
/OSPF-Deployment/tests/fabric-wrong-if.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Fabric definition
3 | #
4 | ---
5 | common:
6 | username: cisco
7 | password: cisco
8 | os: ios
9 | domain: virl.info
10 |
11 | nodes:
12 | - name: E1
13 | mgmt: 172.16.1.110
14 | rid: 192.168.0.2
15 | - name: E2
16 | mgmt: 172.16.1.111
17 | rid: 192.168.0.4
18 | - name: E3
19 | mgmt: 172.16.1.120
20 | rid: 192.168.0.5
21 | - name: E4
22 | mgmt: 172.16.1.121
23 | rid: 192.168.0.6
24 | - name: PE1
25 | mgmt: 172.16.1.112
26 | rid: 192.168.0.1
27 | - name: PE2
28 | mgmt: 172.16.1.122
29 | rid: 192.168.0.3
30 |
31 | fabric:
32 | - {left: E1, left_ip: 10.0.0.21, left_port: GigabitEthernet0/4,
33 | right: E2, right_ip: 10.0.0.22, right_port: GigabitEthernet0/2,
34 | cost: 5 }
35 | - {left: E1, left_ip: 10.0.0.13, left_port: GigabitEthernet0/1,
36 | right: PE1, right_ip: 10.0.0.14, right_port: GigabitEthernet0/1,
37 | cost: 10 }
38 | - {left: E2, left_ip: 10.0.0.17, left_port: GigabitEthernet0/1,
39 | right: PE1, right_ip: 10.0.0.18, right_port: GigabitEthernet0/2,
40 | cost: 1 }
41 | - {left: E3, left_ip: 10.0.0.41, left_port: GigabitEthernet0/2,
42 | right: E4, right_ip: 10.0.0.42, right_port: GigabitEthernet0/2 }
43 | - {left: E3, left_ip: 10.0.0.29, left_port: GigabitEthernet0/3,
44 | right: PE2, right_ip: 10.0.0.30, right_port: GigabitEthernet0/1 }
45 | - {left: E4, left_ip: 10.0.0.37, left_port: GigabitEthernet0/3,
46 | right: PE2, right_ip: 10.0.0.38, right_port: GigabitEthernet0/2 }
47 | - {left: E1, left_ip: 10.0.0.25, left_port: GigabitEthernet0/3,
48 | right: E3, right_ip: 10.0.0.26, right_port: GigabitEthernet0/1}
49 | - {left: E2, left_ip: 10.0.0.33, left_port: GigabitEthernet0/3,
50 | right: E4, right_ip: 10.0.0.34, right_port: GigabitEthernet0/1}
51 |
52 |
--------------------------------------------------------------------------------
/OSPF-Deployment/tests/fabric-wrong-node.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Fabric definition
3 | #
4 | ---
5 | common:
6 | username: cisco
7 | password: cisco
8 | os: ios
9 | domain: virl.info
10 |
11 | nodes:
12 | - name: E1
13 | mgmt: 172.16.1.110
14 | rid: 192.168.0.2
15 | - name: E2
16 | mgmt: 172.16.1.111
17 | rid: 192.168.0.4
18 | - name: E3
19 | mgmt: 172.16.1.120
20 | rid: 192.168.0.5
21 | - name: E4
22 | mgmt: 172.16.1.121
23 | rid: 192.168.0.6
24 | - name: PE1
25 | mgmt: 172.16.1.112
26 | rid: 192.168.0.1
27 | - name: PE2
28 | mgmt: 172.16.1.122
29 | rid: 192.168.0.3
30 |
31 | fabric:
32 | - {left: E1, left_ip: 10.0.0.21, left_port: GigabitEthernet0/2,
33 | right: E4, right_ip: 10.0.0.22, right_port: GigabitEthernet0/2,
34 | cost: 5 }
35 | - {left: E1, left_ip: 10.0.0.13, left_port: GigabitEthernet0/1,
36 | right: PE1, right_ip: 10.0.0.14, right_port: GigabitEthernet0/1,
37 | cost: 10 }
38 | - {left: E2, left_ip: 10.0.0.17, left_port: GigabitEthernet0/1,
39 | right: PE1, right_ip: 10.0.0.18, right_port: GigabitEthernet0/2,
40 | cost: 1 }
41 | - {left: E3, left_ip: 10.0.0.41, left_port: GigabitEthernet0/2,
42 | right: E4, right_ip: 10.0.0.42, right_port: GigabitEthernet0/2 }
43 | - {left: E3, left_ip: 10.0.0.29, left_port: GigabitEthernet0/3,
44 | right: PE2, right_ip: 10.0.0.30, right_port: GigabitEthernet0/1 }
45 | - {left: E4, left_ip: 10.0.0.37, left_port: GigabitEthernet0/3,
46 | right: PE2, right_ip: 10.0.0.38, right_port: GigabitEthernet0/2 }
47 | - {left: E1, left_ip: 10.0.0.25, left_port: GigabitEthernet0/3,
48 | right: E3, right_ip: 10.0.0.26, right_port: GigabitEthernet0/1}
49 | - {left: E2, left_ip: 10.0.0.33, left_port: GigabitEthernet0/3,
50 | right: E4, right_ip: 10.0.0.34, right_port: GigabitEthernet0/1}
51 |
52 |
--------------------------------------------------------------------------------
/DMVPN/libvirt/README.md:
--------------------------------------------------------------------------------
1 | # DMVPN Topology Using IOSv with Vagrant on libvirt
2 |
3 | This directory contains the files needed to set up the DMVPN topology using Vagrant on libvirt (tested on Ubuntu 20.04).
4 |
5 | To recreate this environment in your lab:
6 |
7 | * Create an Ubuntu host
8 | * Install Vagrant, KVM and libvirt
9 | * [Install libvirt Vagrant provider](https://codingpackets.com/blog/using-the-libvirt-provider-with-vagrant/). When the recipe asks you to create **vagrant-libvirt.xml** file, use the one in this directory to set up static DHCP mappings.
10 | * [Build IOSv Vagrant box](https://codingpackets.com/blog/cisco-iosv-vagrant-libvirt-box-install/)
11 | * Copy **Vagrantfile** from this directory to your _libvirt_ host and create the DMVPN network with **vagrant up**
12 | * Check that you can reach the IOS hosts on IP addresses 192.168.121.101 through 192.168.121.106
13 |
14 | ## Accessing libvirt Virtual Machines
15 |
16 | The Vagrant _libvirt_ plugin uses the **vagrant-libvirt** virtual network for management interfaces. If you run Ansible playbooks on the same machine, you can connect directly to these IP addresses; if you want to run Ansible somewhere else (for example, in your development environment), you have to use an SSH proxy to reach the virtual machines:
17 |
18 | * Create SSH key pair if needed using **ssh-keygen**
19 | * Copy your public SSH key to the _libvirt_ host using **ssh-copy-id**
20 | * Check that you can connect to the _libvirt_ host using your SSH key by executing **ssh _hostname_**
21 |
22 | Once this infrastructure is in place, you can SSH to the virtual machines using the **ssh** `-J` option:
23 |
24 | ```
25 | ssh -J bastionhost vagrant@vm-ip
26 | ```
27 |
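Instead of typing the `-J` option for every session, you can also tell Ansible itself to go through the bastion host. A minimal sketch, assuming the bastion is reachable as `bastionhost` and that your connection plugin honors OpenSSH options passed via `ansible_ssh_common_args` (adjust to your environment):

```yaml
# group_vars/all.yml -- hypothetical fragment
ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q bastionhost"'
```
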
28 | ## Using DMVPN Examples
29 |
30 | Examples in the parent directory assume a VIRL-based lab. To use your newly-created _libvirt_ environment, execute `. setup-libvirt.sh`, which points Ansible at a different configuration file and at a libvirt-specific addendum to the Ansible inventory.
31 |
--------------------------------------------------------------------------------
/Sample-Compliance-Check/break-config.yml:
--------------------------------------------------------------------------------
1 | #
2 | # Break configuration on various devices in the network
3 | #
4 | ---
5 | - name: Collect configurations
6 | hosts: all
7 | tasks:
8 | - file: path=configs state=directory
9 | - ios_command: commands="show running"
10 | register: running
11 | - copy: content="{{ running.stdout[0] }}" dest=configs/{{inventory_hostname}}.cfg
12 |
13 | - name: Disable SNMP community
14 | hosts: all
15 | tasks:
16 | - ios_config:
17 | lines:
18 | - "no snmp-server community cisco"
19 | - "no snmp-server community myPass"
20 | running_config: configs/{{inventory_hostname}}.cfg
21 | ignore_errors: true
22 |
23 | - name: Enable SNMP community
24 | hosts: E2,E4,PE1,PE2
25 | tasks:
26 | - ios_config:
27 | lines:
28 | - "snmp-server community myPass"
29 | running_config: configs/{{inventory_hostname}}.cfg
30 |
31 | - name: Disable SNMP traps
32 | hosts: all
33 | tasks:
34 | - ios_config:
35 | lines:
36 |         - "no snmp-server host 10.0.0.1 traps cisco"
37 | running_config: configs/{{inventory_hostname}}.cfg
38 | ignore_errors: true
39 |
40 | - name: Enable SNMP traps
41 | hosts: E1,E2,E4,PE2
42 | tasks:
43 | - ios_config:
44 | lines:
45 | - "snmp-server host 10.0.0.1 traps cisco"
46 | running_config: configs/{{inventory_hostname}}.cfg
47 |
48 | - name: Disable syslog
49 | hosts: all
50 | tasks:
51 | - block:
52 | - ios_config:
53 | lines:
54 | - "no logging host 10.0.0.1"
55 | running_config: configs/{{inventory_hostname}}.cfg
56 | rescue:
57 | - ios_command: commands="end"
58 |
59 | - name: Enable syslog
60 | hosts: E1,E4,PE1,PE2
61 | tasks:
62 | - ios_config:
63 | lines:
64 | - "logging host 10.0.0.1"
65 | running_config: configs/{{inventory_hostname}}.cfg
66 |
67 | - name: Saving configuration changes
68 | hosts: all
69 | tasks:
70 | - ios_config:
71 | save_when: modified
72 |
--------------------------------------------------------------------------------
/DMVPN/deploy_scp.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy and deploy configurations
3 | hosts: all
4 | tasks:
5 | - name: Enable SCP server and configuration archive
6 | ios_config:
7 | src: deploy_setup.cfg
8 | become: yes
9 | tags: [ copy,deploy ]
10 |
11 | - block:
12 | - stat: path={{config_dir}}/{{inventory_hostname}}.conf
13 | register: stat
14 | - assert:
15 | that: stat.stat.exists
16 | msg: "Cannot find router configuration file for {{inventory_hostname}}"
17 | - name: Copy candidate configuration into routers
18 | net_put:
19 | src: "{{config_dir}}/{{inventory_hostname}}.conf"
20 | dest: "flash:candidate.cfg"
21 | tags: [ copy ]
22 |
23 | - block:
24 | - cli_command:
25 | command: disable
26 | - name: Enter Enable mode
27 | cli_command:
28 | command: enable
29 | prompt: Password
30 | answer: "{{ ansible_become_password|default(ansible_ssh_pass) }}"
31 | - name: Replace configuration
32 | cli_command:
33 | command: configure replace flash:candidate.cfg force revert trigger timer 120
34 | register: replace
35 | tags: [ print_action ]
36 | vars:
37 | ansible_command_timeout: 240
38 | - name: Confirm configuration replace
39 | cli_command:
40 | command: configure confirm
41 |
42 | - file: path={{build_dir}}/results state=directory
43 | check_mode: no
44 | - name: Save configuration replace results
45 | copy:
46 | content: |
47 |
48 | *********************************
49 | Results on {{inventory_hostname}}
50 | *********************************
51 |
52 | {{replace.stdout}}
53 | dest: "{{build_dir}}/results/{{inventory_hostname}}.txt"
54 | delegate_to: localhost
55 | - name: Assemble final report
56 | assemble: src={{build_dir}}/results dest={{build_dir}}/results.txt
57 | run_once: true
58 | delegate_to: localhost
59 | tags: [ deploy ]
60 |
--------------------------------------------------------------------------------
/DHCP-Pools/README.md:
--------------------------------------------------------------------------------
1 | # A Quick-and-Dirty Solution to Manage DHCP Pools
2 |
3 | The Ansible playbooks in this directory manage DHCP pools and DNS host
4 | mappings on one or more Cisco IOS routers or switches.
5 |
6 | I use them to manage the configuration of my home router (the actual data is stored in another directory).
7 |
8 | ## Setup
9 |
10 | The scripts expect:
11 | * managed hosts in _hosts_ Ansible inventory;
12 | * DHCP mappings in _pools.yml_;
13 | * list of managed interfaces in host variables.
14 |
15 | The SSH username and password are specified directly in the inventory file; obviously you should use Ansible vault in production deployments.
16 |
17 | ## Data model
18 |
19 | Host DHCP mappings are defined in the _hostPools_ variable in a YAML file. The value of that variable is a dictionary: keys are fully-qualified host names, values are the corresponding DHCP mappings.
20 |
21 | Each DHCP mapping is a dictionary with these values:
22 | * **id**: DHCP client ID or MAC address
23 | * **ip**: static IP assigned to that host
24 |
25 | Host variables should contain the list of managed interfaces. The _interfaces_ variable should be a dictionary of interface names and their parameters (see also `host_vars/gw.yml`). Interface parameters include:
26 | * **ip**: IP prefix configured on the interface
27 | * **dhcp**: must be set to *enabled* to include the interface in DHCP configuration
28 | * **domain**: DHCP domain name
29 |
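Putting the two together, a hypothetical data set following this model might look like this (names and addresses are illustrative only):

```yaml
# pools.yml -- host DHCP mappings (hypothetical example)
hostPools:
  printer.example.com:
    id: 0050.56ab.cdef        # MAC address (dotted format) or DHCP client ID
    ip: 192.168.1.10          # static IP assigned to this host

# host_vars/gw.yml -- managed interfaces (hypothetical example)
interfaces:
  GigabitEthernet0/1:
    ip: 192.168.1.1/24        # IP prefix configured on the interface
    dhcp: enabled             # include this interface in DHCP configuration
    domain: example.com       # DHCP domain name
```
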
30 | ## Playbooks
31 |
32 | The following playbooks are used to configure DHCP pools on Cisco IOS devices:
33 | * **extract.yml**: Extracts existing host DHCP pools into a per-device YAML data model in the _pools_ directory
34 | * **check.yml**: Checks the existing pools configured on managed devices and reports extraneous pools (see also the _Device configuration_ section)
35 | * **cleanup.yml**: Removes extraneous pools (those reported by *check.yml*) from managed device configuration.
36 | * **configure.yml**: Configure DHCP pools
37 |
38 | ## Device configuration
39 |
40 | The playbooks expect two types of DHCP pools on managed devices:
41 | * Interface pools (pool name = DHCP or interface name)
42 | * Host pools (pool name = host FQDN as specified in the data model)
43 |
44 | All other pools are considered extraneous.
45 |
46 | The playbooks create and/or remove host pools and do not manage interface pools.
47 |
--------------------------------------------------------------------------------
/DMVPN/roles/dmvpn/templates/20-dmvpn.j2:
--------------------------------------------------------------------------------
1 | crypto isakmp policy 10
2 | authentication pre-share
3 | group 2
4 | !
5 | !
6 | crypto ipsec transform-set DMVPN esp-des esp-sha-hmac
7 | mode transport
8 | !
9 | crypto ipsec profile DMVPN
10 | set transform-set DMVPN
11 | !
12 | {% if uplink_vrf is not defined %}
13 | crypto keyring DMVPN
14 | pre-shared-key address 0.0.0.0 0.0.0.0 key TESTING
15 | {% endif %}
16 | !
17 | {% for ifnum,intf in DMVPN|default({})|dictsort if 'tunnel' in ifnum %}
18 | {# Find underlying WAN interface #}
19 | {% if WAN[ifnum] is defined %}
20 | {% set WANIF = WAN[ifnum] %}
21 | {% set wannum = ifnum %}
22 | {% else %}
23 | {% set WANIF = WAN[0] %}
24 | {% set wannum = 0 %}
25 | {% endif %}
26 | {# Find VRF associated with WAN interface #}
27 | {% if uplink_vrf is defined %}
28 | {% if uplink_vrf[wannum] is defined %}
29 | {% set vrf = uplink_vrf[wannum] %}
30 | {% endif %}
31 | {% endif %}
32 | {% if vrf is defined %}
33 | crypto keyring DMVPN vrf {{vrf.name|default('Internet')}}
34 | pre-shared-key address 0.0.0.0 0.0.0.0 key TESTING
35 | {% endif %}
36 | !
37 | interface {{ifnum|capitalize}}
38 | ip address {{intf.ip}} {{intf.mask|default("255.255.255.0")}}
39 | no ip redirects
40 | ip mtu 1400
41 | ip tcp adjust-mss 1360
42 | ip nhrp authentication {{tunnel[ifnum].auth}}
43 | {% if 'hubs' in group_names %}
44 | ip nhrp map multicast dynamic
45 | tunnel mode gre multipoint
46 | {% else %}
47 | ip nhrp map {{hostvars[tunnel[ifnum].hub_router].DMVPN[ifnum].ip}} {{hostvars[tunnel[ifnum].hub_router].WAN[0].ip}}
48 | ip nhrp map multicast {{hostvars[tunnel[ifnum].hub_router].WAN[0].ip}}
49 | ip nhrp holdtime 60
50 | ip nhrp nhs {{hostvars[tunnel[ifnum].hub_router].DMVPN[ifnum].ip}}
51 | ip nhrp registration timeout 30
52 | {% if tunnel[ifnum].phase|default('3') == '1' %}
53 | tunnel mode gre
54 | {% else %}
55 | tunnel mode gre multipoint
56 | {% endif %}
57 | {% endif %}
58 | ip nhrp network-id {{tunnel[ifnum].nhrp_id}}
59 | tunnel source {{WANIF.interface}}
60 | {% if vrf is defined %}
61 | tunnel vrf {{vrf.name|default('Internet')}}
62 | {% endif %}
63 | tunnel key {{tunnel[ifnum].gre}}
64 | tunnel protection ipsec profile DMVPN shared
65 | !
66 | {% endfor %}
67 |
--------------------------------------------------------------------------------
/VIRL2Inventory/VIRL2Inventory.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | try:
4 | import sys
5 | import getopt
6 | import pprint
7 | from lxml import etree
8 | except ImportError as e:
9 | print "Import error:",e
10 | sys.exit(2)
11 |
12 | def usage():
13 | print "Usage: VIRL2Inventory -i file [-jva]"
14 | print ""
15 | print " -i: specify input file (default: topology.virl)"
16 | print " -j: output JSON"
17 | print " -a: use ansible_host instead of ip variable"
18 | print " -v: verbose"
19 |
20 | def extractSingleNode(node):
21 | name = node.get("name")
22 | ip = node.get("ipv4")
23 | os = node.get("subtype")
24 |
25 | if os == "IOSv": os = "ios"
26 |
27 | ns = { 'virl' : 'http://www.cisco.com/VIRL' }
28 | extlist = node.findall("virl:extensions/virl:entry",ns)
29 | for ext in extlist:
30 |         if ext.get("key") == "static_ip": ip = ext.findtext(".")
31 |
32 | return { 'name': name, 'ip': ip, 'os': os }
33 |
34 | def extractNodes(doc):
35 | ns = { 'virl' : 'http://www.cisco.com/VIRL' }
36 | nodes = doc.findall('//virl:node',ns)
37 |
38 | nodeList = []
39 | for node in nodes:
40 | nodeData = extractSingleNode(node)
41 | nodeList.append(nodeData)
42 |
43 | return nodeList
44 |
45 | def readXMLFile(f):
46 | try:
47 | return etree.parse(f)
48 | except IOError as e:
49 | print e
50 | except etree.XMLSyntaxError as e:
51 | print "XML syntax error: {0}".format(e)
52 | except:
53 | print "Unexpected error:", sys.exc_info()[0]
54 |   sys.exit(2)   # reached only when the file could not be read or parsed
55 |
56 | def printInventory(nodeList,osVar):
57 | for node in nodeList:
58 | if 'ip' in node and node['ip']:
59 | print "{name:<10} {osvar}={ip:<18} os={os}".format(osvar=osVar,**node)
60 |
61 | def getOptions():
62 | try:
63 | options, args = getopt.getopt(sys.argv[1:], "i:jvha", ["input=", "json"])
64 | except getopt.GetoptError as err:
65 | # print help information and exit:
66 | print str(err) # will print something like "option -a not recognized"
67 | usage()
68 | sys.exit(2)
69 |
70 | opts = { 'file': 'topology.virl', 'osvar': 'ip' }
71 |
72 | for opt,arg in options:
73 |         if opt in ("-i","--input"):
74 |             opts['file'] = arg
75 |         elif opt in ("-j","--json"):
76 | opts['json'] = True
77 | elif opt == "-v":
78 | opts['verbose'] = True
79 | elif opt == "-a":
80 | opts['osvar'] = "ansible_host"
81 | elif opt == "-h":
82 | usage()
83 | sys.exit(0)
84 | else:
85 |             assert False,"Unhandled option {0}".format(opt)
86 |
87 | return opts
88 |
89 | def main():
90 | opts = getOptions()
91 | assert not 'json' in opts,"JSON printout not implemented yet"
92 |
93 | if 'verbose' in opts: print "Reading: "+opts['file']
94 |
95 | doc = readXMLFile(opts['file'])
96 | nodeList = extractNodes(doc)
97 | printInventory(nodeList,opts['osvar'])
98 |
99 | main()
--------------------------------------------------------------------------------
/LLDP-to-Graph/README.md:
--------------------------------------------------------------------------------
1 | # Generate network topology graph from LLDP neighbors
2 |
3 | The *LLDP-to-Graph* Ansible playbook uses LLDP neighbor data collected
4 | with *napalm_get_facts* Ansible module to generate network diagram in
5 | *Graphviz* .dot file format.
6 |
7 | You can find sample solutions that attendees of our
8 | [Building Network Automation Solutions](https://www.ipspace.net/Building_Network_Automation_Solutions)
9 | online course created based on this concept in the Network Diagrams part of our
10 | [network automation solutions showcase](https://www.ipspace.net/NetAutSol/Solutions).
11 |
12 | ## Installation guide
13 |
14 | The playbooks were tested with these versions of Ansible and NAPALM:
15 |
16 | * Ansible 2.4
17 | * napalm 1.2.0 (or greater)
18 | * napalm-ansible 0.7.0 (or greater)
19 |
20 | Notes:
21 |
22 | * The playbooks have been updated to work with Ansible 2.4 and will not work with previous versions of Ansible
23 | * Run `napalm-ansible` to find the path to your NAPALM installation and update **ansible.cfg** accordingly
24 |
25 | Updates from the [Ansible for Networking Engineers](https://www.ipspace.net/Ansible) case study (details [here](https://my.ipspace.net/bin/list?id=AnsibleOC#SAMPLES)):
26 |
27 | * The playbooks and Jinja2 templates have been updated to support platforms (like Cisco IOS) that send shortened interface names in LLDP updates
28 | * The playbooks work with a mix of hostnames and FQDNs (for example, E1 versus E1.virl.info)
29 | * Use `-e no_domain=1` option on `ansible-playbook` command line to use short hostnames in printouts and graphs
30 |
31 | Use `git checkout LLDP-to-Graph-v1.0` to get the source files matching the original case study.
32 |
33 | ## Usage
34 |
35 | * Create your inventory file. The current **hosts** file uses vEOS leaf-and-spine topology. Set IP addresses, usernames, passwords and ports in the inventory file.
36 | * Install NAPALM Ansible modules with `git clone https://github.com/napalm-automation/napalm-ansible/` (assuming you already installed NAPALM)
37 | * Install *graphviz*
38 | * Generate the network topology file with
39 | ```
40 | ansible-playbook LLDP-to-Graph.yml
41 | ```
42 | * Generate the network diagram (in PNG format) with
43 | ```
44 | dot -Tpng network.dot >network.png
45 | ```
46 | The *disable-LLDP-on-edge* playbook can be used to roll out LLDP on STP point-to-point links while keeping it disabled on STP edge ports. This keeps LLDP-speaking hosts out of the topology.
47 |
48 | * Enjoy, modify and submit a pull request when you add something awesome
49 |
50 | ## More information
51 |
52 | * [Ansible for Networking Engineers](http://www.ipspace.net/Ansible_for_Networking_Engineers) online course ([contents](https://my.ipspace.net/bin/list?id=AnsibleOC))
53 | * [Building Network Automation Solutions](http://www.ipspace.net/Building_Network_Automation_Solutions) online course ([contents](https://my.ipspace.net/bin/list?id=NetAutSol))
54 |
--------------------------------------------------------------------------------
/Plugins/filter/list.py:
--------------------------------------------------------------------------------
1 | #
2 | # Simple list append filter
3 | #
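# Example usage in Jinja2/Ansible expressions (illustrative only -- the exact
# data you pass in depends on your playbooks):
#
#   {{ [ 1, 2 ] | append(3, [ 4, 5 ]) }}        -> [ 1, 2, 3, 4, 5 ]
#   {{ [ 1, [ 2, [ 3 ] ] ] | flatten }}         -> [ 1, 2, 3 ]
#   {{ nodes | dupattr(attr='name') }}          -> None, or a list of duplicate-value messages
#   {{ some_dict | to_list }}                   -> list of values, each with its key copied into 'id'
#   {{ data | remove_keys('password', recurse=True) }}
#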
4 | from __future__ import (absolute_import, division, print_function)
5 | __metaclass__ = type
6 |
7 | from jinja2 import TemplateError
8 |
9 | class FilterModule(object):
10 |
11 |
12 | #
13 | # Append a number of items to the list
14 | #
15 | def list_append(self,l,*argv):
16 | if type(l) is not list:
17 | raise TemplateError("First argument of append filter must be a list")
18 |
19 | for element in argv:
20 | if type(element) is list:
21 | l.extend(element)
22 | else:
23 | l.append(element)
24 | return l
25 |
26 | def list_flatten(self,l):
27 | if type(l) is not list:
28 | raise TemplateError("flatten filter takes a list")
29 |
30 | def recurse_flatten(l):
31 | if type(l) is not list:
32 | return [l]
33 | r = []
34 | for i in l:
35 | r.extend(recurse_flatten(i))
36 | return r
37 |
38 | return recurse_flatten(l)
39 |
40 | def check_duplicate_attr(self,d,attr = None,mandatory = False):
41 | seen = {}
42 | stat = []
43 |
44 | def get_value(value):
45 |
46 | def get_single_value(v,k):
47 | if not(k in v):
48 | if mandatory:
49 | raise TemplateError("Missing mandatory attribute %s in %s" % (k,v))
50 | else:
51 | return None
52 | return v[k]
53 |
54 | if type(attr) is list:
55 | retval = ""
56 | for a in attr:
57 | item = get_single_value(value,a)
58 | retval += " " if retval else ""
59 | retval += "%s=%s" % (a,item)
60 | return retval
61 | else:
62 | return get_single_value(value,attr)
63 |
64 | def check_unique_value(key,value):
65 | if key is not None:
66 | value['key'] = key
67 | v = get_value(value)
68 | if v in seen:
69 | stat.append("Duplicate value %s of attribute %s found in %s and %s" %
70 | (v,attr,
71 | seen[v]['key'] if ('key' in seen[v]) else seen[v],
72 | value['key'] if ('key' in value) else value))
73 | else:
74 | seen[v] = value
75 |
76 | # sanity check: do we know which attribute to check?
77 | #
78 | if attr is None:
79 | raise TemplateError("You have to specify attr=name in checkunique")
80 |
81 | # iterate over a list or a dictionary, fail otherwise
82 | #
83 | if type(d) is list:
84 | for value in d:
85 | check_unique_value(None,value)
86 | elif type(d) is dict:
87 | for key in d:
88 | check_unique_value(key,d[key])
89 | else:
90 |       raise TemplateError("dupattr filter expects a list or a dictionary")
91 |
92 | if len(stat) == 0:
93 | return None
94 | else:
95 | return stat
96 |
97 |
98 | def dict_to_list(self,o):
99 | if type(o) is not dict:
100 | raise TemplateError("dict_to_list can only be used on dictionaries")
101 |
102 | l = []
103 | for k,v in o.items():
104 | v['id'] = k
105 | l.append(v)
106 |
107 | return l
108 |
109 |
110 | def remove_keys(self,val,keylist,recurse = False):
111 | if type(keylist) is str:
112 | keylist = [ keylist ]
113 | if type(val) is dict:
114 |       for k,v in list(val.items()):   # copy items so keys can be deleted while iterating
115 | if k in keylist:
116 | del val[k]
117 | elif recurse:
118 | val[k] = self.remove_keys(v,keylist,recurse)
119 | return val
120 | elif type(val) is list:
121 | newval = []
122 | for v in val:
123 | newval.append(self.remove_keys(v,keylist,recurse))
124 | return newval
125 | else:
126 | return val
127 |
128 | def filters(self):
129 | return {
130 | 'append' : self.list_append,
131 | 'flatten' : self.list_flatten,
132 | 'dupattr' : self.check_duplicate_attr,
133 | 'to_list' : self.dict_to_list,
134 | 'remove_keys': self.remove_keys
135 | }
--------------------------------------------------------------------------------
/DMVPN/libvirt/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "cisco/iosv"
3 | config.vm.synced_folder ".", "/vagrant", disabled: true
4 | config.ssh.insert_key = false
5 | config.vm.boot_timeout = 180
6 | config.vm.guest = :freebsd
7 |
8 | config.vm.provider :libvirt do |domain|
9 | domain.nic_adapter_count = 8
10 | domain.memory = 512
11 | domain.cpus = 1
12 | domain.driver = "kvm"
13 | domain.nic_model_type = "e1000"
14 | end
15 |
16 | config.vm.define "c1" do |c1|
17 | c1.vm.network :private_network, :libvirt__network_name => "DMVPN-hub", :ip => "10.255.0.2",
18 | :libvirt__forward_mode => "veryisolated", :libvirt__dhcp_enabled => false, :auto_config => false
19 | c1.vm.network :private_network, :ip => "10.0.7.17", :libvirt__network_name => "DMVPN-Internet",
20 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
21 | c1.vm.provider :libvirt do |domain|
22 | domain.management_network_mac = "08-4F-A9-00-00-01"
23 | end
24 | end
25 |
26 | config.vm.define "c2" do |c2|
27 | c2.vm.network :private_network, :libvirt__network_name => "DMVPN-hub", :ip => "10.255.0.3",
28 | :libvirt__forward_mode => "veryisolated", :libvirt__dhcp_enabled => false, :auto_config => false
29 | c2.vm.network :private_network, :ip => "10.0.7.13", :libvirt__network_name => "DMVPN-Internet",
30 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
31 | c2.vm.provider :libvirt do |domain|
32 | domain.management_network_mac = "08-4F-A9-00-00-02"
33 | end
34 | end
35 |
36 | config.vm.define "r1a" do |r1a|
37 | r1a.vm.network :private_network, :libvirt__network_name => "DMVPN-site-1", :ip => "10.255.1.2",
38 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
39 | r1a.vm.network :private_network, :ip => "10.0.7.22", :libvirt__network_name => "DMVPN-Internet",
40 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
41 | r1a.vm.provider :libvirt do |domain|
42 | domain.management_network_mac = "08-4F-A9-00-00-03"
43 | end
44 | end
45 |
46 | config.vm.define "r1b" do |r1b|
47 | r1b.vm.network :private_network, :libvirt__network_name => "DMVPN-site-1", :ip => "10.255.1.3",
48 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
49 | r1b.vm.network :private_network, :ip => "10.0.7.26", :libvirt__network_name => "DMVPN-Internet",
50 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
51 | r1b.vm.provider :libvirt do |domain|
52 | domain.management_network_mac = "08-4F-A9-00-00-04"
53 | end
54 | end
55 |
56 | config.vm.define "r2" do |r2|
57 | r2.vm.network :private_network, :libvirt__network_name => "DMVPN-site-2", :ip => "10.255.2.2",
58 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
59 | r2.vm.network :private_network, :ip => "10.0.7.9", :libvirt__network_name => "DMVPN-Internet",
60 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
61 | r2.vm.provider :libvirt do |domain|
62 | domain.management_network_mac = "08-4F-A9-00-00-05"
63 | end
64 | end
65 |
66 | config.vm.define "r3" do |r3|
67 | r3.vm.network :private_network, :libvirt__network_name => "DMVPN-site-3", :ip => "10.255.3.2",
68 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
69 | r3.vm.network :private_network, :ip => "10.0.7.5", :libvirt__network_name => "DMVPN-Internet",
70 | :libvirt__forward_mode => "none", :libvirt__dhcp_enabled => false, :auto_config => false
71 | r3.vm.provider :libvirt do |domain|
72 | domain.management_network_mac = "08-4F-A9-00-00-06"
73 | end
74 | end
75 |
76 | end
77 |
--------------------------------------------------------------------------------
/6-router-setup/libvirt/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "cisco/iosv"
3 | config.vm.synced_folder ".", "/vagrant", disabled: true
4 | config.ssh.insert_key = false
5 | config.vm.boot_timeout = 180
6 | config.vm.guest = :freebsd
7 |
8 | config.vm.provider :libvirt do |domain|
9 | domain.nic_adapter_count = 8
10 | domain.memory = 512
11 | domain.cpus = 1
12 | domain.driver = "kvm"
13 | domain.nic_model_type = "e1000"
14 | end
15 |
16 | config.vm.define "pe1" do |pe1|
17 | pe1.vm.network :private_network,:libvirt__tunnel_type => "udp",
18 | :libvirt__tunnel_local_ip => "127.1.1.1",:libvirt__tunnel_local_port => "10001",
19 | :libvirt__tunnel_ip => "127.1.3.1",:libvirt__tunnel_port => "10001",
20 | auto_config: false
21 | pe1.vm.network :private_network,:libvirt__tunnel_type => "udp",
22 | :libvirt__tunnel_local_ip => "127.1.1.2",:libvirt__tunnel_local_port => "10002",
23 | :libvirt__tunnel_ip => "127.1.4.1",:libvirt__tunnel_port => "10002",
24 | auto_config: false
25 | pe1.vm.network :private_network, :libvirt__network_name => "6-router-site-1", :ip => "10.255.10.2",
26 | :libvirt__forward_mode => "veryisolated", :libvirt__dhcp_enabled => false, :auto_config => false
27 | pe1.vm.provider :libvirt do |domain|
28 | domain.management_network_mac = "08-4F-A9-00-00-01"
29 | end
30 | end
31 |
32 | config.vm.define "pe2" do |pe2|
33 | pe2.vm.network :private_network,:libvirt__tunnel_type => "udp",
34 | :libvirt__tunnel_local_ip => "127.1.2.1",:libvirt__tunnel_local_port => "10003",
35 | :libvirt__tunnel_ip => "127.1.5.1",:libvirt__tunnel_port => "10003",
36 | auto_config: false
37 | pe2.vm.network :private_network,:libvirt__tunnel_type => "udp",
38 | :libvirt__tunnel_local_ip => "127.1.2.2",:libvirt__tunnel_local_port => "10004",
39 | :libvirt__tunnel_ip => "127.1.6.1",:libvirt__tunnel_port => "10004",
40 | auto_config: false
41 | pe2.vm.network :private_network, :libvirt__network_name => "6-router-site-2", :ip => "10.255.11.2",
42 | :libvirt__forward_mode => "veryisolated", :libvirt__dhcp_enabled => false, :auto_config => false
43 | pe2.vm.provider :libvirt do |domain|
44 | domain.management_network_mac = "08-4F-A9-00-00-02"
45 | end
46 | end
47 |
48 | config.vm.define "e1" do |e1|   # e1: UDP tunnels to pe1, e2 and e3
49 | e1.vm.network :private_network,:libvirt__tunnel_type => "udp",
50 | :libvirt__tunnel_local_ip => "127.1.3.1",:libvirt__tunnel_local_port => "10001",
51 | :libvirt__tunnel_ip => "127.1.1.1",:libvirt__tunnel_port => "10001",
52 | auto_config: false
53 | e1.vm.network :private_network,:libvirt__tunnel_type => "udp",
54 | :libvirt__tunnel_local_ip => "127.1.3.2",:libvirt__tunnel_local_port => "10005",
55 | :libvirt__tunnel_ip => "127.1.4.2",:libvirt__tunnel_port => "10005",
56 | auto_config: false
57 | e1.vm.network :private_network,:libvirt__tunnel_type => "udp",
58 | :libvirt__tunnel_local_ip => "127.1.3.3",:libvirt__tunnel_local_port => "10006",
59 | :libvirt__tunnel_ip => "127.1.5.3",:libvirt__tunnel_port => "10006",
60 | auto_config: false
61 | e1.vm.provider :libvirt do |domain|
62 | domain.management_network_mac = "08-4F-A9-00-00-03"
63 | end
64 | end
65 |
66 | config.vm.define "e2" do |e2|   # e2: UDP tunnels to pe1, e1 and e4
67 | e2.vm.network :private_network,:libvirt__tunnel_type => "udp",
68 | :libvirt__tunnel_local_ip => "127.1.4.1",:libvirt__tunnel_local_port => "10002",
69 | :libvirt__tunnel_ip => "127.1.1.2",:libvirt__tunnel_port => "10002",
70 | auto_config: false
71 | e2.vm.network :private_network,:libvirt__tunnel_type => "udp",
72 | :libvirt__tunnel_local_ip => "127.1.4.2",:libvirt__tunnel_local_port => "10005",
73 | :libvirt__tunnel_ip => "127.1.3.2",:libvirt__tunnel_port => "10005",
74 | auto_config: false
75 | e2.vm.network :private_network,:libvirt__tunnel_type => "udp",
76 | :libvirt__tunnel_local_ip => "127.1.4.3",:libvirt__tunnel_local_port => "10007",
77 | :libvirt__tunnel_ip => "127.1.6.3",:libvirt__tunnel_port => "10007",
78 | auto_config: false
79 | e2.vm.provider :libvirt do |domain|
80 | domain.management_network_mac = "08-4F-A9-00-00-04"
81 | end
82 | end
83 |
84 | config.vm.define "e3" do |e3|   # e3: UDP tunnels to e1, e4 and pe2
85 | e3.vm.network :private_network,:libvirt__tunnel_type => "udp",
86 | :libvirt__tunnel_local_ip => "127.1.5.3",:libvirt__tunnel_local_port => "10006",
87 | :libvirt__tunnel_ip => "127.1.3.3",:libvirt__tunnel_port => "10006",
88 | auto_config: false
89 | e3.vm.network :private_network,:libvirt__tunnel_type => "udp",
90 | :libvirt__tunnel_local_ip => "127.1.5.2",:libvirt__tunnel_local_port => "10008",
91 | :libvirt__tunnel_ip => "127.1.6.2",:libvirt__tunnel_port => "10008",
92 | auto_config: false
93 | e3.vm.network :private_network,:libvirt__tunnel_type => "udp",
94 | :libvirt__tunnel_local_ip => "127.1.5.1",:libvirt__tunnel_local_port => "10003",
95 | :libvirt__tunnel_ip => "127.1.2.1",:libvirt__tunnel_port => "10003",
96 | auto_config: false
97 | e3.vm.provider :libvirt do |domain|
98 | domain.management_network_mac = "08-4F-A9-00-00-05"
99 | end
100 | end
101 |
102 | config.vm.define "e4" do |e4|   # e4: UDP tunnels to e2, e3 and pe2
103 | e4.vm.network :private_network,:libvirt__tunnel_type => "udp",
104 | :libvirt__tunnel_local_ip => "127.1.6.3",:libvirt__tunnel_local_port => "10007",
105 | :libvirt__tunnel_ip => "127.1.4.3",:libvirt__tunnel_port => "10007",
106 | auto_config: false
107 | e4.vm.network :private_network,:libvirt__tunnel_type => "udp",
108 | :libvirt__tunnel_local_ip => "127.1.6.2",:libvirt__tunnel_local_port => "10008",
109 | :libvirt__tunnel_ip => "127.1.5.2",:libvirt__tunnel_port => "10008",
110 | auto_config: false
111 | e4.vm.network :private_network,:libvirt__tunnel_type => "udp",
112 | :libvirt__tunnel_local_ip => "127.1.6.1",:libvirt__tunnel_local_port => "10004",
113 | :libvirt__tunnel_ip => "127.1.2.2",:libvirt__tunnel_port => "10004",
114 | auto_config: false
115 | e4.vm.provider :libvirt do |domain|
116 | domain.management_network_mac = "08-4F-A9-00-00-06"
117 | end
118 | end
119 |
120 | end
121 |
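
The six routers above are wired together with point-to-point UDP tunnels instead of named libvirt networks: both ends of a link use the same UDP port, and each side's libvirt__tunnel_local_ip/libvirt__tunnel_local_port is the other side's libvirt__tunnel_ip/libvirt__tunnel_port. Matching the addresses and ports gives the links pe1-e1 (10001), pe1-e2 (10002), pe2-e3 (10003), pe2-e4 (10004), e1-e2 (10005), e1-e3 (10006), e2-e4 (10007) and e3-e4 (10008). A minimal sketch of a single such link between two hypothetical routers ra and rb (names, loopback addresses and port invented for illustration):

Vagrant.configure("2") do |config|
  config.vm.box = "cisco/iosv"

  config.vm.define "ra" do |ra|
    ra.vm.network :private_network, :libvirt__tunnel_type => "udp",
      :libvirt__tunnel_local_ip => "127.2.1.1", :libvirt__tunnel_local_port => "20001",  # ra receives here
      :libvirt__tunnel_ip => "127.2.2.1", :libvirt__tunnel_port => "20001",              # ...and sends to rb
      auto_config: false
  end

  config.vm.define "rb" do |rb|
    rb.vm.network :private_network, :libvirt__tunnel_type => "udp",
      :libvirt__tunnel_local_ip => "127.2.2.1", :libvirt__tunnel_local_port => "20001",  # rb receives here
      :libvirt__tunnel_ip => "127.2.1.1", :libvirt__tunnel_port => "20001",              # ...and sends to ra
      auto_config: false
  end
end

With either Vagrantfile in place, the lab would typically be started with vagrant up --provider=libvirt.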
--------------------------------------------------------------------------------