├── .circleci ├── README.md └── config.yml ├── .github ├── Dockerfile └── workflows │ ├── ci.yml │ ├── publish-actions-runner.yml │ └── publish-container.yml ├── .gitignore ├── .gitmodules ├── Dockerfile ├── LICENSE ├── README.md ├── TODO.md ├── ansible ├── .ansible-lint ├── ansible.cfg ├── day_-1 │ ├── build-ca.yml │ ├── build-virl.yml │ ├── clean-virl.yml │ └── group_vars │ │ └── all │ │ └── placeholder ├── day_0 │ ├── build-control-plane.yml │ ├── check-reqs.yml │ ├── check-vmanage.yml │ ├── clean-certs.yml │ ├── clean-control-plane.yml │ ├── clean-edges.yml │ ├── config-certificates.yml │ ├── config-control-plane.yml │ ├── config-vmanage.yml │ ├── deploy-control-plane.yml │ ├── get-bootstrap.yml │ ├── group_vars │ │ └── all │ │ │ └── placeholder │ ├── onboard-edges.yml │ ├── remove-device.yml │ ├── templates │ │ ├── asa │ │ │ ├── cli.j2 │ │ │ ├── cli │ │ │ │ ├── bgp.j2 │ │ │ │ ├── dns.j2 │ │ │ │ ├── failover.j2 │ │ │ │ ├── interfaces.j2 │ │ │ │ ├── license.j2 │ │ │ │ ├── nat.j2 │ │ │ │ ├── ntp.j2 │ │ │ │ ├── ospf.j2 │ │ │ │ ├── prefix-lists.j2 │ │ │ │ ├── static-routes.j2 │ │ │ │ └── system.j2 │ │ │ └── virl.j2 │ │ ├── host.j2 │ │ ├── ios │ │ │ ├── cli.j2 │ │ │ ├── cli │ │ │ │ ├── bgp.j2 │ │ │ │ ├── dns.j2 │ │ │ │ ├── interfaces.j2 │ │ │ │ ├── ntp.j2 │ │ │ │ ├── ospf.j2 │ │ │ │ ├── static-routes.j2 │ │ │ │ └── system.j2 │ │ │ ├── nfvis.j2 │ │ │ └── virl.j2 │ │ ├── sdwan │ │ │ ├── cedge_user-data.j2 │ │ │ ├── cedge_user-data_attached.j2 │ │ │ ├── iosxe.j2 │ │ │ ├── netconf.j2 │ │ │ ├── netconf │ │ │ │ ├── system.j2 │ │ │ │ └── vpn.j2 │ │ │ ├── user-data.j2 │ │ │ ├── vbond_user-data.j2 │ │ │ └── vedge.j2 │ │ ├── terraform │ │ │ ├── aws_control_tfvars.j2 │ │ │ ├── aws_edge_network_tfvars.j2 │ │ │ ├── aws_edges_tfvars.j2 │ │ │ ├── aws_network_tfvars.j2 │ │ │ ├── azure_edge_network_tfvars.j2 │ │ │ ├── azure_edges_tfvars.j2 │ │ │ ├── gcp_edge_network_tfvars.j2 │ │ │ ├── gcp_edges_tfvars.j2 │ │ │ ├── vmware_control_tfvars.j2 │ │ │ ├── vmware_edges_tfvars.j2 │ │ │ └── 
vmware_edges_tfvars.old.j2 │ │ ├── viptela │ │ │ ├── netconf.j2 │ │ │ ├── vedge.j2 │ │ │ └── virl.j2 │ │ ├── virl │ │ │ ├── host.j2 │ │ │ ├── lxc.j2 │ │ │ ├── topology_v1.j2 │ │ │ └── vmanage.j2 │ │ └── virl1_topology.j2 │ ├── terraform-apply-edges.yml │ └── terraform-apply.yml ├── day_1 │ ├── activate-policy.yml │ ├── attach-template.yml │ ├── config-sdwan.yml │ ├── deactivate-policy.yml │ ├── delete-templates.yml │ ├── detach-template.yml │ ├── export-policy.yml │ ├── export-templates.yml │ ├── group_vars │ │ └── all │ │ │ └── placeholder │ ├── import-policy.yml │ └── import-templates.yml ├── files │ ├── config.yaml │ ├── hq1.yaml │ ├── hq1_vmanage_policy_v19.yml │ ├── hq1_vmanage_policy_v20.yml │ ├── hq1_vmanage_templates_v19.yml │ ├── hq1_vmanage_templates_v20.yml │ ├── hq2.yaml │ ├── hq2_vmanage_policy_v19.yml │ ├── hq2_vmanage_policy_v20.yml │ ├── hq2_vmanage_templates_v19.yml │ ├── hq2_vmanage_templates_v20.yml │ ├── hq3_vmanage_policy_v19.yml │ ├── hq3_vmanage_policy_v20.yml │ ├── hq3_vmanage_templates_v19.yml │ ├── hq3_vmanage_templates_v20.yml │ ├── serialFile.viptela │ └── vmanage-templates.yml ├── inventory │ ├── hq1 │ │ ├── group_vars │ │ │ └── all │ │ │ │ ├── ping_tests.yml │ │ │ │ └── system.yml │ │ ├── host_vars │ │ │ ├── hq-dc-rtr1 │ │ │ │ ├── network.yml │ │ │ │ └── virl.yml │ │ │ ├── hq-rtr1 │ │ │ │ ├── network.yml │ │ │ │ └── virl.yml │ │ │ ├── internet │ │ │ │ ├── network.yml │ │ │ │ └── virl.yml │ │ │ ├── sp1-host1 │ │ │ │ ├── network.yml │ │ │ │ └── virl.yml │ │ │ └── sp1-rtr1 │ │ │ │ ├── network.yml │ │ │ │ └── virl.yml │ │ ├── network.yml │ │ ├── sdwan.yml │ │ └── virl.yml │ ├── hq2 │ │ ├── group_vars │ │ │ └── all │ │ │ │ └── ping_tests.yml │ │ ├── sdwan.yml │ │ ├── terraform.py │ │ └── virl.yml │ └── hq3 │ │ └── sdwan.yml ├── roles │ └── sdwan_tests │ │ ├── README.md │ │ ├── defaults │ │ └── main.yml │ │ ├── handlers │ │ └── main.yml │ │ ├── meta │ │ └── main.yml │ │ ├── tasks │ │ ├── ping-cedge.yml │ │ ├── ping-test.yml │ │ └── 
ping-vedge.yml │ │ ├── tests │ │ ├── inventory │ │ └── test.yml │ │ └── vars │ │ └── main.yml └── show-inventory.yml ├── bin ├── config_build.sh ├── configure.sh ├── conftest.sh ├── decrypt_secrets.sh ├── delete_cp.sh ├── delete_edges.sh ├── initialize.sh ├── install_ca.sh ├── install_cp.sh ├── install_edges.sh ├── minimal_env.sh ├── play.sh ├── ssh_key_gen.py ├── vars.py ├── vault_api.py ├── vault_cli.sh ├── vpn.sh └── vsphere_env_CoLAB.sh ├── config ├── .gitignore ├── config.example.yaml ├── metadata.yaml ├── policy │ ├── README.md │ ├── config.rego │ └── data.yaml └── templates │ ├── day-1_local.j2 │ ├── day0_local.j2 │ ├── day1_local.j2 │ └── sdwan_inventory.j2 ├── docs ├── deploying_controllers_cloud.md ├── deploying_edges_cloud.md ├── gitlab.md ├── images │ ├── hq1.png │ └── hq2.png ├── simulation.md ├── virl-hq1.md ├── virl-hq2.md └── vmware-hq2.md ├── extras ├── create-gitlab-project.sh └── delete-gitlab-project.sh ├── requirements.txt └── sdwan_config_builder ├── README.md ├── pyproject.toml ├── requirements.txt ├── sdwan_config_build.py └── src └── sdwan_config_builder ├── __init__.py ├── __main__.py ├── __version__.py ├── commands.py └── loader ├── __init__.py ├── models.py └── validators.py /.circleci/README.md: -------------------------------------------------------------------------------- 1 | # Setting up CircleCI 2 | 3 | ## Variables 4 | 5 | See the list of possible environment variables in the following table. They can be set in the project settings, or part of an organization-wide context. Without setting the **required** variables, the pipeline will fail. 6 | 7 | > **NOTE** The default values usually come from Terraform defaults defined in the `terraform-sdwan` submodule, not as variables defined in the Ansible code. That means that the variables themselves may not be defined at all, even if the table shows they have a default value. 
8 | 9 | | Name | Importance | Default value | Recommended value | Notes | 10 | |-------------------------|-------------|----------------|------------------------|-------| 11 | | IMAGE | optional | ghcr.io/ciscodevnet/sdwan-devops:cloud || Docker image to use for running the Ansible playbooks, and Terraform | 12 | | PROJ_ROOT | required | - | /home/circleci/project | The directory where the repository will be checked out, may depend on the executor image | 13 | | VAULT_PASS | required | - | - | The clear text password for Ansible Vault, needed to decrypt the included `ansible/files/serialFile.viptela` | 14 | | AWS_ACCESS_KEY_ID | required | - | - | Required if deploying something on AWS | 15 | | AWS_SECRET_ACCESS_KEY | required | - | - | Required if deploying something on AWS | 16 | | AWS_SESSION_TOKEN | optional | - | - | Alternative to the above two, depending on how authentication on AWS is set up | 17 | | GOOGLE_CREDENTIALS | required | - | - | Contents (not the path) of a GCP service account key file in JSON format (without newline characters), for deploying a cEdge on GCP | 18 | |GOOGLE_OAUTH_ACCESS_TOKEN| optional | - | - | Alternative to the above two, depending on how authentication on GCP is set up | 19 | | ARM_CLIENT_ID | optional | - | - | Required if deploying something on Azure | 20 | | ARM_CLIENT_SECRET | optional | - | - | Required if deploying something on Azure | 21 | | ARM_SUBSCRIPTION_ID | optional | - | - | Required if deploying something on Azure | 22 | | ARM_TENANT_ID | optional | - | - | Required if deploying something on Azure | 23 | | CONFIG_BUILDER_METADATA | required | - | ../config/metadata.yaml| Configure the sdwan_config_builder | 24 | | VPN_GW | optional | - | - | OpenConnect compatible VPN gateway hostname, for setting up an OpenConnect VPN (used for on-prem access) | 25 | | VPN_USER | optional | - | - | VPN gateway username | 26 | | VPN_PASS | optional | - | - | VPN gateway password | 27 | | VPN_HOST | optional | - | - | DC host to 
ping to check connectivity | 28 | 29 | ### External pipeline 30 | 31 | | Name | Importance | Notes | 32 | |----------------------------|-------------|-------| 33 | | CIRCLE_TOKEN | required | A CircleCI personal access token with rights to trigger the external pipeline | 34 | | EXTERNAL_PIPELINE_REPOUSER | required | The organization or username of the repository for the external pipeline | 35 | | EXTERNAL_PIPELINE_REPONAME | required | The external pipeline repository name | 36 | | EXTERNAL_PIPELINE_BRANCH | required | The Git branch on the external pipeline repository | 37 | 38 | ## Pipeline parameters 39 | 40 | The pipeline accepts the following parameters (can be set with an API trigger, see below): 41 | 42 | - `deploy-infra` -- the default value is `aws`, and it is the only one that works for now, but `vmware` and `azure` may come in the future 43 | - `remove-deployment` -- whether or not to remove all resources created in AWS after a successful run (for unsuccessful runs they are removed regardless of the value of this variable). The default value is `true`. Set to `false` if you want to use the pipeline to create an SD-WAN deployment that you need to keep after the pipeline finishes. It will need to be cleaned up manually 44 | - `wait-for-external-pipeline` -- we trigger an external pipeline (see variables above for definition) which in turn will run API calls against the deployment. This parameter controls how long we wait before initiating destruction of resources (unless `remove-deployment` is set to `false`). Default is 180 seconds 45 | 46 | ## Manual trigger 47 | 48 | A [personal API token](https://app.circleci.com/settings/user/tokens) needs to be generated first, and its value stored in the `CIRCLE_TOKEN` environment variable. 
One can check CircleCI API access with: 49 | 50 | curl https://circleci.com/api/v2/me --header "Circle-Token: $CIRCLE_TOKEN" 51 | 52 | To trigger the pipeline: 53 | 54 | curl -X POST https://circleci.com/api/v2/project/gh/ljakab/sdwan-devops/pipeline \ 55 | -H "Circle-Token: $CIRCLE_TOKEN" \ 56 | -H "Content-Type: application/json" \ 57 | -d '{"branch":"cloud","parameters":{"deploy-infra":"aws","remove-deployment":true}}' 58 | -------------------------------------------------------------------------------- /.github/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM summerwind/actions-runner:v2.287.1-ubuntu-20.04 2 | 3 | ARG build_date=unspecified 4 | 5 | LABEL org.opencontainers.image.title="Cisco-SDWAN" \ 6 | org.opencontainers.image.description="Cisco SDWAN DevOps" \ 7 | org.opencontainers.image.vendor="Cisco Systems" \ 8 | org.opencontainers.image.created="${build_date}" \ 9 | org.opencontainers.image.url="https://github.com/CiscoDevNet/sdwan-devops" 10 | 11 | RUN sudo apt-get update && sudo apt-get install -y \ 12 | sshpass \ 13 | python3-dev \ 14 | libffi-dev \ 15 | libxml2-dev \ 16 | libxslt-dev \ 17 | mkisofs \ 18 | git 19 | 20 | COPY requirements.txt /tmp/requirements.txt 21 | RUN pip3 install -r /tmp/requirements.txt 22 | 23 | WORKDIR /tmp 24 | ARG terraform_version=0.13.7 25 | RUN wget --quiet https://releases.hashicorp.com/terraform/${terraform_version}/terraform_${terraform_version}_linux_amd64.zip 26 | RUN unzip terraform_${terraform_version}_linux_amd64.zip 27 | RUN sudo mv terraform /usr/bin 28 | RUN rm terraform_${terraform_version}_linux_amd64.zip 29 | 30 | ENV ANSIBLE_HOST_KEY_CHECKING=false 31 | ENV ANSIBLE_RETRY_FILES_ENABLED=false 32 | ENV ANSIBLE_SSH_PIPELINING=true 33 | ENV ANSIBLE_LOCAL_TMP=/tmp 34 | ENV ANSIBLE_REMOTE_TMP=/tmp 35 | 36 | WORKDIR /ansible 37 | -------------------------------------------------------------------------------- /.github/workflows/publish-actions-runner.yml: 
-------------------------------------------------------------------------------- 1 | name: Publish actions-runner 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | # Publish semver tags as releases. 7 | tags: [ '*.*.*' ] 8 | pull_request: 9 | branches: [ master ] 10 | paths: 11 | - '.github/Dockerfile' 12 | workflow_dispatch: 13 | 14 | env: 15 | REGISTRY: ghcr.io 16 | IMAGE_NAME: ${{ github.repository }}-runner 17 | 18 | 19 | jobs: 20 | build: 21 | runs-on: ubuntu-latest 22 | permissions: 23 | contents: read 24 | packages: write 25 | steps: 26 | - name: Checkout repository 27 | uses: actions/checkout@v2 28 | 29 | # Login against a Docker registry except on PR 30 | # https://github.com/docker/login-action 31 | - name: Log into registry ${{ env.REGISTRY }} 32 | if: github.event_name != 'pull_request' 33 | uses: docker/login-action@28218f9b04b4f3f62068d7b6ce6ca5b26e35336c 34 | with: 35 | registry: ${{ env.REGISTRY }} 36 | username: ${{ github.actor }} 37 | password: ${{ secrets.GITHUB_TOKEN }} 38 | 39 | # Extract metadata (tags, labels) for Docker 40 | # https://github.com/docker/metadata-action 41 | - name: Extract Docker metadata 42 | id: meta 43 | uses: docker/metadata-action@98669ae865ea3cffbcbaa878cf57c20bbf1c6c38 44 | with: 45 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 46 | 47 | # Build and push Docker image with Buildx (don't push on PR) 48 | # https://github.com/docker/build-push-action 49 | - name: Build and push Docker image 50 | uses: docker/build-push-action@ad44023a93711e3deb337508980b4b5e9bcdc5dc 51 | with: 52 | context: . 
53 | file: .github/Dockerfile 54 | push: ${{ github.event_name != 'pull_request' }} 55 | tags: ${{ steps.meta.outputs.tags }} 56 | labels: ${{ steps.meta.outputs.labels }} 57 | -------------------------------------------------------------------------------- /.github/workflows/publish-container.yml: -------------------------------------------------------------------------------- 1 | name: Publish container 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - main 8 | - cloud 9 | # Publish semver tags as releases. 10 | tags: [ '*.*.*' ] 11 | pull_request: 12 | branches: [ master ] 13 | paths: 14 | - 'Dockerfile' 15 | workflow_dispatch: 16 | 17 | env: 18 | REGISTRY: ghcr.io 19 | IMAGE_NAME: ${{ github.repository }} 20 | 21 | 22 | jobs: 23 | build: 24 | runs-on: ubuntu-latest 25 | permissions: 26 | contents: read 27 | packages: write 28 | steps: 29 | - name: Checkout repository 30 | uses: actions/checkout@v4 31 | 32 | # Set up QEMU for multi-platform build 33 | - name: Set up QEMU 34 | uses: docker/setup-qemu-action@v3 35 | 36 | - name: Set up Docker Buildx 37 | uses: docker/setup-buildx-action@v3 38 | 39 | # Login against a Docker registry except on PR 40 | # https://github.com/docker/login-action 41 | - name: Log into registry ${{ env.REGISTRY }} 42 | if: github.event_name != 'pull_request' 43 | uses: docker/login-action@v3 44 | with: 45 | registry: ${{ env.REGISTRY }} 46 | username: ${{ github.actor }} 47 | password: ${{ secrets.GITHUB_TOKEN }} 48 | 49 | # Extract metadata (tags, labels) for Docker 50 | # https://github.com/docker/metadata-action 51 | - name: Extract Docker metadata 52 | id: meta 53 | uses: docker/metadata-action@v5 54 | with: 55 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 56 | 57 | # Build and push Docker image with Buildx (don't push on PR) 58 | # https://github.com/docker/build-push-action 59 | - name: Build and push Docker image 60 | uses: docker/build-push-action@v5 61 | with: 62 | context: . 
63 | platforms: linux/amd64,linux/arm64 64 | push: ${{ github.event_name != 'pull_request' }} 65 | tags: ${{ steps.meta.outputs.tags }} 66 | labels: ${{ steps.meta.outputs.labels }} 67 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | *.retry 3 | /ansible/myCA 4 | viptela_api_cookie 5 | topo.virl 6 | /ansible/licenses 7 | /ansible/files/vault-password-file 8 | /ansible/facts_cache 9 | /ansible/inventory/sdwan_inventory.yml 10 | /ansible/day_-1/group_vars/all/local.yml 11 | /ansible/day_0/group_vars/all/local.yml 12 | /ansible/day_1/group_vars/all/local.yml 13 | .virlrc 14 | /.virl 15 | set*.sh 16 | *env_*.sh 17 | logs/ 18 | bin/key.json 19 | # python 20 | __pycache__/ 21 | *.py[cod] 22 | *$py.class 23 | # venv 24 | .env 25 | .venv 26 | env/ 27 | venv/ 28 | ENV/ 29 | env.bak/ 30 | venv.bak/ 31 | # IDE project files 32 | .vscode/ 33 | *.code-workspace 34 | .idea 35 | config/config_20_9.yaml 36 | config/config_20_10.yaml 37 | docs/NOTES.md 38 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "terraform-sdwan"] 2 | path = terraform-sdwan 3 | url = https://github.com/CiscoDevNet/terraform-sdwan.git 4 | branch = cloud 5 | [submodule "sdwan-edge"] 6 | path = sdwan-edge 7 | url = https://github.com/CiscoDevNet/sdwan-edge.git 8 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:22.04 2 | 3 | # Moving on from Alpine and simplifying, see: 4 | # https://pythonspeed.com/articles/alpine-docker-python/ 5 | # https://pythonspeed.com/articles/base-image-python-docker-images/ 6 | 7 | ARG build_date=$(date -u +"%Y-%m-%dT%H:%M:%SZ") 8 | # After releasing 1.5.5, 
Hashicorp changed the license from MPL to BSL. 9 | # Unless OSPO/Cisco Legal confirms that we can keep using Terraform under the 10 | # new license, we shouldn't update this version. 11 | ARG terraform_version=1.5.5 12 | ARG TARGETARCH 13 | 14 | LABEL org.opencontainers.image.title="Cisco SD-WAN" \ 15 | org.opencontainers.image.description="Cisco SD-WAN DevOps" \ 16 | org.opencontainers.image.vendor="Cisco Systems" \ 17 | org.opencontainers.image.created="${build_date}" \ 18 | org.opencontainers.image.url="https://github.com/CiscoDevNet/sdwan-devops" 19 | 20 | ENV DEBIAN_FRONTEND noninteractive 21 | 22 | RUN apt update && \ 23 | apt install -y curl genisoimage git python-is-python3 python3-pip unzip && \ 24 | apt clean && \ 25 | rm -rf /var/lib/apt/lists/* 26 | 27 | RUN curl -#O https://releases.hashicorp.com/terraform/${terraform_version}/terraform_${terraform_version}_linux_${TARGETARCH}.zip 28 | RUN unzip terraform_${terraform_version}_linux_${TARGETARCH}.zip 29 | RUN mv terraform /usr/bin 30 | RUN rm terraform_${terraform_version}_linux_${TARGETARCH}.zip 31 | 32 | # This adds almost 1GB to the container size. Installing the .deb package has 33 | # similar results, and it doesn't support arm64. 
We really need to find a way 34 | # to avoid installing azure-cli 35 | #RUN pip install azure-cli 36 | 37 | COPY requirements.txt /tmp/requirements.txt 38 | RUN pip install -r /tmp/requirements.txt 39 | 40 | ENV ANSIBLE_HOST_KEY_CHECKING=false 41 | ENV ANSIBLE_RETRY_FILES_ENABLED=false 42 | ENV ANSIBLE_SSH_PIPELINING=true 43 | ENV ANSIBLE_LOCAL_TMP=/tmp 44 | ENV ANSIBLE_REMOTE_TMP=/tmp 45 | 46 | RUN git clone https://github.com/CiscoDevNet/sastre-ansible /tmp/sastre-ansible && \ 47 | ansible-galaxy collection build /tmp/sastre-ansible/cisco/sastre --output-path /tmp/sastre-ansible && \ 48 | ansible-galaxy collection install -f /tmp/sastre-ansible/cisco-sastre-1.0.20.tar.gz && \ 49 | rm -fr /tmp/sastre-ansible 50 | 51 | COPY sdwan_config_builder/ /tmp/sdwan_config_builder/ 52 | RUN pip install /tmp/sdwan_config_builder && \ 53 | rm -fr /tmp/sdwan_config_builder 54 | 55 | WORKDIR /ansible 56 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | CISCO SAMPLE CODE LICENSE 2 | Version 1.0 3 | Copyright (c) 2017 Cisco and/or its affiliates 4 | 5 | These terms govern this Cisco example or demo source code and its 6 | associated documentation (together, the "Sample Code"). By downloading, 7 | copying, modifying, compiling, or redistributing the Sample Code, you 8 | accept and agree to be bound by the following terms and conditions (the 9 | "License"). If you are accepting the License on behalf of an entity, you 10 | represent that you have the authority to do so (either you or the entity, 11 | "you"). Sample Code is not supported by Cisco TAC and is not tested for 12 | quality or performance. This is your only license to the Sample Code and 13 | all rights not expressly granted are reserved. 14 | 15 | 1. 
LICENSE GRANT: Subject to the terms and conditions of this License, 16 | Cisco hereby grants to you a perpetual, worldwide, non-exclusive, non- 17 | transferable, non-sublicensable, royalty-free license to copy and 18 | modify the Sample Code in source code form, and compile and 19 | redistribute the Sample Code in binary/object code or other executable 20 | forms, in whole or in part, solely for use with Cisco products and 21 | services. For interpreted languages like Java and Python, the 22 | executable form of the software may include source code and 23 | compilation is not required. 24 | 25 | 2. CONDITIONS: You shall not use the Sample Code independent of, or to 26 | replicate or compete with, a Cisco product or service. Cisco products 27 | and services are licensed under their own separate terms and you shall 28 | not use the Sample Code in any way that violates or is inconsistent 29 | with those terms (for more information, please visit: 30 | www.cisco.com/go/terms. 31 | 32 | 3. OWNERSHIP: Cisco retains sole and exclusive ownership of the Sample 33 | Code, including all intellectual property rights therein, except with 34 | respect to any third-party material that may be used in or by the 35 | Sample Code. Any such third-party material is licensed under its own 36 | separate terms (such as an open source license) and all use must be in 37 | full accordance with the applicable license. This License does not 38 | grant you permission to use any trade names, trademarks, service 39 | marks, or product names of Cisco. If you provide any feedback to Cisco 40 | regarding the Sample Code, you agree that Cisco, its partners, and its 41 | customers shall be free to use and incorporate such feedback into the 42 | Sample Code, and Cisco products and services, for any purpose, and 43 | without restriction, payment, or additional consideration of any kind. 
44 | If you initiate or participate in any litigation against Cisco, its 45 | partners, or its customers (including cross-claims and counter-claims) 46 | alleging that the Sample Code and/or its use infringe any patent, 47 | copyright, or other intellectual property right, then all rights 48 | granted to you under this License shall terminate immediately without 49 | notice. 50 | 51 | 4. LIMITATION OF LIABILITY: CISCO SHALL HAVE NO LIABILITY IN CONNECTION 52 | WITH OR RELATING TO THIS LICENSE OR USE OF THE SAMPLE CODE, FOR 53 | DAMAGES OF ANY KIND, INCLUDING BUT NOT LIMITED TO DIRECT, INCIDENTAL, 54 | AND CONSEQUENTIAL DAMAGES, OR FOR ANY LOSS OF USE, DATA, INFORMATION, 55 | PROFITS, BUSINESS, OR GOODWILL, HOWEVER CAUSED, EVEN IF ADVISED OF THE 56 | POSSIBILITY OF SUCH DAMAGES. 57 | 58 | 5. DISCLAIMER OF WARRANTY: SAMPLE CODE IS INTENDED FOR EXAMPLE PURPOSES 59 | ONLY AND IS PROVIDED BY CISCO "AS IS" WITH ALL FAULTS AND WITHOUT 60 | WARRANTY OR SUPPORT OF ANY KIND. TO THE MAXIMUM EXTENT PERMITTED BY 61 | LAW, ALL EXPRESS AND IMPLIED CONDITIONS, REPRESENTATIONS, AND 62 | WARRANTIES INCLUDING, WITHOUT LIMITATION, ANY IMPLIED WARRANTY OR 63 | CONDITION OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON- 64 | INFRINGEMENT, SATISFACTORY QUALITY, NON-INTERFERENCE, AND ACCURACY, 65 | ARE HEREBY EXCLUDED AND EXPRESSLY DISCLAIMED BY CISCO. CISCO DOES NOT 66 | WARRANT THAT THE SAMPLE CODE IS SUITABLE FOR PRODUCTION OR COMMERCIAL 67 | USE, WILL OPERATE PROPERLY, IS ACCURATE OR COMPLETE, OR IS WITHOUT 68 | ERROR OR DEFECT. 69 | 70 | 6. GENERAL: This License shall be governed by and interpreted in 71 | accordance with the laws of the State of California, excluding its 72 | conflict of laws provisions. You agree to comply with all applicable 73 | United States export laws, rules, and regulations. 
If any provision of 74 | this License is judged illegal, invalid, or otherwise unenforceable, 75 | that provision shall be severed and the rest of the License shall 76 | remain in full force and effect. No failure by Cisco to enforce any of 77 | its rights related to the Sample Code or to a breach of this License 78 | in a particular situation will act as a waiver of such rights. In the 79 | event of any inconsistencies with any other terms, this License shall 80 | take precedence. 81 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # SDWAN DevOps 2 | 3 | This repo contains a set of tools to automate workflows and build CI/CD pipelines for Cisco SDWAN. 4 | 5 | > Note: The tools in this repo only work from a Unix environment with Docker (e.g. Linux, MacOS, etc.) due to issues with Ansible and file permissions mapping between Windows and the Linux container used in play.sh. WSL2 may fix this issue and we will revisit when WSL2 is released. 6 | 7 | All operations are run out of the sdwan-devops directory: `cd sdwan-devops` 8 | 9 | The folder `sdwan-edge` allows the deployment of C8000v in AWS, Azure, GCP. Openstack and VMware. 10 | 11 | The folder `sdwan-terraform` allows the deployment of SDWAN Controllers in AWS, Azure and VMware. 12 | 13 | A video demonstration of the use of this repository is available on [Vidcast](https://app.vidcast.io/share/1e934c26-ece4-4167-a986-4db17f125423). 14 | 15 | ## Clone repository 16 | 17 | Clone the sdwan-devops repo using the main branch (default: origin/main): 18 | 19 | ```shell 20 | git clone --single-branch --recursive https://github.com/ciscodevnet/sdwan-devops.git 21 | ``` 22 | 23 | Make sure you use `--recursive` to also clone folders sdwan-edge and terraform-sdwan. 24 | 25 | ## Openssl version3 26 | 27 | If you are on a Mac: we need openssl version3, while on mac this is LibreSSL. 
28 | 29 | Upgrade openssl: 30 | 31 | ```shell 32 | brew install openssl@3 33 | ``` 34 | 35 | ## Software Dependencies 36 | 37 | All software dependencies have been rolled into a Docker container. Ansible playbooks are launched via the container using the play.sh shell script. 38 | 39 | All you need is a valid installation of docker on your system. 40 | 41 | > Note: The Dockerfile included in this repo is used to automatically build the sdwan-devops container image and publish it to the GitHub Container Registry. For a detailed list of the dependencies required to run the playbooks, refer to the Dockerfile. 42 | 43 | ## Running playbooks via the Docker container 44 | 45 | To run playbooks in this repo, use the play.sh shell script as shown below: 46 | 47 | - `./play.sh ` 48 | 49 | This will start the docker container published in the GitHub Container Registry, run the playbooks inside the container and remove it once finished. 50 | 51 | ## Deploying Controllers on AWS 52 | 53 | The sdwan-devops can be used to instantiate controllers on AWS. 54 | 55 | [Deploying Controllers on AWS](docs/deploying_controllers_cloud.md) 56 | 57 | - Deploy vBond, vSmart and vManage controllers in a VPC 58 | - Provides bootstrap configuration 59 | 60 | ## Deploying C8000v 61 | 62 | C8000v can be deployed in a transit VPC/VNET in AWS, Azure and GCP, and can also be deployed on VMware and Openstack. 63 | 64 | [Deploying C8000v](docs/deploying_edges_cloud.md) 65 | 66 | - Generates bootstrap configuration (cloud-init format) 67 | - Creates transit VPC if required 68 | - Deploy C8000v 69 | 70 | ## Simulation 71 | 72 | Simulation can be used for developing new deployments as well as testing changes to current deployments. Simulation capabilities are provided by CML^2 or VMware. The [Ansible CML^2 Modules](https://github.com/ciscodevnet/ansible-virl) are used to automate deployments in CML^2. 
The [Terraform Modules](https://github.com/CiscoDevNet/terraform-sdwan) are used to automate deployments in VMware. 73 | 74 | [Simulation](docs/simulation.md) 75 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | # TODO for sdwan-devops 2 | 3 | There's a LOT to be worked on to make SD-WAN infrastructure-as-code a reality first, and as streamlined as possible eventually. Feel free to add more tasks or specific sub-tasks, and put your name to some you'd like to work on. 4 | 5 | Some of the tasks listed here relate to the `terraform-sdwan` submodule 6 | 7 | ## Tasks 8 | 9 | ### Generic 10 | 11 | - [ ] **Update README to cover changes** 12 | - [x] Switch to the public [sdwan-edge](https://github.com/CiscoDevNet/sdwan-edge/) code (imported as a Git submodule) for deploying cEdges. 13 | - [x] Fix the network interface detection issue 14 | - [ ] Fix the `deviceIP` issue affecting 20.9.1 (and DHCP deployments) in the `config-sdwan.yml` play 15 | - [ ] Discuss Ansible variable approach with some Ansible experts from the team 16 | - [ ] Take advantage of Ansible's tags feature 17 | - [x] Support for specifying password and encoding it 18 | - [ ] Static addressing has some conflicting configurations, clean that up 19 | - [x] Update Docker container (pull in Nathan's work) 20 | - [x] Split out day 1 automation (pull in Marcelo's work) 21 | - [x] Decouple serial file management from certificate authority, as it is related to edge deployment 22 | - [x] Optimize Docker image (it's very large, and every little update changes all layers!) 23 | 24 | ### Generic multi-infra support 25 | 26 | We should be able to use the same Ansible workflow to deploy both on-prem and all public clouds. 
27 | 28 | - [x] Add generic support for bastion hosts / proxies so that we can deploy behind NAT (or firewalls) with static IPs 29 | - [x] Create infra specific Jinja templates as necessary, with the infra name in the file name, and defined as a variable in the inventory file 30 | - [ ] Adapt the AWS code to use the inventory approach of `control.tfvars` to define the VMs to be deployed and their `user-data` (terraform-sdwan). 31 | - [x] **Alternatively, create an AWS specific `control.tfvars`, once the infra specific Jinja task is done.** (Lori) 32 | - [x] Add support for *optional* VPC/network creation 33 | - [x] Pick up terraform outputs from VPC creation as Ansible facts 34 | - [ ] Support deploying into existing VPC 35 | - [x] Support for ACLs or adding IP ranges to the security group 36 | - [ ] On AWS, if possible, use the APIs to check if quotas are enough for VPC and elastic IP before attempting to deploy. For bonus points, request quota increase automatically 37 | - [ ] Support IPv6 Elastic IPs on AWS 38 | - [ ] Support firewall rules for edges 39 | - [ ] Support parallel deployments on GCP (like on AWS) 40 | 41 | ### Integrations with SaaS tools 42 | 43 | - [ ] [Terraform Cloud](https://app.terraform.io/) integration (for remote state management, multi-tenancy and better CI/CD integration) 44 | - [ ] [CircleCI](https://app.circleci.com/) integration (Lori) 45 | - [ ] Build the Docker container 46 | - [ ] Test the Docker container (how?) 47 | - [ ] Publish the Docker container (based on branch and Git tag: different flavors, dev, release) 48 | 49 | ## Questions 50 | 51 | - [ ] Where do we get licenses from? 52 | - [ ] Why are the playbooks not in a `playbooks` directory? 53 | - [ ] Why is there not a script to set all env variables? 54 | - [ ] Why are the top level steps in, for example, virl-hq1.md, not in a script? 55 | - [ ] Can we replace `inventory = ./inventory/hq1` with an environment variable? 
56 | - [ ] Is `./ansible/inventory/hq2/terraform.py` used by anything, or is it just a leftover from the early days of the repo? 57 | - [x] What is the equivalent of TF destroy? 58 | - That would be `./play.sh /ansible/day_0/clean-vmware.yml`, which does a bit more than just TF destroy, but it fits the bill. 59 | -------------------------------------------------------------------------------- /ansible/.ansible-lint: -------------------------------------------------------------------------------- 1 | exclude_paths: 2 | - facts_cache/ 3 | - files/ 4 | - licenses/ 5 | - myCA/ 6 | 7 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | roles_path = roles/ 3 | host_key_checking = False 4 | inventory = ./inventory/sdwan_inventory.yml 5 | inventory_plugins = ./roles/ansible-virl/inventory_plugins 6 | interpreter_python = auto_silent 7 | hash_behaviour = merge 8 | library = ../python-viptela/ansible/modules/viptela 9 | module_utils = ../python-viptela/ansible/module_utils 10 | force_valid_group_names = ignore 11 | gathering = smart 12 | stdout_callback = debug 13 | fact_caching = yaml 14 | fact_caching_connection = facts_cache 15 | fact_caching_timeout = 0 16 | 17 | [inventory] 18 | # enable_plugins = host_list, script, auto, yaml 19 | enable_plugins = host_list, script, auto, yaml, virl2 20 | 21 | [persistent_connection] 22 | connect_timeout = 60 23 | command_timeout = 60 24 | 25 | [colors] 26 | debug = blue 27 | -------------------------------------------------------------------------------- /ansible/day_-1/build-ca.yml: -------------------------------------------------------------------------------- 1 | - name: Create local CA 2 | hosts: localhost 3 | connection: local 4 | tags: 5 | - control 6 | - CA 7 | any_errors_fatal: true 8 | gather_facts: no 9 | tasks: 10 | - name: Ensure directory exists for local self-signed TLS 
certs. 11 | file: 12 | path: "{{ sdwan_cert_dir }}" 13 | state: directory 14 | 15 | - name: Generate an OpenSSL private key. 16 | openssl_privatekey: 17 | cipher: auto 18 | passphrase: "{{ sdwan_ca_passphrase }}" 19 | path: "{{ sdwan_cert_dir }}/myCA.key" 20 | 21 | - name: Generate an OpenSSL CSR. 22 | openssl_csr: 23 | path: "{{ sdwan_cert_dir }}/myCA.csr" 24 | privatekey_path: "{{ sdwan_cert_dir }}/myCA.key" 25 | privatekey_passphrase: "{{ sdwan_ca_passphrase }}" 26 | basic_constraints_critical: true 27 | basic_constraints: 28 | - CA:TRUE 29 | common_name: CiscoCA.local 30 | organizational_unit_name: "{{ vmanage_org }}" 31 | 32 | - name: Generate a Self Signed OpenSSL certificate. 33 | openssl_certificate: 34 | path: "{{ sdwan_cert_dir }}/myCA.pem" 35 | privatekey_path: "{{ sdwan_cert_dir }}/myCA.key" 36 | privatekey_passphrase: "{{ sdwan_ca_passphrase }}" 37 | csr_path: "{{ sdwan_cert_dir }}/myCA.csr" 38 | # select_crypto_backend: pyopenssl 39 | provider: selfsigned -------------------------------------------------------------------------------- /ansible/day_-1/clean-virl.yml: -------------------------------------------------------------------------------- 1 | - hosts: sdwan 2 | connection: local 3 | gather_facts: no 4 | tags: 5 | - always 6 | tasks: 7 | - name: Delete Certificates 8 | file: 9 | path: "{{ item }}" 10 | state: absent 11 | loop: 12 | - "{{ sdwan_cert_dir }}/{{ inventory_hostname }}.csr" 13 | - "{{ sdwan_cert_dir }}/{{ inventory_hostname }}.crt" 14 | 15 | - hosts: sdwan 16 | connection: local 17 | gather_facts: no 18 | tags: 19 | - vmanage 20 | vars: 21 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 22 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 23 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 24 | tasks: 25 | - block: 26 | - name: Remove device from vManage 27 | vmanage_device: 28 | host: "{{ vmanage_ip }}" 29 | user: "{{ 
vmanage_user }}" 30 | password: "{{ vmanage_pass }}" 31 | uuid: "{{ viptela.uuid | default(omit) }}" 32 | name: "{{ inventory_hostname }}" 33 | personality: "{{ viptela.personality | default(omit) }}" 34 | state: absent 35 | delegate_to: localhost 36 | register: result 37 | ignore_errors: yes 38 | when: vmanage_ip is defined and vmanage_ip 39 | 40 | when: inventory_hostname in groups.sdwan 41 | 42 | 43 | - hosts: virl_hosts 44 | connection: local 45 | gather_facts: no 46 | tags: 47 | - always 48 | tasks: 49 | - name: Stop the node 50 | virl_node: 51 | host: "{{ virl_host }}" 52 | user: "{{ virl_username }}" 53 | password: "{{ virl_password }}" 54 | name: "{{ inventory_hostname }}" 55 | lab: "{{ virl_lab }}" 56 | state: stopped 57 | tags: 58 | - stop 59 | - wipe 60 | 61 | - name: Wipe the node 62 | virl_node: 63 | host: "{{ virl_host }}" 64 | user: "{{ virl_username }}" 65 | password: "{{ virl_password }}" 66 | name: "{{ inventory_hostname }}" 67 | lab: "{{ virl_lab }}" 68 | state: wiped 69 | tags: 70 | - wipe 71 | 72 | - hosts: localhost 73 | connection: local 74 | gather_facts: no 75 | tags: 76 | - delete 77 | tasks: 78 | - name: Delete the lab 79 | virl_lab: 80 | host: "{{ virl_host }}" 81 | user: "{{ virl_username }}" 82 | password: "{{ virl_password }}" 83 | lab: "{{ virl_lab }}" 84 | state: absent 85 | -------------------------------------------------------------------------------- /ansible/day_-1/group_vars/all/placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/sdwan-devops/f1893cd70902de91a4ca6337f9b004caa71749ae/ansible/day_-1/group_vars/all/placeholder -------------------------------------------------------------------------------- /ansible/day_0/build-control-plane.yml: -------------------------------------------------------------------------------- 1 | - import_playbook: deploy-control-plane.yml 2 | 3 | - import_playbook: config-control-plane.yml 4 | 5 | - name: 
Print IP summary 6 | hosts: localhost 7 | connection: local 8 | gather_facts: no 9 | tasks: 10 | - debug: 11 | msg: "vManage IP: {{ sdwan_vmanage }}\nvBond IP: {{ sdwan_vbond }}" 12 | -------------------------------------------------------------------------------- /ansible/day_0/check-reqs.yml: -------------------------------------------------------------------------------- 1 | - name: Check playbook requirements 2 | hosts: localhost 3 | connection: network_cli 4 | tags: 5 | - network 6 | - CA 7 | any_errors_fatal: true 8 | gather_facts: no 9 | tasks: 10 | - name: Check for the license file 11 | stat: 12 | path: "{{ sdwan_serial_file }}" 13 | register: stat_result 14 | delegate_to: localhost 15 | run_once: yes 16 | 17 | - assert: 18 | that: 19 | - stat_result.stat.exists 20 | - vmanage_org != "" 21 | msg: "'vmanage_org' must be defined and {{ sdwan_serial_file }} must exist. Verify the requirements in README are met." 22 | delegate_to: localhost 23 | run_once: yes -------------------------------------------------------------------------------- /ansible/day_0/check-vmanage.yml: -------------------------------------------------------------------------------- 1 | - name: Wait for vmanage mgmt interface to be available (this could take a few minutes) 2 | hosts: localhost 3 | connection: local 4 | tags: 5 | - check_control 6 | - check_all 7 | - control 8 | - CA 9 | any_errors_fatal: true 10 | gather_facts: no 11 | vars: 12 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 13 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 14 | environment: "{{ proxy_env }}" 15 | tasks: 16 | - name: Wait until the mgmt interface comes up 17 | virl_lab_facts: 18 | host: "{{ virl_host }}" 19 | user: "{{ virl_username }}" 20 | password: "{{ virl_password }}" 21 | lab: "{{ virl_lab }}" 22 | register: result 23 | until: result.virl_facts.nodes[vmanage_host][vmanage_mgmt_interface] | default(False) 24 | when: vmanage_mgmt_interface == 
'ansible_host' 25 | retries: 12 26 | delay: 10 27 | 28 | - name: Refresh Inventory 29 | meta: refresh_inventory 30 | 31 | - set_fact: 32 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 33 | 34 | - debug: 35 | var: vmanage_ip 36 | 37 | - name: Verify that vManage is fully operational 38 | hosts: localhost 39 | connection: local 40 | tags: 41 | - check_control 42 | - check_all 43 | - control 44 | - CA 45 | vars: 46 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 47 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 48 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 49 | any_errors_fatal: true 50 | gather_facts: no 51 | environment: "{{ proxy_env }}" 52 | tasks: 53 | - name: Waiting for vManage API to start 54 | uri: 55 | url: "https://{{ vmanage_ip }}/dataservice/system/device/controllers" 56 | method: POST 57 | body: 58 | j_username: "{{ vmanage_user }}" 59 | j_password: "{{ vmanage_pass }}" 60 | body_format: form-urlencoded 61 | return_content: yes 62 | validate_certs: no 63 | no_log: true 64 | register: uri_results 65 | delegate_to: localhost 66 | failed_when: false 67 | until: uri_results.msg.find("OK") != -1 68 | retries: 75 69 | delay: 15 70 | -------------------------------------------------------------------------------- /ansible/day_0/clean-certs.yml: -------------------------------------------------------------------------------- 1 | - hosts: sdwan_control 2 | connection: local 3 | gather_facts: no 4 | tags: 5 | - certs 6 | tasks: 7 | 8 | - name: Delete Certificates 9 | file: 10 | path: "{{ item }}" 11 | state: absent 12 | loop: 13 | - "{{ sdwan_cert_dir }}/{{ inventory_hostname }}.csr" 14 | - "{{ sdwan_cert_dir }}/{{ inventory_hostname }}.crt" -------------------------------------------------------------------------------- /ansible/day_0/clean-control-plane.yml: 
-------------------------------------------------------------------------------- 1 | # TODO There's a lot of repeating code here, needs generalization/refactor 2 | 3 | - import_playbook: clean-certs.yml 4 | tags: [control] 5 | 6 | - name: Delete Control Plane VMs 7 | hosts: localhost 8 | connection: local 9 | tags: 10 | - terraform 11 | - control 12 | any_errors_fatal: true 13 | gather_facts: no 14 | tasks: 15 | - name: Check for control.tfvars file 16 | stat: 17 | path: "{{ terraform_project_path[sdwan_control_infra]['control'] }}/{{ sdwan_control_infra }}_control.tfvars" 18 | register: control_stat_result 19 | 20 | - name: Terraform Destroy Control Plane VMs 21 | community.general.terraform: 22 | project_path: "{{ terraform_project_path[sdwan_control_infra]['control'] }}" 23 | state: absent 24 | workspace: "{{ sdwan_control_infra }}_control" 25 | variables_file: "{{ sdwan_control_infra }}_control.tfvars" 26 | when: control_stat_result.stat.exists == true 27 | 28 | - name: Delete Control Plane Network 29 | hosts: localhost 30 | connection: local 31 | tags: 32 | - terraform 33 | - network 34 | any_errors_fatal: true 35 | gather_facts: no 36 | tasks: 37 | - name: Check for network.tfvars file 38 | stat: 39 | path: "{{ terraform_project_path[sdwan_control_infra]['network'] }}/{{ sdwan_control_infra }}_network.tfvars" 40 | register: network_stat_result 41 | 42 | - name: Terraform Destroy Control Plane Network 43 | community.general.terraform: 44 | project_path: "{{ terraform_project_path[sdwan_control_infra]['network'] }}" 45 | state: absent 46 | workspace: "{{ sdwan_control_infra }}_network" 47 | variables_file: "{{ sdwan_control_infra }}_network.tfvars" 48 | when: network_stat_result.stat.exists == true 49 | 50 | - name: Remove terraform state files 51 | hosts: localhost 52 | connection: local 53 | tags: 54 | - delete 55 | - never 56 | gather_facts: no 57 | tasks: 58 | - name: Delete control state 59 | file: 60 | state: absent 61 | path: "{{ 
terraform_project_path[sdwan_control_infra]['control'] }}/terraform.tfstate.d/{{ sdwan_control_infra }}_control/terraform.tfstate" 62 | 63 | - name: Delete control network state 64 | file: 65 | state: absent 66 | path: "{{ terraform_project_path[sdwan_control_infra]['network'] }}/terraform.tfstate.d/{{ sdwan_control_infra }}_network/terraform.tfstate" 67 | -------------------------------------------------------------------------------- /ansible/day_0/clean-edges.yml: -------------------------------------------------------------------------------- 1 | - import_playbook: remove-device.yml 2 | vars: 3 | passed: sdwan_edge 4 | tags: [edges] 5 | 6 | - name: Delete Edge VMs 7 | hosts: sdwan_edge 8 | connection: local 9 | tags: 10 | - terraform 11 | - edges 12 | any_errors_fatal: true 13 | gather_facts: no 14 | tasks: 15 | - name: Check for edge .tfvars file 16 | stat: 17 | path: "{{ terraform_project_path[infra]['edges'] }}/{{ infra }}_edges_{{ inventory_hostname }}.tfvars" 18 | register: edges_stat_result 19 | 20 | - name: Terraform Destroy Edge VMs 21 | community.general.terraform: 22 | project_path: "{{ terraform_project_path[infra]['edges'] }}" 23 | state: absent 24 | workspace: "{{ infra }}_edges_{{ inventory_hostname }}" 25 | variables_file: "{{ infra }}_edges_{{ inventory_hostname }}.tfvars" 26 | when: edges_stat_result.stat.exists == true 27 | 28 | - name: Delete Edge Networks 29 | hosts: sdwan_edge 30 | connection: local 31 | tags: 32 | - terraform 33 | - edge_network 34 | any_errors_fatal: true 35 | gather_facts: no 36 | tasks: 37 | - name: Check for edge network .tfvars file 38 | stat: 39 | path: "{{ terraform_project_path[infra]['edge_network'] }}/{{ infra }}_edge_network_{{ inventory_hostname }}.tfvars" 40 | register: edge_networks_stat_result 41 | 42 | - name: Terraform Destroy Edge Networks 43 | community.general.terraform: 44 | project_path: "{{ terraform_project_path[infra]['edge_network'] }}" 45 | state: absent 46 | workspace: "{{ infra 
}}_edge_network_{{ inventory_hostname }}" 47 | variables_file: "{{ infra }}_edge_network_{{ inventory_hostname }}.tfvars" 48 | when: edge_networks_stat_result.stat.exists == true 49 | 50 | - name: Remove terraform state files 51 | hosts: sdwan_edge 52 | connection: local 53 | tags: 54 | - delete 55 | - never 56 | gather_facts: no 57 | tasks: 58 | - name: Delete edge state 59 | file: 60 | state: absent 61 | path: "{{ terraform_project_path[infra]['edges'] }}/terraform.tfstate.d/{{ infra }}_edges_{{ inventory_hostname }}/terraform.tfstate" 62 | 63 | - name: Delete edge network state 64 | file: 65 | state: absent 66 | path: "{{ terraform_project_path[infra]['edge_network'] }}/terraform.tfstate.d/{{ infra }}_edge_network_{{ inventory_hostname }}/terraform.tfstate" 67 | -------------------------------------------------------------------------------- /ansible/day_0/config-certificates.yml: -------------------------------------------------------------------------------- 1 | - name: Configure certificates 2 | hosts: localhost 3 | vars: 4 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 5 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 6 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 7 | vbond_controller: "{{ groups.vbond_hosts[0] }}" 8 | tags: 9 | - control 10 | - vmanage 11 | any_errors_fatal: true 12 | gather_facts: no 13 | environment: "{{ proxy_env }}" 14 | tasks: 15 | - name: Add the Enterprise CA 16 | cisco.sastre.settings_enterprise_ca: 17 | address: "{{ vmanage_ip }}" 18 | user: "{{ vmanage_user }}" 19 | password: "{{ vmanage_pass }}" 20 | root_cert: "{{ lookup('file', '{{ sdwan_ca_cert }}') }}" 21 | register: result 22 | retries: 10 23 | delay: 10 24 | until: result is not failed 25 | 26 | - name: Generate CSRs 27 | vmanage_device_certificate: 28 | host: "{{ vmanage_ip }}" 29 | user: "{{ vmanage_user }}" 30 | password: "{{ vmanage_pass }}" 31 | name: "{{ 
item }}" 32 | transport_ip: "{{ hostvars[item].vpn0_ip | ansible.utils.ipaddr('address')}}" 33 | state: csr 34 | loop: "{{ groups.sdwan_control }}" 35 | register: control_devices 36 | retries: 10 37 | delay: 10 38 | until: control_devices is not failed 39 | 40 | - name: Write out CSR 41 | copy: 42 | content: "{{ item.deviceCSR }}" 43 | dest: "{{ sdwan_cert_dir }}/{{ item.item }}.csr" 44 | loop: "{{ control_devices.results }}" 45 | delegate_to: localhost 46 | 47 | - name: Sign Controller Cert 48 | openssl_certificate: 49 | csr_path: "{{ sdwan_cert_dir }}/{{ item }}.csr" 50 | path: "{{ sdwan_cert_dir }}/{{ item }}.crt" 51 | provider: ownca 52 | ownca_path: "{{ sdwan_cert_dir }}/myCA.pem" 53 | ownca_privatekey_path: "{{ sdwan_cert_dir }}/myCA.key" 54 | ownca_privatekey_passphrase: "{{ sdwan_ca_passphrase }}" 55 | loop: "{{ groups.sdwan_control }}" 56 | delegate_to: localhost 57 | 58 | - name: Add Certificate to Control Hosts 59 | vmanage_device_certificate: 60 | host: "{{ vmanage_ip }}" 61 | user: "{{ vmanage_user }}" 62 | password: "{{ vmanage_pass }}" 63 | name: "{{ item }}" 64 | transport_ip: "{{ hostvars[item].vpn0_ip | ansible.utils.ipaddr('address')}}" 65 | cert: "{{lookup('file', '{{ sdwan_cert_dir }}/{{ item }}.crt')}}" 66 | loop: "{{ groups.sdwan_control }}" 67 | register: result 68 | retries: 10 69 | delay: 10 70 | until: result is not failed 71 | 72 | - name: Push Certs to Controllers 73 | vmanage_device_certificate: 74 | host: "{{ vmanage_ip }}" 75 | user: "{{ vmanage_user }}" 76 | password: "{{ vmanage_pass }}" 77 | state: push 78 | register: result 79 | retries: 10 80 | delay: 10 81 | until: result is not failed 82 | 83 | - name: Install Serial File 84 | vmanage_fileupload: 85 | host: "{{ vmanage_ip }}" 86 | user: "{{ vmanage_user }}" 87 | password: "{{ vmanage_pass }}" 88 | file: "{{ sdwan_serial_file }}" 89 | delegate_to: localhost 90 | register: result 91 | retries: 10 92 | delay: 10 93 | until: result is not failed 94 | 95 | - debug: 96 | msg: 
"vManage IP: {{ vmanage_ip }}" 97 | 98 | - debug: 99 | msg: "vManage external IP: {{ sdwan_vmanage }}" 100 | when: sdwan_vmanage is defined 101 | -------------------------------------------------------------------------------- /ansible/day_0/config-control-plane.yml: -------------------------------------------------------------------------------- 1 | - import_playbook: check-reqs.yml 2 | 3 | - import_playbook: check-vmanage.yml 4 | 5 | - import_playbook: config-vmanage.yml 6 | 7 | - import_playbook: config-certificates.yml 8 | -------------------------------------------------------------------------------- /ansible/day_0/config-vmanage.yml: -------------------------------------------------------------------------------- 1 | - name: Configure vmanage 2 | hosts: localhost 3 | vars: 4 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 5 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 6 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 7 | vbond_controller: "{{ groups.vbond_hosts[0] }}" 8 | tags: 9 | - control 10 | - vmanage 11 | any_errors_fatal: true 12 | gather_facts: no 13 | environment: "{{ proxy_env }}" 14 | tasks: 15 | - name: vManage Settings 16 | vmanage_settings: 17 | host: "{{ vmanage_ip }}" 18 | user: "{{ vmanage_user }}" 19 | password: "{{ vmanage_pass }}" 20 | organization: "{{ vmanage_org }}" 21 | vbond: "{{ sdwan_vbond }}" 22 | register: result 23 | retries: 30 24 | delay: 10 25 | until: result is not failed 26 | 27 | - name: Add Control Hosts 28 | vmanage_device: 29 | host: "{{ vmanage_ip }}" 30 | user: "{{ vmanage_user }}" 31 | password: "{{ vmanage_pass }}" 32 | device_username: "{{ hostvars[item].ansible_user }}" 33 | device_password: "{{ hostvars[item].ansible_password }}" 34 | name: "{{ item }}" 35 | personality: "{{ hostvars[item].sdwan_personality }}" 36 | transport_ip: "{{ hostvars[item].vpn0_ip | ansible.utils.ipaddr('address')}}" 37 | 
loop: "{{ groups.vbond_hosts + groups.vsmart_hosts }}" 38 | register: result 39 | retries: 10 40 | delay: 10 41 | until: result is not failed 42 | 43 | - debug: 44 | msg: "vManage IP: {{ vmanage_ip }}" 45 | 46 | - debug: 47 | msg: "vManage external IP: {{ sdwan_vmanage }}" 48 | when: sdwan_vmanage is defined 49 | -------------------------------------------------------------------------------- /ansible/day_0/deploy-control-plane.yml: -------------------------------------------------------------------------------- 1 | - import_playbook: terraform-apply.yml 2 | vars: 3 | infra: "{{ sdwan_control_infra }}" 4 | sdwan_component: network 5 | project_path: "{{ terraform_project_path[infra][sdwan_component] }}" 6 | when: sdwan_control_infra == "aws" 7 | 8 | - name: Generate day0 configs for control hosts 9 | hosts: sdwan_control 10 | connection: local 11 | gather_facts: no 12 | tasks: 13 | - name: Generating day0 config for virl_config_template 14 | set_fact: 15 | day0_config: "{{ lookup('template', virl_config_template) }}" 16 | when: virl_config_template is defined 17 | 18 | - import_playbook: terraform-apply.yml 19 | vars: 20 | infra: "{{ sdwan_control_infra }}" 21 | sdwan_component: control 22 | project_path: "{{ terraform_project_path[infra][sdwan_component] }}" 23 | -------------------------------------------------------------------------------- /ansible/day_0/get-bootstrap.yml: -------------------------------------------------------------------------------- 1 | - name: Get bootstrap 2 | hosts: "{{ passed }}" 3 | gather_facts: no 4 | tags: 5 | - bootstrap 6 | - edges 7 | connection: local 8 | vars: 9 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 10 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 11 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 12 | sdwan_vbond: "{{ hostvars['localhost']['sdwan_vbond'] }}" 13 | # TODO make this conditional 14 | ntp_server: 
"{{ ntp_servers[hostvars[inventory_hostname]['infra']] }}" 15 | ansible_network_os: ios 16 | generated_cloud_init: "{{ terraform_project_path[infra]['edges'] }}/cloud-init/ciscosdwan_cloud_init.{{ inventory_hostname }}.cfg" 17 | ca_cert: "{{ lookup('file', sdwan_ca_cert) | indent(3, True) | replace('\n', '\\n') }}" 18 | environment: "{{ hostvars['localhost']['proxy_env'] }}" 19 | serial: 1 20 | # This is a workaround for a bug in vmanage. When multiple requests 21 | # for boostrap are made to vmanage at the same time, it gets corrupted 22 | tasks: 23 | - name: Get day0 config 24 | cisco.sastre.device_bootstrap: 25 | address: "{{ vmanage_ip }}" 26 | user: "{{ vmanage_user }}" 27 | password: "{{ vmanage_pass }}" 28 | uuid: "{{ sdwan_uuid }}" 29 | include_default_root_certs: False 30 | register: result 31 | retries: 10 32 | delay: 10 33 | until: result is not failed 34 | delegate_to: localhost 35 | 36 | # replace filter is a hack to workaround cloud-init defect for enterprise ca in viptela 20.5 and up 37 | - name: Set variables 38 | set_fact: 39 | bootstrap: "{{ result.bootstrap_config | replace('- path: /etc/viptela/rootcert_reinstall', '- path: /usr/share/viptela/symantec-root-ca.crt')}}" 40 | cacheable: yes 41 | when: result.bootstrap_config is defined 42 | 43 | # VMware can't have hostname in cloud-init file name 44 | - name: Set correct file name for VMware cloud-init 45 | set_fact: 46 | generated_cloud_init: "{{ terraform_project_path[infra]['edges'] }}/cloud-init/ciscosdwan_cloud_init.cfg" 47 | when: infra == "vmware" 48 | 49 | - name: Save cloud-init to terraform directory 50 | template: 51 | src: templates/sdwan/cedge_user-data.j2 52 | dest: "{{ generated_cloud_init }}" 53 | when: '"multipart/mixed" not in bootstrap' 54 | 55 | - name: Save cloud-init to terraform directory (template is attached in vmanage) 56 | template: 57 | src: templates/sdwan/cedge_user-data_attached.j2 58 | dest: "{{ generated_cloud_init }}" 59 | when: '"multipart/mixed" in bootstrap' 60 
| -------------------------------------------------------------------------------- /ansible/day_0/group_vars/all/placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/sdwan-devops/f1893cd70902de91a4ca6337f9b004caa71749ae/ansible/day_0/group_vars/all/placeholder -------------------------------------------------------------------------------- /ansible/day_0/onboard-edges.yml: -------------------------------------------------------------------------------- 1 | - import_playbook: get-bootstrap.yml 2 | vars: 3 | passed: sdwan_edge 4 | 5 | - import_playbook: terraform-apply-edges.yml 6 | vars: 7 | sdwan_component: edge_network 8 | 9 | - import_playbook: terraform-apply-edges.yml 10 | vars: 11 | sdwan_component: edges 12 | -------------------------------------------------------------------------------- /ansible/day_0/remove-device.yml: -------------------------------------------------------------------------------- 1 | - name: Remove device from vManage 2 | hosts: "{{ passed }}" 3 | vars: 4 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 5 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 6 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 7 | connection: local 8 | gather_facts: no 9 | environment: "{{ hostvars['localhost']['proxy_env'] }}" 10 | tasks: 11 | - name: Remove device from vManage 12 | vmanage_device: 13 | host: "{{ vmanage_ip }}" 14 | user: "{{ vmanage_user }}" 15 | password: "{{ vmanage_pass }}" 16 | uuid: "{{ viptela.uuid | default(omit) }}" 17 | name: "{{ inventory_hostname }}" 18 | personality: "{{ viptela.personality | default(omit) }}" 19 | state: absent 20 | delegate_to: localhost 21 | register: result 22 | ignore_errors: yes 23 | when: vmanage_ip is defined and vmanage_ip 24 | -------------------------------------------------------------------------------- 
/ansible/day_0/templates/asa/cli.j2: -------------------------------------------------------------------------------- 1 | {# -------- #} 2 | {# System #} 3 | {# -------- #} 4 | {{ lookup('template', 'asa/cli/system.j2') }} 5 | ! 6 | username {{ ansible_user }} password {{ ansible_password }} 7 | aaa authorization exec LOCAL auto-enable 8 | ! 9 | {# ---------- #} 10 | {# Interfaces #} 11 | {# ---------- #} 12 | {{ lookup('template', 'asa/cli/interfaces.j2') }} 13 | ! 14 | {# ------------- #} 15 | {# Static Routes #} 16 | {# ------------- #} 17 | {{ lookup('template', 'asa/cli/static-routes.j2') }} 18 | ! 19 | {# ---- #} 20 | {# OSPF #} 21 | {# ---- #} 22 | {{ lookup('template', 'asa/cli/ospf.j2') }} 23 | ! 24 | {# --- #} 25 | {# NAT #} 26 | {# --- #} 27 | {{ lookup('template', 'asa/cli/nat.j2') }} 28 | ! 29 | {# ------------- #} 30 | {# Prefix_lists #} 31 | {# ------------- #} 32 | {{ lookup('template', 'asa/cli/prefix-lists.j2') }} 33 | ! 34 | {# ---- #} 35 | {# BGP #} 36 | {# ---- #} 37 | {{ lookup('template', 'asa/cli/bgp.j2') }} 38 | ! 39 | {# -------- #} 40 | {# Failover #} 41 | {# -------- #} 42 | {#{{ lookup('template', 'asa/cli/failover.j2') }}#} 43 | {#!#} 44 | same-security-traffic permit inter-interface 45 | ! 46 | http server enable 47 | http 0.0.0.0 0.0.0.0 management 48 | http 0.0.0.0 0.0.0.0 outside 49 | aaa authentication http console LOCAL 50 | ! 51 | ssh version 2 52 | crypto key generate rsa modulus 2048 53 | ssh key-exchange group dh-group14-sha1 54 | ssh 0.0.0.0 0.0.0.0 management 55 | ssh 0.0.0.0 0.0.0.0 outside 56 | aaa authentication ssh console LOCAL 57 | ! 58 | {# --- #} 59 | {# DNS #} 60 | {# --- #} 61 | {{ lookup('template', 'asa/cli/dns.j2') }} 62 | ! 63 | {# --- #} 64 | {# NTP #} 65 | {# --- #} 66 | {{ lookup('template', 'asa/cli/ntp.j2') }} 67 | ! 
68 | {# --------------- #} 69 | {# Smart Licensing #} 70 | {# --------------- #} 71 | {{ lookup('template', 'asa/cli/license.j2') }} 72 | 73 | -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/cli/bgp.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {# --- #} 3 | {# BGP #} 4 | {# --- #} 5 | {% if router is defined %} 6 | {% if router.bgp is defined %} 7 | router bgp {{ router.bgp.id }} 8 | {% if router.bgp.address_family is defined %} 9 | {% for address_family, address_family_config in router.bgp.address_family.items() %} 10 | address-family {{ address_family }} unicast 11 | {# Networks #} 12 | {% for network in address_family_config.networks|default([]) %} 13 | network {{ network.network|ipaddr('network') }} mask {{ network.network|ipaddr('netmask') }} 14 | {% endfor %} 15 | {% for aggregate in address_family_config.aggregate_address|default([]) %} 16 | {% set options = '' %} 17 | {% if aggregate.summary_only is sameas true %} 18 | {% set options = options + ' summary-only' %} 19 | {% endif %} 20 | aggregate-address {{ aggregate.network|ipaddr('network') }} {{ aggregate.network|ipaddr('netmask') }} {{ options | trim }} 21 | {% endfor %} 22 | {# Neighbors #} 23 | {% for neighbor in address_family_config.neighbors|default([]) %} 24 | neighbor {{ neighbor.id }} remote-as {{ neighbor.remote_as }} 25 | {% if neighbor.activate is sameas true %} 26 | neighbor {{ neighbor.id }} activate 27 | {% endif %} 28 | {% if neighbor.next_hop_self is sameas true %} 29 | neighbor {{ neighbor.id }} next-hop-self 30 | {% endif %} 31 | {% if neighbor.prefix_lists is defined %} 32 | {% for direction, prefix_list in neighbor.prefix_lists.items() %} 33 | neighbor {{ neighbor.id }} prefix-list {{ prefix_list }} {{ direction }} 34 | {% endfor %} 35 | {% endif %} 36 | {% endfor %} 37 | {# Redistribute #} 38 | {% if 
address_family_config.redistribute is defined %} 39 | {% if address_family_config.redistribute.connected is defined %} 40 | {% set options = '' %} 41 | {% if address_family_config.redistribute.connected.route_map is defined %} 42 | {% set options = options + ' route_map ' + address_family_config.redistribute.connected.route_map %} 43 | {% endif %} 44 | {% if address_family_config.redistribute.connected.metric is defined %} 45 | {% set options = options + ' metric ' + address_family_config.redistribute.connected.metric %} 46 | {% endif %} 47 | redistribute connected {{ options | trim }} 48 | {% endif %} 49 | {% endif %} 50 | {# Options #} 51 | {% if address_family_config.auto_summary is defined %} 52 | {{ 'no ' if address_family_config.auto_summary is sameas false else '' }}auto-summary 53 | {% endif %} 54 | {% if address_family_config.synchronization is defined %} 55 | {{ 'no ' if address_family_config.synchronization is sameas false else '' }}synchronization 56 | {% endif %} 57 | exit-address-family 58 | {% endfor %} 59 | {% endif %} 60 | {% if router.bgp.router_id %} 61 | bgp router-id {{ router.bgp.router_id }} 62 | {% endif %} 63 | {% endif %} 64 | {% endif %} -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/cli/dns.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {% if dns_servers is defined %} 3 | {% if dns_interface is defined %} 4 | dns domain-lookup {{ dns_interface }} 5 | {% endif %} 6 | dns server-group DefaultDNS 7 | {% for server in dns_servers|default([]) %} 8 | name-server {{ server }} 9 | {% endfor %} 10 | {% if domain_name is defined %} 11 | domain-name {{ domain_name }} 12 | {% endif %} 13 | {% endif %} -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/cli/failover.j2: 
-------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {% if failover is defined %} 3 | failover 4 | failover lan unit {{ failover.role }} 5 | failover lan interface asa_fail {{ failover.interface }} 6 | failover interface ip asa_fail {{ failover.ip | ipaddr('address') }} {{ failover.ip | ipaddr('netmask') }} standby {{ failover.peer_ip | ipaddr('address') }} 7 | {% for interface in failover.ignore_interface|default([]) %} 8 | no monitor-interface {{ interface }} 9 | {% endfor %} 10 | {% endif %}{# failover is defined #} -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/cli/interfaces.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {% for key, value in interfaces.items() %} 3 | interface {{ key }} 4 | {% if value.vlan is defined %} 5 | vlan {{ value.vlan }} 6 | {% endif %}{# value.vlan is defined #} 7 | {% if value.nameif is defined %} 8 | nameif {{ value.nameif }} 9 | {% endif %}{# value.vlan is defined #} 10 | {% if (value.management_only is defined) and (value.management_only is sameas true) %} 11 | management-only 12 | {% endif %}{# management-only is defined #} 13 | {% if value.security_level is defined %} 14 | security-level {{ value.security_level }} 15 | {% endif %}{# value.security_level is defined #} 16 | {% if value.description is defined %} 17 | description {{ interface_item.description }} 18 | {% endif %} 19 | {% if value.ip is defined %} 20 | {% if value.ip.primary is defined %} 21 | {% if value.ip.primary == 'dhcp' %} 22 | ip address dhcp setroute 23 | {% elif value.ip.standby is defined %} 24 | ip address {{ value.ip.primary|ipaddr('address') }} {{ value.ip.primary|ipaddr('netmask') }} standby {{ value.ip.standby|ipaddr('address') }} 25 | {% else %} 26 | ip address {{ value.ip.primary|ipaddr('address') }} {{ 
value.ip.primary|ipaddr('netmask') }} 27 | {% endif %}{# value.ip.primary == 'dhcp' #} 28 | {% endif %}{# if value.ip.primary is defined #} 29 | {% endif %}{# if value.ip is defined #} 30 | {% if value.enabled is sameas true %} 31 | no shut 32 | {% if value.vlan is defined %} 33 | interface {{ key | regex_replace('^([a-zA-Z0-9\/]+)\.[^a-zA-Z]+', '\\1') }} 34 | no shut 35 | {% endif %}{# value.vlan is defined #} 36 | {% else %}{# disabled #} 37 | shut 38 | {% endif %}{# enabled #} 39 | {# ------------- #} 40 | {# Default Route #} 41 | {# ------------- #} 42 | {% if value.ip is defined %} 43 | {% if value.ip.gateway is defined %} 44 | route {{ value.nameif }} 0.0.0.0 0.0.0.0 {{ value.ip.gateway }} 45 | {% endif %}{# value.ip.primary.gateway is defined #} 46 | {% endif %} 47 | {% endfor %}{# interfaces #} -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/cli/license.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {% if licensing is defined %} 3 | license smart 4 | {% if licensing.feature is defined %} 5 | feature tier {{ licensing.feature }} 6 | {% endif %} 7 | {% if licensing.throughput is defined %} 8 | throughput level {{ licensing.throughput }} 9 | {% endif %} 10 | ! 
11 | {% if licensing.token is defined %}
12 | license smart register idtoken {{ licensing.token }}
13 | {% endif %}
14 | {% endif %}
--------------------------------------------------------------------------------
/ansible/day_0/templates/asa/cli/nat.j2:
--------------------------------------------------------------------------------
1 | #jinja2: lstrip_blocks: True, trim_blocks: True
2 | {# NOTE(review): removed an unused 'options' accumulator (built from nat_entry.source_type but never rendered); output is unchanged #}
3 | {% if nat is defined %}
4 | {% for nat_entry in nat %}
5 | nat ({{ nat_entry.inside_interface }},{{ nat_entry.outside_interface }}) source {{ nat_entry.source_type }} {{ nat_entry.source_list }} interface
6 | {% endfor %}
7 | {% endif %}
--------------------------------------------------------------------------------
/ansible/day_0/templates/asa/cli/ntp.j2:
--------------------------------------------------------------------------------
1 | #jinja2: lstrip_blocks: True, trim_blocks: True
2 | {% for server in ntp_servers|default([]) %}
3 | ntp server {{ server }}
4 | {% endfor %}
--------------------------------------------------------------------------------
/ansible/day_0/templates/asa/cli/ospf.j2:
--------------------------------------------------------------------------------
1 | #jinja2: lstrip_blocks: True, trim_blocks: True
2 | {% if router is defined %}
3 | {% if router.ospf is defined %}
4 | router ospf {{ router.ospf.id }}
5 | {% if router.ospf.router_id is defined %}
6 | router-id {{ router.ospf.router_id }}
7 | {% endif %}{# router.ospf.router_id is defined #}
8 | {% for network in router.ospf.networks|default([]) %}
9 | network {{ network.network|ipaddr('network') }} {{ network.network|ipaddr('netmask') }} area {{ network.area }}
10 | {% endfor %}
11 | {% for area in router.ospf.areas|default([]) %}
12 | area {{ area.number }} {{ area.options|default([])|join(' ') }}
13 | {% endfor %}
14 | {% endif %}{#
router.ospf is defined #} 15 | {% endif %}{# router is defined #} -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/cli/prefix-lists.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {% if prefix_lists is defined %} 3 | {% for list_name, list_data in prefix_lists.items() %} 4 | {% for list_entry in list_data %} 5 | prefix-list {{ list_name }} seq {{ list_entry.seq | default(loop.index * 10) }} {{ list_entry.action | default('permit') }} {{ list_entry.network }} 6 | {% endfor %} 7 | {% endfor %} 8 | {% endif %} -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/cli/static-routes.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {% if static_routes is defined %} 3 | {% for vrf, value in static_routes.items() %} 4 | {% for route in value|default([]) %} 5 | {% if vrf == "Null0" %} 6 | route {{ vrf }} {{ route.network|ipaddr('network') }} {{ route.network|ipaddr('netmask') }} 7 | {% else %} 8 | {% for dest in route.fwd_list|default([]) %} 9 | route {{ vrf }} {{ route.network|ipaddr('network') }} {{ route.network|ipaddr('netmask') }} {{ dest.fwd }} {{ dest.metric|default('') }} 10 | {% endfor %} 11 | {% endif %} 12 | {% endfor %} 13 | {% endfor %} 14 | {% endif %} -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/cli/system.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | hostname {{ inventory_hostname.split('.')[0] }} 3 | {% if domain_name is defined %} 4 | domain-name {{ domain_name }} 5 | {% endif %} -------------------------------------------------------------------------------- /ansible/day_0/templates/asa/virl.j2: 
-------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {# -------- #} 3 | {# System #} 4 | {# -------- #} 5 | {{ lookup('template', 'asa/cli/system.j2') }} 6 | ! 7 | username {{ ansible_user }} password {{ ansible_password }} 8 | aaa authorization exec LOCAL auto-enable 9 | ! 10 | interface Management0/0 11 | shut 12 | interface Management0/0 13 | management-only 14 | nameif management 15 | ip address dhcp setroute 16 | no shut 17 | ! 18 | {# ---------- #} 19 | {# Interfaces #} 20 | {# ---------- #} 21 | {{ lookup('template', 'asa/cli/interfaces.j2') }} 22 | ! 23 | {# ------------- #} 24 | {# Static Routes #} 25 | {# ------------- #} 26 | {{ lookup('template', 'asa/cli/static-routes.j2') }} 27 | ! 28 | {# ------------- #} 29 | {# Prefix_lists #} 30 | {# ------------- #} 31 | {{ lookup('template', 'asa/cli/prefix-lists.j2') }} 32 | ! 33 | {# ---- #} 34 | {# BGP #} 35 | {# ---- #} 36 | {{ lookup('template', 'asa/cli/bgp.j2') }} 37 | ! 38 | {# ---- #} 39 | {# OSPF #} 40 | {# ---- #} 41 | {{ lookup('template', 'asa/cli/ospf.j2') }} 42 | ! 43 | {# --- #} 44 | {# NAT #} 45 | {# --- #} 46 | {{ lookup('template', 'asa/cli/nat.j2') }} 47 | ! 48 | 49 | {# -------- #} 50 | {# Failover #} 51 | {# -------- #} 52 | {#{{ lookup('template', 'asa/cli/failover.j2') }}#} 53 | {#!#} 54 | ! 55 | {# --- #} 56 | {# DNS #} 57 | {# --- #} 58 | {{ lookup('template', 'asa/cli/dns.j2') }} 59 | ! 60 | {# --- #} 61 | {# NTP #} 62 | {# --- #} 63 | {{ lookup('template', 'asa/cli/ntp.j2') }} 64 | ! 65 | {# --------------- #} 66 | {# Smart Licensing #} 67 | {# --------------- #} 68 | {{ lookup('template', 'asa/cli/license.j2') }} 69 | ! 70 | same-security-traffic permit inter-interface 71 | ! 72 | crypto key generate rsa modulus 2048 73 | ssh key-exchange group dh-group14-sha1 74 | ssh version 2 75 | aaa authentication ssh console LOCAL 76 | ssh 0.0.0.0 0.0.0.0 management 77 | ! 
78 | http server enable 79 | aaa authentication http console LOCAL 80 | http 0.0.0.0 0.0.0.0 management -------------------------------------------------------------------------------- /ansible/day_0/templates/host.j2: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | password: {{ ansible_password }} 3 | chpasswd: { expire: False } 4 | ssh_pwauth: True 5 | {#bootcmd:#} 6 | {#- ln -s -t /etc/rc.d /etc/rc.local#} 7 | hostname: {{ inventory_hostname }} 8 | manage_etc_hosts: true 9 | runcmd: 10 | - systemctl start rc-local 11 | - sed -i -e 's/%admin ALL=(ALL) ALL/%admin ALL=NOPASSWD:ALL/g' /etc/sudoers 12 | {# - sed -i '/^\s*PasswordAuthentication\s\+no/d' /etc/ssh/sshd_config#} 13 | {# - echo "UseDNS no" >> /etc/ssh/sshd_config#} 14 | {# - service ssh restart#} 15 | {# - service sshd restart#} 16 | users: 17 | {# - default#} 18 | - name: virl 19 | plain-text-passwd: {{ ansible_password }} 20 | gecos: User configured by VIRL Configuration Engine 0.23.10 21 | groups: admin 22 | lock-passwd: false 23 | shell: /bin/bash 24 | {# ssh-authorized-keys:#} 25 | {# - VIRL-USER-SSH-PUBLIC-KEY#} 26 | {#write_files:#} 27 | {#- path: /etc/systemd/system/dhclient@.service#} 28 | {# content: |#} 29 | {# [Unit]#} 30 | {# Description=Run dhclient on %i interface#} 31 | {# After=network.target#} 32 | {# [Service]#} 33 | {# Type=oneshot#} 34 | {# ExecStart=/sbin/dhclient %i -pf /var/run/dhclient.%i.pid -lf /var/lib/dhclient/dhclient.%i.lease#} 35 | {# RemainAfterExit=yes#} 36 | {# owner: root:root#} 37 | {# permissions: '0644'#} 38 | {#- path: /etc/rc.local#} 39 | {# owner: root:root#} 40 | {# permissions: '0755'#} 41 | {# content: |-#} 42 | {# #!/bin/sh#} 43 | {# ifconfig eth1 up 192.168.1.10 netmask 255.255.255.0#} 44 | {# ip route add 10.0.0.0/8 via 192.168.1.1#} 45 | {# ip route add 172.20.0.0/16 via 192.168.1.1#} 46 | {# ip route add 192.168.0.0/16 via 192.168.1.1#} 47 | {# exit 0f#} 
-------------------------------------------------------------------------------- /ansible/day_0/templates/ios/cli.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {{ lookup('template', 'ios/cli/system.j2') }} 3 | ! 4 | license smart enable 5 | ! 6 | username {{ ansible_user }} privilege 15 secret {{ ansible_password }} 7 | crypto key generate rsa modulus 2048 8 | ! 9 | vrf definition Mgmt-intf 10 | address-family ipv4 11 | exit-address-family 12 | address-family ipv6 13 | exit-address-family 14 | ! 15 | {# ---------- #} 16 | {# Interfaces #} 17 | {# ---------- #} 18 | {{ lookup('template', 'ios/cli/interfaces.j2') }} 19 | ! 20 | {# ------------- #} 21 | {# Static Routes #} 22 | {# ------------- #} 23 | {{ lookup('template', 'ios/cli/static-routes.j2') }} 24 | ! 25 | {# ----------------- #} 26 | {# Routing Protocols #} 27 | {# ----------------- #} 28 | {# --- #} 29 | {# BGP #} 30 | {# --- #} 31 | {{ lookup('template', 'ios/cli/bgp.j2') }} 32 | ! 33 | {# ---- #} 34 | {# OSPF #} 35 | {# ---- #} 36 | {{ lookup('template', 'ios/cli/ospf.j2') }} 37 | ! 38 | {# --- #} 39 | {# DNS #} 40 | {# --- #} 41 | {{ lookup('template', 'ios/cli/dns.j2') }} 42 | ! 43 | {# --- #} 44 | {# NTP #} 45 | {# --- #} 46 | {{ lookup('template', 'ios/cli/ntp.j2') }} 47 | ! 
48 | line vty 0 4
49 | login local
50 | transport input ssh
51 | exit
--------------------------------------------------------------------------------
/ansible/day_0/templates/ios/cli/bgp.j2:
--------------------------------------------------------------------------------
1 | {# --- #}
2 | {# BGP #}
3 | {# --- #}
4 | {% if router is defined %}
5 | {% if router.bgp is defined %}
6 | router bgp {{ router.bgp.id }}
7 | {% if router.bgp.router_id is defined %}
8 | bgp router-id {{ router.bgp.router_id }}
9 | {% endif %}
10 | {% for neighbor in router.bgp.neighbors|default([]) %}
11 | neighbor {{ neighbor.id }} remote-as {{ neighbor.remote_as }}
12 | {% endfor %}
13 | {% if router.bgp.address_family is defined %}
14 | {% for vrf, address_family_list in router.bgp.address_family.items() %}
15 | {% for address_family, address_family_config in address_family_list.items() %}
16 | {# FIX(review): was "'vrf ' + address_family", which emitted e.g. "address-family ipv4 vrf ipv4"; the VRF name is the outer dict key #}
17 | address-family {{ address_family }} {{ '' if vrf == 'global' else 'vrf ' + vrf }}
18 | {% for network in address_family_config.networks|default([]) %}
19 | network {{ network.network|ipaddr('network') }} mask {{ network.network|ipaddr('netmask') }}
20 | {% endfor %}
21 | {% for aggregate in address_family_config.aggregate_address|default([]) %}
22 | {% set options = '' %}
23 | {% if aggregate.summary_only is sameas true %}
24 | {% set options = options + ' summary-only' %}
25 | {% endif %}
26 | aggregate-address {{ aggregate.network|ipaddr('network') }} {{ aggregate.network|ipaddr('netmask') }} {{ options | trim }}
27 | {% endfor %}
28 | {% for neighbor in address_family_config.neighbors|default([]) %}
29 | {% if neighbor.activate is sameas true %}
30 | neighbor {{ neighbor.id }} activate
31 | {% endif %}
32 | {% if neighbor.next_hop_self is sameas true %}
33 | neighbor {{ neighbor.id }} next-hop-self
34 | {% endif %}
35 | {% endfor %}
36 | {% endfor %}
37 | {% endfor %}
38 | {% endif %}
39 | {% endif %}
40 | {% endif %}
-------------------------------------------------------------------------------- /ansible/day_0/templates/ios/cli/dns.j2: -------------------------------------------------------------------------------- 1 | {% if dns_servers is defined %} 2 | ip name-server {{ dns_servers | join(' ') }} 3 | {% endif %} -------------------------------------------------------------------------------- /ansible/day_0/templates/ios/cli/interfaces.j2: -------------------------------------------------------------------------------- 1 | {% for key, value in interfaces.items() %} 2 | interface {{ key }} 3 | {% if value.vlan is defined %} 4 | encapsulation dot1Q {{ value.vlan }} 5 | {% endif %}{# value.vlan is defined #} 6 | {% if value.vrf is defined %} 7 | vrf forwarding {{ value.vrf }} 8 | {% endif %}{# value.vrf is defined #} 9 | {% if value.description is defined %} 10 | description {{ value.description }} 11 | {% endif %} 12 | {% if value.ip.primary.address is defined %} 13 | ip address {{ value.ip.primary.address }} {{ value.ip.primary.mask }} 14 | {% endif %} 15 | {% if value.ip is defined %} 16 | {% if value.ip.primary is defined %} 17 | {% if value.ip.primary == 'dhcp' %} 18 | ip address dhcp 19 | {% else %} 20 | ip address {{ value.ip.primary|ipaddr('address') }} {{ value.ip.primary|ipaddr('netmask') }} 21 | {% endif %}{# if value.ip.primary is defined #} 22 | {% if value.ip.standby is defined %} 23 | standby {{ value.ip.standby.group }} priority {{ value.ip.standby.priority }} 24 | standby {{ value.ip.standby.group }} ip {{ value.ip.standby.address }} 25 | {% endif %}{# if value.ip.standby is defined #} 26 | {% endif %}{# value.ip.primary == 'dhcp' #} 27 | {% endif %}{# if value.ip is defined #} 28 | {% if value.ospf is defined %} 29 | {% if value.ospf.lls == 'disable' %} 30 | ip ospf lls disable 31 | {% endif %}{# value.ospf.lls == 'disable' #} 32 | {% endif %}{# value.ospf is defined #} 33 | {% if value.enabled is sameas true %} 34 | no shut 35 | {% if value.vlan is defined %} 
36 | interface {{ key | regex_replace('^([a-zA-Z0-9\/]+)\.[^a-zA-Z]+', '\\1') }} 37 | no shut 38 | {% endif %}{# value.vlan is defined #} 39 | {% endif %}{# enabled #} 40 | {% endfor %}{# interfaces #} -------------------------------------------------------------------------------- /ansible/day_0/templates/ios/cli/ntp.j2: -------------------------------------------------------------------------------- 1 | {% for server in ntp_servers|default([]) %} 2 | ntp server {{ server }} 3 | {% endfor %} -------------------------------------------------------------------------------- /ansible/day_0/templates/ios/cli/ospf.j2: -------------------------------------------------------------------------------- 1 | {% if router is defined %} 2 | {% if router.ospf is defined %} 3 | router ospf {{ router.ospf.id }} 4 | {% if router.ospf.router_id is defined %} 5 | router-id {{ router.ospf.router_id }} 6 | {% endif %}{# router.ospf.router_id is defined #} 7 | {% for protocol in router.ospf.redistribute|default([]) %} 8 | redistribute {{ protocol.protocol }} {{ '' if protocol.protocol_id is not defined else protocol.protocol_id }} {{ '' if protocol.options is not defined else protocol.options | join(' ') }} 9 | {% endfor %} 10 | {% for network in router.ospf.networks|default([]) %} 11 | network {{ network.network|ipaddr('network') }} {{ network.network|ipaddr('netmask') }} area {{ network.area }} 12 | {% endfor %} 13 | {% if router.ospf.default_information_originate is defined and router.ospf.default_information_originate is sameas true %} 14 | default-information originate 15 | {% endif %}{# router.ospf.default_information_originate is defined #} 16 | {% endif %}{# router.ospf is defined #} 17 | {% endif %}{# router is defined #} -------------------------------------------------------------------------------- /ansible/day_0/templates/ios/cli/static-routes.j2: -------------------------------------------------------------------------------- 1 | {% if static_routes is defined %} 2 | {% for 
vrf, value in static_routes.items() %}
3 | {% for route in value|default([]) %}
4 | {% for dest in route.fwd_list|default([]) %}
5 | {# FIX(review): IOS VRF static routes require the "vrf" keyword ("ip route vrf NAME ..."); previously only the bare VRF name was emitted #}
6 | ip route {{ '' if vrf == 'global' else 'vrf ' + vrf }} {{ route.network|ipaddr('network') }} {{ route.network|ipaddr('netmask') }} {{ dest.fwd }} {{ dest.metric|default('') }}
7 | {% endfor %}
8 | {% endfor %}
9 | {% endfor %}
10 | {% endif %}
--------------------------------------------------------------------------------
/ansible/day_0/templates/ios/cli/system.j2:
--------------------------------------------------------------------------------
1 | hostname {{ inventory_hostname.split('.')[0] }}
2 | ip domain name {{ domain_name | default('local') }}
3 | !
--------------------------------------------------------------------------------
/ansible/day_0/templates/ios/nfvis.j2:
--------------------------------------------------------------------------------
1 | #jinja2: lstrip_blocks: True, trim_blocks: True
2 | {{ lookup('template', './cli/system.j2') }}
3 | !
4 | license smart enable
5 | !
6 | username {{ ansible_user }} privilege 15 secret {{ ansible_password }}
7 | crypto key generate rsa modulus 2048
8 | !
9 | vrf definition Mgmt-intf
10 | address-family ipv4
11 | exit-address-family
12 | address-family ipv6
13 | exit-address-family
14 | !
15 | {# ---------- #}
16 | {# Interfaces #}
17 | {# ---------- #}
18 | {{ lookup('template', './cli/interfaces.j2') }}
19 | !
20 | {# ------------- #}
21 | {# Static Routes #}
22 | {# ------------- #}
23 | {{ lookup('template', './cli/static-routes.j2') }}
24 |
25 | ip route vrf Mgmt-intf 0.0.0.0 0.0.0.0 ${NICID_0_GATEWAY}
26 | !
27 | {# ----------------- #}
28 | {# Routing Protocols #}
29 | {# ----------------- #}
30 | {# --- #}
31 | {# BGP #}
32 | {# --- #}
33 | {{ lookup('template', './cli/bgp.j2') }}
34 | !
35 | {# ---- #}
36 | {# OSPF #}
37 | {# ---- #}
38 | {{ lookup('template', './cli/ospf.j2') }}
39 | !
40 | {# --- #}
41 | {# DNS #}
42 | {# --- #}
43 | {{ lookup('template', './cli/dns.j2') }}
44 | !
45 | {# --- #} 46 | {# NTP #} 47 | {# --- #} 48 | {{ lookup('template', './cli/ntp.j2') }} 49 | ! 50 | line vty 0 4 51 | login local 52 | transport input ssh 53 | exit -------------------------------------------------------------------------------- /ansible/day_0/templates/ios/virl.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | {{ lookup('template', 'ios/cli/system.j2') }} 3 | ! 4 | license smart enable 5 | ! 6 | username {{ ansible_user }} privilege 15 secret {{ ansible_password }} 7 | crypto key generate rsa modulus 2048 8 | ! 9 | vrf definition Mgmt-intf 10 | address-family ipv4 11 | exit-address-family 12 | address-family ipv6 13 | exit-address-family 14 | ! 15 | {# ---------- #} 16 | {# Interfaces #} 17 | {# ---------- #} 18 | {{ lookup('template', 'ios/cli/interfaces.j2') }} 19 | ! 20 | {# ------------- #} 21 | {# Static Routes #} 22 | {# ------------- #} 23 | {{ lookup('template', 'ios/cli/static-routes.j2') }} 24 | ! 25 | {# ----------------- #} 26 | {# Routing Protocols #} 27 | {# ----------------- #} 28 | {# --- #} 29 | {# BGP #} 30 | {# --- #} 31 | {{ lookup('template', 'ios/cli/bgp.j2') }} 32 | ! 33 | {# ---- #} 34 | {# OSPF #} 35 | {# ---- #} 36 | {{ lookup('template', 'ios/cli/ospf.j2') }} 37 | ! 38 | {# --- #} 39 | {# DNS #} 40 | {# --- #} 41 | {{ lookup('template', 'ios/cli/dns.j2') }} 42 | ! 43 | {# --- #} 44 | {# NTP #} 45 | {# --- #} 46 | {{ lookup('template', 'ios/cli/ntp.j2') }} 47 | ! 
48 | line vty 0 4 49 | login local 50 | transport input ssh 51 | exit -------------------------------------------------------------------------------- /ansible/day_0/templates/sdwan/cedge_user-data.j2: -------------------------------------------------------------------------------- 1 | Content-Type: multipart/mixed; boundary="==BOUNDARY==" 2 | MIME-Version: 1.0 3 | 4 | --==BOUNDARY== 5 | Content-Type: text/cloud-config; charset="us-ascii" 6 | 7 | {{ bootstrap }} 8 | --==BOUNDARY== 9 | Content-Type: text/cloud-boothook; charset="us-ascii" 10 | 11 | #cloud-boothook 12 | 13 | hostname {{ inventory_hostname }} 14 | ! 15 | username admin privilege 15 password {{ vmanage_pass }} 16 | ! 17 | ntp server {{ ntp_server }} 18 | ! 19 | system 20 | system-ip {{ sdwan_system_ip }} 21 | overlay-id 1 22 | site-id {{ sdwan_site_id }} 23 | organization-name "{{ vmanage_org }}" 24 | sp-organization-name "{{ vmanage_org }}" 25 | vbond {{ sdwan_vbond }} 26 | ! 27 | interface GigabitEthernet1 28 | description TRANSPORT - INET 29 | {% if vpn0_ip_netmask is defined %} 30 | ip address {{ vpn0_ip_netmask }} 31 | {% else %} 32 | ip address dhcp 33 | {% endif %}{# vpn0_ip_netmask #} 34 | negotiation auto 35 | no mop enabled 36 | no mop sysid 37 | ! 38 | interface GigabitEthernet2 39 | description SERVICE 40 | vrf forwarding 1 41 | {% if vpn1_ip_netmask is defined %} 42 | ip address {{ vpn1_ip_netmask }} 43 | {% else %} 44 | ip address dhcp 45 | {% endif %}{# vpn1_ip_netmask #} 46 | negotiation auto 47 | no mop enabled 48 | no mop sysid 49 | ! 50 | interface Tunnel1 51 | ip unnumbered GigabitEthernet1 52 | tunnel source GigabitEthernet1 53 | tunnel mode sdwan 54 | ! 
55 | sdwan 56 | interface GigabitEthernet1 57 | tunnel-interface 58 | encapsulation ipsec 59 | color default 60 | allow-service all 61 | no allow-service bgp 62 | allow-service dhcp 63 | allow-service dns 64 | allow-service icmp 65 | allow-service sshd 66 | no allow-service netconf 67 | no allow-service ntp 68 | no allow-service ospf 69 | no allow-service stun 70 | no allow-service https 71 | no allow-service snmp 72 | no allow-service bfd 73 | ! 74 | no ip http server 75 | no ip http secure-server 76 | {% if vpn0_default_gateway is defined %} 77 | ip route 0.0.0.0 0.0.0.0 {{ vpn0_default_gateway }} 78 | ! 79 | {% endif %}{# vpn0_default_gateway #} 80 | vrf definition 1 81 | rd 1:1 82 | address-family ipv4 83 | route-target export 1:1 84 | route-target import 1:1 85 | exit-address-family 86 | ! 87 | address-family ipv6 88 | exit-address-family 89 | ! 90 | ! 91 | {% if ssh_pubkey_fp is defined and ssh_pubkey_fp != "" %} 92 | ip ssh pubkey-chain 93 | username admin 94 | key-hash ssh-rsa {{ ssh_pubkey_fp }} 95 | ! 96 | {% endif %}{# ssh_pubkey_fp #} 97 | line vty 0 4 98 | transport input ssh 99 | ! 
100 | --==BOUNDARY== 101 | -------------------------------------------------------------------------------- /ansible/day_0/templates/sdwan/cedge_user-data_attached.j2: -------------------------------------------------------------------------------- 1 | {{ bootstrap }} 2 | -------------------------------------------------------------------------------- /ansible/day_0/templates/sdwan/iosxe.j2: -------------------------------------------------------------------------------- 1 | Content-Type: multipart/mixed; boundary="===============6560338015520979320==" 2 | MIME-Version: 1.0 3 | 4 | {% if viptela is defined %} 5 | --===============6560338015520979320== 6 | Content-Type: text/cloud-config; charset="us-ascii" 7 | MIME-Version: 1.0 8 | Content-Transfer-Encoding: 7bit 9 | Content-Disposition: attachment; filename="vedge.cloud-config" 10 | 11 | #cloud-config 12 | vinitparam: 13 | {% if viptela.otp is defined %} 14 | - otp: {{ viptela.otp }} 15 | {% endif %}{# viptela.otp is defined #} 16 | {% if viptela.uuid is defined %} 17 | - uuid: {{ viptela.uuid }} 18 | {% endif %}{# viptela.uuid is defined #} 19 | {% if vmanage_org is defined %} 20 | - org: {{ vmanage_org }} 21 | {% endif %}{# viptela.org is defined #} 22 | {% if viptela.vbond is defined %} 23 | - vbond: {{ viptela.vbond.remote }} 24 | {% endif %} 25 | {% endif %} 26 | 27 | --===============6560338015520979320== 28 | Content-Type: text/cloud-boothook; charset="us-ascii" 29 | MIME-Version: 1.0 30 | Content-Transfer-Encoding: 7bit 31 | Content-Disposition: attachment; filename="vedge.init-config" 32 | 33 | #cloud-boothook 34 | 35 | system 36 | personality vedge 37 | device-model vedge-CSR-1000v 38 | host-name {{ inventory_hostname.split('.')[0] }} 39 | {% if viptela is defined %} 40 | {% if viptela.system_ip is defined %} 41 | system-ip {{ viptela.system_ip }} 42 | {% endif %}{# viptela.system_ip is defined #} 43 | {% if viptela.domain_id is defined %} 44 | domain-id {{ viptela.domain_id }} 45 | {% endif %}{# 
viptela.domain_id is defined #}
46 | {% if viptela.site_id is defined %}
47 | site-id {{ viptela.site_id }}
48 | {% endif %}{# viptela.site_id is defined #}
49 | no route-consistency-check
50 | {% if vmanage_org is defined %}
51 | organization-name "{{ vmanage_org }}"
52 | {% endif %}{# vmanage_org is defined #}
53 | {% if viptela.vbond is defined %}
54 | {# FIX(review): was "vbond.local" — 'vbond' is undefined at template scope, so the 'local' flag was never emitted; the flag lives under viptela.vbond #}
55 | vbond {{ viptela.vbond.remote }} {{ 'local' if viptela.vbond.local is sameas true else '' }}
56 | {% endif %}{# viptela.vbond is defined #}
57 | {% endif %}
58 | !
59 | interface GigabitEthernet1
60 | no shutdown
61 | arp timeout 1200
62 | vrf forwarding Mgmt-intf
63 | ip address dhcp client-id GigabitEthernet1
64 | ip redirects
65 | ip dhcp client default-router distance 1
66 | ip mtu 1500
67 | mtu 1500
68 | negotiation auto
69 | exit
70 | !
71 | no ip http server
72 | no ip http secure-server
73 | --===============6560338015520979320==
--------------------------------------------------------------------------------
/ansible/day_0/templates/sdwan/netconf/system.j2:
--------------------------------------------------------------------------------
1 | #jinja2: lstrip_blocks: True, trim_blocks: True
2 |
3 |
4 | {{ inventory_hostname }}
5 | {% if sdwan_system_ip is defined %}
6 | {{ sdwan_system_ip }}
7 | {% endif %}
8 | {% if sdwan_site_id is defined %}
9 | {{ sdwan_site_id }}
10 | {% endif %}
11 | {% if vmanage_org is defined %}
12 | {{ vmanage_org }}
13 | {% endif %}
14 | {% if sdwan_personality is defined and sdwan_personality == 'vbond' %}
15 |
16 | {{ sdwan_vbond }}
17 | {% if sdwan_vbond_port is defined %}
18 | {{ sdwan_vbond_port }}
19 | {% endif %}
20 |
21 |
22 | {% elif sdwan_vbond is defined %}
23 |
24 | {{ sdwan_vbond }}
25 | {% if sdwan_vbond_port is defined %}
26 | {{ sdwan_vbond_port }}
27 | {% endif %}
28 |
29 | {% endif %}
30 |
31 |
--------------------------------------------------------------------------------
/ansible/day_0/templates/sdwan/netconf/vpn.j2:
-------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: True, trim_blocks: True 2 | 3 | 4 | {% for vpn in vpn_instances|default([]) %} 5 | 6 | {{ vpn.vpn_id }} 7 | 8 | {% for interface in vpn.interfaces %} 9 | {{ interface.if_name }} 10 | {% if interface.ip is defined %} 11 | 12 | {% if interface.ip.address is defined %} 13 |
{{ interface.ip.address }}
14 | {% elif interface.ip.dhcp_client is defined %} 15 | {{ 'true' if interface.ip.dhcp_client else 'false' }} 16 | {% endif %}{# interface.ip.address #} 17 |
18 | {% endif %}{# interface.ip #} 19 | {% if interface.tunnel_interface is defined %} 20 | 21 | {% if interface.tunnel_interface.allow_service is defined %} 22 | 23 | {% for service in interface.tunnel_interface.allow_service %} 24 | <{{ service }}>true 25 | {% endfor %}{# service #} 26 | 27 | {% endif %}{# interface.allow_service #} 28 | 29 | {% endif %}{# interface.tunnel_inteface #} 30 | {% if interface.enabled is defined %} 31 | {% if interface.enabled is sameas true %} 32 | false 33 | {% else %} 34 | true 35 | {% endif %} 36 | {% endif %}{# interface.shutdown #} 37 |
38 | {% endfor %}{# vpn.interfaces #} 39 | {% if vpn.ospf is defined %} 40 | 41 | 42 | {% if vpn.ospf.redistribute is defined %} 43 | 44 | {% for protocol in vpn.ospf.redistribute|default([]) %} 45 | {{ protocol }} 46 | {% endfor %}{# vpn.redistribute.protocols #} 47 | 48 | {% endif %}{# vpn.redistribute #} 49 | 50 | {{ vpn.ospf.area }} 51 | {% for interface in vpn.ospf.interfaces|default([]) %} 52 | 53 | {{ interface.if_name }} 54 | {{ interface.hello_interval|default('10') }} 55 | {{ interface.dead_interval|default('40') }} 56 | {{ interface.retransmit_interval|default('5') }} 57 | {{ interface.priority|default('1') }} 58 | {{ interface.network|default('broadcast') }} 59 | 60 | {% endfor %}{# vpn.ospf.interfaces #} 61 | 62 | 63 | 64 | {% endif %}{# vpn.ospf is defined #} 65 | {% if vpn.routes is defined %} 66 | 67 | {% for route in vpn.routes|default([]) %} 68 | 69 | {{ route.prefix }} 70 | {% if route.next_hop is defined %} 71 | 72 |
{{ route.next_hop.address }}
73 | {{ route.next_hop.distance if route.next_hop.distance is defined else '1' }} 74 |
75 | {% endif %}{# route.next_hop #} 76 |
77 | {% endfor %}{# vpn.routes #} 78 |
79 | {% endif %}{# vpn.routes is defined #} 80 |
81 | {% endfor %}{# vpn_instances #} 82 |
83 |
-------------------------------------------------------------------------------- /ansible/day_0/templates/sdwan/vedge.j2: -------------------------------------------------------------------------------- 1 | Content-Type: multipart/mixed; boundary="==BOUNDARY==" 2 | MIME-Version: 1.0 3 | 4 | --==BOUNDARY== 5 | Content-Type: text/cloud-boothook; charset="us-ascii" 6 | 7 | #cloud-boothook 8 | system 9 | host-name vbond 10 | vbond ztp.viptela.com local 11 | 12 | aaa 13 | auth-order local radius tacacs 14 | usergroup basic 15 | task system read write 16 | task interface read write 17 | ! 18 | 19 | usergroup netadmin 20 | ! 21 | 22 | usergroup operator 23 | task system read 24 | task interface read 25 | task policy read 26 | task routing read 27 | task security read 28 | ! 29 | 30 | user admin 31 | password $6$Ju0z0n7YKtLgYWlJ$38aW.MBat6e8xL2vuLdbfGVH.DAYh7lq2nA/dBdvL.rWdJyGGB2gxLeVh8jQBKTCxti8vX5RKpbtzEadiZc6J. 32 | ! 33 | ! 34 | 35 | logging 36 | disk 37 | enable 38 | ! 39 | ! 40 | ! 41 | 42 | omp 43 | no shutdown 44 | graceful-restart 45 | advertise connected 46 | advertise static 47 | ! 48 | 49 | security 50 | ipsec 51 | authentication-type ah-sha1-hmac sha1-hmac 52 | ! 53 | ! 54 | 55 | vpn 0 56 | interface ge0/0 57 | ip dhcp-client 58 | tunnel-interface 59 | allow-service dhcp 60 | allow-service dns 61 | allow-service icmp 62 | no allow-service sshd 63 | allow-service netconf 64 | no allow-service ntp 65 | no allow-service stun 66 | allow-service https 67 | ! 68 | no shutdown 69 | ! 70 | ! 71 | 72 | vpn 512 73 | interface eth0 74 | ip dhcp-client 75 | no shutdown 76 | ! 77 | ! 
78 | --==BOUNDARY== -------------------------------------------------------------------------------- /ansible/day_0/templates/terraform/aws_control_tfvars.j2: -------------------------------------------------------------------------------- 1 | network_state_file = "../Provision_VPC/terraform.tfstate.d/aws_network/terraform.tfstate" 2 | vbond_ami = "{{ vbond_image_id }}" 3 | vmanage_ami = "{{ vmanage_image_id }}" 4 | vmanage_count = "1" 5 | vsmart_ami = "{{ vsmart_image_id }}" 6 | vsmart_count = "1" 7 | vbond_instances_type = "{{ vbond_instance_type }}" 8 | vmanage_instances_type = "{{ vmanage_instance_type }}" 9 | vsmart_instances_type = "{{ vsmart_instance_type }}" 10 | ssh_pubkey = "{{ ssh_pubkey }}" 11 | sdwan_org = "{{ vmanage_org }}" 12 | {% if aws_route53_zone is defined and aws_route53_zone != "" %} 13 | route53_zone = "{{ aws_route53_zone }}" 14 | {% endif %} 15 | vmanage_day0 = < 8 | 9 | 10 | flat 11 | {# false#} 12 | 13 | {# #} 14 | {# Network Nodes #} 15 | {# #} 16 | {% for node in groups.virt_virl %} 17 | {# Add each host in the inventory to the topology if they have the 'virl' #} 18 | {# information defined #} 19 | {% if hostvars[node].virl is defined %} 20 | {% set node_number = global.node_count %} 21 | 22 | 23 | virl_node 24 | {% if hostvars[node].day0_config is defined %} 25 | {# #} 26 | {# Add day0 config #} 27 | {# #} 28 | {{ hostvars[node].day0_config }} 29 | {% endif %}{# virl.config is defined #} 30 | 31 | {% for interface in hostvars[node].virl.interfaces|default([]) %} 32 | {% set network = network_connections[interface.network]|default([]) %} 33 | {% set network = network + [{'node': node_number, 'interface': loop.index}] %} 34 | {% set _ = network_connections.update({interface.network: network}) %} 35 | 36 | {% endfor %} 37 | 38 | {% set _ = global.update({'node_count': node_number + 1}) %} 39 | {% endif %}{# virl is defined #} 40 | {% endfor %} 41 | {# #} 42 | {# Networks #} 43 | {# #} 44 | {% for network, connections in 
network_connections.items() %} 45 | {% set node_count = global.node_count %} 46 | 47 | {% for connection in connections %} 48 | 49 | {% endfor %} 50 | 51 | {% endfor %} 52 | {# #} 53 | {# Connections #} 54 | {# #} 55 | {% for network, connections in network_connections.items() %} 56 | {% set node_count = global.node_count %} 57 | {% set node_number = loop.index0 + node_count %} 58 | {% for connection in connections %} 59 | 60 | {% endfor %} 61 | {% endfor %} 62 | -------------------------------------------------------------------------------- /ansible/day_0/templates/virl/vmanage.j2: -------------------------------------------------------------------------------- 1 | Content-Type: multipart/mixed; boundary="===============6560338015520979320==" 2 | MIME-Version: 1.0 3 | 4 | --===============6560338015520979320== 5 | Content-Type: text/cloud-config; charset="us-ascii" 6 | MIME-Version: 1.0 7 | Content-Transfer-Encoding: 7bit 8 | Content-Disposition: attachment; filename="vedge.cloud-config" 9 | 10 | #cloud-config 11 | vinitparam: 12 | - format-partition : 1 13 | 14 | --===============6560338015520979320== 15 | Content-Type: text/cloud-boothook; charset="us-ascii" 16 | MIME-Version: 1.0 17 | Content-Transfer-Encoding: 7bit 18 | Content-Disposition: attachment; filename="vedge.init-config" 19 | 20 | #cloud-boothook 21 | aaa 22 | auth-order local radius tacacs 23 | usergroup basic 24 | task system read write 25 | task interface read write 26 | ! 27 | 28 | usergroup netadmin 29 | ! 30 | 31 | usergroup operator 32 | task system read 33 | task interface read 34 | task policy read 35 | task routing read 36 | task security read 37 | ! 38 | 39 | user admin 40 | password $6$u3jhmUlHRY16I1lI$OpkApAwXhGNo2xdKR.j2x7LF7OkRa9P02aSgACGMvmhidKHvxQ2poG6HTulDYp6BXfsm3clE6dz/wvOKLxA8e1 41 | ! 42 | ! 43 | 44 | logging 45 | disk 46 | enable 47 | ! 48 | ! 49 | ! 50 | 51 | vpn 0 52 | no interface eth0 53 | 54 | vpn 512 55 | interface eth0 56 | ip dhcp-client 57 | no shutdown 58 | ! 
59 | --===============6560338015520979320==-- -------------------------------------------------------------------------------- /ansible/day_0/templates/virl1_topology.j2: -------------------------------------------------------------------------------- 1 | {# #} 2 | {# Globals #} 3 | {# #} 4 | {% set network_connections = {} %} 5 | {% set global = {} %} 6 | {% set _ = global.update({'node_count': 1}) %} 7 | 8 | 9 | 10 | flat 11 | {# false#} 12 | 13 | {# #} 14 | {# Network Nodes #} 15 | {# #} 16 | {% for node in groups.virl_nodes %} 17 | {# Add each host in the inventory to the topology if they have the 'virl' #} 18 | {# information defined #} 19 | {% if hostvars[node].virl is defined %} 20 | {% set node_number = global.node_count %} 21 | 22 | 23 | virl_node 24 | {% if hostvars[node].day0_config is defined %} 25 | {# #} 26 | {# Add day0 config #} 27 | {# #} 28 | {{ hostvars[node].day0_config }} 29 | {% endif %}{# virl.config is defined #} 30 | 31 | {% for interface in hostvars[node].virl.interfaces|default([]) %} 32 | {% set network = network_connections[interface.network]|default([]) %} 33 | {% set network = network + [{'node': node_number, 'interface': loop.index}] %} 34 | {% set _ = network_connections.update({interface.network: network}) %} 35 | 36 | {% endfor %} 37 | 38 | {% set _ = global.update({'node_count': node_number + 1}) %} 39 | {% endif %}{# virl is defined #} 40 | {% endfor %} 41 | {# #} 42 | {# Networks #} 43 | {# #} 44 | {% for network, connections in network_connections.items() %} 45 | {% set node_count = global.node_count %} 46 | 47 | {% for connection in connections %} 48 | 49 | {% endfor %} 50 | 51 | {% endfor %} 52 | {# #} 53 | {# Connections #} 54 | {# #} 55 | {% for network, connections in network_connections.items() %} 56 | {% set node_count = global.node_count %} 57 | {% set node_number = loop.index0 + node_count %} 58 | {% for connection in connections %} 59 | 60 | {% endfor %} 61 | {% endfor %} 62 | 
-------------------------------------------------------------------------------- /ansible/day_0/terraform-apply-edges.yml: -------------------------------------------------------------------------------- 1 | - name: Run terraform 2 | hosts: sdwan_edge 3 | connection: local 4 | any_errors_fatal: true 5 | vars: 6 | project_path: "{{ terraform_project_path[infra][sdwan_component] }}" 7 | tasks: 8 | - assert: 9 | that: 10 | - sdwan_component == "edge_network" or sdwan_component == "edges" 11 | msg: "sdwan_component must be set to either 'edge_network' or 'edges'" 12 | 13 | - name: Generate Terraform variables file 14 | template: 15 | src: "terraform/{{ infra }}_{{ sdwan_component }}_tfvars.j2" 16 | dest: "{{ project_path }}/{{ infra }}_{{ sdwan_component }}_{{ inventory_hostname }}.tfvars" 17 | tags: [tfvars, plan] 18 | when: not (sdwan_component == "edge_network" and infra == "vmware") 19 | 20 | - name: Terraform plan 21 | community.general.terraform: 22 | project_path: "{{ project_path }}" 23 | state: planned 24 | plan_file: "{{ infra }}_{{ sdwan_component }}_{{ inventory_hostname }}.tfplan" 25 | workspace: "{{ infra }}_{{ sdwan_component }}_{{ inventory_hostname }}" 26 | variables_file: "{{ infra }}_{{ sdwan_component }}_{{ inventory_hostname }}.tfvars" 27 | force_init: yes 28 | tags: [plan, apply] 29 | when: not (sdwan_component == "edge_network" and infra == "vmware") 30 | 31 | - name: Terraform apply 32 | community.general.terraform: 33 | project_path: "{{ project_path }}" 34 | state: present 35 | plan_file: "{{ infra }}_{{ sdwan_component }}_{{ inventory_hostname }}.tfplan" 36 | workspace: "{{ infra }}_{{ sdwan_component }}_{{ inventory_hostname }}" 37 | variables_file: "{{ infra }}_{{ sdwan_component }}_{{ inventory_hostname }}.tfvars" 38 | tags: apply 39 | when: not (sdwan_component == "edge_network" and infra == "vmware") 40 | register: outputs 41 | 42 | - name: Save VPC ID 43 | set_fact: 44 | sdwan_network: "{{ outputs.outputs.vpc_id.value }}" 45 | 
cacheable: yes 46 | when: sdwan_component == "edge_network" and infra == "aws" 47 | 48 | - debug: 49 | msg: "cEdge external IP: {{ outputs.outputs.cedge_transport_public_ip.value }}" 50 | when: sdwan_component == "edges" and (infra == "aws" or infra == "gcp") 51 | 52 | - debug: 53 | msg: "cEdge IP: {{ outputs.outputs.cedge_transport_ip.value }}" 54 | when: sdwan_component == "edges" and infra == "vmware" 55 | 56 | - meta: refresh_inventory 57 | tags: always 58 | -------------------------------------------------------------------------------- /ansible/day_0/terraform-apply.yml: -------------------------------------------------------------------------------- 1 | - name: Run terraform 2 | hosts: localhost 3 | connection: local 4 | any_errors_fatal: true 5 | tasks: 6 | - assert: 7 | that: 8 | - sdwan_component == "network" or sdwan_component == "control" 9 | msg: "sdwan_component must be set to either 'network' or 'control'" 10 | 11 | - name: Generate Terraform variables file 12 | template: 13 | src: "terraform/{{ infra }}_{{ sdwan_component }}_tfvars.j2" 14 | dest: "{{ project_path }}/{{ infra }}_{{ sdwan_component }}.tfvars" 15 | tags: [tfvars, plan] 16 | when: sdwan_component is defined 17 | 18 | - name: Terraform plan 19 | community.general.terraform: 20 | project_path: "{{ project_path }}" 21 | state: planned 22 | plan_file: "{{ infra }}_{{ sdwan_component }}.tfplan" 23 | workspace: "{{ infra }}_{{ sdwan_component }}" 24 | variables_file: "{{ infra }}_{{ sdwan_component }}.tfvars" 25 | force_init: yes 26 | tags: [plan, apply] 27 | when: sdwan_component is defined 28 | 29 | - name: Terraform apply 30 | community.general.terraform: 31 | project_path: "{{ project_path }}" 32 | state: present 33 | plan_file: "{{ infra }}_{{ sdwan_component }}.tfplan" 34 | workspace: "{{ infra }}_{{ sdwan_component }}" 35 | variables_file: "{{ infra }}_{{ sdwan_component }}.tfvars" 36 | tags: apply 37 | when: sdwan_component is defined 38 | register: outputs 39 | 40 | - name: Save 
vBond and proxy IP 41 | set_fact: 42 | sdwan_vbond: "{{ outputs.outputs.vbond_ip.value }}" 43 | proxy_env: 44 | https_proxy: "http://{{ outputs.outputs.devbox_public_ip.value }}:8443" 45 | cacheable: yes 46 | when: sdwan_component == "network" 47 | 48 | - debug: 49 | msg: "vBond external IP: {{ sdwan_vbond }}\nProxy URL: {{ proxy_env }}" 50 | when: sdwan_component == "network" 51 | 52 | - name: Save vManage external IP 53 | set_fact: 54 | sdwan_vmanage: "{{ outputs.outputs.vmanages_vmanageEth1EIP.value[0] }}" 55 | cacheable: yes 56 | when: sdwan_component == "control" and infra == "aws" 57 | 58 | - debug: 59 | msg: "vManage external IP: {{ sdwan_vmanage }}\nvBond external IP: {{ sdwan_vbond }}" 60 | when: sdwan_component == "control" and infra == "aws" 61 | 62 | - meta: refresh_inventory 63 | tags: always 64 | -------------------------------------------------------------------------------- /ansible/day_1/activate-policy.yml: -------------------------------------------------------------------------------- 1 | - name: Activate central policy 2 | hosts: localhost 3 | gather_facts: no 4 | connection: local 5 | vars: 6 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 7 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 8 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 9 | tags: 10 | - policy 11 | environment: "{{ proxy_env }}" 12 | tasks: 13 | - name: Activate central policy 14 | vmanage_central_policy: 15 | user: "{{ ansible_user }}" 16 | host: "{{ vmanage_ip }}" 17 | password: "{{ ansible_password }}" 18 | state: activated 19 | name: "{{ sdwan_central_policy_name }}" 20 | wait: yes -------------------------------------------------------------------------------- /ansible/day_1/attach-template.yml: -------------------------------------------------------------------------------- 1 | - name: Attach Templates 2 | hosts: "{{ passed | default('sdwan') }}" 3 | connection: 
local 4 | gather_facts: no 5 | vars: 6 | ansible_async_dir: "/tmp/ansible_async" 7 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 8 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 9 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 10 | environment: "{{ hostvars['localhost']['proxy_env'] }}" 11 | tasks: 12 | - name: Validate edge network ranges 13 | assert: 14 | that: 15 | - (cidr_range | ansible.utils.ipaddr('prefix')) == 23 16 | when: passed == "sdwan_edge" and cidr_range is defined 17 | 18 | - name: Attach template to device 19 | vmanage_device_attachment: 20 | user: "{{ vmanage_user }}" 21 | host: "{{ vmanage_ip }}" 22 | password: "{{ vmanage_pass }}" 23 | uuid: "{{ sdwan_uuid | default(omit) }}" 24 | device: "{{ inventory_hostname }}" 25 | system_ip: "{{ sdwan_system_ip | default(omit) }}" 26 | site_id: "{{ sdwan_site_id | default(omit) }}" 27 | template: "{{ sdwan_template.name }}" 28 | variables: "{{ sdwan_template.variables | default(omit) }}" 29 | wait: yes 30 | state: present 31 | when: sdwan_template is defined 32 | async: 120 33 | poll: 10 34 | register: attachment_results 35 | tags: 36 | - attach -------------------------------------------------------------------------------- /ansible/day_1/config-sdwan.yml: -------------------------------------------------------------------------------- 1 | - import_playbook: import-templates.yml 2 | 3 | # - import_playbook: import-policy.yml 4 | 5 | # - import_playbook: attach-template.yml 6 | # vars: 7 | # passed: vsmart_hosts 8 | 9 | # - import_playbook: activate-policy.yml 10 | 11 | - import_playbook: attach-template.yml 12 | vars: 13 | passed: sdwan_edge 14 | -------------------------------------------------------------------------------- /ansible/day_1/deactivate-policy.yml: -------------------------------------------------------------------------------- 1 | - name: Deactivate central policy 2 | hosts: 
localhost 3 | gather_facts: no 4 | connection: local 5 | vars: 6 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 7 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 8 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 9 | tags: 10 | - policy 11 | tasks: 12 | - name: Deactivate central policy 13 | vmanage_central_policy: 14 | user: "{{ ansible_user }}" 15 | host: "{{ vmanage_ip }}" 16 | password: "{{ ansible_password }}" 17 | state: deactivated 18 | name: "{{ sdwan_central_policy_name }}" 19 | wait: no -------------------------------------------------------------------------------- /ansible/day_1/delete-templates.yml: -------------------------------------------------------------------------------- 1 | - name: Delete vManage Templates 2 | connection: local 3 | hosts: localhost 4 | vars: 5 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 6 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 7 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 8 | file: vmanage_templates.yml 9 | gather_facts: no 10 | tasks: 11 | - name: Get templates 12 | vmanage_template_facts: 13 | user: "{{ ansible_user }}" 14 | host: "{{ vmanage_ip }}" 15 | password: "{{ ansible_password }}" 16 | register: template_facts 17 | 18 | - name: Delete device templates 19 | vmanage_device_template: 20 | user: "{{ ansible_user }}" 21 | host: "{{ vmanage_ip }}" 22 | password: "{{ ansible_password }}" 23 | state: absent 24 | name: "{{ item.templateName }}" 25 | loop: "{{ template_facts.templates.device_templates }}" 26 | 27 | - name: Delete feature templates 28 | vmanage_feature_template: 29 | user: "{{ ansible_user }}" 30 | host: "{{ vmanage_ip }}" 31 | password: "{{ ansible_password }}" 32 | state: absent 33 | name: "{{ item.templateName }}" 34 | loop: "{{ template_facts.templates.feature_templates }}" 
35 | # 36 | # - debug: 37 | # var: policy_facts -------------------------------------------------------------------------------- /ansible/day_1/detach-template.yml: -------------------------------------------------------------------------------- 1 | - name: Detach Template 2 | hosts: "{{ passed | default('sdwan') }}" 3 | connection: local 4 | gather_facts: no 5 | vars: 6 | ansible_async_dir: "/tmp/ansible_async" 7 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 8 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 9 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 10 | tasks: 11 | 12 | - name: Detach template from device 13 | vmanage_device_attachment: 14 | user: "{{ ansible_user }}" 15 | host: "{{ vmanage_ip }}" 16 | password: "{{ ansible_password }}" 17 | device: "{{ inventory_hostname }}" 18 | wait: yes 19 | state: absent 20 | when: sdwan_template is defined 21 | async: 90 22 | poll: 10 23 | register: attachment_results 24 | -------------------------------------------------------------------------------- /ansible/day_1/export-policy.yml: -------------------------------------------------------------------------------- 1 | - name: Export vManage Policy Lists 2 | hosts: localhost 3 | connection: local 4 | vars: 5 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 6 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 7 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 8 | file: vmanage-policy.yml 9 | environment: "{{ proxy_env }}" 10 | gather_facts: no 11 | tasks: 12 | - vmanage_export_policy: 13 | user: "{{ vmanage_user }}" 14 | host: "{{ vmanage_ip }}" 15 | password: "{{ vmanage_pass }}" 16 | file: "{{ file }}" 17 | -------------------------------------------------------------------------------- /ansible/day_1/export-templates.yml: 
-------------------------------------------------------------------------------- 1 | - name: Export vManage Templates 2 | hosts: localhost 3 | connection: local 4 | vars: 5 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 6 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 7 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 8 | file: "/ansible/files/vmanage-templates.yml" 9 | environment: "{{ proxy_env }}" 10 | gather_facts: no 11 | tasks: 12 | - vmanage_template_export: 13 | user: "{{ vmanage_user }}" 14 | host: "{{ vmanage_ip }}" 15 | password: "{{ vmanage_pass }}" 16 | file: "{{ file }}" 17 | -------------------------------------------------------------------------------- /ansible/day_1/group_vars/all/placeholder: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/sdwan-devops/f1893cd70902de91a4ca6337f9b004caa71749ae/ansible/day_1/group_vars/all/placeholder -------------------------------------------------------------------------------- /ansible/day_1/import-policy.yml: -------------------------------------------------------------------------------- 1 | - name: Import vManage Policy Lists 2 | hosts: localhost 3 | connection: local 4 | tags: 5 | - policy 6 | vars: 7 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 8 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 9 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 10 | gather_facts: no 11 | environment: "{{ proxy_env }}" 12 | tasks: 13 | - vmanage_policy_import: 14 | user: "{{ ansible_user }}" 15 | host: "{{ vmanage_ip }}" 16 | password: "{{ ansible_password }}" 17 | file: "{{ vmanage_policy_file }}" 18 | when: vmanage_policy_file is defined 19 | --------------------------------------------------------------------------------
/ansible/day_1/import-templates.yml: -------------------------------------------------------------------------------- 1 | - name: Import vManage Feature/Device Templates 2 | hosts: localhost 3 | gather_facts: no 4 | connection: local 5 | vars: 6 | vmanage_host: "{{ groups.vmanage_hosts | first }}" 7 | vmanage_mgmt_interface: "{{ hostvars[vmanage_host].mgmt_interface | default('ansible_host') }}" 8 | vmanage_ip: "{{ hostvars[vmanage_host][vmanage_mgmt_interface] | ansible.utils.ipaddr('address') }}" 9 | environment: "{{ proxy_env }}" 10 | tasks: 11 | - vmanage_template_import: 12 | user: "{{ vmanage_user }}" 13 | host: "{{ vmanage_ip }}" 14 | password: "{{ vmanage_pass }}" 15 | file: "{{ vmanage_templates_file }}" 16 | when: vmanage_templates_file is defined 17 | -------------------------------------------------------------------------------- /ansible/files/vmanage-templates.yml: -------------------------------------------------------------------------------- 1 | vmanage_device_templates: 2 | - templateId: 9b29a576-1807-4b0d-9c2a-545eb1172689 3 | templateName: branch-c8000v 4 | templateDescription: Branch C8000v Template 5 | deviceType: vedge-C8000V 6 | deviceRole: sdwan-edge 7 | configType: template 8 | factoryDefault: false 9 | policyId: '' 10 | featureTemplateUidRange: [] 11 | draftMode: false 12 | connectionPreferenceRequired: true 13 | connectionPreference: true 14 | templateClass: cedge 15 | attached_devices: [] 16 | input: 17 | columns: 18 | - title: Prefix(vpn_ipv4_ip_prefix) 19 | property: /1/vpn-instance/ip/route/vpn_ipv4_ip_prefix/prefix 20 | variable: vpn_ipv4_ip_prefix 21 | - title: Address(lan_next_hop_ipaddress) 22 | property: /1/vpn-instance/ip/route/vpn_ipv4_ip_prefix/next-hop/lan_next_hop_ipaddress/address 23 | variable: lan_next_hop_ipaddress 24 | - title: Interface Name(bootstrap_interface) 25 | property: /1/bootstrap_interface/interface/if-name 26 | variable: bootstrap_interface 27 | - title: Color(vpn_if_tunnel_color_value) 28 | property: 
/0/GigabitEthernet1/interface/tunnel-interface/color/value 29 | variable: vpn_if_tunnel_color_value 30 | - title: Hostname(host-name) 31 | property: //system/host-name 32 | variable: host-name 33 | - title: System IP(system-ip) 34 | property: //system/system-ip 35 | variable: system-ip 36 | - title: Site ID(site-id) 37 | property: //system/site-id 38 | variable: site-id 39 | data: [] 40 | generalTemplates: 41 | - templateName: Factory_Default_AAA_CISCO_Template 42 | templateType: cedge_aaa 43 | - templateName: Default_BFD_Cisco_V01 44 | templateType: cisco_bfd 45 | - templateName: Default_GCP_C8000V_OMP_IPv46_V01 46 | templateType: cisco_omp 47 | - templateName: Default_Security_Cisco_V01 48 | templateType: cisco_security 49 | - templateName: Default_System_Cisco_V01 50 | templateType: cisco_system 51 | subTemplates: 52 | - templateName: Default_Logging_Cisco_V01 53 | templateType: cisco_logging 54 | - templateName: Default_GCP_C8000V_VPN0_V01 55 | templateType: cisco_vpn 56 | subTemplates: 57 | - templateName: Default_GCP_C8000V_VPN0_INTF_GE1_V01 58 | templateType: cisco_vpn_interface 59 | - templateName: Default_GCP_C8000V_VPN512_V01 60 | templateType: cisco_vpn 61 | - templateName: Default_SDBranch_Service_VPN1_V01 62 | templateType: cisco_vpn 63 | subTemplates: 64 | - templateName: Default_BOOTSTRAP_DHCP_V01 65 | templateType: cisco_vpn_interface 66 | - templateName: Factory_Default_Global_CISCO_Template 67 | templateType: cedge_global 68 | vmanage_feature_templates: [] 69 | -------------------------------------------------------------------------------- /ansible/inventory/hq1/group_vars/all/ping_tests.yml: -------------------------------------------------------------------------------- 1 | ping_tests: 2 | # 3 | # Test colo-to-site 4 | # 5 | - dst_ip: '10.0.1.10' 6 | src_ip: '192.168.1.1' 7 | vedge: site1-cedge1 8 | vpn: '1' 9 | pass: yes 10 | - dst_ip: '10.0.1.10' 11 | vedge: site2-vedge1 12 | vpn: '1' 13 | pass: yes 14 | # 15 | # Test site-to-site 16 | # 17 | 
- dst_ip: '192.168.2.10' 18 | src_ip: '192.168.1.1' 19 | vedge: site1-cedge1 20 | vpn: '1' 21 | pass: yes 22 | - dst_ip: '192.168.1.10' 23 | vedge: site2-vedge1 24 | vpn: '1' 25 | pass: yes 26 | # 27 | # Test offsite 28 | # 29 | - dst_ip: '8.8.8.8' 30 | src_ip: '192.168.1.1' 31 | vedge: site1-cedge1 32 | vpn: '1' 33 | pass: no 34 | - dst_ip: '8.8.8.8' 35 | vedge: site2-vedge1 36 | vpn: '1' 37 | pass: no 38 | -------------------------------------------------------------------------------- /ansible/inventory/hq1/group_vars/all/system.yml: -------------------------------------------------------------------------------- 1 | domain_name: virl.local 2 | 3 | ntp_servers: 4 | - '192.5.41.40' 5 | - '192.5.41.41' 6 | 7 | name_servers: 8 | - '208.67.222.222' 9 | - '208.67.220.220' 10 | 11 | ntp_server_list: 12 | - { ip-address: '192.5.41.40' } 13 | - { ip-address: '192.5.41.41' } 14 | 15 | login_banner: This system is for the use of authorized clients only. -------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/hq-dc-rtr1/network.yml: -------------------------------------------------------------------------------- 1 | interfaces: 2 | GigabitEthernet1: 3 | description: OOB Management 4 | vrf: Mgmt-intf 5 | enabled: true 6 | ip: 7 | primary: dhcp 8 | GigabitEthernet2: 9 | description: DC Border 10 | enabled: true 11 | ip: 12 | primary: 10.0.255.2/30 13 | GigabitEthernet3: 14 | description: DC LAN 15 | enabled: true 16 | ip: 17 | primary: 10.0.1.1/24 18 | GigabitEthernet4: 19 | description: hq-cedge1 20 | enabled: true 21 | ip: 22 | primary: 10.0.255.5/30 23 | 24 | router: 25 | ospf: 26 | id: 65001 27 | router_id: 10.0.255.5 28 | # default_information_originate: true 29 | networks: 30 | - network: 10.0.255.0/30 31 | area: 0 32 | - network: 10.0.255.4/30 33 | area: 0 34 | - network: 10.0.1.0/24 35 | area: 0 -------------------------------------------------------------------------------- 
/ansible/inventory/hq1/host_vars/hq-dc-rtr1/virl.yml: -------------------------------------------------------------------------------- 1 | virl_config_template: ios/virl.j2 2 | -------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/hq-rtr1/network.yml: -------------------------------------------------------------------------------- 1 | interfaces: 2 | GigabitEthernet1: 3 | description: OOB Management 4 | vrf: Mgmt-intf 5 | enabled: true 6 | ip: 7 | primary: dhcp 8 | GigabitEthernet2: 9 | description: Internet 10 | enabled: true 11 | ip: 12 | primary: 172.20.0.6/30 13 | GigabitEthernet3: 14 | description: DMZ 15 | enabled: true 16 | ip: 17 | primary: 10.0.0.1/24 18 | GigabitEthernet4: 19 | description: DC Core 20 | enabled: true 21 | ip: 22 | primary: 10.0.255.1/30 23 | 24 | static_routes: 25 | global: 26 | - network: 10.0.0.0/16 27 | fwd_list: 28 | - fwd: Null0 29 | 30 | router: 31 | ospf: 32 | id: 65001 33 | router_id: 10.0.255.1 34 | default_information_originate: true 35 | networks: 36 | - network: 10.0.255.0/30 37 | area: 0 38 | bgp: 39 | id: 65001 40 | log_neighbor_changes: true 41 | router_id: 172.20.0.6 42 | neighbors: 43 | - id: 172.20.0.5 44 | remote_as: 65000 45 | address_family: 46 | global: 47 | ipv4: 48 | neighbors: 49 | - id: 172.20.0.5 50 | activate: true 51 | networks: 52 | - network: 10.0.0.0/16 -------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/hq-rtr1/virl.yml: -------------------------------------------------------------------------------- 1 | virl_config_template: ios/virl.j2 -------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/internet/network.yml: -------------------------------------------------------------------------------- 1 | interfaces: 2 | GigabitEthernet1: 3 | vrf: Mgmt-intf 4 | enabled: true 5 | ip: 6 | primary: dhcp 7 | GigabitEthernet2: 8 | enabled: 
true 9 | ip: 10 | primary: 172.20.0.5/30 11 | GigabitEthernet3: 12 | enabled: true 13 | ip: 14 | primary: 172.20.0.9/30 15 | GigabitEthernet4: 16 | enabled: true 17 | ip: 18 | primary: 172.20.0.13/30 19 | GigabitEthernet5: 20 | enabled: true 21 | ip: 22 | primary: 172.20.0.17/30 23 | 24 | static_routes: 25 | global: 26 | - network: 0.0.0.0/0 27 | fwd_list: 28 | - fwd: Null0 29 | - network: 172.16.0.0/12 30 | fwd_list: 31 | - fwd: Null0 32 | 33 | router: 34 | bgp: 35 | id: 65000 36 | log_neighbor_changes: true 37 | router_id: 172.20.0.5 38 | neighbors: 39 | - id: 172.20.0.6 40 | remote_as: 65001 41 | - id: 172.20.0.18 42 | remote_as: 65002 43 | address_family: 44 | global: 45 | ipv4: 46 | neighbors: 47 | - id: 172.20.0.6 48 | activate: true 49 | - id: 172.20.0.18 50 | activate: true 51 | networks: 52 | - network: 172.16.0.0/12 53 | -------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/internet/virl.yml: -------------------------------------------------------------------------------- 1 | virl_config_template: ios/virl.j2 2 | -------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/sp1-host1/network.yml: -------------------------------------------------------------------------------- 1 | interfaces: 2 | eth1: 3 | enabled: true 4 | ip: 5 | primary: 10.100.1.10/24 6 | 7 | static_routes: 8 | global: 9 | - network: 10.0.0.0/16 10 | fwd_list: 11 | - fwd: 10.100.1.1 12 | - network: 10.100.0.0/16 13 | fwd_list: 14 | - fwd: 192.168.1.1 15 | - network: 172.20.0.0/16 16 | fwd_list: 17 | - fwd: 10.100.1.1 18 | - network: 192.168.0.0/16 19 | fwd_list: 20 | - fwd: 10.100.1.1 -------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/sp1-host1/virl.yml: -------------------------------------------------------------------------------- 1 | virl_config_template: virl/lxc.j2 
-------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/sp1-rtr1/network.yml: -------------------------------------------------------------------------------- 1 | interfaces: 2 | GigabitEthernet1: 3 | vrf: Mgmt-intf 4 | enabled: true 5 | ip: 6 | primary: dhcp 7 | GigabitEthernet2: 8 | enabled: true 9 | ip: 10 | primary: 172.20.0.18/30 11 | GigabitEthernet3: 12 | enabled: true 13 | ip: 14 | primary: 10.100.1.1/24 15 | 16 | static_routes: 17 | global: 18 | - network: 10.100.0.0/16 19 | fwd_list: 20 | - fwd: Null0 21 | 22 | router: 23 | bgp: 24 | id: 65002 25 | log_neighbor_changes: true 26 | router_id: 172.20.0.18 27 | neighbors: 28 | - id: 172.20.0.17 29 | remote_as: 65000 30 | address_family: 31 | global: 32 | ipv4: 33 | neighbors: 34 | - id: 172.20.0.17 35 | activate: true 36 | networks: 37 | - network: 10.100.0.0/16 -------------------------------------------------------------------------------- /ansible/inventory/hq1/host_vars/sp1-rtr1/virl.yml: -------------------------------------------------------------------------------- 1 | virl_config_template: ios/virl.j2 -------------------------------------------------------------------------------- /ansible/inventory/hq1/network.yml: -------------------------------------------------------------------------------- 1 | all: 2 | vars: 3 | ansible_user: admin 4 | ansible_password: admin 5 | ansible_network_os: ios 6 | netconf_template_os: ios 7 | children: 8 | network: 9 | vars: 10 | virl_image_definition: "{{ csr1000v_image }}" 11 | children: 12 | internet_routers: 13 | hosts: 14 | internet: 15 | sp1-rtr1: 16 | hq-rtr1: 17 | router: 18 | hosts: 19 | internet: 20 | sp1-rtr1: 21 | hq-rtr1: 22 | hq-dc-rtr1: 23 | children: 24 | colo_routers: 25 | hosts: 26 | hq-rtr1: 27 | -------------------------------------------------------------------------------- /ansible/inventory/hq1/virl.yml: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/env python
"""Ansible dynamic inventory generated from Terraform state files.

Reads the vmware control/edges Terraform workspace state files and
writes an Ansible dynamic-inventory JSON document to stdout.
"""

import os
import sys
import json
import argparse


# Workspace state files produced by the vmware control/edges Terraform runs.
STATE_FILES = [
    './terraform-sdwan/vmware/terraform.tfstate.d/control/terraform.tfstate',
    './terraform-sdwan/vmware/terraform.tfstate.d/edges/terraform.tfstate'
]


def parse_args():
    """Parse the standard dynamic-inventory CLI options (--list / --host)."""
    parser = argparse.ArgumentParser()

    parser.add_argument('--list', action='store_true',
                        help='List host records from NIOS for use in Ansible')

    parser.add_argument('--host',
                        help='List meta data about single host (not used)')

    return parser.parse_args()


def build_inventory(state_files=None):
    """Return the inventory dict built from *state_files*.

    Each state file is expected to expose a ``default_ip_addresses``
    Terraform output (a list of ``{name, default_ip_address}`` mappings).
    Files that are missing, unparsable, or lacking that output are
    skipped, so a partially provisioned environment still yields a
    (possibly empty) inventory instead of failing.
    """
    if state_files is None:
        state_files = STATE_FILES

    hostvars = {}
    all_hosts = []

    # 'all' and 'vmware_hosts' deliberately share the same host list object.
    inventory = {
        '_meta': {
            'hostvars': hostvars
        },
        'all': {
            'hosts': all_hosts,
        },
        'vmware_hosts': {
            'hosts': all_hosts,
        }
    }

    for state_file in state_files:
        try:
            # Context manager closes the handle even on parse errors
            # (the original opened the file and never closed it).
            with open(state_file) as state:
                data = json.load(state)
            default_ip_addresses = data['outputs']['default_ip_addresses']['value']
            for item in default_ip_addresses:
                all_hosts.append(item['name'])
                hostvars[item['name']] = {'ansible_host': item['default_ip_address']}
        except (OSError, ValueError, KeyError, TypeError):
            # Narrowed from a bare 'except: pass': skip only the expected
            # failure modes — missing file (OSError), invalid JSON
            # (json.JSONDecodeError is a ValueError subclass), or an
            # unexpected state-file schema (KeyError/TypeError).
            continue

    return inventory


def main():
    """Entry point: emit the inventory as JSON on stdout and exit 0."""
    parse_args()
    sys.stdout.write(json.dumps(build_inventory(), indent=4))
    sys.exit(0)


if __name__ == '__main__':
    main()
20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for sdwan_tests -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for sdwan_tests -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Cisco Public Sector 3 | description: Role for checking SD-WAN 4 | company: Cisco Systems Inc. 5 | 6 | license: Cisco Sample Code License 7 | 8 | min_ansible_version: 2.4 9 | 10 | # If this a Container Enabled role, provide the minimum Ansible Container version. 11 | # min_ansible_container_version: 12 | 13 | # 14 | # Provide a list of supported platforms, and for each platform a list of versions. 15 | # If you don't wish to enumerate all versions for a particular platform, use 'all'. 
16 | # To view available platforms and versions (or releases), visit: 17 | # https://galaxy.ansible.com/api/v1/platforms/ 18 | # 19 | # platforms: 20 | # - name: Fedora 21 | # versions: 22 | # - all 23 | # - 25 24 | # - name: SomePlatform 25 | # versions: 26 | # - all 27 | # - 1.0 28 | # - 7 29 | # - 99.99 30 | platforms: 31 | - name: all 32 | 33 | galaxy_tags: [] 34 | # List tags for your role here, one per line. A tag is a keyword that describes 35 | # and categorizes the role. Users find roles by searching for tags. Be sure to 36 | # remove the '[]' above, if you add tags to this list. 37 | # 38 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 39 | # Maximum 20 tags per role. 40 | 41 | dependencies: [] 42 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 43 | # if you add dependencies to this list. 44 | -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/tasks/ping-cedge.yml: -------------------------------------------------------------------------------- 1 | - name: Run the ping 2 | ios_ping: 3 | provider: 4 | host: "{{ hostvars[ping_vedge].ansible_host }}" 5 | username: admin 6 | password: admin 7 | source: "{{ ping_src_ip }}" 8 | dest: "{{ ping_dst_ip }}" 9 | vrf: "{{ ping_vpn }}" 10 | state: "{{ 'present' if ping_pass else 'absent' }}" 11 | connection: network_cli 12 | delegate_to: localhost 13 | register: ping 14 | 15 | - name: Record results 16 | set_fact: 17 | ping_rx: "{{ ping.packets_rx }}" 18 | ping_tx: "{{ ping.packets_tx }}" 19 | ping_loss: "{{ ping.packet_loss }}" 20 | ping_rtt_min: "{{ ping.rtt.min }}" 21 | ping_rtt_max: "{{ ping.rtt.max }}" 22 | ping_rtt_avg: "{{ ping.rtt.avg }}" -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/tasks/ping-test.yml: -------------------------------------------------------------------------------- 1 | - name: Determine edge type 2 | 
set_fact: 3 | edge_type: "{{ 'vedge' if hostvars[ping_vedge].sdwan_model is regex('^vedge-[c0-9]') else 'cedge' }}" 4 | 5 | - include_tasks: 6 | file: "ping-{{ edge_type }}.yml" 7 | 8 | - name: Output ping results 9 | debug: 10 | msg: "{{ ping_vedge }}(VPN {{ ping_vpn }}) => {{ ping_dst_ip }}: {{ ping_rx }}/{{ ping_tx }}, {{ ping_loss }} loss, Pass(actual/expected) {{ actual_result }}/{{ expected_result }}" 11 | failed_when: actual_result != expected_result 12 | ignore_errors: False 13 | vars: 14 | actual_result: "{{ True if ping_loss != '100%' else False }}" 15 | expected_result: "{{ ping_pass }}" 16 | -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/tasks/ping-vedge.yml: -------------------------------------------------------------------------------- 1 | - name: Run the ping 2 | vmanage_nping: 3 | user: "{{ vmanage_user }}" 4 | host: "{{ vmanage_ip }}" 5 | password: "{{ vmanage_pass }}" 6 | dst_ip: "{{ ping_dst_ip }}" 7 | vedge: "{{ ping_vedge }}" 8 | vpn: "{{ ping_vpn }}" 9 | count: "{{ ping_count | default(5) }}" 10 | rapid: no 11 | register: nping 12 | delegate_to: localhost 13 | 14 | - name: Record results 15 | set_fact: 16 | ping_tx: "{{ nping.json.packetsTransmitted }}" 17 | ping_rx: "{{ nping.json.rawOutput | select('search', 'Echo reply') | list | length | default('0') }}" 18 | ping_loss: "{{ (100 - (100 * (ping_rx|int / ping_tx|int))) | int | string }}%" 19 | ping_rtt_min: "{{ nping.json.minRoundTrip }}" 20 | ping_rtt_max: "{{ nping.json.maxRoundTrip }}" 21 | ping_rtt_avg: "{{ nping.json.avgRoundTrip }}" 22 | -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/tests/test.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - sdwan-tests -------------------------------------------------------------------------------- /ansible/roles/sdwan_tests/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for sdwan-tests -------------------------------------------------------------------------------- /ansible/show-inventory.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | gather_facts: no 3 | tasks: 4 | - debug: 5 | var: hostvars[inventory_hostname] -------------------------------------------------------------------------------- /bin/config_build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFTEST_VERSION=${CONFTEST_VERSION:-v0.56.0} 4 | 5 | set -o pipefail 6 | 7 | # Uncomment the line below if you want to enforce the OPA rules in `config/policy/config.rego` that check the AWS instance types for given SD-WAN versions. 8 | # See config/policy/README for further explanation. 9 | #set -e 10 | 11 | echo "[i] Running conftest on config.yaml ..." 12 | docker run --rm -v $PROJ_ROOT/config:/project openpolicyagent/conftest:${CONFTEST_VERSION} test config.yaml -d policy/data.yaml 13 | echo "" 14 | 15 | echo "[i] Rendering config.yaml to Ansible configuration ..." 
16 | echo "" 17 | ./play.sh -c render -u 18 | -------------------------------------------------------------------------------- /bin/configure.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | 5 | ./play.sh /ansible/day_1/config-sdwan.yml 6 | -------------------------------------------------------------------------------- /bin/conftest.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | CONFTEST_VERSION=${CONFTEST_VERSION:-v0.56.0} 4 | 5 | docker run --rm -v $PROJ_ROOT/config:/project openpolicyagent/conftest:${CONFTEST_VERSION} test config.yaml -d policy/data.yaml 6 | -------------------------------------------------------------------------------- /bin/decrypt_secrets.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | 5 | IMAGE=${IMAGE:-ghcr.io/ciscodevnet/sdwan-devops:main} 6 | 7 | if [ -z ${PROJ_ROOT+x} ]; then echo "PROJ_ROOT is unset, please source the env script"; exit 1; else echo "PROJ_ROOT is set to '$PROJ_ROOT'"; fi 8 | 9 | mkdir -p $PROJ_ROOT/ansible/licenses 10 | echo "$VAULT_PASS" > $PROJ_ROOT/ansible/files/vault-password-file 11 | docker run -it --rm -v $PROJ_ROOT/ansible:/ansible --env PWD="/ansible" $IMAGE \ 12 | ansible-vault decrypt --vault-password-file /ansible/files/vault-password-file \ 13 | /ansible/files/serialFile.viptela \ 14 | --output /ansible/licenses/serialFile.viptela 15 | 16 | docker run -it --rm -v $PROJ_ROOT/ansible:/ansible -v $PROJ_ROOT/config:/config --env PWD="/ansible" $IMAGE \ 17 | ansible-vault decrypt --vault-password-file /ansible/files/vault-password-file \ 18 | /ansible/files/config.yaml \ 19 | --output /config/config.yaml 20 | -------------------------------------------------------------------------------- /bin/delete_cp.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./play.sh /ansible/day_0/clean-control-plane.yml 4 | -------------------------------------------------------------------------------- /bin/delete_edges.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | 5 | ./play.sh /ansible/day_0/clean-edges.yml 6 | -------------------------------------------------------------------------------- /bin/initialize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | export AWS_PAGER="" 3 | export VAULT_ADDR=$VAULT_ADDR 4 | export VAULT_TOKEN=$SSH_TOKEN 5 | vault login --no-print $VAULT_TOKEN 6 | python3 ssh_key_gen.py 7 | #python3 vault_api.py 8 | #python3 vault_cli.py -------------------------------------------------------------------------------- /bin/install_ca.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | 5 | if [ -z ${PROJ_ROOT+x} ]; then echo "PROJ_ROOT is unset, please source the env script"; exit 1; else echo "PROJ_ROOT is set to '$PROJ_ROOT'"; fi 6 | 7 | rm -rf $PROJ_ROOT/myCA 8 | ./play.sh "/ansible/day_-1/build-ca.yml" 9 | -------------------------------------------------------------------------------- /bin/install_cp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | 5 | ./play.sh /ansible/day_0/build-control-plane.yml 6 | -------------------------------------------------------------------------------- /bin/install_edges.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | 5 | ./play.sh /ansible/day_0/onboard-edges.yml 6 | -------------------------------------------------------------------------------- 
/bin/minimal_env.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )"; 4 | export PROJ_ROOT=$SCRIPT_DIR/.. 5 | 6 | export AWS_PROFILE="default" 7 | export AWS_ACCESS_KEY_ID=$(aws configure get $AWS_PROFILE.aws_access_key_id) 8 | export AWS_SECRET_ACCESS_KEY=$(aws configure get $AWS_PROFILE.aws_secret_access_key) 9 | export AWS_SESSION_TOKEN=$(aws configure get $AWS_PROFILE.aws_session_token) 10 | 11 | # You need only one of the two following variables. The first one is best for 12 | # local runs, and is more secure. The second one is the easiest option for CI/CD 13 | # and other automation 14 | # export GOOGLE_OAUTH_ACCESS_TOKEN=$(gcloud auth print-access-token) 15 | # export GOOGLE_CREDENTIALS=$(cat key.json | tr -s '\n' ' ') 16 | 17 | # Azure credentials, if you have a service principal configured 18 | # export ARM_SUBSCRIPTION_ID="00000000-0000-0000-0000-000000000000" 19 | # export ARM_TENANT_ID="00000000-0000-0000-0000-000000000000" 20 | # export ARM_CLIENT_ID="00000000-0000-0000-0000-000000000000" 21 | # export ARM_CLIENT_SECRET="00000000-0000-0000-0000-000000000000" 22 | 23 | # TODO: Remove value before commit 24 | export VAULT_PASS= 25 | 26 | export CONFIG_BUILDER_METADATA="../config/metadata.yaml" 27 | 28 | -------------------------------------------------------------------------------- /bin/play.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | IMAGE=${IMAGE:-ghcr.io/ciscodevnet/sdwan-devops:main} 3 | OPTIONS="" 4 | 5 | if [[ ! 
-z "$ANSIBLE_VAULT_PASSWORD_FILE" ]]; then 6 | OPTIONS="--env ANSIBLE_VAULT_PASSWORD_FILE=/tmp/vault.pw -v $ANSIBLE_VAULT_PASSWORD_FILE:/tmp/vault.pw" 7 | fi 8 | 9 | OPTION_LIST=( \ 10 | "AWS_ACCESS_KEY_ID" \ 11 | "AWS_SECRET_ACCESS_KEY" \ 12 | "AWS_SESSION_TOKEN" \ 13 | "ARM_CLIENT_ID" \ 14 | "ARM_CLIENT_SECRET" \ 15 | "ARM_SUBSCRIPTION_ID" \ 16 | "ARM_TENANT_ID" \ 17 | "GOOGLE_OAUTH_ACCESS_TOKEN" \ 18 | "GCP_PROJECT" \ 19 | "PROJ_ROOT" \ 20 | "CONFIG_BUILDER_METADATA" 21 | ) 22 | 23 | for OPTION in ${OPTION_LIST[*]}; do 24 | if [[ ! -z "${!OPTION}" ]]; then 25 | OPTIONS="$OPTIONS --env $OPTION=${!OPTION}" 26 | fi 27 | done 28 | 29 | OPTIONS="$OPTIONS --env ANSIBLE_ROLES_PATH=/ansible/roles --env ANSIBLE_STDOUT_CALLBACK=debug" 30 | 31 | while getopts ":dlc" opt; do 32 | case $opt in 33 | d) 34 | docker run -it --rm -v $PROJ_ROOT/ansible:/ansible \ 35 | -v $PROJ_ROOT/terraform-sdwan:/terraform-sdwan \ 36 | -v $PROJ_ROOT/sdwan-edge:/sdwan-edge \ 37 | -v $PROJ_ROOT/config:/config \ 38 | -v $PWD/../python-viptela:/python-viptela \ 39 | --env PWD="/ansible" \ 40 | --env USER="$USER" \ 41 | $OPTIONS \ 42 | $IMAGE /bin/bash 43 | exit 44 | ;; 45 | l) 46 | docker run -it --rm -v $PROJ_ROOT/ansible:/ansible \ 47 | --env PWD="/ansible" \ 48 | --env USER="$USER" \ 49 | $OPTIONS \ 50 | $IMAGE ansible-lint --offline 51 | exit 52 | ;; 53 | c) 54 | shift $((OPTIND-1)) 55 | docker run -it --rm -v $PROJ_ROOT/ansible:/ansible \ 56 | -v $PROJ_ROOT/terraform-sdwan:/terraform-sdwan \ 57 | -v $PROJ_ROOT/sdwan-edge:/sdwan-edge \ 58 | -v $PROJ_ROOT/config:/config \ 59 | --env PWD="/ansible" \ 60 | --env USER="$USER" \ 61 | $OPTIONS \ 62 | $IMAGE sdwan_config_build "$@" 63 | exit 64 | ;; 65 | esac 66 | done 67 | docker run -it --rm -v $PROJ_ROOT/ansible:/ansible \ 68 | -v $PROJ_ROOT/terraform-sdwan:/terraform-sdwan \ 69 | -v $PROJ_ROOT/sdwan-edge:/sdwan-edge \ 70 | `# Uncomment the following line if you are using a container image with the Azure CLI included and want to deploy a cEdge on 
Azure` \ 71 | `#-v $HOME/.azure:/root/.azure` \ 72 | --env PWD="/ansible" \ 73 | --env USER="$USER" \ 74 | --env GOOGLE_CREDENTIALS="$GOOGLE_CREDENTIALS" \ 75 | $OPTIONS \ 76 | $IMAGE ansible-playbook "$@" 77 | -------------------------------------------------------------------------------- /bin/ssh_key_gen.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import json, re, sys, os, json, time, logging, requests, urllib3 3 | from requests.structures import CaseInsensitiveDict 4 | urllib3.disable_warnings() 5 | from requests.structures import CaseInsensitiveDict 6 | import subprocess 7 | from subprocess import call, check_output 8 | 9 | #Passing in env vars from build container 10 | VAULT_ADDR = os.getenv('VAULT_ADDRR') 11 | VAULT_TOKEN = os.getenv('SSH_TOKEN') #This gets the vault token from the ephemeral build container 12 | 13 | vars='vars.py' 14 | import vars 15 | from vars import * 16 | 17 | outfile_vars="vars" 18 | sg_name=name 19 | 20 | #1 - Create a Key Pair 21 | keypair_name=name 22 | outfile_key_pair = 'keypair_name' + '.json' 23 | 24 | #Inject the vault var vals into the ephemeral oci build container 25 | 26 | VAULT_ADDR = os.getenv('VAULT_ADDR') 27 | VAULT_TOKEN = os.getenv('SSH_TOKEN') 28 | #VAULT_TOKEN = os.getenv('VAULT_TOKEN') 29 | #Writing the AWS SSH Key to the vault 30 | url = "http://prod-vault.devops-ontap.com:8200/v1/concourse/sdwan/" + name + "/" + "key_name" 31 | headers = CaseInsensitiveDict() 32 | headers["X-Vault-Token"] = VAULT_TOKEN 33 | headers["Content-Type"] = "application/json" 34 | #data = f'{{"token": "{TOKEN}"}}' 35 | data_json = {"key_name": name } 36 | resp = requests.post(url, headers=headers, json=data_json) 37 | print(resp.status_code) 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /bin/vars.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 
NAME="us-west=2a" 3 | SDWAN_CONTROL_INFRA="aws" 4 | AWS_PROFILE="default" 5 | REGION=$REGION 6 | 7 | #amis are for us-west-2 8 | VMANAGE_AMI="ami-0f727aeff8bfca1be" 9 | VMANAGE_INSTANCE_TYPE="t2.xlarge" 10 | VBOND_AMI="ami-0669c73a744a9071a" 11 | VBOND_INSTANCE_TYPE="t2.medium" 12 | VSMART_AMI="ami-0b3dcbdd1621b4819" 13 | VSMART_INSTANCE_TYPE="t2.medium" 14 | 15 | # Example to generate a random password 16 | # TODO save it somewhere? 17 | VMANAGE_USERNAME="admin" 18 | VMANAGE_PASS="$(openssl rand -base64 12)" 19 | VMANAGE_ENCRYPTED_PASS="$(echo "$VMANAGE_PASS" | openssl passwd -6 -stdin)" 20 | SDWAN_CA_PASSPHRASE="$(openssl rand -base64 15)" 21 | 22 | # Distinguishing single and double quotes is very important for this to work 23 | ACL_RANGES_IPV4_BASE64='"0.0.0.0/1", "128.0.0.0/1"' 24 | ACL_RANGES_IPV6_BASE64=echo '"::/0"' 25 | 26 | # SDWAN_DATACENTER is the generic variable which is the region for AWS or the 27 | # vSphere datacenter, depending on the infra 28 | SDWAN_DATACENTER=$AWS_REGION 29 | # Terraform for AWS has some computation built-in, hence the below values 30 | # TODO document static addressing 31 | NETWORK_CIDR=10.128.0.0/22 32 | VMANAGE1_IP=10.128.1.11/24 33 | VBOND1_IP=10.128.1.12/24 34 | VSMART1_IP=10.128.1.13/24 35 | VPN0_GATEWAY=10.128.1.1 36 | 37 | # This should be the VPC ID eventually 38 | #VPN0_PORTGROUP="cpn-rtp-colab4" 39 | #VPN512_PORTGROUP="cpn-rtp-colab4" 40 | #SERVICEVPN_PORTGROUP="cpn-rtp-colab4" 41 | 42 | HQ_EDGE1_IP="1.1.1.4/24" 43 | SITE1_EDGE1_IP="1.1.1.5/24" 44 | SITE2_EDGE1_IP="1.1.1.6/24" 45 | 46 | IOSXE_SDWAN_IMAGE="iosxe-sdwan-16.12.5" 47 | 48 | VIPTELA_VERSION="20.8.1" 49 | 50 | VMANAGE_ORG="CIDR_SDWAN_WORKSHOPS" 51 | 52 | CLOUDINIT_TYPE="v2" 53 | -------------------------------------------------------------------------------- /bin/vault_cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | export AWS_PAGER="" 3 | #This is required for vault 4 | setcap cap_ipc_lock= 
/usr/bin/vault 5 | export VAULT_ADDR=$VAULT_ADDR 6 | export VAULT_TOKEN=$SSH_TOKEN 7 | vault login --no-print $VAULT_TOKEN 8 | vault kv get --field=ssh-key concourse/sdwabn/$NAME >> sshkey.pem 9 | SSHKEY='sshkey.pem' 10 | #vault kv put concourse/sdwan/$NAME/ssh-key ssh-key=@$PRIVATE_KEY 11 | 12 | -------------------------------------------------------------------------------- /bin/vpn.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o pipefail 4 | 5 | if [[ -n "$VPN_GW" && -n "$VPN_USER" && -n "$VPN_PASS" ]]; then 6 | echo "$VPN_PASS" | sudo openconnect --background --passwd-on-stdin --user=$VPN_USER $VPN_GW 7 | if [ -n "$VPN_HOST" ]; then 8 | ping -c 3 $VPN_HOST 9 | fi 10 | else 11 | echo "OpenConnect VPN configuration required! Set VPN_GW, VPN_USER, VPN_PASS and VPN_HOST environment variables" 12 | exit 1 13 | fi 14 | -------------------------------------------------------------------------------- /bin/vsphere_env_CoLAB.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # Exporting with variable names that match the Terraform convention makes it possible to use the same 3 | # environment variables with Packer. 4 | #The HX1 cluster is more reliable, but requires admin access, so be very careful! 5 | #TODO, at some point, in a not too distant future, these variable names will need to be normalised across domains 6 | 7 | SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]:-$0}"; )" &> /dev/null && pwd 2> /dev/null; )"; 8 | 9 | export PROJ_ROOT=$SCRIPT_DIR/.. 
10 | export SDWAN_CONTROL_INFRA="vmware" 11 | 12 | #export TF_VAR_vsphere_username="nsowatsk@ciscops.net" 13 | export TF_VAR_vsphere_user="nsowatsk@ciscops.net" 14 | 15 | export TF_VAR_vsphere_password="EZ3rZ2SMgvRqb^rI}7p[" 16 | 17 | export TF_VAR_vsphere_server="cpn-rtp-vc1.ciscops.net" 18 | #export TF_VAR_vsphere_cluster="cpn-rtp-hx1" 19 | export TF_VAR_cluster="cpn-rtp-hx1" 20 | 21 | #export TF_VAR_vsphere_datacenter="RTP" 22 | export TF_VAR_datacenter="RTP" 23 | 24 | export TF_VAR_vsphere_content_library_datastore="cpn-rtp-hx1-datastore1" 25 | export TF_VAR_vsphere_content_library="SD-WAN" 26 | export TF_VAR_vsphere_network="cpn-rtp-colab4" 27 | export TF_VAR_host_system_id="192.133.177.42" 28 | #export TF_VAR_host_system_id="192.133.177.64" 29 | #export TF_VAR_vsphere_datastore="cpn-rtp-hx1-datastore1" 30 | export TF_VAR_datastore="cpn-rtp-hx1-datastore1" 31 | 32 | #export TF_VAR_vsphere_sdwan_folder="[cpn-rtp-hx1-datastore1]/sdwan" 33 | #export TF_VAR_vsphere_sdwan_folder="sdwan" 34 | export TF_VAR_folder="sdwan" 35 | 36 | #export TF_VAR_vsphere_iso_datastore="cpn-rtp-hx1-datastore1" 37 | export TF_VAR_iso_datastore="cpn-rtp-hx1-datastore1" 38 | 39 | #export TF_VAR_vsphere_sdwan_iso_path="cloud-init" 40 | export TF_VAR_iso_path=cloud-init 41 | 42 | #export TF_VAR_vsphere_resource_pool="" 43 | export TF_VAR_resource_pool= 44 | 45 | export TF_VAR_govc_cmd="/opt/homebrew/bin/govc" 46 | export GOVC_USERNAME=$TF_VAR_vsphere_username 47 | export GOVC_PASSWORD=$TF_VAR_vsphere_password 48 | export GOVC_URL=$TF_VAR_vsphere_server 49 | export GOVC_INSECURE=True 50 | 51 | export VMANAGE1_IP=192.133.184.73/22 52 | export VBOND1_IP=192.133.184.76/22 53 | export VSMART1_IP=192.133.184.75/22 54 | export VPN0_GATEWAY=192.133.184.1 55 | 56 | export VPN0_PORTGROUP="cpn-rtp-colab4" 57 | export VPN512_PORTGROUP="cpn-rtp-colab4" 58 | export SERVICEVPN_PORTGROUP="cpn-rtp-colab4" 59 | 60 | export HQ_EDGE1_IP=1.1.1.4/24 61 | export SITE1_EDGE1_IP=1.1.1.5/24 62 | export 
SITE2_EDGE1_IP=1.1.1.6/24 63 | 64 | export IOSXE_SDWAN_IMAGE=iosxe-sdwan-16.12.5 65 | 66 | export VIPTELA_VERSION=20.8.1 67 | 68 | export VMANAGE_ORG=CIDR_SDWAN_WORKSHOPS 69 | 70 | export CLOUDINIT_TYPE=v2 -------------------------------------------------------------------------------- /config/.gitignore: -------------------------------------------------------------------------------- 1 | config.yaml 2 | config_*.yaml 3 | metadata_*.yaml 4 | -------------------------------------------------------------------------------- /config/config.example.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | global_config: 3 | # Different cloud have different restrictions on the characters allowed in 4 | # the tags. AWS is the most permissive, GCP is the most restrictive, Azure is 5 | # somewhere in the middle. If you use all lowercase letters and numbers, you 6 | # should be fine. 7 | common_tags: 8 | Owner: "Owner_Name" 9 | Project: "Project_Name" 10 | Automation: "sdwan-devops" 11 | ubuntu_image: "ubuntu-22-04" 12 | # ssh_public_key_file: "{home_dir}/.ssh/id_rsa.pub" 13 | ssh_public_key: "" 14 | 15 | infra_providers: 16 | aws: 17 | ntp_server: "169.254.169.123" 18 | gcp: 19 | ntp_server: "metadata.google.internal" 20 | project: "gcp-sdwaniac-nprd-84677" 21 | azure: 22 | ntp_server: "pool.ntp.org" 23 | vmware: 24 | ntp_server: "pool.ntp.org" 25 | vsphere_server: "" 26 | vsphere_user: "" 27 | vsphere_password: "" 28 | 29 | 30 | controllers: 31 | infra: 32 | provider: "aws" 33 | region: "us-east-1" 34 | # dns_domain: "acme.com" 35 | sw_version: "20.12.1" 36 | cloud_init_format: v2 37 | config: 38 | organization_name: "Cisco DevOps" 39 | site_id: 1 40 | acl_ingress_ipv4: [ "0.0.0.0/1", "128.0.0.0/1" ] 41 | acl_ingress_ipv6: [ "::/0" ] 42 | cidr: "10.128.0.0/22" 43 | vpn0_gateway: "10.128.1.1" 44 | certificate_authority: 45 | cert_dir: '/ansible/myCA' 46 | vmanage: 47 | infra: 48 | image_id: "ami-02b3d4da3fc136dab" 49 | instance_type: 
"c5.4xlarge" 50 | config: 51 | username: "admin" 52 | password: "C1sco12345" 53 | system_ip: "1.1.1.1" 54 | vpn0_interface_ipv4: "10.128.1.11/24" 55 | vbond: 56 | infra: 57 | image_id: "ami-05ea2898dccbd19bc" 58 | instance_type: "c5.large" 59 | config: 60 | system_ip: "1.1.1.2" 61 | vpn0_interface_ipv4: "10.128.1.12/24" 62 | vsmart: 63 | infra: 64 | image_id: "ami-08a51380a937e415f" 65 | instance_type: "c5.large" 66 | config: 67 | system_ip: "1.1.1.3" 68 | vpn0_interface_ipv4: "10.128.1.13/24" 69 | 70 | wan_edges: 71 | cedge-aws: 72 | infra: 73 | provider: "aws" 74 | region: "us-east-1" 75 | image_id: "ami-09db869a0fe05bd4e" 76 | instance_type: "c5n.large" 77 | sw_version: "17.08.01a" 78 | sdwan_model: vedge-C8000V 79 | sdwan_uuid: "C8K-F6A3B082-EDDB-BA43-93DB-2AA42DFE58DB" 80 | config: 81 | site_id: 101 82 | system_ip: "1.1.1.4" 83 | cidr: "10.128.4.0/23" 84 | cedge-gcp: 85 | infra: 86 | provider: "gcp" 87 | region: "us-central1" 88 | zone: "us-central1-a" 89 | image_id: "cisco-public/cisco-c8k-17-08-01a" 90 | instance_type: "n1-standard-4" 91 | sw_version: "17.08.01a" 92 | sdwan_model: vedge-C8000V 93 | sdwan_uuid: "C8K-6DF43510-2590-5146-3AAA-EEF129DAC175" 94 | config: 95 | site_id: 102 96 | system_ip: "1.1.1.5" 97 | cidr: "10.128.6.0/23" 98 | cedge-azure: 99 | infra: 100 | provider: "azure" 101 | region: "northcentralus" 102 | image_id: "17_08_01a-byol" 103 | instance_type: "Standard_DS1_v2" 104 | sw_version: "17.08.01a" 105 | sdwan_model: vedge-C8000V 106 | sdwan_uuid: "C8K-0EEBC36C-18EC-2FF6-40F4-A9DEC1BBB25D" 107 | config: 108 | site_id: 103 109 | system_ip: "1.1.1.6" 110 | cidr: "10.128.8.0/23" 111 | cedge-dc: 112 | infra: 113 | provider: "vmware" 114 | image_id: "c8000v-17.08.01a" 115 | sw_version: "17.08.01a" 116 | sdwan_model: vedge-C8000V 117 | sdwan_uuid: "C8K-B63650AB-5DB8-BAC8-2A63-D32EA7676FC2" 118 | vmware: 119 | datacenter: "" 120 | cluster: "" 121 | datastore: "" 122 | folder: "" 123 | vpn0_portgroup: "VM Network" 124 | vpn512_portgroup: "VM 
Network" 125 | servicevpn_portgroup: "VM Network" 126 | config: 127 | site_id: 104 128 | system_ip: "1.1.1.7" 129 | vpn0_range: "22.1.140.0/24" 130 | vpn1_range: "22.1.11.0/24" 131 | vpn0_interface_ipv4: "22.1.140.100/24" 132 | 133 | ... 134 | -------------------------------------------------------------------------------- /config/metadata.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | loader_config: 3 | top_level_config: "../config/config.yaml" 4 | 5 | targets_config: 6 | jinja_renderer: 7 | templates_dir: "../config/templates" 8 | targets: 9 | - description: "Ansible day_-1 vars" 10 | template: "day-1_local.j2" 11 | filename: "../ansible/day_-1/group_vars/all/local.yml" 12 | - description: "Ansible day_0 vars" 13 | template: "day0_local.j2" 14 | filename: "../ansible/day_0/group_vars/all/local.yml" 15 | - description: "Ansible day_1 vars" 16 | template: "day1_local.j2" 17 | filename: "../ansible/day_1/group_vars/all/local.yml" 18 | - description: "Ansible SDWAN inventory" 19 | template: "sdwan_inventory.j2" 20 | filename: "../ansible/inventory/sdwan_inventory.yml" 21 | 22 | 23 | logging_config: 24 | version: 1 25 | formatters: 26 | simple: 27 | format: "%(levelname)s: %(message)s" 28 | detailed: 29 | format: "%(asctime)s: %(name)s: %(levelname)s: %(message)s" 30 | handlers: 31 | console: 32 | class: "logging.StreamHandler" 33 | level: "INFO" 34 | formatter: "simple" 35 | root: 36 | handlers: 37 | - "console" 38 | level: "DEBUG" 39 | ... 
40 | -------------------------------------------------------------------------------- /config/policy/README.md: -------------------------------------------------------------------------------- 1 | The [config.rego](./config.rego) file in this directory implements [Open Policy Agent](https://www.openpolicyagent.org/docs/latest/policy-language/) policies and uses the contents of [data.yaml](./data.yaml) to check that the correct AWS VM instance types are being used with respect to the given SD-WAN controller versions. 2 | 3 | If this policy is not enforced, then the controllers and vManage might not start up correctly. -------------------------------------------------------------------------------- /config/policy/config.rego: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | 4 | # 5 | # Functions 6 | # 7 | 8 | # Given a full SD-WAN version, get just the version train 9 | get_version_train(version) := v { 10 | parts = split(version, ".") 11 | x := parts[0] 12 | y := parts[1] 13 | v := sprintf("%s.%s", [x, y]) 14 | } 15 | 16 | 17 | # 18 | # Inputs 19 | # 20 | 21 | sdwan_version_train := get_version_train(input.controllers.infra.sw_version) 22 | infra := input.controllers.infra.provider 23 | vmanage_accepted_types := data.instance_type_support_matrix[sdwan_version_train][infra].vmanage 24 | controller_accepted_types := data.instance_type_support_matrix[sdwan_version_train][infra].controllers 25 | 26 | contains_vmanage_accepted_types { 27 | input.controllers.vmanage.infra.instance_type == vmanage_accepted_types[_] 28 | } 29 | 30 | contains_vbond_accepted_types { 31 | input.controllers.vbond.infra.instance_type == controller_accepted_types[_] 32 | } 33 | 34 | contains_vsmart_accepted_types { 35 | input.controllers.vsmart.infra.instance_type == controller_accepted_types[_] 36 | } 37 | 38 | 39 | # 40 | # Rules 41 | # 42 | 43 | # Make sure we're using a supported instance type for vManage for the given SD-WAN version 44 | deny[msg] {
45 | not contains_vmanage_accepted_types 46 | 47 | msg := sprintf("Instance type '%s' is not supported for vManage %s.x on '%s' infra, please use one of %s", 48 | [input.controllers.vmanage.infra.instance_type, sdwan_version_train, infra, vmanage_accepted_types]) 49 | } 50 | 51 | # Make sure we're using a supported instance type for vBond for the given SD-WAN version 52 | deny[msg] { 53 | not contains_vbond_accepted_types 54 | 55 | msg := sprintf("Instance type '%s' is not supported for vBond %s.x on '%s' infra, please use one of %s", 56 | [input.controllers.vbond.infra.instance_type, sdwan_version_train, infra, controller_accepted_types]) 57 | } 58 | 59 | # Make sure we're using a supported instance type for vSmart for the given SD-WAN version 60 | deny[msg] { 61 | not contains_vsmart_accepted_types 62 | 63 | msg := sprintf("Instance type '%s' is not supported for vSmart %s.x on '%s' infra, please use one of %s", 64 | [input.controllers.vsmart.infra.instance_type, sdwan_version_train, infra, controller_accepted_types]) 65 | } 66 | -------------------------------------------------------------------------------- /config/policy/data.yaml: -------------------------------------------------------------------------------- 1 | instance_type_support_matrix: 2 | 20.8: 3 | aws: 4 | vmanage: 5 | - c5.2xlarge 6 | - c5.4xlarge 7 | - c5.9xlarge 8 | - c5.12xlarge 9 | controllers: 10 | - c5.large 11 | - c5.xlarge 12 | - c5.2xlarge 13 | - c5.4xlarge 14 | - c5.9xlarge 15 | - c5.12xlarge 16 | 20.12: 17 | aws: 18 | vmanage: 19 | - c5.4xlarge 20 | - c5.9xlarge 21 | - c5.12xlarge 22 | controllers: 23 | - c5.large 24 | - c5.xlarge 25 | - c5.2xlarge 26 | - c5.4xlarge 27 | - c5.9xlarge 28 | - c5.12xlarge 29 | -------------------------------------------------------------------------------- /config/templates/day-1_local.j2: -------------------------------------------------------------------------------- 1 | vmanage_org: "{{ controllers.config.organization_name }}" 2 | 3 | 
sdwan_ca_passphrase: "{{ controllers.certificate_authority.passphrase }}" 4 | sdwan_cert_dir: "{{ controllers.certificate_authority.cert_dir }}" 5 | sdwan_ca_cert: "{{ controllers.certificate_authority.ca_cert }}" 6 | -------------------------------------------------------------------------------- /config/templates/day0_local.j2: -------------------------------------------------------------------------------- 1 | vmanage_org: "{{ controllers.config.organization_name }}" 2 | vmanage_user: "{{ controllers.vmanage.config.username }}" 3 | vmanage_pass: "{{ controllers.vmanage.config.password }}" 4 | viptela_encrypted_pass: "{{ controllers.vmanage.config.password_hashed }}" 5 | ansible_user: "{{ controllers.vmanage.config.username }}" 6 | ansible_password: "{{ controllers.vmanage.config.password }}" 7 | ssh_pubkey: "{{ global_config.ssh_public_key }}" 8 | ssh_pubkey_fp: "{{ global_config.ssh_public_key_fp }}" 9 | sdwan_serial_file: "/ansible/licenses/serialFile.viptela" 10 | common_tags: 11 | {% for key, value in global_config.common_tags.items() %} 12 | {{ key }}: "{{ value }}" 13 | {% endfor %} 14 | 15 | sdwan_ca_passphrase: "{{ controllers.certificate_authority.passphrase }}" 16 | sdwan_cert_dir: "{{ controllers.certificate_authority.cert_dir }}" 17 | sdwan_ca_cert: "{{ controllers.certificate_authority.ca_cert }}" 18 | 19 | terraform_project_path: 20 | vmware: 21 | network: "" 22 | control: "/terraform-sdwan/vmware" 23 | edge_network: "" 24 | edges: "/sdwan-edge/Catalyst8000v/vmware/cedge" 25 | aws: 26 | network: "/terraform-sdwan/aws/Provision_VPC" 27 | control: "/terraform-sdwan/aws/Provision_Instances" 28 | edge_network: "/sdwan-edge/Catalyst8000v/aws/vpc" 29 | edges: "/sdwan-edge/Catalyst8000v/aws/cedge" 30 | gcp: 31 | network: "" 32 | control: "" 33 | edge_network: "/sdwan-edge/Catalyst8000v/gcp/vpc" 34 | edges: "/sdwan-edge/Catalyst8000v/gcp/cedge" 35 | azure: 36 | network: "" 37 | control: "" 38 | edge_network: "/sdwan-edge/Catalyst8000v/azure/vnet" 39 | 
edges: "/sdwan-edge/Catalyst8000v/azure/cedge" 40 | 41 | # VMware 42 | vmware_vmanage_template: "viptela-manage-{{ controllers.infra.sw_version }}" 43 | vmware_vsmart_template: "viptela-smart-{{ controllers.infra.sw_version }}" 44 | vmware_vbond_template: "viptela-edge-{{ controllers.infra.sw_version }}" 45 | vmware_vedge_template: "viptela-edge-{{ controllers.infra.sw_version }}" 46 | vmware_cedge_template: "iosxe-sdwan-16.12.5" 47 | 48 | 49 | -------------------------------------------------------------------------------- /config/templates/day1_local.j2: -------------------------------------------------------------------------------- 1 | vmanage_org: "{{ controllers.config.organization_name }}" 2 | vmanage_user: "{{ controllers.vmanage.config.username }}" 3 | vmanage_pass: "{{ controllers.vmanage.config.password }}" 4 | viptela_encrypted_pass: "{{ controllers.vmanage.config.password_hashed }}" 5 | ansible_user: "{{ controllers.vmanage.config.username }}" 6 | ansible_password: "{{ controllers.vmanage.config.password }}" 7 | ssh_pubkey: "{{ global_config.ssh_public_key }}" 8 | 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs/deploying_controllers_cloud.md: -------------------------------------------------------------------------------- 1 | # Deploying Controllers on AWS 2 | 3 | Check the [whitepaper on CCO](https://www.cisco.com/c/en/us/td/docs/routers/sdwan/configuration/sdwan-xe-gs-book/controller-aws.html) for more information. 4 | 5 | ## Cloning repo 6 | 7 | Clone the sdwan-devops repo using the main branch (default: origin/main): 8 | 9 | ```shell 10 | git clone --single-branch --recursive https://github.com/ciscodevnet/sdwan-devops.git 11 | ``` 12 | 13 | Make sure you use `--recursive` to also clone folders sdwan-edge and terraform-sdwan. 
14 | 15 | All operations are run out of the sdwan-devops directory: `cd sdwan-devops` 16 | 17 | ## OpenSSL version 3 18 | 19 | If you are on a Mac: we need OpenSSL version 3, but macOS ships LibreSSL by default. 20 | 21 | Upgrade openssl: 22 | 23 | ```shell 24 | brew install openssl@3 25 | ``` 26 | 27 | ## License file 28 | 29 | Generate a license file corresponding to your org-name that will contain your device UUIDs. Go to CCO under [Network Plug and Play](https://software.cisco.com/software/csws/ws/platform/home?locale=en_US#pnp-devices) using your Smart Account / Virtual Account (SA/VA). 30 | 31 | Download and copy into `ansible/licenses` 32 | 33 | ## Configure parameters 34 | 35 | All parameters are defined in a single configuration file named **config.yaml** under folder **config**. 36 | 37 | Go to the **config** directory 38 | 39 | - copy `config.example.yaml` to `config.yaml` 40 | - Update required parameters, most likely your ssh_public_key, controllers and wan-edge ami image identifiers as well as wan-edge UUIDs. 41 | 42 | ## Define environment variables 43 | 44 | Go to the **bin** folder. 45 | 46 | Update your AWS profile name in file **minimal_env.sh** (if this is not "default"). 47 | 48 | Set the environment variables (make sure to use `source`, as shown below)
49 | 50 | ```shell 51 | source minimal_env.sh 52 | ``` 53 | 54 | ## Config Builder 55 | 56 | With **bin** as your current folder, build all ansible parameter files: 57 | 58 | ```shell 59 | ./config_build.sh 60 | ``` 61 | 62 | This will render parameter files for ansible playbooks based on **config/config.yaml** and **jinja templates**: 63 | 64 | - Ansible day-1 vars: 'config/templates/day-1_local.j2' -> '../ansible/day_-1/group_vars/all/local.yml' 65 | - Ansible day_0 vars: 'config/templates/day0_local.j2' -> '../ansible/day_0/group_vars/all/local.yml' 66 | - Ansible day_1 vars: 'config/templates/day1_local.j2' -> '../ansible/day_1/group_vars/all/local.yml' 67 | - Ansible SDWAN inventory: 'config/templates/sdwan_inventory.j2' -> '../ansible/inventory/sdwan_inventory.yml' 68 | 69 | ## Create CA (Day -1) 70 | 71 | With **bin** as your current folder, run the script to create certificates: 72 | 73 | ```shell 74 | ./install_ca.sh 75 | ``` 76 | 77 | This will create a local CA in **ansible/myCA**. 
78 | 79 | ## Deploy Controllers (day0) 80 | 81 | With **bin** as your current folder, deploy and configure Control Plane: 82 | 83 | ```shell 84 | ./install_cp.sh 85 | ``` 86 | 87 | This will execute ansible playbook: **/ansible/day_0/build-control-plane.yml** 88 | 89 | Which imports: 90 | 91 | - /ansible/day_0/deploy-control-plane.yml 92 | - /ansible/day_0/config-control-plane.yml 93 | - which in turns, imports: 94 | - /ansible/day_0/check-reqs.yml 95 | - /ansible/day_0/check-vmanage.yml 96 | - /ansible/day_0/config-vmanage.yml 97 | 98 | You can also execute individual playbooks if you want to check every step of the process: 99 | 100 | ```shell 101 | ./play.sh /ansible/day_0/deploy-control-plane.yml 102 | ./play.sh /ansible/day_0/check-reqs.yml 103 | ./play.sh /ansible/day_0/check-vmanage.yml 104 | ./play.sh /ansible/day_0/config-vmanage.yml 105 | ./play.sh /ansible/day_0/config-certificates.yml 106 | ``` 107 | 108 | Notes: 109 | 110 | - deploy-control-plane.yml: generate day0 configurations and instantiate all controllers. 111 | - check-reqs.yml: check org-name is defined and serial file exists. 112 | - check-vmanage.yml: check vManage is able to respond to REST API calls. Can take a while (~60 retries or 15 sec each). 113 | - config-vmanage.yml: configure vManage settings and add vBond and vSmart controllers to vManage. 114 | - config-certificates.yml: add certificates for all controllers. 
115 | 116 | ## Deleting Controllers 117 | 118 | With **bin** as your current folder: 119 | 120 | ```shell 121 | ./delete_cp.sh 122 | ``` 123 | -------------------------------------------------------------------------------- /docs/deploying_edges_cloud.md: -------------------------------------------------------------------------------- 1 | # Deploying C8000v on Cloud (AWS, Azure and GCP) 2 | 3 | Details in [Deploying C8000v](https://github.com/CiscoDevNet/sdwan-edge/blob/main/README.md) 4 | 5 | ## Cloning repo 6 | 7 | Clone the sdwan-devops repo using the main branch (default: origin/main): 8 | 9 | ```shell 10 | git clone --single-branch --recursive https://github.com/ciscodevnet/sdwan-devops.git 11 | ``` 12 | 13 | Make sure you use `--recursive` to also clone folders sdwan-edge and terraform-sdwan. 14 | 15 | All operations are run out of the sdwan-devops directory: `cd sdwan-devops` 16 | 17 | ## C8000v AWS AMI 18 | 19 | To find Image id: 20 | 21 | - Go to the [AWS Marketplace](https://aws.amazon.com/marketplace/) page 22 | - search for the image called: 'Cisco Catalyst 8000V Edge Software - BYOL' 23 | - Click on the image title. 24 | - Click **Continue to Subscribe** button. 25 | - Click **Continue to Configuration** button. 26 | - Verify **Fulfillment Option**, **Software Version**, and **Region** values. Changing any of these can change the **Ami Id**. 27 | - Find and save the **Ami Id** - this will be used in the config.yml configuration file in the next section. 28 | 29 | ## Configure parameters 30 | 31 | All parameters are defined in a single configuration file named **config.yaml** under folder **config**. 32 | 33 | Go to the **config** directory 34 | 35 | - copy `config.example.yaml` to `config.yaml` 36 | - Update required parameters, most likely your ssh_public_key, controllers and wan-edge ami image identifiers as well as wan-edge UUIDs. 37 | 38 | ## Define environnement variables 39 | 40 | Go to the **bin** folder. 
41 | 42 | Update your credentials in file **minimal_env.sh**. 43 | 44 | Set the environment variables (make sure to use `source`, as shown below) 45 | 46 | ```shell 47 | source minimal_env.sh 48 | ``` 49 | 50 | ## Config Builder 51 | 52 | With **bin** as your current folder, build all ansible parameter files: 53 | 54 | ```shell 55 | ./config_build.sh 56 | ``` 57 | 58 | This will render parameter files for ansible playbooks based on **config/config.yaml** and **jinja templates**: 59 | 60 | - Ansible day-1 vars: 'config/templates/day-1_local.j2' -> '../ansible/day_-1/group_vars/all/local.yml' 61 | - Ansible day_0 vars: 'config/templates/day0_local.j2' -> '../ansible/day_0/group_vars/all/local.yml' 62 | - Ansible day_1 vars: 'config/templates/day1_local.j2' -> '../ansible/day_1/group_vars/all/local.yml' 63 | - Ansible SDWAN inventory: 'config/templates/sdwan_inventory.j2' -> '../ansible/inventory/sdwan_inventory.yml' 64 | 65 | ## Deploy C8000v 66 | 67 | With **bin** as your current folder, deploy the C8000v: 68 | 69 | ```shell 70 | ./install_edges.sh 71 | ``` 72 | 73 | which invokes: `./play.sh /ansible/day_0/onboard-edges.yml` 74 | 75 | that uses the following playbooks: 76 | 77 | - get-bootstrap.yml 78 | - terraform-apply-edges.yml 79 | - terraform-apply-edges.yml 80 | 81 | Note: 82 | 83 | - If no template is attached to the UUIDs specified, a basic day0 configuration will be used. 84 | - If a template is attached, the vManage generated configuration will be used as day0 config. 85 | 86 | ## Deleting C8000v 87 | 88 | With **bin** as your current folder: 89 | 90 | ```shell 91 | ./delete_edges.sh 92 | ``` 93 | -------------------------------------------------------------------------------- /docs/gitlab.md: -------------------------------------------------------------------------------- 1 | # Building a CI pipeline with GitLab 2 | 3 | The steps below will build out a CI pipeline for SD-WAN using GitLab.
These instructions assume that you are already able to run all the playbooks manually to build out the topology and configure the SD-WAN. If you have not done that first, go back and make sure that the playbooks run successfully before trying them in GitLab CI. 4 | 5 | ## Setup 6 | 7 | 1. Clone the repo. 8 | ``` 9 | git clone https://github.com/CiscoDevNet/sdwan-devops.git 10 | ``` 11 | 12 | 1. Set the organization name. Replace the value below with your organization name. 13 | ``` 14 | export VMANAGE_ORG=myorgname 15 | ``` 16 | 17 | 1. Copy a valid license file to `licenses/serialFile.viptela`. 18 | 19 | 1. Edit `ansible.cfg` and set the inventory variable to point to hq1. 20 | ``` 21 | inventory = ./inventory/hq1 22 | ``` 23 | 24 | 1. Set the needed environment variables for access to your CML infrastucture. Replace the values below with your server, credentials and lab name. 25 | ``` 26 | export VIRL_HOST=myvirlhost.example.com 27 | export VIRL_USERNAME=myusername 28 | export VIRL_PASSWORD=mypasword 29 | export VIRL_LAB=myusername_sdwan 30 | ``` 31 | 32 | 1. Set the version of IOS-XE image to use for edge devices. 33 | ``` 34 | export IOSXE_SDWAN_IMAGE=iosxe-sdwan-16.12.2r 35 | ``` 36 | 37 | 1. Set the version of CSR1000v image to use for underlay devices. 38 | ``` 39 | export CSR1000V_IMAGE=csr1000v-170301 40 | ``` 41 | 42 | 1. And finally, set the version of control plane to use. 43 | ``` 44 | export VIPTELA_VERSION=19.2.1 45 | ``` 46 | 47 | >Note: This value gets appended to the image name (e.g. viptela-manage, viptela-smart, etc.) so make sure these names line up with the image definitions you have in CML. 48 | 49 | 1. 
Log into your GitLab instance and create a GITLAB_API_TOKEN 50 | - In the upper left of console, click on the symbol for your account and then Settings 51 | - In the left menu, click on "Access Tokens" 52 | - Provide a token name, expiration, the api scope, click "Create personal access token", and save the generated TOKEN for the next step 53 | 54 | 1. Export the following variables to match your environment. 55 | ``` 56 | export GITLAB_HOST=https://gitlab.example.com 57 | export GITLAB_USER= 58 | export GITLAB_API_TOKEN= 59 | export GITLAB_PROJECT=sdwan-devops 60 | ``` 61 | 62 | ## Create a CI pipeline in GitLab 63 | 64 | 1. Create the GitLab project and CI/CD variables. 65 | ``` 66 | extras/create-gitlab-project.sh 67 | ``` 68 | 69 | 1. Remove the old origin, add new origin and push to GitLab. 70 | ``` 71 | git remote remove origin 72 | git remote add origin $GITLAB_HOST/$GITLAB_USER/$GITLAB_PROJECT.git 73 | git config http.version HTTP/1.1 74 | ``` 75 | 76 | 1. Commit your license file to the repo. 77 | ``` 78 | git add -f licenses/serialFile.viptela 79 | git commit -m "Adding license file" 80 | ``` 81 | 82 | 1. Push the repo to GitLab. 83 | ``` 84 | git push --set-upstream origin master 85 | ``` 86 | 87 | >Note: enter your GitLab credentials if asked 88 | 89 | 1. From the GitLab web UI, navigate to the CI/CD -> Pipelines page for the project. You should see a pipeline currently active since we committed the model-driven-devops code and we had a `.gitlab-ci.yml` file present. If that file is present, GitLab will automatically try to execute the CI pipeline defined inside. 90 | 91 | 1. Use the graphical representation of the pipeline to click through the console output of the various stages. The entire pipeline will take approximately ~8 minutes to complete. Wait until it completes to go onto the next step. 92 | 93 | ## Cleanup 94 | 1. Remove project from GitLab. 95 | ``` 96 | extras/delete-gitlab-project.sh 97 | ``` 98 | 99 | 1. Delete lab from CML. 
100 | ``` 101 | ./play.sh clean-virl.yml --tags "delete" 102 | ``` 103 | -------------------------------------------------------------------------------- /docs/images/hq1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/sdwan-devops/f1893cd70902de91a4ca6337f9b004caa71749ae/docs/images/hq1.png -------------------------------------------------------------------------------- /docs/images/hq2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/sdwan-devops/f1893cd70902de91a4ca6337f9b004caa71749ae/docs/images/hq2.png -------------------------------------------------------------------------------- /docs/simulation.md: -------------------------------------------------------------------------------- 1 | # Simulation 2 | 3 | Simulation can be used for developing new deployments as well as testing changes to current deployments. Simulation capabilities are provided by CML^2 or VMware. The [Ansible CML^2 Modules](https://github.com/ciscodevnet/ansible-virl) are used to automate deployments in CML^2. The [Terraform Modules](https://github.com/CiscoDevNet/terraform-sdwan) are used to automate deployments in VMware. 4 | 5 | ## Quick start instructions 6 | 7 | If you want to skip all the info and documentation below and just run the automation, use the following links, otherwise read on for more details. 8 | 9 | - [Build the hq1 topology in CML](docs/virl-hq1.md) 10 | - [Build the hq2 topology in CML](docs/virl-hq2.md) 11 | - [Build the hq2 topology in VMware](docs/vmware-hq2.md) 12 | - [GitLab CI pipeline](docs/gitlab.md) 13 | 14 | 15 | ## Licensing Requirements 16 | 17 | The following licensing-related tasks need to be completed prior to running the playbooks: 18 | 1. Copy a valid Viptela license file into `licenses/serialFile.viptela` 19 | 1. 
Set organization name as an environment variable using `export VMANAGE_ORG=myorgname`. 20 | 21 | These values can also be set permanently in `group_vars/all/local.yml` if desired. 22 | 23 | ```yaml 24 | organization_name: "" 25 | license_token: XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX 26 | ``` 27 | 28 | > Note: Edge device UUIDs must be updated to reflect the ones from the `serialFile.viptela` provided. This is done by updating `sdwan_uuid` value for each edge in the `sdwan.yml` inventory file (e.g. `inventory/hq1/sdwan.yml`). See the `Variables` section for more information. 29 | 30 | 31 | ## Automation Playbooks 32 | 33 | * `build-ca.yml` 34 | * Create a local CA in `./myCA` 35 | * `build-virl.yml` or `build-vmware.yml` 36 | * Creates Day0 config for VNFs based on the data in the `sdwan.yml` file 37 | * Provision and start VNFs on virtual infrastructure 38 | * `config-sdwan.yml` 39 | * Configure settings on vmanage 40 | * Add vbonds and vsmarts to vmanage 41 | * Create CSRs for vbonds and vsmarts 42 | * Install certificates into vmanage 43 | * Push certificates to controllers 44 | * Import templates if present 45 | * Import policy if present 46 | * `deploy-virl.yml` or `deploy-vmware.yml` 47 | * Create Day0 config for edge VNFs 48 | * Provision and boot edge VNFs on virtual infrastructure 49 | * `import-templates.yml` 50 | * Imports device and feature templates into vManage 51 | * `attach-templates.yml` 52 | * Attaches templates to devices as specified in the `sdwan.yml` file 53 | * `import-policy.yml` 54 | * Imports policy into vManage 55 | * `activate-policy.yml` 56 | * Activates policy 57 | * `waitfor-sync.yml` 58 | * Waits until all edge devices are in sync on vManage 59 | 60 | ## Validation Playbooks 61 | * `check-sdwan.yml` 62 | * Check overlay connectivity using ping 63 | * Can check for things that should, or should not, work 64 | 65 | ## Testing 66 | 67 | Jenkins CI is used for automatic and manual testing. 
The various Jenkinsfiles in use are in the `jenkins` directory. A `gitlab-ci.yml` file is also included for running CI from GitLab. 68 | 69 | 70 | ## Structure 71 | 72 | ### Inventories 73 | 74 | The repo contains a set of playbooks, roles, and templates that are fed from the included inventories. Several built-in topologies located in the inventory and more can be added. There are two topologies that are provided in the `inventory` directory: 75 | 76 | * `hq1` builds only on CML^2 and includes an underlay network, SD-WAN control plane and SD-WAN edges (see [hq1.png](docs/images/hq1.png)) 77 | * `hq2` builds on CML^2 and VMware and includes the SD-WAN control plane and SD-WAN edges in a flat network (see [hq2.png](docs/images/hq2.png)) 78 | 79 | To switch between topologies, either edit `ansible.cfg` and point `inventory` to the proper directory: 80 | 81 | For example, change: 82 | ``` 83 | inventory = ./inventory/hq1 84 | ``` 85 | to 86 | ``` 87 | inventory = ./inventory/hq2 88 | ``` 89 | 90 | or specify `-i` with every command (e.g. `./play.sh -i inventory/hq1 build-virl.yml`) 91 | 92 | The local defaults for all inventories are set in `sdwan-devops/group_vars/all/local.yml` 93 | 94 | ### Variables 95 | 96 | There are a set of required variables that must be set for each device in the topology. An example for a typical edge device is shown below. Note that in the case of the `hq1` inventory, you don't need to modify any of these values if you just want to test out the automation. 
However, the `hq2 97 | 98 | ```yaml 99 | sdwan_system_ip: 192.168.255.13 100 | sdwan_site_id: 1 101 | sdwan_vbond: 10.0.0.11 102 | sdwan_model: 'vedge-CSR-1000v' 103 | sdwan_uuid: 'CSR-82DEC3C6-3A28-B866-6F4A-40BEA274CA00' 104 | sdwan_personality: vedge 105 | sdwan_template: 106 | name: 'hq-csr1000v' 107 | variables: 108 | 'vpn512_interface': GigabitEthernet1 109 | 'vpn0_internet_ipv4_address': 10.0.0.13/24 110 | 'vpn0_default_gateway': 10.0.0.1 111 | 'vpn0_interface': GigabitEthernet2 112 | 'vpn1_ipv4_address': 10.0.255.6/30 113 | 'vpn1_interface': GigabitEthernet3 114 | 'vpn1_ospf_interface': GigabitEthernet3 115 | 'system_latitude': 37.411343 116 | 'system_longitude': -121.938803 117 | 'system_site_id': 1 118 | 'system_host_name': hq-cedge1 119 | 'system_system_ip': 192.168.255.13 120 | 'banner_login': "{{ login_banner }}" 121 | 'banner_motd': Welcome to hq-cedge1! 122 | ``` 123 | 124 | -------------------------------------------------------------------------------- /docs/virl-hq1.md: -------------------------------------------------------------------------------- 1 | # Building the SD-WAN control plane on CML 2 | 3 | The steps below will build and configure the SD-WAN control plane and edges on CML. 4 | 5 | >Note: The repo is designed for CML2. 6 | 7 | ## Create the SD-WAN images 8 | 9 | If your CML server does not have the SD-WAN images installed, follow the steps [here](https://github.com/CiscoSE/virl-howtos/blob/master/virl2-sdwan-images/virl2-sdwan-devops.md) to create the proper node definitions and upload the images. 10 | 11 | ## Complete the prerequisites 12 | 13 | 1. Set the organization name. Replace the value below with your organization name. 14 | ``` 15 | export VMANAGE_ORG=myorgname 16 | ``` 17 | 18 | 1. Copy a valid license file to `licenses/serialFile.viptela`. 19 | 20 | 1. Edit `ansible.cfg` and set the inventory variable to point to hq1. 21 | ``` 22 | inventory = ./inventory/hq1 23 | ``` 24 | 25 | 1. 
Set the needed environment variables for access to your CML infrastucture. Replace the values below with your server, credentials and lab name. 26 | ``` 27 | export VIRL_HOST=myvirlhost.example.com 28 | export VIRL_USERNAME=myusername 29 | export VIRL_PASSWORD=mypasword 30 | export VIRL_LAB=myusername_sdwan 31 | ``` 32 | 33 | 1. Set the version of IOS-XE image to use for edge devices. 34 | ``` 35 | export IOSXE_SDWAN_IMAGE=iosxe-sdwan-16.12.2r 36 | ``` 37 | 38 | 1. Set the version of CSR1000v image to use for underlay devices. 39 | ``` 40 | export CSR1000V_IMAGE=csr1000v-170301 41 | ``` 42 | 43 | 1. And finally, set the version of control plane to use. 44 | ``` 45 | export VIPTELA_VERSION=19.2.1 46 | ``` 47 | 48 | >Note: This value gets appended to the image name (e.g. viptela-manage, viptela-smart, etc.) so make sure these names line up with the image definitions you have in CML. 49 | 50 | ## Run the playbooks 51 | 52 | 1. Create the local CA used for certificate signing. 53 | ``` 54 | ./play.sh build-ca.yml 55 | ``` 56 | 57 | 1. Build out the underlay and control plane. 58 | ``` 59 | ./play.sh build-virl.yml 60 | ``` 61 | 62 | 1. Configure the SD-WAN control plane using the supplied inventory data. 63 | ``` 64 | ./play.sh config-sdwan.yml 65 | ``` 66 | 67 | 1. Provision and bootstrap the edges. 68 | ``` 69 | ./play.sh deploy-virl.yml 70 | ``` 71 | 72 | 1. Wait for the edges to sync in vManage. 73 | ``` 74 | ./play.sh waitfor-sync.yml 75 | ``` 76 | > Note: The `waitfor-sync.yml` playbook is useful when we want to wait until all edges are in sync and the service VPN is active. We need to do this before validating the simulation, otherwise the validation check will fail. 77 | 78 | 1. Get inventory information. 79 | ``` 80 | ./play.sh virl-inventory.yml 81 | ``` 82 | 83 | 1. Get detailed inventory information for a single host. 84 | ``` 85 | ./play.sh virl-inventory.yml --tags=detail --limit=vmanage1 86 | ``` 87 | 88 | 1. 
Run the `check-sdwan.yml` playbook to validate the topology. 89 | ``` 90 | ./play.sh check-sdwan.yml 91 | ``` 92 | 93 | ## Clean the topology 94 | 95 | To stop and wipe all of the nodes in the lab. 96 | ``` 97 | ./play.sh clean-virl.yml 98 | ``` 99 | 100 | To clean individual nodes, use `--limit`. 101 | ``` 102 | ./play.sh clean-virl.yml --limit=site1-cedge1 103 | ``` 104 | -------------------------------------------------------------------------------- /docs/virl-hq2.md: -------------------------------------------------------------------------------- 1 | # Building the SD-WAN control plane on CML 2 | 3 | The steps below will build and configure the SD-WAN control plane and edges on CML. 4 | 5 | >Note: The repo is designed for CML2. 6 | 7 | ## Create the SD-WAN images 8 | 9 | If your CML server does not have the SD-WAN images installed, follow the steps [here](https://github.com/CiscoSE/virl-howtos/blob/master/virl2-sdwan-images/virl2-sdwan-devops.md) to create the proper node definitions and upload the images. 10 | 11 | ## Complete the prerequisites 12 | 13 | 1. Set the organization name. Replace the value below with your organization name. 14 | ``` 15 | export VMANAGE_ORG=myorgname 16 | ``` 17 | 18 | 1. Copy a valid license file to `licenses/serialFile.viptela`. 19 | 20 | 1. Edit `ansible.cfg` and set the inventory variable to point to hq1. 21 | ``` 22 | inventory = ./inventory/hq2 23 | ``` 24 | 25 | 1. Set the needed environment variables for access to your CML infrastucture. Replace the values below with your server, credentials and lab name. 26 | ``` 27 | export VIRL_HOST=myvirlhost.example.com 28 | export VIRL_USERNAME=myusername 29 | export VIRL_PASSWORD=mypasword 30 | export VIRL_LAB=myusername_sdwan 31 | ``` 32 | 33 | 1. Set the IP addressing for your control plane components. Make sure these are valid and reachable IP addresses for your environment and that they are specified in CIDR notation (except for the `VPN0_GATEWAY`). 
34 | ``` 35 | export VMANAGE1_IP=1.1.1.1/24 36 | export VBOND1_IP=1.1.1.2/24 37 | export VSMART1_IP=1.1.1.3/24 38 | export VPN0_GATEWAY=1.1.1.254 39 | ``` 40 | 41 | 1. Set the IP addressing for your edges. 42 | ``` 43 | export HQ_EDGE1_IP=1.1.1.4/24 44 | export SITE1_EDGE1_IP=1.1.1.5/24 45 | export SITE2_EDGE1_IP=1.1.1.6/24 46 | ``` 47 | >Note: You do not need to supply this info if you are not going to deploy edges. 48 | 49 | 1. Set the version of IOS-XE to use for edge devices. Set this to the ID of the image you want to use in CML. 50 | ``` 51 | export IOSXE_SDWAN_IMAGE=iosxe-sdwan-16.12.2r 52 | ``` 53 | 54 | 1. And finally, set the version of control plane to use. 55 | ``` 56 | export VIPTELA_VERSION=19.2.1 57 | ``` 58 | 59 | >Note: This value gets appended to the image name (e.g. viptela-manage, viptela-smart, etc.) so make sure these names line up with the image definitions you have in CML. 60 | 61 | ## Run the playbooks 62 | 63 | 1. Create the local CA used for certificate signing. 64 | ``` 65 | ./play.sh build-ca.yml 66 | ``` 67 | 68 | 1. Build out the underlay and control plane. 69 | ``` 70 | ./play.sh build-virl.yml 71 | ``` 72 | 73 | 1. Configure the SD-WAN control plane using the supplied inventory data. 74 | ``` 75 | ./play.sh config-sdwan.yml 76 | ``` 77 | 78 | 1. Provision and bootstrap the edges. 79 | ``` 80 | ./play.sh deploy-virl.yml 81 | ``` 82 | 83 | 1. Wait for the edges to sync in vManage. 84 | ``` 85 | ./play.sh waitfor-sync.yml 86 | ``` 87 | > Note: The `waitfor-sync.yml` playbook is useful when we want to wait until all edges are in sync and the service VPN is active. We need to do this before validating the simulation, otherwise the validation check will fail. 88 | 89 | 1. Get inventory information. 90 | ``` 91 | ./play.sh virl-inventory.yml 92 | ``` 93 | 94 | 1. Get detailed inventory information for a single host. 95 | ``` 96 | ./play.sh virl-inventory.yml --tags=detail --limit=vmanage1 97 | ``` 98 | 99 | 1. 
Run the `check-sdwan.yml` playbook to validate the topology. 100 | ``` 101 | ./play.sh check-sdwan.yml 102 | ``` 103 | 104 | ## Clean the topology 105 | 106 | To stop and wipe all of the nodes in the lab. 107 | ``` 108 | ./play.sh clean-virl.yml 109 | ``` 110 | 111 | To clean individual nodes, use `--limit`. 112 | ``` 113 | ./play.sh clean-virl.yml --limit=site1-cedge1 114 | ``` 115 | -------------------------------------------------------------------------------- /extras/create-gitlab-project.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Uncomment the following and define proper values (or specify as environment variables) 4 | 5 | # GITLAB_HOST=https://gitlab.example.com 6 | # GITLAB_USER=foo 7 | # GITLAB_API_TOKEN=abc123 8 | # GITLAB_PROJECT=sdwan-devops 9 | # VIRL_HOST=cml.example.com 10 | # VIRL_USERNAME=foo 11 | # VIRL_PASSWORD=bar 12 | # VIRL_LAB=sdwan-devops 13 | # VMANAGE_ORG=your-org 14 | 15 | # Add new project 16 | curl --request POST -sSLk --header "PRIVATE-TOKEN: $GITLAB_API_TOKEN" "$GITLAB_HOST/api/v4/projects" --form "name=$GITLAB_PROJECT" 17 | 18 | # Add new vars 19 | OPTION_LIST=( \ 20 | "VIRL_HOST" \ 21 | "VIRL_USERNAME" \ 22 | "VIRL_PASSWORD" \ 23 | "VIRL_LAB" \ 24 | "VMANAGE_ORG" \ 25 | "VMANAGE1_IP" \ 26 | "VBOND1_IP" \ 27 | "VSMART1_IP" \ 28 | "HQ_EDGE1_IP" \ 29 | "SITE1_EDGE1_IP" \ 30 | "SITE2_EDGE1_IP" \ 31 | "VPN0_GATEWAY" \ 32 | "TF_VAR_vsphere_user" \ 33 | "TF_VAR_vsphere_password" \ 34 | "TF_VAR_vsphere_server" \ 35 | "TF_VAR_datacenter" \ 36 | "TF_VAR_cluster" \ 37 | "TF_VAR_folder" \ 38 | "TF_VAR_resource_pool" \ 39 | "TF_VAR_datastore" \ 40 | "TF_VAR_iso_datastore" \ 41 | "TF_VAR_iso_path" \ 42 | "VPN0_PORTGROUP" \ 43 | "VPN512_PORTGROUP" \ 44 | "SERVICEVPN_PORTGROUP" \ 45 | "IOSXE_SDWAN_IMAGE" \ 46 | "CSR1000V_IMAGE" \ 47 | "UBUNTU_IMAGE" \ 48 | "VIPTELA_VERSION" \ 49 | ) 50 | for OPTION in ${OPTION_LIST[*]}; do 51 | if [[ ! 
-z "${!OPTION}" ]]; then 52 | curl --request POST -sSLk --header "PRIVATE-TOKEN: $GITLAB_API_TOKEN" "$GITLAB_HOST/api/v4/projects/$GITLAB_USER%2f$GITLAB_PROJECT/variables" --form "key=$OPTION" --form "value=${!OPTION}" 53 | fi 54 | done -------------------------------------------------------------------------------- /extras/delete-gitlab-project.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Uncomment the following and define proper values (or specify as environment variables) 4 | 5 | # GITLAB_HOST=https://gitlab.example.com 6 | # GITLAB_USER=foo 7 | # GITLAB_API_TOKEN=abc123 8 | # GITLAB_PROJECT=sdwan-devops 9 | 10 | # Delete project 11 | curl --request DELETE -sSLk --header "PRIVATE-TOKEN: $GITLAB_API_TOKEN" "$GITLAB_HOST/api/v4/projects/$GITLAB_USER%2f$GITLAB_PROJECT/" 12 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==9.4.* 2 | ansible-lint==24.2.2 3 | ansible-virl==0.0.7 4 | netaddr 5 | pydantic-settings 6 | viptela==0.3.10 7 | virl2_client==2.7.1 8 | cisco-sdwan==1.24 9 | 10 | # pytz is required by netbox.netbox.nb_inventory 11 | pytz 12 | -------------------------------------------------------------------------------- /sdwan_config_builder/README.md: -------------------------------------------------------------------------------- 1 | # SDWAN Configuration Builder 2 | 3 | ## Installation 4 | 5 | SDWAN config builder requires Python 3.8 or newer. This can be verified by pasting the following to a terminal window: 6 | ``` 7 | % python3 -c "import sys;assert sys.version_info>(3,8)" && echo "ALL GOOD" 8 | ``` 9 | 10 | If 'ALL GOOD' is printed it means Python requirements are met. If not, download and install the latest 3.x version at Python.org (https://www.python.org/downloads/). 
11 | 12 | Go to the sdwan_config_builder directory and create a virtual environment 13 | ``` 14 | % cd sdwan_config_builder 15 | % python3 -m venv venv 16 | ``` 17 | 18 | Activate the virtual environment: 19 | ``` 20 | % source venv/bin/activate 21 | (venv) % 22 | ``` 23 | - Note that the prompt is updated with the virtual environment name (venv), indicating that the virtual environment is active. 24 | 25 | Upgrade built-in virtual environment packages: 26 | ``` 27 | (venv) % pip install --upgrade pip setuptools 28 | ``` 29 | 30 | Install config builder: 31 | ``` 32 | (venv) % pip install --upgrade . 33 | ``` 34 | 35 | Validate that config builder is installed: 36 | ``` 37 | (venv) % sdwan_config_build --version 38 | SDWAN Config Builder Tool Version 0.9 39 | ``` 40 | 41 | ## Running 42 | 43 | The metadata file defines the location of the source configuration file, jinja2 templates, and where the output files 44 | should be saved. By default sdwan_config_build looks for a 'metadata.yaml' file in the directory where it is run. 45 | The CONFIG_BUILDER_METADATA environment variable can be used to specify a custom location for the metadata file. 46 | 47 | ``` 48 | (venv) % sdwan_config_build --help 49 | usage: sdwan_config_build [-h] [--version] {render,export,schema} ... 
50 | 51 | SDWAN Config Builder Tool 52 | 53 | options: 54 | -h, --help show this help message and exit 55 | --version show program's version number and exit 56 | 57 | commands: 58 | {render,export,schema} 59 | render render configuration files 60 | export export source configuration as JSON file 61 | schema generate source configuration JSON schema 62 | ``` 63 | 64 | To build the configuration files use render command: 65 | ``` 66 | (venv) % sdwan_config_build render --update 67 | INFO: Rendered Ansible day_-1 vars: 'day-1_local.j2' -> '../ansible/day_-1/group_vars/all/local.yml' 68 | INFO: Rendered Ansible day_0 vars: 'day0_local.j2' -> '../ansible/day_0/group_vars/all/local.yml' 69 | INFO: Rendered Ansible day_1 vars: 'day1_local.j2' -> '../ansible/day_1/group_vars/all/local.yml' 70 | INFO: Rendered Ansible SDWAN inventory: 'sdwan_inventory.j2' -> '../ansible/inventory/sdwan_inventory.yml' 71 | ``` 72 | 73 | By default, sdwan_config_build will not override a target config file if it is already present. The --update (or -u) 74 | flag changes this behavior override any pre-existing target files. 
75 | -------------------------------------------------------------------------------- /sdwan_config_builder/pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "sdwan_config_builder" 7 | description = "Cisco SDWAN Configuration Builder Tool" 8 | license = "MIT" 9 | classifiers = [ 10 | "Programming Language :: Python :: 3 :: Only", 11 | "Programming Language :: Python :: 3.8", 12 | "Programming Language :: Python :: 3.9", 13 | "Programming Language :: Python :: 3.10", 14 | "License :: OSI Approved :: MIT License", 15 | "Operating System :: OS Independent", 16 | "Topic :: System :: Networking", 17 | "Typing :: Typed", 18 | "Development Status :: 4 - Beta" 19 | ] 20 | keywords = ["sdwan", "cisco", "viptela", "network"] 21 | requires-python = ">=3.8" 22 | dependencies = [ 23 | "Jinja2", 24 | "PyYAML", 25 | "pydantic", 26 | "passlib", 27 | "sshpubkeys" 28 | ] 29 | dynamic = ["version"] 30 | 31 | [project.scripts] 32 | sdwan_config_build = "sdwan_config_builder.__main__:main" 33 | 34 | [tool.hatch.version] 35 | path = "src/sdwan_config_builder/__version__.py" 36 | 37 | [tool.hatch.build.targets.sdist] 38 | include = [ 39 | "/src", 40 | ] 41 | -------------------------------------------------------------------------------- /sdwan_config_builder/requirements.txt: -------------------------------------------------------------------------------- 1 | pydantic>=2.6 2 | pydantic-settings>=2.2.1 3 | PyYAML>=6.0.1 4 | Jinja2>=3.1 5 | passlib>=1.7.4 6 | sshpubkeys>=3.3 7 | -------------------------------------------------------------------------------- /sdwan_config_builder/sdwan_config_build.py: -------------------------------------------------------------------------------- 1 | #! 
/usr/bin/env python3 2 | """ 3 | SDWAN Config Builder Tool 4 | 5 | """ 6 | import re 7 | import sys 8 | 9 | from src.sdwan_config_builder.__main__ import main 10 | 11 | if __name__ == '__main__': 12 | sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) 13 | sys.exit(main()) 14 | -------------------------------------------------------------------------------- /sdwan_config_builder/src/sdwan_config_builder/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import logging.config 3 | import logging.handlers 4 | from os import environ 5 | from pathlib import Path 6 | from typing import Any 7 | from .loader import load_metadata 8 | 9 | 10 | METADATA_FILENAME = Path(environ.get("CONFIG_BUILDER_METADATA", "metadata.yaml")) 11 | 12 | 13 | def setup_logging(logging_config: dict[str, Any]) -> None: 14 | file_handler = logging_config.get("handlers", {}).get("file") 15 | if file_handler is not None: 16 | Path(file_handler["filename"]).parent.mkdir(parents=True, exist_ok=True) 17 | 18 | logging.config.dictConfig(logging_config) 19 | 20 | 21 | app_config = load_metadata(METADATA_FILENAME) 22 | setup_logging(app_config.logging_config) 23 | -------------------------------------------------------------------------------- /sdwan_config_builder/src/sdwan_config_builder/__main__.py: -------------------------------------------------------------------------------- 1 | """ 2 | SDWAN Config Builder Tool 3 | 4 | """ 5 | import argparse 6 | import logging 7 | from datetime import date 8 | from .__version__ import __version__ as version 9 | from .commands import render_cmd, export_cmd, schema_cmd 10 | 11 | 12 | logger = logging.getLogger('sdwan_config_builder.main') 13 | 14 | 15 | def main(): 16 | cli_parser = argparse.ArgumentParser(description=__doc__) 17 | cli_parser.add_argument("--version", action="version", version=f"SDWAN Config Builder Tool Version {version}") 18 | commands = 
cli_parser.add_subparsers(title="commands") 19 | commands.required = True 20 | 21 | render_parser = commands.add_parser("render", help="render configuration files") 22 | render_parser.set_defaults(cmd_handler=render_cmd) 23 | render_parser.add_argument("-u", "--update", action="store_true", 24 | help="override target files that already exist, by default they are skipped") 25 | 26 | export_parser = commands.add_parser("export", help="export source configuration as JSON file") 27 | export_parser.set_defaults(cmd_handler=export_cmd) 28 | export_parser.add_argument("-f", "--file", metavar="", default=f"config_{date.today():%Y%m%d}.json", 29 | help="export filename (default: %(default)s)") 30 | 31 | schema_parser = commands.add_parser("schema", help="generate source configuration JSON schema") 32 | schema_parser.set_defaults(cmd_handler=schema_cmd) 33 | schema_parser.add_argument("-f", "--file", metavar="", default=f"config_schema.json", 34 | help="export filename (default: %(default)s)") 35 | 36 | cli_args = cli_parser.parse_args() 37 | try: 38 | cli_args.cmd_handler(cli_args) 39 | except KeyboardInterrupt: 40 | logger.critical("Interrupted by user") 41 | 42 | 43 | if __name__ == '__main__': 44 | main() 45 | -------------------------------------------------------------------------------- /sdwan_config_builder/src/sdwan_config_builder/__version__.py: -------------------------------------------------------------------------------- 1 | __copyright__ = "Copyright (c) 2022-2024 Cisco Systems, Inc. 
and/or its affiliates" 2 | __version__ = "0.9.0" 3 | -------------------------------------------------------------------------------- /sdwan_config_builder/src/sdwan_config_builder/loader/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import yaml 4 | from typing import Any, TypeVar, Type, Union, List 5 | from pydantic import BaseModel, ValidationError 6 | from .models import ConfigModel 7 | 8 | 9 | class LoaderException(Exception): 10 | """ Exception indicating loader errors """ 11 | pass 12 | 13 | 14 | M = TypeVar('M', bound=BaseModel) 15 | 16 | 17 | def load_yaml(model_cls: Type[M], description: str, filename: Union[os.PathLike, str]) -> M: 18 | try: 19 | with open(filename) as yaml_file: 20 | yaml_dict = yaml.safe_load(yaml_file) 21 | 22 | return model_cls(**yaml_dict) 23 | 24 | except FileNotFoundError as ex: 25 | raise LoaderException(f"Could not open {description} file: {ex}") from None 26 | except yaml.YAMLError as ex: 27 | raise LoaderException(f'YAML syntax error in {description} file: {ex}') from None 28 | except ValidationError as ex: 29 | raise LoaderException(f"Invalid {description} file: {ex}") from None 30 | 31 | 32 | class LoaderConfigModel(BaseModel): 33 | top_level_config: str 34 | 35 | 36 | class JinjaTargetModel(BaseModel): 37 | description: str 38 | template: str 39 | filename: str 40 | 41 | 42 | class JinjaRendererModel(BaseModel): 43 | templates_dir: str 44 | targets: List[JinjaTargetModel] 45 | 46 | 47 | class TargetsConfigModel(BaseModel): 48 | jinja_renderer: JinjaRendererModel 49 | 50 | 51 | class MetadataModel(BaseModel): 52 | loader_config: LoaderConfigModel 53 | targets_config: TargetsConfigModel 54 | logging_config: dict[str, Any] 55 | 56 | 57 | def load_metadata(filename: Union[os.PathLike, str]) -> MetadataModel: 58 | try: 59 | return load_yaml(MetadataModel, 'metadata', filename) 60 | except LoaderException as ex: 61 | print(ex) 62 | 63 | sys.exit(1) 64 
| -------------------------------------------------------------------------------- /sdwan_config_builder/src/sdwan_config_builder/loader/validators.py: -------------------------------------------------------------------------------- 1 | from typing import Set, Dict, Optional, Callable, Iterator 2 | from ipaddress import IPv4Address, IPv4Network, IPv4Interface 3 | from pydantic import ValidationInfo 4 | 5 | 6 | # 7 | # Reusable validators 8 | # 9 | def formatted_string(v: str, info: ValidationInfo) -> str: 10 | """ 11 | Process v as a python formatted string 12 | :param v: Value to be validated 13 | :param info: A ValidationInfo instance with previously validated model fields 14 | :return: Expanded formatted string 15 | """ 16 | try: 17 | return v.format(**info.data) if v is not None else v 18 | except KeyError as ex: 19 | raise ValueError(f"Variable not found: {ex}") from None 20 | 21 | 22 | _used_addresses: Set[IPv4Address] = set() 23 | 24 | 25 | def unique_system_ip(system_ip: IPv4Address) -> IPv4Address: 26 | if system_ip in _used_addresses: 27 | raise ValueError(f'system-ip "{system_ip}" is already in use') 28 | 29 | _used_addresses.add(system_ip) 30 | return system_ip 31 | 32 | 33 | def constrained_cidr( 34 | *, 35 | min_length: Optional[int] = None, 36 | max_length: Optional[int] = None, 37 | length: Optional[int] = None 38 | ) -> Callable[[IPv4Network], IPv4Network]: 39 | def validator(ipv4_network: IPv4Network) -> IPv4Network: 40 | if length is not None and ipv4_network.prefixlen != length: 41 | raise ValueError(f'IPv4 prefix length needs to be /{length}') 42 | if max_length is not None and ipv4_network.prefixlen > max_length: 43 | raise ValueError(f'IPv4 prefix length needs to be <= /{max_length}') 44 | if min_length is not None and ipv4_network.prefixlen < min_length: 45 | raise ValueError(f'IPv4 prefix length needs to be >= /{min_length}') 46 | 47 | return ipv4_network 48 | 49 | return validator 50 | 51 | 52 | def cidr_subnet( 53 | *, 54 | cidr_field: 
str, 55 | prefix_len: int = 24 56 | ) -> Callable[[IPv4Network, ValidationInfo], IPv4Network]: 57 | 58 | subnet_gen_map: Dict[IPv4Network, Iterator[IPv4Network]] = {} 59 | 60 | def validator(subnet: IPv4Network, info: ValidationInfo) -> IPv4Network: 61 | if subnet is None: 62 | cidr = info.data.get(cidr_field, ...) 63 | if cidr is ...: 64 | raise ValueError(f"no cidr_field name {cidr_field}") 65 | if cidr is None: 66 | raise ValueError(f"{cidr_field} needs to be provided when subnet is not specified") 67 | try: 68 | subnet = next(subnet_gen_map.setdefault(cidr, cidr.subnets(new_prefix=prefix_len))) 69 | except StopIteration: 70 | raise ValueError(f"no more /{prefix_len} subnets available in CIDR {cidr}") from None 71 | 72 | return subnet 73 | 74 | return validator 75 | 76 | 77 | def subnet_interface( 78 | *, 79 | subnet_field: str, 80 | host_index: int 81 | ) -> Callable[[IPv4Interface, ValidationInfo], IPv4Interface]: 82 | def validator(ipv4_interface: IPv4Interface, info: ValidationInfo) -> IPv4Interface: 83 | if ipv4_interface is None: 84 | subnet = info.data.get(subnet_field, ...) 85 | if subnet is ...: 86 | raise ValueError(f"no subnet_field name {subnet_field}") 87 | if subnet is None: 88 | raise ValueError(f"{subnet_field} was not set") 89 | try: 90 | ipv4_interface = IPv4Interface((list(subnet.hosts())[host_index], subnet.prefixlen)) 91 | except IndexError: 92 | raise ValueError(f"host_index {host_index} is out of bounds for /{subnet.prefixlen}") from None 93 | 94 | return ipv4_interface 95 | 96 | return validator 97 | 98 | 99 | def subnet_address( 100 | *, 101 | subnet_field: str, 102 | host_index: int 103 | ) -> Callable[[IPv4Address, ValidationInfo], IPv4Address]: 104 | def validator(ipv4_address: IPv4Address, info: ValidationInfo) -> IPv4Address: 105 | if ipv4_address is None: 106 | subnet = info.data.get(subnet_field, ...) 
107 | if subnet is ...: 108 | raise ValueError(f"no subnet_field name {subnet_field}") 109 | if subnet is None: 110 | raise ValueError(f"{subnet_field} was not set") 111 | try: 112 | ipv4_address = list(subnet.hosts())[host_index] 113 | except IndexError: 114 | raise ValueError(f"host_index {host_index} is out of bounds for /{subnet.prefixlen}") from None 115 | 116 | return ipv4_address 117 | 118 | return validator 119 | --------------------------------------------------------------------------------