├── .envrc.example
├── .github
│   └── ISSUE_TEMPLATE
│       ├── bug_report.md
│       └── feature_request.md
├── .gitignore
├── CHANGELOG.md
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── TODO.md
├── config.yml
├── documentation
│   ├── AWS.md
│   └── Azure.md
├── images
│   ├── azure-resource-group.png
│   ├── azure-storage-browser.png
│   ├── flowchart.png
│   ├── flowchart.uml
│   ├── perm-details.png
│   ├── permissions.png
│   ├── policies.png
│   ├── policies.uml
│   └── upload-refplat.png
├── main.tf
├── modules
│   ├── deploy
│   │   ├── aws-mini
│   │   │   ├── main.tf
│   │   │   ├── output.tf
│   │   │   └── variables.tf
│   │   ├── aws-off.t-f
│   │   ├── aws-on.t-f
│   │   ├── aws.tf
│   │   ├── aws
│   │   │   ├── main.tf
│   │   │   ├── output.tf
│   │   │   └── variables.tf
│   │   ├── azure-off.t-f
│   │   ├── azure-on.t-f
│   │   ├── azure.tf
│   │   ├── azure
│   │   │   ├── main.tf
│   │   │   ├── output.tf
│   │   │   └── variables.tf
│   │   ├── data
│   │   │   ├── 00-patch_vmx.sh
│   │   │   ├── 03-letsencrypt.sh
│   │   │   ├── 04-customize.sh
│   │   │   ├── 99-dummy.sh
│   │   │   ├── README.md
│   │   │   ├── cloud-config.txt
│   │   │   ├── cml.sh
│   │   │   ├── common.sh
│   │   │   ├── copyfile.sh
│   │   │   ├── del.sh
│   │   │   ├── interface_fix.py
│   │   │   ├── license.py
│   │   │   ├── vars.sh
│   │   │   └── virl2-base-config.yml
│   │   ├── dummy
│   │   │   └── main.tf
│   │   ├── main.tf
│   │   ├── output.tf
│   │   └── variables.tf
│   ├── readyness
│   │   ├── main.tf
│   │   ├── output.tf
│   │   └── terraform.tf
│   └── secrets
│       ├── conjur-off.t-f
│       ├── conjur-on.t-f
│       ├── conjur.tf
│       ├── conjur
│       │   ├── main.tf
│       │   ├── output.tf
│       │   ├── terraform.tf
│       │   └── variables.tf
│       ├── dummy
│       │   ├── main.tf
│       │   ├── output.tf
│       │   ├── terraform.tf
│       │   └── variables.tf
│       ├── main.tf
│       ├── output.tf
│       ├── variables.tf
│       ├── vault-off.t-f
│       ├── vault-on.t-f
│       ├── vault.tf
│       └── vault
│           ├── main.tf
│           ├── output.tf
│           ├── terraform.tf
│           └── variables.tf
├── output.tf
├── prepare.bat
├── prepare.sh
├── terraform.tf
├── upload-images-to-aws.sh
└── variables.tf
--------------------------------------------------------------------------------
/.envrc.example:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2024, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | #########
8 | # Configs
9 | #########
10 | #export TF_VAR_cfg_file=""
11 | #export TF_VAR_cfg_extra_vars=""
12 | 
13 | ########
14 | # Clouds
15 | ########
16 | 
17 | #
18 | # AWS
19 | #
20 | 
21 | #export TF_VAR_aws_access_key=""
22 | #export TF_VAR_aws_secret_key=""
23 | 
24 | #
25 | # Azure
26 | #
27 | 
28 | #export TF_VAR_azure_subscription_id=""
29 | #export TF_VAR_azure_tenant_id=""
30 | 
31 | #########
32 | # Secrets
33 | #########
34 | 
35 | #
36 | # Conjur
37 | #
38 | 
39 | #export CONJUR_APPLIANCE_URL="https://conjur-server.example.com"
40 | #export CONJUR_ACCOUNT="example"
41 | ## Initialize Conjur, saving the Certificate to the user's home in
42 | ## ~/conjur-server.pem
43 | # conjur init --url "$CONJUR_APPLIANCE_URL" --account "$CONJUR_ACCOUNT" --force
44 | ## Log in with a Host API Key. The user's short hostname is used to identify
45 | ## the host. These would be set up ahead of time in Conjur. This only needs
46 | ## to be performed once.
47 | # conjur login --id "host/org/tenant/$(hostname -s)"
48 | # conjur whoami
49 | ## Once you are logged in with the Conjur CLI, you can use the macOS Keychain
50 | ## to access the required credentials to set up the environment variables.
51 | #export CONJUR_AUTHN_LOGIN="$(security find-generic-password -s ${CONJUR_APPLIANCE_URL}/authn -a login -w | cut -d ':' -f 2 | base64 -d -i -)"
52 | #export CONJUR_AUTHN_API_KEY="$(security find-generic-password -s ${CONJUR_APPLIANCE_URL}/authn -a password -w | cut -d ':' -f 2 | base64 -d -i -)"
53 | ## Or, change for other OSes
54 | #export CONJUR_AUTHN_LOGIN=""
55 | #export CONJUR_AUTHN_API_KEY=""
56 | #export CONJUR_CERT_FILE="/etc/conjur.pem"
57 | # -or for Windows-
58 | #set CONJUR_APPLIANCE_URL=https://conjur-server.example.com
59 | #set CONJUR_ACCOUNT=example
60 | #set CONJUR_AUTHN_LOGIN=""
61 | #set CONJUR_AUTHN_API_KEY=""
62 | #set CONJUR_CERT_FILE=C:\conjur-server.pem
63 | 
64 | #
65 | # Hashicorp Vault
66 | #
67 | 
68 | #export VAULT_ADDR="https://vault-server.example.com:8200"
69 | ## This logs into the Vault CLI and refreshes the user's token.
70 | # vault login #-method=ldap
71 | # -or for Windows-
72 | #set VAULT_ADDR=https://vault-server.example.com:8200
73 | 
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 | 
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. ...
16 | 2. ...
17 | 3. ...
18 | 4. See error
19 | 
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 | 
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 | 
26 | **Software versions (please complete the following information):**
27 | - OS: [e.g. macOS, Windows, Linux, ...]
28 | - Terraform version
29 | - Plugin versions
30 | 
31 | **Additional context**
32 | Add any other context about the problem here.
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 | 
8 | ---
9 | 
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 | 
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 | 
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 | 
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .envrc
3 | .terraform
4 | .terraform.lock.hcl
5 | terraform.tfstate*
6 | .terraform.tfstate.lock.info
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Cisco CML2 Cloud provisioning tooling
2 | 
3 | Lists the changes for the tool releases.
4 | 
5 | ## Version 2.8.0
6 | 
7 | - using "aws\_" and "azure\_" prefixes to provide tokens and IDs in the environment (see `.envrc.example`)
8 | - adapt tooling to work with 2.8.0 (move base OS from 20.04 to 24.04)
9 | - allow using the `allowed_ipv4_subnets` for Azure as well
10 | - improve network manager handling while provisioning
11 | - licensing now uses the PCL instead of curl and bash
12 | - documentation improvements and fixes
13 | 
14 | ## Version 2.7.2
15 | 
16 | - added the AWS mini variant, which does not manage any network resources; the
17 |   subnet and security group IDs must be provided instead
18 | - change elastic IP allocation for AWS from dynamic to static to make it work
19 |   again
20 | - this is the last release to support CML 2.7 and before
21 | - changed the versioning to match the CML version so that it's easier to find the proper version / release of cloud-cml which works with the CML version to be used
22 | 
23 | ## Version 0.3.0
24 | 
25 | - allow cluster deployments on AWS.
26 |   - manage and use a non-default VPC
27 |   - optionally allow using an already existing VPC and gateway
28 |   - allow enabling EBS encryption (fixes #8)
29 |   - a `cluster` section has been added to the config file. Some keywords have changed (`hostname` -> `controller_hostname`). See also the new "Cluster" section in the [AWS documentation](documentation/AWS.md)
30 | - introduce secret managers for storing secrets.
31 |   - supported are dummy (use raw_secrets, as before), Conjur and Vault
32 |   - also support randomly generated secrets
33 |   - by default, the dummy module with random secrets is configured
34 |   - the license token secret needs to be configured regardless
35 | - use the CML .pkg software distribution file instead of multiple .deb packages (this is a breaking change -- you need to change the configuration and upload the .pkg to cloud storage instead of the .deb; `deb` -> `software`)
36 | - the PaTTY customization script has been removed. PaTTY is included in the .pkg. Its installation and configuration are now controlled by a new keyword `enable_patty` in the `common` section of the config.
37 |   > [!NOTE]
38 |   > Poll time is hard-coded to 5 seconds in the `cml.sh` script. If a longer poll time and/or additional options like console and VNC access are needed, then this needs to be changed manually in the script.
39 | - add a common script file which currently has a function to determine whether the instance is a controller or not. This makes it easier to install only controller-relevant elements and omit them on computes (usable within the main `cml.sh` file as well as in the customization scripts).
40 | - explicitly disable bridge0 and also disable the virl2-bridge-setup.py script by inserting `exit()` as the 2nd line. This ensures that service restarts will not try to re-create the bridge0 interface. This will be obsolete / a no-op with 2.7.1, which includes a "skip bridge creation" flag.
41 | - each instance will be rebooted at the end of cloud-init to come up with newly installed software / kernel and in a clean state.
42 | - add configuration option `cfg.aws.vpc_id` and `cfg.aws.gw_id` to specify the VPC and gateway ID that should be used.
If left empty, then a custom VPC will be created (fixes #9)
43 | 
44 | ## Version 0.2.1
45 | 
46 | - allow selecting the provider using a script and split out TF providers
47 | - added prepare.sh / prepare.bat script for this purpose
48 | - initial state has AWS ON (config.yml example also is set to AWS)
49 | - fixed image paths for the AWS documentation
50 | - mentioned the necessary "prepare" step in the overall README.md
51 | - fix copying from cloud-storage to instance storage
52 | - address 16KB cloud-init limitation in AWS (not entirely removed but pushed out farther)
53 | 
54 | ## Version 0.2.0
55 | 
56 | - added multi-cloud support
57 | - big re-factor to accommodate different cloud-targets
58 | - currently supported: AWS and Azure
59 | - updated documentation, split into different cloud providers
60 | 
61 | ## Version 0.1.4
62 | 
63 | - improved upload tool
64 |   - better error handling in case no images are available
65 |   - modified help text
66 | - completely reworked the AWS policy creation section to provide step-by-step instructions to accurately describe the policy creation process
67 | - added the current ref-plat images to the `config.yml` file
68 | - provided the current .pkg file name to the `config.yml` file
69 | 
70 | ## Version 0.1.3
71 | 
72 | - documentation update
73 | - make PATty installation script more robust
74 | - fix location for .pkg file in the `upload-images-to-aws.sh` script
75 | 
76 | ## Version 0.1.2
77 | 
78 | Documentation update. Added a diagram for policy dependencies.
79 | 
80 | ## Version 0.1.1
81 | 
82 | - depend on 0.6.2 of the CML Terraform provider
83 | - updated documentation / README
84 | - changed some wording / corrected some sections (fixes #1)
85 | - added proxy section
86 | - added a troubleshooting section
87 | - ensure the AWS provider uses the region provided in `config.yml`
88 | - use the new `ignore_errors` flag when waiting for the system to become ready
89 | 
90 | ## Version 0.1.0
91 | 
92 | Initial release of the tooling with support for AWS metal flavors.
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Code of Conduct
2 | 
3 | ## Our Pledge
4 | 
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at ciscovirl@cisco.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce the Code of Conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | 77 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Guidance on how to contribute 2 | 3 | Contributions to this code are welcome and appreciated. 4 | Please adhere to our [Code of Conduct](CODE_OF_CONDUCT.md) at all times. 5 | 6 | > All contributions to this code will be released under the terms of the 7 | > [LICENSE](./LICENSE) of this code. 
By submitting a pull request or filing a 8 | > bug, issue, or feature request, you are agreeing to comply with this waiver of 9 | > copyright interest. Details can be found in our [LICENSE](./LICENSE). 10 | 11 | There are two primary ways to contribute: 12 | 13 | 1. Using the issue tracker 14 | 2. Changing the codebase 15 | 16 | ## Using the issue tracker 17 | 18 | Use the issue tracker to suggest feature requests, report bugs, and ask 19 | questions. This is also a great way to connect with the developers of the 20 | project as well as others who are interested in this solution. 21 | 22 | Use the issue tracker to find ways to contribute. Find a bug or a feature, 23 | mention in the issue that you will take on that effort, then follow the 24 | _Changing the codebase_ guidance below. 25 | 26 | ## Changing the codebase 27 | 28 | Generally speaking, you should fork this repository, make changes in your own 29 | fork, and then submit a pull request. All new code should have associated unit 30 | tests (if applicable) that validate implemented features and the presence or 31 | lack of defects. 32 | 33 | Additionally, the code should follow any stylistic and architectural guidelines 34 | prescribed by the project. In the absence of such guidelines, mimic the styles 35 | and patterns in the existing codebase. 36 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2019-2025 Cisco 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | Version 2.8.0, January 10 2025 4 | 5 | CML instances can run on Azure and AWS cloud infrastructure. This repository provides automation tooling using Terraform to deploy and manage CML in the cloud. We have tested CML deployments using this tool chain in both clouds. **The use of this tool is considered BETA**. 
The tool has certain requirements and prerequisites which are described in this README and in the [documentation](documentation) directory.
6 | 
7 | *It is very likely that this tool chain cannot be used "as-is"*. It should be forked and adapted to specific customer requirements and environments.
8 | 
9 | > [!IMPORTANT]
10 | > 
11 | > **Version 2.7 vs 2.8**
12 | > 
13 | > CML2 version 2.8 was released in November 2024. As CML 2.8 uses Ubuntu 24.04 as the base operating system, cloud-cml needs to accommodate that during image selection when bringing up the VM on the hosting service (AWS, Azure, ...). This means that going forward, cloud-cml supports 2.8 and not 2.7 anymore. If a CML version earlier than 2.8 should be used, then please select the release with the tag `v2.7.2`, which still supports CML 2.7!
14 | > 
15 | > **Support:**
16 | > 
17 | > - For customers with a valid service contract, CML cloud deployments are supported by TAC within the outlined constraints. Beyond this, support is done with best effort, as cloud environments, requirements and policies can differ to a great extent.
18 | > - With no service contract, support is done on a best-effort basis via the issue tracker.
19 | > 
20 | > **Features and capabilities:** Changes to the deployment tooling will be considered like any other feature by adding them to the product roadmap. This is done at the discretion of the CML team.
21 | > 
22 | > **Error reporting:** If you encounter any errors or problems that might be related to the code in this repository, then please open an issue on the [GitHub issue tracker for this repository](https://github.com/CiscoDevNet/cloud-cml/issues).
23 | 
24 | > [!IMPORTANT]
25 | > Read the section below about [cloud provider selection](#important-cloud-provider-selection) (prepare script).
26 | 
27 | ## General requirements
28 | 
29 | The tooling uses Terraform to deploy CML instances in the cloud. It's therefore required to have a functional Terraform installation on the computer where this tool chain should be used.
30 | 
31 | Furthermore, the user needs to have access to the cloud service. For example, credentials and permissions are needed to create and modify the required resources. Required resources are
32 | 
33 | - service accounts
34 | - storage services
35 | - compute and networking services
36 | 
37 | The tool chain / build scripts and Terraform can be installed on the on-prem CML controller or, when this is undesirable due to support concerns, on a separate Linux instance.
38 | 
39 | That said, the tooling also runs on macOS with tools installed via [Homebrew](https://brew.sh/), or on Windows with WSL. However, Windows hasn't been tested by us.
40 | 
41 | ### Preparation
42 | 
43 | Some of the steps and procedures outlined below are preparation steps and only need to be done once. Those are
44 | 
45 | - cloning of the repository
46 | - installation of software (Terraform, cloud provider CLI tooling)
47 | - creating and configuring a service account, including the creation of associated access credentials
48 | - creating the storage resources and uploading images and software into them
49 | - creation of an SSH key pair and making the public key available to the cloud service
50 | - editing the `config.yml` configuration file, including the selection of the cloud service, an instance flavor, region, license token and other parameters
51 | 
52 | #### Important: Cloud provider selection
53 | 
54 | The tooling supports multiple cloud providers (currently AWS and Azure). Not everyone wants both providers. The **default configuration is set to use AWS only**. If Azure should be used, either instead or in addition, then the following steps are mandatory (see the sketch after this list):
55 | 
56 | 1. Run the `prepare.sh` script to modify and prepare the tool chain. If on Windows, use `prepare.bat`. You can actually choose to use both, if that's what you want.
57 | 2. Configure the proper target ("aws" or "azure") in the configuration file
58 | 
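A typical sequence could look like this (a minimal sketch for a Unix shell, run from the repository root; the exact interface of the prepare script may differ, and re-running `terraform init` is assumed to be needed so the downloaded providers match the new selection):

```bash
# Select the cloud provider(s) to enable.
./prepare.sh

# Re-initialize Terraform so the provider set matches the selection.
terraform init
```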
59 | The first step is unfortunately required, since it is impossible to dynamically select different cloud configurations within the same Terraform HCL configuration. See [this SO link](https://stackoverflow.com/questions/70428374/how-to-make-the-provider-configuration-optional-and-based-on-the-condition-in-te) for more context and details.
60 | 
61 | The default "out-of-the-box" configuration is AWS, so if you want to run on Azure, don't forget to run the prepare script.
62 | 
63 | #### Managing secrets
64 | 
65 | > [!WARNING]
66 | > It is a best practice to **not** keep your CML secrets and passwords in Git!
67 | 
68 | CML cloud supports these storage methods for the required platform and application secrets:
69 | 
70 | - Raw secrets in the configuration file (as supported with previous versions)
71 | - Random secrets by not specifying any secrets
72 | - [Hashicorp Vault](https://www.vaultproject.io/)
73 | - [CyberArk Conjur](https://www.conjur.org/)
74 | 
75 | See the sections below for additional details on how to use and manage secrets.
76 | 
77 | ##### Referencing secrets
78 | 
79 | You can refer to the secret maintained in the secrets manager by updating `config.yml` appropriately. If you use the `dummy` secrets manager, it will use the `raw_secret` as specified in the `config.yml` file, and the secrets will **not** be protected.
80 | 
81 | ```yaml
82 | secret:
83 |   manager: conjur
84 |   secrets:
85 |     app:
86 |       username: admin
87 |       # Example using Conjur
88 |       path: example-org/example-project/secret/admin_password
89 | ```
90 | 
91 | Refer to the `.envrc.example` file for examples to set up environment variables to use an external secrets manager.
92 | 
93 | ##### Random secrets
94 | 
95 | If you want random passwords to be generated when applying, based on [random_password](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password), leave the `raw_secret` undefined:
96 | 
97 | ```yaml
98 | secret:
99 |   manager: dummy
100 |   secrets:
101 |     app:
102 |       username: admin
103 |       # raw_secret: # Undefined
104 | ```
105 | 
106 | > [!NOTE]
107 | > 
108 | > You can retrieve the generated passwords after applying with `terraform output cml2secrets`.
109 | 
110 | The included default `config.yml` configures generated passwords for the following secrets:
111 | 
112 | - App password (for the UI)
113 | - System password for the OS system administration user
114 | - Cluster secret when clustering is enabled
115 | 
116 | Regardless of the secret manager in use and whether you use random passwords or not: you **must** provide a valid Smart Licensing token for the system to work.
117 | 
118 | ##### CyberArk Conjur installation
119 | 
120 | > [!IMPORTANT]
121 | > CyberArk Conjur is not currently in the Terraform Registry. You must follow its [installation instructions](https://github.com/cyberark/terraform-provider-conjur?tab=readme-ov-file#terraform-provider-conjur) before running `terraform init`.
122 | 
123 | These steps are only required if using CyberArk Conjur as an external secrets manager.
124 | 
125 | 1. Download the [CyberArk Conjur provider](https://github.com/cyberark/terraform-provider-conjur/releases).
126 | 2. Copy the custom provider to `~/.terraform.d/plugins/localhost/cyberark/conjur/<version>/<architecture>/terraform-provider-conjur_v<version>`:
127 | 
128 | ```bash
129 | $ mkdir -vp ~/.terraform.d/plugins/localhost/cyberark/conjur/0.6.7/darwin_arm64/
130 | $ unzip ~/terraform-provider-conjur_0.6.7-4_darwin_arm64.zip -d ~/.terraform.d/plugins/localhost/cyberark/conjur/0.6.7/darwin_arm64/
131 | $ 
132 | ```
133 | 
134 | 3. Create a `.terraformrc` file in the user's home:
135 | 
136 | ```hcl
137 | provider_installation {
138 |   filesystem_mirror {
139 |     path    = "/Users/example/.terraform.d/plugins"
140 |     include = ["localhost/cyberark/conjur"]
141 |   }
142 |   direct {
143 |     exclude = ["localhost/cyberark/conjur"]
144 |   }
145 | }
146 | ```
147 | 
148 | ### Terraform installation
149 | 
150 | Terraform can be downloaded for free from [here](https://developer.hashicorp.com/terraform/downloads). The site also has instructions on how to install it on the various supported platforms.
151 | 
152 | Deployments of CML using Terraform were tested using the versions mentioned below on Ubuntu Linux.
153 | 
154 | ```bash
155 | $ terraform version
156 | Terraform v1.10.4
157 | on linux_amd64
158 | + provider registry.terraform.io/ciscodevnet/cml2 v0.8.1
159 | + provider registry.terraform.io/hashicorp/aws v5.83.0
160 | + provider registry.terraform.io/hashicorp/cloudinit v2.3.5
161 | + provider registry.terraform.io/hashicorp/random v3.6.1
162 | $ 
163 | ```
164 | 
165 | It is assumed that the CML cloud repository was cloned to the computer where Terraform was installed. The following commands are all executed within the directory that contains the cloned repository, in particular this `README.md`, the `main.tf` and the `config.yml` files, amongst other files.
166 | 
167 | Once installed, run `terraform init` to initialize Terraform. This will download the required providers and create the state files.
168 | 
169 | ## Cloud specific instructions
170 | 
171 | See the documentation directory for cloud specific instructions:
172 | 
173 | - [Amazon Web Services (AWS)](documentation/AWS.md)
174 | - [Microsoft Azure](documentation/Azure.md)
175 | 
176 | ## Customization
177 | 
178 | There are two Terraform variables which can be defined / set to further customize the behavior of the tool chain:
179 | 
180 | - `cfg_file`: This variable defines the configuration file. It defaults to `config.yml`.
181 | - `cfg_extra_vars`: This variable defines the name of a file with additional variable definitions. The default is "none".
182 | 
183 | Here's an example of an `.envrc` file to set environment variables. Note the last two lines, which define the configuration file to use and the extra shell file which defines additional environment variables.
184 | 
185 | ```bash
186 | export TF_VAR_aws_access_key="aws-something"
187 | export TF_VAR_aws_secret_key="aws-somethingelse"
188 | 
189 | # export TF_VAR_azure_subscription_id="azure-something"
190 | # export TF_VAR_azure_tenant_id="azure-something-else"
191 | 
192 | export TF_VAR_cfg_file="config-custom.yml"
193 | export TF_VAR_cfg_extra_vars="extras.sh"
194 | ```
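If [`direnv`](https://direnv.net/) is used (it is mentioned in the AWS document as one way to manage these variables), the `.envrc` file only needs to be approved once; afterwards the variables load and unload automatically when entering and leaving the directory. A minimal sketch:

```bash
# Approve the .envrc in the current directory so direnv loads it.
direnv allow .
```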
195 | 
196 | A typical extra vars file would look like this (as referenced by `extras.sh` in the code above):
197 | 
198 | ```plain
199 | CFG_UN="username"
200 | CFG_PW="password"
201 | CFG_HN="domainname"
202 | CFG_EMAIL="noone@acme.com"
203 | ```
204 | 
205 | In this example, four additional variables are defined which can be used in customization scripts during deployment to provide data (usernames, passwords, ...) for specific services like configuring DNS. See the `03-letsencrypt.sh` file, which installs a valid certificate into CML, using LetsEncrypt and DynDNS for domain name services.
206 | 
207 | See the AWS specific document for additional information on how to define variables in the environment using tools like `direnv` or `mise`.
208 | 
209 | ## Additional customization scripts
210 | 
211 | The deploy module has a couple of extra scripts which are not enabled / used by default. They are:
212 | 
213 | - request/install certificates from LetsEncrypt (`03-letsencrypt.sh`)
214 | - customize additional settings, here: add users and resource pools (`04-customize.sh`).
215 | 
216 | These additional scripts serve mostly as an inspiration for customization of the system to adapt to local requirements.
217 | 
218 | ### Requesting a cert
219 | 
220 | The letsencrypt script requests a cert if there's none already present. The cert can then be manually copied from the host to the cloud storage with the hostname as a prefix. If a host with the same hostname is started again at a later point in time and the cert files exist in cloud storage, then those files are simply copied back to the host without requesting a new certificate. This avoids running into any certificate request limits.
221 | 
222 | Certificates are stored in `/etc/letsencrypt/live` in a directory with the configured hostname.
223 | 
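On AWS, the manual copy to cloud storage could look like the following sketch (hedged: the bucket name is a placeholder, and the exact prefix layout the tooling expects may differ; check the scripts in `modules/deploy/data` before relying on it):

```bash
# Copy the Let's Encrypt files for this host to S3, using the
# short hostname as the prefix (placeholder bucket name).
aws s3 cp --recursive /etc/letsencrypt/live/ \
  "s3://bucket-name-goes-here/$(hostname -s)/letsencrypt/"
```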
224 | ## Limitations
225 | 
226 | Extra variable definitions and additional scripts will all be stored in the user-data that is provided via cloud-init to the cloud host. There's a limitation in size for the user-data in AWS. The current limit is 16KB. Azure has a much higher limit (unknown what the limit actually is, if any).
227 | 
228 | All scripts are copied as they are, including all comments, which requires even more space.
229 | 
230 | Cloud-cml currently uses the cloud-init Terraform provider, which allows compressed storage of this data. The compression makes room for more scripts and configuration, but the 16KB limit is still in place for the compressed data.
231 | 
232 | EOF
233 | 
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
1 | # TODO
2 | 
3 | Here's a list of things which should be implemented going forward. This is in no particular order at the moment.
4 | 
5 | 1. Allow for multiple instances in the same account/resource group. Right now, resources do not have a unique name and they should, using the random provider as is already done with the AWS VPC.
6 | 2. Allow cluster installs on Azure (AWS is working, see below).
7 | 3. Allow for certs to be pushed to cloud storage, once requested/installed.
8 | 4. Allow more than one cloud at the same time, as `prepare.sh` suggests. Right now, this does not work as it requires only ONE `required_providers` block, but both template files introduce an individual one. Should be addressed by making this smarter, introducing a `versions.tf` file which is built by `prepare.sh`. See
9 | 
10 | ## Done items
11 | 
12 | 1. Work around 16kb user data limit in AWS (seems to not be an issue in Azure).
13 | 2. Allow cluster installs (e.g. multiple computes, adding a VPC cluster network). Works on AWS, thanks to amieczko.
14 | 3. Allow to use an already existing VPC
15 | 
--------------------------------------------------------------------------------
/config.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | # at this time, "aws" and "azure" are defined targets
8 | # make sure that you ran the prepare.sh / prepare.bat script!
9 | target: aws
10 | 
11 | aws:
12 |   region: us-east-1
13 |   availability_zone: us-east-1a
14 |   bucket: bucket-name-goes-here
15 |   # flavor: c5.2xlarge
16 |   flavor: m5zn.metal
17 |   flavor_compute: m5zn.metal
18 |   profile: permission-profile-goes-here
19 |   #
20 |   # The following two attributes are used in the aws-mini variant. They specify
21 |   # the subnet and the security group ID the CML VM should use. They are NOT
22 |   # used when using the regular AWS deployment option (non-mini).
23 |   subnet_id: ""
24 |   sg_id: ""
25 |   #
26 |   # The following values are used by the regular AWS deployment option.
27 |   # When specifying a VPC ID below, then this prefix must exist on that VPC!
28 |   public_vpc_ipv4_cidr: 10.0.0.0/16
29 |   enable_ebs_encryption: false
30 |   #
31 |   # Leave empty to create a custom VPC / Internet gateway, or provide the IDs
32 |   # of the VPC / gateway to use; they must exist and be properly associated.
33 |   # also: an IPv6 CIDR prefix must be associated with the specified VPC
34 |   vpc_id: ""
35 |   gw_id: ""
36 |   #
37 |   # Use spot instances, when available, for the VMs forming the cluster
38 |   spot_instances:
39 |     use_spot_for_controller: false
40 |     use_spot_for_computes: false
41 | 
42 | azure:
43 |   resource_group: resource-group-name
44 |   size: Standard_D4d_v4
45 |   size_compute: unused_at_the_moment
46 |   storage_account: storage-account-name
47 |   container_name: container-name
48 | 
49 | common:
50 |   disk_size: 64
51 |   controller_hostname: cml-controller
52 |   key_name: ssh-key-name
53 |   allowed_ipv4_subnets: ["0.0.0.0/0"]
54 |   enable_patty: true
55 | 
56 | cluster:
57 |   enable_cluster: false
58 |   # No longer used, see the secret manager section below
59 |   #secret: your-secret-password
60 |   allow_vms_on_controller: true
61 |   number_of_compute_nodes: 0
62 |   compute_hostname_prefix: cml-compute
63 |   compute_disk_size: 32
64 | 
65 | secret:
66 |   # At this time, 'vault', 'conjur' and 'dummy' are supported secrets managers.
67 |   # Make sure that you also run the prepare.sh / prepare.bat script, otherwise
68 |   # a 'raw_secret' will be used. If 'raw_secret' is not defined, a random
69 |   # password will be used.
70 |   # https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password
71 |   #manager: vault
72 |   #manager: conjur
73 |   manager: dummy
74 | 
75 |   conjur:
76 | 
77 |   vault:
78 |     # Only the v2 version of the key value secret engine is supported
79 |     # https://developer.hashicorp.com/vault/docs/secrets/kv/kv-v2
80 |     kv_secret_v2_mount: secret
81 |     # Set this to true to prevent the creation of the ephemeral child token used by this provider.
82 |     skip_child_token: true
83 | 
84 |   # These are the secrets that will be used by the CML instances. This key
85 |   # gets mapped to the main configuration under 'secrets'. The values are
86 |   # filled in by the secrets manager and are accessible using 'secret'. For
87 |   # example, the 'app' password is accessed using 'secrets.app.secret'. The
88 |   # SmartLicense token is accessed using 'secrets.smartlicense_token.secret'.
89 |   secrets:
90 |     app:
91 |       username: admin
92 |       # Used with the dummy secret manager. If unspecified, a random password
93 |       # will be generated. You need to escape special chars:
94 |       #raw_secret: '\"!@$%'
95 |       #raw_secret: your-secret-password
96 |       # Path to secret, used with both Vault and Conjur:
97 |       #path: example-org/example-project/admin_password
98 |       # Used with Vault only:
99 |       #field: secret
100 | 
101 |     sys:
102 |       username: sysadmin
103 |       # Used with the dummy secret manager. If unspecified, a random password
104 |       # will be generated.
105 |       #raw_secret: your-secret-password
106 |       # Path to secret, used with both Vault and Conjur:
107 |       #path: example-org/example-project/sysadmin_password
108 |       # Used with Vault only:
109 |       #field: secret
110 | 
111 |     smartlicense_token:
112 |       # Only used with the dummy secret manager
113 |       raw_secret: your-smart-licensing-token
114 |       # Path to secret, used with both Vault and Conjur:
115 |       #path: example-org/example-project/smartlicense_token
116 |       # Used with Vault only:
117 |       #field: token
118 | 
119 |     cluster:
120 |       # Used with the dummy secret manager. If unspecified, a random password
121 |       # will be generated.
122 |       #raw_secret: your-secret-password
123 |       # Path to secret, used with both Vault and Conjur:
124 |       #path: example-org/example-project/cluster_secret
125 |       # Used with Vault only:
126 |       #field: secret
127 | 
128 | app:
129 |   # **No longer used, see the secret manager section above**
130 |   #user: admin
131 |   #pass: your-secret-password
132 |   software: cml2_2.8.0-6_amd64-32.pkg
133 |   # The list must have at least ONE element; this is what the dummy is for in
134 |   # case 00- and 01- are commented out!
135 |   customize:
136 |     # - 00-patch_vmx.sh
137 |     - 99-dummy.sh
138 | 
139 | license:
140 |   flavor: CML_Enterprise
141 |   # **No longer used, see the secret manager section above**
142 |   #token: your-smart-licensing-token
143 |   # Unless you have additional node licenses available, leave this at zero
144 |   nodes: 0
145 | 
146 | # Select the reference platforms needed by un-/commenting them. The selected
147 | # reference platforms will be copied from the specified cloud storage and must
148 | # be available prior to starting an instance. Ensure that each definition has
149 | # also a corresponding image! A smaller selection: less copying, faster bring-up
150 | # time!
151 | #
152 | # NOTE: the list below should be adapted to your needs and also to the exact
153 | # names of the files which have been copied to your cloud storage. Ensure
154 | # that those files are there!
155 | refplat:
156 |   definitions:
157 |     - alpine
158 |     - alpine-trex
159 |     - alpine-wanem
160 |     - asav
161 |     - cat8000v
162 |     - cat9000v-q200
163 |     - cat9000v-uadp
164 |     - cat-sdwan-edge
165 |     - cat-sdwan-controller
166 |     - cat-sdwan-manager
167 |     - cat-sdwan-validator
168 |     - cat-sdwan-vedge
169 |     - csr1000v
170 |     - desktop
171 |     - iol-xe
172 |     - ioll2-xe
173 |     - iosv
174 |     - iosvl2
175 |     - iosxrv9000
176 |     - nxosv9000
177 |     - server
178 |     - ubuntu
179 |   images:
180 |     - alpine-base-3-20-3
181 |     - alpine-desktop-3-20-3
182 |     # - alpine-trex-3-20-3
183 |     # - alpine-wanem-3-20-3
184 |     # - asav-9-22-1-1
185 |     # - cat8000v-17-15-01a
186 |     # - cat9000v-q200-17-15-01
187 |     # - cat9000v-uadp-17-15-01
188 |     # - csr1000v-17-03-08a
189 |     - iol-xe-17-15-01
190 |     - ioll2-xe-17-15-01
191 |     - iosv-159-3-m9
192 |     - iosvl2-2020
193 |     # - iosxrv9000-24-3-1
194 |     # - nxosv9300-10-5-1-f
195 |     - server-tcl-15-0
196 |     - ubuntu-24-04-20241004
197 | 
--------------------------------------------------------------------------------
/documentation/AWS.md:
--------------------------------------------------------------------------------
1 | # AWS
2 | 
3 | This document contains specific configuration steps to deploy a CML instance in AWS. Some sections from the top level document are repeated here with additional detail regarding AWS.
4 | 
5 | > [!IMPORTANT]
6 | > The repository includes an alternative deployment method for AWS (aws-mini) which does not create any network resources. It therefore relies on these resources to be available at the time of deploying CML. See the ["Mini vs regular deployments"](#mini-vs-regular-deployments) section below!
7 | 
8 | ## General requirements
9 | 
10 | The tooling uses Terraform to deploy CML instances on AWS. It's therefore required to have a functional Terraform installation on the computer where this tool chain should be used.
11 | 
12 | Furthermore, the user needs to have access to the AWS console to create or modify an automation account with the required permissions and policies applied.
13 | 
14 | In addition, the `upload-images-to-aws.sh` script requires the AWS CLI and the `dialog` utility to upload images to S3. It is a Bash shell script that requires Linux to run.
15 | 
16 | The AWS CLI and Terraform can be installed on the on-prem CML controller or, when this is undesirable due to support concerns, on a separate Linux instance.
17 | 
18 | ### Preparation
19 | 
20 | Some of the steps and procedures outlined below are preparation steps and only need to be done once. Those are
21 | 
22 | - cloning of the repository
23 | - installation of software (Terraform, AWS CLI)
24 | - creating and configuring the AWS automation account, including the creation of associated access credentials
25 | - creating the AWS S3 bucket and uploading images and software into it
26 | - creation of an SSH key pair and installing it into AWS EC2
27 | - editing the `config.yml` configuration file, including the selection of an instance flavor, region and other parameters
28 | 
29 | ![flowchart](../images/flowchart.png)
30 | 
31 | ### Mini vs regular deployments
32 | 
33 | As mentioned at the top, there's an `aws-mini` deployment option as an alternative to the "regular" deployment option.
The differences are:
34 | 
35 | | What                                                    | regular flavor | mini flavor |
36 | | ------------------------------------------------------- | -------------- | ----------- |
37 | | Allow CML cluster                                       | yes            | no          |
38 | | Create VPC                                              | optional       | no          |
39 | | Create Subnet, IGW, NAT GW, Route table, security group | yes            | no          |
40 | | Create elastic IP                                       | yes            | optional*   |
41 | | Create VM network interface                             | yes            | yes         |
42 | 
43 | The mini flavor is useful when the AWS networking infrastructure is already in place and cannot or should not be modified. In that case, cloud-cml simply creates a CML instance that uses the existing networking infrastructure; the subnet ID and the security group ID to attach the CML VM to are provided in the configuration.
44 | 
45 | If no Elastic IP should be used and the server should use a private IP from the configured subnet instead, then this is configurable in the .tf file. See the comment for the `resource "aws_eip" "server_eip"` inside of `main.tf` for the mini variant.
46 | 
47 | #### How to enable the mini variant
48 | 
49 | Edit the `modules/deploy/aws-on.t-f` file and modify the following section:
50 | 
51 | ```hcl
52 | module "aws" {
53 |   # source = "./aws-mini"
54 |   source = "./aws"
55 |   count = var.cfg.target == "aws" ? 1 : 0
56 |   options = local.options
57 | }
58 | ```
59 | 
60 | So that it reads:
61 | 
62 | ```hcl
63 | module "aws" {
64 |   source = "./aws-mini"
65 |   # source = "./aws"
66 |   count = var.cfg.target == "aws" ? 1 : 0
67 |   options = local.options
68 | }
69 | ```
70 | 
71 | and run the `prepare.sh` script (only needed when AWS hasn't been selected before; it is selected by default when cloning the repository).
72 | 
73 | #### Configure the mini variant
74 | 
75 | Ensure that `aws.subnet_id` and `aws.sg_id` have valid values and that those resources exist on AWS.
76 | 
77 | ### Terraform installation
78 | 
79 | Terraform can be downloaded for free from [here](https://developer.hashicorp.com/terraform/downloads). The site also has instructions on how to install it on the various supported platforms.
80 | 
81 | Deployments of CML using Terraform were tested using version 1.8.4 on macOS.
82 | 
83 | ```plain
84 | $ terraform version
85 | Terraform v1.8.4
86 | on darwin_arm64
87 | + provider registry.terraform.io/ciscodevnet/cml2 v0.7.0
88 | + provider registry.terraform.io/hashicorp/aws v5.51.0
89 | + provider registry.terraform.io/hashicorp/cloudinit v2.3.4
90 | + provider registry.terraform.io/hashicorp/random v3.6.1
91 | $ 
92 | ```
93 | 
94 | It is assumed that the CML cloud repository was cloned to the computer where Terraform was installed. The following commands are all executed within the directory that contains the cloned repository, in particular the top level `README.md`, the `main.tf` and the `config.yml` files, amongst other files.
95 | 
96 | Once installed, run `terraform init` to initialize Terraform. This will download the required providers and create the state files.
97 | 
98 | ### AWS CLI installation
99 | 
100 | The AWS CLI can be downloaded from [here](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html). This was tested using the following AWS CLI version:
101 | 
102 | ```plain
103 | $ aws --version
104 | aws-cli/2.15.56 Python/3.11.9 Darwin/23.5.0 source/arm64
105 | $ 
106 | ```
107 | 
108 | ### Using a proxy
109 | 
110 | If you need to use a proxy to access AWS, then define it using environment variables, e.g. `export HTTPS_PROXY=http://my.proxy.corp:80/` when using bash.
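Since both Terraform and the AWS CLI honor the usual proxy environment variables, a bash snippet along these lines could be used (a sketch; the proxy URL is a placeholder taken from the example above):

```bash
# Placeholder proxy URL; adjust host and port to your environment.
export HTTPS_PROXY=http://my.proxy.corp:80/
export HTTP_PROXY=http://my.proxy.corp:80/
# Hosts that should bypass the proxy, if any.
export NO_PROXY=localhost,127.0.0.1
```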
111 | 
112 | ## AWS requirements
113 | 
114 | This section describes the resources required by the provisioning scripts to successfully deploy CML on AWS. These configurations and policies need to be created prior to using the tooling. This can be done on the AWS console or via the preferred deployment method (e.g. also via Terraform).
115 | 
116 | > [!NOTE]
117 | > There's also a [video on YouTube](https://youtu.be/vzgUyO-GQio) which shows all the steps outlined below.
118 | 
119 | ### IAM user and group
120 | 
121 | A user is needed that can be used by Terraform to deploy the CML instance. It is recommended to also create a group and attach the required policies to the group. A user is then created and assigned to this group in the final step below. This user inherits the policies from the group.
122 | 
123 | - click "User groups"
124 | - click "Create group"
125 | - enter a name ("terraform")
126 | - click "Create group"
127 | 
128 | ### S3 access / bucket policy
129 | 
130 | Next, we will create an S3 access policy which is reused both to manage the bucket containing the reference platform images and, during CML deployment, to allow copying those images to the EC2 instance.
131 | 
132 | To create the policy, go to "Policies", then click "Create policy". There, select "JSON" instead of "Visual" at the top right and paste the following JSON:
133 | 
134 | ```json
135 | {
136 |     "Version": "2012-10-17",
137 |     "Statement": [
138 |         {
139 |             "Sid": "VisualEditor0",
140 |             "Effect": "Allow",
141 |             "Action": [
142 |                 "s3:PutObject",
143 |                 "s3:GetObject",
144 |                 "s3:ListBucket"
145 |             ],
146 |             "Resource": [
147 |                 "arn:aws:s3:::bucket-name",
148 |                 "arn:aws:s3:::bucket-name/*"
149 |             ]
150 |         }
151 |     ]
152 | }
153 | ```
154 | 
155 | Replace "bucket-name" with the name of your S3 bucket. This permits read/write and list access to the specified bucket and all objects within that bucket.
156 | 
157 | > [!NOTE]
158 | > This could be further tightened by removing the "PutObject" action from the policy, as the EC2 instance / the CML controller only needs read access ("GetObject") and not write access ("PutObject"). However, to upload images into the bucket, the write access is required at least initially.
159 | 
160 | Click "Next" and provide a policy name, "cml-s3-access" for example. Finally, click "Create policy".
161 | 
162 | ### AWS KMS policy for EC2
163 | 
164 | This is an optional policy that can be added to IAM. It is not covered in the linked video, as it was only added and documented with the 0.3.0 release of the tooling. It works in combination with the `enable_ebs_encryption` attribute in the AWS section of the configuration file.
165 | 
166 | ```json
167 | {
168 |     "Version": "2012-10-17",
169 |     "Statement": [
170 |         {
171 |             "Sid": "VisualEditor0",
172 |             "Effect": "Allow",
173 |             "Action": [
174 |                 "kms:Decrypt",
175 |                 "kms:Encrypt",
176 |                 "kms:ReEncrypt*",
177 |                 "kms:GenerateDataKey*",
178 |                 "kms:DescribeKey",
179 |                 "kms:CreateGrant"
180 |             ],
181 |             "Resource": "*"
182 |         }
183 |     ]
184 | }
185 | ```
186 | 
187 | For additional information, see this [documentation link](https://docs.aws.amazon.com/autoscaling/ec2/userguide/key-policy-requirements-EBS-encryption.html#policy-example-cmk-access).
188 | 
189 | Create this policy in the same way as the S3 access policy and name it accordingly, e.g. 'cml-kms-access'. It can be referenced by the cml_terraform user below. It might be required to limit the resources this policy applies to; the above example uses a wild-card.
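Both policies can also be created from the AWS CLI instead of the console (a sketch; it assumes the JSON documents above were saved to local files with the names shown):

```bash
# Create the customer-managed policies from the JSON documents above.
aws iam create-policy --policy-name cml-s3-access \
  --policy-document file://cml-s3-access.json
aws iam create-policy --policy-name cml-kms-access \
  --policy-document file://cml-kms-access.json
```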
190 | 
191 | With the default settings on AWS, when setting `enable_ebs_encryption` to true in the aws section of `config.yml`, the default KMS keys will be used, so there is no need for an extra IAM policy.
192 | 
193 | The policy is required, however, when custom KMS keys are used on AWS; in such a case, the Terraform user needs to have access to those keys via an IAM policy.
194 | 
195 | > [!NOTE]
196 | >
197 | > In some companies, EBS encryption might be enforced regardless of whether `enable_ebs_encryption` is set to true.
198 | 
199 | ### Create Role
200 | 
201 | Now that we have the S3 access policy, we can create a role that uses this policy.
202 | 
203 | 1. go to "Roles"
204 | 
205 | 2. click "Create role"
206 | 
207 | 3. select "AWS service" for the "Trusted entity type" (the default)
208 | 
209 | 4. select "EC2" for the "Use case"
210 | 
211 | 5. click "Next"
212 | 
213 | 6. select the S3 access policy that was created in the previous section ("cml-s3-access") from the permission policy list
214 | 
215 | 7. scroll to the bottom and click "Next"
216 | 
217 | 8. provide a role name, use "s3-access-for-ec2" (this is important to note as this is the role name that is also referenced in the Terraform configuration to deploy CML and in the inline role assignment). See [here](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/instance), search for `iam_instance_profile`, which says
218 | 
219 |    > IAM Instance Profile to launch the instance with. Specified as the name of the Instance Profile. Ensure your credentials have the correct permission to assign the instance profile according to the [EC2 documentation](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html#roles-usingrole-ec2instance-permissions), notably `iam:PassRole`.
220 | 
221 | 9. click "Create role" at the bottom right
222 | 
223 | ### Attach policies to user
224 | 
225 | In the third step, we attach permission policies to the group created in the step above. The policies in question are:
226 | 
227 | - AmazonEC2FullAccess, a pre-defined policy that allows full control of EC2 instances
228 | - cml-s3-access, the S3 access policy allowing users in this group to read/write/list objects in the bucket specified by the policy
229 | - the "pass role" policy, which passes the permission allowing access to the S3 bucket on to the EC2 instances that users in this group create
230 | - if the KMS access policy has been created and should be used, then attach it as well (optional)
231 | 
232 | To add these permissions, follow these steps:
233 | 
234 | #### EC2 policy
235 | 
236 | - click on "Add permissions"
237 | - select "Add permissions" from the drop down
238 | - select "Attach policies directly"
239 | - search for "EC2Full" which will result in the "AmazonEC2FullAccess" policy
240 | - select this policy
241 | - click "Next"
242 | - click "Add permissions"
243 | 
244 | #### S3 access policy
245 | 
246 | - click on "Add permissions"
247 | - select "Add permissions" from the drop down
248 | - select "Attach policies directly"
249 | - select "Customer managed" in the "Filter by type" drop down
250 | - select the "cml-s3-access" customer managed policy (the one we created above)
251 | - click "Next"
252 | - click "Add permissions"
253 | 
254 | #### KMS policy (optional)
255 | 
256 | - click on "Add permissions"
257 | - select "Add permissions" from the drop down
258 | - select "Attach policies directly"
259 | - select "Customer managed" in the "Filter by type" drop down
260 | - select the "cml-kms-access" customer managed policy (the one we created above)
261 | - click "Next"
262 | - click "Add permissions"
263 | 
264 | #### Pass role policy
265 | 
266 | - click on "Add permissions"
267 | - select "Create inline policy" from the drop down
268 | - click on "IAM" from the "Select a service" section
269 | - click on the "Write" Access level section
270 | - select the "PassRole" write action
271 | - in the "Resources" section click "Add arn"
272 | - in the dialog "Specify ARNs"
273 | - click "This account"
274 | - in the last field, add the "s3-access-for-ec2" role name to the end of the ARN. It will look like "arn:aws:iam::111111111111:role/s3-access-for-ec2" (where the numbers represent your account ID, which is already inserted for you by the UI)
275 | - click "Add ARN"
276 | - click "Next"
277 | - provide a policy name, "pass role" works
278 | - click "Create policy"
279 | 
280 | #### Create user
281 | 
282 | The next step is to create a user and associate it with the group.
283 | 
284 | - click on "Users"
285 | - click "Add users"
286 | - provide a user name
287 | - click "Next"
288 | - select "Add user to group" (the default)
289 | - select the group previously created ("terraform")
290 | - click "Next"
291 | - click "Create user"
292 | 
293 | #### Create credentials
294 | 
295 | The final step is to create access credentials that can be used with Terraform. We need an access key and a secret key.
296 | 
297 | - click on "Users"
298 | - select the "cml_terraform" user
299 | - click on the "Security credentials" tab
300 | - scroll down to the "Access keys" section
301 | - click on "Create access key"
302 | - make a note of the access key and the secret key (copy them into an editor so that they can later be used when editing the `config.yml` of the deployment tool)
303 | 
304 | This access key and the associated secret key must be provided to the AWS Terraform provider via the variables `aws_access_key` and `aws_secret_key`, ideally via environment variables or a vault. See the [Variables section](#terraform-variable-definition) below.
305 | 
306 | #### Example
307 | 
308 | The screenshot below shows an example of such a user, named "cml_terraform", with the required permission policies highlighted. Note that the required permission policies are listed; they are inherited from the "terraform" group. There's also an access key that has been created for this user.
309 | 
310 | ![image-20230810161432721](../images/permissions.png)
311 | 
312 | The role that is passed ("s3-access-for-ec2") is then configured in the `config.yml` attribute 'aws.profile'. The optional "cml-kms-access" policy, if added, would show as a fourth line above (not shown in the screenshot).
313 | 
314 | ![image-20230810162352026](../images/perm-details.png)
315 | 
316 | The following diagram outlines the relation between the various IAM elements:
317 | 
318 | ![image](../images/policies.png)
319 | 
320 | ### Other resources
321 | 
322 | In addition to the user and group policies set in the previous steps, the following resources are required for a successful deployment.
323 | 
324 | #### Key name
325 | 
326 | The key name specifies the name of the SSH key pair that exists for the EC2 service. A valid SSH public key must be imported into EC2 and the given name must be referenced within the provisioning HCL.
327 | 
328 | Key pairs are created locally and the public key is uploaded to EC2:
329 | 
330 | EC2 → Key pairs → Create key pair (choose ED25519 key pair type)
331 | 
332 | Alternatively, it's also possible to import a public key without exposing the private key via
333 | 
334 | EC2 → Key pairs → Actions → Import key pair.
335 | 
336 | Another alternative is to manage keys via the `aws_key_pair` Terraform resource. See the [official documentation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair).
337 | 
338 | #### Instance type
339 | 
340 | The instance type defines the "hardware" of the created CML instance. For full functionality on AWS, a "metal" flavor is required as only metal flavors allow the use of nested virtualization. Please refer to the [instance type explorer](https://aws.amazon.com/ec2/instance-explorer/?ec2-instances-cards.sort-by=item.additionalFields.category-order&ec2-instances-cards.sort-order=asc&awsf.ec2-instances-filter-category=*all&awsf.ec2-instances-filter-processors=*all&awsf.ec2-instances-filter-accelerators=*all&awsf.ec2-instances-filter-capabilities=additional-capabilities%23bare-metal-instances).
341 | 
342 | Limited usability can be achieved by using compute optimized C5 instances (link to the [documentation](https://aws.amazon.com/ec2/instance-types/c5/)). However, this is considered experimental and **not supported**, as a lot of CML node types will not work when run on a non-metal flavor. This was tested using 'c5.2xlarge' instances and the following node types have been working OK:
343 | 
344 | - External connector and unmanaged switch
345 | - All Linux node types
346 | - IOSv and IOSv-L2
347 | - IOL-XE and IOL-L2-XE
348 | 
349 | To enable this experimental "feature", the `00-patch_vmx.sh` script must be uncommented in the `app.customize` list of the configuration file. See below.
350 | 
351 | ### AWS CLI configuration
352 | 
353 | Now that the deployment user has been defined, we can use the access credentials obtained in one of the previous steps to configure the AWS CLI tool. Ensure that you use the correct region and keys.
354 | 
355 | ```plain
356 | $ aws configure
357 | AWS Access Key ID []: ********************
358 | AWS Secret Access Key []: ********************
359 | Default region name []: us-east-1
360 | Default output format []: json
361 | $
362 | ```
363 | 
364 | AWS CLI configurations are stored in `$HOME/.aws`.
365 | 
366 | If everything was configured correctly, then you should be able to list instances (remember that we permitted EC2 access for the deployment user):
367 | 
368 | ```plain
369 | $ aws ec2 describe-instances
370 | {
371 |     "Reservations": []
372 | }
373 | $
374 | ```
375 | 
376 | As there are no instances running in this case, the output is empty. The important thing here is that there's no error and the communication with AWS worked!
377 | 
378 | ### Configuration file
379 | 
380 | CML-specific settings are specified in the configuration file `config.yml`. See also the [VPC support](#vpc-support) and [Cluster support](#cluster-support) sections further down in the document.
381 | 
382 | #### AWS section
383 | 
384 | This holds the various configurations for the EC2 instance and S3 bucket to be used. The bucket and region values are also required on the actual instance to be able to copy the software onto the instance.
385 | 
386 | - `aws.bucket`. This is the name of the bucket where the software and the reference platform files are stored. Must be accessible per the policy / role defined above
387 | - `aws.region`. This defines the region of the bucket and typically matches the region of the AWS CLI as configured above. It also defines the region where the EC2 instances are created
388 | - `aws.flavor`. The flavor / instance type to be used for the AWS CML instance. Typically a metal instance
389 | - `aws.profile`. The name of the permission profile to be used for the instance. This needs to permit access to the S3 bucket with the software and reference platforms. In the example given above, this was named "s3-access-for-ec2"
390 | - `aws.vpc_id`. If this is the empty string, a custom VPC will be created and used. If a VPC ID is specified, then instead of creating a new VPC, the specified VPC will be used
391 | - `aws.gw_id`. If this is the empty string, a new Internet gateway will be created and used. If an ID is specified, then this gateway will be used for the public subnet, to which the external interfaces of the controller (and computes) are attached
392 | 
393 | #### VPC usage
394 | 
395 | The CML AWS tool chain creates all required network infrastructure including a custom VPC. This includes:
396 | 
397 | - VPC
398 | - Internet gateway
399 | - Security groups
400 | - Subnets
401 | - Route tables
402 | - NAT gateway
403 | - Transit gateway
404 | - Multicast domains
405 | - Addresses and network interfaces
406 | 
407 | Some of these resources are only created when clustering is enabled.
408 | 
409 | If a VPC ID is provided in the configuration, then that VPC is used and the required additional resources are attached to it. However, certain assumptions are made about the existing VPC:
410 | 
411 | - a gateway exists and is attached to this VPC
412 | - the IPv4 CIDR prefix associated with this VPC matches the configured CIDR block in the `config.yml`
413 | - an IPv6 CIDR prefix is associated with this VPC
414 | 
415 | The CML controller (and computes, if clustering is enabled) will be attached to a new subnet which is attached to the existing VPC; a default route is used to route all traffic from the public subnet.
416 | 
417 | #### Common section
418 | 
419 | - `common.key_name`. SSH key name which needs to be installed on AWS EC2. This key will be injected into the instance using cloud-init.
420 | - `common.disk_size`. The size of the disk in gigabytes. 64 is a good starting value but this really depends on the kind of nodes used and the planned lifetime of the instance.
421 | 
422 | In theory, the EC2 instance can be run in a different region than the region of the bucket where the software is stored. The tooling, however, assumes that both are in the same region.
423 | 
424 | > [!WARNING]
425 | >
426 | > Please verify the correct configuration attributes within `config.yml`, which is the authoritative reference. There's also additional information in the VPC and cluster sections below (new with 0.3.0).
427 | 
428 | #### Host name
429 | 
430 | Key name `controller_hostname`. The name of the instance; standard hostname rules apply.
431 | 
432 | #### Secret section
433 | 
434 | See the top level [README](/README.md) for information about secrets used throughout the CML cloud tooling.
435 | 
436 | #### App section
437 | 
438 | Within the app section, the following keys must be set with the correct values:
439 | 
440 | - `app.software` the filename of the CML .pkg package with the software, stored in the specified S3 bucket at the top level
441 | - `app.customize` a list of scripts, located in the `scripts` folder, which will be run as part of the instance creation to customize the install
442 | 
443 | ##### Customization
444 | 
445 | There are currently two scripts provided for CML instance customization.
446 | 
447 | 1. Patch VMX. The `00-patch_vmx.sh` script disables/bypasses the VMX CPU flag check. This makes it possible to run some reference platforms on non-metal AWS instance flavors. It significantly limits the list of nodes that actually work and is not supported. Use at your own risk.
448 | 2. Let's Encrypt. The `03-letsencrypt.sh` script copies a cert from storage if it exists and matches the configured hostname. If not, it requests one via the Let's Encrypt service. For this to work, it needs to have a valid hostname in DNS. The script uses DynDNS which likely has to be replaced with something else to make this work. Also note that this script uses 'extra' variables to e.g. store the username and password for the DynDNS service.
449 | 
450 | There's also a dummy entry in that list, as the list must have at least one element. So, when using none of the predefined entries, at least the dummy must be present.
451 | 
452 | > [!NOTE]
453 | > AWS userdata is limited to 16KB of data (Base64 encoded). That limit is easily reached. If more customization is done with additional scripts (like certificate installation or system customization), then it's likely that you will run into this limit. The tooling will eventually need to copy the script bundle to storage (S3) and download it from there during server bring-up (this is not done today!). See [this SO post](https://stackoverflow.com/questions/72099325/bypassing-16kb-ec2-user-data-limitation).
454 | 
455 | #### License section
456 | 
457 | This holds the license that should be applied to the instance. It consists of two keys:
458 | 
459 | - `license.flavor`: either `CML_Enterprise`, `CML_Education`, `CML_Personal` or `CML_Personal40` are acceptable
460 | - `license.nodes`: the number of *additional* nodes, not applicable for the personal flavors.
461 | 
462 | #### Refplat section
463 | 
464 | Here, the reference platforms which should be copied from the S3 bucket to the instance are listed. There are two lists:
465 | 
466 | - `refplat.definitions` lists the node definition IDs
467 | - `refplat.images` lists the associated image definition IDs
468 | 
469 | It's mandatory that for each node definition at least **one** matching image definition is listed and that the names of these node and image definitions match the names in the specified S3 bucket.
470 | 
471 | > [!NOTE]
472 | > The external connector and unmanaged switch are baked into the software; there's no need to have them listed here again.
473 | 
474 | ### Required "layout" of the software bucket
475 | 
476 | The software and the reference platform definitions and images must be uploaded to the S3 bucket to be used by the provisioning script. This includes:
477 | 
478 | - the CML software package as downloaded from CCO (the "update package", not the OVA)
479 | - the reference platform node definitions, image definitions and disk images of the reference platforms which should be available on the CML cloud instance
480 | 
481 | The reference platform files are taken from the reference platform ISO. They can be copied into the bucket using the provided `upload-images-to-aws.sh` script, the AWS CLI or the web UI, resulting in a folder hierarchy that looks similar to this:
482 | 
483 | ```plain
484 | $ aws s3 ls --recursive s3://aws-bucket-name/
485 | 2024-04-16 07:43:56  175189664 cml2_2.7.0-4_amd64-20.pkg
486 | 2023-03-02 14:38:10       2136 refplat/node-definitions/alpine.yaml
487 | 2023-03-03 11:29:24       1652 refplat/node-definitions/iosv.yaml
488 | 2023-03-03 11:29:23       1690 refplat/node-definitions/iosvl2.yaml
489 | 2023-03-02 14:38:11       2331 refplat/node-definitions/server.yaml
490 | 2023-03-02 14:38:09   51314688 refplat/virl-base-images/alpine-3-13-2-base/alpine-3-13-2-base.qcow2
491 | 2023-03-02 14:38:10        263 refplat/virl-base-images/alpine-3-13-2-base/alpine-3-13-2-base.yaml
492 | 2023-03-03 11:29:22        258 refplat/virl-base-images/iosv-159-3-m3/iosv-159-3-m3.yaml
493 | 2023-03-03 11:29:22   57296384 refplat/virl-base-images/iosv-159-3-m3/vios-adventerprisek9-m.spa.159-3.m3.qcow2
494 | 2023-03-03 11:29:23        267 refplat/virl-base-images/iosvl2-2020/iosvl2-2020.yaml
495 | 2023-03-03 11:29:22   90409984 refplat/virl-base-images/iosvl2-2020/vios_l2-adventerprisek9-m.ssa.high_iron_20200929.qcow2
496 | 2023-03-02 14:38:10        242 refplat/virl-base-images/server-tcl-11-1/server-tcl-11-1.yaml
497 | 2023-03-02 14:38:09   23134208 refplat/virl-base-images/server-tcl-11-1/tcl-11-1.qcow2
498 | ```
499 | 
500 | > [!NOTE]
501 | > The software package is in the top folder of the bucket and the platform files are in the refplat folder. Within that folder, the structure is identical to the structure of the reference platform ISO image.
502 | 
503 | Uploading the files into the S3 bucket is only required the first time or when updating software. Even when CML instances are stopped / destroyed, the software in the S3 bucket is typically not removed.
504 | 
505 | #### Upload script
506 | 
507 | The upload tool makes it easy to quickly select and upload the software package and images to a defined S3 bucket (the bucket must exist already).
508 | 
509 | > [!NOTE]
510 | > The required CML software is the "pkg" file that is available for download from the Cisco software download page. Example: `cml2_2.7.0-4_amd64-20.pkg`. Also note the .pkg suffix.
511 | 
512 | Start the tool by providing the bucket name as an argument and the location of the reference platform images. The defaults for both are `aws-cml-images` for the bucket name and `/var/lib/libvirt/images` for the reference platform image location.
513 | 
514 | The tool will then display a simple dialog where the images which should be copied to the bucket can be selected:
515 | 
516 | ![Dialog preview](../images/upload-refplat.png)
517 | 
518 | After selecting OK, the upload process starts immediately. To abort the process, Ctrl-C can be used.
519 | 
520 | > [!NOTE]
521 | > If a CML2 .pkg file is present in the directory where the tool is started, then the tool will offer to upload the software to the bucket.
522 | 
523 | Help can be obtained via `./upload-images-to-aws.sh --help`.
524 | 
525 | ### Terraform variable definition
526 | 
527 | The `variables.tf` file defines the authentication secrets needed by the Terraform AWS provider.
528 | 
529 | Here's an example using a bash script that can be sourced and which defines those variables. To automate things further, a tool like [direnv](https://direnv.net/) or [mise-en-place](https://mise.jdx.dev/) can be used to load this environment when changing into the directory which has this file.
530 | 
531 | Content of file `.envrc`:
532 | 
533 | ```bash
534 | export TF_VAR_aws_access_key="your-access-key-string-from-iam"
535 | export TF_VAR_aws_secret_key="your-secret-key-string-from-iam"
536 | ```
537 | 
538 | Alternatively, it's also possible to provide values for variables via a file called `terraform.tfvars`. There are various ways to define / set variables with Terraform. See the Terraform [documentation](https://developer.hashicorp.com/terraform/language/values/variables#assigning-values-to-root-module-variables) for additional details.
539 | 
540 | In addition to the above methods, Terraform can also automatically retrieve authentication credentials from the AWS configuration files located in the .aws folder. This includes credentials set up by running `aws configure`, which stores your access key and secret key in the `~/.aws/credentials` file. This method allows Terraform to use the same credentials configured for the AWS CLI; see the [documentation](https://registry.terraform.io/providers/hashicorp/aws/latest/docs).
541 | 
542 | ## Lifecycle management
543 | 
544 | When all requirements are met, an instance can be deployed using Terraform.
545 | 
546 | ### Checklist
547 | 
548 | - [ ] Terraform CLI installed
549 | - [ ] policies and users configured in AWS IAM
550 | - [ ] software and reference platforms uploaded into a bucket on AWS S3
551 | - [ ] configuration files prepared with correct values
552 | - [ ] variables defined in environment or in `terraform.tfvars`
553 | 
554 | All configurations and variables relate to the relevant sections defined above.
555 | 
556 | ### Starting an instance
557 | 
558 | Starting an instance is done via `terraform plan` and `terraform apply`. The instance will be deployed and fully configured based on the provided configuration. Terraform will wait until CML is up and running; this will take approximately 5-10 minutes and depends a bit on the flavor used.
559 | 
560 | At the end, the Terraform output shows the relevant information about the instance:
561 | 
562 | - The URL to access it
563 | - The public IP address
564 | - The CML software version running
565 | - The command to automatically remove the license from the instance prior to destroying it (see below).
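These outputs can be printed again at any time after the deployment with standard Terraform commands. A minimal sketch (the `cml2info` output name matches the example run further below; the `jq` filter is just an illustration):

```plain
$ terraform output cml2info
$ terraform output -json cml2info | jq -r .url
https://18.194.38.215
```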
566 | 
567 | ### Destroying an instance
568 | 
569 | Before destroying an instance using `terraform destroy`, it is important to remove the CML license either by using the provided script or by unregistering the instance (UI → Tools → Licensing → Actions → Deregister). Otherwise, the license is not freed up on the Smart Licensing servers and subsequent deployments might not succeed due to insufficient licenses available in the smart account.
570 | 
571 | To remove the license using automation, a script is provided in `/provision/del.sh`. The command from the deployment output can be used; it looks like this:
572 | 
573 | ```plain
574 | ssh -p1122 sysadmin@IP_ADDRESS_OF_CONTROLLER /provision/del.sh
575 | ```
576 | 
577 | This requires all labs to be stopped (no running VMs allowed) prior to removing the license. It will only work as long as the provisioned usernames and passwords have not changed between deployment and destruction of the instance.
578 | 
579 | ## VPC support
580 | 
581 | With 0.3.0, the tooling always adds a custom VPC and doesn't use the default VPC anymore. Additional variables have been added to the configuration file `config.yml` to support this.
582 | 
583 | | Attribute                | Type                | Comment                                          |
584 | | ------------------------ | ------------------- | ------------------------------------------------ |
585 | | aws.public_vpc_ipv4_cidr | string, IPv4 prefix | defines the prefix to use on the VPC             |
586 | | aws.availability_zone    | string              | needed for VPC creation, should match the region |
587 | 
588 | There's also a new variable, `allowed_ipv4_subnets`, which defines a list of prefixes that are allowed to access the CML instance. This defaults to "everywhere".
589 | 
590 | ## Cluster support
591 | 
592 | Cluster support has been added to AWS with version 0.3.0. This should be considered even more experimental than the rest of the tool chain. A 'cluster' configuration section has been added to the configuration file (see the sketch below).
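Assuming the attribute names from the tables that follow (plus the `compute_hostname_prefix` attribute referenced by the deploy module), such a section might look roughly like this; the values are purely illustrative:

```yaml
# Hypothetical config.yml excerpt -- values are examples only.
cluster:
  enable_cluster: true
  allow_vms_on_controller: false
  compute_hostname_prefix: cml-compute
  number_of_compute_nodes: 2
# The common compute registration secret is configured via the secrets
# mechanism (secrets.app.cluster), not in this section.
```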
The following tables describe the available attributes / settings:
593 | 
594 | | Attribute                       | Type    | Description / notes                                          |
595 | | ------------------------------- | ------- | ------------------------------------------------------------ |
596 | | cluster.enable_cluster          | boolean | If set to true, then a cluster will be created               |
597 | | cluster.allow_vms_on_controller | boolean | If set to false, then the controller will not run any node VMs; only the computes will |
598 | | cluster.number_of_compute_nodes | int     | Number of compute nodes to be created                        |
599 | | secrets.app.cluster             | string  | the common secret for computes to register with the controller |
600 | 
601 | And here are the attributes of the 'aws' configuration dictionary:
602 | 
603 | | Attribute                                  | Type    | Description / notes                                          |
604 | | ------------------------------------------ | ------- | ------------------------------------------------------------ |
605 | | aws.region                                 | string  | as before                                                    |
606 | | aws.availability_zone                      | string  | **new:** required for VPC creation                           |
607 | | aws.bucket                                 | string  | as before                                                    |
608 | | aws.flavor                                 | string  | as before, used for the controller                           |
609 | | aws.flavor_compute                         | string  | **new:** flavor to use for the computes                      |
610 | | aws.profile                                | string  | as before                                                    |
611 | | aws.public_vpc_ipv4_cidr                   | string  | **new:** IPv4 prefix to use with the VPC (the default VPC is not used anymore) |
612 | | aws.enable_ebs_encryption                  | boolean | **new:** sets the encryption flag for block storage          |
613 | | aws.spot_instances.use_spot_for_controller | boolean | **new:** whether the controller should use a spot instance   |
614 | | aws.spot_instances.use_spot_for_computes   | boolean | **new:** whether all the computes should use spot instances  |
615 | 
616 | Before deploying a cluster, it is strongly recommended to start with an all-in-one deployment first to verify that all the required pieces (software, images, configuration, ...) are in place and work properly.
617 | 
618 | ## Example run
619 | 
620 | To deploy a CML instance on AWS, after configuring the required variables and editing the `config.yml` file, a `terraform plan` will show all the planned changes. After reviewing those, a `terraform apply` will start and configure a CML instance on AWS.
621 | 
622 | ```plain
623 | $ terraform apply -auto-approve
624 | module.deploy.data.aws_ami.ubuntu: Reading...
625 | module.deploy.data.aws_ami.ubuntu: Read complete after 1s [id=ami-0d497a49e7d359666]
626 | 
627 | Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
628 |   + create
629 |  <= read (data resources)
630 | 
631 | Terraform will perform the following actions:
632 | 
633 |   # module.deploy.aws_instance.cml will be created
634 |   + resource "aws_instance" "cml" {
635 |       + ami                                  = "ami-0d497a49e7d359666"
636 |       + arn                                  = (known after apply)
637 |       + associate_public_ip_address          = (known after apply)
638 |       + availability_zone                    = (known after apply)
639 |       + cpu_core_count                       = (known after apply)
640 | [...]
641 | 
642 | Plan: 3 to add, 0 to change, 0 to destroy.
643 | 
644 | Changes to Outputs:
645 |   + cml2info = {}
646 | module.deploy.random_id.id: Creating...
647 | module.deploy.random_id.id: Creation complete after 0s [id=x1hR1Q]
648 | module.deploy.aws_security_group.sg-tf: Creating...
649 | module.deploy.aws_security_group.sg-tf: Creation complete after 2s [id=sg-04865f65e43aa917f]
650 | module.deploy.aws_instance.cml: Creating...
651 | module.deploy.aws_instance.cml: Still creating... [10s elapsed]
652 | module.deploy.aws_instance.cml: Creation complete after 13s [id=i-0e7697766ca6c18e1]
653 | module.ready.data.cml2_system.state: Reading...
654 | module.ready.data.cml2_system.state: Still reading... [10s elapsed]
655 | module.ready.data.cml2_system.state: Still reading... [20s elapsed]
656 | [...]
657 | module.ready.data.cml2_system.state: Still reading... [3m50s elapsed]
658 | module.ready.data.cml2_system.state: Still reading... [4m0s elapsed]
659 | module.ready.data.cml2_system.state: Read complete after 4m2s [id=dd68b604-8930-45c6-8d58-a1da578e02b4]
660 | 
661 | Apply complete! Resources: 3 added, 0 changed, 0 destroyed.
662 | 
663 | Outputs:
664 | 
665 | cml2info = {
666 |   "address" = "18.194.38.215"
667 |   "del" = "ssh -p1122 sysadmin@18.194.38.215 /provision/del.sh"
668 |   "url" = "https://18.194.38.215"
669 |   "version" = "2.5.1+build.10"
670 | }
671 | 
672 | $
673 | ```
674 | 
675 | As can be seen above, a public IPv4 address has been assigned to the instance which can be used to access it via SSH with the provided SSH key pair (if this does not connect right away, then the system isn't ready yet and more waiting is needed):
676 | 
677 | ```plain
678 | $ ssh -p1122 sysadmin@18.194.38.215
679 | The authenticity of host '[18.194.38.215]:1122 ([18.194.38.215]:1122)' can't be established.
680 | ED25519 key fingerprint is SHA256:dz7GcRGzcWiyHbPb++NyQykP9r7UoG0rNiACi5ft1lQ.
681 | This key is not known by any other names
682 | Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
683 | Warning: Permanently added '[18.194.38.215]:1122' (ED25519) to the list of known hosts.
684 | Welcome to Ubuntu 20.04.6 LTS (GNU/Linux 5.15.0-1033-aws x86_64)
685 | [...]
686 | sysadmin@rschmied-aws-2023042001:~$
687 | ```
688 | 
689 | At this point, the status of the system can be checked:
690 | 
691 | ```plain
692 | sysadmin@rschmied-aws-2023042001:~$ systemctl status | head
693 | ● rschmied-aws-2023042001
694 |     State: running
695 |     Jobs: 0 queued
696 |     Failed: 0 units
697 |     Since: Fri 2023-04-21 14:45:00 UTC; 4min 34s ago
698 |    CGroup: /
699 |            ├─23120 bpfilter_umh
700 |            ├─user.slice
701 |            │ └─user-1001.slice
702 |            │   ├─user@1001.service
703 | sysadmin@rschmied-aws-2023042001:~$ systemctl status virl2.target
704 | ● virl2.target - CML2 Network Simulation System
705 |      Loaded: loaded (/lib/systemd/system/virl2.target; enabled; vendor preset: enabled)
706 |      Active: active since Fri 2024-04-21 14:47:58 UTC; 2min 13s ago
707 | 
708 | Warning: some journal files were not opened due to insufficient permissions.
709 | sysadmin@rschmied-aws-2023042001:~$
710 | ```
711 | 
712 | The system is running and the VIRL2 target (CML) is active!
713 | 
714 | Prior to destroying the instance, the licensing token must be removed; otherwise it's still considered "in use" in Smart Licensing. This is done via the UI or by using the `del.sh` script / SSH command which is provided as part of the deploy output (see above). Then run the destroy command.
715 | 
716 | > [!NOTE]
717 | > The `del.sh` script produces no output if the command is successful.
718 | 
719 | ```plain
720 | $ ssh -p1122 sysadmin@18.194.38.215 /provision/del.sh
721 | The authenticity of host '[18.194.38.215]:1122 ([18.194.38.215]:1122)' can't be established.
722 | ED25519 key fingerprint is SHA256:4QxgLv9zzKR5gJP4rWE41STdnAHufBYkTKBpp/VA+k8.
723 | This key is not known by any other names
724 | Are you sure you want to continue connecting (yes/no/[fingerprint])? yes
725 | Warning: Permanently added '[18.194.38.215]:1122' (ED25519) to the list of known hosts.
726 | 
727 | $ terraform destroy -auto-approve
728 | module.deploy.random_id.id: Refreshing state... [id=x1hR1Q]
729 | module.deploy.data.aws_ami.ubuntu: Reading...
730 | module.deploy.aws_security_group.sg-tf: Refreshing state... [id=sg-04865f65e43aa917f]
731 | module.deploy.data.aws_ami.ubuntu: Read complete after 1s [id=ami-0d497a49e7d359666]
732 | module.deploy.aws_instance.cml: Refreshing state... [id=i-0e7697766ca6c18e1]
733 | module.ready.data.cml2_system.state: Reading...
734 | module.ready.data.cml2_system.state: Read complete after 0s [id=cf22e2e6-7ef2-420b-8191-404f3f7f3600]
735 | 
736 | Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
737 |   - destroy
738 | 
739 | Terraform will perform the following actions:
740 | 
741 |   # module.deploy.aws_instance.cml will be destroyed
742 |   - resource "aws_instance" "cml" {
743 |       - ami                                  = "ami-0d497a49e7d359666" -> null
744 | [...]
745 | 
746 | Plan: 0 to add, 0 to change, 3 to destroy.
747 | 
748 | Changes to Outputs:
749 |   - cml2info = {
750 |       - address = "18.194.38.215"
751 |       - del     = "ssh -p1122 sysadmin@18.194.38.215 /provision/del.sh"
752 |       - url     = "https://18.194.38.215"
753 |       - version = "2.5.1+build.10"
754 |     } -> null
755 | module.deploy.aws_instance.cml: Destroying... [id=i-0e7697766ca6c18e1]
756 | module.deploy.aws_instance.cml: Still destroying... [id=i-0e7697766ca6c18e1, 10s elapsed]
757 | module.deploy.aws_instance.cml: Still destroying... [id=i-0e7697766ca6c18e1, 20s elapsed]
758 | module.deploy.aws_instance.cml: Still destroying... [id=i-0e7697766ca6c18e1, 30s elapsed]
759 | module.deploy.aws_instance.cml: Destruction complete after 30s
760 | module.deploy.aws_security_group.sg-tf: Destroying... [id=sg-04865f65e43aa917f]
761 | module.deploy.aws_security_group.sg-tf: Destruction complete after 0s
762 | module.deploy.random_id.id: Destroying... [id=x1hR1Q]
763 | module.deploy.random_id.id: Destruction complete after 0s
764 | 
765 | Destroy complete! Resources: 3 destroyed.
766 | 
767 | $
768 | ```
769 | 
770 | At this point, the compute resources have been released / destroyed. Images in the S3 bucket are still available for bringing up new instances.
771 | 
772 | > [!NOTE]
773 | > Metal instances take significantly longer to bring up and to destroy. The `m5zn.metal` instance type takes about 5-10 minutes for both. Deployment times also depend on the number and size of reference platform images that should be copied to the instance.
774 | 
775 | ## Troubleshooting
776 | 
777 | In case of errors during deployment or when the CML instance won't become ready, some troubleshooting guidance is provided below.
778 | 
779 | - add a password to the root user in the `cml.sh` script within the `modules/deploy/data` folder. Search for "troubleshooting"; the line is commented out. Replace the "secret-password-here" with a proper password and uncomment the line by removing the leading hash character.
780 | - use the EC2 instance connect / serial console to gain access to the CML2 instance. When doing so soon after creating the instance, some log messages may already reveal what's going wrong
781 | - log in as the root user using the provided password on the serial console
782 | - check for errors in the log files in the `/var/log/cloud/` directory
783 | - check the output of `cloud-init status`
784 | 
785 | > [!NOTE]
786 | > Not all instance flavors have a serial console but metal flavors do!
787 | 
788 | ## Caveats and limitations
789 | 
790 | This section lists a couple of caveats and limitations when running CML in AWS.
791 | 
792 | ### Metal flavor needed
793 | 
794 | As pointed out above, full functionality **requires a metal instance flavor** because only the AWS metal flavors provide support for the VMX CPU flag to run accelerated nested VMs. A limited set of platforms works on non-metal flavors when using the `00-patch_vmx.sh` customization script.
795 | 
796 | ### No software upgrade
797 | 
798 | Software upgrade or migration is **not supported** for cloud instances. We advise downloading topologies and configurations prior to destroying the instance.
799 | 
800 | ### No bridge support
801 | 
802 | CML cloud instances with the default networking have only one external IP address allocated. In addition, it's mandatory that no L2 frames leak into the outside network as this could disable access to the management IP address.
803 | 
804 | For this reason, CML cloud instances only have the NAT network available by default. Ensure that all external connectors use the NAT (`virbr0`) network and not the bridge network (`bridge0`).
805 | 
806 | In case of an advanced VPC configuration with additional networks and NICs inside of the CML controller, bridging could be set up manually. This is out of scope for this documentation / tooling.
807 | 
808 | ### License removal
809 | 
810 | If everything goes well (e.g. the license was successfully removed), then no additional output is shown when running the `ssh ... /provision/del.sh` command. Errors will be reported otherwise.
811 | 
812 | The license can't be removed using the script when nodes are running. You will get this message:
813 | 
814 | ```json
815 | {
816 |   "description": "Licensing issue: Cannot de-register when nodes are running.",
817 |   "code": 400
818 | }
819 | ```
820 | 
821 | If the license has already been removed, then this message is shown:
822 | 
823 | ```json
824 | {
825 |   "description": "Licensing issue: The product has already been de-registered.",
826 |   "code": 400
827 | }
828 | ```
829 | 
830 | EOF
831 | 
--------------------------------------------------------------------------------
/documentation/Azure.md:
--------------------------------------------------------------------------------
1 | # Azure
2 | 
3 | This document explains specific configuration steps to deploy a Cisco Modeling Labs (CML) instance in Azure.
4 | 
5 | ## General requirements
6 | 
7 | The requirements for Azure are mostly identical to those for AWS. Please refer to the AWS document for instructions on how to install Terraform. Azure needs the Azure CLI, which can be downloaded from [here](https://learn.microsoft.com/en-us/cli/azure/install-azure-cli).
8 | 
9 | ## Authentication
10 | 
11 | Once the Azure CLI (`az`) has been installed, it is required to log into Azure with an appropriate account.
12 | 
13 | > [!NOTE]
14 | > It should also be possible to use a service principal with appropriate permissions. However, during the testing/development of the tool chain we did not have access to these resources.
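For completeness, a service-principal login with the Azure CLI would look roughly like the following; this is untested with this tooling, and the app ID, secret and tenant ID are placeholders:

```bash
# Hypothetical values -- replace with your own service principal details.
az login --service-principal \
    --username "$AZURE_APP_ID" \
    --password "$AZURE_CLIENT_SECRET" \
    --tenant "$AZURE_TENANT_ID"
```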
15 | 
16 | The below shows sample output (`az` has been configured to provide JSON-encoded output via `az configure`):
17 | 
18 | ```
19 | $ az login
20 | A web browser has been opened at https://login.microsoftonline.com/organizations/oauth2/v2.0/authorize. Please continue the login in the web browser. If no web browser is available or if the web browser fails to open, use device code flow with `az login --use-device-code`.
21 | [
22 |   {
23 |     "cloudName": "AzureCloud",
24 |     "homeTenantId": "00000000-0000-4000-a000-000000000000",
25 |     "id": "00000000-0000-4000-a000-000000000000",
26 |     "isDefault": true,
27 |     "managedByTenants": [],
28 |     "name": "your-subscription-name",
29 |     "state": "Enabled",
30 |     "tenantId": "00000000-0000-4000-a000-000000000000",
31 |     "user": {
32 |       "name": "user@corp.com",
33 |       "type": "user"
34 |     }
35 |   }
36 | ]
37 | ```
38 | 
39 | The provided subscription ID and the tenant ID need to be configured as Terraform variables. This can be done using environment variables and a shell script as shown here using `jq`:
40 | 
41 | ```bash
42 | #!/bin/bash
43 | 
44 | { read subID ; read tenantID; } <<< "$(az account list --output=json | jq -r '.[0]|.id,.tenantId')"
45 | 
46 | export TF_VAR_azure_tenant_id="$tenantID"
47 | export TF_VAR_azure_subscription_id="$subID"
48 | ```
49 | 
50 | The values can be provided directly as well (e.g. copying and pasting them into the script).
51 | 
52 | ## Software
53 | 
54 | CML software needs to be present on Azure in a storage account / blob container. See the AWS document for where to download the `.pkg` file with the Debian packages. The layout of the files inside the container is otherwise identical to the layout described in the AWS document:
55 | 
56 | ```
57 | "storage_account"
58 | - "container_name"
59 |   - cml2_2.6.1-11_amd64.deb
60 |   - refplat
61 |     - node-definitions
62 |       - iosv.yaml
63 |       - ...
64 |     - virl-base-images
65 |       - iosv-159-3-m3
66 |         - iosv-159-3-m3.yaml
67 |         - vios-adventerprisek9-m-spa.159-3.m3.qcow2
68 |       - ...
69 | ```
70 | 
71 | Where "storage_account" and "container_name" are the names as configured in the `config.yml` file with the same attribute names.
72 | 
73 | For uploading images / software to Azure, the "azcopy" tool can be used. Please look into [this page](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-cli) for a comprehensive overview on how to "Create, download, and list blobs with Azure CLI".
74 | 
75 | The items required are:
76 | 
77 | - create a storage account
78 | - create a "blob service" / container (access level is "private", authentication method is "access key", we could not test with Entra/service principals)
79 | - upload the refplat directory (with the node-definitions and virl-base-images folders) into the container. The azcopy tool provides a "`--recursive`" option
80 | - upload the Debian package into the container
81 | 
82 | ## Compute size
83 | 
84 | The size of the compute (called "flavor" in AWS) determines the amount of memory and CPU available to the CML instance. The important bit here is the inclusion of the VMX CPU flag to allow for virtualization acceleration. See [this link](https://learn.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/nested-virtualization) for additional information.
85 | 
86 | The [Ddv4 series](https://learn.microsoft.com/en-us/azure/virtual-machines/ddv4-ddsv4-series) does support nested virtualization and comes in various sizes, as listed in the linked document. We've tested with Standard_D4d_v4, which provides 4 vCPUs and 16GB of memory.
87 | 
88 | There are other compute series available which will likely also work; please ensure that they do support "Nested virtualization" prior to using them.
89 | 
90 | ## Configuration
91 | 
92 | In the `config.yml` file, ensure that the `target` attribute at the very top of the file is set to `azure`. Then configure the `storage_account` and `container_name` to match the resources you've created in Azure and where the software has been uploaded (CML Debian package, reference platform images).
93 | 
94 | Ensure that the hostname, disk size and key_name match your requirements and that the SSH public key has been uploaded to the resource group on Azure:
95 | 
96 | ![resource group](../images/azure-resource-group.png)
97 | 
98 | Also ensure that the layout of the software matches the required layout as specified above:
99 | 
100 | ![storage browser](../images/azure-storage-browser.png)
101 | 
102 | Note in the screenshot above:
103 | 
104 | - The `refplat` folder has the reference platform images.
105 | - The `cml2_2.7.0-4_amd64-20.pkg` package is stored in the folder. The .deb file is included in the pkg file and is no longer needed separately.
106 | - The `hostname-fullchain.pem` and `hostname-privkey.pem` files contain a certificate/key that, if the hostname part of the filename matches the configured controller hostname, will be installed using the `03-letsencrypt.sh` script.
107 | - Make sure that the `app.software` config value corresponds to the file name of the CML .pkg package in your storage container.
108 | 
109 | ## Running the deployment
110 | 
111 | The usual `terraform plan` and `terraform apply` sequence can be applied to start the deployment. Depending on the number of images you want to be available in the resulting VM, this can take anywhere from 5 to 10 minutes until the VM is deployed, configured and licensed.
112 | 
113 | Make a note of the IP address and the "delete license" command so that the license is released prior to destroying the CML deployment.
114 | 
115 | This can be done with `terraform destroy`.
116 | 
117 | EOF
118 | 
--------------------------------------------------------------------------------
/images/azure-resource-group.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CiscoDevNet/cloud-cml/dd1212db5e6a51423dfb0578a0bf60540cb88408/images/azure-resource-group.png
--------------------------------------------------------------------------------
/images/azure-storage-browser.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CiscoDevNet/cloud-cml/dd1212db5e6a51423dfb0578a0bf60540cb88408/images/azure-storage-browser.png
--------------------------------------------------------------------------------
/images/flowchart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CiscoDevNet/cloud-cml/dd1212db5e6a51423dfb0578a0bf60540cb88408/images/flowchart.png
--------------------------------------------------------------------------------
/images/flowchart.uml:
--------------------------------------------------------------------------------
1 | @startuml flowchart
2 | start
3 | partition #LightCyan "Preparation" {
4 | if (already configured?)
then (no) 5 | 6 | :* configure AWS 7 | ** permissions 8 | ** policies 9 | ** S3 bucket 10 | * upload to S3 bucket 11 | ** software 12 | ** reference platform images 13 | * create / install EC2 key pair 14 | * edit //config.yml//| 15 | else (yes) 16 | endif 17 | } 18 | partition #LightBlue "Using Terraform" { 19 | 20 | :create a CML instance; 21 | -> 22 | repeat 23 | :run simulation(s); 24 | repeat while (Done?) is (no) not (yes) 25 | :remove license (del.sh)| 26 | :destroy the CML instance; 27 | } 28 | end 29 | @enduml 30 | -------------------------------------------------------------------------------- /images/perm-details.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/cloud-cml/dd1212db5e6a51423dfb0578a0bf60540cb88408/images/perm-details.png -------------------------------------------------------------------------------- /images/permissions.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/cloud-cml/dd1212db5e6a51423dfb0578a0bf60540cb88408/images/permissions.png -------------------------------------------------------------------------------- /images/policies.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/cloud-cml/dd1212db5e6a51423dfb0578a0bf60540cb88408/images/policies.png -------------------------------------------------------------------------------- /images/policies.uml: -------------------------------------------------------------------------------- 1 | @startuml 2 | 3 | 10 | 11 | ' skinparam handwritten true 12 | ' top to bottom direction 13 | skinparam arrowThickness 3 14 | left to right direction 15 | 16 | storage IAM as "AWS IAM" #lightblue;line:gray;text:lightblue { 17 | 18 | database Users #lightgray { 19 | folder user as "deployment user" { 20 | card permissions #lightcyan { 21 | file ara [ 22 | allow role assignment 23 | .... 24 | inline policy 25 | ] 26 | file ec2a [ 27 | AmazonEC2FullAccess 28 | .... 29 | AWS managed policy 30 | ] 31 | file s3cml [ 32 | s3-cml-bucket 33 | .... 34 | customer managed policy, 35 | references S3 access policy 36 | ] 37 | note right of s3cml 38 | This permission is only needed when the 39 | deployment user should also be used to 40 | upload images into the S3 bucket e.g. 41 | using the provided upload script and 42 | or using the AWS CLI tool. 43 | For production, this can be omitted. 44 | end note 45 | } 46 | card groups { 47 | file test1 [ 48 | group policies... 49 | .... 50 | (no specific use in this example) 51 | ] 52 | } 53 | card tags { 54 | file test2 [ 55 | tag definitions... 56 | .... 57 | (no specific use in this example) 58 | ] 59 | } 60 | card sc as "security credentials" { 61 | file test3 [ 62 | credentials / tokens ... 63 | .... 64 | Access key and secrets 65 | for the deployment user 66 | are managed here 67 | ] 68 | } 69 | actor test [ 70 | cml_terraform 71 | ] 72 | } 73 | } 74 | 75 | 76 | 77 | sc -[hidden]right- test 78 | test1 -[hidden]- test2 79 | test2 -[hidden]- test3 80 | 81 | database Roles { 82 | card s3_access_for_ec2 as "s3-access-for-ec2" { 83 | file s3_cml_bucket2 [ 84 | s3-cml-bucket 85 | .... 86 | references S3 access polciy 87 | ] 88 | } 89 | } 90 | note right of s3_access_for_ec2 91 | The name of this role 92 | is used in ""config.yml"" 93 | as value for ""aws.profile"" 94 | end note 95 | 96 | database Policies { 97 | file s3cml2 [ 98 | s3-cml-bucket 99 | .... 
100 | define access to S3 bucket objects 101 | ] 102 | } 103 | 104 | 105 | ara =[#blue]down=> s3_access_for_ec2 : Allow ""iam:PassRole"" 106 | s3cml =[#blue]> s3cml2 107 | s3cml2 <=[#blue]= s3_cml_bucket2 108 | 109 | @enduml -------------------------------------------------------------------------------- /images/upload-refplat.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CiscoDevNet/cloud-cml/dd1212db5e6a51423dfb0578a0bf60540cb88408/images/upload-refplat.png -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | locals { 8 | raw_cfg = yamldecode(file(var.cfg_file)) 9 | cfg = merge( 10 | { 11 | for k, v in local.raw_cfg : k => v if k != "secret" 12 | }, 13 | { 14 | secrets = module.secrets.secrets 15 | } 16 | ) 17 | extras = var.cfg_extra_vars == null ? "" : ( 18 | fileexists(var.cfg_extra_vars) ? file(var.cfg_extra_vars) : var.cfg_extra_vars 19 | ) 20 | } 21 | 22 | module "secrets" { 23 | source = "./modules/secrets" 24 | cfg = local.raw_cfg 25 | } 26 | 27 | module "deploy" { 28 | source = "./modules/deploy" 29 | cfg = local.cfg 30 | extras = local.extras 31 | azure_subscription_id = var.azure_subscription_id 32 | azure_tenant_id = var.azure_tenant_id 33 | } 34 | 35 | provider "cml2" { 36 | address = "https://${module.deploy.public_ip}" 37 | username = local.cfg.secrets.app.username 38 | password = local.cfg.secrets.app.secret 39 | skip_verify = true 40 | dynamic_config = true 41 | } 42 | 43 | module "ready" { 44 | source = "./modules/readyness" 45 | depends_on = [ 46 | module.deploy.public_ip 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /modules/deploy/aws-mini/main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | locals { 8 | # Late binding required as the token is only known within the module. 9 | # (Azure specific) 10 | vars = templatefile("${path.module}/../data/vars.sh", { 11 | cfg = merge( 12 | var.options.cfg, 13 | # Need to have this as it's referenced in the template (Azure specific) 14 | { sas_token = "undefined" } 15 | ) 16 | } 17 | ) 18 | 19 | cml_config_controller = templatefile("${path.module}/../data/virl2-base-config.yml", { 20 | hostname = var.options.cfg.common.controller_hostname, 21 | is_controller = true 22 | is_compute = true 23 | cfg = merge( 24 | var.options.cfg, 25 | # Need to have this as it's referenced in the template (Azure specific) 26 | { sas_token = "undefined" } 27 | ) 28 | } 29 | ) 30 | 31 | # Ensure there's no tabs in the template file! Also ensure that the list of 32 | # reference platforms has no single quotes in the file names or keys (should 33 | # be reasonable, but you never know...) 
34 | cloud_config = templatefile("${path.module}/../data/cloud-config.txt", { 35 | vars = local.vars 36 | cml_config = local.cml_config_controller 37 | cfg = var.options.cfg 38 | cml = var.options.cml 39 | common = var.options.common 40 | copyfile = var.options.copyfile 41 | del = var.options.del 42 | interface_fix = var.options.interface_fix 43 | license = var.options.license 44 | extras = var.options.extras 45 | hostname = var.options.cfg.common.controller_hostname 46 | path = path.module 47 | }) 48 | } 49 | 50 | data "aws_subnet" "selected_subnet" { 51 | id = var.options.cfg.aws.subnet_id 52 | } 53 | 54 | data "aws_security_group" "selected_security_group" { 55 | id = var.options.cfg.aws.sg_id 56 | } 57 | 58 | resource "aws_network_interface" "pub_int_cml" { 59 | subnet_id = data.aws_subnet.selected_subnet.id 60 | security_groups = [data.aws_security_group.selected_security_group.id] 61 | } 62 | 63 | # If no EIP is needed/wanted, then 64 | # - change public_ip to private_ip in output.tf 65 | # - delete the resource block if no EIP is wanted/needed 66 | # In this case, the machine that runs Terraform / the provisioning must be able 67 | # to reach the private IP address and the security group must permit HTTPS to 68 | # the controller. 69 | resource "aws_eip" "server_eip" { 70 | network_interface = aws_network_interface.pub_int_cml.id 71 | } 72 | 73 | resource "aws_instance" "cml_controller" { 74 | instance_type = var.options.cfg.aws.flavor 75 | ami = data.aws_ami.ubuntu.id 76 | iam_instance_profile = var.options.cfg.aws.profile 77 | key_name = var.options.cfg.common.key_name 78 | tags = { Name = "CML-controller-${var.options.rand_id}" } 79 | ebs_optimized = "true" 80 | root_block_device { 81 | volume_size = var.options.cfg.common.disk_size 82 | volume_type = "gp3" 83 | encrypted = var.options.cfg.aws.enable_ebs_encryption 84 | } 85 | network_interface { 86 | network_interface_id = aws_network_interface.pub_int_cml.id 87 | device_index = 0 88 | } 89 | user_data = data.cloudinit_config.cml_controller.rendered 90 | } 91 | 92 | data "aws_ami" "ubuntu" { 93 | most_recent = true 94 | 95 | filter { 96 | name = "name" 97 | values = ["ubuntu/images/hvm-ssd/ubuntu-noble-24.04-amd64-server-*"] 98 | } 99 | 100 | filter { 101 | name = "virtualization-type" 102 | values = ["hvm"] 103 | } 104 | 105 | owners = ["099720109477"] # Owner ID of Canonical 106 | } 107 | 108 | data "cloudinit_config" "cml_controller" { 109 | gzip = true 110 | base64_encode = true # always true if gzip is true 111 | 112 | part { 113 | filename = "cloud-config.yaml" 114 | content_type = "text/cloud-config" 115 | content = local.cloud_config 116 | } 117 | } 118 | 119 | -------------------------------------------------------------------------------- /modules/deploy/aws-mini/output.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | output "public_ip" { 8 | # value = aws_instance.cml_controller.private_ip 9 | value = aws_instance.cml_controller.public_ip 10 | } 11 | 12 | output "sas_token" { 13 | value = "undefined" 14 | } 15 | -------------------------------------------------------------------------------- /modules/deploy/aws-mini/variables.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 
5 | # 6 | 7 | variable "options" { 8 | type = any 9 | description = "module options of the CML deployment as an object" 10 | } 11 | 12 | -------------------------------------------------------------------------------- /modules/deploy/aws-off.t-f: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | module "aws" { 8 | source = "./dummy" 9 | count = var.cfg.target == "aws" ? 1 : 0 10 | } 11 | 12 | -------------------------------------------------------------------------------- /modules/deploy/aws-on.t-f: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | terraform { 8 | required_providers { 9 | aws = { 10 | source = "hashicorp/aws" 11 | version = ">=4.56.0" 12 | } 13 | cloudinit = { 14 | source = "hashicorp/cloudinit" 15 | version = ">=2.3.3" 16 | } 17 | 18 | } 19 | required_version = ">= 1.1.0" 20 | } 21 | 22 | provider "aws" { 23 | secret_key = var.aws_secret_key 24 | access_key = var.aws_access_key 25 | region = var.cfg.aws.region 26 | } 27 | 28 | module "aws" { 29 | # source = "./aws-mini" 30 | source = "./aws" 31 | count = var.cfg.target == "aws" ? 1 : 0 32 | options = local.options 33 | } 34 | 35 | -------------------------------------------------------------------------------- /modules/deploy/aws.tf: -------------------------------------------------------------------------------- 1 | aws-on.t-f -------------------------------------------------------------------------------- /modules/deploy/aws/main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | locals { 8 | num_computes = var.options.cfg.cluster.enable_cluster ? var.options.cfg.cluster.number_of_compute_nodes : 0 9 | compute_hostnames = [ 10 | for i in range(1, local.num_computes + 1) : 11 | format("%s-%d", var.options.cfg.cluster.compute_hostname_prefix, i) 12 | ] 13 | 14 | # Late binding required as the token is only known within the module. 15 | # (Azure specific) 16 | vars = templatefile("${path.module}/../data/vars.sh", { 17 | cfg = merge( 18 | var.options.cfg, 19 | # Need to have this as it's referenced in the template (Azure specific) 20 | { sas_token = "undefined" } 21 | ) 22 | } 23 | ) 24 | 25 | cml_config_controller = templatefile("${path.module}/../data/virl2-base-config.yml", { 26 | hostname = var.options.cfg.common.controller_hostname, 27 | is_controller = true 28 | is_compute = !var.options.cfg.cluster.enable_cluster || var.options.cfg.cluster.allow_vms_on_controller 29 | cfg = merge( 30 | var.options.cfg, 31 | # Need to have this as it's referenced in the template (Azure specific) 32 | { sas_token = "undefined" } 33 | ) 34 | } 35 | ) 36 | 37 | cml_config_compute = [for compute_hostname in local.compute_hostnames : templatefile("${path.module}/../data/virl2-base-config.yml", { 38 | hostname = compute_hostname, 39 | is_controller = false, 40 | is_compute = true, 41 | cfg = merge( 42 | var.options.cfg, 43 | # Need to have this as it's referenced in the template. 44 | # (Azure specific) 45 | { sas_token = "undefined" } 46 | ) 47 | } 48 | )] 49 | 50 | # Ensure there's no tabs in the template file! 
Also ensure that the list of 51 | # reference platforms has no single quotes in the file names or keys (should 52 | # be reasonable, but you never know...) 53 | cloud_config = templatefile("${path.module}/../data/cloud-config.txt", { 54 | vars = local.vars 55 | cml_config = local.cml_config_controller 56 | cfg = var.options.cfg 57 | cml = var.options.cml 58 | common = var.options.common 59 | copyfile = var.options.copyfile 60 | del = var.options.del 61 | interface_fix = var.options.interface_fix 62 | license = var.options.license 63 | extras = var.options.extras 64 | hostname = var.options.cfg.common.controller_hostname 65 | path = path.module 66 | }) 67 | 68 | cloud_config_compute = [for i in range(0, local.num_computes) : templatefile("${path.module}/../data/cloud-config.txt", { 69 | vars = local.vars 70 | cml_config = local.cml_config_compute[i] 71 | cfg = var.options.cfg 72 | cml = var.options.cml 73 | common = var.options.common 74 | copyfile = var.options.copyfile 75 | del = var.options.del 76 | interface_fix = var.options.interface_fix 77 | license = "empty" 78 | extras = var.options.extras 79 | hostname = local.compute_hostnames[i] 80 | path = path.module 81 | })] 82 | 83 | main_vpc = length(var.options.cfg.aws.vpc_id) > 0 ? data.aws_vpc.selected[0] : aws_vpc.main_vpc[0] 84 | main_gw_id = length(var.options.cfg.aws.gw_id) > 0 ? var.options.cfg.aws.gw_id : aws_internet_gateway.public_igw[0].id 85 | 86 | cml_ingress = [ 87 | { 88 | "description" : "allow SSH", 89 | "from_port" : 1122, 90 | "to_port" : 1122 91 | "protocol" : "tcp", 92 | "cidr_blocks" : var.options.cfg.common.allowed_ipv4_subnets, 93 | "ipv6_cidr_blocks" : [], 94 | "prefix_list_ids" : [], 95 | "security_groups" : [], 96 | "self" : false, 97 | }, 98 | { 99 | "description" : "allow CML termserver", 100 | "from_port" : 22, 101 | "to_port" : 22 102 | "protocol" : "tcp", 103 | "cidr_blocks" : var.options.cfg.common.allowed_ipv4_subnets, 104 | "ipv6_cidr_blocks" : [], 105 | "prefix_list_ids" : [], 106 | "security_groups" : [], 107 | "self" : false, 108 | }, 109 | { 110 | "description" : "allow Cockpit", 111 | "from_port" : 9090, 112 | "to_port" : 9090 113 | "protocol" : "tcp", 114 | "cidr_blocks" : var.options.cfg.common.allowed_ipv4_subnets, 115 | "ipv6_cidr_blocks" : [], 116 | "prefix_list_ids" : [], 117 | "security_groups" : [], 118 | "self" : false, 119 | }, 120 | { 121 | "description" : "allow HTTP", 122 | "from_port" : 80, 123 | "to_port" : 80 124 | "protocol" : "tcp", 125 | "cidr_blocks" : var.options.cfg.common.allowed_ipv4_subnets, 126 | "ipv6_cidr_blocks" : [], 127 | "prefix_list_ids" : [], 128 | "security_groups" : [], 129 | "self" : false, 130 | }, 131 | { 132 | "description" : "allow HTTPS", 133 | "from_port" : 443, 134 | "to_port" : 443 135 | "protocol" : "tcp", 136 | "cidr_blocks" : var.options.cfg.common.allowed_ipv4_subnets, 137 | "ipv6_cidr_blocks" : [], 138 | "prefix_list_ids" : [], 139 | "security_groups" : [], 140 | "self" : false, 141 | } 142 | ] 143 | 144 | cml_patty_range = [ 145 | { 146 | "description" : "allow PATty TCP", 147 | "from_port" : 2000, 148 | "to_port" : 7999 149 | "protocol" : "tcp", 150 | "cidr_blocks" : var.options.cfg.common.allowed_ipv4_subnets, 151 | "ipv6_cidr_blocks" : [], 152 | "prefix_list_ids" : [], 153 | "security_groups" : [], 154 | "self" : false, 155 | }, 156 | { 157 | "description" : "allow PATty UDP", 158 | "from_port" : 2000, 159 | "to_port" : 7999 160 | "protocol" : "udp", 161 | "cidr_blocks" : var.options.cfg.common.allowed_ipv4_subnets, 162 | "ipv6_cidr_blocks" : [], 
163 | "prefix_list_ids" : [], 164 | "security_groups" : [], 165 | "self" : false, 166 | } 167 | ] 168 | } 169 | 170 | resource "aws_security_group" "sg_tf" { 171 | name = "tf-sg-cml-${var.options.rand_id}" 172 | description = "CML required ports inbound/outbound" 173 | tags = { 174 | Name = "tf-sg-cml-${var.options.rand_id}" 175 | } 176 | vpc_id = local.main_vpc.id 177 | egress = [ 178 | { 179 | "description" : "any", 180 | "from_port" : 0, 181 | "to_port" : 0 182 | "protocol" : "-1", 183 | "cidr_blocks" : [ 184 | "0.0.0.0/0" 185 | ], 186 | "ipv6_cidr_blocks" : [], 187 | "prefix_list_ids" : [], 188 | "security_groups" : [], 189 | "self" : false, 190 | } 191 | ] 192 | ingress = var.options.cfg.common.enable_patty ? concat(local.cml_ingress, local.cml_patty_range) : local.cml_ingress 193 | } 194 | 195 | resource "aws_security_group" "sg_tf_cluster_int" { 196 | name = "tf-sg-cml-cluster-int-${var.options.rand_id}" 197 | description = "Allowing all IPv6 traffic on the cluster interface" 198 | tags = { 199 | Name = "tf-sg-cml-cluster-int-${var.options.rand_id}" 200 | } 201 | vpc_id = local.main_vpc.id 202 | egress = [ 203 | { 204 | "description" : "any", 205 | "from_port" : 0, 206 | "to_port" : 0 207 | "protocol" : "-1", 208 | "cidr_blocks" : [], 209 | "ipv6_cidr_blocks" : ["::/0"], 210 | "prefix_list_ids" : [], 211 | "security_groups" : [], 212 | "self" : false, 213 | } 214 | ] 215 | ingress = [ 216 | { 217 | "description" : "any", 218 | "from_port" : 0, 219 | "to_port" : 0 220 | "protocol" : "-1", 221 | "cidr_blocks" : [], 222 | "ipv6_cidr_blocks" : ["::/0"], 223 | "prefix_list_ids" : [], 224 | "security_groups" : [], 225 | "self" : false, 226 | } 227 | ] 228 | } 229 | 230 | #----------------- if VPC ID was provided, select it -------------------------- 231 | data "aws_vpc" "selected" { 232 | id = var.options.cfg.aws.vpc_id 233 | count = length(var.options.cfg.aws.vpc_id) > 0 ? 1 : 0 234 | } 235 | 236 | #------------------- non-default VPC configuration ---------------------------- 237 | resource "aws_vpc" "main_vpc" { 238 | count = length(var.options.cfg.aws.vpc_id) > 0 ? 0 : 1 239 | cidr_block = var.options.cfg.aws.public_vpc_ipv4_cidr 240 | assign_generated_ipv6_cidr_block = true 241 | tags = { 242 | Name = "CML-vpc-${var.options.rand_id}" 243 | } 244 | } 245 | 246 | #------------------- public subnet, IGW and routing --------------------------- 247 | resource "aws_internet_gateway" "public_igw" { 248 | count = length(var.options.cfg.aws.gw_id) > 0 ? 
0 : 1 249 | vpc_id = local.main_vpc.id 250 | tags = { "Name" = "CML-igw-${var.options.rand_id}" } 251 | } 252 | 253 | resource "aws_subnet" "public_subnet" { 254 | availability_zone = var.options.cfg.aws.availability_zone 255 | cidr_block = cidrsubnet(var.options.cfg.aws.public_vpc_ipv4_cidr, 8, 0) 256 | vpc_id = local.main_vpc.id 257 | map_public_ip_on_launch = true 258 | tags = { "Name" = "CML-public-${var.options.rand_id}" } 259 | } 260 | 261 | resource "aws_route_table" "for_public_subnet" { 262 | vpc_id = local.main_vpc.id 263 | route { 264 | cidr_block = "0.0.0.0/0" 265 | gateway_id = local.main_gw_id 266 | } 267 | tags = { "Name" = "CML-public-rt-${var.options.rand_id}" } 268 | } 269 | 270 | resource "aws_route_table_association" "public_subnet" { 271 | subnet_id = aws_subnet.public_subnet.id 272 | route_table_id = aws_route_table.for_public_subnet.id 273 | } 274 | 275 | resource "aws_network_interface" "pub_int_cml" { 276 | subnet_id = aws_subnet.public_subnet.id 277 | security_groups = [aws_security_group.sg_tf.id] 278 | tags = { Name = "CML-controller-pub-int-${var.options.rand_id}" } 279 | } 280 | 281 | resource "aws_eip" "server_eip" { 282 | network_interface = aws_network_interface.pub_int_cml.id 283 | tags = { "Name" = "CML-controller-eip-${var.options.rand_id}", "device" = "server" } 284 | depends_on = [aws_instance.cml_controller] 285 | } 286 | 287 | #------------- compute subnet, NAT GW, routing and interfaces ----------------- 288 | 289 | resource "aws_subnet" "compute_nat_subnet" { 290 | availability_zone = var.options.cfg.aws.availability_zone 291 | cidr_block = cidrsubnet(var.options.cfg.aws.public_vpc_ipv4_cidr, 8, 1) 292 | vpc_id = local.main_vpc.id 293 | tags = { "Name" = "CML-compute-nat-${var.options.rand_id}" } 294 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 295 | } 296 | 297 | resource "aws_eip" "nat_eip" { 298 | tags = { 299 | Name = "CML-compute-nat-gw-eip-${var.options.rand_id}" 300 | } 301 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 302 | } 303 | 304 | resource "aws_nat_gateway" "compute_nat_gw" { 305 | allocation_id = aws_eip.nat_eip[0].id // Allocate an EIP 306 | subnet_id = aws_subnet.public_subnet.id 307 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 308 | tags = { 309 | Name = "CML-compute-nat-gw-${var.options.rand_id}" 310 | } 311 | # Ensure creation after EIP and subnet resources exist 312 | depends_on = [ 313 | aws_eip.nat_eip, 314 | aws_subnet.compute_nat_subnet 315 | ] 316 | } 317 | 318 | resource "aws_route_table" "compute_route_table" { 319 | vpc_id = local.main_vpc.id 320 | 321 | route { 322 | cidr_block = "0.0.0.0/0" 323 | nat_gateway_id = aws_nat_gateway.compute_nat_gw[0].id 324 | } 325 | tags = { 326 | Name = "CML-cluster-rt-${var.options.rand_id}" 327 | } 328 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 329 | } 330 | 331 | resource "aws_route_table_association" "compute_subnet_assoc" { 332 | subnet_id = aws_subnet.compute_nat_subnet[0].id 333 | route_table_id = aws_route_table.compute_route_table[0].id 334 | count = var.options.cfg.cluster.enable_cluster ? 
1 : 0 335 | } 336 | 337 | resource "aws_network_interface" "nat_int_cml_compute" { 338 | subnet_id = aws_subnet.compute_nat_subnet[0].id 339 | security_groups = [aws_security_group.sg_tf.id] 340 | tags = { Name = "CML-compute-${count.index + 1}-nat-int-${var.options.rand_id}" } 341 | count = local.num_computes 342 | } 343 | 344 | #-------------------- cluster subnet and interface ---------------------------- 345 | 346 | resource "aws_subnet" "cluster_subnet" { 347 | availability_zone = var.options.cfg.aws.availability_zone 348 | cidr_block = cidrsubnet(var.options.cfg.aws.public_vpc_ipv4_cidr, 8, 255) 349 | ipv6_cidr_block = cidrsubnet(local.main_vpc.ipv6_cidr_block, 8, 1) 350 | vpc_id = local.main_vpc.id 351 | tags = { "Name" = "CML-cluster-${var.options.rand_id}" } 352 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 353 | assign_ipv6_address_on_creation = true 354 | } 355 | 356 | resource "aws_network_interface" "cluster_int_cml" { 357 | subnet_id = aws_subnet.cluster_subnet[0].id 358 | security_groups = [aws_security_group.sg_tf_cluster_int.id] 359 | tags = { Name = "CML-controller-cluster-int-${var.options.rand_id}" } 360 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 361 | } 362 | 363 | resource "aws_network_interface" "cluster_int_cml_compute" { 364 | subnet_id = aws_subnet.cluster_subnet[0].id 365 | security_groups = [aws_security_group.sg_tf_cluster_int.id] 366 | tags = { Name = "CML-compute-${count.index + 1}-cluster-int-${var.options.rand_id}" } 367 | count = local.num_computes 368 | } 369 | 370 | #------------------ IPv6 multicast support for CML clustering ----------------- 371 | 372 | resource "aws_ec2_transit_gateway" "transit_gateway" { 373 | description = "CML Transit Gateway" 374 | multicast_support = "enable" 375 | default_route_table_association = "disable" 376 | default_route_table_propagation = "disable" 377 | dns_support = "disable" 378 | vpn_ecmp_support = "disable" 379 | tags = { 380 | Name = "CML-tgw-${var.options.rand_id}" 381 | } 382 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 383 | } 384 | 385 | resource "aws_ec2_transit_gateway_multicast_domain" "cml_mcast_domain" { 386 | transit_gateway_id = aws_ec2_transit_gateway.transit_gateway[0].id 387 | igmpv2_support = "enable" 388 | auto_accept_shared_associations = "enable" 389 | tags = { 390 | Name = "CML-mcast-domain-${var.options.rand_id}" 391 | } 392 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 393 | } 394 | 395 | resource "aws_ec2_transit_gateway_vpc_attachment" "vpc_attachment" { 396 | transit_gateway_id = aws_ec2_transit_gateway.transit_gateway[0].id 397 | vpc_id = local.main_vpc.id 398 | subnet_ids = [aws_subnet.cluster_subnet[0].id] 399 | ipv6_support = "enable" 400 | tags = { 401 | Name = "CML-tgw-vpc-attachment-${var.options.rand_id}" 402 | } 403 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 404 | } 405 | 406 | resource "aws_ec2_transit_gateway_multicast_domain_association" "cml_association" { 407 | transit_gateway_attachment_id = aws_ec2_transit_gateway_vpc_attachment.vpc_attachment[count.index].id 408 | transit_gateway_multicast_domain_id = aws_ec2_transit_gateway_multicast_domain.cml_mcast_domain[count.index].id 409 | subnet_id = aws_subnet.cluster_subnet[count.index].id 410 | count = var.options.cfg.cluster.enable_cluster ? 
1 : 0 411 | } 412 | 413 | resource "aws_ec2_transit_gateway_multicast_group_member" "cml_controller_int" { 414 | group_ip_address = "ff02::fb" 415 | network_interface_id = aws_network_interface.cluster_int_cml[count.index].id 416 | transit_gateway_multicast_domain_id = aws_ec2_transit_gateway_multicast_domain_association.cml_association[count.index].transit_gateway_multicast_domain_id 417 | count = var.options.cfg.cluster.enable_cluster ? 1 : 0 418 | } 419 | 420 | resource "aws_ec2_transit_gateway_multicast_group_member" "cml_compute_int" { 421 | group_ip_address = "ff02::fb" 422 | network_interface_id = aws_network_interface.cluster_int_cml_compute[count.index].id 423 | transit_gateway_multicast_domain_id = aws_ec2_transit_gateway_multicast_domain_association.cml_association[0].transit_gateway_multicast_domain_id 424 | count = local.num_computes 425 | } 426 | 427 | resource "aws_instance" "cml_controller" { 428 | instance_type = var.options.cfg.aws.flavor 429 | ami = data.aws_ami.ubuntu.id 430 | iam_instance_profile = var.options.cfg.aws.profile 431 | key_name = var.options.cfg.common.key_name 432 | tags = { Name = "CML-controller-${var.options.rand_id}" } 433 | ebs_optimized = "true" 434 | depends_on = [aws_route_table_association.public_subnet] 435 | dynamic "instance_market_options" { 436 | for_each = var.options.cfg.aws.spot_instances.use_spot_for_controller ? [1] : [] 437 | content { 438 | market_type = "spot" 439 | spot_options { 440 | instance_interruption_behavior = "stop" 441 | spot_instance_type = "persistent" 442 | } 443 | } 444 | } 445 | root_block_device { 446 | volume_size = var.options.cfg.common.disk_size 447 | volume_type = "gp3" 448 | encrypted = var.options.cfg.aws.enable_ebs_encryption 449 | } 450 | network_interface { 451 | network_interface_id = aws_network_interface.pub_int_cml.id 452 | device_index = 0 453 | } 454 | dynamic "network_interface" { 455 | for_each = var.options.cfg.cluster.enable_cluster ? [1] : [] 456 | content { 457 | network_interface_id = aws_network_interface.cluster_int_cml[0].id 458 | device_index = 1 459 | } 460 | } 461 | user_data = data.cloudinit_config.cml_controller.rendered 462 | } 463 | 464 | resource "aws_instance" "cml_compute" { 465 | instance_type = var.options.cfg.aws.flavor_compute 466 | ami = data.aws_ami.ubuntu.id 467 | iam_instance_profile = var.options.cfg.aws.profile 468 | key_name = var.options.cfg.common.key_name 469 | tags = { Name = "CML-compute-${count.index + 1}-${var.options.rand_id}" } 470 | ebs_optimized = "true" 471 | count = local.num_computes 472 | depends_on = [aws_instance.cml_controller, aws_route_table_association.compute_subnet_assoc] 473 | dynamic "instance_market_options" { 474 | for_each = var.options.cfg.aws.spot_instances.use_spot_for_computes ? 
[1] : [] 475 | content { 476 | market_type = "spot" 477 | spot_options { 478 | instance_interruption_behavior = "stop" 479 | spot_instance_type = "persistent" 480 | } 481 | } 482 | } 483 | root_block_device { 484 | volume_size = var.options.cfg.cluster.compute_disk_size 485 | volume_type = "gp3" 486 | encrypted = var.options.cfg.aws.enable_ebs_encryption 487 | } 488 | network_interface { 489 | network_interface_id = aws_network_interface.nat_int_cml_compute[count.index].id 490 | device_index = 0 491 | } 492 | network_interface { 493 | network_interface_id = aws_network_interface.cluster_int_cml_compute[count.index].id 494 | device_index = 1 495 | } 496 | user_data = data.cloudinit_config.cml_compute[count.index].rendered 497 | } 498 | 499 | data "aws_ami" "ubuntu" { 500 | most_recent = true 501 | 502 | filter { 503 | name = "name" 504 | values = ["ubuntu/images/hvm-ssd-gp3/ubuntu-noble-24.04-amd64-server-*"] 505 | } 506 | 507 | filter { 508 | name = "virtualization-type" 509 | values = ["hvm"] 510 | } 511 | 512 | owners = ["099720109477"] # Owner ID of Canonical 513 | } 514 | 515 | data "cloudinit_config" "cml_controller" { 516 | gzip = true 517 | base64_encode = true # always true if gzip is true 518 | 519 | part { 520 | filename = "cloud-config.yaml" 521 | content_type = "text/cloud-config" 522 | content = local.cloud_config 523 | } 524 | } 525 | 526 | data "cloudinit_config" "cml_compute" { 527 | gzip = true 528 | base64_encode = true # always true if gzip is true 529 | count = local.num_computes 530 | 531 | part { 532 | filename = "cloud-config.yaml" 533 | content_type = "text/cloud-config" 534 | 535 | content = local.cloud_config_compute[count.index] 536 | } 537 | } 538 | -------------------------------------------------------------------------------- /modules/deploy/aws/output.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | output "public_ip" { 8 | value = aws_eip.server_eip.public_ip 9 | } 10 | 11 | output "sas_token" { 12 | value = "undefined" 13 | } 14 | -------------------------------------------------------------------------------- /modules/deploy/aws/variables.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | variable "options" { 8 | type = any 9 | description = "module options of the CML deployment as an object" 10 | } 11 | 12 | -------------------------------------------------------------------------------- /modules/deploy/azure-off.t-f: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | module "azure" { 8 | source = "./dummy" 9 | count = var.cfg.target == "azure" ? 1 : 0 10 | } 11 | 12 | -------------------------------------------------------------------------------- /modules/deploy/azure-on.t-f: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 
5 | # 6 | 7 | terraform { 8 | required_providers { 9 | azurerm = { 10 | source = "hashicorp/azurerm" 11 | version = ">=3.82.0" 12 | } 13 | } 14 | required_version = ">= 1.1.0" 15 | } 16 | 17 | provider "azurerm" { 18 | features {} 19 | 20 | subscription_id = var.azure_subscription_id 21 | tenant_id = var.azure_tenant_id 22 | 23 | # Configuration options 24 | } 25 | 26 | module "azure" { 27 | source = "./azure" 28 | count = var.cfg.target == "azure" ? 1 : 0 29 | options = local.options 30 | } 31 | 32 | -------------------------------------------------------------------------------- /modules/deploy/azure.tf: -------------------------------------------------------------------------------- 1 | azure-off.t-f -------------------------------------------------------------------------------- /modules/deploy/azure/main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | locals { 8 | # late binding required as the token is only known within the module 9 | vars = templatefile("${path.module}/../data/vars.sh", { 10 | cfg = merge( 11 | var.options.cfg, 12 | { sas_token = data.azurerm_storage_account_sas.cml.sas } 13 | ) 14 | } 15 | ) 16 | 17 | cml_config_controller = templatefile("${path.module}/../data/virl2-base-config.yml", { 18 | hostname = var.options.cfg.common.controller_hostname, 19 | is_controller = true 20 | is_compute = !var.options.cfg.cluster.enable_cluster || var.options.cfg.cluster.allow_vms_on_controller 21 | cfg = merge( 22 | var.options.cfg, 23 | { sas_token = data.azurerm_storage_account_sas.cml.sas } 24 | ) 25 | } 26 | ) 27 | 28 | # Ensure there's no tabs in the template file! Also ensure that the list of 29 | # reference platforms has no single quotes in the file names or keys (should 30 | # be reasonable, but you never know...) 
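  # For orientation: each option below is passed into data/cloud-config.txt
  # as a plain string and embedded via indent(), along the lines of
  #
  #   - path: /provision/cml.sh
  #     content: |
  #       ${indent(6, cml)}
  #
  # indent() only prepends spaces, which is why a stray tab in any of these
  # files would render invalid YAML for cloud-init.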
31 | cloud_config = templatefile("${path.module}/../data/cloud-config.txt", { 32 | vars = local.vars 33 | cml_config = local.cml_config_controller 34 | cfg = var.options.cfg 35 | cml = var.options.cml 36 | common = var.options.common 37 | copyfile = var.options.copyfile 38 | del = var.options.del 39 | interface_fix = var.options.interface_fix 40 | license = var.options.license 41 | extras = var.options.extras 42 | hostname = var.options.cfg.common.controller_hostname 43 | path = path.module 44 | }) 45 | 46 | # vmname = "cml-${var.options.rand_id}" 47 | } 48 | 49 | # this references an existing resource group 50 | data "azurerm_resource_group" "cml" { 51 | name = var.options.cfg.azure.resource_group 52 | } 53 | 54 | # this references an existing storage account within the resource group 55 | data "azurerm_storage_account" "cml" { 56 | name = var.options.cfg.azure.storage_account 57 | resource_group_name = data.azurerm_resource_group.cml.name 58 | } 59 | 60 | data "azurerm_storage_account_sas" "cml" { 61 | connection_string = data.azurerm_storage_account.cml.primary_connection_string 62 | https_only = true 63 | signed_version = "2022-11-02" 64 | 65 | resource_types { 66 | service = true 67 | container = true 68 | object = true 69 | } 70 | 71 | services { 72 | blob = true 73 | queue = false 74 | table = false 75 | file = false 76 | } 77 | 78 | start = timestamp() 79 | expiry = timeadd(timestamp(), "1h") 80 | 81 | permissions { 82 | read = true 83 | write = false 84 | delete = false 85 | list = true 86 | add = false 87 | create = false 88 | update = false 89 | process = false 90 | tag = false 91 | filter = false 92 | } 93 | } 94 | 95 | resource "azurerm_network_security_group" "cml" { 96 | name = "cml-sg-${var.options.rand_id}" 97 | location = data.azurerm_resource_group.cml.location 98 | resource_group_name = data.azurerm_resource_group.cml.name 99 | } 100 | 101 | resource "azurerm_network_security_rule" "cml_std" { 102 | name = "cml-std-in" 103 | priority = 100 104 | direction = "Inbound" 105 | access = "Allow" 106 | protocol = "Tcp" 107 | source_port_range = "*" 108 | destination_port_ranges = [22, 80, 443, 1122, 9090] 109 | source_address_prefixes = var.options.cfg.common.allowed_ipv4_subnets 110 | destination_address_prefix = "*" 111 | resource_group_name = data.azurerm_resource_group.cml.name 112 | network_security_group_name = azurerm_network_security_group.cml.name 113 | } 114 | 115 | resource "azurerm_network_security_rule" "cml_patty_tcp" { 116 | count = var.options.cfg.common.enable_patty ? 1 : 0 117 | name = "patty-tcp-in" 118 | priority = 200 119 | direction = "Inbound" 120 | access = "Allow" 121 | protocol = "Tcp" 122 | source_port_range = "*" 123 | destination_port_range = "2000-7999" 124 | source_address_prefixes = var.options.cfg.common.allowed_ipv4_subnets 125 | destination_address_prefix = "*" 126 | resource_group_name = data.azurerm_resource_group.cml.name 127 | network_security_group_name = azurerm_network_security_group.cml.name 128 | } 129 | 130 | resource "azurerm_network_security_rule" "cml_patty_udp" { 131 | count = var.options.cfg.common.enable_patty ? 
1 : 0 132 | name = "patty-udp-in" 133 | priority = 300 134 | direction = "Inbound" 135 | access = "Allow" 136 | protocol = "Udp" 137 | source_port_range = "*" 138 | destination_port_range = "2000-7999" 139 | source_address_prefixes = var.options.cfg.common.allowed_ipv4_subnets 140 | destination_address_prefix = "*" 141 | resource_group_name = data.azurerm_resource_group.cml.name 142 | network_security_group_name = azurerm_network_security_group.cml.name 143 | } 144 | 145 | resource "azurerm_public_ip" "cml" { 146 | name = "cml-pub-ip-${var.options.rand_id}" 147 | resource_group_name = data.azurerm_resource_group.cml.name 148 | location = data.azurerm_resource_group.cml.location 149 | allocation_method = "Static" 150 | } 151 | 152 | resource "azurerm_virtual_network" "cml" { 153 | name = "cml-network-${var.options.rand_id}" 154 | address_space = ["10.0.0.0/16"] 155 | location = data.azurerm_resource_group.cml.location 156 | resource_group_name = data.azurerm_resource_group.cml.name 157 | } 158 | 159 | resource "azurerm_subnet" "cml" { 160 | name = "internal" 161 | resource_group_name = data.azurerm_resource_group.cml.name 162 | virtual_network_name = azurerm_virtual_network.cml.name 163 | address_prefixes = ["10.0.2.0/24"] 164 | } 165 | 166 | resource "azurerm_network_interface" "cml" { 167 | name = "cml-nic-${var.options.rand_id}" 168 | location = data.azurerm_resource_group.cml.location 169 | resource_group_name = data.azurerm_resource_group.cml.name 170 | 171 | ip_configuration { 172 | name = "internal" 173 | subnet_id = azurerm_subnet.cml.id 174 | private_ip_address_allocation = "Dynamic" 175 | public_ip_address_id = azurerm_public_ip.cml.id 176 | } 177 | } 178 | 179 | # Connect the security group to the network interface 180 | resource "azurerm_network_interface_security_group_association" "cml" { 181 | network_interface_id = azurerm_network_interface.cml.id 182 | network_security_group_id = azurerm_network_security_group.cml.id 183 | } 184 | 185 | resource "azurerm_linux_virtual_machine" "cml" { 186 | name = var.options.cfg.common.controller_hostname 187 | resource_group_name = data.azurerm_resource_group.cml.name 188 | location = data.azurerm_resource_group.cml.location 189 | 190 | # size = "Standard_F2" 191 | # https://learn.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/nested-virtualization 192 | # https://learn.microsoft.com/en-us/azure/virtual-machines/dv5-dsv5-series 193 | # Size vCPU Memory: GiB Temp storage (SSD) GiB Max data disks Max NICs Max network bandwidth (Mbps) 194 | # Standard_D2_v5 2 8 Remote Storage Only 4 2 12500 195 | # Standard_D4_v5 4 16 Remote Storage Only 8 2 12500 196 | # Standard_D8_v5 8 32 Remote Storage Only 16 4 12500 197 | # Standard_D16_v5 16 64 Remote Storage Only 32 8 12500 198 | # Standard_D32_v5 32 128 Remote Storage Only 32 8 16000 199 | # Standard_D48_v5 48 192 Remote Storage Only 32 8 24000 200 | # Standard_D64_v5 64 256 Remote Storage Only 32 8 30000 201 | # Standard_D96_v5 96 384 Remote Storage Only 32 8 35000 202 | # 203 | # https://learn.microsoft.com/en-us/azure/virtual-machines/ddv4-ddsv4-series 204 | # Size vCPU Memory: GiB Temp storage (SSD) GiB Max data disks Max temp storage throughput: IOPS/MBps* Max NICs Expected network bandwidth (Mbps) 205 | # Standard_D2d_v41 2 8 75 4 9000/125 2 5000 206 | # Standard_D4d_v4 4 16 150 8 19000/250 2 10000 207 | # Standard_D8d_v4 8 32 300 16 38000/500 4 12500 208 | # Standard_D16d_v4 16 64 600 32 75000/1000 8 12500 209 | # Standard_D32d_v4 32 128 1200 32 150000/2000 8 16000 210 | # 
Standard_D48d_v4 48 192 1800 32 225000/3000 8 24000 211 | # Standard_D64d_v4 64 256 2400 32 300000/4000 8 30000 212 | 213 | size = var.options.cfg.azure.size 214 | 215 | # uncomment this block for diagnostics and serial console access to the VM 216 | # boot_diagnostics { 217 | # } 218 | 219 | admin_username = "ubuntu" 220 | network_interface_ids = [ 221 | azurerm_network_interface.cml.id, 222 | ] 223 | 224 | admin_ssh_key { 225 | username = "ubuntu" 226 | public_key = data.azurerm_ssh_public_key.cml.public_key 227 | # public_key = file("~/.ssh/id_rsa.pub") 228 | } 229 | 230 | os_disk { 231 | caching = "ReadWrite" 232 | storage_account_type = "Standard_LRS" 233 | disk_size_gb = var.options.cfg.common.disk_size 234 | } 235 | 236 | # https://canonical-azure.readthedocs-hosted.com/en/latest/azure-explanation/daily-vs-release-images/ 237 | source_image_reference { 238 | publisher = "Canonical" 239 | offer = "ubuntu-24_04-lts" 240 | sku = "minimal" 241 | version = "latest" 242 | } 243 | 244 | custom_data = data.cloudinit_config.azure_ud.rendered 245 | } 246 | 247 | data "azurerm_ssh_public_key" "cml" { 248 | name = var.options.cfg.common.key_name 249 | resource_group_name = data.azurerm_resource_group.cml.name 250 | } 251 | 252 | data "cloudinit_config" "azure_ud" { 253 | gzip = true 254 | base64_encode = true # always true if gzip is true 255 | 256 | part { 257 | filename = "cloud-config.yaml" 258 | content_type = "text/cloud-config" 259 | 260 | content = local.cloud_config 261 | } 262 | } 263 | -------------------------------------------------------------------------------- /modules/deploy/azure/output.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | output "public_ip" { 8 | value = azurerm_public_ip.cml.ip_address 9 | } 10 | 11 | output "sas_token" { 12 | value = data.azurerm_storage_account_sas.cml.sas 13 | } 14 | -------------------------------------------------------------------------------- /modules/deploy/azure/variables.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | variable "options" { 8 | type = any 9 | description = "module options of the CML deployment as an object" 10 | } 11 | 12 | -------------------------------------------------------------------------------- /modules/deploy/data/00-patch_vmx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This file is part of Cisco Modeling Labs 4 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | 8 | # This is an unsupported workaround for the lack of the VMX CPU flag support on 9 | # AWS. 10 | # 11 | # It will disable the check for the VMX CPU flag -- when this patch is in place, 12 | # the system will always report the VMX flag to be present. 13 | # 14 | # Some platforms like Linux, IOSv and IOSv-L2 will still work but others will 15 | # not and crash! 16 | 17 | echo "no-VMX patch..." 
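# The subshell below changes into the CML Python site-packages directory and
# applies the diff with `patch --forward`, which keeps the operation
# idempotent: on a re-run, an already-applied patch is skipped instead of
# raising an error. virl2.target is restarted afterwards so the patched
# driver code takes effect.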
18 | ( 19 | cd /var/local/virl2/.local/lib/python3.12/site-packages 20 | patch -p1 --forward < 26 | + 27 | nodename 28 | 29 | 30 | 384 31 | 1 32 | - 33 | + 34 | 35 | hvm 36 | 37 | diff -ru a/simple_drivers/low_level_driver/host_statistics.py b/simple_drivers/low_level_driver/host_statistics.py 38 | --- a/simple_drivers/low_level_driver/host_statistics.py 39 | +++ b/simple_drivers/low_level_driver/host_statistics.py 40 | @@ -489,7 +489,8 @@ class LLDSystemInfo: 41 | # return vmx or svm 42 | # 43 | virtualization: str | None = self._get_cpu_info_field("Virtualization") 44 | - return virtualization in ("VT-x", "AMD-V") 45 | + # return virtualization in ("VT-x", "AMD-V") 46 | + return True 47 | 48 | def stats(self) -> dict[str, dict[str, Any]]: 49 | """This is periodically called every heartbeat from 50 | EOF 51 | systemctl restart virl2.target 52 | ) 53 | echo "done" 54 | -------------------------------------------------------------------------------- /modules/deploy/data/03-letsencrypt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This file is part of Cisco Modeling Labs 4 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | 8 | source /provision/common.sh 9 | source /provision/copyfile.sh 10 | source /provision/vars.sh 11 | 12 | if ! is_controller; then 13 | echo "not a controller, exiting" 14 | return 15 | fi 16 | 17 | # define these in extras! 18 | # CFG_UN="" 19 | # CFG_PW="" 20 | # CFG_HN="" 21 | # CFG_EMAIL="" 22 | 23 | # if there's no hostname then return immediately (these scripts are sourced) 24 | if [ -z "${CFG_HN}" ]; then 25 | echo "no hostname configured, exiting" 26 | return 27 | fi 28 | 29 | # update our hostname on DynDNS 30 | IP=$(curl -s4 canhazip.com) 31 | auth=$(echo -n "$CFG_UN:$CFG_PW" | base64) 32 | attempts=5 33 | while [ $attempts -gt 0 ]; do 34 | status=$(curl -s -o/dev/null \ 35 | -w "%{http_code}" \ 36 | -H "Authorization: Basic $auth" \ 37 | -H "User-Agent: Update Client/v1.0" \ 38 | "https://members.dyndns.org/nic/update?hostname=$CFG_HN&myip=$IP") 39 | if [ $status -eq 200 ]; then 40 | break 41 | fi 42 | sleep 5 43 | echo "trying again... ($attempts)" 44 | ((attempts--)) 45 | done 46 | 47 | echo 48 | 49 | copyfile ${CFG_HN}-fullchain.pem /tmp/fullchain.pem 50 | copyfile ${CFG_HN}-privkey.pem /tmp/privkey.pem 51 | 52 | if openssl x509 -text /etc/letsencrypt/cli.ini </etc/cockpit/ws-certs.d/0-self-signed.key 84 | sed '/-----END CERTIFICATE-----/q' $ /etc/letsencrypt/live/$CFG_HN/fullchain.pem >/etc/cockpit/ws-certs.d/0-self-signed.cert 85 | 86 | # reload affected services 87 | systemctl reload nginx 88 | systemctl restart cockpit 89 | -------------------------------------------------------------------------------- /modules/deploy/data/04-customize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This file is part of Cisco Modeling Labs 4 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | 8 | source /provision/common.sh 9 | source /provision/copyfile.sh 10 | source /provision/vars.sh 11 | 12 | if ! 
is_controller; then
13 |     echo "not a controller, exiting"
14 |     return
15 | fi
16 | 
17 | # copy the converter wheel to the webserver dir
18 | copyfile cml2tf-0.2.1-py3-none-any.whl /var/lib/nginx/html/client/
19 | 
20 | # stabilization timer
21 | constants="/var/local/virl2/.local/lib/python3.8/site-packages/simple_drivers/constants.py"
22 | sed -i -e'/^STABILIZATION_TIME = 3$/s/3/1/' $constants
23 | 
24 | # script to create users and resource limits
25 | cat >/provision/users.py < 0:
39 |     try:
40 |         client = ClientLibrary(f"https://{hostname}", admin, password, ssl_verify=False)
41 |     except HTTPStatusError as exc:
42 |         print(exc)
43 |         sleep(10)
44 |         attempts -= 1
45 |     else:
46 |         break
47 | 
48 | print(client)
49 | 
50 | USER_COUNT = 20
51 | 
52 | # create 20 users (and pod0 is for us to use, in total 21)
53 | 
54 | # the below block is to remove users again, used for testing
55 | if False:
56 |     for id in range(0, USER_COUNT + 1):
57 |         user_id = client.user_management.user_id(f"pod{id}")
58 |         client.user_management.delete_user(user_id)
59 |     pools = client.resource_pool_management.resource_pools
60 |     for id, pool in pools.items():
61 |         if pool.is_template:
62 |             template = pool
63 |             continue
64 |         pool.remove()
65 |     template.remove()
66 |     exit(0)
67 | 
68 | rp = client.resource_pool_management.create_resource_pool("pods", licenses=2, ram=2048)
69 | 
70 | for id in range(0, USER_COUNT + 1):
71 |     client.user_management.create_user(f"pod{id}", f"{id:#02}DevWks{id:#02}", resource_pool=rp.id)
72 | EOF
73 | 
74 | export CFG_APP_PASS CFG_COMMON_HOSTNAME
75 | export HOME=/var/local/virl2
76 | python3 /provision/users.py
77 | 
--------------------------------------------------------------------------------
/modules/deploy/data/99-dummy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # This file is part of Cisco Modeling Labs
4 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
5 | # All rights reserved.
6 | #
7 | 
8 | true
9 | 
--------------------------------------------------------------------------------
/modules/deploy/data/README.md:
--------------------------------------------------------------------------------
1 | # Copy file behavior
2 | 
3 | Azure copy and AWS CLI cp in recursive mode apparently behave differently. While Azure takes the directory name into account, it seems that AWS does not.
4 | 
5 | Example:
6 | 
7 |     cp --recursive srcdir/ dstdir/
8 | 
9 | Using Azure, the result will be (as expected):
10 | 
11 | ```
12 | dstdir/srcdir/file1
13 |              /file2
14 | ```
15 | 
16 | while with AWS, it's unfortunately:
17 | 
18 | ```
19 | dstdir/file1
20 |       /file2
21 | ```
22 | 
23 | So, to make this work, the logic needs to add the srcdir to the dstdir, but only when copying files on AWS, not on Azure. This is done in `copyfile.sh` with the `$ITEM` arg, in case anyone is wondering.
24 | 
25 | Not adding this lengthy comment to the file itself due to the 16KB cloud-init limitation on AWS.
26 | 
27 | ## Service restart
28 | 
29 | The cml.sh script's post-processing function originally stopped the target, ran through all the patches, and then restarted the target at the end. However, some scripts might require the target to be running (to provision users, for example) while others might change something that does not require a full target restart. For this reason, I've removed the stop/start logic from post-processing and moved it here for reference.
If multiple scripts require a stop/start, it is advisable to indicate the restart requirement with a flag (for example, a marker file) and check for that flag at the end of post-processing; if it is present, restart the services once.
30 | 
31 | ```bash
32 | # systemctl stop virl2.target
33 | # while [ $(systemctl is-active virl2-controller.service) = active ]; do
34 | #     sleep 5
35 | # done
36 | 
37 | # sleep 5
38 | # # do this for good measure, best case this is a no-op
39 | # netplan apply
40 | # # restart the VIRL2 target now
41 | # systemctl restart virl2.target
42 | ```
43 | 
--------------------------------------------------------------------------------
/modules/deploy/data/cloud-config.txt:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | package_update: true
3 | package_upgrade: true
4 | 
5 | hostname: ${hostname}
6 | manage_etc_hosts: true
7 | 
8 | packages:
9 |   - curl
10 |   - jq
11 | 
12 | write_files:
13 |   - path: /provision/refplat
14 |     owner: root:root
15 |     permissions: "0644"
16 |     content: '${jsonencode(cfg.refplat)}'
17 |   - path: /provision/cml.sh
18 |     owner: root:root
19 |     permissions: "0700"
20 |     content: |
21 |       ${indent(6, cml)}
22 |   - path: /provision/common.sh
23 |     owner: root:root
24 |     permissions: "0700"
25 |     content: |
26 |       ${indent(6, common)}
27 |   - path: /provision/copyfile.sh
28 |     owner: root:root
29 |     permissions: "0700"
30 |     content: |
31 |       ${indent(6, copyfile)}
32 |   - path: /provision/vars.sh
33 |     owner: root:root
34 |     permissions: "0600"
35 |     content: |
36 |       ${indent(6, format("%s\n%s", vars, extras))}
37 |   - path: /provision/del.sh
38 |     owner: root:root
39 |     permissions: "0700"
40 |     content: |
41 |       ${indent(6, del)}
42 |   - path: /provision/interface_fix.py
43 |     owner: root:root
44 |     permissions: "0700"
45 |     content: |
46 |       ${indent(6, interface_fix)}
47 |   - path: /provision/license.py
48 |     owner: root:root
49 |     permissions: "0700"
50 |     content: |
51 |       ${indent(6, license)}
52 |   - path: /etc/virl2-base-config.yml
53 |     owner: root:root
54 |     permissions: "0644"
55 |     content: |
56 |       ${indent(6, format("%s\n", cml_config))}
57 | %{ for script in cfg.app.customize }
58 |   - path: /provision/${script}
59 |     owner: root:root
60 |     permissions: "0644"
61 |     content: |
62 |       ${indent(6, file("${path}/../data/${script}"))}
63 | %{ endfor }
64 | 
65 | runcmd:
66 |   - /provision/cml.sh && touch /run/reboot || echo "CML provisioning failed. Not rebooting"
67 | 
68 | power_state:
69 |   mode: reboot
70 |   condition: test -f /run/reboot
--------------------------------------------------------------------------------
/modules/deploy/data/cml.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # This file is part of Cisco Modeling Labs
4 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
5 | # All rights reserved.
6 | #
7 | # :%!shfmt -ci -i 4 -
8 | # set -x
9 | # set -e
10 | 
11 | source /provision/common.sh
12 | source /provision/copyfile.sh
13 | source /provision/vars.sh
14 | 
15 | function setup_pre_aws() {
16 |     export AWS_DEFAULT_REGION=${CFG_AWS_REGION}
17 |     apt-get install -y unzip
18 |     curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
19 |     unzip -q awscliv2.zip
20 |     ./aws/install
21 |     rm -f awscliv2.zip
22 |     rm -rf aws/
23 | }
24 | 
25 | function setup_pre_azure() {
26 |     curl -LO https://aka.ms/downloadazcopy-v10-linux
27 |     tar xvf down* --strip-components=1 -C /usr/local/bin
28 |     chmod a+x /usr/local/bin/azcopy
29 | }
30 | 
31 | function wait_for_network_manager() {
32 |     counter=0
33 |     max_wait=60
34 | 
35 |     while ! systemctl is-active --quiet NetworkManager && [ $counter -lt $max_wait ]; do
36 |         echo "Waiting for NetworkManager to become active..."
37 |         sleep 5
38 |         counter=$((counter + 5))
39 |     done
40 | 
41 |     if systemctl is-active --quiet NetworkManager; then
42 |         echo "NetworkManager is active."
43 |     else
44 |         echo "NetworkManager did not become active after $max_wait seconds."
45 |     fi
46 | }
47 | 
48 | function base_setup() {
49 | 
50 |     # Check if this device is a controller
51 |     if is_controller; then
52 |         # copy node definitions and images to the instance
53 |         VLLI=/var/lib/libvirt/images
54 |         NDEF=node-definitions
55 |         IDEF=virl-base-images
56 |         mkdir -p $VLLI/$NDEF
57 | 
58 |         # copy all node definitions as defined in the provisioned config
59 |         if [ $(jq >/etc/default/patty.env
160 |         sed -i '/^After/iWants=virl2-patty.service' /lib/systemd/system/virl2.target
161 |         systemctl daemon-reload
162 |         systemctl enable --now virl2-patty
163 |     fi
164 | }
165 | 
166 | function cml_configure() {
167 |     API="http://ip6-localhost:8001/api/v0"
168 | 
169 |     clouduser="ubuntu"
170 |     if [[ -d /home/${CFG_SYS_USER}/.ssh ]]; then
171 |         # Directory exists - Move individual files within .ssh
172 |         mv /home/$clouduser/.ssh/* /home/${CFG_SYS_USER}/.ssh/
173 |     else
174 |         # Directory doesn't exist - Move the entire .ssh directory
175 |         mv /home/$clouduser/.ssh/ /home/${CFG_SYS_USER}/
176 |     fi
177 |     chown -R ${CFG_SYS_USER}:${CFG_SYS_USER} /home/${CFG_SYS_USER}/.ssh
178 | 
179 |     # disable access for the user but keep it as cloud-init requires it to be
180 |     # present, otherwise one of the final modules will fail.
181 |     usermod --expiredate 1 --lock $clouduser
182 | 
183 |     # allow this user to read the configuration vars
184 |     chgrp ${CFG_SYS_USER} /provision/vars.sh
185 |     chmod g+r /provision/vars.sh
186 | 
187 |     # Change the ownership of the del.sh script to the sysadmin user
188 |     chown ${CFG_SYS_USER}:${CFG_SYS_USER} /provision/del.sh
189 | 
190 |     # Check if this device is a controller
191 |     if ! is_controller; then
192 |         echo "This is not a controller node. No need to install licenses."
193 |         return 0
194 |     fi
195 | 
196 |     until [ "true" = "$(curl -s $API/system_information | jq -r .ready)" ]; do
197 |         echo "Waiting for controller to be ready..."
198 |         sleep 5
199 |     done
200 | 
201 |     # Put the license and users in place
202 |     export CFG_APP_USER CFG_APP_PASS CFG_LICENSE_NODES CFG_LICENSE_FLAVOR CFG_LICENSE_TOKEN
203 |     HOME=/var/local/virl2 python3 /provision/license.py
204 | }
205 | 
206 | function postprocess() {
207 |     FILELIST=$(find /provision/ -type f | grep -E '[0-9]{2}-[[:alnum:]_]+\.sh' | grep -v '99-dummy' | sort)
208 |     if [ -n "$FILELIST" ]; then
209 |         (
210 |             mkdir -p /var/log/provision
211 |             for patch in $FILELIST; do
212 |                 echo "processing $patch ..."
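                # Each script is sourced in its own subshell with `|| true`, so a
                # failing customization step cannot abort provisioning; stdout and
                # stderr of every script are captured in a per-script logfile
                # below /var/log/.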
213 | ( 214 | source "$patch" || true 215 | ) 2>&1 | tee "/var/log/${patch}.log" 216 | echo "done with ${patch}" 217 | done 218 | ) 219 | fi 220 | } 221 | 222 | echo "### Provisioning via cml.sh STARTS $(date)" 223 | 224 | # AWS specific (?): 225 | # For troubleshooting. To allow console access on AWS, the root user needs a 226 | # password. Note: not all instance types / flavors provide a serial console! 227 | # echo "root:secret-password-here" | /usr/sbin/chpasswd 228 | 229 | # Ensure non-interactive Debian package installation 230 | APT_OPTS="-o Dpkg::Options::=--force-confmiss -o Dpkg::Options::=--force-confnew" 231 | APT_OPTS+=" -o DPkg::Progress-Fancy=0 -o APT::Color=0" 232 | DEBIAN_FRONTEND=noninteractive 233 | export APT_OPTS DEBIAN_FRONTEND 234 | 235 | # Run the appropriate pre-setup function 236 | case $CFG_TARGET in 237 | aws) 238 | setup_pre_aws 239 | ;; 240 | azure) 241 | setup_pre_azure 242 | ;; 243 | *) 244 | echo "unknown target!" 245 | exit 1 246 | ;; 247 | esac 248 | 249 | # Only run the base setup when there's a provision directory both with 250 | # Terraform and with Packer but not when deploying an AMI 251 | if [ -d /provision ]; then 252 | echo "### base setup STARTS $(date)" 253 | base_setup 254 | fi 255 | 256 | # Only do a configure when this is not run within Packer / AMI building 257 | if [ ! -f /tmp/PACKER_BUILD ]; then 258 | echo "### configure STARTS $(date)" 259 | cml_configure ${CFG_TARGET} 260 | echo "### postprocess STARTS $(date)" 261 | postprocess 262 | # netplan apply 263 | # systemctl reboot 264 | fi 265 | 266 | echo "### Provisioning via cml.sh ENDS $(date)" 267 | -------------------------------------------------------------------------------- /modules/deploy/data/common.sh: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | CONFIG_FILE="/etc/virl2-base-config.yml" 7 | 8 | function is_controller() { 9 | [[ -r "$CONFIG_FILE" ]] && grep -qi "is_controller: true" "$CONFIG_FILE" 10 | } 11 | -------------------------------------------------------------------------------- /modules/deploy/data/copyfile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This file is part of Cisco Modeling Labs 4 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | 8 | source /provision/vars.sh 9 | 10 | function copyfile() { 11 | SRC=$1 12 | DST=$2 13 | ITEM=$3 14 | RECURSIVE=$4 15 | case $CFG_TARGET in 16 | aws) 17 | if [ -n "$ITEM" ]; then 18 | ITEM="/$ITEM" 19 | fi 20 | aws s3 cp --no-progress $RECURSIVE "s3://$CFG_AWS_BUCKET/$SRC" $DST$ITEM 21 | ;; 22 | azure) 23 | LOC="https://${CFG_AZURE_STORAGE_ACCOUNT}.blob.core.windows.net/${CFG_AZURE_CONTAINER_NAME}" 24 | azcopy copy --output-level=quiet "$LOC/$SRC$CFG_SAS_TOKEN" $DST $RECURSIVE 25 | ;; 26 | *) ;; 27 | esac 28 | } 29 | -------------------------------------------------------------------------------- /modules/deploy/data/del.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This file is part of Cisco Modeling Labs 4 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | # 8 | # NOTE: this only works as long as the admin user password wasn't changed 9 | # from the value which was originally provisioned. 
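# The flow below: re-authenticate against the local controller API to obtain
# a bearer token, then call the licensing de-registration endpoint so that
# the Smart License is returned before the instance is destroyed.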
10 | 
11 | # set -x
12 | # set -e
13 | 
14 | source /provision/vars.sh
15 | 
16 | function cml_remove_license() {
17 |     API="http://ip6-localhost:8001/api/v0"
18 | 
19 |     # re-auth with new password
20 |     TOKEN=$(echo '{"username":"'${CFG_APP_USER}'","password":"'${CFG_APP_PASS}'"}' |
21 |         curl -s -d@- $API/authenticate | jq -r .)
22 | 
23 |     # de-register the license from the controller
24 |     curl -s -X "DELETE" \
25 |         "$API/licensing/deregistration" \
26 |         -H "Authorization: Bearer $TOKEN" \
27 |         -H "accept: application/json" \
28 |         -H "Content-Type: application/json"
29 | }
30 | 
31 | # only de-register when the target is active
32 | if [ "$(systemctl is-active virl2.target)" = "active" ]; then
33 |     cml_remove_license
34 | else
35 |     echo "CML is not active, license cannot be de-registered!"
36 |     exit 255
37 | fi
38 | 
--------------------------------------------------------------------------------
/modules/deploy/data/interface_fix.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | #
3 | # This file is part of Cisco Modeling Labs
4 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
5 | # All rights reserved.
6 | #
7 | 
8 | import yaml
9 | 
10 | 
11 | def get_interface_names(netplan_file):
12 |     """Parses the netplan file to extract interface names.
13 | 
14 |     Args:
15 |         netplan_file (str): Path to the netplan configuration file.
16 | 
17 |     Returns:
18 |         list: A list of interface names found in the file.
19 |     """
20 | 
21 |     with open(netplan_file, "r") as f:
22 |         netplan_data = yaml.safe_load(f)
23 | 
24 |     interfaces = []
25 |     for interface_name, interface_config in netplan_data["network"][
26 |         "ethernets"
27 |     ].items():
28 |         route_metric = interface_config.get("dhcp4-overrides", {}).get(
29 |             "route-metric", float("inf")
30 |         )
31 |         interfaces.append((interface_name, route_metric))
32 | 
33 |     # Sort interfaces based on route-metric (ascending) to detect primary interface
34 |     interfaces.sort(key=lambda item: item[1])
35 | 
36 |     return [interface[0] for interface in interfaces]  # Return just the interface names
37 | 
38 | 
39 | def update_netplan_config(netplan_file, primary_interface, renderer="NetworkManager"):
40 |     """Updates the Netplan config file with the specified renderer.
41 | 
42 |     Args:
43 |         netplan_file (str): Path to the Netplan configuration file.
44 |         primary_interface (str): The primary network interface to update.
45 |         renderer (str, optional): The renderer to use. Defaults to 'NetworkManager'.
46 |     """
47 |     with open(netplan_file, "r") as f:
48 |         netplan_data = yaml.safe_load(f)
49 | 
50 |     netplan_data.setdefault("network", {})
51 |     netplan_data["network"]["renderer"] = renderer
52 | 
53 |     ethernets = netplan_data["network"].get("ethernets", {})
54 |     if primary_interface in ethernets:
55 |         ethernets[primary_interface]["renderer"] = renderer
56 | 
57 |     with open(netplan_file, "w") as f:
58 |         yaml.safe_dump(netplan_data, f)
59 | 
60 | 
61 | def update_virl2_config(virl2_config_file, primary_interface, cluster_interface=None):
62 |     """Updates the VIRL2 base config file with interface names.
63 | 
64 |     Args:
65 |         virl2_config_file (str): Path to the VIRL2 base config file.
66 |         primary_interface (str): Name of the primary interface.
67 |         cluster_interface (str, optional): Name of the cluster interface (if any).
68 | """ 69 | 70 | with open(virl2_config_file, "r") as f: 71 | virl2_data = yaml.safe_load(f) 72 | 73 | virl2_data["primary_interface"] = primary_interface 74 | if cluster_interface: 75 | virl2_data["cluster_interface"] = cluster_interface 76 | 77 | with open(virl2_config_file, "w") as f: 78 | yaml.safe_dump(virl2_data, f) 79 | 80 | 81 | def main(): 82 | # Configuration paths 83 | netplan_file = "/etc/netplan/50-cloud-init.yaml" 84 | virl2_config_file = "/etc/virl2-base-config.yml" 85 | 86 | # Get interface names 87 | interface_names = get_interface_names(netplan_file) 88 | primary_interface = interface_names[0] 89 | cluster_interface = interface_names[1] if len(interface_names) > 1 else None 90 | 91 | # Update Netplan config 92 | update_netplan_config(netplan_file, primary_interface) 93 | 94 | # Update VIRL2 config 95 | update_virl2_config(virl2_config_file, primary_interface, cluster_interface) 96 | 97 | 98 | if __name__ == "__main__": 99 | main() 100 | -------------------------------------------------------------------------------- /modules/deploy/data/license.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # 3 | # This file is part of Cisco Modeling Labs 4 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | 8 | import os 9 | import sys 10 | from time import sleep 11 | 12 | import virl2_client as pcl 13 | 14 | 15 | def set_license() -> str: 16 | nodes = os.getenv("CFG_LICENSE_NODES") or 0 17 | token = os.getenv("CFG_LICENSE_TOKEN") or "" 18 | flavor = os.getenv("CFG_LICENSE_FLAVOR") 19 | admin_user = os.getenv("CFG_APP_USER") 20 | admin_pass = os.getenv("CFG_APP_PASS") 21 | if len(token) == 0: 22 | print("no token provided") 23 | return "" 24 | 25 | regid = "regid.2019-10.com.cisco.CML_NODE_COUNT,1.0_2607650b-6ca8-46d5-81e5-e6688b7383c4" 26 | client = pcl.ClientLibrary( 27 | "localhost", username=admin_user, password=admin_pass, ssl_verify=False 28 | ) 29 | 30 | try: 31 | client.licensing.set_product_license(flavor) 32 | except pcl.exceptions.APIError as exc: 33 | return str(exc) 34 | 35 | try: 36 | client.licensing.register(token) 37 | nn = int(nodes) 38 | if flavor == "CML_Enterprise" and nn > 0: 39 | client.licensing.update_features({regid: nn}) 40 | except pcl.exceptions.APIError as exc: 41 | return str(exc) 42 | 43 | authorized = False 44 | attempts = 24 45 | while not authorized and attempts > 0: 46 | status = client.licensing.status() 47 | authorized = status["authorization"]["status"] == "IN_COMPLIANCE" 48 | attempts -= 1 49 | sleep(5) 50 | 51 | if attempts == 0 and not authorized: 52 | return "system did not get into compliant state" 53 | return "" 54 | 55 | 56 | if __name__ == "__main__": 57 | exit_code = 0 58 | result = set_license() 59 | if len(result) > 0: 60 | exit_code = 1 61 | print(result) 62 | sys.exit(exit_code) 63 | -------------------------------------------------------------------------------- /modules/deploy/data/vars.sh: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 
5 | # 6 | CFG_APP_SOFTWARE="${cfg.app.software}" 7 | CFG_APP_PASS="${cfg.secrets.app.secret}" 8 | CFG_APP_USER="${cfg.secrets.app.username}" 9 | CFG_AWS_BUCKET="${cfg.aws.bucket}" 10 | CFG_AWS_REGION="${cfg.aws.region}" 11 | CFG_AZURE_CONTAINER_NAME="${cfg.azure.container_name}" 12 | CFG_AZURE_STORAGE_ACCOUNT="${cfg.azure.storage_account}" 13 | CFG_COMMON_HOSTNAME="${cfg.common.controller_hostname}" 14 | CFG_COMMON_ENABLE_PATTY="${cfg.common.enable_patty}" 15 | CFG_LICENSE_FLAVOR="${cfg.license.flavor}" 16 | CFG_LICENSE_NODES="${cfg.license.nodes}" 17 | CFG_LICENSE_TOKEN="${cfg.secrets.smartlicense_token.secret}" 18 | CFG_SAS_TOKEN="${cfg.sas_token}" 19 | CFG_SYS_PASS="${cfg.secrets.sys.secret}" 20 | CFG_SYS_USER="${cfg.secrets.sys.username}" 21 | CFG_TARGET="${cfg.target}" 22 | 23 | -------------------------------------------------------------------------------- /modules/deploy/data/virl2-base-config.yml: -------------------------------------------------------------------------------- 1 | admins: 2 | controller: 3 | password: ${cfg.secrets.app.secret} 4 | username: ${cfg.secrets.app.username} 5 | system: 6 | password: ${cfg.secrets.sys.secret} 7 | username: ${cfg.secrets.sys.username} 8 | cluster_interface: "" 9 | compute_secret: ${cfg.secrets.cluster.secret} 10 | controller_name: ${cfg.common.controller_hostname} 11 | copy_iso_to_disk: false 12 | hostname: ${hostname} 13 | interactive: false 14 | is_cluster: ${cfg.cluster.enable_cluster} 15 | is_compute: ${is_compute} 16 | is_configured: false 17 | is_controller: ${is_controller} 18 | primary_interface: "" 19 | ssh_server: true 20 | use_ipv4_dhcp: true 21 | skip_primary_bridge: true 22 | -------------------------------------------------------------------------------- /modules/deploy/dummy/main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | output "public_ip" { 8 | value = "0.0.0.0" 9 | } 10 | 11 | -------------------------------------------------------------------------------- /modules/deploy/main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | resource "random_id" "id" { 8 | byte_length = 4 9 | } 10 | 11 | locals { 12 | options = { 13 | cfg = var.cfg 14 | cml = file("${path.module}/data/cml.sh") 15 | common = file("${path.module}/data/common.sh") 16 | copyfile = file("${path.module}/data/copyfile.sh") 17 | del = file("${path.module}/data/del.sh") 18 | interface_fix = file("${path.module}/data/interface_fix.py") 19 | license = file("${path.module}/data/license.py") 20 | extras = var.extras 21 | rand_id = random_id.id.hex 22 | } 23 | } 24 | 25 | -------------------------------------------------------------------------------- /modules/deploy/output.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | output "public_ip" { 8 | value = ( 9 | (var.cfg.target == "aws") ? 10 | module.aws[0].public_ip : 11 | (var.cfg.target == "azure" ? 
12 |     module.azure[0].public_ip :
13 |     "127.0.0.1"
14 |     )
15 |   )
16 | }
17 | 
--------------------------------------------------------------------------------
/modules/deploy/variables.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | variable "cfg" {
8 |   type        = any
9 |   description = "YAML configuration of the CML deployment"
10 | }
11 | 
12 | variable "extras" {
13 |   type        = any
14 |   description = "extra shell variable definitions"
15 | }
16 | 
17 | # AWS related vars
18 | 
19 | variable "aws_access_key" {
20 |   type        = string
21 |   description = "AWS access key / credential for the provisioning user"
22 |   default     = ""
23 | }
24 | 
25 | variable "aws_secret_key" {
26 |   type        = string
27 |   description = "AWS secret key matching the access key"
28 |   default     = ""
29 | }
30 | 
31 | # Azure related vars
32 | 
33 | variable "azure_subscription_id" {
34 |   type        = string
35 |   description = "Azure subscription ID"
36 |   default     = ""
37 | }
38 | 
39 | variable "azure_tenant_id" {
40 |   type        = string
41 |   description = "Azure tenant ID"
42 |   default     = ""
43 | }
44 | 
45 | 
--------------------------------------------------------------------------------
/modules/readyness/main.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | data "cml2_system" "state" {
8 |   timeout       = "20m"
9 |   ignore_errors = true
10 | }
11 | 
12 | # Ignoring errors in the system data source handles the window when the
13 | # public IP of the AWS instance is already known but not yet reachable,
14 | # which produces "gateway timeout", "service unavailable" and similar
15 | # errors, especially when going through a proxy.
16 | 
--------------------------------------------------------------------------------
/modules/readyness/output.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | output "state" {
8 |   value = data.cml2_system.state
9 | }
10 | 
--------------------------------------------------------------------------------
/modules/readyness/terraform.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | terraform {
8 |   required_providers {
9 |     cml2 = {
10 |       source  = "CiscoDevNet/cml2"
11 |       version = ">=0.6.2"
12 |     }
13 |   }
14 |   required_version = ">= 1.1.0"
15 | }
16 | 
--------------------------------------------------------------------------------
/modules/secrets/conjur-off.t-f:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | module "conjur" {
8 |   source = "./dummy"
9 |   count  = var.cfg.secret.manager == "conjur" ?
1 : 0 10 | cfg = var.cfg 11 | } 12 | -------------------------------------------------------------------------------- /modules/secrets/conjur-on.t-f: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | terraform { 8 | required_providers { 9 | conjur = { 10 | source = "localhost/cyberark/conjur" 11 | version = ">=0.6.7" 12 | } 13 | } 14 | required_version = ">= 1.1.0" 15 | } 16 | 17 | module "conjur" { 18 | source = "./conjur" 19 | count = var.cfg.secret.manager == "conjur" ? 1 : 0 20 | cfg = var.cfg 21 | } 22 | -------------------------------------------------------------------------------- /modules/secrets/conjur.tf: -------------------------------------------------------------------------------- 1 | conjur-off.t-f -------------------------------------------------------------------------------- /modules/secrets/conjur/main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | locals { 8 | exclude_keys = [ 9 | "raw_secret", 10 | ] 11 | secrets = { 12 | for k, v in var.cfg.secret.secrets : k => merge( 13 | { 14 | for k2, v2 in v : k2 => v2 if !contains(local.exclude_keys, k2) 15 | }, 16 | { 17 | secret = data.conjur_secret.conjur_secret[k].value 18 | } 19 | ) 20 | } 21 | } 22 | 23 | data "conjur_secret" "conjur_secret" { 24 | for_each = tomap(var.cfg.secret.secrets) 25 | name = each.value.path 26 | } 27 | -------------------------------------------------------------------------------- /modules/secrets/conjur/output.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | output "secrets" { 8 | value = local.secrets 9 | sensitive = true 10 | } 11 | -------------------------------------------------------------------------------- /modules/secrets/conjur/terraform.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | terraform { 8 | required_providers { 9 | conjur = { 10 | source = "localhost/cyberark/conjur" 11 | } 12 | } 13 | required_version = ">= 1.1.0" 14 | } 15 | -------------------------------------------------------------------------------- /modules/secrets/conjur/variables.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | variable "cfg" { 8 | type = any 9 | description = "YAML configuration of the CML deployment" 10 | } 11 | -------------------------------------------------------------------------------- /modules/secrets/dummy/main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | # 8 | # This is the dummy secrets module. It is used for testing purposes only 9 | # where you don't want to use a real secrets manager. 
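# It is also what the secrets module falls back to when neither Conjur nor
# Vault has been enabled through prepare.sh or prepare.bat.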
# This module returns an object in which each entry's raw_secret is copied
# to secret. If a raw_secret does not exist, a random password of length 16
# is generated instead, e.g. { username = "x", raw_secret = "y" } comes back
# as { username = "x", secret = "y" }.
12 | # https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/password
13 | #
14 | 
15 | locals {
16 |   random_password_length = 16
17 |   exclude_keys = [
18 |     "raw_secret",
19 |   ]
20 |   secrets = {
21 |     for k, v in var.cfg.secret.secrets : k => merge(
22 |       # In case the YAML refers to a value that's empty, we check for null.
23 |       # This happens with a randomly generated cluster secret.
24 |       (v != null ?
25 |         {
26 |           for k2, v2 in v : k2 => v2 if !contains(local.exclude_keys, k2)
27 |         }
28 |         :
29 |         {}
30 |       ),
31 |       {
32 |         secret = try(v.raw_secret, random_password.random_secret[k].result)
33 |       }
34 |     )
35 |   }
36 | }
37 | 
38 | resource "random_password" "random_secret" {
39 |   for_each = toset([for k in keys(var.cfg.secret.secrets) : k])
40 |   length   = local.random_password_length
41 |   # Some special characters would need to be escaped, so disable them.
42 |   special = false
43 | }
44 | 
--------------------------------------------------------------------------------
/modules/secrets/dummy/output.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | output "secrets" {
8 |   value     = local.secrets
9 |   sensitive = true
10 | }
11 | 
--------------------------------------------------------------------------------
/modules/secrets/dummy/terraform.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | terraform {
8 |   required_providers {
9 |     random = {
10 |       source  = "hashicorp/random"
11 |       version = "3.6.1"
12 |     }
13 |   }
14 |   required_version = ">= 1.1.0"
15 | }
16 | 
--------------------------------------------------------------------------------
/modules/secrets/dummy/variables.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | variable "cfg" {
8 |   type        = any
9 |   description = "YAML configuration of the CML deployment"
10 | }
11 | 
--------------------------------------------------------------------------------
/modules/secrets/main.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | module "dummy" {
8 |   source = "./dummy"
9 |   cfg    = var.cfg
10 | }
11 | 
--------------------------------------------------------------------------------
/modules/secrets/output.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | output "secrets" {
8 |   value = (
9 |     var.cfg.secret.manager == "conjur" ?
10 |     try(module.conjur[0].secrets, {}) :
11 |     var.cfg.secret.manager == "vault" ?
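    # vault configured: use the vault module's secrets; any other manager
    # value falls back to the dummy module below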
12 | try(module.vault[0].secrets, {}) : 13 | module.dummy.secrets 14 | ) 15 | sensitive = true 16 | } 17 | -------------------------------------------------------------------------------- /modules/secrets/variables.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | variable "cfg" { 8 | type = any 9 | description = "YAML configuration of the CML deployment" 10 | } 11 | -------------------------------------------------------------------------------- /modules/secrets/vault-off.t-f: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | module "vault" { 8 | source = "./dummy" 9 | count = var.cfg.secret.manager == "vault" ? 1 : 0 10 | cfg = var.cfg 11 | } 12 | -------------------------------------------------------------------------------- /modules/secrets/vault-on.t-f: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | terraform { 8 | required_providers { 9 | vault = { 10 | source = "hashicorp/vault" 11 | version = ">=4.2.0" 12 | } 13 | } 14 | required_version = ">= 1.1.0" 15 | } 16 | 17 | provider "vault" { 18 | skip_child_token = var.cfg.secret.vault.skip_child_token 19 | } 20 | 21 | module "vault" { 22 | source = "./vault" 23 | count = var.cfg.secret.manager == "vault" ? 1 : 0 24 | cfg = var.cfg 25 | } 26 | -------------------------------------------------------------------------------- /modules/secrets/vault.tf: -------------------------------------------------------------------------------- 1 | vault-off.t-f -------------------------------------------------------------------------------- /modules/secrets/vault/main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | locals { 8 | exclude_keys = [ 9 | "raw_secret", 10 | ] 11 | secrets = { 12 | for k, v in var.cfg.secret.secrets : k => merge( 13 | { 14 | for k2, v2 in v : k2 => v2 if !contains(local.exclude_keys, k2) 15 | }, 16 | { 17 | secret = data.vault_kv_secret_v2.vault_secret[k].data[v.field] 18 | } 19 | ) 20 | } 21 | } 22 | 23 | # Note we're using the v2 version of the key value secret engine 24 | # https://developer.hashicorp.com/vault/docs/secrets/kv/kv-v2 25 | data "vault_kv_secret_v2" "vault_secret" { 26 | for_each = tomap(var.cfg.secret.secrets) 27 | mount = var.cfg.secret.vault.kv_secret_v2_mount 28 | name = each.value.path 29 | } 30 | -------------------------------------------------------------------------------- /modules/secrets/vault/output.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 
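#
# The secrets map exported below mirrors cfg.secret.secrets, with each
# entry's secret attribute filled in from the kv-v2 data source in main.tf;
# for a hypothetical entry with path "cml/app" and field "password", the
# "password" field of that kv-v2 secret ends up in secret.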
5 | # 6 | 7 | output "secrets" { 8 | value = local.secrets 9 | sensitive = true 10 | } 11 | -------------------------------------------------------------------------------- /modules/secrets/vault/terraform.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | terraform { 8 | required_providers { 9 | vault = { 10 | source = "hashicorp/vault" 11 | } 12 | } 13 | required_version = ">= 1.1.0" 14 | } 15 | -------------------------------------------------------------------------------- /modules/secrets/vault/variables.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | variable "cfg" { 8 | type = any 9 | description = "YAML configuration of the CML deployment" 10 | } 11 | -------------------------------------------------------------------------------- /output.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | output "cml2info" { 8 | value = { 9 | "address" : module.deploy.public_ip 10 | "del" : nonsensitive("ssh -p1122 ${local.cfg.secrets.sys.username}@${module.deploy.public_ip} /provision/del.sh") 11 | "url" : "https://${module.deploy.public_ip}" 12 | "version" : module.ready.state.version 13 | } 14 | } 15 | 16 | output "cml2secrets" { 17 | value = local.cfg.secrets 18 | sensitive = true 19 | } 20 | -------------------------------------------------------------------------------- /prepare.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | rem 3 | rem This file is part of Cisco Modeling Labs 4 | rem Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | rem All rights reserved. 6 | rem 7 | 8 | goto start 9 | 10 | :ask_yes_no 11 | set /p "answer=%~1 (yes/no): " 12 | set "answer=%answer:~0,1%" 13 | if /i "%answer%"=="y" ( 14 | exit /b 1 15 | ) else if /i "%answer%"=="n" ( 16 | exit /b 0 17 | ) else ( 18 | echo Please answer yes or no. 19 | goto :ask_yes_no 20 | ) 21 | 22 | :start 23 | cd modules\deploy 24 | 25 | call :ask_yes_no "Cloud - Enable AWS?" 26 | if errorlevel 1 ( 27 | echo Enabling AWS. 28 | copy aws-on.t-f aws.tf 29 | ) else ( 30 | echo Disabling AWS. 31 | copy aws-off.t-f aws.tf 32 | ) 33 | 34 | call :ask_yes_no "Cloud - Enable Azure?" 35 | if errorlevel 1 ( 36 | echo Enabling Azure. 37 | copy azure-on.t-f azure.tf 38 | ) else ( 39 | echo Disabling Azure. 40 | copy azure-off.t-f azure.tf 41 | ) 42 | 43 | cd ..\.. 44 | cd modules\secrets 45 | 46 | call :ask_yes_no "External Secrets Manager - Enable Conjur?" 47 | if errorlevel 1 ( 48 | echo Enabling Conjur. 49 | copy conjur-on.t-f conjur.tf 50 | ) else ( 51 | echo Disabling Conjur. 52 | copy conjur-off.t-f conjur.tf 53 | ) 54 | call :ask_yes_no "External Secrets Manager - Enable Vault?" 55 | if errorlevel 1 ( 56 | echo Enabling Vault. 57 | copy vault-on.t-f vault.tf 58 | ) else ( 59 | echo Disabling Vault. 
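rem Note that the .t-f variants are copied into place here, while prepare.sh
rem symlinks them; symlinks typically require elevated privileges on Windows.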
60 | copy vault-off.t-f vault.tf 61 | ) 62 | -------------------------------------------------------------------------------- /prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This file is part of Cisco Modeling Labs 4 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | 8 | cd $(dirname $0) 9 | 10 | ask_yes_no() { 11 | while true; do 12 | read -p "$1 (yes/no): " answer 13 | answer=$(echo "$answer" | tr '[:upper:]' '[:lower:]') 14 | case $answer in 15 | yes | y | true | 1) 16 | return 0 17 | ;; 18 | no | n | false | 0) 19 | return 1 20 | ;; 21 | *) 22 | echo "Please answer yes or no." 23 | ;; 24 | esac 25 | done 26 | } 27 | 28 | cd modules/deploy 29 | 30 | if ask_yes_no "Cloud - Enable AWS?"; then 31 | echo "Enabling AWS." 32 | rm aws.tf 33 | ln -s aws-on.t-f aws.tf 34 | else 35 | echo "Disabling AWS." 36 | rm aws.tf 37 | ln -s aws-off.t-f aws.tf 38 | fi 39 | if ask_yes_no "Cloud - Enable Azure?"; then 40 | echo "Enabling Azure." 41 | rm azure.tf 42 | ln -s azure-on.t-f azure.tf 43 | else 44 | echo "Disabling Azure." 45 | rm azure.tf 46 | ln -s azure-off.t-f azure.tf 47 | fi 48 | 49 | cd ../.. 50 | cd modules/secrets 51 | 52 | if ask_yes_no "External Secrets Manager - Enable CyberArk Conjur?"; then 53 | echo "Enabling CyberArk Conjur." 54 | rm conjur.tf || true 55 | ln -s conjur-on.t-f conjur.tf 56 | else 57 | echo "Disabling CyberArk Conjur." 58 | rm conjur.tf || true 59 | ln -s conjur-off.t-f conjur.tf 60 | fi 61 | if ask_yes_no "External Secrets Manager - Enable Hashicorp Vault?"; then 62 | echo "Enabling Hashicorp Vault." 63 | rm vault.tf || true 64 | ln -s vault-on.t-f vault.tf 65 | else 66 | echo "Disabling Hashicorp Vault." 67 | rm vault.tf || true 68 | ln -s vault-off.t-f vault.tf 69 | fi 70 | -------------------------------------------------------------------------------- /terraform.tf: -------------------------------------------------------------------------------- 1 | # 2 | # This file is part of Cisco Modeling Labs 3 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 4 | # All rights reserved. 5 | # 6 | 7 | terraform { 8 | required_providers { 9 | cml2 = { 10 | source = "CiscoDevNet/cml2" 11 | version = ">=0.6.2" 12 | } 13 | } 14 | required_version = ">= 1.1.0" 15 | } 16 | -------------------------------------------------------------------------------- /upload-images-to-aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Copyright (c) 2019-2025, Cisco Systems, Inc. 5 | # All rights reserved. 6 | # 7 | # This script can be installed on an on-prem CML controller which also has the 8 | # required reference platform images and definitions. 9 | # 10 | # In addition to standard tools already installed on the controller, the AWS CLI 11 | # utility must be installed and configured. For configuration, the access key 12 | # and secret must be known. Then, run "aws configure" to provide these. 13 | # 14 | # Alternatively, they can be provided via environment variables: 15 | # AWS_ACCESS_KEY_ID=ABCD AWS_SECRET_ACCESS_KEY=EF1234 aws ec2 describe-instances 16 | # 17 | 18 | DEFAULT_BUCKET="aws-cml-images" 19 | 20 | BUCKETNAME=${1:-$DEFAULT_BUCKET} 21 | ISO=${2:-/var/lib/libvirt/images} 22 | PKG=${3:-cml2_*.pkg} 23 | 24 | function help() { 25 | cmd=$(basename $0) 26 | cat </dev/null virl-base-images 94 | options=$(find . 
-name '*.yaml' -exec sh -c 'basename '{}'; echo "on"' \;)
95 | popd &>/dev/null
96 | 
97 | if [ -z "$options" ]; then
98 |     echo "there are apparently no images in the directory specified ($ISO)"
99 |     echo "please ensure that there is at least one image and node definition"
100 |     exit 255
101 | fi
102 | 
103 | selection=$(
104 |     dialog --stdout --no-items --separate-output --checklist \
105 |         "Select images to copy to AWS bucket \"${BUCKETNAME}\"" 0 60 20 $options
106 | )
107 | s=$?
108 | clear
109 | if [ $s -eq 255 ]; then
110 |     echo "reference platform image upload aborted..."
111 |     exit 255
112 | fi
113 | 
114 | declare -A nodedefs
115 | for imagedef in $selection; do
116 |     fullpath=$(find $ISO -name $imagedef)
117 |     defname=$(sed -nE '/^node_definition/s/^.*:(\s+)?(\S+)$/\2/p' $fullpath)
118 |     nodedefs[$defname]="1"
119 | done
120 | 
121 | if [ -n "$cmlpkg" ]; then
122 |     dialog --progressbox "Upload software package to bucket" 20 70 < <(
123 |         aws s3 cp $cmlpkg s3://${BUCKETNAME}/
124 |     )
125 | fi
126 | 
127 | target="s3://${BUCKETNAME}/refplat"
128 | 
129 | dialog --progressbox "Upload node definitions to bucket" 20 70 < <(
130 |     for nodedef in ${!nodedefs[@]}; do
131 |         fname=$(grep -l -Ee "^id:(\s+)?${nodedef}$" $ISO/node-definitions/*)
132 |         aws s3 cp $fname $target/node-definitions/
133 |         s=$?
134 |         if [ $s -ne 0 ]; then
135 |             clear
136 |             echo "An error occurred during node definition upload, exiting..."
137 |             exit 255
138 |         fi
139 |     done
140 | )
141 | 
142 | dialog --progressbox "Upload images to bucket" 20 70 < <(
143 |     for imagedef in $selection; do
144 |         imagedir=$(find $ISO -name $imagedef | xargs dirname)
145 |         # https://www.linuxjournal.com/article/8919
146 |         # ${imagedir <-- from variable imagedir
147 |         # ## <-- greedy front trim
148 |         # * <-- matches anything
149 |         # / <-- until the last '/'
150 |         # }
151 |         aws s3 cp --recursive $imagedir $target/virl-base-images/${imagedir##*/}
152 |         s=$?
153 |         if [ $s -ne 0 ]; then
154 |             clear
155 |             echo "An error occurred during image upload, exiting..."
156 |             exit 255
157 |         fi
158 |     done
159 | )
160 | 
161 | clear
162 | echo "done!"
163 | 
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
1 | #
2 | # This file is part of Cisco Modeling Labs
3 | # Copyright (c) 2019-2025, Cisco Systems, Inc.
4 | # All rights reserved.
5 | #
6 | 
7 | # Common variables
8 | 
9 | variable "cfg_file" {
10 |   type        = string
11 |   description = "Name of the YAML config file to use"
12 |   default     = "config.yml"
13 | }
14 | 
15 | variable "cfg_extra_vars" {
16 |   type        = string
17 |   description = "extra variable definitions, typically empty"
18 |   default     = null
19 | }
20 | 
21 | # AWS related vars
22 | 
23 | variable "aws_access_key" {
24 |   type        = string
25 |   description = "AWS access key / credential for the provisioning user"
26 |   default     = "notset"
27 | }
28 | 
29 | variable "aws_secret_key" {
30 |   type        = string
31 |   description = "AWS secret key matching the access key"
32 |   default     = "notset"
33 | }
34 | 
35 | # Azure related vars
36 | 
37 | variable "azure_subscription_id" {
38 |   type        = string
39 |   description = "Azure subscription ID"
40 |   default     = "notset"
41 | }
42 | 
43 | variable "azure_tenant_id" {
44 |   type        = string
45 |   description = "Azure tenant ID"
46 |   default     = "notset"
47 | }
48 | 
--------------------------------------------------------------------------------