├── .gitattributes
├── .gitignore
├── .lfsconfig
├── Dockerfile
│   └── Dockerfile
├── LICENSE
├── README.md
├── docs
│   ├── 1-add-networking.png
│   ├── 2-virtualmac-portgroup.png
│   ├── 3-standard-switch.png
│   ├── 4-no-adapter.png
│   ├── 5-submit.png
│   ├── add-vm-network.md
│   ├── docker-compose.yml
│   ├── faqs.md
│   ├── nats-transient-network.png
│   ├── nsx-t-gen-Part1.mp4
│   ├── nsx-t-gen-Part2.mp4
│   ├── nsx-t-gen-jobs.png
│   ├── nsx-v-staticrouting.png
│   └── static-routing-setup.md
├── functions
│   ├── check_null_variables.sh
│   ├── copy_ovas.sh
│   ├── create_ansible_cfg.sh
│   ├── create_answerfile.sh
│   ├── create_extra_yaml_args.sh
│   ├── create_hosts.sh
│   ├── delete_vm_using_govc.sh
│   ├── deploy_ova_using_govc.sh
│   ├── uninstall-nsx-t-v2.1-vibs.sh
│   ├── uninstall-nsx-t-v2.2-vibs.sh
│   └── uninstall-nsx-vibs.yml
├── pipelines
│   ├── blobstore-upload-nsx-t-install-v2.1.yml
│   ├── nsx-t-for-canned-pks-params.yml
│   ├── nsx-t-for-pks-params.yml
│   ├── nsx-t-install.yml
│   ├── offline-nsx-t-install-v2.1.yml
│   ├── sample-params.yml
│   └── user-inputs-for-canned-pks.yml
├── python
│   ├── client.py
│   ├── mobclient.py
│   ├── nsx_t_gen.py
│   ├── nsx_t_status.py
│   ├── nsx_t_wipe.py
│   └── yaml2json.py
└── tasks
    ├── add-nsx-t-routers
    │   ├── task.sh
    │   └── task.yml
    ├── config-nsx-t-extras
    │   ├── task.sh
    │   └── task.yml
    ├── install-nsx-t
    │   ├── copy_and_customize_ovas.sh
    │   ├── task.sh
    │   └── task.yml
    └── uninstall-nsx-t
        ├── task.sh
        └── task.yml
/.gitattributes:
--------------------------------------------------------------------------------
1 | docs/nsx-t-gen-Part1.mp4 filter=lfs diff=lfs merge=lfs -text
2 | docs/nsx-t-gen-Part2.mp4 filter=lfs diff=lfs merge=lfs -text
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Compiled source #
2 | ###################
3 | *.com
4 | *.class
5 | *.dll
6 | *.exe
7 | *.o
8 | *.so
9 | *.pyc
10 |
--------------------------------------------------------------------------------
/.lfsconfig:
--------------------------------------------------------------------------------
1 | [lfs]
2 | url = https://github.com/sparameswaran/nsx-t-gen/info/lfs
3 | fetchexclude="*"
4 |
--------------------------------------------------------------------------------
/Dockerfile/Dockerfile:
--------------------------------------------------------------------------------
1 | # Meant for supporting nsx-t v2.1
2 | FROM ubuntu:17.10
3 | COPY ./ ./
4 |
5 | #RUN apt-get update && apt-get install -y vim curl wget default-jdk maven gradle golang git jq python ruby-dev python-pip python-dev libffi-dev libssl-dev libxml2-dev libxslt1-dev zlib1g-dev sshpass
6 |
7 | RUN apt-get update \
8 | && apt-get install -y \
9 | vim \
10 | curl \
11 | wget \
12 | golang \
13 | git \
14 | jq \
15 | python \
16 | ruby-dev \
17 | python-pip \
18 | python-dev \
19 | libffi-dev \
20 | libssl-dev \
21 | libxml2 \
22 | libxml2-dev \
23 | libxslt1-dev \
24 | zlib1g-dev \
25 | sshpass \
26 | openssl \
27 | libssl-dev \
28 | libffi-dev \
29 | python-dev \
30 | build-essential
31 |
32 | RUN pip install --upgrade pip
33 | RUN pip install \
34 | pyVim \
35 | pyvmomi \
36 | six \
37 | pyquery \
38 | xmltodict \
39 | ipcalc \
40 | click \
41 | Jinja2 \
42 | shyaml \
43 | dicttoxml \
44 | pprint \
45 | PyYAML \
46 | requests \
47 | && pip install --upgrade \
48 | wheel \
49 | setuptools \
50 | lxml \
51 | enum \
52 | cffi \
53 | cryptography \
54 | enum34 \
55 | pyasn1==0.4.1 \
56 | && pip uninstall -y enum
57 |
58 | # Add ansible support
59 | RUN apt-get update \
60 | && apt-get install -y software-properties-common \
61 | && apt-add-repository -y ppa:ansible/ansible \
62 | && apt-get update \
63 | && apt-get install -y ansible
64 |
65 |
66 | # Add ovftool
67 | #COPY ./VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle .
68 | #RUN chmod +x ./VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle
69 | #RUN ./VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle --eulas-agreed
70 |
71 |
72 | # Add nsx-t python sdk and runtime libraries
73 | COPY ./nsx_python_sdk-*.whl .
74 | COPY ./vapi_runtime-*.whl .
75 | COPY ./vapi_common-*.whl .
76 | COPY ./vapi_common_client-*.whl .
77 | RUN pip install nsx_python_sdk-*.whl \
78 | vapi_runtime-*.whl \
79 | vapi_common-*.whl \
80 | vapi_common_client-*.whl
81 |
82 | # Overwrite the pyopenssl 0.15.1 with 17.5.0 as ansible breaks otherwise
83 | RUN pip install -U pyopenssl==17.5.0
84 |
85 | # Include govc, build using golang-1.8
86 | ENV GOPATH="/root/go" PATH="$PATH:/root/go/bin"
87 | RUN mkdir -p /root/go/src /root/go/bin /root/go/pkg \
88 | && go get -u github.com/vmware/govmomi/govc \
89 | && cp /root/go/bin/* /usr/bin/ \
90 | && cp /root/go/bin/* /usr/local/bin/
91 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # nsx-t-gen
2 | Concourse pipeline to install NSX-T v2.x
3 |
4 | # DEPRECATED
5 |
6 | This pipeline is not supported or maintained going forward as of Oct 17th, 2018. It was a best-effort, unofficial, unsupported piece of work. All development work has now stopped.
7 |
8 | Kindly use [VMware's nsx-t-datacenter-ci-pipelines](https://github.com/vmware/nsx-t-datacenter-ci-pipelines), which contains a (slightly older) fork of this repository's code and is supported and maintained by VMware.
9 |
10 |
11 | # Details
12 |
13 | The concourse pipeline uses [ansible scripts](https://github.com/yasensim/nsxt-ansible) created by Yasen Simeonov and [forked](https://github.com/sparameswaran/nsxt-ansible) by the author of this pipeline.
14 |
15 | There is an associated blog post detailing the features, options here: [Introducing nsx-t-gen: Automating NSX-T Install with Concourse](https://allthingsmdw.blogspot.com/2018/05/introducing-nsx-t-gen-automating-nsx-t.html)
16 |
17 | We recommend checking the [FAQs](./docs/faqs.md) for full details on handling various issues/configurations before starting the install.
18 |
19 | Things handled by the pipeline:
20 |
21 | * Deploy the VMware NSX-T Manager, Controller and Edge ova images
22 | * Configure the Controller cluster and add it to the management plane
23 | * Configure hostswitches, profiles, transport zones
24 | * Configure the Edges and ESXi Hosts to be part of the Fabric
25 | * Create T0 Router (one per run, in HA vip mode) with uplink and static route
26 | * Configure arbitrary set of T1 Routers with logical switches and ports
27 | * NAT Rules setup for T0 Router
28 | * Container IP Pools and External IP Blocks
29 | * Self-signed cert generation and registration against NSX-T Manager
30 | * Route redistribution for T0 Router
31 | * HA Spoofguard Switching Profile
32 | * Load Balancer (with virtual servers and server pool) creation
33 | * Security Group creation and association with the server pools to handle dynamic membership
34 |
35 | Not handled by pipeline:
36 |
37 | * BGP or Static Route setup (outside of NSX-T) for T0 Routers
38 |
39 | Pipeline
40 |

41 |
42 | Users can choose to run the full install or portions of it (a base install to bring up the Fabric and management plane, with configuration of routers and extras run separately).
43 |
44 | ## Canned-pks
45 | For users looking at installing NSX-T in fully isolated or offline environments, please check [canned-pks](https://github.com/pivotalservices/canned-pks) for how to use an offline version of this pipeline that uses cached/offline copies of the various dependencies (docker images, pipeline sources, OVAs and other install bits, along with the offline version of the pipeline itself). The canned-pks install takes a very opinionated view of the install (only a single compute cluster is supported, versus any number of compute clusters in nsx-t-gen; a pre-determined set of parameters for things that are not environment-specific; etc.).
46 |
47 | ## Note
48 | To install `NSX-T v2.2`, use the `nsxt-2.2` branch of pipeline templates of this repo. Similarly, use `nsxt-2.3` for installing `NSX-T v2.3`.
49 |
50 | ## Warning
51 | This is purely a work-in-progress and not officially supported by anyone. Use it with caution, at your own risk!
52 |
53 | Also, NSX-T cannot co-reside on the same ESXi host and cluster as one already running NSX-V. So, ensure you are either using a different set of vCenter, clusters, and hosts, or at least a cluster that does not have NSX-V. The ESXi hosts should also be running at least vSphere 6.5. Please refer to the NSX-T documentation for the detailed set of requirements for NSX-T.
54 |
55 | ## Pre-reqs
56 | * Concourse setup
57 | - If using [docker-compose to bring up local Concourse](https://github.com/concourse/concourse-docker) and there is a web proxy, make sure to specify the proxy server and dns details following the template provided in [docs/docker-compose.yml](docs/docker-compose.yml)
58 |   - If the webserver and the ova images are still not reachable from concourse without a proxy in the middle, check whether the ubuntu firewall (ufw) got enabled. This can happen if you used concourse directly as well as via docker-compose. In that case, either relax the iptables rules, allow routed traffic in ufw, or just disable it:
59 | ```
60 | sudo ufw allow 8080
61 | sudo ufw default allow routed
62 | ```
63 | * There should be at least one free vmnic on each of the ESXi hosts
64 | * Ovftool would fail to deploy the Edge VMs with a `Host did not have any virtual network defined` error message in the absence of a `VM Network` port group or a (non NSX-T) standard switch. So, ensure one of them is present.
65 | Refer to [Adding *VM Network*](./docs/add-vm-network.md) for detailed instructions.
66 | * Docker hub connectivity to pull docker image for the concourse pipeline
67 | * NSX-T 2.1 ova images and ovftool install bits for linux
68 | * Web server to serve the NSX-T ova images and ovftool
69 | ```
70 | # Sample nginx server to host bits
71 | sudo apt-get install nginx
72 | cp <*ova> /var/www/html
73 | # Edit nginx config and start
74 | ```
75 | * vCenter Access
76 | * SSH enabled on the Hosts
77 |
78 | ## Offline envs
79 | This is only applicable if the docker image `nsxedgegen/nsx-t-gen-worker:` is unavailable or the env is restricted to offline use.
80 |
81 | * Download the VMware ovftool install bundle (linux 64-bit version) along with the nsx-t python modules (including the vapi_common, vapi_runtime, and vapi_common_client libs matching the nsx-t version) and copy them into the Dockerfile folder
82 | * Create and push the docker image using
83 | ```
84 | docker build -t nsx-t-gen-worker Dockerfile
85 | # To test image: docker run --rm -it nsx-t-gen-worker bash
86 | docker tag nsx-t-gen-worker nsxedgegen/nsx-t-gen-worker:latest
87 | docker push nsxedgegen/nsx-t-gen-worker:latest
88 | ```
89 |
90 |
91 | ## VMware NSX-T 2.* bits
92 |
93 | Download the following bits and make them available on a webserver so the pipeline can use them to install the NSX-T 2.x bits:
94 |
95 | ```
96 | # Download NSX-T 2.1 bits from
97 | # https://my.vmware.com/group/vmware/details?downloadGroup=NSX-T-210&productId=673
98 |
99 | #nsx-mgr-ova
100 | nsx-unified-appliance-2.1.0.0.0.7380167.ova
101 |
102 | #nsx-ctrl-ova
103 | nsx-controller-2.1.0.0.0.7395493.ova
104 |
105 | #nsx-edge-ova
106 | nsx-edge-2.1.0.0.0.7395502.ova
107 |
108 | # Download VMware ovftool from https://my.vmware.com/group/vmware/details?productId=614&downloadGroup=OVFTOOL420#
109 | VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle
110 | ```
111 |
112 | Edit the pipelines/nsx-t-install.yml with the correct webserver endpoint and path to the files.
113 |
114 | ## Register with concourse
115 | Use the sample params template file (under pipelines) to fill in the nsx-t, vsphere and other configuration details.
116 | Register the pipeline and params against concourse.
117 |
118 | ## Sample setup
119 | Copy over the sample params as nsx-t-params.yml and then use the following script to register the pipeline (after editing the concourse endpoint, target, etc.).
120 |
121 | ```
122 | #!/bin/bash
123 |
124 | # EDIT names and domain
125 | CONCOURSE_ENDPOINT=concourse.corp.local.com
126 | CONCOURSE_TARGET=nsx-concourse
127 | PIPELINE_NAME=install-nsx-t
128 |
129 | alias fly-s="fly -t $CONCOURSE_TARGET set-pipeline -p $PIPELINE_NAME -c pipelines/nsx-t-install.yml -l nsx-t-params.yml"
130 | alias fly-l="fly -t $CONCOURSE_TARGET containers | grep $PIPELINE_NAME"
131 | alias fly-h="fly -t $CONCOURSE_TARGET hijack -b "
132 |
133 | echo "Concourse target set to $CONCOURSE_ENDPOINT"
134 | echo "Login using fly"
135 | echo ""
136 |
137 | fly --target $CONCOURSE_TARGET login --insecure --concourse-url https://${CONCOURSE_ENDPOINT} -n main
138 |
139 | ```
140 | After registering the pipeline, unpause it before kicking off any job group.
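
A rough sketch of the follow-up fly commands (the job name in the last line is only illustrative; list the actual jobs of your registered pipeline first):

```
fly-s                                                   # register/update the pipeline (alias from the script above)
fly -t $CONCOURSE_TARGET unpause-pipeline -p $PIPELINE_NAME
fly -t $CONCOURSE_TARGET jobs -p $PIPELINE_NAME         # list the available jobs
fly -t $CONCOURSE_TARGET trigger-job -j $PIPELINE_NAME/install-nsx-t   # example job name
```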
141 |
142 | ## Video Recording of Pipeline Execution
143 |
144 | Follow the two part video for more details on the steps and usage of the pipeline:
145 | * [Part 1](docs/nsx-t-gen-Part1.mp4) - Install of OVAs and bringing up VMs
146 | * [Part 2](docs/nsx-t-gen-Part2.mp4) - Rest of install and config
147 |
148 | ## Options to run
149 | * Run the `full-install-nsx-t` group for a full deployment of the OVAs followed by configuration of routers and nat rules.
150 |
151 | * Run the smaller independent group:
152 | > `base-install` for just the deployment of the OVAs and the control and management plane.
153 | This uses ansible scripts under the covers.
154 |
155 | > `add-routers` for creation of the various transport zones, nodes, hostswitches and T0/T1 Routers with Logical switches. This also uses ansible scripts under the covers.
156 |
157 | > `config-nsx-t-extras` for adding nat rules, route redistribution, HA Switching Profile, Self-signed certs. This particular job is currently done via direct api calls and does not use Ansible scripts.
158 |
--------------------------------------------------------------------------------
/docs/1-add-networking.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sparameswaran/nsx-t-gen/3fc26baff9e8f8da1990432c5d200899b6d7e33a/docs/1-add-networking.png
--------------------------------------------------------------------------------
/docs/2-virtualmac-portgroup.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sparameswaran/nsx-t-gen/3fc26baff9e8f8da1990432c5d200899b6d7e33a/docs/2-virtualmac-portgroup.png
--------------------------------------------------------------------------------
/docs/3-standard-switch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sparameswaran/nsx-t-gen/3fc26baff9e8f8da1990432c5d200899b6d7e33a/docs/3-standard-switch.png
--------------------------------------------------------------------------------
/docs/4-no-adapter.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sparameswaran/nsx-t-gen/3fc26baff9e8f8da1990432c5d200899b6d7e33a/docs/4-no-adapter.png
--------------------------------------------------------------------------------
/docs/5-submit.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sparameswaran/nsx-t-gen/3fc26baff9e8f8da1990432c5d200899b6d7e33a/docs/5-submit.png
--------------------------------------------------------------------------------
/docs/add-vm-network.md:
--------------------------------------------------------------------------------
1 | ## Adding *VM Network*
2 |
3 | Thanks to Niran Even Chen, [@NiranEC](https://twitter.com/NiranEC), for the detailed steps.
4 |
5 | The ESXi servers where the OVAs get deployed are required to have a default `VM Network` port group.
6 | This is due to a bug with the Ovftool that the pipeline is utilizing. Users would get an error about `Host did not have any virtual network defined` and ovftool would fail to deploy the NSX Edges on the NSX-T management plane.
7 |
8 | If you don’t have the `VM Network`, set up a VSS port group as detailed below.
9 |
10 | * Create a “VM Network” VSS port group on each server in the management cluster; no NICs are required to be attached to it.
11 | * In the vCenter client highlight the ESXi server and click Configure -> Virtual Switches -> Add networking:
12 | ![Add networking](1-add-networking.png)
13 | * Select `Virtual Machine Port Group for a Standard Switch`
14 | ![Virtual Machine Port Group](2-virtualmac-portgroup.png)
15 | * Select `New Standard Switch`
16 | ![New Standard Switch](3-standard-switch.png)
17 | * No Adapters are needed. Click Next.
18 | ![No adapters needed](4-no-adapter.png)
19 | * Click Next to accept the default `VM Network` port group label
20 | * Submit Changes
21 | ![Submit changes](5-submit.png)
22 |
23 | * Repeat steps for all the other ESXi hosts that need to be part of the transport nodes.
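
As an alternative to the UI steps above, here is a rough govc sketch for creating the same standard switch and port group from the CLI (this assumes govc is installed and GOVC_URL/GOVC_USERNAME/GOVC_PASSWORD point at the vCenter; the host name and switch name below are placeholders):

```
# Create an empty standard switch (no uplink NICs) and the "VM Network" port group on one host
govc host.vswitch.add -host <esxi-host-name> vSwitch1
govc host.portgroup.add -host <esxi-host-name> -vswitch vSwitch1 "VM Network"
# Repeat (or loop) for every ESXi host that will run the Edge VMs
```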
--------------------------------------------------------------------------------
/docs/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # Original source: https://github.com/concourse/concourse-docker
2 | # Update or sync up with latest version
3 | # Has additional flags for Web Proxy and DNS settings
4 |
5 | version: '3'
6 |
7 | services:
8 | # purely for testing proxy settings of docker using ubuntu docker image
9 | #ubuntu:
10 | # image: ubuntu:17.10
11 | # command: sleep 600000
12 |
13 | concourse-db:
14 | image: postgres
15 | environment:
16 | - POSTGRES_DB=concourse
17 | - POSTGRES_PASSWORD=concourse_pass
18 | - POSTGRES_USER=concourse_user
19 | - PGDATA=/database
20 |
21 | concourse-web:
22 | image: concourse/concourse
23 | command: web
24 | links: [concourse-db]
25 | depends_on: [concourse-db]
26 | ports: ["8080:8080"] # EDIT if necessary
27 | volumes: ["./keys/web:/concourse-keys"]
28 | environment:
29 | - CONCOURSE_POSTGRES_HOST=concourse-db
30 | - CONCOURSE_POSTGRES_USER=concourse_user
31 | - CONCOURSE_POSTGRES_PASSWORD=concourse_pass
32 | - CONCOURSE_POSTGRES_DATABASE=concourse
33 | - CONCOURSE_EXTERNAL_URL= # EDIT ME
34 | - CONCOURSE_BASIC_AUTH_USERNAME=concourse
35 | - CONCOURSE_BASIC_AUTH_PASSWORD=concourse
36 | - CONCOURSE_NO_REALLY_I_DONT_WANT_ANY_AUTH
37 | - CONCOURSE_GARDEN_DNS_PROXY_ENABLE=true
38 | - CONCOURSE_WORKER_GARDEN_DNS_PROXY_ENABLE=true
39 | - CONCOURSE_BAGGAGECLAIM_DRIVER=overlay
40 |
41 | # Edit dns server for CONCOURSE_GARDEN_DNS_SERVER
42 | # Edit the no_proxy to your env to allow direct access
43 | # like the webserver hosting the ova bits.
44 | # Ensure there are no quotes or spaces in the values
45 |
46 | concourse-worker:
47 | image: concourse/concourse
48 | command: worker
49 | privileged: true
50 | links: [concourse-web]
51 | depends_on: [concourse-web]
52 | volumes:
53 | - "./keys/worker:/concourse-keys"
54 | environment:
55 | - CONCOURSE_TSA_HOST=concourse-web:2222
56 | - CONCOURSE_GARDEN_NETWORK
57 | - CONCOURSE_GARDEN_DNS_PROXY_ENABLE=true
58 | - CONCOURSE_WORKER_GARDEN_DNS_PROXY_ENABLE=true
59 | - CONCOURSE_GARDEN_DNS_SERVER= # EDIT ME
60 | - CONCOURSE_BAGGAGECLAIM_DRIVER=overlay
61 | # Fill details below if env uses a web proxy
62 | # Ensure there are no quotes or spaces in the values
63 | - http_proxy_url= # EDIT ME - sample: http://192.168.10.5:3128/
64 | - https_proxy_url= # EDIT ME - sample: http://192.168.10.5:3128/
65 | - no_proxy= # EDIT ME - sample: localhost,127.0.0.1,WEBSERVER-IP,8.8.8.8,10.193.99.2
66 | - HTTP_PROXY= # EDIT ME - sample: http://192.168.10.5:3128/
67 | - HTTPS_PROXY= # EDIT ME - sample: http://192.168.10.5:3128/
68 | - NO_PROXY= # EDIT ME - sample: localhost,127.0.0.1,WEBSERVER-IP,8.8.8.8,10.193.99.2
69 |
--------------------------------------------------------------------------------
/docs/faqs.md:
--------------------------------------------------------------------------------
1 | ## FAQs
2 | * Basics of [Concourse](https://concourse-ci.org/)
3 | * Basics of running [Concourse using docker-compose](https://github.com/concourse/concourse-docker)
4 | * Basics of the pipeline functioning
5 | * Check the blog post: [ Introducing nsx-t-gen: Automating NSX-T Install with Concourse](https://allthingsmdw.blogspot.com/2018/05/introducing-nsx-t-gen-automating-nsx-t.html)
6 | * `Adding additional edges after first install`.
7 |   * Recommend planning ahead and creating all the edges in the beginning rather than adding them later.
8 |   * If it's really required, the recommendation is to manually install any additional edges by deploying the OVAs directly, making sure the names follow the previously installed edge instance naming convention (like nsx-t-edge-0?). Then update the parameters to specify the additional edge ips (assuming they use the same edge naming convention) and let the controller (as part of the base-install or just full-install) rejoin the edges, followed by the other jobs/tasks. Only recommended for advanced users who are ready to drill down/debug.
9 | * Downloading the bits
10 | * Download NSX-T 2.1 bits from
11 | https://my.vmware.com/group/vmware/details?downloadGroup=NSX-T-210&productId=673
12 | Check https://my.vmware.com for link to new installs
13 | * Download [VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle v4.2](https://my.vmware.com/group/vmware/details?productId=614&downloadGroup=OVFTOOL420#)
14 |
15 | Ensure ovftool version is 4.2. Older 4.0 has issues with deploying ova images.
16 | * Installing Webserver
17 | Install nginx and copy the bits to be served
18 | ```
19 | # Sample nginx server to host bits
20 | sudo apt-get install nginx
21 | cp <*ova> /var/www/html
22 | # Edit nginx config and start
23 | ```
24 | * Unable to reach the webserver hosting the ova bits
25 | * Check for a web proxy interfering with the concourse containers.
26 | If using docker-compose, use the sample [docker-compose](./docker-compose.yml) template to add DNS and proxy settings. Add the webserver to the no_proxy list.
27 |
28 | Ensure you are using docker-compose version `1.18+` and docker compose file version is `3`
29 |
30 | Check with docker documentation on specifying proxies: https://docs.docker.com/network/proxy/
31 |
32 | Ensure the `/etc/systemd/system/docker.service.d/http-proxy.conf` specifies the HTTP_PROXY and HTTPS_PROXY env variables so docker can go out via the proxy.
33 | ```
34 | [Service]
35 | Environment="HTTP_PROXY=http://proxy.corp.local" # EDIT the proxy
36 | Environment="HTTPS_PROXY=http://proxy.corp.local" # EDIT the proxy
37 | Environment="NO_PROXY=localhost,127.0.0.1,"
38 | ```
39 |
40 |    Stop the docker service, reload the daemons, and then start the docker service back up:
41 | ```
42 | systemctl stop docker
43 | systemctl daemon-reload # to reload the docker service config
44 | systemctl start docker
45 | ```
46 |
47 | Or use the ~/.docker/config.json approach to specify the proxy.
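    A minimal `~/.docker/config.json` sketch (the proxy addresses and no_proxy entries below are placeholders; this needs a reasonably recent docker client):
    ```
    {
      "proxies": {
        "default": {
          "httpProxy": "http://proxy.corp.local:3128",
          "httpsProxy": "http://proxy.corp.local:3128",
          "noProxy": "localhost,127.0.0.1,WEBSERVER-IP"
        }
      }
    }
    ```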
48 |
49 |    * Disable the ubuntu firewall (ufw) or relax the iptables rules if concourse was run both directly via docker and via docker-compose.
50 | Change ufw
51 | ```
52 | sudo ufw allow 8080
53 | sudo ufw default allow routed
54 | ```
55 | or relax iptables rules
56 | ```
57 | sudo iptables -P INPUT ACCEPT
58 | sudo iptables -P FORWARD ACCEPT
59 | sudo iptables -P OUTPUT ACCEPT
60 | ```
61 |
62 | * If running out of disk space with docker compose, use `docker volume prune` command to clean up unused volumes.
63 |
64 | * If things are still not reachable from the containers (like the github repos or the webserver), try adding an additional docker image to run alongside concourse, like a vanilla ubuntu image, for debugging purposes; shell into it, run apt-get update, install curl, and then try to curl an outside address.
65 |
66 | Sample entry for adding ubuntu docker container image to docker-compose.yml.
67 | ```
68 | services:
69 | # new ubuntu docker image
70 | ubuntu:
71 | image: ubuntu:17.10
72 | command: sleep 600000
73 |
74 | concourse-web:
75 | .....
76 | ```
77 | Find the docker container for the ubuntu image using `docker ps`
78 | Then shell into it using `docker exec -it <container-id> /bin/bash`
79 | Run the following and see if it can connect to the outside via the proxy:
80 | ```
81 | apt-get update -y && apt-get install -y curl
82 | curl www.google.com
83 | ```
84 | If the above curl command works but concourse is still not able to go out, then check the various `CONCOURSE_*` env variables specified for the proxy and garden and dns settings.
85 |
86 | * Pipeline exits after reporting problem with ovas or ovftool
87 |    * Verify the file names and paths are correct. If the pipeline's download of the ovas at the start completed too quickly, it usually indicates errors with the downloaded files, as each ova is upwards of 500 MB.
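      A quick manual check on the webserver copies (this just mirrors the size/type checks the pipeline's copy_ovas.sh performs; the filenames are examples):
      ```
      ls -al nsx-*.ova VMware-ovftool-*.bundle    # each ova should be several hundred MB
      file nsx-unified-appliance-*.ova            # should report a (POSIX) tar archive
      file VMware-ovftool-*.bundle                # should report an executable
      ```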
88 | * Running out of memory resources on vcenter
89 | * Turn off reservation
90 | ```
91 | nsx_t_keep_reservation: false # for POC or memory constrained setup
92 | ```
93 | * Install pipeline reports the VMs are unreachable after deployment of the OVAs and creation of the VMs.
94 | Sample output:
95 | ```
96 | Deployment of NSX Edge ova succcessfull!! Continuing with rest of configuration!!
97 | Rechecking the status and count of Mgr, Ctrl, Edge instances !!
98 | All VMs of type NSX Mgr up, total: 1
99 | All VMs of type NSX Controller up, total: 3
100 | All VMs of type NSX Edge down, total: 2
101 | Would deploy NSX Edge ovas
102 |
103 | Some problem with the VMs, one or more of the vms (mgr, controller, edge) failed to come up or not accessible!
104 | Check the related vms!!
105 | ```
106 | If the vms are actually up but you suspect a timing issue, just rerun the pipeline task.
107 | It should detect that the vms are up, skip redeploying the ovas, and continue from where it left off earlier.
108 |
109 | If the vms appear to be unreachable over ssh and they are on the same host, the problem might be due to a known issue: https://kb.vmware.com/s/article/2093588
110 | ```Deploying a high number of virtual machines at the same time results in the network adapter connection failure and reports the error: Failed to connect virtual device Ethernet0 (2093588)```
111 | Reboot the esxi host that's hosting the vms and rerun the pipeline.
112 | * Unable to deploy the Edge OVAs with error message: `Host did not have any virtual network defined`.
113 | * Refer to [add-vm-network](./add-vm-network.md)
114 | * Or deploy the ovas directly ensuring the name of the edge instances follows the naming convention (like nsx-t-edge-01)
115 | * Unable to add ESXi Hosts. Error: `FAILED - RETRYING: Check Fabric Node Status` with error during ssh connection to the hosts.
116 | * Empty the value for `esxi_hosts_config` and fill in `compute_vcenter_...` section in the parameter file.
117 | ```
118 | esxi_hosts_config: # Leave it blank
119 |
120 | # Fill following fields
121 | compute_vcenter_manager: # FILL ME - any name for the compute vcenter manager
122 | compute_vcenter_host: # FILL ME - Addr of the vcenter host
123 | compute_vcenter_usr: # FILL ME - Use Compute vCenter Esxi hosts as transport node
124 | compute_vcenter_pwd: # FILL ME - Use Compute vCenter Esxi hosts as transport node
125 | compute_vcenter_cluster: # FILL ME - Use Compute vCenter Esxi hosts as transport node
126 | ```
127 | Apply the new params using set-pipeline and then rerun the pipeline.
128 | * Error during adding ESXi Hosts as Fabric nodes.
129 | Error message: ```mpa_connectivity_status_details : Client has not responded to 2 consecutive heartbeats,```
130 | Check the NSX Manager Web UI and see if the Hosts got added as Fabric Nodes (under Fabric -> Hosts) after some delay.
131 | If the hosts now appear healthy and part of the Fabric on the NSX Mgr, then retry the add-routers job in concourse and it should proceed to the remaining steps.
132 | * Using a different Compute Manager or different ESXi hosts for the Transport nodes than the vCenter used for the NSX-T components
133 |   * The main vcenter configs would be used for deploying the NSX Mgr, Controller and Edges.
134 |   The ESXi Hosts for the transport nodes can be on a different vcenter or compute manager. Use the compute_vcenter_... fields or esxi_hosts_config to add them as needed. Caution: if the NSX Edges are on a completely different network than the Hosts, it is suboptimal, as the Edge has to be the gateway for the overlay/tep network shared with the hosts.
135 | * Control/specify which Edges are used to host a given T0 Router.
136 | * Edit the edge_indexes section within T0Router definition to specify different edge instances.
137 | Index starts with 1 (would map to nsx-t-edge-01).
138 | ```
139 | nsx_t_t0router_spec: |
140 | t0_router:
141 | name: DefaultT0Router
142 | ha_mode: 'ACTIVE_STANDBY'
143 | # Specify the edges to be used for hosting the T0Router instance
144 | edge_indexes:
145 | # Index starts from 1 -> denoting nsx-t-edge-01
146 | primary: 1 # Index for primary edge to be used
147 | secondary: 2 # Index for secondary edge to be used
148 | vip: 10.13.12.103/27
149 | ....
150 | ```
151 | * Adding additional T1 Routers or Logical Switches
152 | * Modify the parameters to specify additional T1 routers or switches and rerun add-routers.
153 | * Adding additional T0 Routers
154 | * Only one T0 Router can be created during a run of the pipeline. But additional T0Routers can be added by modifying the parameters and rerunning the add-routers and config-nsx-t-extras jobs.
155 |    * Create a new copy or edit the parameters to modify the T0Router definition (it should reference the indexes of nsx-t edges that are not actively used, either as primary or backup, by another T0 Router).
156 | * Edit T0Router references across T1 Routers as well as any tags that should be used to identify a specific T0Router.
157 | * Add or edit any additional ip blocks or pools, nats, lbrs
158 | * Register parameters with the pipeline
159 | * Rerun add-routers followed by config-nsx-t-extras job group
160 |
161 | * Static Routing for NSX-T T0 Router
162 | Please refer to the [Static Routing Setup](./static-routing-setup.md) for details on the static routing.
163 |
164 | * Errors with NAT rule application
165 | Sample error1: `[Routing] Service IPs are overlapping with logical router ports`
166 | Sample error2: `[Routing] NAT service IP(s) overlap with HA VIP subnet`
167 |   If the externally assigned ip used as a SNAT translated ip falls within the router uplink port range (for example, the T0 router is using a /27 and the specified translated ip falls within that /27 range), then the above errors might get thrown. Restrict or limit the cidr range by using something like a /29 (configured in the T0 spec), which limits it to just 6 usable ips, and use an external ip that is outside of this uplink router ip range as the translated ip.
168 |
169 | Sample:
170 | ```
171 | nsx_t_t0router_spec: |
172 | t0_router:
173 | name: DefaultT0Router
174 | ...
175 | vip: 10.13.12.103/29 # T0 router vip - make sure this range does not intrude with the external vip ranges
176 | ip1: 10.13.12.101/29 # T0 router uplink ports - make sure this range does not intrude with the external vip ranges
177 | ip2: 10.13.12.102/29 # T0 router uplink ports - make sure this range does not intrude with the external vip ranges
178 | ```
179 | And external ip:
180 | ```
181 | nsx_t_external_ip_pool_spec: |
182 | external_ip_pools:
183 | - name: snat-vip-pool-for-pas
184 | cidr: 10.100.0.0/24 # Should be a 0/24 or some valid cidr, matching the external exposed uplink
185 | gateway: 10.100.0.1
186 | start: 10.100.0.31 # Should not include gateway, not overlap with the T0 router uplink ips; reserve some for Ops Mgr, LB Vips for GoRouter, SSH Proxy
187 | end: 10.100.0.200 # Should not include gateway, not overlap with the T0 router uplink ips
188 | # Specify tags with PAS 2.0 and NSX Tile 2.1.0
189 | ```
190 | And nat rule:
191 | ```
192 | nsx_t_nat_rules_spec: |
193 | nat_rules:
194 | # Sample entry for PAS Infra network SNAT - egress
195 | - t0_router: DefaultT0Router
196 | nat_type: snat
197 | source_network: 192.168.1.0/24 # PAS Infra network cidr
198 | translated_network: 10.100.0.12 # SNAT External Address for PAS networks, outside of the T0 uplink ip range
199 | rule_priority: 8000
200 | ```
201 |
--------------------------------------------------------------------------------
/docs/nats-transient-network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sparameswaran/nsx-t-gen/3fc26baff9e8f8da1990432c5d200899b6d7e33a/docs/nats-transient-network.png
--------------------------------------------------------------------------------
/docs/nsx-t-gen-Part1.mp4:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:d94936f56bfaf25df90e5b0ba4c0c74266d13c86a4db1a534f6de5f3fda168df
3 | size 19186063
4 |
--------------------------------------------------------------------------------
/docs/nsx-t-gen-Part2.mp4:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:c23c1113ae896a532bcd5923fe74db7131b4ddc698d9d466d776e22a7b82bce5
3 | size 42899155
4 |
--------------------------------------------------------------------------------
/docs/nsx-t-gen-jobs.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sparameswaran/nsx-t-gen/3fc26baff9e8f8da1990432c5d200899b6d7e33a/docs/nsx-t-gen-jobs.png
--------------------------------------------------------------------------------
/docs/nsx-v-staticrouting.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sparameswaran/nsx-t-gen/3fc26baff9e8f8da1990432c5d200899b6d7e33a/docs/nsx-v-staticrouting.png
--------------------------------------------------------------------------------
/docs/static-routing-setup.md:
--------------------------------------------------------------------------------
1 | # Configuring Static Routes for NSX-T T0 Router
2 |
3 | The components running on NSX-T Fabric can be exposed to the external world via two options: BGP and Static Routes. BGP is not discussed in this doc.
4 |
5 | ## Exposing External IPs over the T0 Router using static routing
6 |
7 | The NSX-T T0 router should be the gateway for all the deployed components that are using it (like PAS, PKS, or Ops Mgr). How this portion of the static routing is set up differs based on the conditions for routing to the NSX-T T0 Router.
8 |
9 | ### Using Transient Routable networks to route over T0 Router
10 |
11 | If the client setup allows a self-administered CIDR or routable subnet to be used (like a POD or some dedicated private network that is not necessarily shared with others), then one can use a transient routable network for the exposed external ips while keeping the actual ip pools separate. This can be the case with a private Ubiquiti Edge Router that acts as the gateway to the entire NSX-T install and where a new routable network can be added.
12 |
13 | Sample step to include static routing in a VMware vPod Environment on the main vPod Router vm:
14 | ```
15 | post-up route add -net 10.208.40.0 netmask 255.255.255.0 gw 192.168.100.3
16 | pre-down route del -net 10.208.40.0 netmask 255.255.255.0 gw 192.168.100.3
17 | ```
18 |
19 | | T0 Router | T0 IP | Transient Routable subnet for External IP Pool | \# of IPs that can be exposed | Sample DNAT and SNAT |
20 | |-------------|---------------|-------------------|----|-----------------|
21 | | T0 Router 1 | 10.193.105.10 | Pool1 - 10.208.40.0/24 | 254| Map DNAT from 10.208.40.11 to be translated to Ops Mgr at 172.23.1.5; SNAT from internal network 172.23.1.0/24 to 10.208.40.10 for going out |
22 | | T0 Router 1 | 10.193.105.10 | Pool2 - 10.208.50.0/24 | 254 | Map DNAT from 10.208.50.11 to be translated to something internal at 172.23.2.10; SNAT from internal network 172.23.2.0/24 to 10.208.50.10 for going out |
23 | | T0 Router 2 | 10.193.105.15 | Pool3 - 10.208.60.0/24 | 254 | Map DNAT from 10.208.60.11 to be translated to something internal at 182.23.1.10; SNAT from internal network 182.23.1.0/24 to 10.208.60.10 for going out |
24 | | ......... | ....... | .......... | ...| ..... |
25 |
26 | Here the transient network only exists between the External Router and the connected T0 Router in the case of the VMware vPod env. In other, more exposed envs, the transient subnet needs to be routable from outside. IPs from this transient network are used in the NAT configurations to reach things like the Ops Mgr internal private ip, as shown below. Load balancers might use an IP from the External IP pool to expose a VIP.
27 |
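For illustration, the first row of the table above could be written with the same nat_rules spec format used in the [FAQs](./faqs.md) (a sketch only; the dnat field names here are assumptions based on the snat sample shown there):

```
nsx_t_nat_rules_spec: |
  nat_rules:
  # DNAT: expose Ops Mgr (172.23.1.5) on the transient/external ip 10.208.40.11
  - t0_router: DefaultT0Router
    nat_type: dnat
    destination_network: 10.208.40.11   # assumed field name - the externally exposed ip
    translated_network: 172.23.1.5      # Ops Mgr internal ip
    rule_priority: 8000
  # SNAT: egress for the internal 172.23.1.0/24 network via 10.208.40.10
  - t0_router: DefaultT0Router
    nat_type: snat
    source_network: 172.23.1.0/24
    translated_network: 10.208.40.10
    rule_priority: 8000
```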
28 | After deciding on the IPs that are already reserved for exposed components like Ops Mgr, PAS GoRouter, PAS SSH Proxy, Harbor, PKS Controller etc., allot or divide up the remaining IPs for the PAS and PKS external ip pools by tweaking the range of the external IP Pool.
29 |
30 | | Pool Name | CIDR | Start | End | Notes
31 | |-----------|----------------|---------------|-------------|-----------------|
32 | | PAS-Pool1 | 10.208.40.0/24 | 10.208.40.21 | 10.208.40.254 | Reserving first 20 IPs for Ops Mgr, PAS external facing components. Externally exposed PAS Apps would use from the PAS-Pool.
33 | | PKS-Pool2 | 10.208.50.0/24 | 10.208.50.21 | 10.208.50.254 | Reserving first 20 IPs for PKS Controller, Harbor external facing components. Rest can be used by the PKS Clusters.
34 |
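A sketch of the corresponding external ip pool spec, using the format shown in the [FAQs](./faqs.md) (the gateway addresses below are assumptions for this example):

```
nsx_t_external_ip_pool_spec: |
  external_ip_pools:
  - name: PAS-Pool1
    cidr: 10.208.40.0/24
    gateway: 10.208.40.1     # assumption - adjust to the actual gateway of the transient network
    start: 10.208.40.21
    end: 10.208.40.254
  - name: PKS-Pool2
    cidr: 10.208.50.0/24
    gateway: 10.208.50.1     # assumption
    start: 10.208.50.21
    end: 10.208.50.254
```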
35 | Key note: the transient network used for the external ips and statically routed via the T0 Router should be routable (reachable) from its external clients.
36 |
37 | ### Using Same CIDR for T0 and External IP Pool
38 |
39 | If the T0 Router and the external ip pool need to share the same CIDR and no additional routable network can be used, then it requires careful planning to set up the routing of the externally exposed IPs via the T0 Router ip. This is applicable in setups where a /24 CIDR is allotted to the client and everything needs to be within that CIDR to be routable or exposed to the outside, as there can be several similar setups in a big shared infrastructure.
40 |
41 | Sample requirement: the user is allowed to use only a specific CIDR for exposing things to the outside. All IPs need to be in the 10.193.105.28/25 range, and everything needs to be routed via the T0 Router IP: 10.193.105.10.
42 |
43 | This requires a careful division of the subnet (here 10.193.105.0/24) into smaller subnets so a specific CIDR would be statically routed through the T0 router without overlapping against the IPs meant for the T0 Routers.
44 |
45 | Here, we are dividing 10.193.105.0/24 into 2 big subnets, with the first half allotted for the T0 Router (the split can only be in powers of 2) and the external IPs in the second half (10.193.105.128-10.193.105.255) getting routed via 10.193.105.10.
46 |
47 | | T0 Router | T0 IP | Subnet for external ip pool | \# of IPs that can be exposed |
49 | |-------------|---------------|-------------------|---------|
50 | | T0 Router 1 | 10.193.105.10 | Pool1 - 10.193.105.128/25 | 128 |
51 |
52 | If more than one pool needs to be exposed, then divide the subnet to make them smaller so they are all routed via the same T0 Router:
53 |
54 | | T0 Router | T0 IP | Subnet for external ip pools | \# of IPs that can be exposed |
56 | |-------------|---------------|-------------------|---------|
57 | | T0 Router 1 | 10.193.105.10 | Pool1 - 10.193.105.128/26 | 64 |
58 | | T0 Router 1 | 10.193.105.10 | Pool2 - 10.193.105.192/26 | 64 |
59 |
60 |
61 | If there are additional T0 Routers, then this becomes a task of reducing the range for the external pools and sharing it with the other T0 Router instances.
62 | In the same way, if more external pools need to be exposed, keep shrinking the pool size.
63 |
64 | | T0 Router | T0 IP | Subnet for external ip pool | \# of IPs that can be exposed |
66 | |------------|---------------|-------------------|---|
67 | | T0 Router 1 | 10.193.105.10 | Pool1 - 10.193.105.64/27 | 32 |
68 | | T0 Router 1 | 10.193.105.10 | Pool2 - 10.193.105.96/27 | 32 |
69 | | T0 Router 2 | 10.193.105.15 | Pool3 - 10.193.105.128/26 | 64 |
70 | | T0 Router 3 | 10.193.105.20 | Pool4 - 10.193.105.192/26 | 64 |
71 | | ......... | ....... | .......... | ...|
72 |
73 | If there are even more additional T0 Routers, then the above CIDRs for the external ip pools need to be made even smaller to make room for another exposed subnet (like 10.193.105.128-10.193.105.191 using 10.193.105.128/26) and so on.
74 |
75 | The above table assumes that the pool of IPs exposed to the outside is quite small and there is just one /24 CIDR that can be used for a given install/client for both the T0 Router and the external IPs, and everything needs to fit within that /24 range.
76 |
77 | In the static route configuration, the next hop would be the gateway of the T0 Router. Set the admin distance for the hop to be 1.
78 |
79 | Sample Image of configuring static route when T0 Router and external ip pool are on the same CIDR
80 | ![Static route configuration when T0 Router and external ip pool share the same CIDR](nsx-v-staticrouting.png)
81 |
82 | Similar to the transient network approach, after deciding on the IPs that are already reserved for exposed components like Ops Mgr, PAS GoRouter, PAS SSH Proxy, Harbor, PKS Controller etc., allot or divide up the remaining IPs for the PAS and PKS external ip pools by tweaking the range of the external IP Pool.
83 |
84 | | Pool Name | CIDR | Start | End | Notes |
85 | |-----------|----------------|---------------|-------------|-----------------|
86 | | PAS-Pool1 | 10.193.105.64/27 | 10.193.105.72 | 10.193.105.94 | Reserving first 8 IPs for Ops Mgr, PAS external facing components. Externally exposed PAS Apps would use from the PAS-Pool.|
87 | | PKS-Pool2 | 10.193.105.96/27 | 10.193.105.104 | 10.193.105.126 | Reserving first 8 IPs for PKS Controller, Harbor external facing components. Rest can be used by the PKS clusters.|
88 |
89 | ### Sample NAT setup
90 |
91 | Sample Image of NATs on T0 Router (external ip pools are on different CIDR than T0)
92 | ![NAT rules on the T0 Router](nats-transient-network.png)
93 |
--------------------------------------------------------------------------------
/functions/check_null_variables.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
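# Any exported env variable whose value is the literal string "null"
# (typically an unset/blank pipeline parameter) gets reset to an empty string.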
3 | function check_null_variables {
4 |
5 | for token in $(env | grep '=' | grep "^[A-Z]*" | grep '=null$' | sed -e 's/=.*//g')
6 | do
7 | export ${token}=""
8 | done
9 | }
10 |
11 | if [ "$NSX_T_VERSION" == "" -o "$NSX_T_VERSION" == "null" ]; then
12 | export NSX_T_VERSION=2.1
13 | fi
14 |
15 | check_null_variables
16 |
--------------------------------------------------------------------------------
/functions/copy_ovas.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function install_ovftool {
4 |
5 | # Install provided ovftool
6 | if [ ! -e "/usr/bin/ovftool" ]; then
7 | pushd $ROOT_DIR/ovftool
8 | ovftool_bundle=$(ls *)
9 | chmod +x $ovftool_bundle
10 |
11 | size_of_tool=$(ls -al $ovftool_bundle | awk '{print $5}')
12 | if [ $size_of_tool -lt 10000000 ]; then
13 |       echo "ovftool downloaded is less than 10 MB!!"
14 | echo "Check the file name/paths. Exiting from ova copy and deploy!!"
15 | exit 1
16 | fi
17 |
18 | is_binary=$(file $ovftool_bundle | grep "executable" || true)
19 | if [ "$is_binary" == "" ]; then
20 | echo "ovftool downloaded was not a valid binary image!!"
21 | echo "Check the file name/paths. Exiting from ova copy and deploy!!"
22 | exit 1
23 | fi
24 |
25 | ./${ovftool_bundle} --eulas-agreed
26 | popd
27 | echo "Done installing ovftool"
28 | else
29 | echo "ovftool already installed!!"
30 | fi
31 | echo ""
32 | }
33 |
34 | function check_ovas {
35 |
36 |   for ova_file in $ROOT_DIR/nsx-mgr-ova/$NSX_T_MANAGER_OVA \
37 |                   $ROOT_DIR/nsx-ctrl-ova/$NSX_T_CONTROLLER_OVA \
38 |                   $ROOT_DIR/nsx-edge-ova/$NSX_T_EDGE_OVA
39 | do
40 | is_tar=$(file $ova_file | grep "tar archive" || true)
41 | if [ "$is_tar" == "" ]; then
42 | echo "File $ova_file downloaded was not a valid OVA image!!"
43 | echo "Check the file name/paths. Exiting from ova copy and deploy!!"
44 | exit 1
45 | fi
46 | done
47 | }
48 |
49 | function copy_ovas_to_OVA_ISO_PATH {
50 |
51 | mkdir -p $OVA_ISO_PATH
52 | check_ovas
53 |
54 | mv $ROOT_DIR/nsx-mgr-ova/$NSX_T_MANAGER_OVA \
55 | $ROOT_DIR/nsx-ctrl-ova/$NSX_T_CONTROLLER_OVA \
56 | $ROOT_DIR/nsx-edge-ova/$NSX_T_EDGE_OVA \
57 | $OVA_ISO_PATH
58 |
59 | echo "Done moving ova images into $OVA_ISO_PATH"
60 | echo ""
61 | }
62 |
63 | function create_customize_ova_params {
64 |
65 | cat > customize_ova_vars.yml <<-EOF
66 | ovftool_path: '/usr/bin'
67 | ova_file_path: "$OVA_ISO_PATH"
68 | nsx_gw_filename: "$NSX_T_EDGE_OVA"
69 | nsx_manager_filename: "$NSX_T_MANAGER_OVA"
70 | nsx_controller_filename: "$NSX_T_CONTROLLER_OVA"
71 | EOF
72 |
73 | if [ "$NSX_T_KEEP_RESERVATION" == "false" ]; then
74 | echo "nsx_t_keep_reservation: $NSX_T_KEEP_RESERVATION" >> customize_ova_vars.yml
75 | fi
76 |
77 | #echo "$NSX_T_SIZING_SPEC" >> customize_ova_vars.yml
78 | }
79 |
--------------------------------------------------------------------------------
/functions/create_ansible_cfg.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function create_ansible_cfg {
4 |
5 | cat > ansible.cfg <<-EOF
6 | [defaults]
7 | host_key_checking = false
8 | EOF
9 | }
--------------------------------------------------------------------------------
/functions/create_answerfile.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function create_base_answerfile {
4 | export NSX_T_MANAGER_SHORT_HOSTNAME=$(echo $NSX_T_MANAGER_FQDN | awk -F '\.' '{print $1}')
5 |
6 | cat > answerfile.yml <<-EOF
7 | ovfToolPath: '/usr/bin'
8 | deployDataCenterName: "$VCENTER_DATACENTER"
9 | deployMgmtDatastoreName: "$VCENTER_DATASTORE"
10 | deployMgmtPortGroup: "$MGMT_PORTGROUP"
11 | deployCluster: "$VCENTER_CLUSTER"
12 | deployMgmtDnsServer: "$DNSSERVER"
13 | deployNtpServers: "$NTPSERVERS"
14 | deployMgmtDnsDomain: "$DNSDOMAIN"
15 | deployMgmtDefaultGateway: $DEFAULTGATEWAY
16 | deployMgmtNetmask: $NETMASK
17 | nsxAdminPass: "$NSX_T_MANAGER_ADMIN_PWD"
18 | nsxCliPass: "$NSX_T_MANAGER_ROOT_PWD"
19 | nsxOvaPath: "$OVA_ISO_PATH"
20 | deployVcIPAddress: "$VCENTER_HOST"
21 | deployVcUser: $VCENTER_USR
22 | deployVcPassword: "$VCENTER_PWD"
23 | compute_manager: "$VCENTER_MANAGER"
24 | cm_cluster: "$VCENTER_CLUSTER"
25 | sshEnabled: True
26 | allowSSHRootAccess: True
27 | nsxInstaller: "$NSX_T_INSTALLER"
28 |
29 | api_origin: 'localhost'
30 |
31 | controllerClusterPass: $NSX_T_CONTROLLER_CLUSTER_PWD
32 |
33 | compute_vcenter_host: "$COMPUTE_VCENTER_HOST"
34 | compute_vcenter_user: "$COMPUTE_VCENTER_USR"
35 | compute_vcenter_password: "$COMPUTE_VCENTER_PWD"
36 | compute_vcenter_cluster: "$COMPUTE_VCENTER_CLUSTER"
37 | compute_vcenter_manager: "$COMPUTE_VCENTER_MANAGER"
38 |
39 | edge_vcenter_host: "$EDGE_VCENTER_HOST"
40 | edge_vcenter_user: "$EDGE_VCENTER_USR"
41 | edge_vcenter_password: "$EDGE_VCENTER_PWD"
42 | edge_vcenter_cluster: "$EDGE_VCENTER_CLUSTER"
43 | edge_dc: "$EDGE_VCENTER_DATACENTER"
44 | edge_datastore: "$EDGE_VCENTER_DATASTORE"
45 | edge_portgroup: "$EDGE_MGMT_PORTGROUP"
46 | edge_dns_server: "$EDGE_DNSSERVER"
47 | edge_dns_domain: "$EDGE_DNSDOMAIN"
48 | edge_ntp_server: "$EDGE_NTPSERVERS"
49 | edge_gw: "$EDGE_DEFAULTGATEWAY"
50 | edge_mask: "$EDGE_NETMASK"
51 |
52 | managers:
53 | nsxmanager:
54 | hostname: $NSX_T_MANAGER_SHORT_HOSTNAME
55 | vmName: $NSX_T_MANAGER_VM_NAME
56 | ipAddress: $NSX_T_MANAGER_IP
57 | ovaFile: $NSX_T_MANAGER_OVA
58 |
59 | EOF
60 |
61 | }
62 |
63 | function create_answerfile {
64 |
65 | create_edge_config
66 | create_controller_config
67 |
68 | create_base_answerfile
69 |
70 |
71 | # Merge controller and edge config with answerfile
72 | cat controller_config.yml >> answerfile.yml
73 | echo "" >> answerfile.yml
74 | cat edge_config.yml >> answerfile.yml
75 | echo "" >> answerfile.yml
76 | }
77 |
78 | function create_controller_config {
79 | cat > controller_config.yml <<-EOF
80 | controllers:
81 | EOF
82 |
83 | count=1
84 | for controller_ip in $(echo $NSX_T_CONTROLLER_IPS | sed -e 's/,/ /g')
85 | do
86 | cat >> controller_config.yml <<-EOF
87 | $controller_config
88 | nsxController0${count}:
89 | hostname: "${NSX_T_CONTROLLER_HOST_PREFIX}-0${count}.${DNSDOMAIN}"
90 | vmName: "${NSX_T_CONTROLLER_VM_NAME_PREFIX}-0${count}"
91 | ipAddress: $controller_ip
92 | ovaFile: $NSX_T_CONTROLLER_OVA
93 | resource_pool: ""
94 | EOF
95 | (( count++ ))
96 | done
97 |
98 | }
99 |
100 | function create_edge_config {
101 | cat > edge_config.yml <<-EOF
102 | edges:
103 | EOF
104 |
105 | count=1
106 | for edge_ip in $(echo $NSX_T_EDGE_IPS | sed -e 's/,/ /g')
107 | do
108 | cat >> edge_config.yml <<-EOF
109 | $edge_config
110 | ${NSX_T_EDGE_HOST_PREFIX}-0${count}:
111 | hostname: "${NSX_T_EDGE_HOST_PREFIX}-0${count}"
112 | vmName: "${NSX_T_EDGE_VM_NAME_PREFIX}-0${count}"
113 | ipAddress: $edge_ip
114 | ovaFile: $NSX_T_EDGE_OVA
115 | portgroupExt: $NSX_T_EDGE_PORTGROUP_EXT
116 | portgroupTransport: $NSX_T_EDGE_PORTGROUP_TRANSPORT
117 | EOF
118 | (( count++ ))
119 | done
120 | }
121 |
--------------------------------------------------------------------------------
/functions/create_extra_yaml_args.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export PAS_NCP_CLUSTER_TAG='ncp/cluster'
3 |
4 |
5 | # function check_pas_cluster_tag {
6 | # env_variable=$1
7 |
8 | # # Search for the cluster tag and get the value stripping off any quotes around it
9 | # tag_value=$(echo "${!env_variable}" | grep $PAS_NCP_CLUSTER_TAG | awk '{print $2}' | sed -e "s/'//g" | sed -e 's/"//g' )
10 |
11 | # if [ "$tag_value" == "$NSX_T_PAS_NCP_CLUSTER_TAG" ]; then
12 | # echo "true"
13 | # else
14 | # echo "false"
15 | # fi
16 | # }
17 |
18 | # Check for existence of tag matching given value
19 | # Handle both array items (like external ip pool) and single item (like T0Router)
20 | function check_existence_of_tag {
21 | env_variable=$1
22 | tag_name=$2
23 | tag_value=$3
24 |
25 | top_key=$(echo "${!env_variable}" | shyaml keys)
26 | length=$(expr $(echo "${!env_variable}" | shyaml get-values $top_key | grep "^name:" | wc -l) - 1 || true )
27 |
28 | count=0
29 | if [ $length -ge 0 ]; then
30 | for index in $(seq 0 $length)
31 | do
32 | tmpfile=$(mktemp /tmp/temp-yaml.XXXXX)
33 | echo "${!env_variable}" | shyaml get-value ${top_key}.${index} > $tmpfile
34 | given_tag_value=$(cat $tmpfile | grep $tag_name | awk '{print $2}' | sed -e "s/'//g" | sed -e 's/"//g' )
35 | if [ "$given_tag_value" == "$tag_value" ]; then
36 | count=$(expr $count + 1)
37 | fi
38 | rm $tmpfile
39 | done
40 | else
41 | given_tag_value=$(echo "${!env_variable}" | grep $tag_name | awk '{print $2}' | sed -e "s/'//g" | sed -e 's/"//g' )
42 | if [ "$given_tag_value" == "$tag_value" ]; then
43 | count=$(expr $count + 1)
44 | fi
45 | fi
46 |
47 | # Length would be 0 for single items but count would be 1
48 |   # For arrays, count should be greater than length (as we subtracted 1 before)
49 | if [ $count -gt $length ]; then
50 | echo "true"
51 | fi
52 | }
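# Illustrative usage sketch (minimal list-style spec; the pool fields are placeholders):
#   export SAMPLE_POOL_SPEC=$'external_ip_pools:\n- name: pool1\n  tags:\n    ncp/cluster: pks1'
#   check_existence_of_tag SAMPLE_POOL_SPEC 'ncp/cluster' 'pks1'    # expected to echo "true"
#   check_existence_of_tag SAMPLE_POOL_SPEC 'ncp/cluster' 'other'   # expected to echo nothing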
53 |
54 | function check_for_json_payload {
55 | var_name=$1
56 | var_value="${!var_name}"
57 | has_json_open_tag=$(echo "${!var_name}" | grep '{' | wc -l )
58 | has_json_close_tag=$(echo "${!var_name}" | grep '}' | wc -l )
59 |
60 |   if [ $has_json_open_tag -gt 0 -a $has_json_close_tag -gt 0 ]; then
61 | echo "$var_name variable is expanding to JSON, probably missing a pipe character in params file!!, Exiting"
62 | exit -1
63 | else
64 | echo 1
65 | fi
66 | }
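# The check above guards against a params file that drops the YAML block indicator, e.g.
#   nsx_t_t0router_spec: {t0_router: {name: DefaultT0Router}}   # collapses to JSON-ish text -> rejected
#   nsx_t_t0router_spec: |                                      # block scalar with '|' -> accepted
#     t0_router:
#       name: DefaultT0Router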
67 |
68 | function handle_external_ip_pool_spec {
69 | if [ "$NSX_T_EXTERNAL_IP_POOL_SPEC" == "null" -o "$NSX_T_EXTERNAL_IP_POOL_SPEC" == "" ]; then
70 | return
71 | fi
72 |
73 | # Has root element
74 | echo "$NSX_T_EXTERNAL_IP_POOL_SPEC" >> extra_yaml_args.yml
75 | match=$(check_existence_of_tag NSX_T_EXTERNAL_IP_POOL_SPEC 'ncp/cluster' $NSX_T_PAS_NCP_CLUSTER_TAG )
76 | if [ "$NSX_T_EXTERNAL_IP_POOL_SPEC" != "" -a "$match" == "" ]; then
77 | # There can be multiple entries and we can fail to add tag for previous ones
78 |     echo "[Warning] Missing matching ncp/cluster tag in the External IP Pool definition, unsure if it's for PAS or PKS"
79 | #exit 1
80 | #echo " ncp/cluster:$NSX_T_PAS_NCP_CLUSTER_TAG" >> extra_yaml_args.yml
81 | fi
82 | match=$(check_existence_of_tag NSX_T_EXTERNAL_IP_POOL_SPEC 'ncp/external' 'true' )
83 | if [ "$NSX_T_EXTERNAL_IP_POOL_SPEC" != "" -a "$match" == "" ]; then
84 | # There can be multiple entries and we can fail to add tag for previous ones
85 |     echo "[Warning] Missing matching ncp/external tag in the External IP Pool definition, unsure if it's for PAS or PKS"
86 | #exit 1
87 | #echo " ncp/cluster:$NSX_T_PAS_NCP_CLUSTER_TAG" >> extra_yaml_args.yml
88 | fi
89 | echo "" >> extra_yaml_args.yml
90 | }
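# Illustrative shape of NSX_T_EXTERNAL_IP_POOL_SPEC that the checks above expect; only
# 'name' and the two ncp tags matter to this script, the rest is a placeholder sketch:
#   external_ip_pools:
#   - name: snat-vip-pool-for-pas
#     cidr: 10.100.0.0/24
#     tags:
#       ncp/cluster: pks1      # should match nsx_t_pas_ncp_cluster_tag
#       ncp/external: 'true'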
91 |
92 | function handle_container_ip_block_spec {
93 | if [ "$NSX_T_CONTAINER_IP_BLOCK_SPEC" == "null" -o "$NSX_T_CONTAINER_IP_BLOCK_SPEC" == "" ]; then
94 | return
95 | fi
96 |
97 | }
98 |
99 | function handle_container_ip_block_spec {
100 | if [ "$NSX_T_CONTAINER_IP_BLOCK_SPEC" == "null" -o "$NSX_T_CONTAINER_IP_BLOCK_SPEC" == "" ]; then
101 | return
102 | fi
103 |
104 | # Has root element
105 | echo "$NSX_T_CONTAINER_IP_BLOCK_SPEC" >> extra_yaml_args.yml
106 | match=$(check_existence_of_tag NSX_T_CONTAINER_IP_BLOCK_SPEC 'ncp/cluster' $NSX_T_PAS_NCP_CLUSTER_TAG )
107 | if [ "$NSX_T_CONTAINER_IP_BLOCK_SPEC" != "" -a "$match" == "" ]; then
108 | echo "[Warning] Missing matching 'ncp/cluster' tag in the Container IP Block defn"
109 | #exit 1
110 | #echo " ncp/cluster:$NSX_T_PAS_NCP_CLUSTER_TAG" >> extra_yaml_args.yml
111 | fi
112 | echo "" >> extra_yaml_args.yml
113 | }
114 |
115 | function handle_ha_switching_profile_spec {
116 | if [ "$NSX_T_HA_SWITCHING_PROFILE_SPEC" == "null" -o "$NSX_T_HA_SWITCHING_PROFILE_SPEC" == "" ]; then
117 | return
118 | fi
119 |
120 | # Has root element and we expect only one HA switching profile
121 | echo "$NSX_T_HA_SWITCHING_PROFILE_SPEC" >> extra_yaml_args.yml
122 | match=$(check_existence_of_tag NSX_T_HA_SWITCHING_PROFILE_SPEC 'ncp/cluster' $NSX_T_PAS_NCP_CLUSTER_TAG )
123 | # if [ "$NSX_T_HA_SWITCHING_PROFILE_SPEC" != "" -a "$match" == "" ]; then
124 | # echo " ncp/cluster: $NSX_T_PAS_NCP_CLUSTER_TAG" >> extra_yaml_args.yml
125 | # fi
126 | # match=$(check_existence_of_tag NSX_T_HA_SWITCHING_PROFILE_SPEC 'ncp/ha' 'true' )
127 | # if [ "$match" == "" ]; then
128 | # echo " ncp/ha: true" >> extra_yaml_args.yml
129 | # fi
130 | echo "" >> extra_yaml_args.yml
131 | }
132 |
133 | function handle_routers_spec {
134 | if [ "$NSX_T_T0ROUTER_SPEC" == "null" -o "$NSX_T_T0ROUTER_SPEC" == "" ]; then
135 | return
136 | fi
137 |
138 | }
139 |
140 | function handle_routers_spec {
141 | if [ "$NSX_T_T0ROUTER_SPEC" == "null" -o "$NSX_T_T0ROUTER_SPEC" == "" ]; then
142 | return
143 | fi
144 |
145 | # Has root element
146 | echo "$NSX_T_T0ROUTER_SPEC" >> extra_yaml_args.yml
147 | match=$(check_existence_of_tag NSX_T_T0ROUTER_SPEC 'ncp/cluster' $NSX_T_PAS_NCP_CLUSTER_TAG )
148 | if [ "$NSX_T_T0ROUTER_SPEC" != "" -a "$match" == "" ]; then
149 | echo "[Warning] Missing matching 'ncp/cluster' tag in the T0 Router defn, check tags once T0Router is up!!"
150 | #exit 1
151 | fi
152 | echo "" >> extra_yaml_args.yml
153 |
154 | if [ "$NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC" == "null" -o "$NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC" == "" ]; then
155 | return
156 | fi
157 |
158 | # Has root element
159 | echo "$NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC" >> extra_yaml_args.yml
160 | echo "" >> extra_yaml_args.yml
161 | }
162 |
163 | function handle_exsi_vnics {
164 | if [ "$NSX_T_ESXI_VMNICS" == "null" -o "$NSX_T_ESXI_VMNICS" == "" ]; then
165 | return
166 | fi
167 |
168 | count=1
169 | # Create an extra_args.yml file for additional yaml style parameters outside of host and answerfile.yml
170 | echo "esxi_uplink_vmnics:" >> extra_yaml_args.yml
171 | for vmnic in $( echo $NSX_T_ESXI_VMNICS | sed -e 's/,/ /g')
172 | do
173 | #echo " - uplink-${count}: ${vmnic}" >> extra_yaml_args.yml
174 | echo " uplink-${count}: ${vmnic}" >> extra_yaml_args.yml
175 | (( count++ ))
176 | done
177 | echo "" >> extra_yaml_args.yml
178 | }
179 |
180 | function handle_compute_manager_configs {
181 | if [ "$COMPUTE_MANAGER_CONFIGS" == "null" -o "$COMPUTE_MANAGER_CONFIGS" == "" ]; then
182 | return
183 | fi
184 |
185 | echo "$COMPUTE_MANAGER_CONFIGS" >> extra_yaml_args.yml
186 | }
187 |
188 |
189 | function create_extra_yaml_args {
190 | # Start the extra yaml args
191 | echo "" > extra_yaml_args.yml
192 |
193 | handle_external_ip_pool_spec
194 | handle_container_ip_block_spec
195 | handle_ha_switching_profile_spec
196 | handle_routers_spec
197 | handle_exsi_vnics
198 | handle_compute_manager_configs
199 |
200 | # Going with single profile uplink ; so use uplink-1 for both vmnics for edge
201 | echo "edge_uplink_vmnics:" >> extra_yaml_args.yml
202 | echo " - uplink-1: ${NSX_T_EDGE_OVERLAY_INTERFACE} # network3 used for overlay/tep" >> extra_yaml_args.yml
203 | echo " - uplink-1: ${NSX_T_EDGE_UPLINK_INTERFACE} # network2 used for vlan uplink" >> extra_yaml_args.yml
204 | echo "# network1 and network4 are for mgmt and not used for uplink" >> extra_yaml_args.yml
205 | echo "" >> extra_yaml_args.yml
206 | }
207 |
--------------------------------------------------------------------------------
/functions/create_hosts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function create_controller_hosts {
4 | if [ "$NSX_T_CONTROLLERS_CONFIG" == "" -o "$NSX_T_CONTROLLERS_CONFIG" == "null" ]; then
5 | create_controller_hosts_on_cluster
6 | else
7 | create_controller_hosts_across_clusters
8 | fi
9 | }
10 |
11 | function create_controller_hosts_on_cluster {
12 |
13 | count=1
14 | echo "[nsxcontrollers]" > ctrl_vms
15 | for controller_ip in $(echo $NSX_T_CONTROLLER_IPS | sed -e 's/,/ /g')
16 | do
17 | cat >> ctrl_vms <<-EOF
18 | nsx-controller0${count} \
19 | ansible_ssh_host=$controller_ip \
20 | ansible_ssh_user=root \
21 | ansible_ssh_pass=$NSX_T_CONTROLLER_ROOT_PWD \
22 | dc="$VCENTER_DATACENTER" \
23 | cluster="$VCENTER_CLUSTER" \
24 | resource_pool="$VCENTER_RP" \
25 | datastore="$VCENTER_DATASTORE" \
26 | portgroup="$MGMT_PORTGROUP" \
27 | gw=$DEFAULTGATEWAY \
28 | mask=$NETMASK \
29 | vmname="${NSX_T_CONTROLLER_VM_NAME_PREFIX}-0${count}" \
30 | hostname="${NSX_T_CONTROLLER_HOST_PREFIX}-0${count}"
31 | EOF
32 | (( count++ ))
33 | done
34 |
35 | }
36 |
37 | function create_controller_hosts_across_clusters {
38 |
39 | count=1
40 | echo "[nsxcontrollers]" > ctrl_vms
41 |
42 | echo "$NSX_T_CONTROLLERS_CONFIG" > /tmp/controllers_config.yml
43 | is_valid_yml=$(cat /tmp/controllers_config.yml | shyaml get-values controllers || true)
44 |
45 |   # Check if the controllers config is not empty and is valid
46 | if [ "$NSX_T_CONTROLLERS_CONFIG" != "" -a "$is_valid_yml" != "" ]; then
47 |
48 | NSX_T_CONTROLLER_VM_NAME_PREFIX=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.vm_name_prefix)
49 | NSX_T_CONTROLLER_HOST_PREFIX=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.host_prefix)
50 | NSX_T_CONTROLLER_ROOT_PWD=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.root_pwd)
51 | NSX_T_CONTROLLER_CLUSTER_PWD=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.cluster_pwd)
52 |
53 | length=$(expr $(cat /tmp/controllers_config.yml | shyaml get-values controllers.members | grep ip: | wc -l) - 1 || true )
54 | if ! [ $length == 0 -o $length == 2 ]; then
55 | echo "Error with # of controllers - should be odd (1 or 3)!!"
56 | echo "Exiting!!"
57 | exit -1
58 | fi
59 |
60 | for index in $(seq 0 $length)
61 | do
62 | NSX_T_CONTROLLER_INSTANCE_IP=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.ip)
63 | NSX_T_CONTROLLER_INSTANCE_CLUSTER=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.cluster)
64 | NSX_T_CONTROLLER_INSTANCE_RP=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.resource_pool)
65 | NSX_T_CONTROLLER_INSTANCE_DATASTORE=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.datastore)
66 |
67 | cat >> ctrl_vms <<-EOF
68 | nsx-controller0${count} \
69 | ansible_ssh_host=$NSX_T_CONTROLLER_INSTANCE_IP \
70 | ansible_ssh_user=root \
71 | ansible_ssh_pass=$NSX_T_CONTROLLER_ROOT_PWD \
72 | dc="$VCENTER_DATACENTER" \
73 | cluster="$NSX_T_CONTROLLER_INSTANCE_CLUSTER" \
74 | resource_pool="$NSX_T_CONTROLLER_INSTANCE_RP" \
75 | datastore="$NSX_T_CONTROLLER_INSTANCE_DATASTORE" \
76 | portgroup="$MGMT_PORTGROUP" \
77 | gw=$DEFAULTGATEWAY \
78 | mask=$NETMASK \
79 | vmname="${NSX_T_CONTROLLER_VM_NAME_PREFIX}-0${count}" \
80 | hostname="${NSX_T_CONTROLLER_HOST_PREFIX}-0${count}"
81 | EOF
82 | (( count++ ))
83 | done
84 | fi
85 |
86 | }
87 |
88 | function create_edge_hosts {
89 | if [ "$EDGE_VCENTER_HOST" != "" -a "$EDGE_VCENTER_HOST" != "null" ]; then
90 | create_edge_hosts_using_edge_vcenter
91 | else
92 | create_edge_hosts_using_mgmt_vcenter
93 | fi
94 | }
95 |
96 | function create_edge_hosts_using_edge_vcenter {
97 | count=1
98 | echo "[nsxedges]" > edge_vms
99 | for edge_ip in $(echo $NSX_T_EDGE_IPS | sed -e 's/,/ /g')
100 | do
101 | cat >> edge_vms <<-EOF
102 | ${NSX_T_EDGE_HOST_PREFIX}-0${count} \
103 | ansible_ssh_host=$edge_ip \
104 | ansible_ssh_user=root \
105 | ansible_ssh_pass=$NSX_T_EDGE_ROOT_PWD \
106 | vcenter_host="$EDGE_VCENTER_HOST" \
107 | vcenter_user="$EDGE_VCENTER_USR" \
108 | vcenter_pwd="$EDGE_VCENTER_PWD" \
109 | dc="$EDGE_VCENTER_DATACENTER" \
110 | datastore="$EDGE_VCENTER_DATASTORE" \
111 | cluster="$EDGE_VCENTER_CLUSTER" \
112 | resource_pool="$EDGE_VCENTER_RP" \
113 | dns_server="$EDGE_DNSSERVER" \
114 | dns_domain="$EDGE_DNSDOMAIN" \
115 | ntp_server="$EDGE_NTPSERVERS" \
116 | gw="$EDGE_DEFAULTGATEWAY" \
117 | mask="$EDGE_NETMASK" \
118 | vmname="${NSX_T_EDGE_VM_NAME_PREFIX}-0${count}" \
119 | hostname="${NSX_T_EDGE_HOST_PREFIX}-0${count}" \
120 | portgroup="$EDGE_MGMT_PORTGROUP" \
121 | portgroupExt="$NSX_T_EDGE_PORTGROUP_EXT" \
122 | portgroupTransport="$NSX_T_EDGE_PORTGROUP_TRANSPORT"
123 | EOF
124 | (( count++ ))
125 | done
126 | }
127 |
128 | function create_edge_hosts_using_mgmt_vcenter {
129 | count=1
130 | echo "[nsxedges]" > edge_vms
131 | for edge_ip in $(echo $NSX_T_EDGE_IPS | sed -e 's/,/ /g')
132 | do
133 | cat >> edge_vms <<-EOF
134 | ${NSX_T_EDGE_HOST_PREFIX}-0${count} \
135 | ansible_ssh_host=$edge_ip \
136 | ansible_ssh_user=root \
137 | ansible_ssh_pass=$NSX_T_EDGE_ROOT_PWD \
138 | vcenter_host="$VCENTER_HOST" \
139 | vcenter_user="$VCENTER_USR" \
140 | vcenter_pwd="$VCENTER_PWD" \
141 | dc="$VCENTER_DATACENTER" \
142 | datastore="$VCENTER_DATASTORE" \
143 | cluster="$VCENTER_CLUSTER" \
144 | resource_pool="$VCENTER_RP" \
145 | dns_server="$DNSSERVER" \
146 | dns_domain="$DNSDOMAIN" \
147 | ntp_server="$NTPSERVERS" \
148 | gw=$DEFAULTGATEWAY \
149 | mask=$NETMASK \
150 | vmname="${NSX_T_EDGE_VM_NAME_PREFIX}-0${count}" \
151 | hostname="${NSX_T_EDGE_HOST_PREFIX}-0${count}" \
152 | portgroup="$MGMT_PORTGROUP" \
153 | portgroupExt="$NSX_T_EDGE_PORTGROUP_EXT" \
154 | portgroupTransport="$NSX_T_EDGE_PORTGROUP_TRANSPORT"
155 | EOF
156 | (( count++ ))
157 | done
158 | }
159 |
160 | function create_esxi_hosts {
161 | touch esxi_hosts
162 | if [ "$ESXI_HOSTS_CONFIG" == "null" -o "$ESXI_HOSTS_CONFIG" == "" ]; then
163 | return
164 | fi
165 |
166 | if [ "$COMPUTE_MANAGER_CONFIGS" != "null" -a "$COMPUTE_MANAGER_CONFIGS" != "" ]; then
167 | echo "$COMPUTE_MANAGER_CONFIGS" > /tmp/compute_mgr_config.yml
168 | is_valid_yml=$(cat /tmp/compute_mgr_config.yml | shyaml get-values compute_managers || true)
169 | if [ "$is_valid_yml" != "" ]; then
170 |       # Don't go with individual esxi hosts, use the compute_manager_configs
171 |       echo "Both esxi_hosts_config and compute_manager_configs defined!"
172 |       echo "Going with compute_manager_configs definition instead of individual Esxi Hosts!!"
173 | return
174 | fi
175 | fi
176 |
177 | echo "$ESXI_HOSTS_CONFIG" > /tmp/esxi_hosts_config.yml
178 |
179 | is_valid_yml=$(cat /tmp/esxi_hosts_config.yml | shyaml get-values esxi_hosts || true)
180 |
181 | # Check if the esxi_hosts config is not empty and is valid
182 | if [ "$ESXI_HOSTS_CONFIG" != "" -a "$is_valid_yml" != "" ]; then
183 |
184 | echo "[nsxtransportnodes]" > esxi_hosts
185 |
186 | length=$(expr $(cat /tmp/esxi_hosts_config.yml | shyaml get-values esxi_hosts | grep name: | wc -l) - 1 || true )
187 | for index in $(seq 0 $length)
188 | do
189 | ESXI_INSTANCE_HOST=$(cat /tmp/esxi_hosts_config.yml | shyaml get-value esxi_hosts.${index}.name)
190 | ESXI_INSTANCE_IP=$(cat /tmp/esxi_hosts_config.yml | shyaml get-value esxi_hosts.${index}.ip)
191 | ESXI_INSTANCE_PWD=$(cat /tmp/esxi_hosts_config.yml | shyaml get-value esxi_hosts.${index}.root_pwd)
192 | if [ "$ESXI_INSTANCE_PWD" == "" ]; then
193 | ESXI_INSTANCE_PWD=$ESXI_HOSTS_ROOT_PWD
194 | fi
195 |
196 | cat >> esxi_hosts <<-EOF
197 | $ESXI_INSTANCE_HOST ansible_ssh_host=$ESXI_INSTANCE_IP ansible_ssh_user=root ansible_ssh_pass=$ESXI_INSTANCE_PWD
198 | EOF
199 | done
200 | else
201 |     echo "esxi_hosts_config is not set to valid yaml, ignoring it"
202 |     echo "Will use compute manager configs to add hosts!!"
203 | echo "" >> esxi_hosts
204 | fi
205 | }
206 |
207 | function create_hosts {
208 |
209 | export NSX_T_MANAGER_SHORT_HOSTNAME=$(echo $NSX_T_MANAGER_FQDN | awk -F '\.' '{print $1}')
210 |
211 | cat > hosts <<-EOF
212 | [localhost]
213 | localhost ansible_connection=local
214 |
215 | [nsxmanagers]
216 | nsx-manager \
217 | ansible_ssh_host=$NSX_T_MANAGER_IP \
218 | ansible_ssh_user=root \
219 | ansible_ssh_pass=$NSX_T_MANAGER_ROOT_PWD \
220 | dc="$VCENTER_DATACENTER" \
221 | cluster="$VCENTER_CLUSTER" \
222 | resource_pool="$VCENTER_RP" \
223 | datastore="$VCENTER_DATASTORE" \
224 | portgroup="$MGMT_PORTGROUP" \
225 | gw=$DEFAULTGATEWAY \
226 | mask=$NETMASK \
227 | vmname="$NSX_T_MANAGER_VM_NAME" \
228 | hostname="$NSX_T_MANAGER_SHORT_HOSTNAME"
229 |
230 | [localhost:vars]
231 |
232 | ovfToolPath='/usr/bin'
233 | nsxOvaPath="$OVA_ISO_PATH"
234 | sshEnabled='True'
235 | allowSSHRootAccess='True'
236 | managerOva=$NSX_T_MANAGER_OVA
237 | controllerOva=$NSX_T_CONTROLLER_OVA
238 | edgeOva=$NSX_T_EDGE_OVA
239 |
240 | deployVcIPAddress="$VCENTER_HOST"
241 | deployVcUser=$VCENTER_USR
242 | deployVcPassword="$VCENTER_PWD"
243 | compute_manager="$VCENTER_MANAGER"
244 | cm_cluster="$VCENTER_CLUSTER"
245 |
246 | edge_vcenter_host="$EDGE_VCENTER_HOST"
247 | edge_vcenter_user="$EDGE_VCENTER_USR"
248 | edge_vcenter_password="$EDGE_VCENTER_PWD"
249 | edge_vcenter_cluster="$EDGE_VCENTER_CLUSTER"
250 | edge_dc="$EDGE_VCENTER_DATACENTER"
251 | edge_datastore="$EDGE_VCENTER_DATASTORE"
252 | edge_portgroup="$EDGE_MGMT_PORTGROUP"
253 | edge_dns_server="$EDGE_DNSSERVER"
254 | edge_dns_domain="$EDGE_DNSDOMAIN"
255 | edge_ntp_server="$EDGE_NTPSERVERS"
256 | edge_gw="$EDGE_DEFAULTGATEWAY"
257 | edge_mask="$EDGE_NETMASK"
258 |
259 | nsxInstaller="$NSX_T_INSTALLER"
260 | nsxAdminPass="$NSX_T_MANAGER_ADMIN_PWD"
261 | nsxCliPass="$NSX_T_MANAGER_ROOT_PWD"
262 |
263 | dns_server="$DNSSERVER"
264 | dns_domain="$DNSDOMAIN"
265 | ntp_server="$NTPSERVERS"
266 |
267 | # Sizing of vms for deployment
268 | nsx_t_mgr_deploy_size="$NSX_T_MGR_DEPLOY_SIZE"
269 | nsx_t_edge_deploy_size="$NSX_T_EDGE_DEPLOY_SIZE"
270 |
271 | tag_scope="ncp/cluster"
272 | tag=$NSX_T_PAS_NCP_CLUSTER_TAG
273 | overlay_tz_name=$NSX_T_OVERLAY_TRANSPORT_ZONE
274 | vlan_tz_name=$NSX_T_VLAN_TRANSPORT_ZONE
275 | vlan_hostswitch=$NSX_T_VLAN_HOSTSWITCH
276 | overlay_hostswitch=$NSX_T_OVERLAY_HOSTSWITCH
277 |
278 | tep_pool_name=$NSX_T_TEP_POOL_NAME
279 | tep_pool_cidr=$NSX_T_TEP_POOL_CIDR
280 | tep_pool_range="${NSX_T_TEP_POOL_START}-${NSX_T_TEP_POOL_END}"
281 | tep_pool_gw=$NSX_T_TEP_POOL_GATEWAY
282 |
283 | edge_single_uplink_profile_name=$NSX_T_SINGLE_UPLINK_PROFILE_NAME
284 | edge_single_uplink_profile_mtu=$NSX_T_SINGLE_UPLINK_PROFILE_MTU
285 | edge_single_uplink_profile_vlan=$NSX_T_SINGLE_UPLINK_PROFILE_VLAN
286 | edge_interface=$NSX_T_EDGE_OVERLAY_INTERFACE
287 | edge_uplink_interface=$NSX_T_EDGE_UPLINK_INTERFACE
288 |
289 | esxi_overlay_profile_name=$NSX_T_OVERLAY_PROFILE_NAME
290 | esxi_overlay_profile_mtu=$NSX_T_OVERLAY_PROFILE_MTU
291 | esxi_overlay_profile_vlan=$NSX_T_OVERLAY_PROFILE_VLAN
292 |
293 | edge_cluster="$NSX_T_EDGE_CLUSTER"
294 |
295 | EOF
296 |
297 | if [ "$VCENTER_RP" == "null" ]; then
298 | export VCENTER_RP=""
299 | fi
300 |
301 | create_edge_hosts
302 | create_controller_hosts
303 |
304 | cat ctrl_vms >> hosts
305 | echo "" >> hosts
306 | cat edge_vms >> hosts
307 | echo "" >> hosts
308 |
309 | if [ ! -z "$ESXI_HOSTS_CONFIG" ]; then
310 | create_esxi_hosts
311 | cat esxi_hosts >> hosts
312 | echo "" >> hosts
313 | fi
314 |
315 | }
316 |
--------------------------------------------------------------------------------
/functions/delete_vm_using_govc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | export GOVC_DEBUG=$ENABLE_ANSIBLE_DEBUG
6 |
7 | #export GOVC_TLS_CA_CERTS=/tmp/vcenter-ca.pem
8 | #echo "$GOVC_CA_CERT" > "$GOVC_TLS_CA_CERTS"
9 |
10 | function delete_vm_using_govc() {
11 | type_of_vm=$1
12 |
13 | if [ "$type_of_vm" == "mgr" ]; then
14 | delete_mgr_vm
15 | elif [ "$type_of_vm" == "edge" ]; then
16 | delete_edge_vm
17 | else
18 | delete_ctrl_vm
19 | fi
20 | }
21 |
22 | function destroy_vms_not_matching_nsx() {
23 | nsx_vm_name_pattern="${NSX_T_EDGE_VM_NAME_PREFIX}\|${NSX_T_MANAGER_VM_NAME}\|${NSX_T_CONTROLLER_VM_NAME_PREFIX}"
24 |
25 | default_options=" GOVC_URL=$VCENTER_HOST \
26 | GOVC_DATACENTER=$VCENTER_DATACENTER \
27 | GOVC_INSECURE=true \
28 | GOVC_DATASTORE=$VCENTER_DATASTORE \
29 | GOVC_CLUSTER=$VCENTER_CLUSTER \
30 | GOVC_USERNAME=$VCENTER_USR \
31 | GOVC_PASSWORD=$VCENTER_PWD "
32 |
33 | # Setup govc env variables coming via $default_options
34 | export $default_options
35 |
36 | # Shutdown and clean all non-nsx related vms in the management plane
37 | for vm_path in $(govc find . -type m | grep -v stemcell | grep -ve "$nsx_vm_name_pattern" | sed -e 's/ /::/g' || true)
38 | do
39 | actual_vm_path=$(echo $vm_path | sed -e 's/::/ /g' )
40 |     echo "Please shut down the following vm if it uses NSX-T logical switches before proceeding with the wipe: $actual_vm_path"
41 | #govc vm.power -off "$actual_vm_path"
42 | #govc vm.destroy "$actual_vm_path"
43 | done
44 |
45 | # Shutdown and clean all non-nsx related vms in the compute clusters plane
46 | if [ "$COMPUTE_MANAGER_CONFIGS" != "" -a "$COMPUTE_MANAGER_CONFIGS" != "null" ]; then
47 |
48 | compute_manager_json_config=$(echo "$COMPUTE_MANAGER_CONFIGS" | python $PYTHON_LIB_DIR/yaml2json.py)
49 |
50 | total_count=$(echo $compute_manager_json_config | jq '.compute_managers | length')
51 | index=0
52 | while [ $index -lt $total_count ]
53 | do
54 | compute_vcenter=$( echo $compute_manager_json_config | jq --argjson index $index '.compute_managers[$index]' )
55 | compute_vcenter_host=$(echo $compute_vcenter | jq -r '.vcenter_host' )
56 | #compute_vcenter_dc=$(echo $compute_vcenter | jq -r '.vcenter_datacenter' )
57 | compute_vcenter_usr=$(echo $compute_vcenter | jq -r '.vcenter_usr' )
58 | compute_vcenter_pwd=$(echo $compute_vcenter | jq -r '.vcenter_pwd' )
59 |
60 | inner_total=$(echo $compute_vcenter | jq '.clusters | length' )
61 | inner_index=0
62 | while [ $inner_index -lt $inner_total ]
63 | do
64 | compute_cluster=$( echo $compute_vcenter | jq --argjson inner_index $inner_index '.clusters[$inner_index]' )
65 | compute_vcenter_cluster=$(echo $compute_cluster | jq -r '.vcenter_cluster' )
66 |
67 | custom_options="GOVC_URL=$compute_vcenter_host \
68 | GOVC_DATACENTER=$VCENTER_DATACENTER \
69 | GOVC_INSECURE=true \
70 | GOVC_CLUSTER=$compute_vcenter_cluster \
71 | GOVC_USERNAME=$compute_vcenter_usr \
72 | GOVC_PASSWORD=$compute_vcenter_pwd "
73 |
74 | # Setup govc env variables coming via the above options
75 | export $custom_options
76 |
77 | for vm_path in $(govc find . -type m | grep -v stemcell | grep -ve "$nsx_vm_name_pattern" | sed -e 's/ /::/g' || true)
78 | do
79 | actual_vm_path=$(echo $vm_path | sed -e 's/::/ /g' )
80 |         echo "Please shut down the following vm if it uses NSX-T logical switches before proceeding with the wipe: $actual_vm_path"
81 | #govc vm.power -off "$actual_vm_path"
82 | #govc vm.destroy "$actual_vm_path"
83 | done
84 | inner_index=$(expr $inner_index + 1)
85 | done
86 | index=$(expr $index + 1)
87 | done
88 | fi
89 |
90 | echo "Sleeping for 60 seconds, please cancel the rest of the task if necessary!!"
91 | sleep 60
92 | }
93 |
94 | function destroy_vm_matching_name {
95 | vm_name=$1
96 | additional_options=$2
97 |
98 | # Setup govc env variables coming via $additional_options
99 | export $additional_options
100 |
101 | vm_path=$(govc find . -type m | grep "$vm_name" | sed -e 's/ /::/g' || true)
102 | actual_vm_path=$(echo $vm_path | sed -e 's/::/ /g' )
103 | if [ "$actual_vm_path" != "" ]; then
104 | echo "Powering off and destroying vm: $actual_vm_path"
105 | govc vm.power -off "$actual_vm_path"
106 | govc vm.destroy "$actual_vm_path"
107 | fi
108 | }
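# Note on the path handling above and in destroy_vms_not_matching_nsx: 'govc find' prints
# inventory paths that can contain spaces (e.g. a vm named "NSX-T Mgr"), so the
# sed 's/ /::/g' turns each path into a single word for the for-loop and the reverse
# sed restores the spaces before handing the path back to govc.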
109 |
110 | function delete_mgr_vm() {
111 | vm_name=${NSX_T_MANAGER_VM_NAME}
112 |
113 | default_additional_options="GOVC_URL=$VCENTER_HOST \
114 | GOVC_DATACENTER=$VCENTER_DATACENTER \
115 | GOVC_INSECURE=true \
116 | GOVC_DATASTORE=$VCENTER_DATASTORE \
117 | GOVC_CLUSTER=$VCENTER_CLUSTER \
118 | GOVC_USERNAME=$VCENTER_USR \
119 | GOVC_PASSWORD=$VCENTER_PWD "
120 |
121 | destroy_vm_matching_name "$vm_name" "$default_additional_options"
122 |
123 | }
124 |
125 | function delete_edge_vm() {
126 | default_additional_options="GOVC_URL=$VCENTER_HOST \
127 | GOVC_DATACENTER=$VCENTER_DATACENTER \
128 | GOVC_INSECURE=true \
129 | GOVC_DATASTORE=$VCENTER_DATASTORE \
130 | GOVC_CLUSTER=$VCENTER_CLUSTER \
131 | GOVC_USERNAME=$VCENTER_USR \
132 | GOVC_PASSWORD=$VCENTER_PWD "
133 |
134 | if [ "$EDGE_VCENTER_HOST" == "" -o "$EDGE_VCENTER_HOST" == "null" ]; then
135 | count=1
136 | for nsx_edge_ip in $(echo $NSX_T_EDGE_IPS | sed -e 's/,/ /g')
137 | do
138 | destroy_vm_matching_name "${NSX_T_EDGE_VM_NAME_PREFIX}-0${count}" "$default_additional_options"
139 | (( count++ ))
140 | done
141 | return
142 | fi
143 |
144 | count=1
145 | edge_additional_options=" GOVC_URL=$EDGE_VCENTER_HOST \
146 | GOVC_DATACENTER=$EDGE_VCENTER_DATACENTER \
147 | GOVC_INSECURE=true \
148 | GOVC_DATASTORE=$EDGE_VCENTER_DATASTORE \
149 | GOVC_CLUSTER=$EDGE_VCENTER_CLUSTER \
150 | GOVC_USERNAME=$EDGE_VCENTER_USR \
151 | GOVC_PASSWORD=$EDGE_VCENTER_PWD "
152 |
153 | for nsx_edge_ip in $(echo $NSX_T_EDGE_IPS | sed -e 's/,/ /g')
154 | do
155 | destroy_vm_matching_name "${NSX_T_EDGE_VM_NAME_PREFIX}-0${count}" "$edge_additional_options"
156 | (( count++ ))
157 | done
158 | }
159 |
160 | function delete_ctrl_vm() {
161 | default_additional_options="GOVC_URL=$VCENTER_HOST \
162 | GOVC_DATACENTER=$VCENTER_DATACENTER \
163 | GOVC_INSECURE=true \
164 | GOVC_DATASTORE=$VCENTER_DATASTORE \
165 | GOVC_CLUSTER=$VCENTER_CLUSTER \
166 | GOVC_USERNAME=$VCENTER_USR \
167 | GOVC_PASSWORD=$VCENTER_PWD "
168 |
169 | if [ "$NSX_T_CONTROLLERS_CONFIG" == "" -o "$NSX_T_CONTROLLERS_CONFIG" == "null" ]; then
170 | count=1
171 | for nsx_ctrl_ip in $(echo $NSX_T_CONTROLLER_IPS | sed -e 's/,/ /g')
172 | do
173 | destroy_vm_matching_name "${NSX_T_CONTROLLER_VM_NAME_PREFIX}-0${count}" "$default_additional_options"
174 | (( count++ ))
175 | done
176 | return
177 | fi
178 |
179 | echo "$NSX_T_CONTROLLERS_CONFIG" > /tmp/controllers_config.yml
180 | is_valid_yml=$(cat /tmp/controllers_config.yml | shyaml get-values controllers || true)
181 |
182 |   # Check if the controllers config is not empty and is valid
183 | if [ "$NSX_T_CONTROLLERS_CONFIG" != "" -a "$is_valid_yml" != "" ]; then
184 |
185 | NSX_T_CONTROLLER_VM_NAME_PREFIX=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.vm_name_prefix)
186 |     length=$(expr $(cat /tmp/controllers_config.yml | shyaml get-values controllers.members | grep ip: | wc -l) - 1 || true ) # index of the last controller member
187 | count=1
188 | for index in $(seq 0 $length)
189 | do
190 | NSX_T_CONTROLLER_INSTANCE_CLUSTER=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.cluster)
191 | NSX_T_CONTROLLER_INSTANCE_RP=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.resource_pool)
192 | NSX_T_CONTROLLER_INSTANCE_DATASTORE=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.datastore)
193 |
194 | ctrl_additional_options=" GOVC_URL=$VCENTER_HOST \
195 | GOVC_DATACENTER=$VCENTER_DATACENTER \
196 | GOVC_INSECURE=true \
197 | GOVC_DATASTORE=$NSX_T_CONTROLLER_INSTANCE_DATASTORE \
198 | GOVC_CLUSTER=$NSX_T_CONTROLLER_INSTANCE_CLUSTER \
199 | GOVC_USERNAME=$VCENTER_USR \
200 | GOVC_PASSWORD=$VCENTER_PWD "
201 |
202 | destroy_vm_matching_name "${NSX_T_CONTROLLER_VM_NAME_PREFIX}-0${count}" "$ctrl_additional_options"
203 | (( count++ ))
204 | done
205 | fi
206 | }
207 |
--------------------------------------------------------------------------------
/functions/deploy_ova_using_govc.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | export GOVC_DEBUG=$ENABLE_ANSIBLE_DEBUG
6 |
7 | #export GOVC_TLS_CA_CERTS=/tmp/vcenter-ca.pem
8 | #echo "$GOVC_CA_CERT" > "$GOVC_TLS_CA_CERTS"
9 |
10 | function deploy_ova_using_govc() {
11 | type_of_ova=$1
12 | path_to_ova=$2
13 |
14 | if [ "$type_of_ova" == "mgr" ]; then
15 | deploy_mgr_ova $path_to_ova
16 | elif [ "$type_of_ova" == "edge" ]; then
17 | deploy_edge_ova $path_to_ova
18 | else
19 | deploy_ctrl_ova $path_to_ova
20 | fi
21 | }
22 |
23 | function deploy_ova {
24 | path_to_ova=$1
25 | ova_options=$2
26 | resource_pool=$3
27 | additional_options=$4
28 |
29 | # Setup govc env variables coming via $additional_options
30 | export $additional_options
31 |
32 | if [ "$GOVC_DEBUG" != "" ]; then
33 | echo "Using VM options for ova upload"
34 | cat $ova_options | awk '/passw/ {getline; next} {print}'
35 | fi
36 |
37 | if [ "$resource_pool" == "" -o -z "$resource_pool" ]; then
38 | govc import.ova -options=$ova_options "$path_to_ova"
39 | else
40 | set +e
41 | found_rp=$(govc find . -name "${resource_pool}" -type p 2>&1 | grep Resources | grep "${resource_pool}$" || true )
42 | set -e
43 |     if [ "$found_rp" == "" ]; then
44 |       govc pool.create "$GOVC_CLUSTER/Resources/$resource_pool"
45 |       GOVC_RESOURCE_POOL="$GOVC_CLUSTER/Resources/$resource_pool"
46 |     else
47 |       GOVC_RESOURCE_POOL=$resource_pool
48 |     fi
49 | govc import.ova -pool="$GOVC_RESOURCE_POOL" -options=$ova_options "$path_to_ova"
50 | fi
51 |
52 | }
53 |
54 | function deploy_mgr_ova() {
55 | path_to_ova=$1
56 |
57 | default_additional_options="GOVC_URL=$VCENTER_HOST \
58 | GOVC_DATACENTER=$VCENTER_DATACENTER \
59 | GOVC_INSECURE=true \
60 | GOVC_DATASTORE=$VCENTER_DATASTORE \
61 | GOVC_CLUSTER=$VCENTER_CLUSTER \
62 | GOVC_USERNAME=$VCENTER_USR \
63 | GOVC_PASSWORD=$VCENTER_PWD "
64 |
65 | ova_options=$(handle_nsx_mgr_ova_options $path_to_ova)
66 | deploy_ova $path_to_ova $ova_options "$VCENTER_RP" "$default_additional_options"
67 |
68 | }
69 |
70 | function deploy_edge_ova() {
71 | path_to_ova=$1
72 |
73 | default_additional_options="GOVC_URL=$VCENTER_HOST \
74 | GOVC_DATACENTER=$VCENTER_DATACENTER \
75 | GOVC_INSECURE=true \
76 | GOVC_DATASTORE=$VCENTER_DATASTORE \
77 | GOVC_CLUSTER=$VCENTER_CLUSTER \
78 | GOVC_USERNAME=$VCENTER_USR \
79 | GOVC_PASSWORD=$VCENTER_PWD "
80 |
81 | if [ "$EDGE_VCENTER_HOST" == "" -o "$EDGE_VCENTER_HOST" == "null" ]; then
82 | count=1
83 | for nsx_edge_ip in $(echo $NSX_T_EDGE_IPS | sed -e 's/,/ /g')
84 | do
85 | ova_options=$(handle_nsx_edge_ova_options $path_to_ova $nsx_edge_ip $count)
86 | deploy_ova $path_to_ova $ova_options "$VCENTER_RP" "$default_additional_options"
87 | (( count++ ))
88 | done
89 | return
90 | fi
91 |
92 | count=1
93 | edge_additional_options=" GOVC_URL=$EDGE_VCENTER_HOST \
94 | GOVC_DATACENTER=$EDGE_VCENTER_DATACENTER \
95 | GOVC_INSECURE=true \
96 | GOVC_DATASTORE=$EDGE_VCENTER_DATASTORE \
97 | GOVC_CLUSTER=$EDGE_VCENTER_CLUSTER \
98 | GOVC_USERNAME=$EDGE_VCENTER_USR \
99 | GOVC_PASSWORD=$EDGE_VCENTER_PWD "
100 |
101 | for nsx_edge_ip in $(echo $NSX_T_EDGE_IPS | sed -e 's/,/ /g')
102 | do
103 | ova_options=$(handle_custom_nsx_edge_ova_options $path_to_ova $nsx_edge_ip $count)
104 | deploy_ova $path_to_ova $ova_options "$EDGE_VCENTER_RP" "$edge_additional_options"
105 | (( count++ ))
106 | done
107 |
108 | }
109 |
110 | function deploy_ctrl_ova() {
111 | path_to_ova=$1
112 |
113 | default_additional_options="GOVC_URL=$VCENTER_HOST \
114 | GOVC_DATACENTER=$VCENTER_DATACENTER \
115 | GOVC_INSECURE=true \
116 | GOVC_DATASTORE=$VCENTER_DATASTORE \
117 | GOVC_CLUSTER=$VCENTER_CLUSTER \
118 | GOVC_USERNAME=$VCENTER_USR \
119 | GOVC_PASSWORD=$VCENTER_PWD "
120 |
121 | if [ "$NSX_T_CONTROLLERS_CONFIG" == "" -o "$NSX_T_CONTROLLERS_CONFIG" == "null" ]; then
122 | count=1
123 | for nsx_ctrl_ip in $(echo $NSX_T_CONTROLLER_IPS | sed -e 's/,/ /g')
124 | do
125 | ova_options=$(handle_nsx_ctrl_ova_options $path_to_ova $nsx_ctrl_ip $count)
126 | deploy_ova $path_to_ova $ova_options "$VCENTER_RP" "$default_additional_options"
127 | (( count++ ))
128 | done
129 | return
130 | fi
131 |
132 | echo "$NSX_T_CONTROLLERS_CONFIG" > /tmp/controllers_config.yml
133 | is_valid_yml=$(cat /tmp/controllers_config.yml | shyaml get-values controllers || true)
134 |
135 |   # Check if the controllers config is not empty and is valid
136 | if [ "$NSX_T_CONTROLLERS_CONFIG" != "" -a "$is_valid_yml" != "" ]; then
137 |
138 | NSX_T_CONTROLLER_VM_NAME_PREFIX=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.vm_name_prefix)
139 | NSX_T_CONTROLLER_HOST_PREFIX=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.host_prefix)
140 | NSX_T_CONTROLLER_ROOT_PWD=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.root_pwd)
141 | NSX_T_CONTROLLER_CLUSTER_PWD=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.cluster_pwd)
142 |     length=$(expr $(cat /tmp/controllers_config.yml | shyaml get-values controllers.members | grep ip: | wc -l) - 1 || true ) # index of the last controller member
143 | count=1
144 | for index in $(seq 0 $length)
145 | do
146 | NSX_T_CONTROLLER_INSTANCE_IP=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.ip)
147 | NSX_T_CONTROLLER_INSTANCE_CLUSTER=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.cluster)
148 | NSX_T_CONTROLLER_INSTANCE_RP=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.resource_pool)
149 | NSX_T_CONTROLLER_INSTANCE_DATASTORE=$(cat /tmp/controllers_config.yml | shyaml get-value controllers.members.${index}.datastore)
150 |
151 | ctrl_additional_options=" GOVC_URL=$VCENTER_HOST \
152 | GOVC_DATACENTER=$VCENTER_DATACENTER \
153 | GOVC_INSECURE=true \
154 | GOVC_DATASTORE=$NSX_T_CONTROLLER_INSTANCE_DATASTORE \
155 | GOVC_CLUSTER=$NSX_T_CONTROLLER_INSTANCE_CLUSTER \
156 | GOVC_USERNAME=$VCENTER_USR \
157 | GOVC_PASSWORD=$VCENTER_PWD "
158 |
159 | ova_options=$(handle_nsx_ctrl_ova_options $path_to_ova $NSX_T_CONTROLLER_INSTANCE_IP $count)
160 | deploy_ova $path_to_ova $ova_options "$NSX_T_CONTROLLER_INSTANCE_RP" "$ctrl_additional_options"
161 | (( count++ ))
162 | done
163 | fi
164 | }
165 |
166 |
167 | function handle_nsx_mgr_ova_options {
168 |
169 | nsx_mgr_ova_file_path=$1
170 | govc import.spec "$nsx_mgr_ova_file_path" | python -m json.tool > /tmp/nsx-mgr-import.json
171 |
172 | export NSX_T_MANAGER_SHORT_HOSTNAME=$(echo $NSX_T_MANAGER_FQDN | awk -F '\.' '{print $1}')
173 |
174 | cat > /tmp/nsx_mgr_filters <<'EOF'
175 | .Name = $vmName |
176 | .NetworkMapping[].Network = $mgmt_network |
177 | .IPAllocationPolicy = "fixedPolicy" |
178 | .PowerOn = true |
179 | .WaitForIP = true |
180 | .Deployment = $deployment_size |
181 | (.PropertyMapping[] | select(.Key == "nsx_hostname")).Value = $hostname |
182 | (.PropertyMapping[] | select(.Key == "nsx_dns1_0")).Value = $dnsServer |
183 | (.PropertyMapping[] | select(.Key == "nsx_domain_0")).Value = $dnsdomain |
184 | (.PropertyMapping[] | select(.Key == "nsx_ntp_0")).Value = $ntpServer |
185 | (.PropertyMapping[] | select(.Key == "nsx_gateway_0")).Value = $gateway |
186 | (.PropertyMapping[] | select(.Key == "nsx_ip_0")).Value = $ip |
187 | (.PropertyMapping[] | select(.Key == "nsx_netmask_0")).Value = $netmask |
188 | (.PropertyMapping[] | select(.Key == "nsx_cli_username")).Value = $adminName |
189 | (.PropertyMapping[] | select(.Key == "nsx_passwd_0")).Value = $adminPassword |
190 | (.PropertyMapping[] | select(.Key == "nsx_cli_passwd_0")).Value = $cliPassword |
191 | (.PropertyMapping[] | select(.Key == "nsx_isSSHEnabled")).Value = "True" |
192 | (.PropertyMapping[] | select(.Key == "nsx_allowSSHRootLogin")).Value = "True"
193 | EOF
194 |
195 | jq \
196 | --arg vmName "$NSX_T_MANAGER_VM_NAME" \
197 | --arg mgmt_network "$MGMT_PORTGROUP" \
198 | --arg deployment_size "$NSX_T_MGR_DEPLOY_SIZE" \
199 | --arg hostname "$NSX_T_MANAGER_SHORT_HOSTNAME" \
200 | --arg dnsServer "$DNSSERVER" \
201 | --arg dnsdomain "$DNSDOMAIN" \
202 | --arg ntpServer "$NTPSERVERS" \
203 | --arg gateway "$DEFAULTGATEWAY" \
204 | --arg ip "$NSX_T_MANAGER_IP" \
205 | --arg netmask "$NETMASK" \
206 | --arg adminName "admin" \
207 | --arg adminPassword "$NSX_T_MANAGER_ADMIN_PWD" \
208 | --arg cliPassword "$NSX_T_MANAGER_ROOT_PWD" \
209 | --from-file /tmp/nsx_mgr_filters \
210 | /tmp/nsx-mgr-import.json > /tmp/nsx-mgr-ova-options.json
211 |
212 | #cat /tmp/nsx-mgr-ova-options.json
213 | echo "/tmp/nsx-mgr-ova-options.json"
214 | }
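# Rough shape of the generated /tmp/nsx-mgr-ova-options.json later passed to
# 'govc import.ova -options=...' (abbreviated sketch; the full structure comes from
# 'govc import.spec' and varies with the OVA):
#   {
#     "Name": "NSX-T Mgr",
#     "Deployment": "small",
#     "IPAllocationPolicy": "fixedPolicy",
#     "PowerOn": true,
#     "WaitForIP": true,
#     "NetworkMapping": [ { "Name": "...", "Network": "<mgmt portgroup>" } ],
#     "PropertyMapping": [ { "Key": "nsx_ip_0", "Value": "<manager ip>" }, ... ]
#   }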
215 |
216 |
217 | function handle_nsx_ctrl_ova_options {
218 |
219 | nsx_ctrl_ova_file_path=$1
220 | instance_ip=$2
221 | instance_index=$3
222 |
223 | govc import.spec "$nsx_ctrl_ova_file_path" | python -m json.tool > /tmp/nsx-ctrl-import.json
224 |
225 | cat > /tmp/nsx_ctrl_filters <<'EOF'
226 | .Name = $vmName |
227 | .IPAllocationPolicy = "fixedPolicy" |
228 | .NetworkMapping[].Network = $mgmt_network |
229 | .PowerOn = true |
230 | .WaitForIP = true |
231 | (.PropertyMapping[] | select(.Key == "nsx_hostname")).Value = $hostname |
232 | (.PropertyMapping[] | select(.Key == "nsx_dns1_0")).Value = $dnsServer |
233 | (.PropertyMapping[] | select(.Key == "nsx_domain_0")).Value = $dnsdomain |
234 | (.PropertyMapping[] | select(.Key == "nsx_ntp_0")).Value = $ntpServer |
235 | (.PropertyMapping[] | select(.Key == "nsx_gateway_0")).Value = $gateway |
236 | (.PropertyMapping[] | select(.Key == "nsx_ip_0")).Value = $ip |
237 | (.PropertyMapping[] | select(.Key == "nsx_netmask_0")).Value = $netmask |
238 | (.PropertyMapping[] | select(.Key == "nsx_cli_username")).Value = $adminName |
239 | (.PropertyMapping[] | select(.Key == "nsx_passwd_0")).Value = $adminPassword |
240 | (.PropertyMapping[] | select(.Key == "nsx_cli_passwd_0")).Value = $cliPassword |
241 | (.PropertyMapping[] | select(.Key == "nsx_isSSHEnabled")).Value = "True" |
242 | (.PropertyMapping[] | select(.Key == "nsx_allowSSHRootLogin")).Value = "True"
243 | EOF
244 |
245 | jq \
246 | --arg vmName "${NSX_T_CONTROLLER_VM_NAME_PREFIX}-0${instance_index}" \
247 | --arg mgmt_network "$MGMT_PORTGROUP" \
248 | --arg hostname "${NSX_T_CONTROLLER_HOST_PREFIX}-0${instance_index}" \
249 | --arg dnsServer "$DNSSERVER" \
250 | --arg dnsdomain "$DNSDOMAIN" \
251 | --arg ntpServer "$NTPSERVERS" \
252 | --arg gateway "$DEFAULTGATEWAY" \
253 | --arg ip "$instance_ip" \
254 | --arg netmask "$NETMASK" \
255 | --arg adminName "admin" \
256 | --arg adminPassword "$NSX_T_CONTROLLER_ROOT_PWD" \
257 | --arg cliPassword "$NSX_T_CONTROLLER_ROOT_PWD" \
258 | --from-file /tmp/nsx_ctrl_filters \
259 | /tmp/nsx-ctrl-import.json > /tmp/nsx-ctrl-0${count}-ova-options.json
260 |
261 | #cat /tmp/nsx-mgr-ova-options.json
262 | echo "/tmp/nsx-ctrl-0${count}-ova-options.json"
263 | }
264 |
265 |
266 | function handle_nsx_edge_ova_options {
267 |
268 | nsx_edge_ova_file_path=$1
269 | instance_ip=$2
270 | instance_index=$3
271 |
272 | govc import.spec "$nsx_edge_ova_file_path" | python -m json.tool > /tmp/nsx-edge-import.json
273 |
274 | cat > /tmp/nsx_edge_filters <<'EOF'
275 | .Name = $vmName |
276 | .IPAllocationPolicy = "fixedPolicy" |
277 | .NetworkMapping[0].Network = $mgmt_network |
278 | .NetworkMapping[1].Network = $portgroup_ext |
279 | .NetworkMapping[2].Network = $portgroup_transport |
280 | .NetworkMapping[3].Network = $mgmt_network |
281 | .PowerOn = true |
282 | .WaitForIP = true |
283 | .Deployment = $deployment_size |
284 | (.PropertyMapping[] | select(.Key == "nsx_hostname")).Value = $hostname |
285 | (.PropertyMapping[] | select(.Key == "nsx_dns1_0")).Value = $dnsServer |
286 | (.PropertyMapping[] | select(.Key == "nsx_domain_0")).Value = $dnsdomain |
287 | (.PropertyMapping[] | select(.Key == "nsx_ntp_0")).Value = $ntpServer |
288 | (.PropertyMapping[] | select(.Key == "nsx_gateway_0")).Value = $gateway |
289 | (.PropertyMapping[] | select(.Key == "nsx_ip_0")).Value = $ip |
290 | (.PropertyMapping[] | select(.Key == "nsx_netmask_0")).Value = $netmask |
291 | (.PropertyMapping[] | select(.Key == "nsx_cli_username")).Value = $adminName |
292 | (.PropertyMapping[] | select(.Key == "nsx_passwd_0")).Value = $adminPassword |
293 | (.PropertyMapping[] | select(.Key == "nsx_cli_passwd_0")).Value = $cliPassword |
294 | (.PropertyMapping[] | select(.Key == "nsx_isSSHEnabled")).Value = "True" |
295 | (.PropertyMapping[] | select(.Key == "nsx_allowSSHRootLogin")).Value = "True"
296 | EOF
297 |
298 | jq \
299 | --arg vmName "${NSX_T_EDGE_VM_NAME_PREFIX}-0${instance_index}" \
300 | --arg mgmt_network "$MGMT_PORTGROUP" \
301 | --arg portgroup_ext "$NSX_T_EDGE_PORTGROUP_EXT" \
302 | --arg portgroup_transport "$NSX_T_EDGE_PORTGROUP_TRANSPORT" \
303 | --arg deployment_size "$NSX_T_EDGE_DEPLOY_SIZE" \
304 | --arg hostname "${NSX_T_EDGE_HOST_PREFIX}-0${instance_index}" \
305 | --arg dnsServer "$DNSSERVER" \
306 | --arg dnsdomain "$DNSDOMAIN" \
307 | --arg ntpServer "$NTPSERVERS" \
308 | --arg gateway "$DEFAULTGATEWAY" \
309 | --arg ip "$instance_ip" \
310 | --arg netmask "$NETMASK" \
311 | --arg adminName "admin" \
312 | --arg adminPassword "$NSX_T_EDGE_ROOT_PWD" \
313 | --arg cliPassword "$NSX_T_EDGE_ROOT_PWD" \
314 | --from-file /tmp/nsx_edge_filters \
315 | /tmp/nsx-edge-import.json > /tmp/nsx-edge-0${count}-ova-options.json
316 |
317 | #cat /tmp/nsx-edge-ova-options.json
318 | echo "/tmp/nsx-edge-0${count}-ova-options.json"
319 | }
320 |
321 | function handle_custom_nsx_edge_ova_options {
322 |
323 | nsx_edge_ova_file_path=$1
324 | instance_ip=$2
325 | instance_index=$3
326 |
327 | govc import.spec "$nsx_edge_ova_file_path" | python -m json.tool > /tmp/nsx-edge-import.json
328 |
329 | cat > /tmp/nsx_edge_filters <<'EOF'
330 | .Name = $vmName |
331 | .IPAllocationPolicy = "fixedPolicy" |
332 | .NetworkMapping[0].Network = $mgmt_network |
333 | .NetworkMapping[1].Network = $portgroup_ext |
334 | .NetworkMapping[2].Network = $portgroup_transport |
335 | .PowerOn = true |
336 | .WaitForIP = true |
337 | .Deployment = $deployment_size |
338 | (.PropertyMapping[] | select(.Key == "nsx_hostname")).Value = $hostname |
339 | (.PropertyMapping[] | select(.Key == "nsx_dns1_0")).Value = $dnsServer |
340 | (.PropertyMapping[] | select(.Key == "nsx_domain_0")).Value = $dnsdomain |
341 | (.PropertyMapping[] | select(.Key == "nsx_ntp_0")).Value = $ntpServer |
342 | (.PropertyMapping[] | select(.Key == "nsx_gateway_0")).Value = $gateway |
343 | (.PropertyMapping[] | select(.Key == "nsx_ip_0")).Value = $ip |
344 | (.PropertyMapping[] | select(.Key == "nsx_netmask_0")).Value = $netmask |
345 | (.PropertyMapping[] | select(.Key == "nsx_cli_username")).Value = $adminName |
346 | (.PropertyMapping[] | select(.Key == "nsx_passwd_0")).Value = $adminPassword |
347 | (.PropertyMapping[] | select(.Key == "nsx_cli_passwd_0")).Value = $cliPassword |
348 | (.PropertyMapping[] | select(.Key == "nsx_isSSHEnabled")).Value = "True" |
349 | (.PropertyMapping[] | select(.Key == "nsx_allowSSHRootLogin")).Value = "True"
350 | EOF
351 |
352 | jq \
353 | --arg vmName "${NSX_T_EDGE_VM_NAME_PREFIX}-0${instance_index}" \
354 | --arg mgmt_network "$MGMT_PORTGROUP" \
355 | --arg portgroup_ext "$NSX_T_EDGE_PORTGROUP_EXT" \
356 | --arg portgroup_transport "$NSX_T_EDGE_PORTGROUP_TRANSPORT" \
357 | --arg deployment_size "$NSX_T_EDGE_DEPLOY_SIZE" \
358 | --arg hostname "${NSX_T_EDGE_HOST_PREFIX}-0${instance_index}" \
359 | --arg dnsServer "$EDGE_DNSSERVER" \
360 | --arg dnsdomain "$EDGE_DNSDOMAIN" \
361 | --arg ntpServer "$EDGE_NTPSERVERS" \
362 | --arg gateway "$EDGE_DEFAULTGATEWAY" \
363 | --arg ip "$instance_ip" \
364 | --arg netmask "$EDGE_NETMASK" \
365 | --arg adminName "admin" \
366 | --arg cliPassword "$NSX_T_EDGE_ROOT_PWD" \
367 | --arg adminPassword "$NSX_T_EDGE_ROOT_PWD" \
368 | --from-file /tmp/nsx_edge_filters \
369 | /tmp/nsx-edge-import.json > /tmp/nsx-edge-0${count}-ova-options.json
370 |
371 | #cat /tmp/nsx-edge-ova-options.json
372 | echo "/tmp/nsx-edge-0${count}-ova-options.json"
373 | }
374 |
--------------------------------------------------------------------------------
/functions/uninstall-nsx-t-v2.1-vibs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | esxcli software vib remove --no-live-install -n nsxa -n nsx-hyperbus -n nsx-nestdb \
4 | -n nsxcli -n nsx-exporter -n nsx-netcpa -n nsx-da -n nsx-nestdb-libs -n nsx-rpc-libs \
5 | -n nsx-metrics-libs -n nsx-lldp -n nsx-ctxteng -n nsx-aggservice -n nsx-common-libs \
6 | -n nsx-esx-datapath -n nsx-host -n nsx-support-bundle-client -n nsx-platform-client \
7 | -n nsx-sfhc -n nsx-mpa -n nsx-python-gevent -n nsx-python-greenlet -n nsx-python-protobuf \
8 | -n nsx-shared-libs -n epsec-mux
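# Optional sanity check one could run manually (an assumption, not part of the script):
# list the NSX vibs present on the host before and after removal with:
#   esxcli software vib list | grep -i nsx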
9 |
--------------------------------------------------------------------------------
/functions/uninstall-nsx-t-v2.2-vibs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | esxcli software vib remove --no-live-install -n nsxa -n nsx-hyperbus -n nsx-nestdb \
4 | -n nsxcli -n nsx-exporter -n nsx-netcpa -n nsx-da -n nsx-nestdb-libs -n nsx-rpc-libs \
5 | -n nsx-metrics-libs -n nsx-lldp -n nsx-ctxteng -n nsx-aggservice -n nsx-common-libs \
6 | -n nsx-esx-datapath -n nsx-host -n nsx-support-bundle-client -n nsx-platform-client \
7 | -n nsx-sfhc -n nsx-mpa -n nsx-python-gevent -n nsx-python-greenlet -n nsx-python-protobuf \
8 | -n nsx-shared-libs -n epsec-mux -n nsx-proxy -n nsx-profiling-libs -n nsx-opsagent
9 |
--------------------------------------------------------------------------------
/functions/uninstall-nsx-vibs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Execute uninstall nsx vibs from Esxi host
3 | hosts:
4 | - all
5 | user: root
6 | tasks:
7 | - name: Execute uninstall NSX-T v2.1 vibs script
8 | script: ./uninstall-nsx-t-vibs.sh
9 |
--------------------------------------------------------------------------------
/pipelines/nsx-t-for-canned-pks-params.yml:
--------------------------------------------------------------------------------
1 | # Minio offline bucket params
2 | iaas: vsphere
3 | final_s3_endpoint: ((final_s3_endpoint)) # EDIT the minio address/port
4 | final_s3_bucket: ((final_s3_bucket))
5 | final_s3_access_key_id: ((final_s3_access_key_id))
6 | final_s3_secret_access_key: ((final_s3_secret_access_key))
7 | offline_run_id: ((offline_run_id))
8 | nsx_t_installer: ((nsx_t_installer))
9 |
10 | enable_ansible_debug: false # set value to true for verbose output from ansible
11 | nsx_t_installer: ((nsx_t_installer)) # Set to name of installer or env or any value so resources can be identified
12 | nsx_t_version: 2.1
13 |
14 | # vCenter details to deploy the Mgmt OVAs (Mgr, Edge, Controller)
15 | vcenter_host: ((vcenter_host)) # EDIT - this is for deployment of the ovas for the mgmt plane
16 | vcenter_usr: ((vcenter_usr)) # EDIT - this is for deployment of the ovas for the mgmt plane
17 | vcenter_pwd: ((vcenter_pwd)) # EDIT - this is for deployment of the ovas for the mgmt plane
18 | vcenter_datacenter: ((vcenter_datacenter)) # EDIT
19 | vcenter_datastore: ((vcenter_datastore)) # EDIT
20 | vcenter_cluster: ((nsx_t_mgmt_vcenter_cluster)) # EDIT
21 | vcenter_manager: ((vcenter_host)) # EDIT
22 | vcenter_rp: ((nsx_t_vcenter_rp)) # EDIT - can be blank - resource pool where mgmt plane vms would get deployed
23 |
24 | # OVA general network settings
25 | ntpservers: ((ntpservers)) # EDIT
26 | mgmt_portgroup: ((mgmt_portgroup)) # EDIT
27 | dnsserver: ((dnsserver)) # EDIT
28 | dnsdomain: ((dnsdomain)) # EDIT
29 | defaultgateway: ((defaultgateway)) # EDIT
30 | netmask: ((netmask)) # EDIT
31 |
32 | # The Esxi Hosts can be added to transport nodes in two ways:
33 | # a) specify the esxi hosts individually - first checked for
34 | #    b) or use compute_manager_configs to add hosts under a specific vcenter cluster as transport nodes
35 |
36 | # Specify passwd of the esxi hosts that should be used for nsx-t
37 | esxi_hosts_root_pwd: ((esxi_hosts_root_pwd)) # EDIT - Root password for the esxi hosts
38 |
39 | esxi_hosts_config:
40 |
41 | # Listing the individual hosts that need to be used
42 | # esxi_hosts_config: |
43 | # esxi_hosts:
44 | # - name: esxi-host1.corp.local.io
45 | # ip: 10.13.12.10
46 | # root_pwd: rootPasswd
47 | # - name: esxi-host2.corp.local.io
48 | # ip: 10.13.12.11
49 | # root_pwd: rootPasswd
50 |
51 | # Handle multiple vcenter and compute clusters for Esxi Hosts
52 | # This would override esxi_hosts_config
53 | compute_manager_configs: |
54 | compute_managers:
55 | - vcenter_name: ((vcenter_host))
56 | vcenter_host: ((vcenter_host))
57 | vcenter_usr: ((vcenter_usr))
58 | vcenter_pwd: ((vcenter_pwd))
59 | clusters:
60 | # Multiple clusters under same vcenter can be specified
61 | - vcenter_cluster: ((compute_vcenter_cluster))
62 | overlay_profile_mtu: 1600 # Min 1600
63 | overlay_profile_vlan: ((nsx_t_overlay_profile_vlan)) # VLAN ID for the TEP/Overlay network
64 | uplink_vmnics: ((nsx_t_esxi_vmnics))
65 |
66 | nsx_t_controllers_config:
67 |
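# Optional: spread the controllers across clusters/resource pools instead of the single
# mgmt cluster above. Illustrative sketch only (keys follow what create_hosts.sh and
# deploy_ova_using_govc.sh read via shyaml; the ips and cluster names are placeholders):
# nsx_t_controllers_config: |
#   controllers:
#     vm_name_prefix: 'NSX-T Controller'
#     host_prefix: nsx-t-ctl
#     root_pwd: ((nsx_t_controller_root_pwd))
#     cluster_pwd: ((nsx_t_controller_root_pwd))
#     members:                      # 1 or 3 members to maintain quorum
#       - ip: 10.13.12.51
#         cluster: Cluster1
#         resource_pool: nsx-rp
#         datastore: datastore1
#       - ip: 10.13.12.52
#         cluster: Cluster2
#         resource_pool: nsx-rp
#         datastore: datastore2
#       - ip: 10.13.12.53
#         cluster: Cluster3
#         resource_pool: nsx-rp
#         datastore: datastore3
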
68 | # Using a separate vCenter to add Edges
69 | # Edge Specific vCenter settings
70 | # If Edges are going to use the same vCenter as Mgr, then don't set any of the following properties
71 | # edge_vcenter_host: ((vcenter_host)) # EDIT - If filled, then Edges would use this separate vcenter
72 | # edge_vcenter_usr: ((vcenter_usr)) # EDIT - Use Edge specific vCenter
73 | # edge_vcenter_pwd: ((vcenter_pwd)) # EDIT - Use Edge specific vCenter
74 | # edge_vcenter_datacenter: ((vcenter_datacenter)) # EDIT - Use Edge specific vCenter
75 | # edge_vcenter_datastore: ((vcenter_datastore)) # EDIT - Use Edge specific vCenter
76 | # edge_vcenter_cluster: ((vcenter_cluster)) # EDIT - Use Edge specific vCenter
77 | # edge_vcenter_rp: ((nsx_t_vcenter_rp)) # EDIT - Use Edge specific vCenter
78 | # edge_ntpservers: ((ntpservers)) # EDIT - Use Edge specific vCenter
79 | # edge_mgmt_portgroup: ((mgmt_portgroup)) # EDIT - Use Edge specific vCenter
80 | # edge_dnsserver: ((dnsserver)) # EDIT - Use Edge specific vCenter
81 | # edge_dnsdomain: ((dnsdomain)) # EDIT - Use Edge specific vCenter
82 | # edge_defaultgateway: ((defaultgateway)) # EDIT - Use Edge specific vCenter
83 | # edge_netmask: ((netmask)) # EDIT - Use Edge specific vCenter
84 | edge_vcenter_host:
85 | edge_vcenter_usr:
86 | edge_vcenter_pwd:
87 | edge_vcenter_datacenter:
88 | edge_vcenter_datastore:
89 | edge_vcenter_cluster:
90 | edge_vcenter_rp:
91 | edge_ntpservers:
92 | edge_mgmt_portgroup:
93 | edge_dnsserver:
94 | edge_dnsdomain:
95 | edge_defaultgateway:
96 | edge_netmask:
97 |
98 | # Edit the following parameters
99 | nsx_t_manager_host_name: nsx-t-mgr.((dnsdomain)) # Set as FQDN, will be used also as certificate common name
100 | nsx_t_manager_vm_name: 'NSX-T Mgr' # Can have spaces
101 | nsx_t_manager_ip: ((nsx_t_manager_ip))
102 | nsx_t_manager_admin_user: admin
103 | nsx_t_manager_admin_pwd: ((nsx_t_manager_admin_pwd)) # Min 8 chars, with upper, lower, number and special character
104 | nsx_t_manager_root_pwd: ((nsx_t_manager_admin_pwd)) # Min 8 chars, with upper, lower, number and special character
105 |
106 | # Following properties can be used for deploying controller to same cluster/rp
107 | nsx_t_controller_host_prefix: nsx-t-ctl # Without spaces; generated controller hostnames would be nsx-t-ctl-01.corp.local.io, ...
108 | nsx_t_controller_vm_name_prefix: 'NSX-T Controller' # Generated controller VM names would be "NSX-T Controller-01", ...
109 | nsx_t_controller_ips: ((nsx_t_controller_ips)) # Should be 1 or 3 ips to maintain quorum for Controller Cluster
110 | nsx_t_controller_root_pwd: ((nsx_t_controller_root_pwd)) # Min 8 chars, with upper, lower, number and special character
111 | nsx_t_controller_cluster_pwd: ((nsx_t_controller_root_pwd)) # Min 8 chars, with upper, lower, number and special character
112 |
113 | nsx_t_edge_host_prefix: nsx-t-edge # Without spaces; generated edge hostnames would be nsx-t-edge-01.corp.local.io, ...
114 | nsx_t_edge_vm_name_prefix: 'NSX-T Edge' # Generated edge VM names would be "NSX-T Edge-01", ...
115 | nsx_t_edge_ips: ((nsx_t_edge_ips)) # comma separated ips, requires min 2 for HA
116 | nsx_t_edge_root_pwd: ((nsx_t_edge_root_pwd))
117 | nsx_t_edge_portgroup_ext: ((nsx_t_edge_portgroup_ext)) # For external routing
118 | nsx_t_edge_portgroup_transport: ((nsx_t_edge_portgroup_transport)) # For TEP/overlay
119 |
120 | # If ova deployment succeeded but controller membership failed or edges didn't get to join for any reason,
121 | # enable rerun of the configure controllers
122 | rerun_configure_controllers: true # set it to true if you want to rerun the configure controllers
123 |                                   # (as part of the base ova install job) even though ova deployment succeeded
124 |
125 | # Edge network interfaces
126 | # Network1 and Network4 are for mgmt and not used for uplink
127 | # Network2 is for external uplink
128 | # Network3 is for overlay
129 | # Change only if necessary
130 | nsx_t_edge_overlay_interface: fp-eth1 # Wired to Network3
131 | nsx_t_edge_uplink_interface: fp-eth0 # Wired to Network2
132 |
133 | # Tunnel endpoint network ip pool - change pool_end based on # of members in the tep pool
134 | nsx_t_tep_pool_name: tep-ip-pool
135 | nsx_t_tep_pool_cidr: 192.168.213.0/24
136 | nsx_t_tep_pool_gateway: 192.168.213.1
137 | nsx_t_tep_pool_start: 192.168.213.10
138 | nsx_t_tep_pool_end: 192.168.213.200
139 | #nsx_t_tep_pool_nameserver: 192.168.213.2 # Not required
140 |
141 | # Memory reservation is turned ON by default with the NSX-T OVAs.
142 | # This would mean a deployment of an edge or a mgr would reserve full memory
143 | # leading to memory constraints
144 | # if nsx_t_keep_reservation is set to true, reservation stays ON - recommended for production setups.
145 | # if nsx_t_keep_reservation is set to false, reservation is turned OFF - recommended for POCs and smaller setups.
146 | nsx_t_keep_reservation: true # true for Prod setup
147 |
148 | nsx_t_mgr_deploy_size: small # Recommended for a bare-bones demo, smallest setup
149 | #nsx_t_edge_deploy_size: medium # Recommended for POCs, smaller setup (# of lbrs very limited)
150 | nsx_t_edge_deploy_size: large # Recommended when 4 small lbrs are required
151 |
152 | nsx_t_overlay_hostswitch: hostswitch2
153 | nsx_t_vlan_hostswitch: hostswitch1
154 |
155 | # For Edge External uplink
156 | # Check with the network admin if it's tagged or untagged
157 | nsx_t_transport_vlan: 0
158 |
159 | nsx_t_vlan_transport_zone: vlan-tz
160 | nsx_t_overlay_transport_zone: overlay-tz
161 |
162 | nsx_t_pas_ncp_cluster_tag: pks1
163 |
164 | nsx_t_edge_cluster: 'Edge Cluster'
165 |
166 | # For outbound uplink connection used by Edge
167 | nsx_t_single_uplink_profile_name: "single-uplink-profile"
168 | nsx_t_single_uplink_profile_mtu: 1600 # Min 1600
169 | nsx_t_single_uplink_profile_vlan: 0 # Default
170 |
171 | # For internal overlay connection used by Esxi hosts
172 | nsx_t_overlay_profile_name: "host-overlay-profile"
173 | nsx_t_overlay_profile_mtu: 1600 # Min 1600
174 | nsx_t_overlay_profile_vlan: ((nsx_t_overlay_profile_vlan)) # VLAN ID for the TEP/Overlay network
175 |
176 | # Specify an unused vmnic on esxi host to be used for nsx-t
177 | # can be multiple vmnics separated by comma
178 | nsx_t_esxi_vmnics: ((nsx_t_esxi_vmnics)) # vmnic1,vmnic2...
179 |
180 | # Configs for T0Router (only one per run), T1Routers, Logical switches and tags...
181 | # Make sure the ncp/cluster tag matches the one defined at the top level.
182 | # Expects at least 2 edge instances (installed above) to be available for HA
183 | nsx_t_t0router_spec: |
184 | t0_router:
185 | name: DefaultT0Router
186 | ha_mode: 'ACTIVE_STANDBY'
187 | # Specify the edges to be used for hosting the T0Router instance
188 | edge_indexes:
189 | # Index starts from 1 -> denoting nsx-t-edge-01
190 | primary: 1 # Index for primary edge to be used
191 | secondary: 2 # Index for secondary edge to be used
192 | vip: ((nsx_t_t0router_vip))
193 | ip1: ((nsx_t_t0router_ip_edge1))
194 | ip2: ((nsx_t_t0router_ip_edge2))
195 | vlan_uplink: 0
196 | static_route:
197 | next_hop: ((nsx_t_t0router_gateway))
198 | network: 0.0.0.0/0
199 | admin_distance: 1
200 | tags:
201 | ncp/cluster: pks1 # Should match the top level ncp/cluster tag value
202 | ncp/shared_resource: 'true' # required for PKS
203 |
204 | # T1 Logical Router with associated logical switches
205 | # Add additional or comment off unnecessary t1 routers and switches as needed
206 | # Can have 3 different setups:
207 | # 1: One shared mgmt T1 Router and infra logical switch for both PKS & PAS
208 | # 2: One mgmt T1 Router and infra logical switch for either PKS or PAS..
209 | # Comment off the T1 router not required
210 | # 3: Separate mgmt T1 Router and infra logical switch for each PKS and PAS..
211 | # Add additional T1Router-Mgmt2 as needed with its infra logical switch
212 | # Name the routers and logical switches and cidrs differently to avoid conflict
213 | nsx_t_t1router_logical_switches_spec: |
214 | t1_routers:
215 | # Sample for PKS - Ops Mgr, Bosh Director
216 | - name: T1-Router-PKS-Infra
217 | switches:
218 | - name: PKS-Infra
219 | logical_switch_gw: 172.23.1.1 # Last octet should be 1 rather than 0
220 | subnet_mask: 24
221 |
222 | # Hosts the PKS Controller & Harbor
223 | - name: T1Router-PKS-Services
224 | switches:
225 | - name: PKS-Services
226 | logical_switch_gw: 172.23.2.1 # Last octet should be 1 rather than 0
227 | subnet_mask: 24
228 |
229 |
230 | # Make sure the ncp/cluster tag matches the one defined on the T0 Router
231 | # Additionally, the ncp/ha tag should be set for the HA Spoof Guard profile
232 | nsx_t_ha_switching_profile_spec: |
233 | ha_switching_profiles:
234 | - name: HASwitchingProfile
235 | tags:
236 | ncp/cluster: 'pks1' # Should match the top level ncp/cluster tag value
237 | ncp/ha: 'true' # Required for HA
238 |
239 |
240 | # Make sure the ncp/cluster tag matches the one defined on the T0 Router
241 | # Add additional container ip blocks as needed
242 | nsx_t_container_ip_block_spec: |
243 | container_ip_blocks:
244 | # For PKS clusters
245 | - name: node-container-ip-block-pks
246 | cidr: 172.24.0.0/14
247 | ncp/shared_resource: 'true'
248 | # No tags for this block
249 | - name: pod-container-ip-block-pks
250 | cidr: 172.28.0.0/14
251 | ncp/shared_resource: 'true'
252 |
253 |
254 |
255 | # Make sure the ncp/cluster tag matches the one defined on the T0 Router for PAS
256 | # Make sure the ncp/shared_resource tag is set to true for PKS
257 | # Additionally, the ncp/external tag should be set for the external facing ip pool
258 | # Add additional external ip pools as needed
259 | # Change the cidr, gateway, nameserver, dns_domain as needed
260 | # The cidr, gateway, start/end ips should be reachable via static or bgp routing through T0 router
261 | nsx_t_external_ip_pool_spec: |
262 | external_ip_pools:
263 | - name: snat-vip-pool-for-pks
264 |     cidr: ((nsx_t_external_ip_pool_cidr)) # Should be a valid cidr (e.g. a /24), matching the externally exposed uplink
265 | gateway: ((nsx_t_external_ip_pool_gateway))
266 | start: ((nsx_t_external_ip_pool_start)) # Should not include gateway
267 | end: ((nsx_t_external_ip_pool_end)) # Should not include gateway
268 | nameserver: ((dnsserver))
269 |     dns_domain: ((dnsdomain))
270 | tags:
271 | ncp/external: 'true' # Required for external facing ips
272 | ncp/shared_resource: 'true' # Required for PKS
273 |
274 | # Specify NAT rules
275 | # Provide matching dnat and snat rule for specific vms by using ips for destination_network and translated_network that need to be exposed like Ops Mgr
276 | # Provide snat rules for outbound from either container or a vm by specifying the source_network (cidr) and translated network ip
277 | # Details of NAT:
278 | # Ingress into Ops Mgr: External IP of Ops Mgr -> DNAT -> translated into internal ip of Ops Mgr
279 | # Egress from Ops Mgr: internal ip of Ops Mgr -> SNAT -> translated into external IP of Ops Mgr
280 | # Ingress into PKS API Controller: External IP of controller -> DNAT -> translated into internal ip of controller
281 | # Egress from PKS API Controller: internal ip of controller -> SNAT -> translated into external IP of controller
282 | # Egress from PKS-Infra: cidr of pks infra -> SNAT -> translated into some external IP
283 | # Egress from PKS-Clusters: cidr of PKS-Clusters -> SNAT -> translated into some external IP
284 | nsx_t_nat_rules_spec: |
285 | nat_rules:
286 | # Sample entry for PKS PKS-Infra network - outbound/egress
287 | - t0_router: DefaultT0Router
288 | nat_type: snat
289 | source_network: 172.23.1.0/24 # PKS Infra network cidr
290 | translated_network: ((nsx_t_nat_rules_snat_translated_ip_for_all)) # SNAT External Address for PKS networks
291 | rule_priority: 8001 # Lower priority
292 |
293 | # Sample entry for PKS PKS-Clusters network - outbound/egress
294 | - t0_router: DefaultT0Router
295 | nat_type: snat
296 | source_network: 172.23.2.0/24 # PKS Clusters network cidr
297 | translated_network: ((nsx_t_nat_rules_snat_translated_ip_for_all)) # SNAT External Address for PKS networks
298 | rule_priority: 8001 # Lower priority
299 |
300 | # Sample entry for allowing inbound to PKS Ops manager - ingress
301 | - t0_router: DefaultT0Router
302 | nat_type: dnat
303 | destination_network: ((nsx_t_nat_rules_opsman_ip)) # External IP address for PKS opsmanager
304 | translated_network: 172.23.1.5 # Internal IP of PKS Ops manager
305 | rule_priority: 1024 # Higher priority
306 | # Sample entry for allowing outbound from PKS Ops Mgr to external - egress
307 | - t0_router: DefaultT0Router
308 | nat_type: snat
309 | source_network: 172.23.1.5 # Internal IP of PKS opsmanager
310 | translated_network: ((nsx_t_nat_rules_opsman_ip)) # External IP address for PKS opsmanager
311 | rule_priority: 1024 # Higher priority
312 |
313 | nsx_t_csr_request_spec: |
314 | csr_request:
315 | #common_name not required - would use nsx_t_manager_host_name
316 | org_name: ((csr_request_org_name)) # EDIT
317 | org_unit: ((csr_request_org_unit)) # EDIT
318 | country: ((csr_request_country)) # EDIT
319 | state: ((csr_request_state)) # EDIT
320 | city: ((csr_request_city)) # EDIT
321 | key_size: 2048 # Valid values: 2048 or 3072
322 | algorithm: RSA # Valid values: RSA or DSA
323 |
324 | nsx_t_lbr_spec:
325 | nsx_t_monitor_spec:
326 | nsx_t_nsgroup_spec:
327 |
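328 | # Sizing note for the TEP pool above (illustrative, assuming the default values shown):
329 | # pool_start 192.168.213.10 through pool_end 192.168.213.200 yields 200 - 10 + 1 = 191
330 | # usable TEP addresses. Each transport node (ESXi host or edge) consumes at least one
331 | # TEP ip from this pool (typically one per configured vmnic/uplink), so adjust pool_end
332 | # based on the number of hosts and edges being added.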
--------------------------------------------------------------------------------
/pipelines/nsx-t-install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Use a YAML anchor reference instead of repeating the nsx-t params
3 | nsx_t_gen_params: &nsx-t-gen-params
4 | NSX_T_INSTALLER: ((nsx_t_installer))
5 | NSX_T_VERSION: ((nsx_t_version))
6 | VCENTER_HOST: ((vcenter_host))
7 | VCENTER_USR: ((vcenter_usr))
8 | VCENTER_PWD: ((vcenter_pwd))
9 | VCENTER_DATACENTER: ((vcenter_datacenter))
10 | VCENTER_DATASTORE: ((vcenter_datastore))
11 | VCENTER_CLUSTER: ((vcenter_cluster))
12 | VCENTER_MANAGER: ((vcenter_manager))
13 | VCENTER_RP: ((vcenter_rp))
14 | NTPSERVERS: ((ntpservers))
15 | MGMT_PORTGROUP: ((mgmt_portgroup))
16 | DNSSERVER: ((dnsserver))
17 | DNSDOMAIN: ((dnsdomain))
18 | DEFAULTGATEWAY: ((defaultgateway))
19 | NETMASK: ((netmask))
20 | ESXI_HOSTS_ROOT_PWD: ((esxi_hosts_root_pwd))
21 | ESXI_HOSTS_CONFIG: ((esxi_hosts_config))
22 | COMPUTE_MANAGER_CONFIGS: ((compute_manager_configs))
23 | EDGE_VCENTER_HOST: ((edge_vcenter_host))
24 | EDGE_VCENTER_USR: ((edge_vcenter_usr))
25 | EDGE_VCENTER_PWD: ((edge_vcenter_pwd))
26 | EDGE_VCENTER_RP: ((edge_vcenter_rp))
27 | EDGE_VCENTER_DATACENTER: ((edge_vcenter_datacenter))
28 | EDGE_VCENTER_DATASTORE: ((edge_vcenter_datastore))
29 | EDGE_VCENTER_CLUSTER: ((edge_vcenter_cluster))
30 | EDGE_NTPSERVERS: ((edge_ntpservers))
31 | EDGE_MGMT_PORTGROUP: ((edge_mgmt_portgroup))
32 | EDGE_DNSSERVER: ((edge_dnsserver))
33 | EDGE_DNSDOMAIN: ((edge_dnsdomain))
34 | EDGE_DEFAULTGATEWAY: ((edge_defaultgateway))
35 | EDGE_NETMASK: ((edge_netmask))
36 | NSX_T_MANAGER_FQDN: ((nsx_t_manager_host_name))
37 | NSX_T_MANAGER_VM_NAME: ((nsx_t_manager_vm_name))
38 | NSX_T_MANAGER_HOST_NAME: ((nsx_t_manager_host_name))
39 | NSX_T_MANAGER_IP: ((nsx_t_manager_ip))
40 | NSX_T_MANAGER_ADMIN_USER: ((nsx_t_manager_admin_user))
41 | NSX_T_MANAGER_ADMIN_PWD: ((nsx_t_manager_admin_pwd))
42 | NSX_T_MANAGER_ROOT_PWD: ((nsx_t_manager_root_pwd))
43 | NSX_T_CONTROLLER_HOST_PREFIX: ((nsx_t_controller_host_prefix))
44 | NSX_T_CONTROLLER_VM_NAME_PREFIX: ((nsx_t_controller_vm_name_prefix))
45 | NSX_T_CONTROLLER_IPS: ((nsx_t_controller_ips))
46 | NSX_T_CONTROLLER_ROOT_PWD: ((nsx_t_controller_root_pwd))
47 | NSX_T_CONTROLLER_CLUSTER_PWD: ((nsx_t_controller_cluster_pwd))
48 | NSX_T_CONTROLLERS_CONFIG: ((nsx_t_controllers_config))
49 | NSX_T_EDGE_HOST_PREFIX: ((nsx_t_edge_host_prefix))
50 | NSX_T_EDGE_VM_NAME_PREFIX: ((nsx_t_edge_vm_name_prefix))
51 | NSX_T_EDGE_IPS: ((nsx_t_edge_ips))
52 | NSX_T_EDGE_ROOT_PWD: ((nsx_t_edge_root_pwd))
53 | NSX_T_EDGE_PORTGROUP_EXT: ((nsx_t_edge_portgroup_ext))
54 | NSX_T_EDGE_PORTGROUP_TRANSPORT: ((nsx_t_edge_portgroup_transport))
55 | NSX_T_KEEP_RESERVATION: ((nsx_t_keep_reservation))
56 | NSX_T_MGR_DEPLOY_SIZE: ((nsx_t_mgr_deploy_size))
57 | NSX_T_EDGE_DEPLOY_SIZE: ((nsx_t_edge_deploy_size))
58 | NSX_T_TEP_POOL_NAME: ((nsx_t_tep_pool_name))
59 | NSX_T_TEP_POOL_CIDR: ((nsx_t_tep_pool_cidr))
60 | NSX_T_TEP_POOL_GATEWAY: ((nsx_t_tep_pool_gateway))
61 | NSX_T_TEP_POOL_START: ((nsx_t_tep_pool_start))
62 | NSX_T_TEP_POOL_END: ((nsx_t_tep_pool_end))
63 | NSX_T_EDGE_OVERLAY_INTERFACE: ((nsx_t_edge_overlay_interface))
64 | NSX_T_EDGE_UPLINK_INTERFACE: ((nsx_t_edge_uplink_interface))
65 | NSX_T_OVERLAY_TRANSPORT_ZONE: ((nsx_t_overlay_transport_zone))
66 | NSX_T_VLAN_TRANSPORT_ZONE: ((nsx_t_vlan_transport_zone))
67 | NSX_T_SINGLE_UPLINK_PROFILE_NAME: ((nsx_t_single_uplink_profile_name))
68 | NSX_T_SINGLE_UPLINK_PROFILE_MTU: ((nsx_t_single_uplink_profile_mtu))
69 | NSX_T_SINGLE_UPLINK_PROFILE_VLAN: ((nsx_t_single_uplink_profile_vlan))
70 | NSX_T_EDGE_CLUSTER: ((nsx_t_edge_cluster))
71 | NSX_T_OVERLAY_PROFILE_NAME: ((nsx_t_overlay_profile_name))
72 | NSX_T_OVERLAY_PROFILE_MTU: ((nsx_t_overlay_profile_mtu))
73 | NSX_T_OVERLAY_PROFILE_VLAN: ((nsx_t_overlay_profile_vlan))
74 | NSX_T_ESXI_VMNICS: ((nsx_t_esxi_vmnics))
75 | NSX_T_OVERLAY_HOSTSWITCH: ((nsx_t_overlay_hostswitch))
76 | NSX_T_VLAN_HOSTSWITCH: ((nsx_t_vlan_hostswitch))
77 | NSX_T_TRANSPORT_VLAN: ((nsx_t_transport_vlan))
78 | NSX_T_T0ROUTER_SPEC: ((nsx_t_t0router_spec))
79 | NSX_T_PAS_NCP_CLUSTER_TAG: ((nsx_t_pas_ncp_cluster_tag))
80 | NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC: ((nsx_t_t1router_logical_switches_spec))
81 | NSX_T_HA_SWITCHING_PROFILE_SPEC: ((nsx_t_ha_switching_profile_spec))
82 | NSX_T_CONTAINER_IP_BLOCK_SPEC: ((nsx_t_container_ip_block_spec))
83 | NSX_T_EXTERNAL_IP_POOL_SPEC: ((nsx_t_external_ip_pool_spec))
84 | NSX_T_NAT_RULES_SPEC: ((nsx_t_nat_rules_spec))
85 | NSX_T_CSR_REQUEST_SPEC: ((nsx_t_csr_request_spec))
86 | NSX_T_LBR_SPEC: ((nsx_t_lbr_spec))
87 | NSX_T_MONITOR_SPEC: ((nsx_t_monitor_spec))
88 | NSX_T_NSGROUP_SPEC: ((nsx_t_nsgroup_spec))
89 | ENABLE_ANSIBLE_DEBUG: ((enable_ansible_debug))
90 | RERUN_CONFIGURE_CONTROLLERS: ((rerun_configure_controllers))
91 |
92 | groups:
93 |
94 | - name: full-install
95 | jobs:
96 | - install-nsx-t
97 | - add-nsx-t-routers
98 | - config-nsx-t-extras
99 |
100 | - name: base-install
101 | jobs:
102 | - standalone-install-nsx-t
103 |
104 | - name: add-routers
105 | jobs:
106 | - standalone-add-nsx-t-routers
107 |
108 | - name: config-nsx-t-extras
109 | jobs:
110 | - standalone-config-nsx-t-extras
111 |
112 | - name: wipe-env
113 | jobs:
114 | - uninstall-nsx-t
115 |
116 | resource_types:
117 | - name: file-url
118 | type: docker-image
119 | source:
120 | repository: pivotalservices/concourse-curl-resource
121 | tag: latest
122 |
123 |
124 | resources:
125 | - name: nsx-t-gen-pipeline
126 | type: git
127 | source:
128 | uri: https://github.com/sparameswaran/nsx-t-gen.git
129 | branch: master
130 | params:
131 | disable_git_lfs: true
132 |
133 | - name: nsxt-ansible
134 | type: git
135 | source:
136 | uri: https://github.com/sparameswaran/nsxt-ansible
137 | branch: master
138 |
139 | # Download NSX-T 2.1 bits from
140 | # https://my.vmware.com/group/vmware/details?downloadGroup=NSX-T-210&productId=673
141 |
142 | - name: nsx-mgr-ova
143 | type: file-url
144 | source:
145 | #username: username
146 | #password: password
147 | url: ((nsx_image_webserver))/((nsx_mgr_ova))
148 | filename: ((nsx_mgr_ova))
149 | skip_ssl_verification: true
150 |
151 | - name: nsx-ctrl-ova
152 | type: file-url
153 | source:
154 | #username: username
155 | #password: password
156 | url: ((nsx_image_webserver))/((nsx_controller_ova))
157 | filename: ((nsx_controller_ova))
158 | skip_ssl_verification: true
159 |
160 | - name: nsx-edge-ova
161 | type: file-url
162 | source:
163 | #username: username
164 | #password: password
165 | url: ((nsx_image_webserver))/((nsx_edge_ova))
166 | filename: ((nsx_edge_ova))
167 | skip_ssl_verification: true
168 |
169 | # Download from https://my.vmware.com/group/vmware/details?productId=614&downloadGroup=OVFTOOL420#
170 | - name: ovftool
171 | type: file-url
172 | source:
173 | #username: username
174 | #password: password
175 | url: ((nsx_image_webserver))/((ovftool_image))
176 | filename: ((ovftool_image))
177 | skip_ssl_verification: true
178 |
179 | jobs:
180 |
181 | - name: install-nsx-t
182 | plan:
183 | - aggregate:
184 | - get: nsx-t-gen-pipeline
185 | - get: nsxt-ansible
186 | - get: nsx-mgr-ova
187 | - get: nsx-ctrl-ova
188 | - get: nsx-edge-ova
189 | - get: ovftool
190 |
191 | - task: install-nsx-t
192 | file: nsx-t-gen-pipeline/tasks/install-nsx-t/task.yml
193 | params: *nsx-t-gen-params
194 |
195 | - name: add-nsx-t-routers
196 | plan:
197 | - aggregate:
198 | - get: nsx-t-gen-pipeline
199 | - get: nsxt-ansible
200 | params: {globs: []}
201 | passed: [install-nsx-t]
202 | trigger: true
203 |
204 | - task: add-nsx-t-routers
205 | file: nsx-t-gen-pipeline/tasks/add-nsx-t-routers/task.yml
206 | params: *nsx-t-gen-params
207 |
208 | - name: config-nsx-t-extras
209 | plan:
210 | - aggregate:
211 | - get: nsx-t-gen-pipeline
212 | - get: nsxt-ansible
213 | params: {globs: []}
214 | passed: [add-nsx-t-routers]
215 | trigger: true
216 |
217 | - task: config-nsx-t-extras
218 | file: nsx-t-gen-pipeline/tasks/config-nsx-t-extras/task.yml
219 | params: *nsx-t-gen-params
220 |
221 | - name: standalone-install-nsx-t
222 | plan:
223 | - aggregate:
224 | - get: nsx-t-gen-pipeline
225 | - get: nsxt-ansible
226 | - get: nsx-mgr-ova
227 | - get: nsx-ctrl-ova
228 | - get: nsx-edge-ova
229 | - get: ovftool
230 |
231 | - task: install-nsx-t
232 | file: nsx-t-gen-pipeline/tasks/install-nsx-t/task.yml
233 | params: *nsx-t-gen-params
234 |
235 | - name: standalone-add-nsx-t-routers
236 | plan:
237 | - aggregate:
238 | - get: nsx-t-gen-pipeline
239 | - get: nsxt-ansible
240 | params: {globs: []}
241 |
242 | - task: add-nsx-t-routers
243 | file: nsx-t-gen-pipeline/tasks/add-nsx-t-routers/task.yml
244 | params: *nsx-t-gen-params
245 |
246 | - name: standalone-config-nsx-t-extras
247 | plan:
248 | - aggregate:
249 | - get: nsx-t-gen-pipeline
250 | - get: nsxt-ansible
251 | params: {globs: []}
252 |
253 | - task: config-nsx-t-extras
254 | file: nsx-t-gen-pipeline/tasks/config-nsx-t-extras/task.yml
255 | params: *nsx-t-gen-params
256 |
257 | - name: uninstall-nsx-t
258 | plan:
259 | - aggregate:
260 | - get: nsx-t-gen-pipeline
261 | - get: nsxt-ansible
262 | params: {globs: []}
263 |
264 | - task: uninstall-nsx-t
265 | file: nsx-t-gen-pipeline/tasks/uninstall-nsx-t/task.yml
266 | params: *nsx-t-gen-params
267 |
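268 | # Illustrative usage (the Concourse target name `main` and pipeline name are placeholders;
269 | # the -l file should be a filled-in copy of pipelines/nsx-t-for-pks-params.yml):
270 | #   fly -t main set-pipeline -p install-nsx-t \
271 | #     -c pipelines/nsx-t-install.yml \
272 | #     -l pipelines/nsx-t-for-pks-params.yml
273 | #   fly -t main unpause-pipeline -p install-nsx-t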
--------------------------------------------------------------------------------
/pipelines/offline-nsx-t-install-v2.1.yml:
--------------------------------------------------------------------------------
1 | groups:
2 | - jobs: [install-nsx-t, add-nsx-t-routers, config-nsx-t-extras]
3 | name: full-install
4 | - jobs: [standalone-install-nsx-t]
5 | name: base-install
6 | - jobs: [standalone-add-nsx-t-routers]
7 | name: add-routers
8 | - jobs: [standalone-config-nsx-t-extras]
9 | name: config-nsx-t-extras
10 | - jobs: [uninstall-nsx-t]
11 | name: wipe-env
12 | jobs:
13 | - name: install-nsx-t
14 | plan:
15 | - aggregate:
16 | - {get: nsx-t-gen-pipeline-tarball}
17 | - {get: nsxedgegen-nsx-t-gen-worker-v2.1-docker-tarball}
18 | - {get: nsxt-ansible-tarball}
19 | - {get: nsx-mgr-ova}
20 | - {get: nsx-ctrl-ova}
21 | - {get: nsx-edge-ova}
22 | - {get: ovftool}
23 | - config:
24 | image_resource:
25 | params: {unpack: true}
26 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
27 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/docker/nsxedgegen-nsx-t-gen-worker-v2.1-docker.(.*),
28 | secret_access_key: ((final_s3_secret_access_key))}
29 | type: s3
30 | inputs:
31 | - {name: nsx-mgr-ova}
32 | - {name: nsxt-ansible-tarball}
33 | - {name: nsx-t-gen-pipeline-tarball}
34 | - {name: nsx-ctrl-ova}
35 | - {name: nsx-edge-ova}
36 | - {name: ovftool}
37 | outputs:
38 | - {name: nsx-t-gen-pipeline}
39 | - {name: nsxt-ansible}
40 | platform: linux
41 | run:
42 | args: [-ec, 'find . -name "version" -exec rm {} \; ;find . -name "url" -exec
43 | rm {} \; ;for file in $(find . -name "*-1.0");do new_file=$(echo $file
44 | | sed -e ''s/-1.0$//g'');mv ${file} ${new_file};done;ls -R;cd nsxt-ansible;
45 | tar -zxf ../nsxt-ansible-tarball/*.tgz; cd ..;cd nsx-t-gen-pipeline; tar
46 | -zxf ../nsx-t-gen-pipeline-tarball/*.tgz; cd ..;for token in $(env | grep
47 | ''='' | grep "^[A-Z]*" | grep ''=null$'' | sed -e ''s/=.*//g'');do export
48 | ${token}=""; done;echo Starting main task execution!!;nsx-t-gen-pipeline/tasks/install-nsx-t/task.sh']
49 | path: /bin/bash
50 | params: &id001 {COMPUTE_MANAGER_CONFIGS: ((compute_manager_configs)), DEFAULTGATEWAY: ((defaultgateway)),
51 | DNSDOMAIN: ((dnsdomain)), DNSSERVER: ((dnsserver)), EDGE_DEFAULTGATEWAY: ((edge_defaultgateway)),
52 | EDGE_DNSDOMAIN: ((edge_dnsdomain)), EDGE_DNSSERVER: ((edge_dnsserver)), EDGE_MGMT_PORTGROUP: ((edge_mgmt_portgroup)),
53 | EDGE_NETMASK: ((edge_netmask)), EDGE_NTPSERVERS: ((edge_ntpservers)), EDGE_VCENTER_CLUSTER: ((edge_vcenter_cluster)),
54 | EDGE_VCENTER_DATACENTER: ((edge_vcenter_datacenter)), EDGE_VCENTER_DATASTORE: ((edge_vcenter_datastore)),
55 | EDGE_VCENTER_HOST: ((edge_vcenter_host)), EDGE_VCENTER_PWD: ((edge_vcenter_pwd)),
56 | EDGE_VCENTER_USR: ((edge_vcenter_usr)), EDGE_VCENTER_RP: ((edge_vcenter_rp)), ENABLE_ANSIBLE_DEBUG: ((enable_ansible_debug)),
57 | ESXI_HOSTS_CONFIG: ((esxi_hosts_config)), ESXI_HOSTS_ROOT_PWD: ((esxi_hosts_root_pwd)),
58 | MGMT_PORTGROUP: ((mgmt_portgroup)), NETMASK: ((netmask)), NSX_T_CONTAINER_IP_BLOCK: null,
59 | NSX_T_CONTAINER_IP_BLOCK_SPEC: ((nsx_t_container_ip_block_spec)), NSX_T_CONTROLLERS_CONFIG: ((nsx_t_controllers_config)),
60 | NSX_T_CONTROLLER_CLUSTER_PWD: ((nsx_t_controller_cluster_pwd)), NSX_T_CONTROLLER_HOST_PREFIX: ((nsx_t_controller_host_prefix)),
61 | NSX_T_CONTROLLER_IPS: ((nsx_t_controller_ips)), NSX_T_CONTROLLER_ROOT_PWD: ((nsx_t_controller_root_pwd)),
62 | NSX_T_CONTROLLER_VM_NAME_PREFIX: ((nsx_t_controller_vm_name_prefix)), NSX_T_CSR_REQUEST_SPEC: ((nsx_t_csr_request_spec)),
63 | NSX_T_EDGE_CLUSTER: ((nsx_t_edge_cluster)), NSX_T_EDGE_DEPLOY_SIZE: ((nsx_t_edge_deploy_size)),
64 | NSX_T_EDGE_HOST_PREFIX: ((nsx_t_edge_host_prefix)), NSX_T_EDGE_IPS: ((nsx_t_edge_ips)),
65 | NSX_T_EDGE_OVERLAY_INTERFACE: ((nsx_t_edge_overlay_interface)), NSX_T_EDGE_PORTGROUP_EXT: ((nsx_t_edge_portgroup_ext)),
66 | NSX_T_EDGE_PORTGROUP_TRANSPORT: ((nsx_t_edge_portgroup_transport)), NSX_T_EDGE_ROOT_PWD: ((nsx_t_edge_root_pwd)),
67 | NSX_T_EDGE_UPLINK_INTERFACE: ((nsx_t_edge_uplink_interface)), NSX_T_EDGE_VM_NAME_PREFIX: ((nsx_t_edge_vm_name_prefix)),
68 | NSX_T_ESXI_VMNICS: ((nsx_t_esxi_vmnics)), NSX_T_EXTERNAL_IP_POOL: null, NSX_T_EXTERNAL_IP_POOL_SPEC: ((nsx_t_external_ip_pool_spec)),
69 | NSX_T_HA_SWITCHING_PROFILE_SPEC: ((nsx_t_ha_switching_profile_spec)), NSX_T_HOSTSWITCH: null,
70 | NSX_T_INSTALLER: ((nsx_t_installer)), NSX_T_KEEP_RESERVATION: ((nsx_t_keep_reservation)),
71 | NSX_T_LBR_SPEC: ((nsx_t_lbr_spec)), NSX_T_MANAGER_ADMIN_PWD: ((nsx_t_manager_admin_pwd)),
72 | NSX_T_MANAGER_ADMIN_USER: ((nsx_t_manager_admin_user)), NSX_T_MANAGER_FQDN: ((nsx_t_manager_host_name)),
73 | NSX_T_MANAGER_HOST_NAME: ((nsx_t_manager_host_name)), NSX_T_MANAGER_IP: ((nsx_t_manager_ip)),
74 | NSX_T_MANAGER_ROOT_PWD: ((nsx_t_manager_root_pwd)), NSX_T_MANAGER_VM_NAME: ((nsx_t_manager_vm_name)),
75 | NSX_T_MGR_DEPLOY_SIZE: ((nsx_t_mgr_deploy_size)), NSX_T_MONITOR_SPEC: ((nsx_t_monitor_spec)),
76 | NSX_T_NSGROUP_SPEC: ((nsx_t_nsgroup_spec)), NSX_T_NAT_RULES_SPEC: ((nsx_t_nat_rules_spec)),
77 | NSX_T_OVERLAY_HOSTSWITCH: ((nsx_t_overlay_hostswitch)), NSX_T_OVERLAY_PROFILE_MTU: ((nsx_t_overlay_profile_mtu)),
78 | NSX_T_OVERLAY_PROFILE_NAME: ((nsx_t_overlay_profile_name)), NSX_T_OVERLAY_PROFILE_VLAN: ((nsx_t_overlay_profile_vlan)),
79 | NSX_T_OVERLAY_TRANSPORT_ZONE: ((nsx_t_overlay_transport_zone)), NSX_T_PAS_NCP_CLUSTER_TAG: ((nsx_t_pas_ncp_cluster_tag)),
80 | NSX_T_SINGLE_UPLINK_PROFILE_MTU: ((nsx_t_single_uplink_profile_mtu)), NSX_T_SINGLE_UPLINK_PROFILE_NAME: ((nsx_t_single_uplink_profile_name)),
81 | NSX_T_SINGLE_UPLINK_PROFILE_VLAN: ((nsx_t_single_uplink_profile_vlan)), NSX_T_SIZING_SPEC: null,
82 | NSX_T_T0ROUTER: null, NSX_T_T0ROUTER_HA_MODE: null, NSX_T_T0ROUTER_SPEC: ((nsx_t_t0router_spec)),
83 | NSX_T_T1ROUTER_LOGICAL_SWITCHES: null, NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC: ((nsx_t_t1router_logical_switches_spec)),
84 | NSX_T_TEP_POOL_CIDR: ((nsx_t_tep_pool_cidr)), NSX_T_TEP_POOL_END: ((nsx_t_tep_pool_end)),
85 | NSX_T_TEP_POOL_GATEWAY: ((nsx_t_tep_pool_gateway)), NSX_T_TEP_POOL_NAME: ((nsx_t_tep_pool_name)),
86 | NSX_T_TEP_POOL_NAMESERVER: null, NSX_T_TEP_POOL_START: ((nsx_t_tep_pool_start)),
87 | NSX_T_TRANSPORT_VLAN: ((nsx_t_transport_vlan)), NSX_T_VERSION: ((nsx_t_version)),
88 | NSX_T_VLAN_HOSTSWITCH: ((nsx_t_vlan_hostswitch)), NSX_T_VLAN_TRANSPORT_ZONE: ((nsx_t_vlan_transport_zone)),
89 | NTPSERVERS: ((ntpservers)), RERUN_CONFIGURE_CONTROLLERS: ((rerun_configure_controllers)),
90 | VCENTER_CLUSTER: ((vcenter_cluster)), VCENTER_DATACENTER: ((vcenter_datacenter)),
91 | VCENTER_DATASTORE: ((vcenter_datastore)), VCENTER_HOST: ((vcenter_host)), VCENTER_MANAGER: ((vcenter_manager)),
92 | VCENTER_PWD: ((vcenter_pwd)), VCENTER_RP: ((vcenter_rp)), VCENTER_USR: ((vcenter_usr))}
93 | task: offlined-install-nsx-t
94 | - name: add-nsx-t-routers
95 | plan:
96 | - aggregate:
97 | - {get: nsx-t-gen-pipeline-tarball}
98 | - {get: nsxedgegen-nsx-t-gen-worker-v2.1-docker-tarball}
99 | - get: nsxt-ansible-tarball
100 | params:
101 | globs: []
102 | passed: [install-nsx-t]
103 | trigger: true
104 | - config:
105 | image_resource:
106 | params: {unpack: true}
107 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
108 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/docker/nsxedgegen-nsx-t-gen-worker-v2.1-docker.(.*),
109 | secret_access_key: ((final_s3_secret_access_key))}
110 | type: s3
111 | inputs:
112 | - {name: nsx-t-gen-pipeline-tarball}
113 | - {name: nsxt-ansible-tarball}
114 | outputs:
115 | - {name: nsx-t-gen-pipeline}
116 | - {name: nsxt-ansible}
117 | platform: linux
118 | run:
119 | args: [-ec, 'find . -name "version" -exec rm {} \; ;find . -name "url" -exec
120 | rm {} \; ;for file in $(find . -name "*-1.0");do new_file=$(echo $file
121 | | sed -e ''s/-1.0$//g'');mv ${file} ${new_file};done;ls -R;cd nsxt-ansible;
122 | tar -zxf ../nsxt-ansible-tarball/*.tgz; cd ..;cd nsx-t-gen-pipeline; tar
123 | -zxf ../nsx-t-gen-pipeline-tarball/*.tgz; cd ..;for token in $(env | grep
124 | ''='' | grep "^[A-Z]*" | grep ''=null$'' | sed -e ''s/=.*//g'');do export
125 | ${token}=""; done;echo Starting main task execution!!;nsx-t-gen-pipeline/tasks/add-nsx-t-routers/task.sh']
126 | path: /bin/bash
127 | params: *id001
128 | task: offlined-add-nsx-t-routers
129 | - name: config-nsx-t-extras
130 | plan:
131 | - aggregate:
132 | - {get: nsx-t-gen-pipeline-tarball}
133 | - {get: nsxedgegen-nsx-t-gen-worker-v2.1-docker-tarball}
134 | - get: nsxt-ansible-tarball
135 | params:
136 | globs: []
137 | passed: [add-nsx-t-routers]
138 | trigger: true
139 | - config:
140 | image_resource:
141 | params: {unpack: true}
142 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
143 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/docker/nsxedgegen-nsx-t-gen-worker-v2.1-docker.(.*),
144 | secret_access_key: ((final_s3_secret_access_key))}
145 | type: s3
146 | inputs:
147 | - {name: nsx-t-gen-pipeline-tarball}
148 | - {name: nsxt-ansible-tarball}
149 | outputs:
150 | - {name: nsx-t-gen-pipeline}
151 | platform: linux
152 | run:
153 | args: [-ec, 'find . -name "version" -exec rm {} \; ;find . -name "url" -exec
154 | rm {} \; ;for file in $(find . -name "*-1.0");do new_file=$(echo $file
155 | | sed -e ''s/-1.0$//g'');mv ${file} ${new_file};done;ls -R;cd nsx-t-gen-pipeline;
156 | tar -zxf ../nsx-t-gen-pipeline-tarball/*.tgz; cd ..;for token in $(env
157 | | grep ''='' | grep "^[A-Z]*" | grep ''=null$'' | sed -e ''s/=.*//g'');do
158 | export ${token}=""; done;echo Starting main task execution!!;nsx-t-gen-pipeline/tasks/config-nsx-t-extras/task.sh']
159 | path: /bin/bash
160 | params: *id001
161 | task: offlined-config-nsx-t-extras
162 | - name: standalone-install-nsx-t
163 | plan:
164 | - aggregate:
165 | - {get: nsx-t-gen-pipeline-tarball}
166 | - {get: nsxedgegen-nsx-t-gen-worker-v2.1-docker-tarball}
167 | - {get: nsxt-ansible-tarball}
168 | - {get: nsx-mgr-ova}
169 | - {get: nsx-ctrl-ova}
170 | - {get: nsx-edge-ova}
171 | - {get: ovftool}
172 | - config:
173 | image_resource:
174 | params: {unpack: true}
175 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
176 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/docker/nsxedgegen-nsx-t-gen-worker-v2.1-docker.(.*),
177 | secret_access_key: ((final_s3_secret_access_key))}
178 | type: s3
179 | inputs:
180 | - {name: nsx-mgr-ova}
181 | - {name: nsxt-ansible-tarball}
182 | - {name: nsx-t-gen-pipeline-tarball}
183 | - {name: nsx-ctrl-ova}
184 | - {name: nsx-edge-ova}
185 | - {name: ovftool}
186 | outputs:
187 | - {name: nsx-t-gen-pipeline}
188 | - {name: nsxt-ansible}
189 | platform: linux
190 | run:
191 | args: [-ec, 'find . -name "version" -exec rm {} \; ;find . -name "url" -exec
192 | rm {} \; ;for file in $(find . -name "*-1.0");do new_file=$(echo $file
193 | | sed -e ''s/-1.0$//g'');mv ${file} ${new_file};done;ls -R;cd nsxt-ansible;
194 | tar -zxf ../nsxt-ansible-tarball/*.tgz; cd ..;cd nsx-t-gen-pipeline; tar
195 | -zxf ../nsx-t-gen-pipeline-tarball/*.tgz; cd ..;for token in $(env | grep
196 | ''='' | grep "^[A-Z]*" | grep ''=null$'' | sed -e ''s/=.*//g'');do export
197 | ${token}=""; done;echo Starting main task execution!!;nsx-t-gen-pipeline/tasks/install-nsx-t/task.sh']
198 | path: /bin/bash
199 | params: *id001
200 | task: offlined-install-nsx-t
201 | - name: standalone-add-nsx-t-routers
202 | plan:
203 | - aggregate:
204 | - {get: nsx-t-gen-pipeline-tarball}
205 | - {get: nsxedgegen-nsx-t-gen-worker-v2.1-docker-tarball}
206 | - get: nsxt-ansible-tarball
207 | params:
208 | globs: []
209 | - config:
210 | image_resource:
211 | params: {unpack: true}
212 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
213 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/docker/nsxedgegen-nsx-t-gen-worker-v2.1-docker.(.*),
214 | secret_access_key: ((final_s3_secret_access_key))}
215 | type: s3
216 | inputs:
217 | - {name: nsx-t-gen-pipeline-tarball}
218 | - {name: nsxt-ansible-tarball}
219 | outputs:
220 | - {name: nsx-t-gen-pipeline}
221 | - {name: nsxt-ansible}
222 | platform: linux
223 | run:
224 | args: [-ec, 'find . -name "version" -exec rm {} \; ;find . -name "url" -exec
225 | rm {} \; ;for file in $(find . -name "*-1.0");do new_file=$(echo $file
226 | | sed -e ''s/-1.0$//g'');mv ${file} ${new_file};done;ls -R;cd nsxt-ansible;
227 | tar -zxf ../nsxt-ansible-tarball/*.tgz; cd ..;cd nsx-t-gen-pipeline; tar
228 | -zxf ../nsx-t-gen-pipeline-tarball/*.tgz; cd ..;for token in $(env | grep
229 | ''='' | grep "^[A-Z]*" | grep ''=null$'' | sed -e ''s/=.*//g'');do export
230 | ${token}=""; done;echo Starting main task execution!!;nsx-t-gen-pipeline/tasks/add-nsx-t-routers/task.sh']
231 | path: /bin/bash
232 | params: *id001
233 | task: offlined-add-nsx-t-routers
234 | - name: standalone-config-nsx-t-extras
235 | plan:
236 | - aggregate:
237 | - {get: nsx-t-gen-pipeline-tarball}
238 | - {get: nsxedgegen-nsx-t-gen-worker-v2.1-docker-tarball}
239 | - get: nsxt-ansible-tarball
240 | params:
241 | globs: []
242 | - config:
243 | image_resource:
244 | params: {unpack: true}
245 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
246 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/docker/nsxedgegen-nsx-t-gen-worker-v2.1-docker.(.*),
247 | secret_access_key: ((final_s3_secret_access_key))}
248 | type: s3
249 | inputs:
250 | - {name: nsx-t-gen-pipeline-tarball}
251 | - {name: nsxt-ansible-tarball}
252 | outputs:
253 | - {name: nsx-t-gen-pipeline}
254 | platform: linux
255 | run:
256 | args: [-ec, 'find . -name "version" -exec rm {} \; ;find . -name "url" -exec
257 | rm {} \; ;for file in $(find . -name "*-1.0");do new_file=$(echo $file
258 | | sed -e ''s/-1.0$//g'');mv ${file} ${new_file};done;ls -R;cd nsx-t-gen-pipeline;
259 | tar -zxf ../nsx-t-gen-pipeline-tarball/*.tgz; cd ..;for token in $(env
260 | | grep ''='' | grep "^[A-Z]*" | grep ''=null$'' | sed -e ''s/=.*//g'');do
261 | export ${token}=""; done;echo Starting main task execution!!;nsx-t-gen-pipeline/tasks/config-nsx-t-extras/task.sh']
262 | path: /bin/bash
263 | params: *id001
264 | task: offlined-config-nsx-t-extras
265 | - name: uninstall-nsx-t
266 | plan:
267 | - aggregate:
268 | - {get: nsx-t-gen-pipeline-tarball}
269 | - {get: nsxedgegen-nsx-t-gen-worker-v2.1-docker-tarball}
270 | - get: nsxt-ansible-tarball
271 | params:
272 | globs: []
273 | - config:
274 | image_resource:
275 | params: {unpack: true}
276 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
277 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/docker/nsxedgegen-nsx-t-gen-worker-v2.1-docker.(.*),
278 | secret_access_key: ((final_s3_secret_access_key))}
279 | type: s3
280 | inputs:
281 | - {name: nsx-t-gen-pipeline-tarball}
282 | - {name: nsxt-ansible-tarball}
283 | outputs:
284 | - {name: nsx-t-gen-pipeline}
285 | platform: linux
286 | run:
287 | args: [-ec, 'find . -name "version" -exec rm {} \; ;find . -name "url" -exec
288 | rm {} \; ;for file in $(find . -name "*-1.0");do new_file=$(echo $file
289 | | sed -e ''s/-1.0$//g'');mv ${file} ${new_file};done;ls -R;cd nsx-t-gen-pipeline;
290 | tar -zxf ../nsx-t-gen-pipeline-tarball/*.tgz; cd ..;for token in $(env
291 | | grep ''='' | grep "^[A-Z]*" | grep ''=null$'' | sed -e ''s/=.*//g'');do
292 | export ${token}=""; done;echo Starting main task execution!!;nsx-t-gen-pipeline/tasks/uninstall-nsx-t/task.sh']
293 | path: /bin/bash
294 | params: *id001
295 | task: offlined-uninstall-nsx-t
296 | nsx_t_gen_params: *id001
297 | resource_types: []
298 | resources:
299 | - name: nsx-t-gen-pipeline-tarball
300 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
301 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/git/nsx-t-gen-pipeline-tar(.*).tgz,
302 | secret_access_key: ((final_s3_secret_access_key))}
303 | type: s3
304 | - name: nsxt-ansible-tarball
305 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
306 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/git/nsxt-ansible-tar(.*).tgz,
307 | secret_access_key: ((final_s3_secret_access_key))}
308 | type: s3
309 | - name: nsx-mgr-ova
310 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
311 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/vmware/nsx-mgr-ova-*-(.*),
312 | secret_access_key: ((final_s3_secret_access_key))}
313 | type: s3
314 | - name: nsx-ctrl-ova
315 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
316 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/vmware/nsx-ctrl-ova-*-(.*),
317 | secret_access_key: ((final_s3_secret_access_key))}
318 | type: s3
319 | - name: nsx-edge-ova
320 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
321 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/vmware/nsx-edge-ova-*-(.*),
322 | secret_access_key: ((final_s3_secret_access_key))}
323 | type: s3
324 | - name: ovftool
325 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
326 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/vmware/ovftool-*-(.*),
327 | secret_access_key: ((final_s3_secret_access_key))}
328 | type: s3
329 | - name: nsxedgegen-nsx-t-gen-worker-v2.1-docker-tarball
330 | source: {access_key_id: ((final_s3_access_key_id)), bucket: ((final_s3_bucket)),
331 | endpoint: ((final_s3_endpoint)), regexp: ((offline_run_id))/resources/docker/nsxedgegen-nsx-t-gen-worker-v2.1-docker.(.*),
332 | secret_access_key: ((final_s3_secret_access_key))}
333 | type: s3
334 |
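335 | # Note on the inline run scripts above (descriptive): the
336 | # `for token in $(env | grep '=' | grep "^[A-Z]*" | grep '=null$' ...)` loop finds any
337 | # environment variable whose value is the literal string "null" (e.g. the params declared
338 | # as null above, such as NSX_T_CONTAINER_IP_BLOCK, if they arrive that way) and re-exports
339 | # it as an empty string before handing off to the corresponding task.sh.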
--------------------------------------------------------------------------------
/pipelines/user-inputs-for-canned-pks.yml:
--------------------------------------------------------------------------------
1 | # EDIT the parameters
2 | #
3 | # Minio offline bucket params
4 | iaas: vsphere
5 | final_s3_endpoint: http://:/ # EDIT the minio address/port
6 | final_s3_bucket: canned-pks
7 | final_s3_access_key_id:
8 | final_s3_secret_access_key:
9 | # Resources path: //resources//
10 | offline_run_id: "" # Default would be empty string unless there are multiple runs using same bucket
11 | nsx_t_installer: cannedpks
12 |
13 | # Pivnet tokens and tile versions
14 | pivnet_token: asdfjalsdfk2342l
15 | opsman_major_minor_version: ^2.1.6.*$
16 | pks_major_minor_version: ^1.1.*$
17 |
18 | # vCenter details
19 | vcenter_host: vcenter.mycompany.com
20 | vcenter_usr: administrator@vsphere.local
21 | vcenter_pwd: vcenterAdminPassword
22 | vcenter_datacenter: Datacenter
23 | vcenter_datastore: a-xio
24 | nsx_t_vcenter_rp: ""
25 | mgmt_portgroup: 'VM Network'
26 | ntpservers: 1.1.1.1
27 | dnsserver: 1.1.1.1
28 | dnsdomain: mycompany.com
29 | defaultgateway: 1.1.1.1
30 | netmask: 255.255.255.0
31 | esxi_hosts_root_pwd: esxiRootPassword # Esxi Host `root` Password
32 | nsx_t_mgmt_vcenter_cluster: MgmtCluster # vCenter Mgmt Cluster for hosting only the NSX-T Management Plane
33 | compute_vcenter_cluster: Cluster1 # vCenter Compute Cluster to be used for running the managed VMs, including K8s - can be same as the mgmt_cluster
34 |
35 | # NSX-T config
36 | nsx_t_esxi_vmnics: vmnic4 # Esxi Host unused vmnic to be used for nsx
37 | nsx_t_manager_admin_pwd: Managerpwd1! # Mgr `admin` & `root` Password, length >= 8, must include numeric, uppercase and special characters
38 | nsx_t_controller_root_pwd: ControllerPwd!1 # Ctrl root password, must include numeric, uppercase and special characters
39 | nsx_t_edge_root_pwd: Edgepwd1! # Edge root password, must include numeric, uppercase and special characters
40 |
41 | # IPs for Mgr, ctrl, edge
42 | # Following nsx_t_manager_ip address should be mapped to nsx-t-mgr.((dnsdomain))
43 | nsx_t_manager_ip: 1.1.1.1
44 | nsx_t_controller_ips: '1.1.1.2,1.1.1.3,1.1.1.4' # 3 ips from the management network for the Controller instances
45 | nsx_t_edge_ips: '1.1.1.5,1.1.1.6' # 2 or more ips from the management network for the Edge instances
46 |
47 | # NSX Edge Portgroup Details
48 | nsx_t_edge_portgroup_ext: vlan-3508 # Portgroup for the external uplink network
49 | nsx_t_edge_portgroup_transport: vlan-3510 # Portgroup for the internal network for TEP and overlay
50 | nsx_t_overlay_profile_vlan: '3510' # VLAN ID of the internal network used by TEP and overlay
51 |
52 | # NSX T0 Router Details
53 | nsx_t_t0router_ips_cidr: '24'
54 | nsx_t_t0router_vip: 10.1.1.4/24
55 | nsx_t_t0router_ip_edge1: 10.1.1.2/24
56 | nsx_t_t0router_ip_edge2: 10.1.1.3/24
57 | nsx_t_t0router_gateway: 10.1.1.1
58 |
59 | # External IP Pool
60 | # This should be reachable from the external world via the T0 router
61 | # using either BGP or static routes
62 | # First 20 IPs reserved for Ops Mgr, PKS controller, Harbor and others (even PAS GoRouters, SSH...)
63 | nsx_t_external_ip_pool_cidr: 10.100.5.0/24
64 | nsx_t_external_ip_pool_gateway: 10.100.5.1
65 | nsx_t_external_ip_pool_start: 10.100.5.21 # Starting external ip -> Should not include gateway
66 | nsx_t_external_ip_pool_end: 10.100.5.200 # Ending external ip -> Should not include gateway
67 | # Following ip address should be mapped to opsmgr.((dnsdomain))
68 | nsx_t_nat_rules_opsman_ip: 10.100.5.15 # Ops mgr external ip -> should be within the external pool cidr range, but outside of the start and end range of external ip pool
69 | nsx_t_nat_rules_snat_translated_ip_for_all: 10.100.5.10 # Should be within the cidr range, but outside of the start and end range of external ip pool
70 |
71 | # CSR fields for NSX-T Manager
72 | csr_request_org_name: Company
73 | csr_request_org_unit: CustomerXYZ
74 | csr_request_country: US
75 | csr_request_state: CA
76 | csr_request_city: SF
77 |
78 |
79 | # Ops Mgr details
80 | opsman_admin_usr: admin
81 | opsman_admin_password: admin123
82 | om_ssh_pwd: admin123
83 | om_decryption_pwd: admin123
84 | bosh_vm_storage: vsan
85 |
86 | # AZs and Clusters
87 | use_single_az: true # Would use only az_1, ignore rest of the azs
88 | az_1_cluster: Cluster1
89 | # Ignore the rest of the az fields if only using a single az
90 | az_2_cluster: Cluster2
91 | az_3_cluster: Cluster3
92 |
93 | # PKS resource pools, only first one would be used if use_single_az set to true
94 | # Ops mgr would use resource_pool1 by default
95 | # Specified resource pools should exist
96 | resource_pool1: rp1
97 | resource_pool2: rp2
98 | resource_pool3: rp3
99 |
100 | # PKS Tile vcenter cluster
101 | pks_vcenter_cluster_list: Cluster1,Cluster2,Cluster3
102 |
103 | # PKS Controller user & password
104 | pks_api_admin_usr: pksadmin
105 | pks_api_admin_pwd: pksadmin123
106 |
107 | # PKS Controller external ip
108 | # Following ip address should be mapped to api.pks.((dnsdomain))
109 | pks_api_ip: 10.100.5.16 # PKS Controller -> should be within the cidr range, but outside of the start and end range of external ip pool
110 |
111 | # Harbor details
112 | # Harbor would be accessible at harbor.((dnsdomain))
113 | harbor_admin_pwd: harboradmin123
114 | # Following ip address should be mapped to harbor.((dnsdomain))
115 | harbor_app_external_ip: 10.100.5.17 # External exposed IP for Harbor -> should be within the cidr range, but outside of the start and end range of external ip pool
116 | harbor_app_disk_size: 102400 # in MBs
117 |
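118 | # Summary of the DNS records implied by the comments above (illustrative, using the
119 | # sample values in this file; substitute your own dnsdomain and IPs):
120 | #   nsx-t-mgr.mycompany.com -> 1.1.1.1     (nsx_t_manager_ip)
121 | #   opsmgr.mycompany.com    -> 10.100.5.15 (nsx_t_nat_rules_opsman_ip)
122 | #   api.pks.mycompany.com   -> 10.100.5.16 (pks_api_ip)
123 | #   harbor.mycompany.com    -> 10.100.5.17 (harbor_app_external_ip)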
--------------------------------------------------------------------------------
/python/client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # nsx-edge-gen
4 | #
5 | # Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | from __future__ import absolute_import, division, print_function
20 |
21 | __author__ = 'Sabha Parameswaran'
22 |
23 | import sys
24 | import yaml
25 | import json
26 | import requests
27 | import time
28 | from requests.auth import HTTPDigestAuth
29 | from pprint import pprint
30 |
31 | try:
32 | # Python 3
33 | from urllib.parse import urlparse
34 | except ImportError:
35 | # Python 2
36 | from urlparse import urlparse
37 |
38 | requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
39 |
40 | class auth(requests.auth.AuthBase):
41 |
42 | def __init__(self, context):
43 | self.context = context
44 |
45 | def __call__(self, request):
46 | username = self.context.get('admin_user')
47 | password = self.context.get('admin_passwd')
48 | return requests.auth.HTTPBasicAuth(username, password)(request)
49 |
50 | def get_context():
51 | if get_context.context is not None:
52 | return get_context.context
53 | else:
54 |         raise Exception('config not loaded!!')
55 |
56 | get_context.context = None
57 |
58 | def set_context(context):
59 | get_context.context = context
60 |
61 | def get(url, stream=False, check=True):
62 | context = get_context()
63 | url = context.get('url') + url
64 | headers = { 'Accept': 'application/json,text/html,application/xhtml+xml,application/xml' }
65 |
66 | response = requests.get(url, auth=auth(context), verify=False, headers=headers, stream=stream)
67 | check_response(response, check=check)
68 | return response
69 |
70 | def put(url, payload, check=True):
71 | try:
72 | context = get_context()
73 | url = context.get('url') + url
74 | response = requests.put(url, auth=auth(context), verify=False, json=payload)
75 | check_response(response, check=check)
76 | return response
77 | except:
78 | # Squelch Python error during put operations:
79 | # File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", line 314, in _raise_timeout
80 | # if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
81 | # TypeError: __str__ returned non-string (type SysCallError)
82 | #print('Error during put')
83 | return ''
84 |
85 | def post(url, payload, check=True):
86 | context = get_context()
87 | url = context.get('url') + url
88 | response = requests.post(url, auth=auth(context), verify=False, json=payload)
89 | check_response(response, check=check)
90 | return response
91 |
92 | def delete(url, check=True):
93 | context = get_context()
94 | url = context.get('url') + url
95 | overwrite_header = { 'X-Allow-Overwrite': "true" }
96 | response = requests.delete(url, auth=auth(context), headers=overwrite_header, verify=False)
97 | check_response(response, check=check)
98 | return response
99 |
100 | def check_response(response, check=True):
101 | #pprint(vars(response))
102 | #print(response.content)
103 | if check and (response.status_code != requests.codes.ok and response.status_code > 400):
104 |
105 | print('-', response.status_code, response.request.url, file=sys.stderr)
106 | try:
107 | errors = response.json()["errors"]
108 | print('- '+('\n- '.join(json.dumps(errors, indent=4).splitlines())), file=sys.stderr)
109 | except:
110 | print(response.text, file=sys.stderr)
111 | sys.exit(1)
112 |
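113 | # Illustrative usage sketch (hypothetical manager address and credentials): callers such
114 | # as nsx_t_status.py first load a context via set_context(), then use the thin REST
115 | # wrappers against the NSX Manager API:
116 | #
117 | #   import client
118 | #   client.set_context({
119 | #       'url': 'https://nsx-mgr.corp.local',
120 | #       'admin_user': 'admin',
121 | #       'admin_passwd': 'secret',
122 | #   })
123 | #   resp = client.get('/api/v1/fabric/nodes')  # returns a requests.Response
124 | #   print(resp.json()['results'])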
--------------------------------------------------------------------------------
/python/mobclient.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # nsx-edge-gen
4 | #
5 | # Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | __author__ = 'Sabha Parameswaran'
20 |
21 | import base64
22 | import cookielib
23 | import ssl
24 | import requests
25 | import re
26 | import time
27 | from pyquery import PyQuery
28 | from lxml import html, etree
29 | import urllib
30 | import urllib2
31 | from urllib2 import urlopen, Request
32 | from requests.utils import quote
33 |
34 | try:
35 | # Python 3
36 | from urllib.parse import urlparse
37 | except ImportError:
38 | # Python 2
39 | from urlparse import urlparse
40 |
41 | requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
42 |
43 | DEBUG = False
44 |
45 | def get_context():
46 | if get_context.context is not None:
47 | return get_context.context
48 | else:
49 |         raise Exception('config not loaded!!')
50 |
51 | get_context.context = None
52 |
53 | def set_context(context):
54 | get_context.context = context
55 |
56 |
57 | def create_url_opener():
58 | cookies = cookielib.LWPCookieJar()
59 | handlers = [
60 | urllib2.HTTPHandler(debuglevel=1),
61 | urllib2.HTTPSHandler(),
62 | urllib2.HTTPCookieProcessor(cookies)
63 | ]
64 | opener = urllib2.build_opener(*handlers)
65 | return opener
66 |
67 | def createBaseAuthToken(user, passwd):
68 | return base64.b64encode('%s:%s' % (user, passwd))
69 |
70 | def lookupSessionNonce(response):
71 | pq = PyQuery(response)
72 | vmware_session_nonce = ''
73 | hidden_entry = pq('input:hidden')
74 | if hidden_entry.attr('name') == 'vmware-session-nonce' :
75 | vmware_session_nonce = hidden_entry.attr('value')
76 | if DEBUG:
77 | print('vmware-session-nonce: ' + vmware_session_nonce)
78 | return vmware_session_nonce
79 |
80 |
81 | def init_vmware_session():
82 | context = get_context()
83 |
84 | vcenterMobServiceInstanceUrl = '/mob/?moid=ServiceInstance&method=retrieveContent'
85 | data = None #'vmware-session-nonce': context['vmware-session-nonce']}
86 | cookies = None
87 | serviceInstanceGetRespSock = invokeVCenterMob(context, vcenterMobServiceInstanceUrl, 'GET', data, cookies)
88 |
89 | serviceInstanceGetRespInfo = serviceInstanceGetRespSock.info()
90 | cookies = serviceInstanceGetRespSock.info()['Set-Cookie']
91 | serviceInstanceGetResp = serviceInstanceGetRespSock.read()
92 |
93 | serviceInstanceGetRespSock.close()
94 |
95 | if DEBUG:
96 | print('Cookies: ' + cookies)
97 | print('Info: ' + str(serviceInstanceGetRespInfo))
98 | print('vCenter MOB response :\n' + str(serviceInstanceGetResp)+ '\n-----\n')
99 |
100 | #if response.status_code != requests.codes.ok:
101 | # raise Error('Unable to connect to vcenter, error message: ' + vcenterServiceInstanceResponse.text)
102 |
103 | vmware_session_nonce = lookupSessionNonce(serviceInstanceGetResp)
104 | context['vmware-session-nonce'] = vmware_session_nonce
105 | context['vmware-cookies'] = cookies
106 | return
107 |
108 | def remove_nsxt_extension_from_vcenter():
109 |
110 | context = get_context()
111 | init_vmware_session()
112 |
113 | cookies = context['vmware-cookies']
114 |
115 | data = { 'vmware-session-nonce': context['vmware-session-nonce']}
116 | data['extensionKey'] = 'com.vmware.nsx.management.nsxt'
117 | vcenterUnregisterExtensionUrl = '/mob/?moid=ExtensionManager&method=unregisterExtension'
118 |
119 | mobRespSock = invokeVCenterMob(context, vcenterUnregisterExtensionUrl, 'POST', data, cookies)
120 | mobResp = mobRespSock.read()
121 | mobRespSock.close()
122 |
123 | if DEBUG:
124 | print('\n\n Mob Response for url[' + vcenterUnregisterExtensionUrl + ']:\n' + mobResp)
125 |
126 | return
127 |
128 | def invokeVCenterMob(vcenter_ctx, url, method, data, cookies):
129 | vcenterOriginUrl = 'https://' + vcenter_ctx['address']
130 | vcenterMobUrl = vcenterOriginUrl + url
131 |
132 | urlctx = create_non_verify_sslcontext()
133 | opener = create_url_opener()
134 | #data = urllib.urlencode({ 'vmware-session-nonce': context['vmware-session-nonce']})
135 | if data is not None and method == 'POST':
136 | req = urllib2.Request(vcenterMobUrl, data=urllib.urlencode(data))#, auth=auth, data=data, verify=False, headers=headers)
137 | else:
138 | req = urllib2.Request(vcenterMobUrl)
139 |
140 | base64string = createBaseAuthToken(vcenter_ctx.get('admin_user'), vcenter_ctx.get('admin_passwd'))
141 | #print('Url: {}'.format(vcenterMobUrl))
142 |
143 | req.add_header('Authorization', "Basic %s" % base64string)
144 | req.add_header('User-Agent', "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.30 (KHTML, like Gecko) Ubuntu/11.04 Chromium/12.0.742.112 Chrome/12.0.742.112 Safari/534.30")
145 | req.add_header('Accept-Charset', 'ISO-8859-1,utf-8;q=0.7,*;q=0.3')
146 | req.add_header('Accept-Language', 'en-US,en;q=0.8')
147 |     req.add_header("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
148 | # req.add_header('Referer', vcenterMobUrl)
149 | # req.add_header('Origin', vcenterOriginUrl)
150 | # req.add_header('Host', vcenter_ctx['address'])
151 |
152 | if cookies is not None:
153 | req.add_header("Cookie", cookies)
154 | req.get_method = lambda: method
155 |
156 | sock = urllib2.urlopen(req, context=urlctx)
157 | return sock
158 |
159 | def escape(html):
160 |     """Returns the given HTML with ampersands, quotes and angle brackets encoded."""
161 |     return html.replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;')
162 |
163 | def html_decode(s):
164 | """
165 |     Returns the ASCII decoded version of the given HTML string. This does
166 |     NOT remove normal HTML tags like <p>.
167 |     """
168 |     htmlCodes = (
169 |             ("'", '&#39;'),
170 |             ('"', '&quot;'),
171 |             ('>', '&gt;'),
172 |             ('<', '&lt;'),
173 |             ('&', '&amp;')
174 |     )
174 | )
175 | for code in htmlCodes:
176 | s = s.replace(code[1], code[0])
177 | return s
178 |
179 | def create_non_verify_sslcontext():
180 | urlctx = ssl.create_default_context()
181 | urlctx.check_hostname = False
182 | urlctx.verify_mode = ssl.CERT_NONE
183 | return urlctx
184 |
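185 | # Illustrative usage sketch (hypothetical vCenter address and credentials): the module
186 | # expects a context with the vCenter address and admin credentials, then drives the
187 | # vCenter managed object browser (MOB) to unregister the NSX-T extension:
188 | #
189 | #   import mobclient
190 | #   mobclient.set_context({
191 | #       'address': 'vcenter.corp.local',
192 | #       'admin_user': 'administrator@vsphere.local',
193 | #       'admin_passwd': 'secret',
194 | #   })
195 | #   mobclient.remove_nsxt_extension_from_vcenter()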
--------------------------------------------------------------------------------
/python/nsx_t_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # nsx-edge-gen
4 | #
5 | # Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | __author__ = 'Sabha Parameswaran'
20 |
21 | import copy, sys, os
22 | import json
23 | import yaml
24 | from pprint import pprint
25 | import time
26 | import client
27 | from datetime import datetime
28 |
29 | DEBUG=True
30 |
31 | RETRY_INTERVAL = 45
32 | MAX_RETRY_CHECK = 6
33 |
34 | API_VERSION = '/api/v1'
35 |
36 | EDGE_CLUSTERS_ENDPOINT = '%s%s' % (API_VERSION, '/edge-clusters')
37 | TRANSPORT_NODES_ENDPOINT = '%s%s' % (API_VERSION, '/transport-nodes')
38 | FABRIC_NODES_ENDPOINT = '%s%s' % (API_VERSION, '/fabric/nodes')
39 |
40 | COMPUTE_COLLECTION_FABRIC_TEMPLATES_ENDPOINT = '%s%s' % (API_VERSION, '/fabric/compute-collection-fabric-templates')
41 | COMPUTE_COLLECTION_TRANSPORT_NODES_ENDPOINT = '%s%s' % (API_VERSION, '/compute-collection-transport-node-templates')
42 |
43 | esxi_host_map = {}
44 | edge_transport_node_map = {}
45 |
46 |
47 | def init():
48 |
49 | nsx_mgr_ip = os.getenv('NSX_T_MANAGER_IP')
50 | nsx_mgr_user = os.getenv('NSX_T_MANAGER_ADMIN_USER', 'admin')
51 | nsx_mgr_pwd = os.getenv('NSX_T_MANAGER_ROOT_PWD')
52 | nsx_mgr_context = {
53 | 'admin_user' : nsx_mgr_user,
54 | 'url': 'https://' + nsx_mgr_ip,
55 | 'admin_passwd' : nsx_mgr_pwd
56 | }
57 | client.set_context(nsx_mgr_context)
58 |
59 | def identify_edges_and_hosts():
60 | retries = 0
61 | failed_uninstalls = {}
62 | bailout = False
63 | install_failed = False
64 |
65 | fabric_nodes_api_endpoint = FABRIC_NODES_ENDPOINT
66 | fabric_nodes_resp = client.get(fabric_nodes_api_endpoint)
67 | # Check periodically for install status
68 | print 'Checking status of the NSX-T Fabric Nodes Addition!\n'
69 |
70 | while (retries < MAX_RETRY_CHECK and not bailout ):
71 | still_in_progress = False
72 |         print '{} Checking Status (attempt {})\n'.format(datetime.now(), retries + 1)
73 |
74 | for fabric_node in fabric_nodes_resp.json()['results']:
75 | #print 'Fabric Node: {}'.format(fabric_node)
76 | fabric_node_state_url = '%s/%s/status' % (fabric_nodes_api_endpoint, fabric_node['id'])
77 | fabric_node_state_resp = client.get(fabric_node_state_url)
78 | message = fabric_node_state_resp.json()
79 | print ' Node: {}, IP: {}, Type: {}, Status: {}'.format(
80 | fabric_node['display_name'],
81 | fabric_node['ip_addresses'][0],
82 | fabric_node['resource_type'],
83 | message['host_node_deployment_status']
84 | )
85 |
86 |             # Don't bail out when things are still in progress
87 | if message['host_node_deployment_status'] in ['INSTALL_IN_PROGRESS']:
88 | still_in_progress = True
89 |
90 | if message['host_node_deployment_status'] in [ 'INSTALL_FAILED', 'INSTALL_SUCCESSFUL']:
91 | bailout = True
92 | if message['host_node_deployment_status'] == 'INSTALL_FAILED':
93 | install_failed = True
94 | #print '\nERROR!! Install of NSX-T Modules on the ESXi Hosts failed!!'
95 | #print 'Check the NSX Manager for reasons for the failure, Exiting!!\n'
96 |
97 | # If anything still in progress, let it continue, retry the check status
98 | # Ignore other failed or success states till all are completed
99 | if still_in_progress:
100 | bailout = False
101 | print ' Sleeping for {} seconds before checking status of installs!\n'.format(RETRY_INTERVAL)
102 | time.sleep(RETRY_INTERVAL)
103 | retries += 1
104 |
105 | if retries == MAX_RETRY_CHECK:
106 | print '\nWARNING!! Max retries reached for checking if hosts have been added to NSX-T.\n'
107 | install_failed = True
108 |
109 | if install_failed == True:
110 | print '\nERROR!! Install of NSX-T Modules on the ESXi Hosts failed!!'
111 |         print 'Something went wrong while configuring the Hosts as part of the NSX-T Fabric, check NSX-T Mgr Fabric -> Nodes status'
112 | print 'Check the NSX Manager for reasons for the failure, Exiting!!'
113 | else:
114 | print '\nAll the ESXi host addition as transport nodes successfull!!'
115 |
116 | print ''
117 | return install_failed
118 |
119 | def main():
120 | init()
121 | install_failed = identify_edges_and_hosts()
122 |
123 | if install_failed:
124 | sys.exit(1)
125 | else:
126 | sys.exit(0)
127 |
128 | if __name__ == '__main__':
129 | main()
130 |
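131 | # Usage sketch (hedged note, not part of the original script): the add-nsx-t-routers task
132 | # runs this after the ansible plays; the values below are illustrative placeholders only.
133 | #   export NSX_T_MANAGER_IP=10.0.0.10            # assumed address
134 | #   export NSX_T_MANAGER_ADMIN_USER=admin
135 | #   export NSX_T_MANAGER_ROOT_PWD='<redacted>'
136 | #   python nsx_t_status.py   # exits 0 only when all fabric nodes report INSTALL_SUCCESSFUL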
--------------------------------------------------------------------------------
/python/nsx_t_wipe.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # nsx-edge-gen
4 | #
5 | # Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
6 | #
7 | # Licensed under the Apache License, Version 2.0 (the "License");
8 | # you may not use this file except in compliance with the License.
9 | # You may obtain a copy of the License at
10 | #
11 | # http://www.apache.org/licenses/LICENSE-2.0
12 | #
13 | # Unless required by applicable law or agreed to in writing, software
14 | # distributed under the License is distributed on an "AS IS" BASIS,
15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 | # See the License for the specific language governing permissions and
17 | # limitations under the License.
18 |
19 | __author__ = 'Sabha Parameswaran'
20 |
21 | import copy, sys, os
22 | import json
23 | import yaml
24 | from pprint import pprint
25 | import time
26 | import client
27 | import mobclient
28 |
29 | DEBUG=True
30 | esxi_hosts_file = 'esxi_hosts'
31 |
32 | RETRY_INTERVAL = 30
33 | MAX_RETRY_CHECK = 4
34 |
35 | API_VERSION = '/api/v1'
36 |
37 | EDGE_CLUSTERS_ENDPOINT = '%s%s' % (API_VERSION, '/edge-clusters')
38 | TRANSPORT_NODES_ENDPOINT = '%s%s' % (API_VERSION, '/transport-nodes')
39 | FABRIC_NODES_ENDPOINT = '%s%s' % (API_VERSION, '/fabric/nodes')
40 | ROUTERS_ENDPOINT = '%s%s' % (API_VERSION, '/logical-routers')
41 | ROUTER_PORTS_ENDPOINT = '%s%s' % (API_VERSION, '/logical-router-ports')
42 | SWITCHES_ENDPOINT = '%s%s' % (API_VERSION, '/logical-switches')
43 | SWITCH_PORTS_ENDPOINT = '%s%s' % (API_VERSION, '/logical-ports')
44 | EDGE_CLUSTERS_ENDPOINT = '%s%s' % (API_VERSION, '/edge-clusters')
45 | LBR_SERVICES_ENDPOINT = '%s%s' % (API_VERSION, '/loadbalancer/services')
46 | LBR_VIRTUAL_SERVER_ENDPOINT = '%s%s' % (API_VERSION, '/loadbalancer/virtual-servers')
47 | LBR_POOLS_ENDPOINT = '%s%s' % (API_VERSION, '/loadbalancer/pools')
48 | LBR_MONITORS_ENDPOINT = '%s%s' % (API_VERSION, '/loadbalancer/monitors')
49 |
50 | COMPUTE_COLLECTION_FABRIC_TEMPLATES_ENDPOINT = '%s%s' % (API_VERSION, '/fabric/compute-collection-fabric-templates')
51 | COMPUTE_COLLECTION_TRANSPORT_NODES_ENDPOINT = '%s%s' % (API_VERSION, '/compute-collection-transport-node-templates')
52 |
53 | esxi_host_map = {}
54 | edge_transport_node_map = {}
55 |
56 |
57 | def init():
58 |
59 | nsx_mgr_ip = os.getenv('NSX_T_MANAGER_IP')
60 | nsx_mgr_user = os.getenv('NSX_T_MANAGER_ADMIN_USER', 'admin')
61 | nsx_mgr_pwd = os.getenv('NSX_T_MANAGER_ROOT_PWD')
62 | nsx_mgr_context = {
63 | 'admin_user' : nsx_mgr_user,
64 | 'url': 'https://' + nsx_mgr_ip,
65 | 'admin_passwd' : nsx_mgr_pwd
66 | }
67 | client.set_context(nsx_mgr_context)
68 |
69 | def identify_edges_and_hosts():
70 | fabric_nodes_api_endpoint = FABRIC_NODES_ENDPOINT
71 | fabric_nodes_resp = client.get(fabric_nodes_api_endpoint)
72 | for fabric_node in fabric_nodes_resp.json()['results']:
73 | print 'Fabric Node: {}'.format(fabric_node)
74 | if fabric_node['resource_type'] == 'EdgeNode':
75 | edge_transport_node_map[fabric_node['id']] = fabric_node['display_name']
76 | else:
77 | esxi_host_map[fabric_node['id']] = fabric_node['display_name']
78 |
79 | # edge_clusters_api_endpoint = EDGE_CLUSTERS_ENDPOINT
80 | # edge_clusters_resp = client.get(edge_clusters_api_endpoint)
81 | # for edge_cluster in edge_clusters_resp.json()['results']:
82 | # print 'Edge Cluster: {}'.format(edge_cluster)
83 | # for member in edge_cluster['members']:
84 | # edge_transport_node_map.append(member['transport_node_id'])
85 | #
86 | # transport_nodes_api_endpoint = TRANSPORT_NODES_ENDPOINT
87 | # transport_nodes_resp = client.get(transport_nodes_api_endpoint)
88 | # for transport_node in transport_nodes_resp.json()['results']:
89 | # print 'Transport node: {}'.format(transport_node)
90 | # if transport_node['id'] not in edge_transport_node_map:
91 | # esxi_host_map.append(transport_node['id'])
92 |
93 | def create_esxi_hosts():
94 | esxi_root_pwd = os.getenv('ESXI_HOSTS_ROOT_PWD')
95 | esxi_host_file_map = { 'esxi_hosts' : { 'hosts' : {} }}
96 | output_esxi_host_map = { }
97 | for esxi_host_id in esxi_host_map:
98 | esxi_host_name = esxi_host_map[esxi_host_id]
99 | output_esxi_host_map[esxi_host_name] = {
100 | 'ansible_ssh_host': esxi_host_name,
101 | 'ansible_ssh_user': 'root',
102 | 'ansible_ssh_pass': esxi_root_pwd
103 | }
104 | esxi_host_file_map = { 'esxi_hosts' : { 'hosts' : output_esxi_host_map } }
105 | write_config(esxi_host_file_map, esxi_hosts_file)
106 |
107 | def wipe_env():
108 |
109 | # Before we can wipe the env. we need to remove the hosts as transport nodes
110 | # we need to disable auto-install of nsx, remove the auto-addition as transport node
111 | disable_auto_install_for_compute_fabric()
112 |
113 | # Then remove the vcenter extension for nsx
114 | handle_nsxt_extension_removal()
115 |
116 | # remove the LBRs
117 | delete_lbrs()
118 |
119 | # clean up all the routers and switches
120 | delete_routers_and_switches()
121 |
122 | # remove the edge Clusters
123 | delete_edge_clusters()
124 |
125 | # finally remove the host from transport node list
126 | return uninstall_nsx_from_hosts()
127 |
128 | def disable_auto_install_for_compute_fabric():
129 | compute_fabric_collection_api_endpoint = COMPUTE_COLLECTION_FABRIC_TEMPLATES_ENDPOINT
130 | transport_node_collection_api_endpoint = COMPUTE_COLLECTION_TRANSPORT_NODES_ENDPOINT
131 |
132 | outer_resp = client.get(compute_fabric_collection_api_endpoint)
133 | #print 'Got Compute collection respo: {}'.format(outer_resp)
134 | compute_fabric_templates = outer_resp.json()['results']
135 | for compute_fabric in compute_fabric_templates:
136 | #print 'Iterating over Compute fabric respo: {}'.format(compute_fabric)
137 | compute_fabric['auto_install_nsx'] = False
138 | compute_fabric_id = compute_fabric['id']
139 |
140 | compute_collection_id = compute_fabric['compute_collection_id']
141 |
142 | # First remove the related transport node template from the compute collection relationship
143 | transport_node_association_from_compute_fabric_api_endpoint = '%s?compute_collection_id=%s' % (transport_node_collection_api_endpoint, compute_collection_id)
144 |
145 | get_resp = client.get(transport_node_association_from_compute_fabric_api_endpoint, check=False )
146 | if get_resp.status_code == 200:
147 | try:
148 | for transport_node in get_resp.json()['results']:
149 | transport_node_id = transport_node['id']
150 | transport_node_removal_api_endpoint = '%s/%s' % (transport_node_collection_api_endpoint, transport_node_id)
151 | delete_resp = client.delete(transport_node_removal_api_endpoint, check=False )
152 | print 'Removed auto-linking of Host as Transport Node in Fabric for Compute Manager: {}'.format(compute_fabric['compute_collection_id'])
153 | except Exception as e:
154 | print 'No transport nodes associated'
155 | #ignore
156 | # Now change the compute fabric template
157 | compute_fabric_update_api_endpoint = '%s/%s' % (compute_fabric_collection_api_endpoint, compute_fabric_id)
158 | resp = client.put(compute_fabric_update_api_endpoint, compute_fabric, check=False )
159 |
160 | if resp.status_code < 400:
161 | print 'Disabled auto install of NSX in Compute Fabric: {}'.format(compute_fabric['compute_collection_id'])
162 | print ''
163 | else:
164 | print 'Problem in disabling auto install in Compute Fabric: {}'.format(compute_fabric['compute_collection_id'])
165 | print 'Associated Error: {}'.format(resp.json())
166 | exit(1)
167 |
168 | def handle_nsxt_extension_removal():
169 | vcenter_context = { }
170 | compute_managers_config_raw = os.getenv('COMPUTE_MANAGER_CONFIGS')
171 | if compute_managers_config_raw is None or compute_managers_config_raw == '':
172 | print 'Compute manager config is empty, returning'
173 | return
174 |
175 | compute_managers_config = yaml.load(compute_managers_config_raw)['compute_managers']
176 |
177 | # compute_managers:
178 | # - vcenter_name: vcenter-01
179 | # vcenter_host: vcenter-01.corp.local
180 | # vcenter_usr: administrator@vsphere.local
181 | # vcenter_pwd: VMWare1!
182 | # # Multiple clusters under same vcenter can be specified
183 | # clusters:
184 | # - vcenter_cluster: Cluster1
185 | # overlay_profile_mtu: 1600 # Min 1600
186 | # overlay_profile_vlan: EDIT_ME # VLAN ID for the TEP/Overlay network
187 | # # Specify an unused vmnic on esxi host to be used for nsx-t
188 | # # can be multiple vmnics separated by comma
189 | # uplink_vmnics: vmnic1 # vmnic1,vmnic2...
190 |
191 | for vcenter in compute_managers_config:
192 | vcenter_context = {
193 | 'address' : vcenter['vcenter_host'],
194 | 'admin_user' : vcenter['vcenter_usr'],
195 | 'admin_passwd' : vcenter['vcenter_pwd']
196 | }
197 | print 'Removing nsx-t extension from vcenter : {}\n'.format(vcenter_context['address'])
198 | mobclient.set_context(vcenter_context)
199 | mobclient.remove_nsxt_extension_from_vcenter()
200 |
201 | def delete_routers_and_switches():
202 | delete_router_ports()
203 | delete_routers()
204 | delete_logical_switch_ports()
205 | delete_logical_switches()
206 |
207 | def delete_routers():
208 | api_endpoint = ROUTERS_ENDPOINT
209 | print 'Starting deletion of Routers!'
210 | router_resp = client.get(api_endpoint)
211 | for instance in router_resp.json()['results']:
212 | instance_api_endpoint = '%s/%s?force=true' % (api_endpoint, instance['id'])
213 | client.delete(instance_api_endpoint)
214 | print ' Deleted Routers!'
215 |
216 | def delete_router_ports():
217 | api_endpoint = ROUTER_PORTS_ENDPOINT
218 | print 'Starting deletion of Router Ports!'
219 | router_ports_resp = client.get(api_endpoint)
220 | for instance in router_ports_resp.json()['results']:
221 | instance_api_endpoint = '%s/%s?force=true' % (api_endpoint, instance['id'])
222 | client.delete(instance_api_endpoint)
223 | print ' Deleted Router Ports!'
224 |
225 | def delete_logical_switch_ports():
226 | api_endpoint = SWITCH_PORTS_ENDPOINT
227 | print 'Starting deletion of Logical Switch Ports!'
228 | logical_switch_ports_resp = client.get(api_endpoint)
229 | for instance in logical_switch_ports_resp.json()['results']:
230 | instance_api_endpoint = '%s/%s?force=true' % (api_endpoint, instance['id'])
231 | client.delete(instance_api_endpoint)
232 | print ' Deleted Logical Switch Ports!'
233 |
234 | def delete_logical_switches():
235 | api_endpoint = SWITCHES_ENDPOINT
236 | print 'Starting deletion of Logical Switches!'
237 | logical_switches_resp = client.get(api_endpoint)
238 | for instance in logical_switches_resp.json()['results']:
239 | instance_api_endpoint = '%s/%s?force=true' % (api_endpoint, instance['id'])
240 | client.delete(instance_api_endpoint)
241 | print ' Deleted Logical Switches!'
242 |
243 | def delete_lbrs():
244 | api_endpoint = LBR_SERVICES_ENDPOINT
245 | print 'Starting deletion of Loadbalancers!'
246 | lbrs_resp = client.get(api_endpoint)
247 | for instance in lbrs_resp.json()['results']:
248 | instance_api_endpoint = '%s/%s' % (api_endpoint, instance['id'])
249 | client.delete(instance_api_endpoint)
250 | print ' Deleted Loadbalancers!'
251 |
252 | api_endpoint = LBR_VIRTUAL_SERVER_ENDPOINT
253 | print 'Starting deletion of Virtual Servers!'
254 | virtual_servers_resp = client.get(api_endpoint)
255 | for instance in virtual_servers_resp.json()['results']:
256 | instance_api_endpoint = '%s/%s' % (api_endpoint, instance['id'])
257 | client.delete(instance_api_endpoint)
258 | print ' Deleted Virtual Servers!'
259 |
260 | api_endpoint = LBR_POOLS_ENDPOINT
261 | print 'Starting deletion of Server Pools!'
262 | pool_servers_resp = client.get(api_endpoint)
263 | for instance in pool_servers_resp.json()['results']:
264 | instance_api_endpoint = '%s/%s' % (api_endpoint, instance['id'])
265 | client.delete(instance_api_endpoint)
266 | print ' Deleted Server Pools!'
267 |
268 | def delete_edge_clusters():
269 | api_endpoint = EDGE_CLUSTERS_ENDPOINT
270 | print 'Starting deletion of Edge Clusters!'
271 | edge_clusters_resp = client.get(api_endpoint)
272 | for instance in edge_clusters_resp.json()['results']:
273 | instance_api_endpoint = '%s/%s' % (api_endpoint, instance['id'])
274 | resp = client.delete(instance_api_endpoint)
275 | print ' Deleted Edge Clusters!'
276 |
277 | def uninstall_nsx_from_hosts():
278 |
279 | print '\nStarting uninstall of NSX Components from Fabric!!\n'
280 | delete_node_from_transport_and_fabric('Edge Node', edge_transport_node_map)
281 | delete_node_from_transport_and_fabric('Esxi Host', esxi_host_map)
282 |
283 | return check_and_report_uninstall_status()
284 |
285 | def delete_node_from_transport_and_fabric(type_of_entity, entity_map):
286 | transport_nodes_api_endpoint = TRANSPORT_NODES_ENDPOINT
287 | fabric_nodes_api_endpoint = FABRIC_NODES_ENDPOINT
288 |
289 | for entity_id in entity_map.keys():
290 | print 'Deleting {} from Transport and Fabric nodes: {}'.format(type_of_entity, entity_map[entity_id])
291 | transport_node_delete_url = '%s/%s' % (transport_nodes_api_endpoint, entity_id)
292 | transport_nodes_resp = client.delete(transport_node_delete_url)
293 | #print ' Delete response from Transport nodes: {}'.format(transport_nodes_resp)
294 |
295 | fabric_node_delete_url = '%s/%s' % (fabric_nodes_api_endpoint, entity_id)
296 | fabric_node_delete_resp = client.delete(fabric_node_delete_url)
297 | #print ' Delete response from Fabric nodes: {}'.format(fabric_node_delete_resp)
298 | print ''
299 |
300 | def check_and_report_uninstall_status():
301 |
302 | retries = 0
303 | failed_uninstalls = {}
304 | uninstall_failed = False
305 | esxi_hosts_to_check = copy.copy(esxi_host_map)
306 | fabric_nodes_api_endpoint = FABRIC_NODES_ENDPOINT
307 |
308 | # Check periodically for uninstall status
309 | print 'Checking status of uninstalls every {} seconds!'.format(RETRY_INTERVAL)
310 | while (retries < MAX_RETRY_CHECK and len(esxi_hosts_to_check) > 0 ):
311 | print ' Sleeping for {} seconds before checking status of uninstalls!\n'.format(RETRY_INTERVAL)
312 | time.sleep(RETRY_INTERVAL)
313 | for esxi_host_id in esxi_host_map.keys():
314 | if esxi_hosts_to_check.get(esxi_host_id) is None:
315 | continue
316 |
317 | print ' Checking uninstall status of Esxi Host: {}'.format(esxi_host_map[esxi_host_id])
318 | fabric_node_status_url = '%s/%s/status' % (fabric_nodes_api_endpoint, esxi_host_id)
319 | fabric_node_status_resp = client.get(fabric_node_status_url)
320 | if fabric_node_status_resp.status_code == 200:
321 | uninstall_status = fabric_node_status_resp.json()['host_node_deployment_status']
322 | print ' Uninstall status: {}'.format(uninstall_status)
323 |
324 | # If the uninstall failed, don't bother to check again, add it to the failed list
325 | if uninstall_status == 'UNINSTALL_FAILED':
326 | fabric_node_state_url = '%s/%s/state' % (fabric_nodes_api_endpoint, esxi_host_id)
327 | fabric_node_state_resp = client.get(fabric_node_state_url)
328 | failure_message = fabric_node_state_resp.json()['details'][0]['failure_message']
329 | print ' Failure message: ' + failure_message
330 | failed_uninstalls[esxi_hosts_to_check[esxi_host_id]] = failure_message
331 | del esxi_hosts_to_check[esxi_host_id]
332 | elif uninstall_status == 'UNINSTALL_SUCCESSFUL':
333 | # uninstall succeeded, don't bother to check again
334 | del esxi_hosts_to_check[esxi_host_id]
335 | else:
336 | # Node is not there anymore, delete it from the list
337 | del esxi_hosts_to_check[esxi_host_id]
338 | retries += 1
339 |
340 | print ' Completed uninstall of NSX Components from Fabric!\n'
341 | if len(failed_uninstalls) > 0:
342 | print 'WARNING!! Following nodes need to be cleaned up with additional steps!'
343 | print '----------------------------------------------------'
344 | print '\n'.join(host_name.encode('ascii') for host_name in failed_uninstalls.keys())
345 | print '----------------------------------------------------'
346 | print '\nScripts (executed next) would remove the NSX vibs from these hosts,'
347 | print ' but the hosts themselves would have to be rebooted in a rolling fashion!!'
348 | print ''
349 | uninstall_failed = True
350 |
351 | return uninstall_failed
352 |
353 | def write_config(content, destination):
354 | try:
355 | with open(destination, 'w') as output_file:
356 | yaml.safe_dump(content, output_file)
357 |
358 | except IOError as e:
359 | print('Error : {}'.format(e))
360 | print >> sys.stderr, 'Problem with writing out a yaml file.'
361 | sys.exit(1)
362 |
363 | def main():
364 | global esxi_hosts_file
365 |
366 | esxi_hosts_file = sys.argv[1]
367 |
368 | init()
369 | identify_edges_and_hosts()
370 | create_esxi_hosts()
371 |
372 | uninstall_failed_status = wipe_env()
373 | if uninstall_failed_status:
374 | sys.exit(1)
375 | else:
376 | sys.exit(0)
377 |
378 | if __name__ == '__main__':
379 | main()
380 |
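381 | # Usage sketch (hedged; this block is illustrative and not part of the original script):
382 | # the uninstall task invokes it as `python nsx_t_wipe.py <esxi_hosts_file>` with the
383 | # NSX_T_MANAGER_*, ESXI_HOSTS_ROOT_PWD and COMPUTE_MANAGER_CONFIGS env vars set.
384 | # It writes an ansible inventory of the ESXi hosts to <esxi_hosts_file> and exits 1 if any
385 | # host is left in UNINSTALL_FAILED state, so the calling task can fall back to vib cleanup.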
--------------------------------------------------------------------------------
/python/yaml2json.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 |
4 | import sys, yaml, json
5 |
6 | y = yaml.load(sys.stdin.read())
7 | print json.dumps(y)
8 |
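9 | # Usage sketch (illustrative file names, reads YAML on stdin and prints JSON on stdout):
10 | #   cat some-params.yml | python yaml2json.py > some-params.json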
--------------------------------------------------------------------------------
/tasks/add-nsx-t-routers/task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export ROOT_DIR=`pwd`
6 |
7 | export TASKS_DIR=$(dirname $BASH_SOURCE)
8 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
9 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
10 | export PYTHON_LIB_DIR=$(cd $PIPELINE_DIR/python && pwd)
11 |
12 | source $FUNCTIONS_DIR/create_ansible_cfg.sh
13 | source $FUNCTIONS_DIR/create_answerfile.sh
14 | source $FUNCTIONS_DIR/create_hosts.sh
15 | source $FUNCTIONS_DIR/create_extra_yaml_args.sh
16 | source $FUNCTIONS_DIR/check_null_variables.sh
17 |
18 | DEBUG=""
19 | if [ "$ENABLE_ANSIBLE_DEBUG" == "true" ]; then
20 | DEBUG="-vvv"
21 | fi
22 |
23 | # Check if NSX MGR is up or not
24 | nsx_mgr_up_status=$(curl -s -o /dev/null -I -w "%{http_code}" -k https://${NSX_T_MANAGER_IP}:443/login.jsp || true)
25 |
26 | # Bail out if the NSX Mgr is not up yet; the ovas need to be deployed first
27 | if [ $nsx_mgr_up_status -ne 200 ]; then
28 | echo "NSX Mgr not up yet, please deploy the ovas before configuring routers!!"
29 | exit -1
30 | fi
31 |
32 | create_hosts
33 | create_answerfile
34 | create_ansible_cfg
35 | create_extra_yaml_args
36 |
37 | cp hosts answerfile.yml ansible.cfg extra_yaml_args.yml nsxt-ansible/.
38 | cd nsxt-ansible
39 |
40 | echo ""
41 |
42 |
43 |
44 | NO_OF_CONTROLLERS=$(curl -k -u "admin:$NSX_T_MANAGER_ADMIN_PWD" \
45 | https://${NSX_T_MANAGER_IP}/api/v1/cluster/nodes \
46 | | jq '.results[].controller_role.type' | wc -l )
47 | if [ "$NO_OF_CONTROLLERS" -lt 2 ]; then
48 | echo "NSX Mgr and controllers not configured yet, please clean up incomplete vms and rerun the base install before configuring routers!!"
49 | exit -1
50 | fi
51 |
52 | ansible-playbook $DEBUG -i hosts configureNsx.yml -e @extra_yaml_args.yml
53 | STATUS=$?
54 |
55 | echo ""
56 |
57 | if [ "$STATUS" == "0" ]; then
58 | python $PYTHON_LIB_DIR/nsx_t_status.py
59 | STATUS=$?
60 | fi
61 |
62 | exit $STATUS
63 |
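64 | # Manual pre-check sketch (hedged; credentials and addresses are illustrative): the
65 | # controller-count guard above can be reproduced by hand with
66 | #   curl -k -u "admin:$NSX_T_MANAGER_ADMIN_PWD" https://$NSX_T_MANAGER_IP/api/v1/cluster/nodes | jq '.results[].controller_role.type' | wc -l
67 | # Fewer than 2 entries means the manager/controller cluster is not formed yet and this task exits early.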
--------------------------------------------------------------------------------
/tasks/add-nsx-t-routers/task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | platform: linux
4 |
5 | image_resource:
6 | type: docker-image
7 | source: {repository: nsxedgegen/nsx-t-gen-worker, tag: v2.1 }
8 |
9 | params:
10 | VCENTER_HOST:
11 | VCENTER_USR:
12 | VCENTER_PWD:
13 | VCENTER_DATACENTER:
14 | VCENTER_DATASTORE:
15 | VCENTER_CLUSTER:
16 | VCENTER_RP:
17 | VCENTER_MANAGER:
18 | NTPSERVERS:
19 | MGMT_PORTGROUP:
20 | DNSSERVER:
21 | DNSDOMAIN:
22 | DEFAULTGATEWAY:
23 | NETMASK:
24 | ESXI_HOSTS_ROOT_PWD:
25 | ESXI_HOSTS_CONFIG:
26 | COMPUTE_MANAGER_CONFIGS:
27 | EDGE_VCENTER_HOST:
28 | EDGE_VCENTER_USR:
29 | EDGE_VCENTER_PWD:
30 | EDGE_VCENTER_RP:
31 | EDGE_VCENTER_DATACENTER:
32 | EDGE_VCENTER_DATASTORE:
33 | EDGE_VCENTER_CLUSTER:
34 | EDGE_NTPSERVERS:
35 | EDGE_MGMT_PORTGROUP:
36 | EDGE_DNSSERVER:
37 | EDGE_DNSDOMAIN:
38 | EDGE_DEFAULTGATEWAY:
39 | EDGE_NETMASK:
40 | NSX_T_INSTALLER:
41 | NSX_T_MANAGER_FQDN:
42 | NSX_T_MANAGER_VM_NAME:
43 | NSX_T_MANAGER_HOST_NAME:
44 | NSX_T_MANAGER_IP:
45 | NSX_T_MANAGER_ADMIN_PWD:
46 | NSX_T_MANAGER_ROOT_PWD:
47 | NSX_T_CONTROLLERS_CONFIG:
48 | NSX_T_CONTROLLER_HOST_PREFIX:
49 | NSX_T_CONTROLLER_VM_NAME_PREFIX:
50 | NSX_T_CONTROLLER_IPS:
51 | NSX_T_CONTROLLER_ROOT_PWD:
52 | NSX_T_CONTROLLER_CLUSTER_PWD:
53 | NSX_T_EDGE_HOST_PREFIX:
54 | NSX_T_EDGE_VM_NAME_PREFIX:
55 | NSX_T_EDGE_IPS:
56 | NSX_T_EDGE_ROOT_PWD:
57 | NSX_T_EDGE_PORTGROUP_EXT:
58 | NSX_T_EDGE_PORTGROUP_TRANSPORT:
59 | NSX_T_EDGE_OVERLAY_INTERFACE:
60 | NSX_T_EDGE_UPLINK_INTERFACE:
61 | NSX_T_MGR_DEPLOY_SIZE:
62 | NSX_T_EDGE_DEPLOY_SIZE:
63 | NSX_T_TEP_POOL_NAME:
64 | NSX_T_TEP_POOL_CIDR:
65 | NSX_T_TEP_POOL_GATEWAY:
66 | NSX_T_TEP_POOL_START:
67 | NSX_T_TEP_POOL_END:
68 | NSX_T_SINGLE_UPLINK_PROFILE_NAME:
69 | NSX_T_SINGLE_UPLINK_PROFILE_MTU:
70 | NSX_T_SINGLE_UPLINK_PROFILE_VLAN:
71 | NSX_T_EDGE_CLUSTER:
72 | NSX_T_KEEP_RESERVATION:
73 | NSX_T_OVERLAY_PROFILE_NAME:
74 | NSX_T_OVERLAY_PROFILE_MTU:
75 | NSX_T_OVERLAY_PROFILE_VLAN:
76 | NSX_T_ESXI_VMNICS:
77 | NSX_T_HOSTSWITCH:
78 | NSX_T_TRANSPORT_VLAN:
79 | NSX_T_T0ROUTER_SPEC:
80 | NSX_T_PAS_NCP_CLUSTER_TAG:
81 | NSX_T_OVERLAY_TRANSPORT_ZONE:
82 | NSX_T_VLAN_TRANSPORT_ZONE:
83 | NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC:
84 | NSX_T_HA_SWITCHING_PROFILE_SPEC:
85 | NSX_T_CONTAINER_IP_BLOCK_SPEC:
86 | NSX_T_EXTERNAL_IP_POOL_SPEC:
87 | ENABLE_ANSIBLE_DEBUG:
88 | RERUN_CONFIGURE_CONTROLLERS:
89 |
90 | inputs:
91 | - name: nsx-t-gen-pipeline
92 | - name: nsxt-ansible
93 |
94 | run:
95 | path: nsx-t-gen-pipeline/tasks/add-nsx-t-routers/task.sh
96 |
97 |
--------------------------------------------------------------------------------
/tasks/config-nsx-t-extras/task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 |
5 | export ROOT_DIR=`pwd`
6 |
7 | export TASKS_DIR=$(dirname $BASH_SOURCE)
8 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
9 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
10 | export PYTHON_LIB_DIR=$(cd $PIPELINE_DIR/python && pwd)
11 | export SCRIPT_DIR=$(dirname $0)
12 |
13 | source $FUNCTIONS_DIR/check_null_variables.sh
14 |
15 | python $PYTHON_LIB_DIR/nsx_t_gen.py
16 |
17 | STATUS=$?
18 |
19 | exit $STATUS
20 |
--------------------------------------------------------------------------------
/tasks/config-nsx-t-extras/task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | platform: linux
4 |
5 | image_resource:
6 | type: docker-image
7 | source: {repository: nsxedgegen/nsx-t-gen-worker, tag: v2.1 }
8 |
9 | params:
10 | NSX_T_INSTALLER:
11 | NSX_T_MANAGER_FQDN:
12 | NSX_T_MANAGER_HOST_NAME:
13 | NSX_T_MANAGER_IP:
14 | NSX_T_MANAGER_ADMIN_USER:
15 | NSX_T_MANAGER_ROOT_PWD:
16 | NSX_T_OVERLAY_TRANSPORT_ZONE:
17 | NSX_T_PAS_NCP_CLUSTER_TAG:
18 | NSX_T_T0ROUTER_SPEC:
19 | NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC:
20 | NSX_T_HA_SWITCHING_PROFILE_SPEC:
21 | NSX_T_CONTAINER_IP_BLOCK_SPEC:
22 | NSX_T_EXTERNAL_IP_POOL_SPEC:
23 | NSX_T_NAT_RULES_SPEC:
24 | NSX_T_CSR_REQUEST_SPEC:
25 | NSX_T_LBR_SPEC:
26 | NSX_T_MONITOR_SPEC:
27 | NSX_T_NSGROUP_SPEC:
28 |
29 | inputs:
30 | - name: nsx-t-gen-pipeline
31 | run:
32 | path: nsx-t-gen-pipeline/tasks/config-nsx-t-extras/task.sh
33 |
--------------------------------------------------------------------------------
/tasks/install-nsx-t/copy_and_customize_ovas.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export ROOT_DIR=`pwd`
6 |
7 | export TASKS_DIR=$(dirname $BASH_SOURCE)
8 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
9 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
10 |
11 | source $FUNCTIONS_DIR/copy_ovas.sh
12 |
13 | DEBUG=""
14 | if [ "$ENABLE_ANSIBLE_DEBUG" == "true" ]; then
15 | DEBUG="-vvv"
16 | fi
17 |
18 | NSX_T_MANAGER_OVA=$(ls $ROOT_DIR/nsx-mgr-ova)
19 | NSX_T_CONTROLLER_OVA=$(ls $ROOT_DIR/nsx-ctrl-ova)
20 | NSX_T_EDGE_OVA=$(ls $ROOT_DIR/nsx-edge-ova)
21 |
22 | cat > customize_ova_vars.yml <<-EOF
23 | ovftool_path: '/usr/bin'
24 | ova_file_path: "$OVA_ISO_PATH"
25 | nsx_manager_filename: "$NSX_T_MANAGER_OVA"
26 | nsx_controller_filename: "$NSX_T_CONTROLLER_OVA"
27 | nsx_gw_filename: "$NSX_T_EDGE_OVA"
28 |
29 | EOF
30 | cp customize_ova_vars.yml nsxt-ansible
31 |
32 | install_ovftool
33 | copy_ovas_to_OVA_ISO_PATH
34 |
35 | cd nsxt-ansible
36 | ansible-playbook $DEBUG -i localhost customize_ovas.yml -e @customize_ova_vars.yml
37 | STATUS=$?
38 |
39 | echo ""
40 |
41 | # if [ -z "$SUPPORT_NSX_VMOTION" -o "$SUPPORT_NSX_VMOTION" == "false" ]; then
42 | # echo "Skipping vmks configuration for NSX-T Mgr!!"
43 | # echo 'configure_vmks: False' >> answerfile.yml
44 |
45 | # else
46 | # echo "Allowing vmks configuration for NSX-T Mgr!!"
47 | # echo 'configure_vmks: True' >> answerfile.yml
48 | # fi
49 |
50 | # echo ""
51 |
52 |
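53 | # Layout sketch (hedged assumption, based on the install-nsx-t task inputs): nsx-mgr-ova/,
54 | # nsx-ctrl-ova/ and nsx-edge-ova/ are each expected to hold a single OVA file; install_ovftool
55 | # and copy_ovas_to_OVA_ISO_PATH come from functions/copy_ovas.sh, and customize_ovas.yml then
56 | # rewrites the staged OVAs (e.g. to drop resource reservations) before deployment.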
--------------------------------------------------------------------------------
/tasks/install-nsx-t/task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export ROOT_DIR=`pwd`
6 |
7 | export TASKS_DIR=$(dirname $BASH_SOURCE)
8 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
9 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
10 |
11 | export OVA_ISO_PATH='/root/ISOs/CHGA'
12 | export NSX_T_MANAGER_OVA=$(ls $ROOT_DIR/nsx-mgr-ova)
13 | export NSX_T_CONTROLLER_OVA=$(ls $ROOT_DIR/nsx-ctrl-ova)
14 | export NSX_T_EDGE_OVA=$(ls $ROOT_DIR/nsx-edge-ova)
15 |
16 | source $FUNCTIONS_DIR/copy_ovas.sh
17 | source $FUNCTIONS_DIR/create_ansible_cfg.sh
18 | source $FUNCTIONS_DIR/create_hosts.sh
19 | source $FUNCTIONS_DIR/create_extra_yaml_args.sh
20 | source $FUNCTIONS_DIR/check_null_variables.sh
21 | source $FUNCTIONS_DIR/deploy_ova_using_govc.sh
22 |
23 | # Default installer name to be used for tags
24 | if [ "$NSX_T_INSTALLER" == "" ]; then
25 | NSX_T_INSTALLER='nsx-t-gen'
26 | fi
27 |
28 | function check_status_up {
29 | ip_set=$1
30 | type_of_resource=$2
31 | status_up=true
32 |
33 | resources_down_count=0
34 | resources_configured=$(echo $ip_set | sed -e 's/,/ /g' | awk '{print NF}' )
35 | for resource_ip in $(echo $ip_set | sed -e 's/,/ /g' )
36 | do
37 | # no netcat on the docker image
38 | #status=$(nc -vz ${resource_ip} 22 2>&1 | grep -i succeeded || true)
39 | # following hangs on bad ports
40 | #status=$(</dev/tcp/${resource_ip}/22)
41 | timeout 15 bash -c "(echo > /dev/tcp/${resource_ip}/22) >/dev/null 2>&1"
42 | status=$?
43 | if [ "$status" != "0" ]; then
44 | status_up=false
45 | resources_down_count=$(expr $resources_down_count + 1)
46 | fi
47 | done
48 |
49 | if [ "$status_up" == "true" ]; then
50 | (>&2 echo "All VMs of type ${type_of_resource} up, total: ${resources_configured}")
51 | echo "true"
52 | return
53 | fi
54 |
55 | if [ "$resources_down_count" != "$resources_configured" ]; then
56 | (>&2 echo "Mismatch in number of VMs of type ${type_of_resource} that are expected to be up!!")
57 | (>&2 echo "Configured ${type_of_resource} VM total: ${resources_configured}, VM down: ${resources_down_count}")
58 | (>&2 echo "Delete pre-created vms of type ${type_of_resource} and start over!!")
59 | (>&2 echo "If the vms are up and accessible and you suspect it's a timing issue, restart the job!!")
60 | (>&2 echo "Exiting now !!")
61 | exit -1
62 | else
63 | (>&2 echo "All VMs of type ${type_of_resource} down, total: ${resources_configured}")
64 | (>&2 echo " Would need to deploy ${type_of_resource} ovas")
65 | fi
66 |
67 | echo "false"
68 | return
69 | }
70 |
71 | DEBUG=""
72 | if [ "$ENABLE_ANSIBLE_DEBUG" == "true" ]; then
73 | DEBUG="-vvv"
74 | fi
75 |
76 | check_ovas
77 |
78 | create_hosts
79 | create_ansible_cfg
80 | create_extra_yaml_args
81 | create_customize_ova_params
82 |
83 | cp hosts ansible.cfg extra_yaml_args.yml customize_ova_vars.yml nsxt-ansible/.
84 | cd nsxt-ansible
85 |
86 | echo ""
87 |
88 | # Check if the status and count of Mgr, Ctrl, Edge
89 | nsx_mgr_up_status=$(check_status_up $NSX_T_MANAGER_IP "NSX Mgr")
90 | nsx_controller_up_status=$(check_status_up $NSX_T_CONTROLLER_IPS "NSX Controller")
91 | nsx_edge_up_status=$(check_status_up $NSX_T_EDGE_IPS "NSX Edge")
92 | echo ""
93 |
94 | STATUS=0
95 | # Copy over the ovas if any of the resources are not up
96 | if [ "$nsx_mgr_up_status" != "true" -o "$nsx_controller_up_status" != "true" -o "$nsx_edge_up_status" != "true" ]; then
97 | echo "Detected that one or more of the vms (mgr, controller, edge) is not yet up, preparing the ovas"
98 | echo ""
99 |
100 | install_ovftool
101 | copy_ovas_to_OVA_ISO_PATH
102 | create_customize_ova_params
103 |
104 | if [ "$NSX_T_KEEP_RESERVATION" != "true" ]; then
105 | echo "Reservation turned off, customizing the ovas to turn off reservation!!"
106 | echo ""
107 | ansible-playbook $DEBUG -i localhost customize_ovas.yml -e @customize_ova_vars.yml
108 | echo ""
109 | fi
110 | fi
111 |
112 | # Deploy the Mgr ova if its not up
113 | if [ "$nsx_mgr_up_status" != "true" ]; then
114 | #ansible-playbook $DEBUG -i hosts deploy_mgr.yml -e @extra_yaml_args.yml
115 | deploy_ova_using_govc "mgr" "${OVA_ISO_PATH}/${NSX_T_MANAGER_OVA}"
116 | STATUS=$?
117 |
118 | if [[ $STATUS != 0 ]]; then
119 | echo "Deployment of NSX Mgr OVA failed, vms failed to come up!!"
120 | echo "Check error logs"
121 | echo ""
122 | exit $STATUS
123 | else
124 | echo "Deployment of NSX Mgr ova successful!! Continuing with rest of configuration!!"
125 | echo ""
126 | fi
127 | else
128 | echo "NSX Mgr up already, skipping deploying of the Mgr ova!!"
129 | fi
130 |
131 | # Deploy the Controller ova if its not up
132 | if [ "$nsx_controller_up_status" != "true" ]; then
133 | #ansible-playbook $DEBUG -i hosts deploy_ctrl.yml -e @extra_yaml_args.yml
134 | deploy_ova_using_govc "ctrl" "${OVA_ISO_PATH}/${NSX_T_CONTROLLER_OVA}"
135 | STATUS=$?
136 |
137 | if [[ $STATUS != 0 ]]; then
138 | echo "Deployment of NSX Controller OVA failed, vms failed to come up!!"
139 | echo "Check error logs"
140 | echo ""
141 | exit $STATUS
142 | else
143 | echo "Deployment of NSX Controller ova successful!! Continuing with rest of configuration!!"
144 | echo ""
145 | fi
146 | else
147 | echo "NSX Controllers up already, skipping deploying of the Controller ova!!"
148 | fi
149 |
150 | # Deploy the Edge ova if its not up
151 | if [ "$nsx_edge_up_status" != "true" ]; then
152 | #ansible-playbook $DEBUG -i hosts deploy_edge.yml -e @extra_yaml_args.yml
153 | deploy_ova_using_govc "edge" "${OVA_ISO_PATH}/${NSX_T_EDGE_OVA}"
154 | STATUS=$?
155 |
156 | if [[ $STATUS != 0 ]]; then
157 | echo "Deployment of NSX Edge OVA failed, vms failed to come up!!"
158 | echo "Check error logs"
159 | echo ""
160 | exit $STATUS
161 | else
162 | echo "Deployment of NSX Edge ova successful!! Continuing with rest of configuration!!"
163 | echo ""
164 | fi
165 | else
166 | echo "NSX Edges up already, skipping deploying of the Edge ova!!"
167 | fi
168 | echo ""
169 |
170 | # Give some time for vm services to be up before checking the status of the vm instances
171 | echo "Waiting 30 seconds before checking if all NSX VMs are up"
172 | sleep 30
173 | echo ""
174 |
175 | echo "Rechecking the status and count of Mgr, Ctrl, Edge instances !!"
176 | nsx_mgr_up_status=$(check_status_up $NSX_T_MANAGER_IP "NSX Mgr")
177 | nsx_controller_up_status=$(check_status_up $NSX_T_CONTROLLER_IPS "NSX Controller")
178 | nsx_edge_up_status=$(check_status_up $NSX_T_EDGE_IPS "NSX Edge")
179 | echo ""
180 |
181 | if [ "$nsx_mgr_up_status" != "true" \
182 | -o "$nsx_controller_up_status" != "true" \
183 | -o "$nsx_edge_up_status" != "true" ]; then
184 | # if [ "$nsx_mgr_up_status" != "true" \
185 | # -o "$nsx_controller_up_status" != "true" ]; then
186 | echo "Some problem with the VMs, one or more of the vms (mgr, controller, edge) failed to come up or is not accessible!"
187 | echo "Check the related vms!!"
188 | echo "Notes: "
189 | echo " It's possible a vm is not reachable or temporarily unavailable, try restarting the concourse job"
190 | echo " In rare cases, delete any older non-running vms (same name can affect new ova deployment) "
191 | echo " as well as any vms that did not form a complete set (like only 1 controller up out of 3)"
192 | echo " Check also the FAQs here: https://github.com/sparameswaran/nsx-t-gen/blob/master/docs/faqs.md"
193 | exit 1
194 | fi
195 | echo "All Good!! Proceeding with Controller configuration!"
196 | echo ""
197 |
198 | # Configure the controllers
199 | NO_OF_EDGES_CONFIGURED=$(echo $NSX_T_EDGE_IPS | sed -e 's/,/ /g' | awk '{print NF}' )
200 | NO_OF_CONTROLLERS_CONFIGURED=$(echo $NSX_T_CONTROLLER_IPS | sed -e 's/,/ /g' | awk '{print NF}' )
201 |
202 | # Total number of controllers should be mgr + no of controllers
203 | EXPECTED_TOTAL_CONTROLLERS=$(expr 1 + $NO_OF_CONTROLLERS_CONFIGURED )
204 |
205 | CURRENT_TOTAL_EDGES=$(curl -k -u "admin:$NSX_T_MANAGER_ADMIN_PWD" \
206 | https://${NSX_T_MANAGER_IP}/api/v1/fabric/nodes \
207 | 2>/dev/null | jq '.result_count' )
208 |
209 | CURRENT_TOTAL_CONTROLLERS=$(curl -k -u "admin:$NSX_T_MANAGER_ADMIN_PWD" \
210 | https://${NSX_T_MANAGER_IP}/api/v1/cluster/nodes \
211 | 2>/dev/null | jq '.result_count' )
212 | if [ "$CURRENT_TOTAL_CONTROLLERS" != "$EXPECTED_TOTAL_CONTROLLERS" ]; then
213 | RERUN_CONFIGURE_CONTROLLERS=true
214 | echo "Total # of Controllers [$CURRENT_TOTAL_CONTROLLERS] not matching expected count of $EXPECTED_TOTAL_CONTROLLERS (mgr + $NO_OF_CONTROLLERS_CONFIGURED) !!"
215 | echo "Will run configure controllers!"
216 | echo ""
217 | fi
218 |
219 | if [ $NO_OF_EDGES_CONFIGURED -gt "$CURRENT_TOTAL_EDGES" ]; then
220 | RERUN_CONFIGURE_CONTROLLERS=true
221 | echo "Total # of Edges [$CURRENT_TOTAL_EDGES] not matching expected count of $NO_OF_EDGES_CONFIGURED !!"
222 | echo "Will run configure controllers!"
223 | echo ""
224 | fi
225 |
226 | if [ "$RERUN_CONFIGURE_CONTROLLERS" == "true" ]; then
227 | # There should be 1 mgr + 1 controller (or at most 3 controllers).
228 | # So if the count does not match, or the user requested a rerun, configure the controllers
229 | echo "Configuring Controllers!!"
230 | ansible-playbook $DEBUG -i hosts configure_controllers.yml -e @extra_yaml_args.yml
231 | STATUS=$?
232 | else
233 | echo "Controllers already configured!!"
234 | echo ""
235 | fi
236 |
237 | if [[ $STATUS != 0 ]]; then
238 | echo "Configuration of controllers failed!!"
239 | echo "Check error logs"
240 | echo ""
241 | exit $STATUS
242 | else
243 | echo "Configuration of controllers successfull!!"
244 | echo ""
245 | fi
246 |
247 | # STATUS=0
248 | # Deploy the ovas if its not up
249 | # if [ "$SUPPORT_NSX_VMOTION" == "true" ]; then
250 |
251 | # ansible-playbook $DEBUG -i hosts configure_nsx_vmks.yml -e @extra_yaml_args.yml
252 | # STATUS=$?
253 |
254 | # if [[ $STATUS != 0 ]]; then
255 | # echo "Configuration of vmks support failed!!"
256 | # echo "Check error logs"
257 | # echo ""
258 | # exit $STATUS
259 | # else
260 | # echo "Configuration of vmks succcessfull!"
261 | # echo ""
262 | # fi
263 | # fi
264 |
265 | echo "Successfully finished with Install!!"
266 |
267 | exit 0
268 |
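269 | # Re-run sketch (hedged note, not part of the original flow): setting the pipeline param
270 | # RERUN_CONFIGURE_CONTROLLERS=true forces the configure_controllers.yml play even when the
271 | # node counts already match, which can help recover a partially formed controller cluster.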
--------------------------------------------------------------------------------
/tasks/install-nsx-t/task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | platform: linux
4 |
5 | image_resource:
6 | type: docker-image
7 | source: {repository: nsxedgegen/nsx-t-gen-worker, tag: v2.1 }
8 |
9 | params:
10 | VCENTER_HOST:
11 | VCENTER_USR:
12 | VCENTER_PWD:
13 | VCENTER_DATACENTER:
14 | VCENTER_DATASTORE:
15 | VCENTER_CLUSTER:
16 | VCENTER_RP:
17 | VCENTER_MANAGER:
18 | NTPSERVERS:
19 | MGMT_PORTGROUP:
20 | DNSSERVER:
21 | DNSDOMAIN:
22 | DEFAULTGATEWAY:
23 | NETMASK:
24 | ESXI_HOSTS_ROOT_PWD:
25 | ESXI_HOSTS_CONFIG:
26 | COMPUTE_MANAGER_CONFIGS:
27 | EDGE_VCENTER_HOST:
28 | EDGE_VCENTER_USR:
29 | EDGE_VCENTER_PWD:
30 | EDGE_VCENTER_RP:
31 | EDGE_VCENTER_DATACENTER:
32 | EDGE_VCENTER_DATASTORE:
33 | EDGE_VCENTER_CLUSTER:
34 | EDGE_NTPSERVERS:
35 | EDGE_MGMT_PORTGROUP:
36 | EDGE_DNSSERVER:
37 | EDGE_DNSDOMAIN:
38 | EDGE_DEFAULTGATEWAY:
39 | EDGE_NETMASK:
40 | NSX_T_INSTALLER:
41 | NSX_T_MANAGER_FQDN:
42 | NSX_T_MANAGER_VM_NAME:
43 | NSX_T_MANAGER_HOST_NAME:
44 | NSX_T_MANAGER_IP:
45 | NSX_T_MANAGER_ADMIN_PWD:
46 | NSX_T_MANAGER_ROOT_PWD:
47 | NSX_T_CONTROLLERS_CONFIG:
48 | NSX_T_CONTROLLER_HOST_PREFIX:
49 | NSX_T_CONTROLLER_VM_NAME_PREFIX:
50 | NSX_T_CONTROLLER_IPS:
51 | NSX_T_CONTROLLER_ROOT_PWD:
52 | NSX_T_CONTROLLER_CLUSTER_PWD:
53 | NSX_T_EDGE_HOST_PREFIX:
54 | NSX_T_EDGE_VM_NAME_PREFIX:
55 | NSX_T_EDGE_IPS:
56 | NSX_T_EDGE_ROOT_PWD:
57 | NSX_T_EDGE_PORTGROUP_EXT:
58 | NSX_T_EDGE_PORTGROUP_TRANSPORT:
59 | NSX_T_EDGE_OVERLAY_INTERFACE:
60 | NSX_T_EDGE_UPLINK_INTERFACE:
61 | NSX_T_MGR_DEPLOY_SIZE:
62 | NSX_T_EDGE_DEPLOY_SIZE:
63 | NSX_T_TEP_POOL_NAME:
64 | NSX_T_TEP_POOL_CIDR:
65 | NSX_T_TEP_POOL_GATEWAY:
66 | NSX_T_TEP_POOL_START:
67 | NSX_T_TEP_POOL_END:
68 | NSX_T_SINGLE_UPLINK_PROFILE_NAME:
69 | NSX_T_SINGLE_UPLINK_PROFILE_MTU:
70 | NSX_T_SINGLE_UPLINK_PROFILE_VLAN:
71 | NSX_T_EDGE_CLUSTER:
72 | NSX_T_KEEP_RESERVATION:
73 | NSX_T_OVERLAY_PROFILE_NAME:
74 | NSX_T_OVERLAY_PROFILE_MTU:
75 | NSX_T_OVERLAY_PROFILE_VLAN:
76 | NSX_T_ESXI_VMNICS:
77 | NSX_T_HOSTSWITCH:
78 | NSX_T_TRANSPORT_VLAN:
79 | NSX_T_T0ROUTER_SPEC:
80 | NSX_T_PAS_NCP_CLUSTER_TAG:
81 | NSX_T_OVERLAY_TRANSPORT_ZONE:
82 | NSX_T_VLAN_TRANSPORT_ZONE:
83 | NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC:
84 | NSX_T_HA_SWITCHING_PROFILE_SPEC:
85 | NSX_T_CONTAINER_IP_BLOCK_SPEC:
86 | NSX_T_EXTERNAL_IP_POOL_SPEC:
87 | ENABLE_ANSIBLE_DEBUG:
88 | RERUN_CONFIGURE_CONTROLLERS:
89 | NSX_T_MONITOR_SPEC:
90 | NSX_T_NSGROUP_SPEC:
91 |
92 |
93 | inputs:
94 | - name: nsx-t-gen-pipeline
95 | - name: nsx-mgr-ova
96 | - name: nsx-ctrl-ova
97 | - name: nsx-edge-ova
98 | - name: nsxt-ansible
99 | - name: ovftool
100 |
101 | run:
102 | path: nsx-t-gen-pipeline/tasks/install-nsx-t/task.sh
103 |
--------------------------------------------------------------------------------
/tasks/uninstall-nsx-t/task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | export ROOT_DIR=`pwd`
4 |
5 | export TASKS_DIR=$(dirname $BASH_SOURCE)
6 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
7 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
8 | export PYTHON_LIB_DIR=$(cd $PIPELINE_DIR/python && pwd)
9 | export SCRIPT_DIR=$(dirname $0)
10 |
11 | source $FUNCTIONS_DIR/check_null_variables.sh
12 | source $FUNCTIONS_DIR/delete_vm_using_govc.sh
13 |
14 | # First wipe out all non-nsx vms deployed on the management plane or compute clusters
15 | echo "Need to delete the non-NSX Vms that are running in the Compute cluster or Management cluster, before proceeding with cleanup of the NSX Mgmt Plane!!"
16 | echo "Will proceed to highlight the non-NSX related vms that need to be deleted before issuing a full wipe!!"
17 | echo "Will provide a 60 second window to cancel the task if you want to do a manual cleanup"
18 | destroy_vms_not_matching_nsx
19 |
20 | export ESXI_HOSTS_FILE="$ROOT_DIR/esxi_hosts"
21 |
22 | cp $FUNCTIONS_DIR/uninstall-nsx-vibs.yml $ROOT_DIR/
23 | cp $FUNCTIONS_DIR/uninstall-nsx-t-v2.1-vibs.sh $ROOT_DIR/uninstall-nsx-t-vibs.sh
24 |
25 | cat > $ROOT_DIR/ansible.cfg << EOF
26 | [defaults]
27 | host_key_checking = false
28 | EOF
29 |
30 | echo ""
31 |
32 | # Make sure the NSX Mgr is up
33 | set +e
34 | timeout 15 bash -c "(echo > /dev/tcp/${NSX_T_MANAGER_IP}/22) >/dev/null 2>&1"
35 | status=$?
36 | set -e
37 |
38 | if [ "$status" == "0" ]; then
39 | # Start wiping the NSX Configurations from NSX Mgr,
40 | # cleaning up the routers, switches, transport nodes and fabric nodes
41 | # Additionally create the esxi hosts file so we can do vib cleanup (in case things are sticking around)
42 | set +e
43 | python $PYTHON_LIB_DIR/nsx_t_wipe.py $ESXI_HOSTS_FILE
44 | STATUS=$?
45 | set -e
46 |
47 | if [ "$STATUS" != "0" ]; then
48 | echo "Problem in running cleanup of NSX components!!"
49 | echo "The deletion of the NSX vibs from the Esxi hosts will still be attempted later in this task!!"
50 | #exit $STATUS
51 | fi
52 | echo "The resources used within NSX Management plane have been cleaned up!!"
53 | echo ""
54 | else
55 | echo "NSX Manager VM not responding!!"
56 | echo "Cannot delete any related resources within the NSX Management plane"
57 | echo ""
58 | exit -1
59 | fi
60 |
61 | echo "Going to delete the NSX vms in 60 seconds!!!!"
62 | echo ""
63 | echo "Cancel the task if you want to manually check and then delete the VMs"
64 | echo "If cancelled, the deletion of the NSX VMs as well as removal of vibs from Esxi hosts needs to be done manually!!"
65 | echo ""
66 |
67 | echo "Manual NSX-T v${NSX_T_VERSION} Vib removal command on each Esxi Host:"
68 | echo "-----------------------------------------"
69 | cat $ROOT_DIR/uninstall-nsx-t-vibs.sh
70 | echo "-----------------------------------------"
71 | echo ""
72 | if [ -e $ESXI_HOSTS_FILE ]; then
73 | echo "Related Esxi hosts:"
74 | cat $ESXI_HOSTS_FILE | grep ansible | awk '{print $1}' | grep -v 'ansible_ssh' | sed -e 's/://g'
75 | echo ""
76 | fi
77 |
78 | sleep 60
79 |
80 | echo "Proceeding with NSX-T Management Plane VM deletion!"
81 | echo ""
82 | delete_vm_using_govc "edge"
83 | delete_vm_using_govc "ctrl"
84 | delete_vm_using_govc "mgr"
85 | echo "Finished NSX-T Management Plane VM deletion!"
86 | echo ""
87 |
88 | STATUS=0
89 | if [ -e "$ESXI_HOSTS_FILE" ]; then
90 | sleep 5
91 | echo "Now removing the NSX-T related vibs from the Esxi Hosts"
92 | set +e
93 | ansible-playbook -i $ESXI_HOSTS_FILE $ROOT_DIR/uninstall-nsx-vibs.yml || true
94 | STATUS=$?
95 | set -e
96 |
97 | if [ "$STATUS" == "0" ]; then
98 | echo "NSX-T Vibs removed from the Esxi host using ansible script"
99 | echo "If the previous step reported a fatal error matching 'No VIB matching VIB search specification', then the uninstall was already clean and no manual reboot of the hosts is required!!"
100 | else
101 | echo "Check the error details; depending on whether NSX-T Vibs still exist on the Esxi host, proceed with manual cleanup and reboot!"
102 | echo "If there are no NSX-T Vibs left, then no reboot is required for this host"
103 | fi
104 | echo ""
105 |
106 | # esxi_hosts file looks like:
107 | # esxi_hosts:
108 | # hosts:
109 | # sc2-host-corp.local.io: { ansible_ssh_host: sc2-host-corp.local.io, ansible_ssh_user: root, ansible_ssh_pass: asdfn3! }
110 |
111 | echo "Related Esxi Hosts:"
112 | echo "--------------------------------------"
113 | cat $ESXI_HOSTS_FILE | grep ansible | awk '{print $1}' | grep -v 'ansible_ssh' | sed -e 's/://g'
114 | echo "--------------------------------------"
115 | fi
116 |
117 | echo ""
118 | echo "WARNING!! Only applicable to those Esxi hosts that got their nsx-t vibs removed via the ansible script"
119 | echo "Those Esxi Hosts should be rebooted for nsx-t vib removal to be effective!"
120 | echo "Please reboot all the listed Esxi Hosts in a rolling fashion to pick up the changes!!"
121 | echo ""
122 | echo "NOTE: No Reboot required if there were errors matching 'No VIB matching VIB search specification' during VIB removal"
123 | echo ""
124 |
125 | echo "NSX-T ${NSX_T_VERSION} Uninstall Completed!!"
126 |
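127 | # Follow-up sketch (hedged reminder, not executed by this task): reboot each ESXi host that
128 | # had vibs removed one at a time (evacuate or power off its VMs first), then confirm with
129 | #   esxcli software vib list | grep -i nsx
130 | # on the host that no NSX-T vibs remain before moving on to the next host.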
--------------------------------------------------------------------------------
/tasks/uninstall-nsx-t/task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | platform: linux
4 |
5 | image_resource:
6 | type: docker-image
7 | source: {repository: nsxedgegen/nsx-t-gen-worker, tag: v2.1 }
8 |
9 | params:
10 | NSX_T_VERSION:
11 | NSX_T_MANAGER_IP:
12 | NSX_T_MANAGER_ADMIN_USER:
13 | NSX_T_MANAGER_ROOT_PWD:
14 | COMPUTE_MANAGER_CONFIGS:
15 | NSX_T_MANAGER_VM_NAME:
16 | NSX_T_MANAGER_HOST_NAME:
17 | NSX_T_CONTROLLERS_CONFIG:
18 | NSX_T_CONTROLLER_HOST_PREFIX:
19 | NSX_T_CONTROLLER_VM_NAME_PREFIX:
20 | NSX_T_CONTROLLER_IPS:
21 | NSX_T_CONTROLLER_ROOT_PWD:
22 | NSX_T_CONTROLLER_CLUSTER_PWD:
23 | NSX_T_EDGE_HOST_PREFIX:
24 | NSX_T_EDGE_VM_NAME_PREFIX:
25 | NSX_T_EDGE_IPS:
26 | NSX_T_EDGE_ROOT_PWD:
27 | DNSDOMAIN:
28 | ESXI_HOSTS_ROOT_PWD:
29 |
30 | inputs:
31 | - name: nsx-t-gen-pipeline
32 |
33 | run:
34 | path: nsx-t-gen-pipeline/tasks/uninstall-nsx-t/task.sh
35 |
--------------------------------------------------------------------------------