├── .ansible-lint
├── .github
│   └── workflows
│       └── yamllint.yml
├── .gitignore
├── .yamllint.yml
├── LICENSE
├── README.md
├── deploy.yml
├── destroy.yml
├── docker
│   └── Dockerfile
├── ignored
│   └── .gitignored
├── requirements.txt
├── roles
│   ├── concourse
│   │   ├── defaults
│   │   │   └── main.yml
│   │   ├── files
│   │   │   └── operations.yml
│   │   ├── tasks
│   │   │   └── main.yml
│   │   └── templates
│   │       └── vars.yml
│   ├── delete-deployment
│   │   ├── defaults
│   │   │   └── main.yml
│   │   ├── tasks
│   │   │   └── main.yml
│   │   └── templates
│   │       └── env.yml
│   ├── display-configuration
│   │   └── tasks
│   │       └── main.yml
│   ├── harbor-standalone
│   │   ├── harbor-configuration
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── config.j2
│   │   └── harbor-vm
│   │       └── tasks
│   │           └── main.yml
│   ├── minio-standalone
│   │   └── minio-vm
│   │       └── tasks
│   │           └── main.yml
│   ├── nfs-server
│   │   ├── nfs.txt
│   │   └── tasks
│   │       └── main.yml
│   ├── nsx-alb
│   │   ├── configure-cloud
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── vars
│   │   │       └── main.yml
│   │   ├── deploy-controller
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── vars
│   │   │       └── main.yml
│   │   └── dummy-service
│   │       ├── tasks
│   │       │   └── main.yml
│   │       └── vars
│   │           └── main.yml
│   ├── nsxt
│   │   ├── nsxt-compute-manager
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-compute-nodes
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-edge-clusters
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-ip-blocks
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-ip-pools
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-mgmt-appliance
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-mgmt-certificate
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-principal-identities
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-super-user
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-switching-routing-legacy
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-switching-routing
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-transport-nodes
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-transport-zones
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── nsxt-uplink-profiles
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   └── nsxt-vlan-segments
│   │       └── tasks
│   │           └── main.yml
│   ├── tanzu
│   │   ├── application-service
│   │   │   ├── defaults
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       ├── env.yml
│   │   │       ├── ncp.yml
│   │   │       ├── tas-nsx.yml
│   │   │       └── tas.yml
│   │   ├── ha-proxy
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── multi-cloud-generate-config
│   │   │   ├── tasks
│   │   │   │   └── main.yml
│   │   │   └── templates
│   │   │       └── management-cluster.yml
│   │   ├── opsman
│   │   │   ├── configure-bosh
│   │   │   │   ├── defaults
│   │   │   │   │   └── main.yml
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       ├── env.yml
│   │   │   │       ├── p-bosh-nsx.yml
│   │   │   │       └── p-bosh.yml
│   │   │   ├── install-opsman
│   │   │   │   ├── defaults
│   │   │   │   │   └── main.yml
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       └── env.yml
│   │   │   └── upload-stage-configure-tile
│   │   │       ├── tasks
│   │   │       │   └── main.yml
│   │   │       └── templates
│   │   │           └── env.yml
│   │   ├── tkgi
│   │   │   ├── harbor-tile
│   │   │   │   ├── defaults
│   │   │   │   │   └── main.yml
│   │   │   │   ├── tasks
│   │   │   │   │   └── main.yml
│   │   │   │   └── templates
│   │   │   │       ├── env.yml
│   │   │   │       └── harbor.yml
│   │   │   └── tkgi-tile
│   │   │       ├── defaults
│   │   │       │   └── main.yml
│   │   │       ├── tasks
│   │   │       │   └── main.yml
│   │   │       └── templates
│   │   │           └── tkgi.yml
│   │   ├── vsphere-content-library
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   ├── vsphere-enable-cluster
│   │   │   └── tasks
│   │   │       └── main.yml
│   │   └── vyos-router
│   │       └── tasks
│   │           └── main.yml
│   ├── ubuntu-server
│   │   ├── tasks
│   │   │   └── main.yml
│   │   └── templates
│   │       └── cloud_init_config.j2
│   └── vsphere
│       ├── vsphere-clusters
│       │   └── tasks
│       │       └── main.yml
│       ├── vsphere-datacenter
│       │   └── tasks
│       │       └── main.yml
│       ├── vsphere-deploy-vc-and-hosts
│       │   ├── tasks
│       │   │   └── main.yml
│       │   ├── templates
│       │   │   ├── embedded_vCSA_on_VC_6.7.json
│       │   │   └── embedded_vCSA_on_VC_7.0.json
│       │   └── vars
│       │       └── main.yml
│       ├── vsphere-distributed-port-groups
│       │   └── tasks
│       │       └── main.yml
│       ├── vsphere-distributed-switches
│       │   └── tasks
│       │       └── main.yml
│       ├── vsphere-enable-cluster-services
│       │   └── tasks
│       │       └── main.yml
│       ├── vsphere-import-vm-template
│       │   └── tasks
│       │       └── main.yml
│       ├── vsphere-local-datastores
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── vars
│       │       └── main.yml
│       ├── vsphere-nfs-datastores
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── vars
│       │       └── main.yml
│       ├── vsphere-resource-pools
│       │   └── tasks
│       │       └── main.yml
│       ├── vsphere-storage-based-policy-management
│       │   └── tasks
│       │       └── main.yml
│       └── vsphere-vswitch0-port-groups
│           └── tasks
│               └── main.yml
├── tests
│   ├── run-tests.sh
│   └── test-nsx-local.sh
└── var-examples
    ├── base-vsphere
    │   ├── 4hosts-2clusters-custom.yml
    │   ├── README.md
    │   ├── minimal-opinionated-nfs.yml
    │   └── minimal-opinionated.yml
    ├── nsxt
    │   ├── README.md
    │   └── opinionated.yml
    ├── tanzu
    │   ├── application-service
    │   │   ├── README.md
    │   │   ├── opinionated-1host-nsx-manager-api.yml
    │   │   └── opinionated-not-nested.yml
    │   ├── integrated-nsxt
    │   │   ├── README.md
    │   │   ├── opinionated-1host-manager-api.yml
    │   │   └── opinionated-1host-policy-api.yml
    │   ├── multi-cloud
    │   │   ├── README.md
    │   │   ├── opinionated-1host-nsxt-alb.yml
    │   │   ├── opinionated-1host.yml
    │   │   └── opinionated-not-nested.yml
    │   ├── platform-automation-toolkit
    │   │   └── opinionated-not-nested.yml
    │   ├── tas-tkgi-nsxt
    │   │   └── opinionated-1host-nsx-policy-api.yml
    │   ├── vsphere-nsxt
    │   │   ├── README.md
    │   │   └── opinionated-1host.yml
    │   ├── vsphere-vds-alb
    │   │   ├── README.md
    │   │   └── opinionated-1host.yml
    │   └── vsphere-vds-haproxy
    │       ├── README.md
    │       └── opinionated-1host.yml
    └── vsphere-community-testing
        └── wip.yml
/.ansible-lint:
--------------------------------------------------------------------------------
1 | ---
2 | exclude_paths:
3 | - config/
4 | - downloads/
5 |
6 | skip_list:
7 | - name[template]
8 | - no-changed-when
9 | - no-handler
10 | - yaml[line-length]
11 | - yaml[comments-indentation]
12 | - no-free-form
13 | - name[missing]
14 | - role-name[path]
15 | - ignore-errors # review this at some point
16 |
--------------------------------------------------------------------------------
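Note that the skip_list above disables these ansible-lint rules repository-wide, not per file. As a hedged illustration (a hypothetical task, not taken from this repo), the following unnamed free-form shell task would normally trip name[missing], no-free-form, and no-changed-when, all of which are skipped by this config:

- ansible.builtin.shell: echo "a side effect with no changed_when"
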
/.github/workflows/yamllint.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: 'Yamllint GitHub Actions'
3 | on:
4 | - pull_request
5 | jobs:
6 | yamllint:
7 | name: 'yaml-lint'
8 | runs-on: ubuntu-latest
9 | steps:
10 | - name: 'Checkout'
11 | uses: actions/checkout@v2
12 | - name: 'yaml-lint'
13 | uses: ibiqlik/action-yamllint@v3
14 | with:
15 | format: standard
16 | env:
17 | GITHUB_ACCESS_TOKEN: ${{ secrets.GITHUB_TOKEN }}
18 |
19 |
20 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .envrc
2 | temp/*
3 | settings.json
4 | tmp/
5 | ignored/
6 |
7 | # Tanzu Multi-cloud outputs
8 | cluster-config.yml
9 |
10 | # Byte-compiled / optimized / DLL files
11 | __pycache__/
12 | *.py[cod]
13 | *$py.class
14 |
15 | # C extensions
16 | *.so
17 |
18 |
19 | # Distribution / packaging
20 | .Python
21 | build/
22 | develop-eggs/
23 | dist/
24 | downloads/
25 | eggs/
26 | .eggs/
27 | lib/
28 | lib64/
29 | parts/
30 | sdist/
31 | var/
32 | wheels/
33 | pip-wheel-metadata/
34 | share/python-wheels/
35 | *.egg-info/
36 | .installed.cfg
37 | *.egg
38 | MANIFEST
39 |
40 | # PyInstaller
41 | # Usually these files are written by a python script from a template
42 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
43 | *.manifest
44 | *.spec
45 |
46 | # Installer logs
47 | pip-log.txt
48 | pip-delete-this-directory.txt
49 |
50 | # Unit test / coverage reports
51 | htmlcov/
52 | .tox/
53 | .nox/
54 | .coverage
55 | .coverage.*
56 | .cache
57 | nosetests.xml
58 | coverage.xml
59 | *.cover
60 | *.py,cover
61 | .hypothesis/
62 | .pytest_cache/
63 |
64 | # Translations
65 | *.mo
66 | *.pot
67 |
68 | # Django stuff:
69 | *.log
70 | local_settings.py
71 | db.sqlite3
72 | db.sqlite3-journal
73 |
74 | # Flask stuff:
75 | instance/
76 | .webassets-cache
77 |
78 | # Scrapy stuff:
79 | .scrapy
80 |
81 | # Sphinx documentation
82 | docs/_build/
83 |
84 | # PyBuilder
85 | target/
86 |
87 | # Jupyter Notebook
88 | .ipynb_checkpoints
89 |
90 | # IPython
91 | profile_default/
92 | ipython_config.py
93 |
94 | # pyenv
95 | .python-version
96 |
97 | # pipenv
98 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
99 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
100 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
101 | # install all needed dependencies.
102 | #Pipfile.lock
103 |
104 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
105 | __pypackages__/
106 |
107 | # Celery stuff
108 | celerybeat-schedule
109 | celerybeat.pid
110 |
111 | # SageMath parsed files
112 | *.sage.py
113 |
114 | # Environments
115 | .env
116 | .venv
117 | env/
118 | venv/
119 | ENV/
120 | env.bak/
121 | venv.bak/
122 |
123 | # Spyder project settings
124 | .spyderproject
125 | .spyproject
126 |
127 | # Rope project settings
128 | .ropeproject
129 |
130 | # mkdocs documentation
131 | /site
132 |
133 | # mypy
134 | .mypy_cache/
135 | .dmypy.json
136 | dmypy.json
137 |
138 | # Pyre type checker
139 | .pyre/
140 |
--------------------------------------------------------------------------------
/.yamllint.yml:
--------------------------------------------------------------------------------
1 | ---
2 | extends: default
3 |
4 | rules:
5 | # 80 chars should be enough, but don't fail if a line is longer
6 | line-length:
7 | max: 160
8 | level: warning
9 | allow-non-breakable-words: true
10 | allow-non-breakable-inline-mappings: true
11 |
12 | braces:
13 | max-spaces-inside: 1 # Allows for jinja templating
14 |
15 | ignore: |
16 | temp/*
17 | .github/workflows/*
18 | tmp/*
19 |
--------------------------------------------------------------------------------
/deploy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Deploy VC and Hosts
3 | hosts: localhost
4 | gather_facts: false
5 | collections:
6 | - vmware.alb
7 | - vmware.ansible_for_nsxt
8 | - community.crypto
9 |
10 | pre_tasks:
11 | - name: Create the temporary directory
12 | ansible.builtin.tempfile:
13 | state: directory
14 | suffix: setup
15 | register: temp_dir_results
16 | tags: ["always"]
17 |
18 | - ansible.builtin.set_fact:
19 | temp_dir: "{{ temp_dir_results.path }}"
20 | tags: ["always"]
21 |
22 | vars:
23 | debug_mode: "{{ debug | default(False) | bool }}"
24 |
25 | roles:
26 | # Base vsphere section
27 | - role: vsphere/vsphere-deploy-vc-and-hosts
28 | tags: ["vsphere"]
29 | when: 'nested_vcenter is defined and nested_hosts is defined'
30 | - role: vsphere/vsphere-datacenter
31 | tags: ["vsphere"]
32 | when: 'nested_vcenter is defined'
33 | - role: vsphere/vsphere-clusters
34 | tags: ["vsphere"]
35 | when: 'nested_clusters is defined'
36 | - role: vsphere/vsphere-vswitch0-port-groups
37 | tags: ["vsphere"]
38 | when: 'nested_hosts is defined'
39 | - role: vsphere/vsphere-local-datastores
40 | tags: ["vsphere"]
41 | when: 'nested_clusters is defined'
42 | - role: vsphere/vsphere-nfs-datastores
43 | tags: ["vsphere"]
44 | when: 'nested_clusters is defined'
45 | - role: vsphere/vsphere-enable-cluster-services
46 | tags: ["vsphere"]
47 | when: 'nested_clusters is defined'
48 | - role: vsphere/vsphere-resource-pools
49 | tags: ["vsphere"]
50 | when: 'nested_clusters is defined'
51 | - role: vsphere/vsphere-distributed-switches
52 | when: 'distributed_switches is defined'
53 | tags: ["vsphere"]
54 | - role: vsphere/vsphere-distributed-port-groups
55 | when: 'distributed_switches is defined'
56 | tags: ["vsphere"]
57 | - role: vsphere/vsphere-import-vm-template
58 | when: 'vm_templates is defined'
59 | tags: ["vsphere", "vm-template"]
60 |
61 | # NSX-T Section
62 | - role: nsxt/nsxt-mgmt-appliance
63 | when: 'nsxt is defined'
64 | tags: ["nsxt", "nsxt-mgr"]
65 | # TODO set password expiry
66 | # su admin '-c set user admin set user admin password-expiration 9999'
67 | - role: nsxt/nsxt-mgmt-certificate
68 | when: 'nsxt is defined'
69 | tags: ["nsxt", "nsxt-mgr-cert"]
70 | - role: nsxt/nsxt-ip-pools
71 | when: 'nsxt.ip_pools is defined or nsxt.policy_ip_pools is defined'
72 | tags: ["nsxt", "nsxt-ip-pools"]
73 | - role: nsxt/nsxt-ip-blocks
74 | when: 'nsxt.ip_blocks is defined or nsxt.policy_ip_blocks is defined'
75 | tags: ["nsxt", "nsxt-ip-blocks"]
76 | - role: nsxt/nsxt-uplink-profiles
77 | when: 'nsxt is defined'
78 | tags: ["nsxt", "nsxt-uplink-profiles"]
79 | - role: nsxt/nsxt-transport-zones
80 | when: 'nsxt is defined'
81 | tags: ["nsxt", "nsxt-transport-zones"]
82 | - role: nsxt/nsxt-compute-manager
83 | when: 'nsxt is defined'
84 | tags: ["nsxt", "nsxt-compute-mgr"]
85 | - role: nsxt/nsxt-principal-identities
86 | when: 'nsxt.principal_identities is defined'
87 | tags: ["nsxt", "nsxt-pis"]
88 | - role: nsxt/nsxt-vlan-segments
89 | when: 'nsxt is defined'
90 | tags: ["nsxt", "nsxt-vlan-segments"]
91 | - role: nsxt/nsxt-transport-nodes
92 | when: 'nsxt is defined'
93 | tags: ["nsxt", "nsxt-transport-nodes"]
94 | - role: nsxt/nsxt-edge-clusters
95 | when: 'nsxt is defined'
96 | tags: ["nsxt", "nsxt-edge-clusters"]
97 | - role: nsxt/nsxt-switching-routing
98 | when: 'nsxt is defined'
99 | tags: ["nsxt", "nsxt-switching-routing"]
100 |
101 | # NSX-ALB (Avi) Section
102 | - role: nsx-alb/deploy-controller
103 | when: 'nsx_alb is defined'
104 | tags: ["nsx-alb", "alb-controller"]
105 | - role: nsx-alb/configure-cloud
106 | when: 'nsx_alb is defined'
107 | tags: ["nsx-alb", "alb-cloud"]
108 | - role: nsx-alb/dummy-service
109 | when: 'nsx_alb.dummy_service is defined'
110 | tags: ["nsx-alb", "alb-dummy"]
111 |
112 | # Tanzu Multi-Cloud Section
113 | - role: tanzu/multi-cloud-generate-config
114 | when: 'tanzu_multi_cloud.generated_config_file is defined'
115 | tags: ["tkgm-config"]
116 |
117 | # vSphere with Kubernetes Section
118 | - role: vsphere/vsphere-storage-based-policy-management
119 | when: 'tspbm is defined'
120 | tags: ["tkgs"]
121 | - role: tanzu/vsphere-content-library
122 | when: 'tanzu_vsphere is defined'
123 | tags: ["tkgs"]
124 | - role: tanzu/vyos-router
125 | when: 'tkg_router is defined'
126 | tags: ["tkgs"]
127 | - role: tanzu/ha-proxy
128 | when: 'tkg_haproxy is defined'
129 | tags: ["tkgs"]
130 | - role: tanzu/vsphere-enable-cluster
131 | when: 'tanzu_vsphere is defined'
132 | tags: ["tkgs", "tkgs-enable"]
133 |
134 | # Opsman/Bosh Section
135 | - role: tanzu/opsman/install-opsman
136 | when: 'tanzu_opsman_bosh is defined'
137 | tags: ["opsman"]
138 | - role: tanzu/opsman/configure-bosh
139 | when: 'tanzu_opsman_bosh is defined'
140 | tags: ["bosh"]
141 |
142 | # TKGi section
143 | - role: tanzu/tkgi/harbor-tile
144 | when: 'tkgi is defined'
145 | tags: ["harbor-tile"]
146 | - role: tanzu/tkgi/tkgi-tile
147 | when: 'tkgi is defined'
148 | tags: ["tkgi-tile"]
149 |
150 | # TAS section
151 | - role: tanzu/application-service
152 | when: 'tanzu_application_service is defined'
153 | tags: ["tas-tile"]
154 |
155 | # Concourse section
156 | - role: concourse
157 | when: 'concourse is defined'
158 | tags: ["concourse"]
159 |
160 | # Harbor VM section
161 | - role: harbor-standalone/harbor-vm
162 | when: 'harbor_standalone is defined'
163 | tags: ["harbor"]
164 |
165 | # MinIO VM section
166 | - role: minio-standalone/minio-vm
167 | when: 'minio_standalone is defined'
168 | tags: ["minio"]
169 |
170 | post_tasks:
171 | - name: Remove the temporary directory
172 | ansible.builtin.file:
173 | path: "{{ temp_dir }}"
174 | state: absent
175 | no_log: true
176 |
177 | # Separate section to run commands inside harbor VM if created
178 | - name: Setup Standalone Harbor
179 | hosts: harbor
180 | become: true
181 | roles:
182 | - role: harbor-standalone/harbor-configuration
183 | when: 'harbor_standalone is defined'
184 | tags: ["harbor"]
185 |
186 | - name: Setup Standalone Minio
187 | hosts: minio
188 | become: true
189 | roles:
190 | - role: ricsanfre.minio
191 | minio_root_user: "{{ minio_standalone.minio_root_user }}"
192 | minio_root_password: "{{ minio_standalone.minio_root_password }}"
193 | minio_enable_tls: false
194 | minio_server_port: "{{ minio_standalone.minio_server_port }}"
195 | minio_url: "{{ minio_standalone.minio_url }}"
196 | minio_site_region: "{{ minio_standalone.minio_site_region }}"
197 | minio_buckets: "{{ minio_standalone.buckets }}"
198 | minio_pip_environment_vars:
199 | PIP_BREAK_SYSTEM_PACKAGES: "1"
200 | when: 'minio_standalone is defined'
201 | tags: ["minio"]
202 |
203 | - name: Display Summary
204 | hosts: localhost
205 | gather_facts: false
206 | roles:
207 | ### Show outputs at the end
208 | - role: display-configuration
209 | tags: ["summary"]
210 |
--------------------------------------------------------------------------------
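Note on how deploy.yml gates work: every role above is guarded by a "when: <key> is defined" test, so the supplied var file controls which sections run, and tags allow re-running a subset (e.g. --tags vsphere). A minimal, hedged sketch of a var file that would drive only the base vSphere roles (hypothetical values; the complete, working files live under var-examples/):

environment_tag: lab01                 # prefix for all created VM names
nested_vcenter:
  ip: 192.168.0.10
  username: administrator@vsphere.local
  password: VMware1!
  datacenter: lab
nested_host_password: VMware1!
nested_hosts:
  - name: esx1
    ip: 192.168.0.11

Because keys such as nsxt, nsx_alb, tanzu_vsphere, and tkgi are left undefined, the corresponding roles are skipped entirely.
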
/destroy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Delete Deployment
3 | hosts: localhost
4 | gather_facts: false
5 |
6 | vars:
7 | debug_mode: "{{ debug | default(False) | bool }}"
8 |
9 | roles:
10 | - role: delete-deployment
11 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:24.04
2 |
3 | ENV ANSIBLE_VERSION=2.16.3-0ubuntu2
4 |
5 | ENV ANSIBLE_HOST_KEY_CHECKING=False
6 |
7 | RUN set -xe \
8 | && echo "****** Install packages with apt ******" \
9 | && export DEBIAN_FRONTEND=noninteractive \
10 | && apt update \
11 | && apt install -y --no-install-recommends \
12 | ca-certificates iputils-ping python3-apt libarchive-tools sshpass \
13 | build-essential python3 python3-jmespath python3-setuptools \
14 | python3-pip python3-dev git curl ansible-core \
15 | \
16 | && ln --symbolic /usr/bin/python3 /usr/bin/python \
17 | \
18 | && echo "****** Installing OM CLI ******" \
19 | && arch=$(a=$(uname -m) && ([ $a = aarch64 ] || [ $a = arm64 ]) && printf arm64 || printf amd64) \
20 | && curl -L https://github.com/pivotal-cf/om/releases/download/7.9.0/om-linux-${arch}-7.9.0 --output /usr/local/bin/om \
21 | && chmod +x /usr/local/bin/om \
22 | \
23 | && echo "****** Installing BOSH CLI ******" \
24 | && arch=$(a=$(uname -m) && ([ $a = aarch64 ] || [ $a = arm64 ]) && printf arm64 || printf amd64) \
25 | && curl -L https://github.com/cloudfoundry/bosh-cli/releases/download/v7.8.2/bosh-cli-7.8.2-linux-${arch} --output /usr/local/bin/bosh \
26 | && chmod +x /usr/local/bin/bosh \
27 | \
28 | && echo "****** Installing Credhub CLI ******" \
29 | && arch=$(a=$(uname -m) && ([ $a = aarch64 ] || [ $a = arm64 ]) && printf arm64 || printf amd64) \
30 | && curl -L https://github.com/cloudfoundry/credhub-cli/releases/download/2.9.39/credhub-linux-${arch}-2.9.39.tgz --output /tmp/credhub.tgz \
31 | && tar -xvzf /tmp/credhub.tgz -C /usr/local/bin/ \
32 | \
33 | && echo "****** Installing Python modules ******" \
34 | && pip3 install pyvmomi netaddr minio \
35 | git+https://github.com/vmware/vsphere-automation-sdk-python.git --break-system-packages \
36 | \
37 | && echo "****** Adding community.vmware from Ansible Galaxy ******" \
38 | && ansible-galaxy collection install community.vmware:5.1.0 \
39 | \
40 | && echo "****** Adding vmware.avi from Ansible Galaxy ******" \
41 | && ansible-galaxy collection install vmware.alb:22.1.4 \
42 | \
43 | && echo "****** Adding ansible.utils from Ansible Galaxy ******" \
44 | && ansible-galaxy collection install ansible.utils \
45 | \
46 | && echo "****** Adding community.crypto from Ansible Galaxy ******" \
47 | && ansible-galaxy collection install community.crypto \
48 | \
49 | && echo "****** Adding community.general from Ansible Galaxy ******" \
50 | && ansible-galaxy collection install community.general \
51 | \
52 | && echo "****** Adding ansible-for-nsxt from Github ******" \
53 | && ansible-galaxy collection install git+https://github.com/laidbackware/ansible-for-nsxt.git,upstream-fixes --force \
54 | \
55 | && echo "****** Cloning ansible-for-vsphere-tanzu ******" \
56 | && ansible-galaxy collection install git+https://github.com/laidbackware/ansible-for-vsphere-tanzu.git,ansible-galaxy \
57 | \
58 | && echo "****** Install Minio role from https://github.com/ricsanfre/ansible-role-minio ******" \
59 | && ansible-galaxy role install ricsanfre.minio,v1.1.8 \
60 | \
61 | && echo "****** Removing build tools ******" \
62 | && apt remove -y build-essential python3-dev git curl \
63 | && apt autoremove -y \
64 | && rm -Rf /var/lib/apt/lists/* \
65 | && rm -Rf /usr/share/doc && rm -Rf /usr/share/man \
66 | && rm -rf /tmp/* \
67 | && apt-get clean
68 |
--------------------------------------------------------------------------------
/ignored/.gitignored:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/laidbackware/vmware-lab-builder/51e52497f9fad728b927245c070173625e531171/ignored/.gitignored
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible-core == 2.16.12
2 | ansible
3 | pyvmomi
4 | netaddr
5 | requests
6 | jmespath
7 | git+https://github.com/vmware/vsphere-automation-sdk-python.git
--------------------------------------------------------------------------------
/roles/concourse/defaults/main.yml:
--------------------------------------------------------------------------------
1 | concourse_ip: "{{ concourse.ip }}"
2 | concourse_db_persistent_disk_type: "{{ concourse.db_persistent_disk_type | default(102400) | int }}"
3 | concourse_worker_vm_type: "{{ concourse.worker_vm_type }}"
4 | concourse_web_vm_type: "{{ concourse.web_vm_type }}"
5 | concourse_db_vm_type: "{{ concourse.db_vm_type }}"
6 | concourse_network_name: "{{ concourse.network_name }}"
7 | concourse_username: "{{ tanzu_opsman_bosh.opsman_username }}"
8 | concourse_password: "{{ tanzu_opsman_bosh.opsman_password }}"
9 |
10 | opsman_ip: "{{ tanzu_opsman_bosh.opsman_ip }}"
11 | opsman_username: "{{ tanzu_opsman_bosh.opsman_username }}"
12 | opsman_password: "{{ tanzu_opsman_bosh.opsman_password }}"
13 |
--------------------------------------------------------------------------------
/roles/concourse/files/operations.yml:
--------------------------------------------------------------------------------
1 | - type: replace
2 | path: /instance_groups/name=worker/vm_extensions?/-
3 | value: increased-disk
4 |
--------------------------------------------------------------------------------
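This operations file appends a vm_extension named increased-disk to the worker instance group of the Concourse manifest. The extension itself has to be defined in the BOSH cloud config; a hedged sketch of what that definition could look like (hypothetical fragment for the vSphere CPI, size illustrative):

vm_extensions:
  - name: increased-disk
    cloud_properties:
      disk: 102400  # ephemeral disk size in MB
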
/roles/concourse/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create a temporary directory for concourse config files
2 | ansible.builtin.tempfile:
3 | state: directory
4 | suffix: concourse
5 | register: temp_dir_concourse
6 |
7 | - name: Set concourse bosh deployment dir
8 | ansible.builtin.set_fact:
9 | concourse_bosh_deployment_dir: "{{ temp_dir_concourse.path }}/{{ concourse_bosh_deployment.split('.tgz') | first | split('/') | last }}"
10 |
11 | - name: Create concourse deployment directory
12 | ansible.builtin.file:
13 | path: "{{ concourse_bosh_deployment_dir }}"
14 | state: directory
15 |
16 | - name: Extract concourse-bosh-deployment tgz
17 | ansible.builtin.unarchive:
18 | src: "{{ concourse_bosh_deployment }}"
19 | dest: "{{ concourse_bosh_deployment_dir }}"
20 |
21 | - name: Set concourse vars
22 | ansible.builtin.template:
23 | src: vars.yml
24 | dest: "{{ temp_dir_concourse.path }}/vars.yml"
25 |
26 | - name: Copy file operations.yml
27 | ansible.builtin.copy:
28 | src: operations.yml
29 | dest: "{{ temp_dir_concourse.path }}/operations.yml"
30 |
31 | - name: Export BOSH environment
32 | args:
33 | executable: /bin/bash
34 | environment:
35 | OM_TARGET: "{{ opsman_ip }}"
36 | OM_USERNAME: "{{ opsman_username }}"
37 | OM_PASSWORD: "{{ opsman_password }}"
38 | OM_SKIP_SSL_VALIDATION: true
39 | ansible.builtin.shell:
40 | cmd: om -d '{{ opsman_password }}{{ opsman_password }}' bosh-env > "{{ temp_dir_concourse.path }}/bosh_env"
41 |
42 | - name: Upload releases and stemcell to BOSH director
43 | args:
44 | executable: /bin/bash
45 | ansible.builtin.shell:
46 | cmd: |
47 | source "{{ temp_dir_concourse.path }}/bosh_env"
48 | bosh upload-release {{ bpm_release }}
49 | bosh upload-release {{ postgres_release }}
50 | bosh upload-release {{ uaa_release }}
51 | bosh upload-release {{ credhub_release }}
52 | bosh upload-release {{ backup_and_restore_sdk_release }}
53 | bosh upload-release {{ concourse_release }}
54 | bosh upload-stemcell {{ concourse_stemcell }}
55 |
56 | - name: Set Concourse local user in CredHub
57 | args:
58 | executable: /bin/bash
59 | ansible.builtin.shell:
60 | cmd: |
61 | source "{{ temp_dir_concourse.path }}/bosh_env"
62 |
63 | credhub set \
64 | -n /p-bosh/concourse/local_user \
65 | -t user \
66 | -z "{{ concourse_username }}" \
67 | -w "{{ concourse_password }}"
68 |
69 | - name: Deploy Concourse BOSH release
70 | args:
71 | executable: /bin/bash
72 | ansible.builtin.shell:
73 | cmd: |
74 | source "{{ temp_dir_concourse.path }}/bosh_env"
75 | bosh -n -d concourse deploy {{ concourse_bosh_deployment_dir }}/cluster/concourse.yml \
76 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/privileged-http.yml \
77 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/privileged-https.yml \
78 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/basic-auth.yml \
79 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/tls-vars.yml \
80 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/tls.yml \
81 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/uaa.yml \
82 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/credhub-colocated.yml \
83 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/offline-releases.yml \
84 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/backup-atc-colocated-web.yml \
85 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/secure-internal-postgres.yml \
86 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/secure-internal-postgres-bbr.yml \
87 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/secure-internal-postgres-uaa.yml \
88 | -o {{ concourse_bosh_deployment_dir }}/cluster/operations/secure-internal-postgres-credhub.yml \
89 | -o {{ temp_dir_concourse.path }}/operations.yml \
90 | -l <(om interpolate --config {{ temp_dir_concourse.path }}/vars.yml) \
91 | -l {{ concourse_bosh_deployment_dir }}/versions.yml
92 |
--------------------------------------------------------------------------------
/roles/concourse/templates/vars.yml:
--------------------------------------------------------------------------------
1 | deployment_name: concourse
2 | # This can be any VM type from the cloud config: bosh cloud-config
3 | web_vm_type: {{ concourse_web_vm_type }}
4 | external_host: {{ concourse_ip }}
5 | external_url: https://{{ concourse_ip }}
6 | # This can be any VM type from the cloud config: bosh cloud-config
7 | db_vm_type: {{ concourse_db_vm_type }}
8 | # This can be any disk type from the cloud config: bosh cloud-config
9 | db_persistent_disk_type: {{ concourse_db_persistent_disk_type }}
10 | # This can be any VM type from the cloud config: bosh cloud-config
11 | worker_vm_type: {{ concourse_worker_vm_type }}
12 | # This assigns created VMs (web, worker, and db) to AZs in the IaaS
13 | azs: [az1]
14 | # The network name to assign the VMs to.
15 | network_name: {{ concourse_network_name }}
16 |
--------------------------------------------------------------------------------
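The "from the cloud config" comments above refer to the vm_type and disk_type names exposed by the Ops Manager-deployed BOSH director (visible by running bosh cloud-config). A hedged, illustrative excerpt of such a cloud config (names vary per foundation):

vm_types:
  - name: micro
  - name: small
disk_types:
  - name: "102400"
    disk_size: 102400
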
/roles/delete-deployment/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | opsman_ip: "{{ tanzu_opsman_bosh.opsman_ip }}"
3 | opsman_username: "{{ tanzu_opsman_bosh.opsman_username }}"
4 | opsman_password: "{{ tanzu_opsman_bosh.opsman_password }}"
5 |
--------------------------------------------------------------------------------
/roles/delete-deployment/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Remove the vCenter VM
3 | community.vmware.vmware_guest:
4 | hostname: "{{ hosting_vcenter.ip }}"
5 | username: "{{ hosting_vcenter.username }}"
6 | password: "{{ hosting_vcenter.password }}"
7 | validate_certs: false
8 | name: "{{ environment_tag }}-vcenter"
9 | state: absent
10 | force: true
11 | delegate_to: localhost
12 | when: 'nested_vcenter is defined'
13 |
14 | - name: Remove ESXi hosts VMs
15 | community.vmware.vmware_guest:
16 | hostname: "{{ hosting_vcenter.ip }}"
17 | username: "{{ hosting_vcenter.username }}"
18 | password: "{{ hosting_vcenter.password }}"
19 | validate_certs: false
20 | name: "{{ environment_tag }}-{{ item.name }}"
21 | state: absent
22 | force: true
23 | delegate_to: localhost
24 | loop: "{{ nested_hosts }}"
25 | when: 'nested_hosts is defined'
26 |
27 | - name: Remove NSX-T Manager VM
28 | community.vmware.vmware_guest:
29 | hostname: "{{ hosting_vcenter.ip }}"
30 | username: "{{ hosting_vcenter.username }}"
31 | password: "{{ hosting_vcenter.password }}"
32 | validate_certs: false
33 | name: "{{ environment_tag }}-nsxt-manager"
34 | state: absent
35 | force: true
36 | delegate_to: localhost
37 | when: 'nsxt is defined'
38 |
39 | - name: Remove Avi Controller VM
40 | community.vmware.vmware_guest:
41 | hostname: "{{ hosting_vcenter.ip }}"
42 | username: "{{ hosting_vcenter.username }}"
43 | password: "{{ hosting_vcenter.password }}"
44 | validate_certs: false
45 | name: "{{ environment_tag }}-controller"
46 | state: absent
47 | force: true
48 | delegate_to: localhost
49 | when: 'nsx_alb is defined'
50 |
51 | - name: Check if Opsman is online
52 | ansible.builtin.uri:
53 | validate_certs: false
54 | url: "http://{{ opsman_ip }}/"
55 | method: GET
56 | status_code: 200,302,301,503
57 | register: result_opsman_check
58 | until: result_opsman_check.status == 200
59 | when: 'tanzu_opsman_bosh is defined and not nested_vcenter is defined'
60 |
61 | - name: Delete TAS deployment (if not nested)
62 | args:
63 | executable: /bin/bash
64 | ansible.builtin.shell:
65 | cmd: |
66 | #!/bin/bash
67 | set -eu
68 |
69 | unset OM_PASSWORD OM_USERNAME OM_SKIP_SSL_VALIDATION OM_TARGET
70 |
71 | env=$(cat << EOF
72 | {{ lookup('template', 'env.yml') | from_yaml | to_nice_yaml }}
73 | EOF
74 | )
75 |
76 | om --env <(echo "$env") delete-installation --force
77 | when: 'tanzu_opsman_bosh is defined and not nested_vcenter is defined and result_opsman_check.status == 200'
78 |
79 | - name: Remove Opsman VM
80 | community.vmware.vmware_guest:
81 | hostname: "{{ hosting_vcenter.ip }}"
82 | username: "{{ hosting_vcenter.username }}"
83 | password: "{{ hosting_vcenter.password }}"
84 | validate_certs: false
85 | name: "{{ environment_tag }}-ops-manager"
86 | state: absent
87 | force: true
88 | delegate_to: localhost
89 |
90 | - name: Remove MinIO VM
91 | community.vmware.vmware_guest:
92 | hostname: "{{ hosting_vcenter.ip }}"
93 | username: "{{ hosting_vcenter.username }}"
94 | password: "{{ hosting_vcenter.password }}"
95 | validate_certs: false
96 | name: "{{ environment_tag }}-minio"
97 | state: absent
98 | force: true
99 | delegate_to: localhost
100 |
--------------------------------------------------------------------------------
/roles/delete-deployment/templates/env.yml:
--------------------------------------------------------------------------------
1 | ---
2 | target: https://{{ opsman_ip }}
3 | # connect-timeout: 5 # default 5
4 | # request-timeout: 1800 # default 1800
5 | skip-ssl-validation: true # default false
6 | username: "{{ opsman_username }}"
7 | password: "{{ opsman_password }}"
8 | # decryption-passphrase is optional,
9 | # except for use with `import-installation`.
10 | # OpsMan depends on the passphrase
11 | # to decrypt the imported installation.
12 | # For other commands, providing this key allows
13 | # decryption of the OpsMan VM after reboot,
14 | # which would otherwise need to be done manually.
15 | decryption-passphrase: "{{ opsman_password }}{{ opsman_password }}"
16 |
--------------------------------------------------------------------------------
/roles/display-configuration/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # - name: Get TKGS endpoint
4 |
5 | # when: tanzu_vsphere is defined
6 |
7 | - name: Build outputs
8 | ansible.builtin.set_fact:
9 | build_outputs: >-
10 | {
11 | {% if nested_vcenter is defined %}
12 | "vcenter": {
13 | "ip": "{{ nested_vcenter.ip }}",
14 | "username": "{{ nested_vcenter.username }}",
15 | "password": "{{ nested_vcenter.password }}"
16 | },
17 | {% endif %}
18 | {% if nested_hosts is defined %}
19 | "esxi_hosts": [
20 | {% for host in nested_hosts %}
21 | {
22 | "name": "{{ environment_tag }}-{{ host.name }}",
23 | "ip": "{{ host.ip }}",
24 | "root_password": "{{ nested_host_password }}"
25 | },
26 | {% endfor %}
27 | ],
28 | {% endif %}
29 | {% if nsxt is defined %}
30 | "nsxt": {
31 | "manager_ip": "{{ nsxt.manager.ip }}",
32 | "edge_ips": [
33 | {% for edge in nsxt.edge_nodes %}
34 | "{{ edge.mgmt_ip_address }}",
35 | {% endfor %}
36 | ],
37 | "username": "{{ nsxt.manager.username }}",
38 | "password": "{{ nsxt.manager.password }}",
39 | {% if "tier_0" in nsxt and "locale_services" in nsxt.tier_0 and "interfaces" in nsxt.tier_0.locale_services[0] %}
40 | "t0_uplink": "{{ nsxt.tier_0.locale_services[0].interfaces[0].subnets[0].ip_addresses[0] }}"
41 | {% endif %}
42 | },
43 | {% endif %}
44 |
45 | {% if tanzu_vsphere is defined %}
46 | "tanzu_vsphere": {
47 | {% if tkg_router is defined %}
48 | "router" :{
49 | "uplink_ip": "{{ tkg_router.uplink_ip_cidr.split('/')[0] }}",
50 | "username": "vyos",
51 | "password": "vyos"
52 | },
53 | {% endif %}
54 | {% if tkg_haproxy is defined %}
55 | "haproxy": {
56 | "username": "{{ tkg_haproxy.username }}",
57 | "password": "{{ tkg_haproxy.password }}",
58 | "root_password": "{{ tkg_haproxy.root_pwd }}",
59 | "management_ip": "{{ tkg_haproxy.management_ip }}",
60 | "management_port": "{{ tkg_haproxy.management_port }}"
61 | },
62 | {% endif %}
63 | },
64 | {% endif %}
65 |
66 | {% if harbor is defined %}
67 | "harbor": {
68 | "ip": "{{ harbor.ip }}",
69 | "vm_ssh_username": "{{ harbor.username }}",
70 | "vm_ssh_password": "{{ harbor.password }}",
71 | "admin_username": "admin",
72 | "admin_password": "{{ harbor.password }}"
73 | },
74 | {% endif %}
75 |
76 | {% if tanzu_multi_cloud.generated_config_file is defined %}
77 | "tanzu_multi_cloud": {
78 | "config_file": "{{ tanzu_multi_cloud.generated_config_file }}",
79 | "mgmt_create_command" : "tanzu management-cluster create --file {{ tanzu_multi_cloud.generated_config_file }}",
80 | "mgmt_reset_command" : "tanzu context delete {{ environment_tag }}-tkg-mgmt"
81 | },
82 | {% endif %}
83 |
84 | {% if tanzu_application_service is defined %}
85 | "tanzu_application_service": {
86 | "opsman_ip": "{{ tanzu_opsman_bosh.opsman_ip }}",
87 | "apps_manager_url": "https://apps.{{ tanzu_application_service.sys_domain }}",
88 | "api_fqdn": "api.{{ tanzu_application_service.sys_domain }}",
89 | "login_instructions": "username: admin. password: (opsman > TAS tile > credentials > uaa.)"
90 | },
91 | {% endif %}
92 | {% if tkgi is defined %}
93 | "tanzu_kubernetes_grid_integrated": {
94 | "opsman_ip": "{{ tanzu_opsman_bosh.opsman_ip }}",
95 |
96 | "login_instructions": "username: admin. password: (opsman > TKGi tile > credentials > uaa.)"
97 | },
98 | {% endif %}
99 | {% if concourse is defined %}
100 | "platform_automation_toolkit": {
101 | "opsman_ip": "{{ tanzu_opsman_bosh.opsman_ip }}",
102 | "concourse": "https://{{ concourse.ip }}",
103 | "minio": "{{ minio_standalone.minio_url }}"
104 | },
105 | {% endif %}
106 |
107 | }
108 |
109 | - name: Display built configuration
110 | ansible.builtin.debug: var=build_outputs
111 |
--------------------------------------------------------------------------------
/roles/harbor-standalone/harbor-configuration/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Upgrade apt repositories
3 | ansible.builtin.apt:
4 | update_cache: true
5 |
6 | - name: Install docker CE dependencies
7 | ansible.builtin.apt:
8 | name:
9 | - apt-transport-https
10 | - ca-certificates
11 | - curl
12 | - gnupg-agent
13 | - software-properties-common
14 | ignore_errors: true
15 |
16 | - name: Add the docker CE apt signing key
17 | ansible.builtin.apt_key:
18 | url: https://download.docker.com/linux/ubuntu/gpg
19 |
20 | - name: Add docker CE PPA
21 | ansible.builtin.apt_repository:
22 | repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ ansible_distribution_release }} stable
23 | mode: "0644"
24 |
25 | - name: Install docker CE
26 | ansible.builtin.apt:
27 | name:
28 | - docker-ce
29 | - docker-ce-cli
30 | - containerd.io
31 |
32 | - name: Create a docker group
33 | ansible.builtin.group:
34 | name: docker
35 | state: present
36 |
37 | - name: Add the user to the docker group
38 | ansible.builtin.user:
39 | name: harbor
40 | group: docker
41 |
42 | - name: Get docker compose github release info
43 | ansible.builtin.uri:
44 | url: https://api.github.com/repos/docker/compose/releases/latest
45 | return_content: true
46 | body_format: json
47 | register: docker_compose_release_info
48 |
49 | - name: Get docker compose download URL
50 | ansible.builtin.set_fact:
51 | docker_compose_release_url: "{{ docker_compose_release_info.json | to_json | from_json | json_query(json_filter) | join('') }}"
52 | vars:
53 | - json_filter: >-
54 | assets[?(contains(name, '{{ release_asset_filter | default('Linux') }}') &&
55 | !contains(name, 'sha256') && !contains(name, '32'))].browser_download_url
56 |
57 | - ansible.builtin.debug:
58 | msg: "Downloading docker compose release from: {{ docker_compose_release_url }}"
59 |
60 | - name: Install docker compose github release
61 | ansible.builtin.get_url:
62 | url: "{{ docker_compose_release_url }}"
63 | dest: /usr/local/bin/docker-compose
64 | mode: "0755"
65 |
66 | - name: Get harbor github release info
67 | ansible.builtin.uri:
68 | url: https://api.github.com/repos/goharbor/harbor/releases/latest
69 | return_content: true
70 | body_format: json
71 | register: harbor_release_info
72 |
73 | - name: Get harbor download URL
74 | ansible.builtin.set_fact:
75 | harbor_release_url: "{{ harbor_release_info.json | to_json | from_json | json_query(json_filter) | join('') }}"
76 | vars:
77 | - json_filter: >-
78 | assets[?(contains(name, '{{ release_asset_filter | default('online') }}') &&
79 | !contains(name, 'asc'))].browser_download_url
80 |
81 | - ansible.builtin.debug:
82 | msg: "Downloading harbor release from: {{ harbor_release_url }}"
83 |
84 | - name: Create temporary dir
85 | ansible.builtin.tempfile:
86 | state: directory
87 | suffix: -harbor-installer
88 | register: temp_dir
89 |
90 | - ansible.builtin.debug:
91 | msg: "Created tmp dir: {{ temp_dir.path }}"
92 |
93 | - name: Download and unpack harbor installer
94 | ansible.builtin.unarchive:
95 | src: "{{ harbor_release_url }}"
96 | dest: "{{ temp_dir.path }}"
97 | remote_src: true
98 |
99 | - name: Prepare harbor configuration
100 | ansible.builtin.template:
101 | src: config.j2
102 | dest: "{{ temp_dir.path }}/harbor/harbor.yml"
103 | mode: '660'
104 |
105 | - name: Install harbor
106 | ansible.builtin.command: "{{ temp_dir.path }}/harbor/install.sh --with-chartmuseum"
107 |
108 | - name: Delete temporary dir
109 | ansible.builtin.file:
110 | state: absent
111 | path: "{{ temp_dir.path }}/"
112 |
113 | - name: Cleanup apt packages
114 | ansible.builtin.apt:
115 | autoclean: true
116 | autoremove: true
117 |
--------------------------------------------------------------------------------
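The two release-lookup tasks above pass the GitHub API response through json_query with a compound JMESPath filter. A hedged, self-contained illustration (hypothetical asset names and URLs) of what the rendered filter selects:

- name: Illustrate the release-asset filter (hypothetical data)
  vars:
    release_info:
      assets:
        - name: harbor-online-installer-v2.0.0.tgz
          browser_download_url: https://example.com/installer.tgz
        - name: harbor-online-installer-v2.0.0.tgz.asc
          browser_download_url: https://example.com/installer.tgz.asc
  ansible.builtin.debug:
    # the .asc signature asset is excluded, leaving only the installer URL
    msg: "{{ release_info | json_query(\"assets[?contains(name, 'online') && !contains(name, 'asc')].browser_download_url\") | join('') }}"

The to_json | from_json round-trip in the real tasks is the usual workaround for json_query failing on Ansible's unsafe-text wrappers.
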
/roles/harbor-standalone/harbor-vm/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Prepare Ubuntu Server VM
3 | ansible.builtin.include_role:
4 | name: ubuntu-server
5 | vars:
6 | inventory_host_group: harbor
7 |
8 | ubuntu_ova_path: "{{ ubuntu_ova }}"
9 |
10 | vcsa_hostname: "{{ hosting_vcenter.ip }}"
11 | vcsa_username: "{{ hosting_vcenter.username }}"
12 | vcsa_password: "{{ hosting_vcenter.password }}"
13 | vsphere_datacenter: "{{ hosting_vcenter.datacenter }}"
14 | vsphere_cluster: "{{ harbor_standalone.hosting_cluster }}"
15 | vsphere_datastore: "{{ harbor_standalone.hosting_datastore }}"
16 |
17 | ubuntu_vm_name: "{{ environment_tag }}-harbor"
18 | ubuntu_vm_network: "{{ harbor_standalone.hosting_network }}"
19 | ubuntu_vm_memory: "{{ harbor_standalone.memory }}"
20 | ubuntu_vm_cpus: "{{ harbor_standalone.cpus }}"
21 | ubuntu_vm_disk_size: "{{ harbor_standalone.disk_size }}"
22 |
23 | ubuntu_client_hostname: "{{ harbor_standalone.host_name }}"
24 | ubuntu_client_ip_address: "{{ harbor_standalone.ip }}/{{ harbor_standalone.hosting_network_cidr | ansible.utils.ipaddr('prefix') }}"
25 | ubuntu_client_gateway: "{{ harbor_standalone.hosting_network_gateway }}"
26 | ubuntu_client_nameserver: "{{ dns_server }}"
27 | ubuntu_client_username: "{{ harbor_standalone.username }}"
28 | ubuntu_client_password: "{{ harbor_standalone.password }}"
29 |
--------------------------------------------------------------------------------
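A note on the CIDR handling above: ansible.utils.ipaddr('prefix') returns the prefix length of a network given in CIDR form, so the template builds the client address as ip/prefix. For example:

- ansible.builtin.debug:
    msg: "{{ '192.168.0.0/22' | ansible.utils.ipaddr('prefix') }}"  # prints 22
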
/roles/minio-standalone/minio-vm/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Prepare Ubuntu Server VM for MinIO
3 | ansible.builtin.include_role:
4 | name: ubuntu-server
5 | vars:
6 | inventory_host_group: minio
7 |
8 | ubuntu_ova_path: "{{ ubuntu_ova }}"
9 |
10 | vcsa_hostname: "{{ minio_standalone.vcenter_url }}"
11 | vcsa_username: "{{ minio_standalone.vcenter_username }}"
12 | vcsa_password: "{{ minio_standalone.vcenter_password }}"
13 | vsphere_datacenter: "{{ minio_standalone.vcenter_datacenter }}"
14 | vsphere_cluster: "{{ minio_standalone.vcenter_cluster }}"
15 | vsphere_datastore: "{{ minio_standalone.vcenter_datastore }}"
16 | vsphere_resource_pool: "{{ minio_standalone.vcenter_resource_pool }}"
17 |
18 | ubuntu_vm_name: "{{ environment_tag }}-minio"
19 | ubuntu_vm_network: "{{ minio_standalone.network }}"
20 | ubuntu_vm_memory: "{{ minio_standalone.memory_in_mb }}"
21 | ubuntu_vm_cpus: "{{ minio_standalone.cpus }}"
22 | ubuntu_vm_disk_size: "{{ minio_standalone.disk_size_in_gb }}"
23 | ubuntu_install_packages: "{{ minio_standalone.ubuntu_install_packages }}"
24 |
25 | ubuntu_client_hostname: "{{ minio_standalone.host_name }}"
26 | ubuntu_client_ip_address: "{{ minio_standalone.ip }}/{{ minio_standalone.network_cidr | ansible.utils.ipaddr('prefix') }}"
27 | ubuntu_client_gateway: "{{ minio_standalone.gateway }}"
28 | ubuntu_client_nameserver: "{{ dns_server }}"
29 | ubuntu_client_username: "{{ minio_standalone.vm_username }}"
30 | ubuntu_client_password: "{{ minio_standalone.vm_password }}"
31 | ubuntu_ssh_public_key: "{{ minio_standalone.ssh_public_key }}"
32 |
--------------------------------------------------------------------------------
/roles/nfs-server/nfs.txt:
--------------------------------------------------------------------------------
1 | sudo -i
2 |
3 | apt update
4 | apt install nfs-kernel-server
5 |
6 | lshw -C disk
7 | parted /dev/sdb # replace with inline command
8 | mkfs.ext4 /dev/sdb1
9 | mkdir /srv/share/
10 | mount /dev/sdb1 /srv/share
11 | mkdir /srv/share/vms /srv/share/isos
12 |
13 | chown -R nobody:nogroup /srv/share/
14 | chmod -R 777 /srv/share/
15 | echo "/srv/share 192.168.0.0/22(rw,async,no_subtree_check)" > /etc/exports
16 |
17 | exportfs -ra
18 | systemctl restart nfs-kernel-server
19 |
20 |
21 | ~/projects/ansible-for-vsphere/sync.sh
22 |
--------------------------------------------------------------------------------
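The parted line in these notes is flagged as needing a non-interactive replacement. A hedged Ansible sketch of the partition/format/mount steps (assuming the community.general and ansible.posix collections are available; the device name is illustrative):

- name: Partition the data disk
  community.general.parted:
    device: /dev/sdb
    number: 1
    state: present
    fs_type: ext4

- name: Create the filesystem on the new partition
  community.general.filesystem:
    fstype: ext4
    dev: /dev/sdb1

- name: Mount the share persistently
  ansible.posix.mount:
    path: /srv/share
    src: /dev/sdb1
    fstype: ext4
    state: mounted
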
/roles/nfs-server/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Deploy Ubuntu NFS server
3 | community.vmware.vmware_deploy_ovf:
4 | hostname: "{{ nested_vcenter.ip }}"
5 | username: "{{ nested_vcenter.username }}"
6 | password: "{{ nested_vcenter.password }}"
7 | validate_certs: false
8 | name: "vyos"
9 | datacenter: "{{ nested_vcenter.datacenter }}"
10 | # folder: /SG1/vm
11 | cluster: "{{ vyos.cluster }}"
12 | datastore: "{{ vyos.datastore }}"
13 | disk_provisioning: "{{ disk_mode }}"
14 | networks:
15 | public: "{{ vyos.uplink_port_group }}"
16 | internal: "{{ vyos.internal_port_group }}"
17 | ova: "../dump/focal-server-cloudimg-amd64.ova"
18 | allow_duplicates: false
19 | power_on: true
20 | fail_on_spec_warnings: true
21 | wait: true
22 | wait_for_ip_address: true
23 | inject_ovf_env: false
24 |
--------------------------------------------------------------------------------
/roles/nsx-alb/configure-cloud/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | controller_ip: "{{ nsx_alb.controller_ip }}"
3 | controller_username: "{{ nsx_alb.controller_username }}"
4 | controller_password: "{{ nsx_alb.controller_password }}"
5 | controller_vcenter_cluster: "{{ nsx_alb.controller_vcenter_cluster }}"
6 | api_version: "{{ nsx_alb.api_version }}"
7 | # api_version: 22.1.3
8 |
9 | avi_credentials:
10 | controller: "{{ controller_ip }}"
11 | username: "{{ controller_username }}"
12 | password: "{{ controller_password }}"
13 | api_version: "{{ api_version }}"
14 |
15 | # The cloud vCenter will host the service engines and Tanzu clusters
16 | cloud_name: "{{ nsx_alb.cloud_name }}"
17 | cloud_vcenter_username: "{{ nsx_alb.cloud_vcenter_username }}"
18 | cloud_vcenter_password: "{{ nsx_alb.cloud_vcenter_password }}"
19 | cloud_vcenter_url: "{{ nsx_alb.cloud_vcenter_url }}"
20 | cloud_vcenter_datacenter: "{{ nsx_alb.cloud_vcenter_datacenter }}"
21 | cloud_exclude_other_networks: "{{ nsx_alb.cloud_exclude_other_networks }}"
22 |
23 | se_group_name: "{{ nsx_alb.se_group_name }}"
24 | se_vcenter_cluster: "{{ nsx_alb.se_vcenter_cluster }}"
25 | # The management network will host the service engine management interface
26 | se_management_port_group: "{{ nsx_alb.se_management_port_group }}"
27 | se_management_network_cidr: "{{ nsx_alb.se_management_network_cidr }}"
28 | se_management_network_range: "{{ nsx_alb.se_management_network_range | default(omit) }}"
29 | # se_management_network_gateway: "{{ nsx_alb.se_management_network_gateway | default(omit) }}"
30 | # The vip network will contain the virtual servers created by Avi
31 | se_vip_port_group: "{{ nsx_alb.se_vip_port_group }}"
32 | se_vip_network_cidr: "{{ nsx_alb.se_vip_network_cidr }}"
33 | se_vip_network_range: "{{ nsx_alb.se_vip_network_range }}"
34 | se_vip_network_gateway: "{{ nsx_alb.se_vip_network_gateway }}"
35 |
--------------------------------------------------------------------------------
/roles/nsx-alb/deploy-controller/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - ansible.builtin.debug: var=nsx_alb
4 |
5 | - name: Deploy Avi Controller
6 | community.vmware.vmware_deploy_ovf:
7 | hostname: "{{ controller_vcenter_ip }}"
8 | username: "{{ controller_vcenter_username }}"
9 | password: "{{ controller_vcenter_password }}"
10 | validate_certs: false
11 | name: "{{ environment_tag }}-controller"
12 | datacenter: "{{ controller_vcenter_datacenter }}"
13 | # folder: /SG1/vm
14 | cluster: "{{ controller_vcenter_cluster }}"
15 | datastore: "{{ controller_vcenter_datastore }}"
16 | disk_provisioning: "thin"
17 | networks:
18 | "Management": "{{ controller_port_group }}"
19 | ova: "{{ controller_ova }}"
20 | allow_duplicates: false
21 | power_on: true
22 | fail_on_spec_warnings: false
23 | wait: true
24 | wait_for_ip_address: true
25 | inject_ovf_env: true
26 | properties:
27 | avi.mgmt-ip.CONTROLLER: "{{ controller_ip }}"
28 | avi.mgmt-mask.CONTROLLER: "{{ controller_netmask }}"
29 | avi.default-gw.CONTROLLER: "{{ controller_gateway }}"
30 | avi.sysadmin-public-key.CONTROLLER: "{{ controller_ssh_public_key }}"
31 | register: controller_results
32 |
33 | - name: Wait for Controller to be ready
34 | ansible.builtin.uri:
35 | validate_certs: false
36 | url: "http://{{ controller_ip }}/"
37 | method: GET
38 | status_code: 200,302,301,503
39 | register: result_controller_check
40 | until: result_controller_check.status == 200
41 | retries: 480
42 | delay: 15
43 |
44 | - name: Check Cluster Status
45 | ansible.builtin.uri:
46 | validate_certs: false
47 | url: "https://{{ controller_ip }}/api/cluster/runtime"
48 | method: GET
49 | status_code: 200,302,301,503
50 | register: cluster_status
51 | until: cluster_status.json.cluster_state.progress == 100
52 | retries: 480
53 | delay: 15
54 |
55 | - name: Wait for all services to be ready
56 | ansible.builtin.pause:
57 | echo: false
58 | seconds: 60
59 | when: controller_results.changed
60 |
61 | - name: Check to see if password needs to be reset
62 | vmware.alb.avi_api_session:
63 | avi_credentials: "{{ avi_credentials }}"
64 | http_method: get
65 | path: cluster
66 | register: access_test
67 | ignore_errors: true
68 |
69 | - ansible.builtin.fail:
70 | msg:
71 | - "The controller default password is empty"
72 | - "If you are using environment variables, check the variable is exported"
73 | when: >
74 | controller_default_password == "" and
75 | access_test.module_stderr is defined and
76 | 'Invalid credentials' in access_test.module_stderr
77 |
78 | - name: Change admin default password
79 | vmware.alb.avi_useraccount:
80 | controller: "{{ controller_ip }}"
81 | username: "{{ controller_username }}"
82 | password: "{{ controller_password }}"
83 | api_version: "{{ api_version }}"
84 | old_password: "{{ controller_default_password }}"
85 | when: >
86 | access_test.module_stderr is defined and
87 | 'Invalid credentials' in access_test.module_stderr
88 | register: useraccount
89 |
90 | - name: Wait for password change
91 | ansible.builtin.pause:
92 | echo: false
93 | seconds: 5
94 | when: useraccount.changed
95 |
96 | - name: Create an SSL Key and Certificate
97 | vmware.alb.avi_sslkeyandcertificate:
98 | avi_credentials: "{{ avi_credentials }}"
99 | type: SSL_CERTIFICATE_TYPE_SYSTEM
100 | name: "controller-cert"
101 | certificate:
102 | self_signed: true
103 | days_until_expire: 730
104 | signature_algorithm: sha256WithRSAEncryption
105 | subject:
106 | common_name: "{{ controller_ip }}"
107 | subject_alt_names:
108 | - "{{ controller_ip }}"
109 | register: controller_cert_result
110 |
111 | - name: Setting basic Controller config
112 | vmware.alb.avi_systemconfiguration:
113 | avi_credentials: "{{ avi_credentials }}"
114 | welcome_workflow_complete: true
115 | default_license_tier: ESSENTIALS
116 | email_configuration:
117 | smtp_type: SMTP_NONE
118 | global_tenant_config:
119 | se_in_provider_context: true
120 | tenant_access_to_provider_se: true
121 | tenant_vrf: false
122 | dns_configuration:
123 | server_list:
124 | - type: V4
125 | addr: "{{ dns_server }}"
126 | ntp_configuration:
127 | ntp_servers:
128 | - server:
129 | type: DNS
130 | addr: "{{ ntp_server }}"
131 | portal_configuration:
132 | sslkeyandcertificate_refs:
133 | - /api/sslkeyandcertificate/?name=controller-cert
134 | register: system_config_result
135 |
136 | - name: Wait for server to restart with new certificate
137 | ansible.builtin.pause:
138 | echo: false
139 | seconds: 15
140 | when: system_config_result.changed
141 |
142 | - name: Create BackupConfiguration object
143 | vmware.alb.avi_backupconfiguration:
144 | avi_credentials: "{{ avi_credentials }}"
145 | name: Backup-Configuration
146 | backup_passphrase: VMware1!
147 | upload_to_remote_host: false
148 | tenant_ref: "/api/tenant/?name=admin"
149 | save_local: true
150 | register: backup
151 |
--------------------------------------------------------------------------------
/roles/nsx-alb/deploy-controller/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | controller_ova: "{{ nsx_alb_controller_ova }}"
3 | controller_vcenter_ip: "{{ nsx_alb.controller_vcenter_ip }}"
4 | controller_vcenter_username: "{{ nsx_alb.controller_vcenter_username }}"
5 | controller_vcenter_password: "{{ nsx_alb.controller_vcenter_password }}"
6 | controller_vcenter_datacenter: "{{ nsx_alb.controller_vcenter_datacenter }}"
7 | controller_vcenter_cluster: "{{ nsx_alb.controller_vcenter_cluster }}"
8 | controller_vcenter_datastore: "{{ nsx_alb.controller_vcenter_datastore }}"
9 |
10 | ntp_server: "{{ nsx_alb.ntp_server }}"
11 | dns_server: "{{ nsx_alb.dns_server }}"
12 |
13 | controller_ip: "{{ nsx_alb.controller_ip }}"
14 | controller_netmask: "{{ nsx_alb.controller_netmask }}"
15 | controller_gateway: "{{ nsx_alb.controller_gateway }}"
16 | controller_port_group: "{{ nsx_alb.controller_port_group }}"
17 | controller_ssh_public_key: "{{ nsx_alb.controller_ssh_public_key }}"
18 | controller_default_password: "{{ nsx_alb.controller_default_password }}"
19 | controller_username: "{{ nsx_alb.controller_username }}"
20 | controller_password: "{{ nsx_alb.controller_password }}"
21 | api_version: "{{ nsx_alb.api_version }}"
22 |
23 | avi_credentials:
24 | controller: "{{ controller_ip }}"
25 | username: "{{ controller_username }}"
26 | password: "{{ controller_password }}"
27 | api_version: "{{ api_version }}"
28 |
--------------------------------------------------------------------------------
/roles/nsx-alb/dummy-service/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Pause for 60 seconds for system to stabilize
3 | ansible.builtin.pause:
4 | seconds: 60
5 |
6 | - name: Create dummy Pool to force SE creation
7 | vmware.alb.avi_pool:
8 | avi_credentials: "{{ avi_credentials }}"
9 | name: dummy-pool
10 | state: present
11 |
12 | - name: Create vsvip for the dummy virtual service
13 | vmware.alb.avi_vsvip:
14 | avi_credentials: "{{ avi_credentials }}"
15 | name: vsvip-dummy
16 | cloud_ref: /api/cloud/?name=Default-Cloud
17 | vip:
18 | - vip_id: '1'
19 | auto_allocate_ip_type: V4_ONLY
20 | auto_allocate_ip: true
21 | ipam_network_subnet:
22 | network_ref: "/api/network?name={{ dummy_service_network_name }}"
23 | subnet:
24 | ip_addr:
25 | addr: "{{ dummy_service_subnet_addr }}"
26 | type: "V4"
27 | mask: "{{ dummy_service_subnet_mask }}"
28 |
29 | - name: Create dummy Virtual Service to force SE creation
30 | vmware.alb.avi_virtualservice:
31 | avi_credentials: "{{ avi_credentials }}"
32 | name: dummy-vip
33 | state: present
34 | services:
35 | - port: 80
36 | application_profile_ref: '/api/applicationprofile?name=System-L4-Application'
37 | type: VS_TYPE_NORMAL
38 | cloud_ref: "/api/cloud?name=Default-Cloud"
39 | pool_ref: '/api/pool?name=dummy-pool'
40 | vsvip_ref: /api/vsvip/?name=vsvip-dummy
41 |
42 | - name: Pause for 60 seconds to ensure SE is created
43 | ansible.builtin.pause:
44 | seconds: 60
45 |
46 | - name: Delete dummy Virtual Service
47 | vmware.alb.avi_virtualservice:
48 | avi_credentials: "{{ avi_credentials }}"
49 | name: dummy-vip
50 | state: absent
51 |
52 | - name: Delete vsvip for the dummy virtual service
53 | vmware.alb.avi_vsvip:
54 | avi_credentials: "{{ avi_credentials }}"
55 | name: vsvip-dummy
56 | state: absent
57 |
58 | - name: Delete dummy Pool
59 | vmware.alb.avi_pool:
60 | avi_credentials: "{{ avi_credentials }}"
61 | name: dummy-pool
62 | state: absent
63 |
--------------------------------------------------------------------------------
/roles/nsx-alb/dummy-service/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | controller_ip: "{{ nsx_alb.controller_ip }}"
3 | controller_username: "{{ nsx_alb.controller_username }}"
4 | controller_password: "{{ nsx_alb.controller_password }}"
5 | api_version: "{{ nsx_alb.api_version }}"
6 |
7 | avi_credentials:
8 | controller: "{{ controller_ip }}"
9 | username: "{{ controller_username }}"
10 | password: "{{ controller_password }}"
11 | api_version: "{{ api_version }}"
12 |
13 | se_vcenter_cluster: "{{ nsx_alb.se_vcenter_cluster }}"
14 | # The management network will host the service engine management interface
15 | se_management_port_group: "{{ nsx_alb.se_management_port_group }}"
16 | se_management_network_cidr: "{{ nsx_alb.se_management_network_cidr }}"
17 | se_management_network_range: "{{ nsx_alb.se_management_network_range }}"
18 | se_management_network_gateway: "{{ nsx_alb.se_management_network_gateway }}"
19 | # The vip network will contain the virtual servers created by Avi
20 | se_vip_port_group: "{{ nsx_alb.se_vip_port_group }}"
21 | se_vip_network_cidr: "{{ nsx_alb.se_vip_network_cidr }}"
22 | se_vip_network_range: "{{ nsx_alb.se_vip_network_range }}"
23 | se_vip_network_gateway: "{{ nsx_alb.se_vip_network_gateway }}"
24 |
25 | dummy_service_subnet_addr: "{{ nsx_alb.dummy_service.subnet_addr }}"
26 | dummy_service_subnet_mask: "{{ nsx_alb.dummy_service.subnet_mask }}"
27 | dummy_service_network_name: "{{ nsx_alb.dummy_service.network_name }}"
28 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-compute-manager/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Deploy compute manager
3 | vmware.ansible_for_nsxt.nsxt_fabric_compute_managers:
4 | hostname: "{{ nsxt.manager.hostname }}"
5 | username: "{{ nsxt.manager.username }}"
6 | password: "{{ nsxt.manager.password }}"
7 | validate_certs: false
8 | display_name: "vCenter"
9 | server: "{{ nested_vcenter.ip }}"
10 | origin_type: "vCenter"
11 | credential:
12 | credential_type: UsernamePasswordLoginCredential
13 | username: "{{ nested_vcenter.username }}"
14 | password: "{{ nested_vcenter.password }}"
15 | set_as_oidc_provider: true
16 | state: present
17 | retries: 12
18 | delay: 10
19 | register: compute_manager_result
20 | until: compute_manager_result is not failed
21 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-compute-nodes/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Deploy compute manager
3 | vmware.ansible_for_nsxt.nsxt_fabric_compute_managers:
4 | hostname: "{{ nsxt.manager.hostname }}"
5 | username: "{{ nsxt.manager.username }}"
6 | password: "{{ nsxt.manager.password }}"
7 | validate_certs: false
8 | display_name: "vCenter"
9 | server: "{{ nested_vcenter.ip }}"
10 | origin_type: "vCenter"
11 | credential:
12 | credential_type: UsernamePasswordLoginCredential
13 | username: "{{ nested_vcenter.username }}"
14 | password: "{{ nested_vcenter.password }}"
15 | set_as_oidc_provider: true
16 | state: present
17 | retries: 12
18 | delay: 10
19 | register: result
20 | until: result is not failed
21 |
22 | - name: Create transport node profile
23 | vmware.ansible_for_nsxt.nsxt_transport_node_profiles:
24 | hostname: "{{ nsxt.manager.hostname }}"
25 | username: "{{ nsxt.manager.username }}"
26 | password: "{{ nsxt.manager.password }}"
27 | validate_certs: false
28 | resource_type: TransportNodeProfile
29 | display_name: "{{ item.display_name }}"
30 | description: "{{ item.description }}"
31 | host_switch_spec:
32 | resource_type: StandardHostSwitchSpec
33 | host_switches: "{{ item.host_switches }}"
34 | transport_zone_endpoints: "{{ item.transport_zone_endpoints | default(omit) }}"
35 | state: present
36 | loop: "{{ nsxt.transport_node_profiles }}"
37 |
38 | - name: Attach Transport node profile to cluster
39 | vmware.ansible_for_nsxt.nsxt_transport_node_collections:
40 | hostname: "{{ nsxt.manager.hostname }}"
41 | username: "{{ nsxt.manager.username }}"
42 | password: "{{ nsxt.manager.password }}"
43 | validate_certs: false
44 | display_name: "{{ item.display_name }}"
45 | resource_type: "TransportNodeCollection"
46 | description: "{{ item.description }}"
47 | compute_manager_name: "{{ item.compute_manager_name }}"
48 | cluster_name: "{{ item.cluster_name }}"
49 | transport_node_profile_name: "{{ item.transport_node_profile_name }}"
50 | state: present
51 | loop: "{{ nsxt.cluster_attach }}"
52 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-edge-clusters/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add edge cluster
3 | vmware.ansible_for_nsxt.nsxt_edge_clusters:
4 | hostname: "{{ nsxt.manager.hostname }}"
5 | username: "{{ nsxt.manager.username }}"
6 | password: "{{ nsxt.manager.password }}"
7 | validate_certs: false
8 | display_name: "{{ item.edge_cluster_name }}"
9 | description: "{{ item.description | default(omit) }}"
10 | cluster_profile_bindings:
11 | - profile_name: nsx-default-edge-high-availability-profile
12 | resource_type: EdgeHighAvailabilityProfile
13 | members: "{{ item.edge_cluster_members }}"
14 | state: present
15 | loop: "{{ nsxt.edge_clusters }}"
16 | retries: 12
17 | delay: 10
18 | until: "'rc' not in edge_cluster_result"
19 | register: edge_cluster_result
20 |
21 | - name: List Edge Clusters
22 | vmware.ansible_for_nsxt.nsxt_edge_clusters_facts:
23 | hostname: "{{ nsxt.manager.hostname }}"
24 | username: "{{ nsxt.manager.username }}"
25 | password: "{{ nsxt.manager.password }}"
26 | validate_certs: false
27 | register: edge_clusters
28 |
29 | - name: Wait 5 minutes for the edge clusters to be fully configured
30 | ansible.builtin.wait_for:
31 | timeout: 300
32 | when: edge_cluster_result.changed
33 |
34 | - name: List Edge Clusters
35 | vmware.ansible_for_nsxt.nsxt_edge_clusters_facts:
36 | hostname: "{{ nsxt.manager.hostname }}"
37 | username: "{{ nsxt.manager.username }}"
38 | password: "{{ nsxt.manager.password }}"
39 | validate_certs: false
40 | register: edge_clusters
41 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-ip-blocks/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create NSXT IP address blocks
3 | vmware.ansible_for_nsxt.nsxt_ip_blocks:
4 | hostname: "{{ nsxt.manager.hostname }}"
5 | username: "{{ nsxt.manager.username }}"
6 | password: "{{ nsxt.manager.password }}"
7 | validate_certs: false
8 | display_name: "{{ item.display_name }}"
9 | cidr: "{{ item.cidr }}"
10 | state: present
11 | loop: "{{ nsxt.ip_blocks }}"
12 | when: nsxt.ip_blocks is defined
13 |
14 | - name: Create NSXT Policy IP address blocks
15 | vmware.ansible_for_nsxt.nsxt_policy_ip_block:
16 | hostname: "{{ nsxt.manager.hostname }}"
17 | username: "{{ nsxt.manager.username }}"
18 | password: "{{ nsxt.manager.password }}"
19 | validate_certs: false
20 | display_name: "{{ item.display_name }}"
21 | cidr: "{{ item.cidr }}"
22 | state: present
23 | loop: "{{ nsxt.policy_ip_blocks }}"
24 | when: nsxt.policy_ip_blocks is defined
25 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-ip-pools/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create ip pool
3 | vmware.ansible_for_nsxt.nsxt_ip_pools:
4 | hostname: "{{ nsxt.manager.hostname }}"
5 | username: "{{ nsxt.manager.username }}"
6 | password: "{{ nsxt.manager.password }}"
7 | validate_certs: false
8 | display_name: "{{ item.display_name }}"
9 | description: "{{ item.description | default(omit) }}"
10 | subnets: "{{ item.subnets }}"
11 | state: present
12 | tags: "{{ item.tags | default(omit) }}"
13 | ip_release_delay: "{{ item.ip_release_delay | default(omit) }}"
14 | loop: "{{ nsxt.ip_pools }}"
15 | register: result
16 | retries: 5
17 | delay: 10
18 | until: "'module_stderr' not in result"
19 | when: nsxt.ip_pools is defined
20 |
21 | - name: Create policy ip pool
22 | vmware.ansible_for_nsxt.nsxt_policy_ip_pool:
23 | hostname: "{{ nsxt.manager.hostname }}"
24 | username: "{{ nsxt.manager.username }}"
25 | password: "{{ nsxt.manager.password }}"
26 | validate_certs: false
27 | display_name: "{{ item.display_name }}"
28 | description: "{{ item.description | default(omit) }}"
29 | pool_static_subnets: "{{ item.pool_static_subnets }}"
30 | state: present
31 | tags: "{{ item.tags | default(omit) }}"
32 | loop: "{{ nsxt.policy_ip_pools }}"
33 | register: result
34 | retries: 5
35 | delay: 10
36 | until: "'module_stderr' not in result"
37 | when: nsxt.policy_ip_pools is defined
38 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-mgmt-appliance/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - ansible.builtin.stat: path={{ nsxt_ova }}
3 |   register: nsxt_ova_file
4 |
5 | - ansible.builtin.fail:
6 |     msg: "NSX-T OVA file at path {{ nsxt_ova }} does not exist"
7 |   when: not nsxt_ova_file.stat.exists
8 |
9 | - name: Deploy NSX-T Manager OVA
10 | community.vmware.vmware_deploy_ovf:
11 | hostname: "{{ nsxt.manager.hosting_vcenter_ip }}"
12 | username: "{{ nsxt.manager.hosting_vcenter_username }}"
13 | password: "{{ nsxt.manager.hosting_vcenter_password }}"
14 | validate_certs: false
15 | name: "{{ environment_tag }}-nsxt-manager"
16 | datacenter: "{{ nsxt.manager.hosting_datacenter }}"
17 | cluster: "{{ nsxt.manager.hosting_cluster }}"
18 | datastore: "{{ nsxt.manager.hosting_datastore }}"
19 | disk_provisioning: "{{ disk_mode }}"
20 | networks:
21 | "Network 1": "{{ nsxt.manager.hosting_network }}"
22 | ova: "{{ nsxt_ova }}"
23 | allow_duplicates: false
24 | power_on: true
25 | fail_on_spec_warnings: true
26 | wait: true
27 | wait_for_ip_address: true
28 | inject_ovf_env: true
29 | deployment_option: "{{ nsxt.manager.deployment_option | default('small') }}"
30 | properties:
31 | nsx_ip_0: "{{ nsxt.manager.ip }}"
32 | nsx_netmask_0: "{{ nsxt.manager.netmask }}"
33 | nsx_gateway_0: "{{ nsxt.manager.gateway }}"
34 | nsx_dns1_0: "{{ dns_server }}"
35 | nsx_domain_0: "{{ dns_domain }}"
36 | nsx_ntp_0: "{{ ntp_server_ip }}"
37 | nsx_isSSHEnabled: true
38 | nsx_allowSSHRootLogin: true
39 | nsx_hostname: "{{ nsxt.manager.ip }}"
40 | nsx_passwd_0: "{{ nsxt.manager.password }}"
41 | nsx_cli_passwd_0: "{{ nsxt.manager.password }}"
42 | nsx_cli_audit_passwd_0: "{{ nsxt.manager.password }}"
43 | nsx_role: "NSX Manager"
44 | async: 1800
45 | poll: 0
46 | register: nsxt_manager_results
47 |
48 | - name: Check on NSX-T Manager Deploy Job
49 | ansible.builtin.async_status:
50 | jid: "{{ nsxt_manager_results.ansible_job_id }}"
51 | register: job_result
52 | until: job_result.finished
53 | retries: 240
54 | delay: 10
55 |
56 | - name: Check manager status
57 | vmware.ansible_for_nsxt.nsxt_manager_status:
58 | hostname: "{{ nsxt.manager.hostname }}"
59 | username: "{{ nsxt.manager.username }}"
60 | password: "{{ nsxt.manager.password }}"
61 | validate_certs: false
62 | wait_time: 50
63 |
64 | - name: Check all services are up
65 | ansible.builtin.uri:
66 | url: https://{{ nsxt.manager.ip }}/api/v1/cluster-manager/status
67 | user: "{{ nsxt.manager.username }}"
68 | password: "{{ nsxt.manager.password }}"
69 | method: GET
70 | force_basic_auth: true
71 | validate_certs: false
72 | return_content: true
73 | retries: 60
74 | delay: 10
75 | register: result
76 | until: "'UNKNOWN' not in result.content and '\"DOWN\"' not in result.content"
77 |
78 | - name: Add license
79 | vmware.ansible_for_nsxt.nsxt_licenses:
80 | hostname: "{{ nsxt.manager.hostname }}"
81 | username: "{{ nsxt.manager.username }}"
82 | password: "{{ nsxt.manager.password }}"
83 | validate_certs: false
84 | license_key: "{{ nsxt.manager.license_key }}"
85 | state: present
86 | when: 'nsxt.manager.license_key is defined'
87 |
--------------------------------------------------------------------------------
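Note: the OVA deployment above uses Ansible's async/poll pattern: the long task is started with poll: 0, and async_status reaps it later. A minimal, self-contained sketch of the same pattern (sleep stands in for any long-running module call):

- hosts: localhost
  gather_facts: false
  tasks:
    - name: Start a long-running task in the background
      ansible.builtin.command: sleep 30
      async: 120   # maximum seconds the background job may run
      poll: 0      # return immediately with a job id
      register: long_job

    - name: Reap the background job
      ansible.builtin.async_status:
        jid: "{{ long_job.ansible_job_id }}"
      register: job_result
      until: job_result.finished
      retries: 30
      delay: 5
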
/roles/nsxt/nsxt-mgmt-certificate/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Checking if openssl is installed
4 |   ansible.builtin.command: which openssl
5 |
6 | - name: NSX Manager cert check
7 | ansible.builtin.shell: |
8 | set -o pipefail && \
9 |     openssl s_client -connect {{ nsxt.manager.ip }}:443 </dev/null \
10 | | openssl x509 -noout -text | grep DNS: | xargs
11 | register: result_nsx_manager_cert_sans
12 | args:
13 | executable: /usr/bin/bash
14 | changed_when: false
15 | ignore_errors: true
16 |
17 | - ansible.builtin.set_fact:
18 | cert_replace: "{{ nsxt.manager.ip not in result_nsx_manager_cert_sans.stdout }}"
19 |
20 | # BROKEN MODULE
21 | # - name: Get google
22 | # community.crypto.get_certificate:
23 | # host: www.google.com
24 | # port: 443
25 | # delegate_to: localhost
26 | # run_once: true
27 | # register: result_nsx_mgr_cert
28 |
29 | - name: Create private key (RSA, 4096 bits)
30 | community.crypto.openssl_privatekey:
31 | path: "{{ temp_dir }}/nsx.key"
32 | when: cert_replace
33 |
34 | - name: Create certificate signing request (CSR) for self-signed certificate
35 | community.crypto.openssl_csr_pipe:
36 | privatekey_path: "{{ temp_dir }}/nsx.key"
37 | common_name: "{{ nsxt.manager.ip }}"
38 | country_name: US
39 | state_or_province_name: California
40 | locality_name: CA
41 | organization_name: NSX
42 | subject_alt_name:
43 | - "DNS:{{ nsxt.manager.ip }}"
44 | - "IP:{{ nsxt.manager.ip }}"
45 | register: result_csr
46 | when: cert_replace
47 |
48 | - name: Generate a Self Signed OpenSSL certificate
49 | community.crypto.x509_certificate:
50 | path: "{{ temp_dir }}/nsx.crt"
51 | privatekey_path: "{{ temp_dir }}/nsx.key"
52 | csr_content: "{{ result_csr.csr }}"
53 | provider: selfsigned
54 | when: cert_replace
55 |
56 | - name: Add NSX Machine certificate
57 | vmware.ansible_for_nsxt.nsxt_certificates:
58 | hostname: "{{ nsxt.manager.hostname }}"
59 | username: "{{ nsxt.manager.username }}"
60 | password: "{{ nsxt.manager.password }}"
61 | validate_certs: false
62 | display_name: "nsx_machine_cert"
63 | pem_encoded_file: "{{ temp_dir }}/nsx.crt"
64 | private_key_file: "{{ temp_dir }}/nsx.key"
65 | state: "present"
66 | register: cert_result
67 | ignore_errors: true
68 | when: cert_replace
69 |
70 | # Boilerplate needed because the certificates module is not idempotent
71 | - ansible.builtin.fail: msg="Certificate not added"
72 |   when: "cert_replace and not cert_result.changed and 'already exists' not in cert_result.msg"
73 | - ansible.builtin.debug: msg="Machine certificate added"
74 |   when: cert_replace and cert_result.changed
75 | - ansible.builtin.debug: msg="Machine certificate already exists"
76 |   when: "cert_replace and 'already exists' in cert_result.msg"
77 |   ignore_errors: true # cert_result.msg only exists when the task did not change anything
78 |
79 | - name: Assign Machine Cert to API
80 | ansible.builtin.uri:
81 | url: >-
82 | https://{{ nsxt.manager.hostname }}/api/v1/node/services/http?action=apply_certificate&certificate_id={{ cert_result.result.results[0].id }}
83 | user: "{{ nsxt.manager.username }}"
84 | password: "{{ nsxt.manager.password }}"
85 | method: POST
86 | force_basic_auth: true
87 | validate_certs: false
88 | status_code: [200, 202]
89 | when: cert_replace and cert_result.changed
90 |
91 | - name: Wait 1 minute for the endpoint to come back
92 | ansible.builtin.pause:
93 | minutes: 1
94 | when: cert_replace and cert_result.changed
95 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-principal-identities/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Super User Block
3 | block:
4 | - name: Create temporary certificate directory
5 | ansible.builtin.tempfile:
6 | state: directory
7 | suffix: pi_cert
8 | register: temp_dir_results
9 |
10 | - ansible.builtin.set_fact:
11 |     tmp_dir: "{{ temp_dir_results.path }}"
12 |
13 | - name: Create temporary certificate file
14 | ansible.builtin.copy:
15 | dest: "{{ tmp_dir }}/{{ item.display_name }}.pem"
16 | content: |
17 | {{ item.public_key }}
18 | mode: '660'
19 | loop: "{{ nsxt.principal_identities }}"
20 |
21 | - name: Register a TKGI Super User
22 | vmware.ansible_for_nsxt.nsxt_principal_identities:
23 | hostname: "{{ nsxt.manager.hostname }}"
24 | username: "{{ nsxt.manager.username }}"
25 | password: "{{ nsxt.manager.password }}"
26 | validate_certs: false
27 | display_name: "{{ item.display_name }}"
28 | name: "{{ item.display_name }}"
29 | node_id: "node-1"
30 | role: "{{ item.role }}"
31 | certificate_pem_file: "{{ tmp_dir }}/{{ item.display_name }}.pem"
32 | state: "present"
33 | register: pi_result
34 | loop: "{{ nsxt.principal_identities }}"
35 |
36 | always:
37 | - name: Remove the temp directory
38 | ansible.builtin.file:
39 | path: "{{ tmp_dir }}"
40 | state: absent
41 | when: tmp_dir is defined
42 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-super-user/tasks/main.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/laidbackware/vmware-lab-builder/51e52497f9fad728b927245c070173625e531171/roles/nsxt/nsxt-super-user/tasks/main.yml
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-switching-routing-legacy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Create Tier 0 Gateway
4 | vmware.ansible_for_nsxt.nsxt_policy_tier0:
5 | hostname: "{{ nsxt.manager.hostname }}"
6 | username: "{{ nsxt.manager.username }}"
7 | password: "{{ nsxt.manager.password }}"
8 | validate_certs: false
9 | display_name: "{{ nsxt.tier_0.display_name }}"
10 | state: present
11 | ha_mode: "{{ nsxt.tier_0.ha_mode }}"
12 | failover_mode: "{{ nsxt.tier_0.failover_mode | default(omit) }}"
13 | disable_firewall: "{{ nsxt.tier_0.disable_firewall | default(omit) }}"
14 | force_whitelisting: "{{ nsxt.tier_0.force_whitelisting | default(omit) }}"
15 | static_routes: "{{ nsxt.tier_0.static_routes | default(omit) }}"
16 | bfd_peers: "{{ nsxt.tier_0.bfd_peers | default(omit) }}"
17 | locale_services: "{{ nsxt.tier_0.locale_services | default(omit) }}"
18 | vrf_config: "{{ nsxt.tier_0.vrf_config | default(omit) }}"
19 |     tags: "{{ nsxt.tier_0.tags | default(omit) }}"
20 | retries: 3
21 | delay: 5
22 |
23 | - ansible.builtin.debug: var=nsxt.tier_1_gateways
24 |
25 | - name: Create Tier 1 Gateways
26 | vmware.ansible_for_nsxt.nsxt_policy_tier1:
27 | hostname: "{{ nsxt.manager.hostname }}"
28 | username: "{{ nsxt.manager.username }}"
29 | password: "{{ nsxt.manager.password }}"
30 | validate_certs: false
31 | display_name: "{{ item.display_name }}"
32 | state: "{{ item.state | default('present') }}"
33 | failover_mode: "{{ item.failover_mode | default(omit) }}"
34 | disable_firewall: "{{ item.disable_firewall | default(omit) }}"
35 | force_whitelisting: "{{ item.force_whitelisting | default(omit) }}"
36 | enable_standby_relocation: "{{ item.enable_standby_relocation | default(omit) }}"
37 | route_advertisement_rules: "{{ item.route_advertisement_rules | default(omit) }}"
38 | route_advertisement_types: "{{ item.route_advertisement_types | default(omit) }}"
39 | tier0_display_name: "{{ item.tier0_display_name }}"
40 | # locale_services: "{{ item.locale_services | default(omit) }}"
41 | tags: "{{ item.tags | default(omit) }}"
42 | loop: "{{ nsxt.tier_1_gateways }}"
43 |
44 | - name: Create overlay segments
45 | vmware.ansible_for_nsxt.nsxt_policy_segment:
46 | hostname: "{{ nsxt.manager.hostname }}"
47 | username: "{{ nsxt.manager.username }}"
48 | password: "{{ nsxt.manager.password }}"
49 | validate_certs: false
50 | display_name: "{{ item.display_name }}"
51 | state: present
52 | subnets: "{{ item.subnets | default(omit) }}"
53 | tier1_display_name: "{{ item.tier1_display_name | default(omit) }}"
54 | transport_zone_display_name: "{{ item.transport_zone_display_name }}"
55 | tags: "{{ item.tags | default(omit) }}"
56 | loop: "{{ nsxt.overlay_segments }}"
57 |
58 | - ansible.builtin.debug:
59 | msg:
60 | - The next step will check and wait for NSX-T networks to be available
61 | - You must add a static route to {{ item.nsxt_supernet }} with next hop {{ item.router_uplink }}
62 | - It may take a short while for NSX-T to complete the configuration and respond
63 | when: "nsxt.routing_tests is defined"
64 | loop: "{{ nsxt.routing_tests }}"
65 |
66 | - name: "Checking static route to NSX-T is in place by pinging {{ item.ip_to_ping }}"
67 | ansible.builtin.shell: "while true; do ping -c1 {{ item.ip_to_ping }} > /dev/null && break; done"
68 | delegate_to: localhost
69 | when: "nsxt.routing_tests is defined"
70 | loop: "{{ nsxt.routing_tests }}"
71 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-switching-routing/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Create Tier 0 Gateway
4 | vmware.ansible_for_nsxt.nsxt_policy_tier0:
5 | hostname: "{{ nsxt.manager.hostname }}"
6 | username: "{{ nsxt.manager.username }}"
7 | password: "{{ nsxt.manager.password }}"
8 | validate_certs: false
9 | display_name: "{{ nsxt.tier_0.display_name }}"
10 | state: present
11 | ha_mode: "{{ nsxt.tier_0.ha_mode }}"
12 | failover_mode: "{{ nsxt.tier_0.failover_mode | default(omit) }}"
13 | disable_firewall: "{{ nsxt.tier_0.disable_firewall | default(omit) }}"
14 | force_whitelisting: "{{ nsxt.tier_0.force_whitelisting | default(omit) }}"
15 | static_routes: "{{ nsxt.tier_0.static_routes | default(omit) }}"
16 | bfd_peers: "{{ nsxt.tier_0.bfd_peers | default(omit) }}"
17 | locale_services: "{{ nsxt.tier_0.locale_services | default(omit) }}"
18 | vrf_config: "{{ nsxt.tier_0.vrf_config | default(omit) }}"
19 |     tags: "{{ nsxt.tier_0.tags | default(omit) }}"
20 | retries: 3
21 | delay: 5
22 | when: nsxt.tier_0 is defined
23 |
24 | - ansible.builtin.debug: var=nsxt.tier_1_gateways
25 |
26 | - name: Create Tier 1 Gateways
27 | vmware.ansible_for_nsxt.nsxt_policy_tier1:
28 | hostname: "{{ nsxt.manager.hostname }}"
29 | username: "{{ nsxt.manager.username }}"
30 | password: "{{ nsxt.manager.password }}"
31 | validate_certs: false
32 | display_name: "{{ item.display_name }}"
33 | state: "{{ item.state | default('present') }}"
34 | failover_mode: "{{ item.failover_mode | default(omit) }}"
35 | disable_firewall: "{{ item.disable_firewall | default(omit) }}"
36 | force_whitelisting: "{{ item.force_whitelisting | default(omit) }}"
37 | enable_standby_relocation: "{{ item.enable_standby_relocation | default(omit) }}"
38 | route_advertisement_rules: "{{ item.route_advertisement_rules | default(omit) }}"
39 | route_advertisement_types: "{{ item.route_advertisement_types | default(omit) }}"
40 | tier0_display_name: "{{ item.tier0_display_name }}"
41 | # locale_services: "{{ item.locale_services | default(omit) }}"
42 | tags: "{{ item.tags | default(omit) }}"
43 | loop: "{{ nsxt.tier_1_gateways | default([]) }}"
44 |
45 | - name: Create overlay segments
46 | vmware.ansible_for_nsxt.nsxt_policy_segment:
47 | hostname: "{{ nsxt.manager.hostname }}"
48 | username: "{{ nsxt.manager.username }}"
49 | password: "{{ nsxt.manager.password }}"
50 | validate_certs: false
51 | display_name: "{{ item.display_name }}"
52 | state: present
53 | subnets: "{{ item.subnets | default(omit) }}"
54 | tier1_display_name: "{{ item.tier1_display_name | default(omit) }}"
55 | transport_zone_display_name: "{{ item.transport_zone_display_name }}"
56 | tags: "{{ item.tags | default(omit) }}"
57 | loop: "{{ nsxt.overlay_segments | default([]) }}"
58 |
59 | - ansible.builtin.debug:
60 | msg:
61 | - The next step will check and wait for NSX-T networks to be available
62 | - You must add a static route to {{ item.nsxt_supernet }} with next hop {{ item.router_uplink }}
63 | - It may take a short while for NSX-T to complete the configuration and respond
64 | when: "nsxt.routing_tests is defined"
65 | loop: "{{ nsxt.routing_tests }}"
66 |
67 | - name: "Checking static route to NSX-T is in place by pinging the IP address"
68 | ansible.builtin.shell: "while true; do ping -c1 {{ item.ip_to_ping }} > /dev/null && break; done"
69 | delegate_to: localhost
70 | when: "nsxt.routing_tests is defined"
71 | loop: "{{ nsxt.routing_tests }}"
72 |
--------------------------------------------------------------------------------
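Note: the "while true" ping loop above never times out if the static route is missing. A bounded alternative, sketched with the same intent but failing the play after roughly ten minutes instead of hanging forever:

- name: "Check static route to NSX-T by pinging {{ item.ip_to_ping }}"
  ansible.builtin.command: "ping -c1 {{ item.ip_to_ping }}"
  delegate_to: localhost
  register: ping_result
  until: ping_result.rc == 0
  retries: 120
  delay: 5
  changed_when: false
  when: nsxt.routing_tests is defined
  loop: "{{ nsxt.routing_tests }}"
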
/roles/nsxt/nsxt-transport-nodes/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - ansible.builtin.debug: msg="Creating transport node profiles {{ nsxt.transport_node_profiles }}"
3 |
4 | - name: Create transport node profile
5 | vmware.ansible_for_nsxt.nsxt_transport_node_profiles:
6 | hostname: "{{ nsxt.manager.hostname }}"
7 | username: "{{ nsxt.manager.username }}"
8 | password: "{{ nsxt.manager.password }}"
9 | validate_certs: false
10 | resource_type: TransportNodeProfile
11 | display_name: "{{ item.display_name }}"
12 | description: "{{ item.description }}"
13 | host_switch_spec:
14 | resource_type: StandardHostSwitchSpec
15 | host_switches: "{{ item.host_switches }}"
16 | transport_zone_endpoints: "{{ item.transport_zone_endpoints | default(omit) }}"
17 | state: present
18 | loop: "{{ nsxt.transport_node_profiles }}"
19 |
20 | - ansible.builtin.debug: msg="Attaching transport node profiles {{ nsxt.cluster_attach }}"
21 |
22 | - name: Attach Transport node profile to cluster
23 | vmware.ansible_for_nsxt.nsxt_transport_node_collections:
24 | hostname: "{{ nsxt.manager.hostname }}"
25 | username: "{{ nsxt.manager.username }}"
26 | password: "{{ nsxt.manager.password }}"
27 | validate_certs: false
28 | display_name: "{{ item.display_name }}"
29 | resource_type: "TransportNodeCollection"
30 | description: "{{ item.description }}"
31 | compute_manager_name: "{{ item.compute_manager_name }}"
32 | cluster_name: "{{ item.cluster_name }}"
33 | transport_node_profile_name: "{{ item.transport_node_profile_name }}"
34 | state: present
35 | loop: "{{ nsxt.cluster_attach }}"
36 | register: tnp_attach_result
37 |
38 | - name: Waiting for cluster to configure
39 | vmware.ansible_for_nsxt.nsxt_transport_nodes_facts:
40 | hostname: "{{ nsxt.manager.hostname }}"
41 | username: "{{ nsxt.manager.username }}"
42 | password: "{{ nsxt.manager.password }}"
43 | validate_certs: false
44 | register: host_transport_node_facts
45 | retries: 120
46 | delay: 10
47 | until:
48 | - host_transport_node_facts.results
49 | - managed_by_server.0 == nested_vcenter.ip
50 | vars:
51 | host_node_query: "results[?node_deployment_info.resource_type=='HostNode'].node_deployment_info.managed_by_server"
52 | managed_by_server: "{{ host_transport_node_facts | community.general.json_query(host_node_query) }}"
53 |
54 | - ansible.builtin.debug: msg="Creating edge nodes {{ nsxt.edge_nodes }}"
55 |
56 | - name: Create transport node
57 | vmware.ansible_for_nsxt.nsxt_transport_nodes:
58 | hostname: "{{ nsxt.manager.hostname }}"
59 | username: "{{ nsxt.manager.username }}"
60 | password: "{{ nsxt.manager.password }}"
61 | validate_certs: false
62 | display_name: "{{ item.display_name }}"
63 | host_switch_spec:
64 | resource_type: "StandardHostSwitchSpec"
65 | host_switches:
66 | - host_switch_profiles:
67 | - name: "{{ item.host_switches.tep.uplink_profile_name }}"
68 | type: UplinkHostSwitchProfile
69 | host_switch_name: "{{ item.host_switches.tep.host_switch_name | default('defaultHostSwitch') }}"
70 | host_switch_mode: STANDARD
71 | transport_zone_endpoints: "{{ item.host_switches.tep.transport_zone_endpoints }}"
72 | pnics:
73 | - device_name: "fp-eth2"
74 | uplink_name: "uplink-1"
75 | ip_assignment_spec: "{{ item.host_switches.tep.ip_assignment_spec }}"
76 | - host_switch_profiles:
77 | - name: "{{ item.host_switches.uplink.uplink_profile_name }}"
78 | type: UplinkHostSwitchProfile
79 | host_switch_name: "{{ item.host_switches.uplink.host_switch_name }}"
80 | host_switch_mode: STANDARD
81 | transport_zone_endpoints: "{{ item.host_switches.uplink.transport_zone_endpoints }}"
82 | pnics:
83 | - device_name: "fp-eth0"
84 | uplink_name: "uplink-1"
85 | node_deployment_info:
86 | resource_type: EdgeNode
87 | deployment_config:
88 | form_factor: "{{ item.size }}"
89 | node_user_settings:
90 | audit_password: "{{ nsxt.manager.password }}"
91 | audit_username: audit
92 | cli_password: "{{ nsxt.manager.password }}"
93 | cli_username: admin
94 | root_password: "{{ nsxt.manager.password }}"
95 | vm_deployment_config:
96 | compute: "{{ item.cluster_name }}"
97 | data_networks:
98 | - "{{ item.network_uplink_name }}"
99 | - "{{ item.network_tep_name }}"
100 | - "{{ item.network_tep_name }}"
101 | default_gateway_addresses:
102 | - "{{ item.mgmt_default_gateway }}"
103 | management_network: "{{ item.network_management_name }}"
104 | management_port_subnets:
105 | - ip_addresses:
106 | - "{{ item.mgmt_ip_address }}"
107 | prefix_length: "{{ item.mgmt_prefix_length }}"
108 | placement_type: VsphereDeploymentConfig
109 | storage: "{{ item.datastore_name }}"
110 | vc_name: vCenter
111 | vc_username: "{{ nested_vcenter.username }}"
112 | vc_password: "{{ nested_vcenter.password }}"
113 | deployment_type: VIRTUAL_MACHINE
114 | display_name: "{{ item.display_name }}"
115 | node_settings:
116 | allow_ssh_root_login: true
117 | enable_ssh: true
118 | hostname: "{{ item.mgmt_ip_address | replace('.', '-') }}.nip.io"
119 | ntp_servers: ["{{ ntp_server_ip }}"]
120 | state: present
121 | async: 1800
122 | poll: 0
123 | register: edge_node_results
124 | # retries: 12
125 | # delay: 5
126 | # until: "'msg' not in edge_node_results"
127 | loop: "{{ nsxt.edge_nodes }}"
128 |
129 | - name: Result check for deployment of transport node configuration
130 | ansible.builtin.async_status:
131 | jid: "{{ item.ansible_job_id }}"
132 | register: job_result
133 | until: job_result.finished
134 | loop: "{{ edge_node_results.results }}"
135 | retries: 180
136 | delay: 10
137 |
--------------------------------------------------------------------------------
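Note: the "Waiting for cluster to configure" task above relies on a JMESPath query to pull managed_by_server out of every HostNode entry. A minimal illustration of what that query returns, using hypothetical facts data:

- name: Show what host_node_query extracts
  ansible.builtin.debug:
    msg: >-
      {{ {'results': [
            {'node_deployment_info': {'resource_type': 'HostNode',
                                      'managed_by_server': '10.0.0.5'}},
            {'node_deployment_info': {'resource_type': 'EdgeNode'}}
         ]} | community.general.json_query(
           "results[?node_deployment_info.resource_type=='HostNode'].node_deployment_info.managed_by_server") }}
  # prints ['10.0.0.5']; the role compares element 0 against nested_vcenter.ip
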
/roles/nsxt/nsxt-transport-zones/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create transport zone
3 | vmware.ansible_for_nsxt.nsxt_transport_zones:
4 | hostname: "{{ nsxt.manager.hostname }}"
5 | username: "{{ nsxt.manager.username }}"
6 | password: "{{ nsxt.manager.password }}"
7 | validate_certs: false
8 | state: "present"
9 | display_name: "{{ item.display_name }}"
10 | description: "{{ item.description }}"
11 | host_switch_name: "{{ item.host_switch_name | default(omit) }}"
12 | nested_nsx: "{{ item.nested_nsx | default(omit) }}"
13 | resource_type: "TransportZone"
14 | tags: "{{ item.tags | default(omit) }}"
15 | tz_type: "{{ item.transport_type }}"
16 | loop: "{{ nsxt.transport_zones }}"
17 | register: tz_results
18 |
19 | - name: Wait 20 seconds for the transport zones to be configured
20 | ansible.builtin.pause:
21 | echo: false
22 | seconds: 20
23 | when: tz_results.changed
24 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-uplink-profiles/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create uplink profile
3 | vmware.ansible_for_nsxt.nsxt_uplink_profiles:
4 | hostname: "{{ nsxt.manager.hostname }}"
5 | username: "{{ nsxt.manager.username }}"
6 | password: "{{ nsxt.manager.password }}"
7 | validate_certs: false
8 | resource_type: UplinkHostSwitchProfile
9 | display_name: "{{ item.display_name }}"
10 | mtu: "{{ item.mtu | default(omit) }}"
11 | teaming: "{{ item.teaming }}"
12 | transport_vlan: "{{ item.transport_vlan }}"
13 | state: "present"
14 | loop: "{{ nsxt.uplink_profiles }}"
15 |
--------------------------------------------------------------------------------
/roles/nsxt/nsxt-vlan-segments/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - ansible.builtin.debug: var=nsxt.vlan_segments
3 |
4 | - name: Create VLAN segments
5 | vmware.ansible_for_nsxt.nsxt_policy_segment:
6 | hostname: "{{ nsxt.manager.hostname }}"
7 | username: "{{ nsxt.manager.username }}"
8 | password: "{{ nsxt.manager.password }}"
9 | validate_certs: false
10 | display_name: "{{ item.display_name }}"
11 | state: present
12 | transport_zone_display_name: "{{ item.transport_zone_display_name }}"
13 | vlan_ids: "{{ item.vlan_ids }}"
14 | tags: "{{ item.tags | default(omit) }}"
15 | do_wait_till_create: true
16 | loop: "{{ nsxt.vlan_segments }}"
17 | register: vlan_segment_results
18 | retries: 60
19 | delay: 5
20 | until: vlan_segment_results is success
21 |
--------------------------------------------------------------------------------
/roles/tanzu/application-service/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | opsman_ip: "{{ tanzu_opsman_bosh.opsman_ip }}"
3 | opsman_username: "{{ tanzu_opsman_bosh.opsman_username }}"
4 | opsman_password: "{{ tanzu_opsman_bosh.opsman_password }}"
5 |
6 | tas_template_file: "{{ tanzu_application_service.tas_template_file }}"
7 |
8 | tas_nsx_manager_url: "{{ tanzu_application_service.nsx.nsx_manager_url |default(omit) }}"
9 | tas_nsx_username: "{{ tanzu_application_service.nsx.username |default(omit) }}"
10 | tas_nsx_password: "{{ tanzu_application_service.nsx.password |default(omit) }}"
11 | tas_nsx_policy_api: "{{ tanzu_application_service.nsx.policy_api |default(False) }}"
12 | tas_nsx_transport_zone: "{{ tanzu_application_service.nsx.transport_zone |default(omit) }}"
13 | tas_nsx_principal_identity:
14 | public_key: |-
15 | {{ tanzu_application_service.nsx.principal_identity.public_key |default(omit) }}
16 | private_key: |-
17 | {{ tanzu_application_service.nsx.principal_identity.private_key |default(omit) }}
18 |
19 | tas_nsx_egress_pool_name: "{{ tanzu_application_service.nsx.egress_pool_name |default(omit) }}"
20 | tas_nsx_egress_pool_cidr: "{{ tanzu_application_service.nsx.egress_pool_cidr |default(omit) }}"
21 | tas_nsx_egress_pool_ranges: "{{ tanzu_application_service.nsx.egress_pool_ranges |default(omit) }}"
22 | tas_nsx_container_block_name: "{{ tanzu_application_service.nsx.container_block_name |default(omit) }}"
23 | tas_nsx_container_block_cidr: "{{ tanzu_application_service.nsx.container_block_cidr |default(omit) }}"
24 |
25 | tas_deployment_network: "{{ tanzu_application_service.deployment_network }}"
26 | tas_apps_domain: "{{ tanzu_application_service.apps_domain }}"
27 | tas_sys_domain: "{{ tanzu_application_service.sys_domain }}"
28 | tas_uaa_domain: "{{ tanzu_application_service.uaa_domain }}"
29 | tas_credhub_key: "{{ tanzu_application_service.credhub_key }}"
30 | tas_gorouter_ip: "{{ tanzu_application_service.gorouter_ip }}"
31 |
--------------------------------------------------------------------------------
/roles/tanzu/application-service/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: List Logical Routers
4 | vmware.ansible_for_nsxt.nsxt_logical_routers_facts:
5 | hostname: "{{ tas_nsx_manager_url }}"
6 | username: "{{ tas_nsx_username }}"
7 | password: "{{ tas_nsx_password }}"
8 | validate_certs: false
9 | register: result_nsx_routers
10 | when: "tas_nsx_manager_url is defined and '__omit' not in tas_nsx_manager_url"
11 |
12 | - name: Fetch NSX Manager cert
13 | ansible.builtin.shell: |-
14 | set -o pipefail && \
15 | openssl s_client -showcerts -servername {{ tas_nsx_manager_url }} \
16 | -connect {{ tas_nsx_manager_url }}:443 -
140 | {%- if tanzu_application_service.nsx is defined -%}
141 | tas-nsx.yml
142 | {%- else -%}
143 | tas.yml
144 | {%- endif -%}
145 |
146 | - name: Set base vars
147 | ansible.builtin.set_fact:
148 | tas_vars:
149 | - slug: VMware-NSX-T
150 | file_path: "{{ ncp_tile | default('') }}"
151 | product_config: "{{ lookup('template', 'ncp.yml') | from_yaml }}"
152 | apply_changes: false
153 | - slug: cf
154 | file_path: "{{ tas_tile }}"
155 | product_config: "{{ lookup('template', networking_plugin_template) | from_yaml }}"
156 | stemcell: "{{ tas_stemcell }}"
157 |
158 | - name: Dropping gorouter certificates if already set
159 | ansible.builtin.set_fact:
160 | tas_vars: |
161 | {% set a = tas_vars[1]['product_config']['product-properties'].pop('.properties.networking_poe_ssl_certs') %}
162 | {{ tas_vars }}
163 | when: not tas_gorouter_cert_replace
164 |
165 | - name: Dropping UAA certificates if already set
166 | ansible.builtin.set_fact:
167 | tas_vars: |
168 | {% set a = tas_vars[1]['product_config']['product-properties'].pop('.uaa.service_provider_key_credentials') %}
169 | {{ tas_vars }}
170 | when: not tas_uaa_cert_replace
171 |
172 | - name: Dropping HAProxy section unless TAS is 2.x
173 | ansible.builtin.set_fact:
174 | tas_vars: |
175 | {% set a = tas_vars[1]['product_config']['product-properties'].pop('.properties.haproxy_forward_tls') %}
176 | {{ tas_vars }}
177 |   when: "'srt-2.' not in tas_tile"
178 |
179 | - name: Drop NSX Tile vars if 'tas_nsx_manager_url' not set
180 | ansible.builtin.set_fact:
181 | tas_vars: |
182 | {% set _ = tas_vars.pop(0) %}
183 | {{ tas_vars }}
184 |   when: "tas_nsx_manager_url is not defined or '__omit' in tas_nsx_manager_url"
185 |
186 | - name: Deploy TAS
187 | ansible.builtin.include_role:
188 | name: tanzu/opsman/upload-stage-configure-tile
189 | with_items: "{{ tas_vars }}"
190 |
--------------------------------------------------------------------------------
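Note: the "Dropping ..." tasks above use a Jinja2 side effect: the set_fact body calls pop() on the nested dict, then re-emits the whole structure, which Ansible parses back into a list/dict. A minimal, self-contained sketch of the same trick with hypothetical keys:

- name: Drop a key from a dict held in a variable
  ansible.builtin.set_fact:
    demo_config: |
      {% set _ = demo_config['product-properties'].pop('.properties.unwanted') %}
      {{ demo_config }}
  vars:
    demo_config:
      product-properties:
        '.properties.unwanted': {value: drop-me}
        '.properties.kept': {value: keep-me}
  # demo_config ends up containing only '.properties.kept'
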
/roles/tanzu/application-service/templates/env.yml:
--------------------------------------------------------------------------------
1 | ---
2 | target: https://{{ opsman_ip }}
3 | # connect-timeout: 5 # default 5
4 | # request-timeout: 1800 # default 1800
5 | skip-ssl-validation: true # default false
6 | username: "{{ opsman_username }}"
7 | password: "{{ opsman_password }}"
8 | # decryption-passphrase is optional,
9 | # except for use with `import-installation`.
10 | # OpsMan depends on the passphrase
11 | # to decrypt the imported installation.
12 | # For other commands, providing this key allows
13 | # decryption of the OpsMan VM after reboot,
14 | # which would otherwise need to be done manually.
15 | decryption-passphrase: "{{ opsman_password }}{{ opsman_password }}"
16 |
--------------------------------------------------------------------------------
/roles/tanzu/application-service/templates/ncp.yml:
--------------------------------------------------------------------------------
1 | ---
2 | product-name: VMware-NSX-T
3 | product-properties:
4 | .properties.container_ip_blocks:
5 | value:
6 | - cidr: "{{ tas_nsx_container_block_cidr }}"
7 | name: "{{ tas_nsx_container_block_name }}"
8 | .properties.default_running_asg_use_ip_set:
9 | value: false
10 | .properties.enable_inventory:
11 | value: true
12 | .properties.enable_policy_api:
13 | value: {{ tas_nsx_policy_api }}
14 | .properties.enable_snat:
15 | value: true
16 | .properties.exit_on_disconnection:
17 | selected_option: enable
18 | value: enable
19 | .properties.external_ip_pools:
20 | value:
21 | - allocation_ranges: "{{ tas_nsx_egress_pool_ranges }}"
22 |         cidr: "{{ tas_nsx_egress_pool_cidr }}"
23 | name: "{{ tas_nsx_egress_pool_name }}"
24 | .properties.foundation_name:
25 | value: tas
26 | .properties.log_firewall_rules:
27 | selected_option: disable
28 | value: disable
29 | .properties.master_timeout:
30 | value: 18
31 | .properties.ncp_debug_log:
32 | value: false
33 | .properties.nsx_api_ca_cert:
34 | value: |-
35 | {{ nsx_ca_cert | indent(6) }}
36 | .properties.nsx_api_managers:
37 | value: https://{{ tas_nsx_manager_url }}
38 | .properties.nsx_auth:
39 | selected_option: client_cert
40 | value: client_cert
41 | .properties.nsx_auth.client_cert.nsx_api_client_cert:
42 | value:
43 | cert_pem: |-
44 | {{ tas_nsx_principal_identity.public_key | default(omit) | indent(8) }}
45 | private_key_pem: |-
46 | {{ tas_nsx_principal_identity.private_key | default(omit) | indent(8) }}
47 | .properties.nsx_node_agent_debug_log:
48 | value: false
49 | .properties.overlay_tz:
50 | value: "{{ tas_nsx_transport_zone }}"
51 | .properties.remove_ovs_ports_timeout:
52 | value: 1800
53 | .properties.snat_rule_logging:
54 | selected_option: extended
55 | value: extended
56 | .properties.subnet_prefix:
57 | value: 26
58 | .properties.tier0_router:
59 | value: "{{ nsx_t0_id }}"
60 | .properties.wait_for_security_policy_sync:
61 | value: false
62 | network-properties:
63 | network:
64 | name: "{{ tas_deployment_network }}"
65 | other_availability_zones:
66 | - name: az1
67 | singleton_availability_zone:
68 | name: az1
69 |
--------------------------------------------------------------------------------
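Note: the PEM values in the template above pass through the indent filter so the multi-line certificate stays aligned under its YAML block scalar; indent only pads the lines after the first. A tiny illustration:

- name: Show indent() behaviour
  ansible.builtin.debug:
    msg: '{{ "line1\nline2\nline3" | indent(6) }}'
  # line1 is unchanged; line2 and line3 each gain six leading spaces
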
/roles/tanzu/ha-proxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - ansible.builtin.debug: var=tkg_haproxy
4 |
5 | - name: Deploy HA Proxy
6 | community.vmware.vmware_deploy_ovf:
7 | hostname: "{{ nested_vcenter.ip }}"
8 | username: "{{ nested_vcenter.username }}"
9 | password: "{{ nested_vcenter.password }}"
10 | validate_certs: false
11 | name: "tkg-haproxy"
12 | datacenter: "{{ nested_vcenter.datacenter }}"
13 | cluster: "{{ tanzu_vsphere.vsphere_cluster }}"
14 | datastore: "{{ tkg_haproxy.datastore }}"
15 | disk_provisioning: "{{ disk_mode }}"
16 | networks:
17 | Management: "{{ tanzu_vsphere.management_port_group }}"
18 | Workload: "{{ tanzu_vsphere.vds_networking.workload_port_group }}"
19 | Frontend: "{{ tanzu_vsphere.vds_networking.workload_port_group }}"
20 | ova: "{{ haproxy_ova }}"
21 | allow_duplicates: false
22 | power_on: true
23 | fail_on_spec_warnings: true
24 | wait: true
25 | wait_for_ip_address: true
26 | inject_ovf_env: true
27 | properties:
28 | appliance.root_pwd: "{{ tkg_haproxy.root_pwd }}"
29 |       network.nameservers: "{{ tkg_haproxy.nameservers }}"
30 | network.management_ip: "{{ tkg_haproxy.management_ip }}/{{ tkg_haproxy.management_subnet_bits }}"
31 | network.management_gateway: "{{ tkg_haproxy.management_gateway }}"
32 | network.workload_ip: "{{ tkg_haproxy.workload_ip }}"
33 | network.workload_gateway: "{{ tkg_haproxy.workload_gateway }}"
34 | loadbalance.service_ip_range: "{{ tkg_haproxy.service_ip_range }}"
35 | loadbalance.haproxy_user: "{{ tkg_haproxy.username }}"
36 | loadbalance.haproxy_pwd: "{{ tkg_haproxy.password }}"
37 | loadbalance.dataplane_port: "{{ tkg_haproxy.management_port | default(omit) }}"
38 |
--------------------------------------------------------------------------------
/roles/tanzu/multi-cloud-generate-config/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Load management cluster config
4 | ansible.builtin.set_fact:
5 | mgmt_cluster_config: "{{ lookup('template', 'management-cluster.yml') | from_yaml }}"
6 |
7 | # Allow insertion of the variable only if it is set
8 | # Workaround for the Jinja2 template not removing keys when omit is used
9 | - name: Set VSPHERE_CONTROL_PLANE_ENDPOINT if exists
10 | ansible.builtin.set_fact:
11 | mgmt_cluster_config: |
12 | {{
13 | mgmt_cluster_config | combine
14 | ({ 'VSPHERE_CONTROL_PLANE_ENDPOINT': tanzu_multi_cloud.vsphere_control_plane_endpoint })
15 | }}
16 | when: tanzu_multi_cloud.vsphere_control_plane_endpoint is defined
17 |
18 | # Workaround for j2 always templating strings instead of booleans
19 | - name: Set AVI_CONTROL_PLANE_HA_PROVIDER
20 | ansible.builtin.set_fact:
21 | mgmt_cluster_config: |
22 | {{
23 | mgmt_cluster_config | combine
24 | ({ 'AVI_CONTROL_PLANE_HA_PROVIDER': tanzu_multi_cloud.avi_control_plane_ha_provider })
25 | }}
26 |
27 | - name: Write management cluster config
28 | ansible.builtin.copy:
29 | content: "{{ mgmt_cluster_config | to_nice_yaml }}"
30 | dest: "{{ tanzu_multi_cloud.generated_config_file }}"
31 | mode: '0644'
32 |
--------------------------------------------------------------------------------
/roles/tanzu/multi-cloud-generate-config/templates/management-cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | AVI_CA_DATA_B64: ""
3 | AVI_CLOUD_NAME: "{{ tanzu_multi_cloud.avi_cloud_name }}"
4 | AVI_CONTROLLER: "{{ tanzu_multi_cloud.avi_controller }}"
5 | AVI_DATA_NETWORK: "{{ tanzu_multi_cloud.avi_data_network }}"
6 | AVI_DATA_NETWORK_CIDR: "{{ tanzu_multi_cloud.avi_data_network_cidr }}"
7 | AVI_ENABLE: true
8 | AVI_LABELS: ""
9 | AVI_PASSWORD: "{{ tanzu_multi_cloud.avi_password }}"
10 | AVI_SERVICE_ENGINE_GROUP: "{{ tanzu_multi_cloud.avi_service_engine_group | default('Default-Group') }}"
11 | AVI_USERNAME: "{{ tanzu_multi_cloud.avi_username }}"
12 | AVI_CONTROL_PLANE_HA_PROVIDER: "{{ tanzu_multi_cloud.avi_control_plane_ha_provider | bool }}"
13 | CLUSTER_NAME: "{{ tanzu_multi_cloud.mgmt_cluster_name }}"
14 | CONTROL_PLANE_MACHINE_COUNT: "{{ tanzu_multi_cloud.control_plane_machine_count }}"
15 | CLUSTER_CIDR: 100.96.0.0/11
16 | CLUSTER_PLAN: dev
17 | ENABLE_CEIP_PARTICIPATION: "false"
18 | ENABLE_MHC: true
19 | IDENTITY_MANAGEMENT_TYPE: none
20 | INFRASTRUCTURE_PROVIDER: vsphere
21 | LDAP_BIND_DN: ""
22 | LDAP_BIND_PASSWORD: ""
23 | LDAP_GROUP_SEARCH_BASE_DN: ""
24 | LDAP_GROUP_SEARCH_FILTER: ""
25 | LDAP_GROUP_SEARCH_GROUP_ATTRIBUTE: ""
26 | LDAP_GROUP_SEARCH_NAME_ATTRIBUTE: cn
27 | LDAP_GROUP_SEARCH_USER_ATTRIBUTE: DN
28 | LDAP_HOST: ""
29 | LDAP_ROOT_CA_DATA_B64: ""
30 | LDAP_USER_SEARCH_BASE_DN: ""
31 | LDAP_USER_SEARCH_FILTER: ""
32 | LDAP_USER_SEARCH_NAME_ATTRIBUTE: ""
33 | LDAP_USER_SEARCH_USERNAME: userPrincipalName
34 | OIDC_IDENTITY_PROVIDER_CLIENT_ID: ""
35 | OIDC_IDENTITY_PROVIDER_CLIENT_SECRET: ""
36 | OIDC_IDENTITY_PROVIDER_GROUPS_CLAIM: ""
37 | OIDC_IDENTITY_PROVIDER_ISSUER_URL: ""
38 | OIDC_IDENTITY_PROVIDER_NAME: ""
39 | OIDC_IDENTITY_PROVIDER_SCOPES: ""
40 | OIDC_IDENTITY_PROVIDER_USERNAME_CLAIM: ""
41 | SERVICE_CIDR: 100.64.0.0/13
42 | TKG_HTTP_PROXY_ENABLED: false
43 | VSPHERE_CONTROL_PLANE_DISK_GIB: "{{ tanzu_multi_cloud.control_plane_disk_gib }}"
44 | VSPHERE_CONTROL_PLANE_MEM_MIB: "{{ tanzu_multi_cloud.control_plane_mem_mib }}"
45 | VSPHERE_CONTROL_PLANE_NUM_CPUS: "{{ tanzu_multi_cloud.control_plane_num_cpus }}"
46 | VSPHERE_DATACENTER: "{{ tanzu_multi_cloud.vsphere_datacenter_path }}"
47 | VSPHERE_DATASTORE: "{{ tanzu_multi_cloud.vsphere_datastore_path }}"
48 | DEPLOY_TKG_ON_VSPHERE7: true
49 | VSPHERE_FOLDER: "{{ tanzu_multi_cloud.vsphere_folder_path }}"
50 | VSPHERE_INSECURE: true
51 | VSPHERE_NETWORK: "{{ tanzu_multi_cloud.vsphere_network }}"
52 | VSPHERE_PASSWORD: "{{ tanzu_multi_cloud.vsphere_password }}"
53 | VSPHERE_RESOURCE_POOL: "{{ tanzu_multi_cloud.vsphere_resource_pool_path }}"
54 | VSPHERE_SERVER: "{{ tanzu_multi_cloud.vsphere_server }}"
55 | VSPHERE_SSH_AUTHORIZED_KEY: "{{ tanzu_multi_cloud.vsphere_ssh_authorized_key }}"
56 | VSPHERE_USERNAME: "{{ tanzu_multi_cloud.vsphere_username }}"
57 | VSPHERE_WORKER_DISK_GIB: "{{ tanzu_multi_cloud.worker_disk_gib }}"
58 | VSPHERE_WORKER_MEM_MIB: "{{ tanzu_multi_cloud.worker_mem_mib }}"
59 | VSPHERE_WORKER_NUM_CPUS: "{{ tanzu_multi_cloud.worker_num_cpus }}"
60 | WORKER_MACHINE_COUNT: "{{ tanzu_multi_cloud.worker_machine_count }}"
61 |
--------------------------------------------------------------------------------
/roles/tanzu/opsman/configure-bosh/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | opsman_ip: "{{ tanzu_opsman_bosh.opsman_ip }}"
3 | opsman_username: "{{ tanzu_opsman_bosh.opsman_username }}"
4 | opsman_password: "{{ tanzu_opsman_bosh.opsman_password }}"
5 |
6 | bosh_networks: "{{ tanzu_opsman_bosh.bosh_networks }}"
7 | bosh_ntp: "{{ tanzu_opsman_bosh.ntp }}"
8 |
9 | bosh_vcenter_url: "{{ tanzu_opsman_bosh.vcenter_url }}"
10 | bosh_vcenter_cluster: "{{ tanzu_opsman_bosh.vcenter_cluster }}"
11 | bosh_vcenter_datacenter: "{{ tanzu_opsman_bosh.vcenter_datacenter }}"
12 | bosh_vcenter_datastore_ephemeral: "{{ tanzu_opsman_bosh.vcenter_datastore }}"
13 | bosh_vcenter_datastore_persistent: "{{ tanzu_opsman_bosh.vcenter_datastore }}"
14 | bosh_vcenter_username: "{{ tanzu_opsman_bosh.vcenter_username }}"
15 | bosh_vcenter_password: "{{ tanzu_opsman_bosh.vcenter_password }}"
16 |
17 | bosh_nsx_url: "{{ tanzu_opsman_bosh.nsx_url | default(omit) }}"
18 | bosh_nsx_username: "{{ tanzu_opsman_bosh.nsx_username | default(omit) }}"
19 | bosh_nsx_password: "{{ tanzu_opsman_bosh.nsx_password | default(omit) }}"
20 | bosh_nsx_use_policy_api: "{{ tanzu_opsman_bosh.nsx_use_policy_api | default(false) }}"
21 | bosh_template_file: "{{ tanzu_opsman_bosh.bosh_template_file }}"
22 |
23 | bosh_ssh_public_key: "{{ tanzu_opsman_bosh.ssh_public_key }}"
24 | bosh_username: "{{ tanzu_opsman_bosh.username }}"
25 | bosh_password: "{{ tanzu_opsman_bosh.password }}"
26 |
27 | bosh_trusted_ca_certs: "{{ tanzu_opsman_bosh.trusted_ca_certs | default('') }}"
28 | bosh_az_resource_pool: "{{ tanzu_opsman_bosh.az_resource_pool | default('') }}"
29 |
30 | bosh_disk_path: pcf_disk
31 | bosh_template_folder: pcf_templates
32 | bosh_vm_folder: pcf_vms
33 |
34 | bosh_vmextensions_configuration: "{{ tanzu_opsman_bosh.bosh_vmextensions_configuration | default([]) }}"
35 |
--------------------------------------------------------------------------------
/roles/tanzu/opsman/configure-bosh/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Fetch NSX Manager cert
3 | ansible.builtin.shell: |
4 | set -o pipefail && \
5 | openssl s_client -showcerts -servername {{ nsxt.manager.ip }} -connect {{ nsxt.manager.ip }}:443 -
52 | {
53 | {% for item in vminfo.instance.config.extraConfig %}
54 | {% if item.key == "guestinfo.dataplaneapi.cacert" %}
55 | "public_key": "{{ item.value }}",
56 | {% endif %}
57 | {% endfor %}
58 | }
59 | when: "tkg_haproxy is defined"
60 |
61 | - name: Decode haproxy CA Cert
62 | ansible.builtin.set_fact:
63 | haproxy_ca_cert: "{{ haproxy_ca_cert_raw.public_key | b64decode }}"
64 | when: "tkg_haproxy is defined"
65 |
66 | - name: Get a cert from NSX ALB Controller
67 | community.crypto.get_certificate:
68 | host: "{{ tanzu_vsphere.vds_networking.nsx_alb_server }}"
69 | port: 443
70 | delegate_to: localhost
71 | run_once: true
72 | register: nsx_alb_cert
73 | when: "tanzu_vsphere.vds_networking.nsx_alb_server is defined"
74 |
75 | - ansible.builtin.debug: var=tanzu_vsphere
76 |
77 | - name: Create vsphere namespace cluster
78 | vmware.ansible_for_vsphere_with_tanzu.vsphere_tanzu_cluster_manage:
79 | hostname: "{{ nested_vcenter.ip }}"
80 | username: "{{ nested_vcenter.username }}"
81 | password: "{{ nested_vcenter.password }}"
82 | validate_certs: false
83 | cluster_distributed_switch: "{{ tanzu_vsphere.nsxt.cluster_distributed_switch | default(omit) }}"
84 | cluster_name: "{{ tanzu_vsphere.vsphere_cluster }}"
85 | default_content_library: "{{ tanzu_vsphere.default_content_library }}"
86 | dns_search_domains: "{{ dns_domain }}"
87 | egress_cidrs: "{{ tanzu_vsphere.nsxt.egress_cidrs | default(omit) }}"
88 | ephemeral_storage_policy: "{{ tanzu_vsphere.ephemeral_storage_policy }}"
89 | haproxy_ip_range_list: "{{ tanzu_vsphere.vds_networking.haproxy_ip_range_list | default(omit) }}"
90 | haproxy_ca_chain: "{{ haproxy_ca_cert | default(omit) }}"
91 | haproxy_management_ip: "{{ tkg_haproxy.management_ip | default(omit) }}"
92 | haproxy_management_port: "{{ tanzu_vsphere.vds_networking.haproxy.management_port | default(omit) }}"
93 | haproxy_password: "{{ tkg_haproxy.password | default(omit) }}"
94 | haproxy_username: "{{ tkg_haproxy.username | default(omit) }}"
95 | image_storage_policy: "{{ tanzu_vsphere.image_storage_policy }}"
96 | ingress_cidrs: "{{ tanzu_vsphere.nsxt.ingress_cidrs | default(omit) }}"
97 | load_balancer_provider: "{{ tanzu_vsphere.vds_networking.load_balancer_provider | default(omit) }}"
98 | management_address_count: "{{ tanzu_vsphere.management_address_count | default(5) }}"
99 | management_dns_servers: "{{ tanzu_vsphere.management_dns_servers }}"
100 | management_gateway: "{{ tanzu_vsphere.management_gateway }}"
101 | management_starting_address: "{{ tanzu_vsphere.management_starting_address }}"
102 | management_netmask: "{{ tanzu_vsphere.management_netmask }}"
103 | management_ntp_servers: "{{ tanzu_vsphere.ntp_server_list }}"
104 | management_port_group: "{{ tanzu_vsphere.management_port_group }}"
105 | master_storage_policy: "{{ tanzu_vsphere.master_storage_policy }}"
106 | network_provider: "{{ tanzu_vsphere.network_provider }}"
107 | nsx_alb_ca_chain: "{{ nsx_alb_cert.cert | default(omit) }}"
108 | nsx_alb_password: "{{ tanzu_vsphere.vds_networking.nsx_alb_password | default(omit) }}"
109 | nsx_alb_username: "{{ tanzu_vsphere.vds_networking.nsx_alb_username | default(omit) }}"
110 | nsx_alb_server: "{{ tanzu_vsphere.vds_networking.nsx_alb_server | default(omit) }}"
111 | nsx_edge_cluster: "{{ tanzu_vsphere.nsxt.nsx_edge_cluster | default(omit) }}"
112 | pod_cidrs: "{{ tanzu_vsphere.nsxt.pod_cidrs | default(omit) }}"
113 | workload_dns_servers: "{{ tanzu_vsphere.workload_dns_servers }}"
114 | workload_gateway: "{{ tanzu_vsphere.vds_networking.workload_gateway | default(omit) }}"
115 | workload_ip_range_list: "{{ tanzu_vsphere.vds_networking.workload_ip_range_list | default(omit) }}"
116 | workload_netmask: "{{ tanzu_vsphere.vds_networking.workload_netmask | default(omit) }}"
117 | workload_ntp_servers: "{{ tanzu_vsphere.ntp_server_list }}"
118 | workload_portgroup: "{{ tanzu_vsphere.vds_networking.workload_port_group | default(omit) }}"
119 | services_cidr: "{{ tanzu_vsphere.services_cidr }}"
120 | supervisor_size: "{{ tanzu_vsphere.supervisor_size | upper }}"
121 | state: present
122 | async: 3600
123 | poll: 0
124 | register: cluster_results
125 |
126 | - name: Check on enable namespace job
127 | ansible.builtin.async_status:
128 | jid: "{{ cluster_results.ansible_job_id }}"
129 | register: job_result
130 | until: job_result.finished
131 | retries: 720
132 | delay: 5
133 |
--------------------------------------------------------------------------------
/roles/tanzu/vyos-router/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - ansible.builtin.debug: var=tkg_router
4 |
5 | - name: Deploy VyOS router
6 | community.vmware.vmware_deploy_ovf:
7 | hostname: "{{ nested_vcenter.ip }}"
8 | username: "{{ nested_vcenter.username }}"
9 | password: "{{ nested_vcenter.password }}"
10 | validate_certs: false
11 | name: "tkgs-vyos-router"
12 | datacenter: "{{ nested_vcenter.datacenter }}"
13 | cluster: "{{ tanzu_vsphere.vsphere_cluster }}"
14 | datastore: "{{ tkg_router.datastore }}"
15 | disk_provisioning: "{{ disk_mode }}"
16 | networks:
17 | public: "{{ tanzu_vsphere.management_port_group }}"
18 | internal: "{{ tanzu_vsphere.vsphere_networking.workload_port_group }}"
19 | ova: "{{ lookup('env', 'SOFTWARE_DIR') }}/vyos-1.1.8-amd64.ova"
20 | allow_duplicates: false
21 | power_on: true
22 | fail_on_spec_warnings: true
23 | wait: true
24 | wait_for_ip_address: true
25 | inject_ovf_env: false
26 |
27 | - name: Configure VyOS VM
28 | community.vmware.vmware_vm_shell:
29 | hostname: "{{ nested_vcenter.ip }}"
30 | username: "{{ nested_vcenter.username }}"
31 | password: "{{ nested_vcenter.password }}"
32 | datacenter: "{{ nested_vcenter.datacenter }}"
33 | validate_certs: false
34 | vm_id: "tkgs-vyos-router"
35 | vm_username: "vyos"
36 | vm_password: "vyos"
37 | vm_shell: /bin/vbash
38 | vm_shell_args: |-
39 | -c "source /opt/vyatta/etc/functions/script-template
40 | configure
41 | delete interfaces ethernet eth0 address dhcp
42 | set interfaces ethernet eth0 address '{{ tkg_router.uplink_ip_cidr }}'
43 | set interfaces ethernet eth1 address '{{ tkg_router.internal_gateway_cidr }}'
44 | set service ssh port '22'
45 | set protocols static route 0.0.0.0/0 next-hop {{ tkg_router.uplink_next_hop }} distance '1'
46 |
47 | set nat source rule 100 outbound-interface 'eth0'
48 | set nat source rule 100 source address '{{ tkg_router.internal_network_cidr }}'
49 | set nat source rule 100 translation address masquerade
50 |
51 | commit
52 | save"
53 | vm_shell_cwd: "/home/vyos"
54 | register: results
55 |
--------------------------------------------------------------------------------
/roles/ubuntu-server/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Deploy Ubuntu OVA
3 | community.vmware.vmware_deploy_ovf:
4 | hostname: "{{ vcsa_hostname }}"
5 | username: "{{ vcsa_username }}"
6 | password: "{{ vcsa_password }}"
7 | validate_certs: false
8 | name: "{{ ubuntu_vm_name }}"
9 | cluster: "{{ vsphere_cluster }}"
10 | datacenter: "{{ vsphere_datacenter }}"
11 | datastore: "{{ vsphere_datastore }}"
12 | resource_pool: "{{ vsphere_resource_pool | default('Resources') }}"
13 | disk_provisioning: thin
14 |     networks: {"VM Network": "{{ ubuntu_vm_network }}"}
15 | ova: "{{ ubuntu_ova_path }}"
16 | allow_duplicates: false
17 | power_on: false
18 | fail_on_spec_warnings: false
19 | wait: true
20 | wait_for_ip_address: true
21 | inject_ovf_env: true
22 | properties:
23 | user-data: "{{ lookup('template', 'cloud_init_config.j2') | b64encode }}"
24 | public-keys: "{{ ubuntu_ssh_public_key | default(omit) }}"
25 | delegate_to: localhost
26 | register: vm_instance_data
27 |
28 | - name: Get Ubuntu VM UUID
29 | ansible.builtin.set_fact:
30 | ubuntu_server_vm_uuid: "{{ vm_instance_data.instance.hw_product_uuid }}"
31 |
32 | - name: Configure Ubuntu VM resources
33 | community.vmware.vmware_guest:
34 | hostname: "{{ vcsa_hostname }}"
35 | username: "{{ vcsa_username }}"
36 | password: "{{ vcsa_password }}"
37 | validate_certs: false
38 | uuid: "{{ ubuntu_server_vm_uuid }}"
39 | state: "present"
40 | hardware:
41 | memory_mb: "{{ ubuntu_vm_memory }}"
42 | num_cpus: "{{ ubuntu_vm_cpus }}"
43 | disk:
44 | - size_gb: "{{ ubuntu_vm_disk_size }}"
45 | delegate_to: "localhost"
46 |
47 | - name: Power on Ubuntu VM
48 | community.vmware.vmware_guest_powerstate:
49 | hostname: "{{ vcsa_hostname }}"
50 | username: "{{ vcsa_username }}"
51 | password: "{{ vcsa_password }}"
52 | validate_certs: false
53 | uuid: "{{ ubuntu_server_vm_uuid }}"
54 | state: powered-on
55 | delegate_to: localhost
56 |
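# ubuntu_client_ip_address is expected in CIDR form (the cloud-init template writes it
# verbatim into netplan's addresses list), so ipaddr('address') strips the prefix length here.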
57 | - name: Wait for port 22 to become open and contain "OpenSSH"
58 | ansible.builtin.wait_for:
59 | port: 22
60 | host: "{{ ubuntu_client_ip_address | ansible.utils.ipaddr('address') }}"
61 | search_regex: OpenSSH
62 | delay: 10
63 |
64 | - name: Add host to an inventory group
65 | ansible.builtin.add_host:
66 | name: "{{ ubuntu_client_ip_address | ansible.utils.ipaddr('address') }}"
67 | groups: "{{ inventory_host_group }}"
68 | ansible_connection: ssh
69 | ansible_user: "{{ ubuntu_client_username }}"
70 | ansible_password: "{{ ubuntu_client_password }}"
71 |
--------------------------------------------------------------------------------
/roles/ubuntu-server/templates/cloud_init_config.j2:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | package_update: false
3 | package_upgrade: false
4 |
5 | hostname: {{ ubuntu_client_hostname }}
6 | fqdn: {{ ubuntu_client_hostname }}
7 | manage_etc_hosts: true
8 |
9 | users:
10 | - default
11 | - name: {{ ubuntu_client_username }}
12 | sudo: ALL=(ALL) NOPASSWD:ALL
13 | groups: sudo
14 | shell: /bin/bash
15 | lock_passwd: False
16 | plain_text_passwd: {{ ubuntu_client_password }}
17 | ssh_pwauth: yes
18 |
19 | chpasswd:
20 | expire: false
21 |
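# Disable cloud-init's own network rendering and lay down a static netplan config;
# the runcmd section below removes the default DHCP config and applies this one.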
22 | write_files:
23 | - path: /etc/cloud/cloud.cfg.d/99-custom-networking.cfg
24 | permissions: '0644'
25 | content: |
26 | network: {config: disabled}
27 | - path: /etc/netplan/custom-static-ip-config.yaml
28 | permissions: '0644'
29 | content: |
30 | network:
31 | version: 2
32 | ethernets:
33 | ens192:
34 | dhcp4: false
35 | dhcp6: false
36 | addresses:
37 | - {{ ubuntu_client_ip_address }}
38 | routes:
39 | - to: default
40 | via: {{ ubuntu_client_gateway }}
41 | nameservers:
42 | addresses:
43 | - {{ ubuntu_client_nameserver }}
44 |
45 | runcmd:
46 | - [sudo, rm, /etc/netplan/50-cloud-init.yaml]
47 | - [sudo, netplan, generate]
48 | - [sudo, netplan, apply]
49 | {% if ubuntu_install_packages is defined %}
50 | - sudo apt update -y
51 | - sudo apt install -y {{ ubuntu_install_packages }}
52 | {% endif %}
53 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-clusters/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create Clusters
3 | community.vmware.vmware_cluster:
4 | hostname: "{{ nested_vcenter.ip }}"
5 | username: "{{ nested_vcenter.username }}"
6 | password: "{{ nested_vcenter.password }}"
7 | datacenter_name: "{{ nested_vcenter.datacenter }}"
8 | cluster_name: "{{ item.key }}"
9 | validate_certs: false
10 | with_dict: "{{ nested_clusters }}"
11 |
12 | - name: Add ESXi Host to VCSA
13 | community.vmware.vmware_host:
14 | hostname: "{{ nested_vcenter.ip }}"
15 | username: "{{ nested_vcenter.username }}"
16 | password: "{{ nested_vcenter.password }}"
17 | datacenter_name: "{{ nested_vcenter.datacenter }}"
18 | validate_certs: false
19 | cluster_name: "{{ item.nested_cluster }}"
20 | esxi_hostname: "{{ item.ip }}"
21 | esxi_username: "root"
22 | esxi_password: "{{ nested_host_password }}"
23 | state: present
24 | loop: "{{ nested_hosts }}"
25 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-datacenter/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create Datacenter
3 | community.vmware.vmware_datacenter:
4 | hostname: "{{ nested_vcenter.ip }}"
5 | username: "{{ nested_vcenter.username }}"
6 | password: "{{ nested_vcenter.password }}"
7 | datacenter_name: "{{ nested_vcenter.datacenter }}"
8 | state: present
9 | validate_certs: false
10 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-deploy-vc-and-hosts/templates/embedded_vCSA_on_VC_6.7.json:
--------------------------------------------------------------------------------
1 | {
2 | "__version": "2.3.1",
3 | "__comments": "Sample template to deploy a vCenter Server Appliance with an embedded Platform Services Controller on a vCenter Server instance.",
4 | "new.vcsa": {
5 | "vc": {
6 | "hostname": "{{ hosting_vcenter.ip }}",
7 | "username": "{{ hosting_vcenter.username }}",
8 | "password": "{{ hosting_vcenter.password }}",
9 | "deployment_network": "{{ nested_vcenter.hosting_network }}",
10 | "datacenter": [
11 | "{{ hosting_vcenter.datacenter }}"
12 | ],
13 | "datastore": "{{ nested_vcenter.hosting_datastore }}",
14 | "target": [
15 | "{{ nested_vcenter.hosting_cluster }}"
16 | ]
17 | },
18 | "appliance": {
19 | "thin_disk_mode": true,
20 | "deployment_option": "tiny",
21 | "name": "{{ environment_tag }}-vcenter"
22 | },
23 | "network": {
24 | "ip_family": "ipv4",
25 | "mode": "static",
26 | "ip": "{{ nested_vcenter.ip }}",
27 | "dns_servers": [
28 | "{{ dns_server }}"
29 | ],
30 | "prefix": "{{ nested_vcenter.mask }}",
31 | "gateway": "{{ nested_vcenter.gw }}",
32 | "system_name": "{{ nested_vcenter.host_name }}"
33 | },
34 | "os": {
35 | "password": "{{ nested_vcenter.password }}",
36 | "ntp_servers": "{{ ntp_server_ip }}",
37 | "ssh_enable": true
38 | },
39 | "sso": {
40 | "password": "{{ nested_vcenter.password }}",
41 | "domain_name": "vsphere.local"
42 | }
43 | },
44 | "ceip": {
45 | "description": {
46 | "__comments": [
47 | "++++VMware Customer Experience Improvement Program (CEIP)++++",
48 | "VMware's Customer Experience Improvement Program (CEIP) ",
49 | "provides VMware with information that enables VMware to ",
50 | "improve its products and services, to fix problems, ",
51 | "and to advise you on how best to deploy and use our ",
52 | "products. As part of CEIP, VMware collects technical ",
53 | "information about your organization's use of VMware ",
54 | "products and services on a regular basis in association ",
55 | "with your organization's VMware license key(s). This ",
56 | "information does not personally identify any individual. ",
57 | "",
58 | "Additional information regarding the data collected ",
59 | "through CEIP and the purposes for which it is used by ",
60 | "VMware is set forth in the Trust & Assurance Center at ",
61 | "http://www.vmware.com/trustvmware/ceip.html . If you ",
62 | "prefer not to participate in VMware's CEIP for this ",
63 | "product, you should disable CEIP by setting ",
64 |         "'ceip_enabled': false. You may join or leave VMware's ",
65 | "CEIP for this product at any time. Please confirm your ",
66 | "acknowledgement by passing in the parameter ",
67 | "--acknowledge-ceip in the command line.",
68 | "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
69 | ]
70 | },
71 | "settings": {
72 | "ceip_enabled": false
73 | }
74 | }
75 | }
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-deploy-vc-and-hosts/templates/embedded_vCSA_on_VC_7.0.json:
--------------------------------------------------------------------------------
1 | {
2 | "__version": "2.13.0",
3 | "__comments": "Sample template to deploy a vCenter Server Appliance with an embedded Platform Services Controller on a vCenter Server instance.",
4 | "new_vcsa": {
5 | "vc": {
6 | "__comments": [
7 | "'datacenter' must end with a datacenter name, and only with a datacenter name. ",
8 | "'target' must end with an ESXi hostname, a cluster name, or a resource pool name. ",
9 | "The item 'Resources' must precede the resource pool name. ",
10 | "All names are case-sensitive. ",
11 | "For details and examples, refer to template help, i.e. vcsa-deploy {install|upgrade|migrate} --template-help"
12 | ],
13 | "hostname": "{{ hosting_vcenter.ip }}",
14 | "username": "{{ hosting_vcenter.username }}",
15 | "password": "{{ hosting_vcenter.password }}",
16 | "deployment_network": "{{ nested_vcenter.hosting_network }}",
17 | "datacenter": [
18 | "{{ hosting_vcenter.datacenter }}"
19 | ],
20 | "datastore": "{{ nested_vcenter.hosting_datastore }}",
21 | "target": [
22 | "{{ nested_vcenter.hosting_cluster }}"
23 | ]
24 | },
25 | "appliance": {
26 | "__comments": [
27 | "You must provide the 'deployment_option' key with a value, which will affect the VCSA's configuration parameters, such as the VCSA's number of vCPUs, the memory size, the storage size, and the maximum numbers of ESXi hosts and VMs which can be managed. For a list of acceptable values, run the supported deployment sizes help, i.e. vcsa-deploy --supported-deployment-sizes"
28 | ],
29 | "thin_disk_mode": true,
30 | "deployment_option": "tiny",
31 | "name": "{{ environment_tag }}-vcenter"
32 | },
33 | "network": {
34 | "ip_family": "ipv4",
35 | "mode": "static",
36 | "system_name": "{{ nested_vcenter.host_name }}",
37 | "ip": "{{ nested_vcenter.ip }}",
38 | "prefix": "{{ nested_vcenter.mask }}",
39 | "gateway": "{{ nested_vcenter.gw }}",
40 | "dns_servers": [
41 | "{{ dns_server }}"
42 | ]
43 | },
44 | "os": {
45 | "password": "{{ nested_vcenter.password }}",
46 | "ntp_servers": "{{ ntp_server_ip }}",
47 | "ssh_enable": true
48 | },
49 | "sso": {
50 | "password": "{{ nested_vcenter.password }}",
51 | "domain_name": "vsphere.local"
52 | }
53 | },
54 | "ceip": {
55 | "description": {
56 | "__comments": [
57 | "++++VMware Customer Experience Improvement Program (CEIP)++++",
58 | "VMware's Customer Experience Improvement Program (CEIP) ",
59 | "provides VMware with information that enables VMware to ",
60 | "improve its products and services, to fix problems, ",
61 | "and to advise you on how best to deploy and use our ",
62 | "products. As part of CEIP, VMware collects technical ",
63 | "information about your organization's use of VMware ",
64 | "products and services on a regular basis in association ",
65 | "with your organization's VMware license key(s). This ",
66 | "information does not personally identify any individual. ",
67 | "",
68 | "Additional information regarding the data collected ",
69 | "through CEIP and the purposes for which it is used by ",
70 | "VMware is set forth in the Trust & Assurance Center at ",
71 | "http://www.vmware.com/trustvmware/ceip.html . If you ",
72 | "prefer not to participate in VMware's CEIP for this ",
73 | "product, you should disable CEIP by setting ",
74 | "'ceip_enabled': false. You may join or leave VMware's ",
75 | "CEIP for this product at any time. Please confirm your ",
76 | "acknowledgement by passing in the parameter ",
77 | "--acknowledge-ceip in the command line.",
78 | "++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
79 | ]
80 | },
81 | "settings": {
82 | "ceip_enabled": false
83 | }
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-deploy-vc-and-hosts/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | disks_to_add: ""
3 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-distributed-port-groups/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Build the list of port groups to create on each VDS
3 | - name: Build list of port groups to add to each vds
4 | ansible.builtin.set_fact:
5 | vds_port_groups: >-
6 | [
7 | {% for distributed_switch in distributed_switches %}
8 | {% if "distributed_port_groups" in distributed_switch %}
9 | {% for distributed_port_group in distributed_switch.distributed_port_groups %}
10 | {
11 | "vds_name": "{{ distributed_switch.vds_name }}",
12 | "port_group_name": "{{ distributed_port_group.port_group_name }}",
13 | "vlan_id": "{{ distributed_port_group.vlan_id }}",
14 | },
15 | {% endfor %}
16 | {% endif %}
17 | {% endfor %}
18 | ]
19 | when: 'distributed_switches is defined'
20 |
21 | - name: Create VDS portgroups
22 | community.vmware.vmware_dvs_portgroup:
23 | hostname: "{{ nested_vcenter.ip }}"
24 | username: "{{ nested_vcenter.username }}"
25 | password: "{{ nested_vcenter.password }}"
26 | validate_certs: false
27 | state: present
28 | num_ports: 8
29 | port_binding: ephemeral
30 | portgroup_name: "{{ item.port_group_name }}"
31 | switch_name: "{{ item.vds_name }}"
32 | vlan_id: "{{ item.vlan_id }}"
33 | loop: "{{ vds_port_groups }}"
34 | delegate_to: localhost
35 | when: 'distributed_switches is defined'
36 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-distributed-switches/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create VDS
3 | community.vmware.vmware_dvswitch:
4 | hostname: "{{ nested_vcenter.ip }}"
5 | username: "{{ nested_vcenter.username }}"
6 | password: "{{ nested_vcenter.password }}"
7 | validate_certs: false
8 | datacenter: "{{ nested_vcenter.datacenter }}"
9 | switch: "{{ item.vds_name }}"
10 | version: "{{ item.vds_version }}"
11 | mtu: "{{ item.mtu }}"
12 | uplink_quantity: "{{ item.uplink_quantity }}"
13 | state: present
14 | loop: "{{ distributed_switches }}"
15 | delegate_to: localhost
16 |
17 | # Build the host-to-VDS mapping by using the nested_cluster field in the host spec
18 | - name: Build list of hosts to add to each vds
19 | ansible.builtin.set_fact:
20 | host_vds_binding: >-
21 | [
22 | {% for distributed_switch in distributed_switches %}
23 | {% if "vmnics" in distributed_switch %}
24 | {% for cluster_mapping in distributed_switch.clusters %}
25 | {% for host in nested_hosts %}
26 | {% if cluster_mapping == host.nested_cluster %}
27 | {
28 | "vds_name": "{{ distributed_switch.vds_name }}",
29 | "esxi_hostname": "{{ host.ip }}",
30 | "vmnics": "{{ distributed_switch.vmnics }}",
31 | },
32 | {% endif %}
33 | {% endfor %}
34 | {% endfor %}
35 | {% endif %}
36 | {% endfor %}
37 | ]
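# Example rendered result (values follow the example vars files):
# [{"vds_name": "vds_overlay", "esxi_hostname": "192.168.0.184", "vmnics": ["vmnic1"]}]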
38 |
39 | - name: Add Host to dVS
40 | community.vmware.vmware_dvs_host:
41 | hostname: "{{ nested_vcenter.ip }}"
42 | username: "{{ nested_vcenter.username }}"
43 | password: "{{ nested_vcenter.password }}"
44 | validate_certs: false
45 | esxi_hostname: "{{ item.esxi_hostname }}"
46 | switch_name: "{{ item.vds_name }}"
47 | vmnics: "{{ item.vmnics | default(omit) }}"
48 | state: present
49 | loop: "{{ host_vds_binding }}"
50 | delegate_to: localhost
51 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-enable-cluster-services/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Update Clusters to enable HA
3 | community.vmware.vmware_cluster_ha:
4 | hostname: "{{ nested_vcenter.ip }}"
5 | username: "{{ nested_vcenter.username }}"
6 | password: "{{ nested_vcenter.password }}"
7 | datacenter_name: "{{ nested_vcenter.datacenter }}"
8 | cluster_name: "{{ item.key }}"
9 | validate_certs: false
10 | enable: "{{ item.value.enable_ha }}"
11 | ha_host_monitoring: "{{ item.value.ha_host_monitoring | default('disabled') }}"
12 | with_dict: "{{ nested_clusters }}"
13 | when: '"enable_ha" in item.value'
14 |
15 | - name: Update Clusters to enable DRS
16 | community.vmware.vmware_cluster_drs:
17 | hostname: "{{ nested_vcenter.ip }}"
18 | username: "{{ nested_vcenter.username }}"
19 | password: "{{ nested_vcenter.password }}"
20 | datacenter_name: "{{ nested_vcenter.datacenter }}"
21 | cluster_name: "{{ item.key }}"
22 | validate_certs: false
23 | enable: "{{ item.value.enable_drs }}"
24 | drs_default_vm_behavior: "{{ item.value.drs_default_vm_behavior | default(omit) }}"
25 | with_dict: "{{ nested_clusters }}"
26 | when: '"enable_drs" in item.value'
27 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-import-vm-template/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Upload OVA that will be converted into a template
3 | community.vmware.vmware_deploy_ovf:
4 | hostname: "{{ item.0.vcenter_server }}"
5 | username: "{{ item.0.vcenter_username }}"
6 | password: "{{ item.0.vcenter_password }}"
7 | validate_certs: false
8 | cluster: "{{ item.1 }}"
9 | datacenter: "{{ item.0.vcenter_datacenter }}"
10 | datastore: "{{ item.0.vcenter_datastore }}"
11 | disk_provisioning: thin
12 | networks:
13 | nic0: "{{ item.0.vcenter_network }}"
14 | ova: "{{ item.0.local_path }}"
15 | allow_duplicates: false
16 | power_on: false
17 | fail_on_spec_warnings: false
18 | wait: false
19 | delegate_to: localhost
20 | with_subelements:
21 | - "{{ vm_templates }}"
22 | - "vsphere_clusters"
23 |   retries: 4  # retries allow newly configured clusters to settle
24 | delay: 15
25 | register: vm_instance_data
26 |
27 | - name: Import Base Image Template into vSphere
28 | community.vmware.vmware_guest:
29 | hostname: "{{ item.item.0.vcenter_server }}"
30 | username: "{{ item.item.0.vcenter_username }}"
31 | password: "{{ item.item.0.vcenter_password }}"
32 | validate_certs: false
33 | name: "{{ item.instance.hw_name }}"
34 | state: "present"
35 | is_template: true
36 | delegate_to: localhost
37 | loop: "{{ vm_instance_data.results }}"
38 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-local-datastores/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Gather disk info for all ESXi hosts in the given cluster
3 | community.vmware.vmware_host_disk_info:
4 | hostname: "{{ nested_vcenter.ip }}"
5 | username: "{{ nested_vcenter.username }}"
6 | password: "{{ nested_vcenter.password }}"
7 | validate_certs: false
8 | cluster_name: "{{ item.key }}"
9 | delegate_to: localhost
10 | register: cluster_host_disks
11 | with_dict: "{{ nested_clusters }}"
12 |
13 | - name: Build device lookup table
14 | ansible.builtin.set_fact:
15 | host_disk_map: >-
16 | {
17 | {% for cluster in cluster_host_disks.results %}
18 | {% for host_name, disks in cluster.hosts_disk_info.items() %}
19 | {% for disk in disks %}
20 | "{{ cluster.invocation.module_args.cluster_name }}_{{ host_name }}_{{ disk.device_ctd_list[0] }}":
21 | "{{ disk.canonical_name }}",
22 | {% endfor %}
23 | {% endfor %}
24 | {% endfor %}
25 | }
26 |
27 | # Build a list of each datastore to add to each host
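# The disk_reference key assumes the OVA boot disk sits at vmhba0:C0:T0:L0, so the first
# added data disk is target T1, the second T2, and so on (hence loop.index0 + 1).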
28 | - name: Build datastores to add
29 | ansible.builtin.set_fact:
30 | datastores_to_add: >-
31 | [
32 | {% for host in nested_hosts %}
33 | {% if "nested_hosts_disks" in nested_clusters[host.nested_cluster] %}
34 | {% for disk in nested_clusters[host.nested_cluster].nested_hosts_disks %}
35 | {% if "datastore_prefix" in disk %}
36 | {
37 | "host_name": "{{ host.ip }}",
38 | "datastore_prefix": "{{ disk.datastore_prefix }}-{{ host.name }}",
39 | "disk_reference" : "{{ host.nested_cluster }}_{{ host.ip }}_vmhba0:C0:T{{ loop.index0 + 1 }}:L0",
40 | },
41 | {% endif %}
42 | {% endfor %}
43 | {% endif %}
44 | {% endfor %}
45 | ]
46 |
47 | - name: Mount VMFS datastores to ESXi
48 | community.vmware.vmware_host_datastore:
49 | hostname: "{{ nested_vcenter.ip }}"
50 | username: "{{ nested_vcenter.username }}"
51 | password: "{{ nested_vcenter.password }}"
52 | validate_certs: false
53 | datastore_name: "{{ item.datastore_prefix }}"
54 | datastore_type: "vmfs"
55 | vmfs_device_name: "{{ host_disk_map[item.disk_reference] }}"
56 | vmfs_version: 6
57 | esxi_hostname: "{{ item.host_name }}"
58 | state: present
59 | delegate_to: localhost
60 | loop: "{{ datastores_to_add }}"
61 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-local-datastores/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | datastores_to_add: []
3 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-nfs-datastores/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Build a list of each datastore to add to each host
3 | - name: Build datastores to add
4 | ansible.builtin.set_fact:
5 | datastores_to_add: >-
6 | [
7 | {% for host in nested_hosts %}
8 | {% if "nfs_datastores" in nested_clusters[host.nested_cluster] %}
9 | {% for datastore in nested_clusters[host.nested_cluster].nfs_datastores %}
10 | {
11 | "host_name": "{{ host.ip }}",
12 | "datastore_name": "{{ datastore.datastore_name }}",
13 | "nfs_server": "{{ datastore.nfs_server }}",
14 | "nfs_path": "{{ datastore.nfs_path }}",
15 | },
16 | {% endfor %}
17 | {% endif %}
18 | {% endfor %}
19 | ]
20 |
21 | - name: Mount NFS3 datastores to ESXi
22 | community.vmware.vmware_host_datastore:
23 | hostname: "{{ nested_vcenter.ip }}"
24 | username: "{{ nested_vcenter.username }}"
25 | password: "{{ nested_vcenter.password }}"
26 | validate_certs: false
27 | datastore_name: "{{ item.datastore_name }}"
28 | datastore_type: "nfs"
29 | nfs_server: '{{ item.nfs_server }}'
30 | nfs_path: '{{ item.nfs_path }}'
31 | nfs_ro: false
32 | esxi_hostname: "{{ item.host_name }}"
33 | state: present
34 | delegate_to: localhost
35 | loop: "{{ datastores_to_add }}"
36 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-nfs-datastores/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | datastores_to_add: []
3 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-resource-pools/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Build a list of resource pools to add and ignore cluster if none specified
3 | - name: Build resource pools to add
4 | ansible.builtin.set_fact:
5 | resource_pools_to_add: >-
6 | [
7 | {% for cluster_name, cluster_spec in nested_clusters.items() %}
8 | {% if "resource_pools" in cluster_spec %}
9 | {% for resource_pool in cluster_spec.resource_pools %}
10 | {
11 | "cluster_name": "{{ cluster_name }}",
12 | "resource_pool": "{{ resource_pool }}",
13 | },
14 | {% endfor %}
15 | {% endif %}
16 | {% endfor %}
17 | ]
18 |
19 | - name: Add resource pool to vCenter
20 | community.vmware.vmware_resource_pool:
21 | hostname: "{{ nested_vcenter.ip }}"
22 | username: "{{ nested_vcenter.username }}"
23 | password: "{{ nested_vcenter.password }}"
24 | validate_certs: false
25 | datacenter: "{{ nested_vcenter.datacenter }}"
26 | cluster: "{{ item.cluster_name }}"
27 | resource_pool: "{{ item.resource_pool }}"
28 | state: present
29 | loop: "{{ resource_pools_to_add }}"
30 | delegate_to: localhost
31 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-storage-based-policy-management/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create storage tag categories
3 | community.vmware.vmware_category:
4 | hostname: "{{ nested_vcenter.ip }}"
5 | username: "{{ nested_vcenter.username }}"
6 | password: "{{ nested_vcenter.password }}"
7 | validate_certs: false
8 | category_name: "{{ item.category_name }}"
9 | category_description: "{{ item.description }}"
10 | category_cardinality: 'multiple'
11 | associable_object_types:
12 | - Datastore
13 | state: present
14 | delegate_to: localhost
15 | register: tag_category_result
16 | loop: "{{ tspbm.tag_categories }}"
17 |
18 | # Build a lookup table mapping tag category names to their IDs
19 |
20 | - name: Build tag category lookup table
21 | ansible.builtin.set_fact:
22 | tag_category_map: >-
23 | {
24 | {% for tag_category in tag_category_result.results %}
25 | "{{ tag_category.item.category_name }}": "{{ tag_category.category_results.category_id }}",
26 | {% endfor %}
27 | }
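# Example rendered result (category name from the example vars files; the vCenter-generated
# ID is elided): {"tkgs-storage-category": "urn:vmomi:InventoryServiceCategory:...:GLOBAL"}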
28 |
29 | - ansible.builtin.debug: var=tspbm.datastore_tags
30 |
31 | - name: Add datastore tags
32 | community.vmware.vmware_tag:
33 | hostname: "{{ nested_vcenter.ip }}"
34 | username: "{{ nested_vcenter.username }}"
35 | password: "{{ nested_vcenter.password }}"
36 | validate_certs: false
37 | category_id: "{{ tag_category_map[item.0.category_name] }}"
38 | tag_name: "{{ item.1.tag_name }}"
39 | tag_description: "{{ item.1.description }}"
40 | state: present
41 | delegate_to: localhost
42 | with_subelements:
43 | - "{{ tspbm.tag_categories }}"
44 | - "tags"
45 |
46 | - name: Add tags to datastores
47 | community.vmware.vmware_tag_manager:
48 | hostname: "{{ nested_vcenter.ip }}"
49 | username: "{{ nested_vcenter.username }}"
50 | password: "{{ nested_vcenter.password }}"
51 | validate_certs: false
52 | tag_names: "{{ item.tag_names }}"
53 | object_name: "{{ item.datastore_name }}"
54 | object_type: Datastore
55 | state: add
56 | delegate_to: localhost
57 | loop: "{{ tspbm.datastore_tags }}"
58 |
59 | - name: Create or update a vSphere tag-based storage policy
60 | community.vmware.vmware_vm_storage_policy:
61 | hostname: "{{ nested_vcenter.ip }}"
62 | username: "{{ nested_vcenter.username }}"
63 | password: "{{ nested_vcenter.password }}"
64 | validate_certs: false
65 | name: "{{ item.storage_policy_name }}"
66 | description: "{{ item.description }}"
67 | tag_category: "{{ item.tag_category }}"
68 | tag_name: "{{ item.tag_name }}"
69 | tag_affinity: true
70 | state: "present"
71 | delegate_to: localhost
72 | loop: "{{ tspbm.vm_storage_policies }}"
73 |
--------------------------------------------------------------------------------
/roles/vsphere/vsphere-vswitch0-port-groups/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Remove Default VM Portgroup
3 | community.vmware.vmware_portgroup:
4 | hostname: "{{ nested_vcenter.ip }}"
5 | username: "{{ nested_vcenter.username }}"
6 | password: "{{ nested_vcenter.password }}"
7 | validate_certs: false
8 | cluster_name: "{{ item.key }}"
9 | switch_name: vSwitch0
10 | portgroup_name: "VM Network"
11 | state: absent
12 | delegate_to: localhost
13 | with_dict: "{{ nested_clusters | default({}) }}"
14 | when: '"vswitch0_vm_port_group_name" in item.value and "vswitch0_vm_port_group_vlan" in item.value'
15 |
16 | - name: Add VM Portgroup to vSwitch 0
17 | community.vmware.vmware_portgroup:
18 | hostname: "{{ nested_vcenter.ip }}"
19 | username: "{{ nested_vcenter.username }}"
20 | password: "{{ nested_vcenter.password }}"
21 | validate_certs: false
22 | cluster_name: "{{ item.key }}"
23 | switch_name: vSwitch0
24 | portgroup_name: "{{ item.value.vswitch0_vm_port_group_name }}"
25 | vlan_id: "{{ item.value.vswitch0_vm_port_group_vlan }}"
26 | delegate_to: localhost
27 | with_dict: "{{ nested_clusters }}"
28 | when: '"vswitch0_vm_port_group_name" in item.value and "vswitch0_vm_port_group_vlan" in item.value'
29 |
--------------------------------------------------------------------------------
/tests/run-tests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
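# Example invocation (assumes the docker image and software bundle are available locally):
#   export PARENT_VCENTER_USERNAME=administrator@vsphere.local
#   export PARENT_VCENTER_PASSWORD='VMware1!'
#   export SOFTWARE_DIR=/path/to/software
#   export NSXT_LICENSE_KEY=...   # only needed for the NSX-T and Tanzu tests
#   ./tests/run-tests.sh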
3 | set -eu
4 |
5 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.."
6 |
7 | echo "$script_dir"
8 |
9 | function run_playbook() {
10 | vars_file=$1
11 | action=$2
12 | echo "running $action for $vars_file"
13 | docker run --rm \
14 | --env PARENT_VCENTER_USERNAME=${PARENT_VCENTER_USERNAME} \
15 | --env PARENT_VCENTER_PASSWORD=${PARENT_VCENTER_PASSWORD} \
16 | --env SOFTWARE_DIR='/software_dir' \
17 | --env ANSIBLE_FORCE_COLOR='true' \
18 | --env NSXT_LICENSE_KEY=${NSXT_LICENSE_KEY:-na} \
19 | --env vars_file=${vars_file} \
20 | --env action=${action} \
21 | --volume ${SOFTWARE_DIR}:/software_dir \
22 | --volume ${script_dir}:/work \
23 | laidbackware/vmware-lab-builder:v1 \
24 | ansible-playbook /work/${action}.yml \
25 |     --extra-vars "@/work/var-examples/${vars_file}.yml" \
26 |     || return_code=$?
27 | 
28 |   if [[ ${return_code:-0} -ne 0 ]]; then
29 | echo "$action failed for $vars_file"
30 | exit $return_code
31 | fi
32 |
33 | echo -e "\n\n############################################################################################"
34 | echo "$vars_file $action complete"
35 | echo -e "############################################################################################\n\n"
36 | }
37 |
38 | function run_test() {
39 | vars_file=$1
40 |
41 | run_playbook $vars_file deploy
42 | run_playbook $vars_file destroy
43 | }
44 |
45 | function test_opinionated_examples() {
46 | run_test base-vsphere/minimal-opinionated
47 | run_test nsxt/opinionated
48 | run_test tanzu/vsphere-vds/opinionated-1host-haproxy
49 | run_test tanzu/vsphere-nsxt/opinionated-1host
50 | }
51 |
52 | test_opinionated_examples
--------------------------------------------------------------------------------
/tests/test-nsx-local.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eu
4 |
5 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.."
6 |
7 | echo "$script_dir"
8 |
9 | nsx_30_ova="nsx-unified-appliance-3.0.3.2.0.19603133.ova"
10 | nsx_31_ova="nsx-unified-appliance-3.1.3.8.0.20532387.ova"
11 | nsx_32_ova="nsx-unified-appliance-3.2.3.0.0.21703641.ova"
12 | nsx_40_ova="nsx-unified-appliance-4.0.1.1.0.20598732.ova"
13 |
14 |
15 |
16 | ansible-playbook $script_dir/deploy.yml --extra-vars "@$script_dir/var-examples/nsxt/opinionated.yml" \
17 | --extra-vars "nsxt_ova=${SOFTWARE_DIR}/${nsx_30_ova}" -vvv
18 |
19 | ansible-playbook $script_dir/destroy.yml --extra-vars "@$script_dir/var-examples/nsxt/opinionated.yml"
20 |
21 | ansible-playbook $script_dir/deploy.yml --extra-vars "@$script_dir/var-examples/nsxt/opinionated.yml" \
22 | --extra-vars "nsxt_ova=${SOFTWARE_DIR}/${nsx_31_ova}" -vvv
23 |
24 | ansible-playbook $script_dir/destroy.yml --extra-vars "@$script_dir/var-examples/nsxt/opinionated.yml"
25 |
26 | ansible-playbook $script_dir/deploy.yml --extra-vars "@$script_dir/var-examples/nsxt/opinionated.yml" \
27 | --extra-vars "nsxt_ova=${SOFTWARE_DIR}/${nsx_32_ova}" -vvv
28 |
29 | ansible-playbook $script_dir/destroy.yml --extra-vars "@$script_dir/var-examples/nsxt/opinionated.yml"
30 |
31 | ansible-playbook $script_dir/deploy.yml --extra-vars "@$script_dir/var-examples/nsxt/opinionated.yml" \
32 | --extra-vars "nsxt_ova=${SOFTWARE_DIR}/${nsx_40_ova}" -vvv
33 |
34 | ansible-playbook $script_dir/destroy.yml --extra-vars "@$script_dir/var-examples/nsxt/opinionated.yml"
--------------------------------------------------------------------------------
/var-examples/base-vsphere/4hosts-2clusters-custom.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SOFTWARE_DIR must contain all required software
3 | vc_iso: "{{ lookup('env', 'SOFTWARE_DIR') }}/VMware-VCSA-all-7.0.1-17327517.iso"
4 | esxi_ova: "{{ lookup('env', 'SOFTWARE_DIR') }}/Nested_ESXi7.0.0_Appliance_Template_v1.ova"
5 |
6 | environment_tag: "full" # Prepended to object names in the hosting vCenter
7 | dns_server: "192.168.0.1"
8 | dns_domain: "home.local"
9 | ntp_server_ip: time1.google.com
10 | disk_mode: thin # How all disks should be deployed
11 | nested_host_password: "VMware1!"
12 |
13 | hosting_vcenter: # This is the vCenter which will be the target for nested vCenters and ESXi hosts
14 | ip: "vcsa.lab"
15 | username: "{{ lookup('env', 'PARENT_VCENTER_USERNAME') }}"
16 | password: "{{ lookup('env', 'PARENT_VCENTER_PASSWORD') }}"
17 | datacenter: "Home" # Target for all VM deployment
18 |
19 | nested_vcenter: # the vCenter appliance that will be deployed
20 | ip: "192.168.0.181" # vCenter ip address
21 | mask: "22"
22 | gw: "192.168.0.1"
23 |   host_name: "192.168.0.181" # FQDN if there is a working DNS server, otherwise use the IP as the name
24 | username: "administrator@vsphere.local"
25 | password: "VMware1!"
26 | datacenter: "Lab" # DC to create after deployment
27 | # Below are properties of parent cluster
28 | hosting_network: "Net1" # Parent port group where the vCenter VM will be deployed
29 | hosting_cluster: "Physical" # Parent cluster where the vCenter VM will be deployed
30 | hosting_datastore: "NVME" # Parent datastore where the vCenter VM will be deployed
31 |
32 | nested_clusters: # You can add clusters in this section by duplicating the existing cluster
33 | skinny: # This will be the name of the cluster in the nested vCenter. Below are the minimum settings.
34 | # Below are properties of the hosting cluster
35 | management_vmk0_vlan: "0"
36 | hosting_cluster: "Physical" # The nested ESXi VMs will be deployed here
37 | hosting_datastore: "NVME" # Datastore target for nested ESXi VMs
38 | cpu_cores: 4 # CPU count
39 | ram_in_gb: 16 # memory
40 | # Added in vmnic order, these port groups must exist on the physical host
41 | # Must specify at least 2 port groups, up to a maximum of 10
42 | vmnic_physical_portgroup_assignment:
43 | - name: "Nest"
44 | - name: "Nest"
45 | full-fat: # This will be the name of the cluster in the nested vCenter
46 | enable_drs: true
47 | drs_default_vm_behavior: partiallyAutomated # Defaults to fully automated
48 |     # HA can only be enabled if there are datastores accessible by all hosts.
49 | enable_ha: true
50 | ha_host_monitoring: enabled
51 | # Settings below are assigned to each host in the cluster
52 | management_vmk0_vlan: "0"
53 | vswitch0_vm_port_group_name: vm-network
54 | vswitch0_vm_port_group_vlan: 0
55 | mgmt_vlan: 0 # VLAN ID used to tag VMK0
56 | cpu_cores: 4 # CPU count
57 | ram_in_gb: 16 # memory
58 |     # In-order list of disks to assign to the nested host. All will be marked as SSD.
59 |     # Datastore names automatically have the hostname appended, e.g. datastore-esx1
60 |     # If the datastore_prefix property is removed, the disk will not be set up as a datastore
61 | # To leave the default OVA disks in place, delete this section.
62 | nested_hosts_disks:
63 | - size_gb: 200
64 | datastore_prefix: "datastore"
65 | # Below are properties of the hosting cluster
66 | hosting_cluster: "Physical" # The nested ESXi VMs will be deployed here
67 | hosting_datastore: "NVME" # Datastore target for nested ESXi VMs
68 | # Added in vmnic order, these port groups must exist on the physical host
69 | # Must specify at least 2 port groups, up to a maximum of 10
70 | vmnic_physical_portgroup_assignment:
71 | - name: "Nest"
72 | - name: "TEP"
73 | - name: "TEP"
74 | resource_pools: # List of resource pools, remove if not needed
75 | - TAS-AZ1
76 | - TKGI-AZ1
77 |
78 | # You can add nested ESXi hosts below
79 | nested_hosts:
80 | - name: esx1 # environment_tag will prepend the name to create the VM name
81 | ip: 192.168.0.182 # This will also be used as the hostname in the nested vCenter
82 | mask: 255.255.252.0
83 | gw: 192.168.0.1
84 | nested_cluster: skinny # the nested vcenter cluster, which is defined in the clusters section.
85 | - name: esx2 # environment_tag will prepend the name to create the VM name
86 | ip: 192.168.0.183 # This will also be used as the hostname in the nested vCenter
87 | mask: 255.255.252.0
88 | gw: 192.168.0.1
89 | nested_cluster: skinny # the nested vcenter cluster, which is defined in the clusters section.
90 | - name: esx3
91 | ip: 192.168.0.184 # This will also be used as the hostname in the nested vCenter
92 | mask: 255.255.252.0
93 | gw: 192.168.0.1
94 | nested_cluster: full-fat # the nested vcenter cluster, which is defined in the clusters section.
95 | - name: esx4
96 | ip: 192.168.0.185 # This will also be used as the hostname in the nested vCenter
97 | mask: 255.255.252.0
98 | gw: 192.168.0.1
99 | nested_cluster: full-fat # the nested vcenter cluster, which is defined in the clusters section.
100 |
101 | distributed_switches: # To skip creating distributed switches, comment out this section.
102 | - vds_name: vds_overlay
103 | mtu: 9000
104 |     vds_version: 7.0.0 # Set to 7.0.0 or 6.7.0
105 | clusters: # distributed switch will be attached to all hosts in the cluster
106 | - full-fat
107 | uplink_quantity: 1
108 | vmnics:
109 | - vmnic1
110 | distributed_port_groups: # Remove to create an empty VDS
111 | - port_group_name: tep_pg
112 | vlan_id: "0"
113 |
114 | tkgs:
115 | datastore_tag_category: tkgs-storage-category
116 | datastore_tag: tkgs-storage
117 | datastore_to_tag: test2-esx4
118 |
--------------------------------------------------------------------------------
/var-examples/base-vsphere/README.md:
--------------------------------------------------------------------------------
1 | # Base vSphere
2 |
3 | ## Tested Versions
4 | - vSphere 7.0 U3 and 8.0 U1
5 |
6 | ## Architecture
7 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
8 | ```mermaid
9 | flowchart LR
10 | router_net("Routed\nNetwork")
11 | esxi_host["Physical\nESXi Host"]
12 | base_pg("Base\nPort Group")
13 | nested_host["Nested\nESXi Host"]
14 | vcenter["vCenter"]
15 | base_vss("VM network\nStandard Switch")
16 |
17 | router_net ---esxi_host
18 | esxi_host ---base_pg
19 | base_pg -- ESXi MGMT\n&\nVM Network ---nested_host
20 | base_pg ---vcenter
21 | nested_host ---base_vss
22 |
23 | style router_net fill:#aaa
24 | style base_pg fill:#aaa
25 | style base_vss fill:#aaa
26 | style esxi_host fill:#0ff
27 | style nested_host fill:#0c0
28 | style vcenter fill:#0c0
29 | ```
30 | - A single vCenter will be added.
31 | - Within the nested host the `vm-network` port group can be used to attach VMs to the routed network that has been passed through, as sketched below.
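
As a minimal sketch (assuming a VM named `test-vm` already exists; credentials follow the `nested_vcenter` variables used throughout the example vars files), a NIC can be attached to that port group with:
```yaml
- name: Attach a VM NIC to the nested vm-network port group
  community.vmware.vmware_guest_network:
    hostname: "{{ nested_vcenter.ip }}"
    username: "{{ nested_vcenter.username }}"
    password: "{{ nested_vcenter.password }}"
    validate_certs: false
    name: "test-vm"             # hypothetical VM name
    network_name: "vm-network"  # port group created inside the nested host
    state: present
  delegate_to: localhost
```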
32 |
--------------------------------------------------------------------------------
/var-examples/base-vsphere/minimal-opinionated-nfs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SOFTWARE_DIR must contain all required software
3 | vc_iso: "{{ lookup('env', 'SOFTWARE_DIR') }}/VMware-VCSA-all-7.0.1-17327517.iso"
4 | esxi_ova: "{{ lookup('env', 'SOFTWARE_DIR') }}/Nested_ESXi7.0.1_Appliance_Template_v1.ova"
5 |
6 | environment_tag: "minimal" # Prepended to object names in the hosting vCenter
7 | dns_server: "192.168.0.1"
8 | dns_domain: "home.local"
9 | ntp_server_ip: "192.168.0.1"
10 | disk_mode: thin # How all disks should be deployed
11 | nested_host_password: "{{ opinionated.master_password }}"
12 |
13 | hosting_vcenter: # This is the vCenter which will be the target for nested vCenters and ESXi hosts
14 | ip: "vcsa.lab"
15 | username: "{{ lookup('env', 'PARENT_VCENTER_USERNAME') }}"
16 | password: "{{ lookup('env', 'PARENT_VCENTER_PASSWORD') }}"
17 | datacenter: "Home" # Target for all VM deployment
18 |
19 | # This section is only referenced by other variables in this file
20 | opinionated:
21 | master_password: "VMware1!"
22 | number_of_hosts: 2
23 | nested_hosts:
24 | cpu_cores: 4 # CPU count
25 | ram_in_gb: 16 # memory
26 | nfs_datastores:
27 | - datastore_name: nfs01
28 | nfs_server: nas.homelab.local
29 | nfs_path: /mnt/nfs01
30 | - datastore_name: nfs02
31 | nfs_server: nas.homelab.local
32 | nfs_path: /mnt/nfs02
33 | hosting_cluster: Physical
34 | hosting_datastore: NVME
35 | hosting_network:
36 | base:
37 | port_group: Nest
38 | cidr: "192.168.0.0/22"
39 | gateway: "192.168.0.1"
40 |       # Minimal deployment requires 1 IP, plus 1 per ESXi host. They MUST be contiguous.
41 | starting_addr: "192.168.0.180"
42 |
43 | #####################################################################
44 | ### No need to edit below this line for an opinionated deployment ###
45 | #####################################################################
46 |
47 | nested_vcenter: # the vCenter appliance that will be deployed
48 | ip: "{{ opinionated.hosting_network.base.starting_addr }}" # vCenter ip address
49 | mask: "{{ opinionated.hosting_network.base.cidr.split('/')[1] }}"
50 | gw: "{{ opinionated.hosting_network.base.gateway }}"
51 |   host_name: "{{ opinionated.hosting_network.base.starting_addr }}" # FQDN if there is a working DNS server, otherwise use the IP as the name
52 | username: "administrator@vsphere.local"
53 | password: "{{ opinionated.master_password }}"
54 | datacenter: "Lab" # DC to create after deployment
55 | # Below are properties of parent cluster
56 | hosting_network: "{{ opinionated.hosting_network.base.port_group }}" # Parent port group where the vCenter VM will be deployed
57 | hosting_cluster: "{{ opinionated.hosting_cluster }}" # Parent cluster where the vCenter VM will be deployed
58 | hosting_datastore: "{{ opinionated.hosting_datastore }}" # Parent datastore where the vCenter VM will be deployed
59 |
60 | nested_clusters: # You can add clusters in this section by duplicating the existing cluster
61 | compute: # This will be the name of the cluster in the nested vCenter. Below are the minimum settings.
62 | # Below are properties of the hosting cluster
63 | hosting_cluster: "{{ opinionated.hosting_cluster }}" # The nested ESXi VMs will be deployed here
64 | hosting_datastore: "{{ opinionated.hosting_datastore }}" # Datastore target for nested ESXi VMs
65 | # Settings below are assigned to each host in the cluster
66 | cpu_cores: "{{ opinionated.nested_hosts.cpu_cores }}" # CPU count
67 | ram_in_gb: "{{ opinionated.nested_hosts.ram_in_gb }}" # memory
68 | # Added in vmnic order, these port groups must exist on the physical host
69 | # Must specify at least 2 port groups, up to a maximum of 10
70 | vmnic_physical_portgroup_assignment:
71 | - name: "{{ opinionated.hosting_network.base.port_group }}"
72 | - name: "{{ opinionated.hosting_network.base.port_group }}"
73 | nfs_datastores: "{{ opinionated.nested_hosts.nfs_datastores }}"
74 |
75 | opinionated_host_ip_ofset: 1
76 | # You can add nested ESXi hosts below
77 | nested_hosts: >-
78 | [
79 | {% for host_number in range(opinionated.number_of_hosts) %}
80 | {
81 | "name": "esx{{ host_number + 1 }}",
82 | "ip": "{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(opinionated_host_ip_ofset + host_number) }}",
83 | "mask": "{{ opinionated.hosting_network.base.cidr | ansible.utils.ipaddr('netmask') }}",
84 | "gw": "{{ opinionated.hosting_network.base.gateway }}",
85 | "nested_cluster": "compute"
86 |
87 | },
88 | {% endfor %}
89 | ]
90 |
--------------------------------------------------------------------------------
/var-examples/base-vsphere/minimal-opinionated.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SOFTWARE_DIR must contain all required software
3 | vc_iso: "{{ lookup('env', 'SOFTWARE_DIR') }}/VMware-VCSA-all-7.0.3-18778458.iso"
4 | esxi_ova: "{{ lookup('env', 'SOFTWARE_DIR') }}/Nested_ESXi7.0u3_Appliance_Template_v1.ova"
5 |
6 | environment_tag: "minimal" # Prepended to object names in the hosting vCenter
7 | dns_server: "192.168.0.1"
8 | dns_domain: "home.local"
9 | ntp_server_ip: "192.168.0.1"
10 | disk_mode: thin # How all disks should be deployed
11 | nested_host_password: "{{ opinionated.master_password }}"
12 |
13 | hosting_vcenter: # This is the vCenter which will be the target for nested vCenters and ESXi hosts
14 | ip: "vcsa.lab"
15 | username: "{{ lookup('env', 'PARENT_VCENTER_USERNAME') }}"
16 | password: "{{ lookup('env', 'PARENT_VCENTER_PASSWORD') }}"
17 | datacenter: "Home" # Target for all VM deployment
18 |
19 | # This section is only referenced by other variables in this file
20 | opinionated:
21 | master_password: "VMware1!"
22 | number_of_hosts: 1
23 | nested_hosts:
24 | cpu_cores: 4 # CPU count
25 | ram_in_gb: 16 # memory
26 | hosting_cluster: Physical
27 | hosting_datastore: NVME
28 | hosting_network:
29 | base:
30 | port_group: Nest
31 | cidr: "192.168.0.0/22"
32 | gateway: "192.168.0.1"
33 |       # Minimal deployment requires 1 IP, plus 1 per ESXi host. They MUST be contiguous.
34 | starting_addr: "192.168.0.180"
35 |
36 | #####################################################################
37 | ### No need to edit below this line for an opinionated deployment ###
38 | #####################################################################
39 |
40 | nested_vcenter: # the vCenter appliance that will be deployed
41 | ip: "{{ opinionated.hosting_network.base.starting_addr }}" # vCenter ip address
42 | mask: "{{ opinionated.hosting_network.base.cidr.split('/')[1] }}"
43 | gw: "{{ opinionated.hosting_network.base.gateway }}"
44 |   host_name: "{{ opinionated.hosting_network.base.starting_addr }}" # FQDN if there is a working DNS server, otherwise use the IP as the name
45 | username: "administrator@vsphere.local"
46 | password: "{{ opinionated.master_password }}"
47 | datacenter: "Lab" # DC to create after deployment
48 | # Below are properties of parent cluster
49 | hosting_network: "{{ opinionated.hosting_network.base.port_group }}" # Parent port group where the vCenter VM will be deployed
50 | hosting_cluster: "{{ opinionated.hosting_cluster }}" # Parent cluster where the vCenter VM will be deployed
51 | hosting_datastore: "{{ opinionated.hosting_datastore }}" # Parent datastore where the vCenter VM will be deployed
52 |
53 | nested_clusters: # You can add clusters in this section by duplicating the existing cluster
54 | compute: # This will be the name of the cluster in the nested vCenter. Below are the minimum settings.
55 | # Below are properties of the hosting cluster
56 | hosting_cluster: "{{ opinionated.hosting_cluster }}" # The nested ESXi VMs will be deployed here
57 | hosting_datastore: "{{ opinionated.hosting_datastore }}" # Datastore target for nested ESXi VMs
58 | # Settings below are assigned to each host in the cluster
59 | cpu_cores: "{{ opinionated.nested_hosts.cpu_cores }}" # CPU count
60 | ram_in_gb: "{{ opinionated.nested_hosts.ram_in_gb }}" # memory
61 | # Added in vmnic order, these port groups must exist on the physical host
62 | # Must specify at least 2 port groups, up to a maximum of 10
63 | vmnic_physical_portgroup_assignment:
64 | - name: "{{ opinionated.hosting_network.base.port_group }}"
65 | - name: "{{ opinionated.hosting_network.base.port_group }}"
66 |
67 | opinionated_host_ip_ofset: 1
68 | # You can add nested ESXi hosts below
69 | nested_hosts: >-
70 | [
71 | {% for host_number in range(opinionated.number_of_hosts) %}
72 | {
73 | "name": "esx{{ host_number + 1 }}",
74 | "ip": "{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(opinionated_host_ip_ofset + host_number) }}",
75 | "mask": "{{ opinionated.hosting_network.base.cidr | ansible.utils.ipaddr('netmask') }}",
76 | "gw": "{{ opinionated.hosting_network.base.gateway }}",
77 | "nested_cluster": "compute"
78 |
79 | },
80 | {% endfor %}
81 | ]
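# With number_of_hosts: 1 and the starting_addr above, this renders to:
# [{"name": "esx1", "ip": "192.168.0.181", "mask": "255.255.252.0",
#   "gw": "192.168.0.1", "nested_cluster": "compute"}]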
82 |
--------------------------------------------------------------------------------
/var-examples/nsxt/README.md:
--------------------------------------------------------------------------------
1 | # NSX-T
2 |
3 | ## Depends on
4 | All base instructions. It is recommended to attempt a minimal deployment first to become comfortable with the process.
5 |
6 | ## Tested Versions
7 | - NSX-T 3.0.3, 3.1.3, 3.2.3, 4.0.1
8 | - NSX-T 4.1 is not currently supported due to breaking changes in the transport_zone module
9 | - vSphere 7.0 U3 and 8.0 U1
10 |
11 | ## Additional Dependencies
12 | - The NSX-T ISO must be added to your software directory and the filename updated in the vars file.
13 | - You need a valid NSX-T license.
14 | - On top of the standard routed network, you need a port group to use for the overlay, which does not need to be routable unless you want to run multiple nested ESXi hosts.
15 | - After the deployment you will need to add a static route to the T0 gateway uplink for any addresses that will be behind NSX-T.
16 |
17 | ## Architecture
18 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
19 |
20 |
21 | ```mermaid
22 | flowchart LR
23 | router_net("Routed\nNetwork")
24 | esxi_host["Physical\nESXi Host"]
25 | base_pg("Base Port Group\n(Routed)")
26 | tep_pg("TEP Port Group\n(Private)")
27 | nested_host["Nested\nESXi Host"]
28 | vcenter["vCenter"]
29 | nsx_mgr[NSX Manager]
30 | base_vss("VM network\nStandard Switch")
31 | nsx_vds(NSX Overlay\nSwitch)
32 | nsx_edge[NSX Edge]
33 |
34 | router_net --- esxi_host
35 | esxi_host --- base_pg & tep_pg
36 | base_pg -- ESXi MGMT\n&\nVM Network --- nested_host
37 | tep_pg -- NSX\nOverlay --- nested_host
38 | base_pg --- vcenter & nsx_mgr
39 | nested_host --- base_vss & nsx_vds
40 | base_vss & nsx_vds --- nsx_edge
41 |
42 | linkStyle 2,4,8 stroke:#f00
43 |
44 | style router_net fill:#aaa
45 | style base_pg fill:#aaa
46 | style tep_pg fill:#aaa
47 | style base_vss fill:#aaa
48 | style nsx_vds fill:#aaa
49 | style esxi_host fill:#0ff
50 | style nested_host fill:#0c0
51 | style vcenter fill:#0c0
52 | style nsx_mgr fill:#0c0
53 | style nsx_edge fill:#FBCEB1
54 | ```
55 |
56 | - The NSX-T Manager VM will be deployed as a standard VM on your physical host.
57 | - A single vCenter will be added.
58 | - All components will be added to a single nested ESXi host. This can be customized by editing the yaml.
59 | - A single T0 gateway will be deployed and the T0 uplink will share the same network as the management interfaces on vmnic0.
60 | - If you want more than 1 nested host, your TEP network should be set to an MTU of at least 1600 to allow the hosts to communicate.
61 | - The TEP network is used twice on the nested ESXi hosts because the edge TEP port group cannot be on the same VDS that will be used by the host transport nodes.
62 |
63 | ## Instructions
64 | In addition to the base instructions you will need to export the NSX-T license key as a variable called `NSXT_LICENSE_KEY`. E.g.
65 | ```
66 | export NSXT_LICENSE_KEY=AAAAA-BBBBB-CCCCC-DDDDD-EEEEE
67 | ```
68 | You can now use the run command from the base instructions pointing to your updated nsxt vars file.
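For example, from the repository root (mirroring the test scripts in this repo):
```
ansible-playbook deploy.yml --extra-vars "@var-examples/nsxt/opinionated.yml"
```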
69 |
70 | ## IP Assignment on opinionated deployment
71 |
72 | - vCenter = `hosting_network.base.starting_addr`
73 | - NSX Manager = `hosting_network.base.starting_addr + 1`
74 | - Router uplink = `hosting_network.base.starting_addr + 3`
75 | - First ESXi host = `hosting_network.base.starting_addr + 8` (see the sketch below)
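These offsets can be derived with the `ansible.utils.ipmath` filter, as the opinionated vars files do for host IPs. A sketch, assuming `starting_addr: "192.168.0.180"` as in the examples (the variable names on the left are illustrative only):
```yaml
nsx_manager_ip: "{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(1) }}"   # 192.168.0.181
router_uplink_ip: "{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(3) }}" # 192.168.0.183
first_esxi_ip: "{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(8) }}"    # 192.168.0.188
```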
76 |
77 | ## Known Issues
78 | - Creation of the first VLAN segments can take some time whilst the Transport Zones are configured.
79 | - A number of Ansible for NSX-T modules are not properly idempotent and report changed even though no change has been made.
--------------------------------------------------------------------------------
/var-examples/tanzu/application-service/README.md:
--------------------------------------------------------------------------------
1 | # Tanzu Application Service with NSX-T
2 |
3 | This pattern will deploy NSX-T and TAS.
4 |
5 | ## Tested Versions
6 | - TAS 2.11, 2.13 and 4.0
7 | - NSX-T 3.2.3 & 4.0.1
8 | - vSphere 7.0 U3k
9 |
10 | ## Dependencies
11 | - The NSX-T OVA, Opsman OVA, TAS tile and Stemcell must be added to your software directory and the filenames updated in the vars file.
12 | - You need a valid NSX-T license.
13 | - On top of the standard routed network, you need a port group to use for the overlay, which does not need to be routable.
14 | - During the deployment you will need to add a static route to the T0 gateway uplink for any addresses that will be behind NSX-T.
15 | - The OM CLI is used for all TAS related tasks. This is included in v8+ of the docker image.
16 |
17 | ## Architecture
18 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
19 |
20 | ```mermaid
21 | flowchart LR
22 | router_net("Routed\nNetwork")
23 | esxi_host["Physical\nESXi Host"]
24 | base_pg("Base Port Group\n(Routed)")
25 | tep_pg("TEP Port Group\n(Private)")
26 | nested_host["Nested\nESXi Host"]
27 | vcenter["vCenter"]
28 | nsx_mgr[NSX Manager]
29 | base_vss("VM network\nStandard Switch")
30 | nsx_vds(NSX Overlay\nSwitch)
31 | nsx_edge[NSX Edge]
32 | tas_vms[TAS VMs]
33 |
34 | router_net --- esxi_host
35 | esxi_host --- base_pg & tep_pg
36 | base_pg -- ESXi MGMT\n&\nVM Network ---- nested_host
37 | tep_pg -- NSX\nOverlay --- nested_host
38 | base_pg --- vcenter & nsx_mgr
39 | nested_host --- base_vss & nsx_vds
40 | base_vss & nsx_vds --- nsx_edge
41 | nsx_vds --- tas_vms
42 |
43 | linkStyle 2,4,8,10 stroke:#f00
44 |
45 | style router_net fill:#aaa
46 | style base_pg fill:#aaa
47 | style tep_pg fill:#aaa
48 | style base_vss fill:#aaa
49 | style nsx_vds fill:#aaa
50 | style esxi_host fill:#0ff
51 | style nested_host fill:#0c0
52 | style vcenter fill:#0c0
53 | style nsx_mgr fill:#0c0
54 | style nsx_edge fill:#FBCEB1
55 | style tas_vms fill:#FBCEB1
56 | ```
57 |
60 | - The NSX-T Manager VM will be deployed as a standard VM on your physical host.
61 | - A single vCenter will be added and attached to the physical host.
62 | - All components will be added to a single nested ESXi host. This can be customized by editing the yaml.
63 | - A single T0 gateway will be deployed and the T0 uplink will share the same network as the management interfaces on vmnic0.
64 | - If you want more than 1 nested host, your TEP network should be set to an MTU of at least 1600 to allow the nested ESXi hosts to communicate, and you must switch to using NFS shared storage as per [this example](../../base-vsphere/minimal-opinionated-nfs.yml).
65 | - During setup you will be prompted to add a static route to the T0 uplink once it has been created.
66 | - A single T1 router and segment will be added, which will be used to host Opsman, Bosh, TAS.
67 |
68 | ## Instructions
69 | In addition to the base instructions you will need to export the NSX-T license key as a variable called `NSXT_LICENSE_KEY`. E.g.
70 | ```
71 | export NSXT_LICENSE_KEY=AAAAA-BBBBB-CCCCC-DDDDD-EEEEE
72 | ```
73 | You can now use the run command from the base instructions pointing to your updated nsxt vars file.
74 |
75 | ## IP Assignment on opinionated deployment
76 |
77 | - vCenter = `hosting_network.base.starting_addr`
78 | - NSX Manager = `hosting_network.base.starting_addr + 1`
79 | - Router uplink = `hosting_network.base.starting_addr + 3`
80 | - First ESXi host = `hosting_network.base.starting_addr + 8`
81 | - Opsman = `opinionated.tas.routeable_super_net + 2`
82 |
83 | ## Troubleshooting
84 | - Creation of the first VLAN segments can take some time whilst the Transport Zones are configured. If this fails on the first attempt, retry the playbook and open an issue to report the wait time needs increasing.
85 | - A number of Ansible for NSX-T modules are not properly idempotent and report changed even though no change has been made.
--------------------------------------------------------------------------------
/var-examples/tanzu/application-service/opinionated-not-nested.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SOFTWARE_DIR must contain all required software
3 | opsman_ova: "{{ lookup('env', 'SOFTWARE_DIR') }}/ops-manager-vsphere-3.0.25+LTS-T.ova"
4 | tas_tile: "{{ lookup('env', 'SOFTWARE_DIR') }}/srt-4.0.20-build.2.pivotal"
5 | tas_stemcell: "{{ lookup('env', 'SOFTWARE_DIR') }}/bosh-stemcell-1.423-vsphere-esxi-ubuntu-jammy-go_agent.tgz"
6 |
7 | environment_tag: "tas-direct" # Prepended to object names in the hosting vCenter
8 | dns_server: "192.168.0.1"
9 | dns_domain: "home.local"
10 | ntp_server_ip: "192.168.0.1" # Must be set to an IP address!
11 | disk_mode: thin # How all disks should be deployed
12 |
13 | hosting_vcenter: # This is the vCenter which will be the target for nested vCenters and ESXi hosts
14 | ip: "vcsa.lab"
15 | username: "{{ lookup('env', 'PARENT_VCENTER_USERNAME') }}"
16 | password: "{{ lookup('env', 'PARENT_VCENTER_PASSWORD') }}"
17 | datacenter: "Home" # Target for all VM deployment
18 |
19 | # This section is only referenced by other variables in this file
20 | opinionated:
21 | master_password: "VMware1!"
22 | hosting_cluster: Physical
23 | hosting_datastore: NVME
24 | hosting_network:
25 | base:
26 | port_group: Nest
27 | cidr: "192.168.0.0/22"
28 | gateway: "192.168.0.1"
29 | # TAS requires XXX IP addresses!
30 | starting_addr: "192.168.1.210"
31 | tas:
32 | bosh_reserved_ip_ranges: "192.168.0.2-192.168.1.210,192.168.1.215-192.168.3.254"
33 | deployment_reserved_ip_ranges: "192.168.0.2-192.168.1.215,192.168.1.239-192.168.3.254"
34 |
35 | ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC/qMIbcfUkClzrBi7QjUmtdfpjDlTSXsPcJsmdmezbem2SboQPujpFUGmzFXi5PqhPa3QOxvMn8oJpozlJYydDspFAAqX+0u8n0FrwMMn0ckwUtF4zxz/AnI8tUltjeN67M7mG+3W8ooRFeaY6nrjxEIXbkMOzSbeTaFcB9cDzRXcMmFiuoeDhUonEYbClrsPzLdzscw7Vz6BLMHwo9SBOqlaL25aG/DtZPB7jobQRh48q3RW+Hb29nH18biJNLiBDsRu1SJIzw7Dl+rNSX9Va56vX0cz5CDiTg9A69WgySDx7oZFZM3W6jyHCN0fQbw/OxE/2MemhfQbdJqgffA7zE0qx+/DVIoT5bBP7FPpQSxkMjC+AEAH4PPsDIP0KJH9do6+T/g/Mf6725IOaPSJyV0bAkz1T57T1ty19KdhvPVfjrMnCap+pLvD6Qd/lxzY9uFF4oOwWmo4xQ6hQXZ9oJUhrRsG2A5LF7XGXwOb8mHSG4InfUfjaov4gjLl5tN0=
36 |
37 | #####################################################################
38 | ### No need to edit below this line for an opinionated deployment ###
39 | #####################################################################
40 | tanzu_opsman_bosh:
41 | vm_name: "{{ environment_tag }}-ops-manager"
42 | hostname: "{{ environment_tag }}-ops-manager"
43 | opsman_username: admin
44 | opsman_password: "{{ opinionated.master_password }}"
45 | ssh_public_key: "{{ opinionated.ssh_public_key }}"
46 |
47 | opsman_ip: "{{ opinionated.hosting_network.base.starting_addr }}" #| ansible.utils.ipmath(2)
48 | netmask: "{{ opinionated.hosting_network.base.cidr | ansible.utils.ipaddr('netmask') }}"
49 | gateway: "{{ opinionated.hosting_network.base.gateway }}"
50 | network: "{{ opinionated.hosting_network.base.port_group }}"
51 | dns: "{{ dns_server }}"
52 | ntp: "{{ ntp_server_ip }}"
53 |
54 | bosh_template_file: p-bosh.yml
55 |
56 | bosh_networks:
57 | - name: infra
58 | subnets:
59 | - iaas_identifier: "{{ opinionated.hosting_network.base.port_group }}"
60 | cidr: "{{ opinionated.hosting_network.base.cidr }}"
61 | dns: "{{ dns_server }}"
62 | gateway: "{{ opinionated.hosting_network.base.gateway }}"
63 | reserved_ip_ranges: >-
64 | {{ opinionated.hosting_network.base.gateway }}-{{ opinionated.hosting_network.base.starting_addr }},{{
65 | opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(6) }}-{{
66 | opinionated.hosting_network.base.cidr | ansible.utils.ipaddr('-2') | ansible.utils.ipaddr('address') }}
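# With the example values above, this expression renders to
#   192.168.0.1-192.168.1.210,192.168.1.216-192.168.3.254
# leaving 192.168.1.211-192.168.1.215 for BOSH-managed VMs on the infra network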
67 | availability_zone_names:
68 | - az1
69 | - name: tas-deployment
70 | subnets:
71 | - iaas_identifier: "{{ opinionated.hosting_network.base.port_group }}"
72 | cidr: "{{ opinionated.hosting_network.base.cidr }}"
73 | dns: "{{ dns_server }}"
74 | gateway: "{{ opinionated.hosting_network.base.gateway }}"
75 | # reserved_ip_ranges: "{{ opinionated.tas.deployment_reserved_ip_ranges }}"
76 | reserved_ip_ranges: >-
77 | {{ opinionated.hosting_network.base.gateway }}-{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(5) }},{{
78 | opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(20) }}-{{
79 | opinionated.hosting_network.base.cidr | ansible.utils.ipaddr('-2') | ansible.utils.ipaddr('address') }}
80 | availability_zone_names:
81 | - az1
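# With the example values above, the deployment network reserves
#   192.168.0.1-192.168.1.215,192.168.1.230-192.168.3.254
# leaving 192.168.1.216-192.168.1.229 (which includes the gorouter IP) for TAS VMs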
82 |
83 | vcenter_url: "{{ hosting_vcenter.ip }}"
84 | vcenter_cluster: "{{ opinionated.hosting_cluster }}"
85 | vcenter_datacenter: "{{ hosting_vcenter.datacenter }}"
86 | vcenter_datastore: "{{ opinionated.hosting_datastore }}"
87 | vcenter_folder: "/{{ hosting_vcenter.datacenter }}/vm"
88 | vcenter_username: "{{ hosting_vcenter.username }}"
89 | vcenter_password: "{{ hosting_vcenter.password }}"
90 |
91 | tanzu_application_service:
92 | tas_template_file: tas-nsx.yml
93 | # install_nsx_tile: true
94 | deployment_network: tas-deployment
95 | apps_domain: >-
96 | apps.{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(19) }}.nip.io
97 | sys_domain: >-
98 | sys.{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(19) }}.nip.io
99 | uaa_domain: >-
100 | login.sys.{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(19) }}.nip.io
101 | credhub_key: "{{ opinionated.master_password }}{{ opinionated.master_password }}{{ opinionated.master_password }}{{ opinionated.master_password }}"
102 | gorouter_ip: "{{ opinionated.hosting_network.base.starting_addr | ansible.utils.ipmath(19) }}"
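# nip.io provides wildcard DNS: any name ending in .<ip>.nip.io resolves to <ip>, so
# the apps/sys/uaa domains above all resolve to the gorouter at starting_addr + 19
# (192.168.1.229 with the example values) without needing a local DNS server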
103 |
--------------------------------------------------------------------------------
/var-examples/tanzu/integrated-nsxt/README.md:
--------------------------------------------------------------------------------
1 | # Tanzu Kubernetes Grid Integrated with NSX-T
2 |
3 | This pattern will deploy NSX-T and TKGi.
4 |
5 | ## Tested Versions
6 | - TKGi 1.13 + NSX-T 3.1.3
7 | - TKGi 1.16 + NSX-T 3.2.3
8 | - vSphere 7.0 U3k
9 |
10 | ## Dependencies
11 | - The NSX-T OVA, Opsman OVA, Harbor tile and TKGi tile must be added to your software directory and the filenames updated in the vars file.
12 | - You need a valid NSX-T license.
13 | - On top of the standard routed network, you need a port group to use for the overlay, which does not need to be routable.
14 | - During the deployment you will need to add a static route to the T0 gateway uplink for any addresses that will be behind NSX-T (see the example after this list).
15 | - The OM CLI is used for all TKGi-related tasks. It is included in v8+ of the Docker image.
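
A minimal sketch of that static route, assuming a Linux-based router; the super-net and uplink IP are illustrative placeholders (per the IP assignment section below, the T0 uplink sits on `starting_addr + 3`):
```
# route the NSX-T super-net via the T0 uplink IP
sudo ip route add 172.16.0.0/16 via 192.168.1.213
```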
16 |
17 | ## Architecture
18 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
19 |
20 | ```mermaid
21 | flowchart LR
22 | router_net("Routed\nNetwork")
23 | esxi_host["Physical\nESXi Host"]
24 | base_pg("Base Port Group\n(Routed)")
25 | tep_pg("TEP Port Group\n(Private)")
26 | nested_host["Nested\nESXi Host"]
27 | vcenter["vCenter"]
28 | nsx_mgr[NSX Manager]
29 | base_vss("VM network\nStandard Switch")
30 | nsx_vds(NSX Overlay\nSwitch)
31 | nsx_edge[NSX Edge]
32 | tkgi_vms[TKGi VMs]
33 |
34 | base_vss & nsx_vds --- nsx_edge
35 | router_net --- esxi_host
36 | esxi_host --- base_pg & tep_pg
37 | base_pg -- ESXi MGMT\n&\nVM Network ---- nested_host
38 | tep_pg -- NSX\nOverlay --- nested_host
39 | base_pg --- vcenter & nsx_mgr
40 | nested_host --- base_vss & nsx_vds
41 | nsx_vds --- tkgi_vms
42 |
43 | linkStyle 2,4,8,10 stroke:#f00
44 |
45 | style router_net fill:#aaa
46 | style base_pg fill:#aaa
47 | style tep_pg fill:#aaa
48 | style base_vss fill:#aaa
49 | style nsx_vds fill:#aaa
50 | style esxi_host fill:#0ff
51 | style nested_host fill:#0c0
52 | style vcenter fill:#0c0
53 | style nsx_mgr fill:#0c0
54 | style nsx_edge fill:#FBCEB1
55 | style tkgi_vms fill:#FBCEB1
56 | ```
57 |
58 |
59 | - The NSX-T Manager VM will be deployed as a standard VM on your physical host.
60 | - A single vCenter will be added and attached to the physical host.
61 | - All components will be added to a single nested ESXi host. This can be customized by editing the yaml.
62 | - A single T0 gateway will be deployed and its uplink will share the same network as the management interfaces on vmnic0.
63 | - If you want to have more than 1 nested host, your TEP network should be set to an MTU of at least 1600 to allow the nested ESXi hosts to communicate (see the MTU sketch after this list), and you must switch to using NFS shared storage as per [this example](../../base-vsphere/minimal-opinionated-nfs.yml).
64 | - During setup you will be prompted to add a static route to the T0 uplink once it is created.
65 | - A single T1 router and segment will be added, which will be used to host Opsman, Bosh, TKGi API/DB and Harbor.
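
A sketch of raising the MTU on the parent host, assuming the TEP port group lives on a standard vSwitch named `vSwitch0` (adjust the switch name to your environment):
```
# run on the physical ESXi host
esxcli network vswitch standard set --vswitch-name=vSwitch0 --mtu=1600
```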
66 |
67 | ## Instructions
68 | In addition to the base instructions, you will need to export the NSX-T license key as a variable called `NSXT_LICENSE_KEY`. E.g.
69 | ```
70 | export NSXT_LICENSE_KEY=AAAAA-BBBBB-CCCCC-DDDDD-EEEEE
71 | ```
72 | You can now use the run command from the base instructions, pointing it at your updated nsxt vars file.
73 |
74 | ## IP Assignment on opinionated deployment
75 |
76 | vCenter = `hosting_network.base.starting_addr`
77 | NSX Manager = `hosting_network.base.starting_addr + 1`
78 | Router uplink = `hosting_network.base.starting_addr + 3`
79 | First ESXi host = `hosting_network.base.starting_addr + 8`
80 | Opsman = `opinionated.tkgi.routeable_super_net + 2`
81 | Opsman = `opinionated.tkgi.routeable_super_net + 15`
82 |
83 | ## Troubleshooting
84 | - Creation of the first VLAN segments can take some time whilst the Transport Zones are configured.
85 | - A number of the Ansible for NSX-T modules are not properly idempotent and report `changed` even though no change has been made.
--------------------------------------------------------------------------------
/var-examples/tanzu/multi-cloud/README.md:
--------------------------------------------------------------------------------
1 | # Tanzu Multi-Cloud with Avi Load Balancer
2 | This will deploy using the standard 2-network topology.
3 |
4 | ## Tested Versions
5 | - NSX ALB Controller 22.1.4
6 | - Tanzu Kubernetes Grid 2.4.0
7 |
8 | ## Additional Dependencies
9 | In addition to the base dependencies, you will need to download and store the NSX-ALB OVA file in your software directory:
10 | - [Tanzu download page](https://my.vmware.com/en/group/vmware/downloads/info/slug/infrastructure_operations_management/vmware_tanzu_kubernetes_grid/1_x)
11 |
12 | ## Architecture Nested
13 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
14 |
15 | ```mermaid
16 | flowchart LR
17 | router_net("Routed\nNetwork")
18 | esxi_host["Physical\nESXi Host"]
19 | base_pg("Base\nPort Group")
20 | trunk_pg("Trunk\nPort Group")
21 | nested_host["Nested\nESXi Host"]
22 | vcenter["vCenter"]
23 | nsx_alb_cont["NSX-ALB\nControllers"]
24 | base_vss("VM network\nStandard Switch")
25 | trunk_vds("Trunk\nDistributed Switch")
26 | nsx_seg["NSX-ALB\nSE Group"]
27 | tkg_vms["TKG VMs"]
28 |
29 | router_net --- esxi_host
30 | esxi_host --- base_pg & trunk_pg
31 | base_pg -- ESXi MGMT\n&\nVM Network ---- nested_host
32 | trunk_pg -- "Trunked Node\n& VIP VLANs" --- nested_host
33 | base_pg --- vcenter & nsx_alb_cont
34 | nested_host --- base_vss & trunk_vds
35 | base_vss & trunk_vds --- nsx_seg
36 | trunk_vds --- tkg_vms
37 |
38 | linkStyle 2,4,8,10,11 stroke:#00f
39 |
40 | style router_net fill:#aaa
41 | style base_pg fill:#aaa
42 | style trunk_pg fill:#aaa
43 | style base_vss fill:#aaa
44 | style trunk_vds fill:#aaa
45 | style esxi_host fill:#0ff
46 | style nested_host fill:#0c0
47 | style vcenter fill:#0c0
48 | style nsx_alb_cont fill:#0c0
49 | style nsx_seg fill:#FBCEB1
50 | style tkg_vms fill:#FBCEB1
51 | ```
52 |
53 |
54 |
55 | - A single vCenter will be added.
56 | - 2 networks are required.
57 | - The base network must be a standard port group, where VMs can attach. This will appear as `vm-network` in the nested cluster.
58 | - The workload network can be on a standard port group or a trunk port group, where the nested host will add a VLAN tag. This will appear as `workload-pg` in the nested cluster.
59 | - DHCP must be set up on the workload network.
60 | - NSX-ALB Controllers and Service Engine management interfaces will be added to `vm-network` on the 2nd and 3rd IPs after the starting address.
61 |
62 | ## Architecture Not Nested
63 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
64 |
65 | ```mermaid
66 | flowchart LR
67 | router_net("Routed\nNetwork")
68 | esxi_host["Physical\nESXi Host"]
69 | base_pg("Base\nPort Group")
70 | nsx_alb_cont["NSX-ALB\nControllers"]
71 | nsx_seg["NSX-ALB\nSE Group"]
72 | tkg_vms["TKG VMs"]
73 |
74 | router_net --- esxi_host
75 | esxi_host --- base_pg
76 | base_pg --- nsx_alb_cont & nsx_seg & tkg_vms
77 |
78 |
79 | style router_net fill:#aaa
80 | style base_pg fill:#aaa
81 | style esxi_host fill:#0ff
82 | style nsx_alb_cont fill:#0c0
83 | style nsx_seg fill:#FBCEB1
84 | style tkg_vms fill:#FBCEB1
85 | ```
86 |
87 |
88 |
89 | - 1 network is required.
90 | - The base network must be a standard/distributed port group, where VMs can attach.
91 | - DHCP must be set up on the workload network to use the default TKGM management cluster yaml, although it can be modified to use node IPAM.
92 | - NSX-ALB Controllers and Service Engine management interfaces will be added to `vm-network` on the 2nd and 3rd IPs after the starting address.
93 |
94 | ## Instructions
95 | In addition to the base instructions, you will need to export the NSX-ALB (Avi) default password, which can be found on the Controller OVA download page. E.g.
96 | ```
97 | export AVI_DEFAULT_PASSWORD=#######
98 | ```
99 | You can now use the run command from the base instructions, pointing it at your updated vars file.
100 |
101 | ## IP Assignment on opinionated deployment
102 |
103 | vCenter = `hosting_network.base.starting_addr`
104 | Avi Controller = `hosting_network.base.starting_addr + 1`
105 | First ESXi host = `hosting_network.base.starting_addr + 4`
106 |
107 | ## Known Issues
108 | - Creation of the first VLAN segments can take some time whilst the Transport Zones are configured.
109 | - A number of the Ansible for NSX-T modules are not properly idempotent and report `changed` even though no change has been made.
--------------------------------------------------------------------------------
/var-examples/tanzu/vsphere-nsxt/README.md:
--------------------------------------------------------------------------------
1 | # Tanzu Kubernetes for vSphere with NSX-T Networking
2 | This will deploy NSX-T and enable Workload Management, aka deploy the Supervisor Cluster.
3 |
4 | ## Tested Versions
5 | - NSX-T 3.1.3, 3.2.3, 4.0.1
6 | - NSX-T 4.1 is not currently supported due to breaking changes in the transport_zone module
7 | - vSphere 7.0 U3c and 8.0 U1 (with NSX 4.0.1)
8 |
9 | ## Dependencies
10 | - The NSX-T ISO must be added to your software directory and the filename updated in the vars file.
11 | - You need a valid NSX-T license.
12 | - On top of the standard routed network, you need a port group to use for the overlay, which does not need to be routable.
13 | - After the deployment you will need to add a static route to the T0 gateway uplink for any addresses that will be behind NSX-T.
14 |
15 | ## Architecture
16 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
17 |
18 | ```mermaid
19 | flowchart LR
20 | router_net("Routed\nNetwork")
21 | esxi_host["Physical\nESXi Host"]
22 | base_pg("Base Port Group\n(Routed)")
23 | tep_pg("TEP Port Group\n(Private)")
24 | nested_host["Nested\nESXi Host"]
25 | vcenter["vCenter"]
26 | nsx_mgr[NSX Manager]
27 | base_vss("VM network\nStandard Switch")
28 | nsx_vds(NSX Overlay\nSwitch)
29 | nsx_edge[NSX Edge]
30 | supervisor[Supervisor\nCluster]
31 |
32 | router_net --- esxi_host
33 | esxi_host --- base_pg & tep_pg
34 | base_pg -- ESXi MGMT\n&\nVM Network ---- nested_host
35 | tep_pg -- NSX\nOverlay --- nested_host
36 | base_pg --- vcenter & nsx_mgr
37 | nested_host --- base_vss & nsx_vds
38 | base_vss & nsx_vds --- nsx_edge
39 | nsx_vds ---supervisor
40 |
41 | linkStyle 2,4,8,10 stroke:#f00
42 |
43 | style router_net fill:#aaa
44 | style base_pg fill:#aaa
45 | style tep_pg fill:#aaa
46 | style base_vss fill:#aaa
47 | style nsx_vds fill:#aaa
48 | style esxi_host fill:#0ff
49 | style nested_host fill:#0c0
50 | style vcenter fill:#0c0
51 | style nsx_mgr fill:#0c0
52 | style nsx_edge fill:#FBCEB1
53 | style supervisor fill:#FBCEB1
54 | ```
55 |
56 |
57 | - The NSX-T Manager VM will be deployed as a standard VM on your physical host.
58 | - A single vCenter will be added and attached to the physical host.
59 | - All components will be added to a single nested ESXi host. This can be customized by editing the yaml.
60 | - A single T0 gateway will be deployed and its uplink will share the same network as the management interfaces on vmnic0.
61 | - If you want to have more than 1 nested host, your TEP network should be set to an MTU of at least 1600 to allow the nested ESXi hosts to communicate, and you must switch to using NFS shared storage as per [this example](../../base-vsphere/minimal-opinionated-nfs.yml).
62 | - During setup you will be prompted to add a static route to the T0 uplink once it is created.
63 | - A single T1 router and segment will be added, which will be used to host the supervisor cluster.
64 |
65 | ## Instructions
66 | In addition to the base instructions, you will need to export the NSX-T license key as a variable called `NSXT_LICENSE_KEY`. E.g.
67 | ```
68 | export NSXT_LICENSE_KEY=AAAAA-BBBBB-CCCCC-DDDDD-EEEEE
69 | ```
70 | You can now use the run command from the base instructions, pointing it at your updated nsxt vars file.
71 |
72 | ## IP Assignment on opinionated deployment
73 |
74 | vCenter = `hosting_network.base.starting_addr`
75 | NSX Manager = `hosting_network.base.starting_addr + 1`
76 | Router uplink = `hosting_network.base.starting_addr + 3`
77 | First ESXi host = `hosting_network.base.starting_addr + 8`
78 |
79 | ## Troubleshooting
80 | - During Supervisor Cluster setup the API will return errors for an extended period. The module will tolerate up to 150 seconds of errors; if the playbook ends with an error, check the UI to see if the action is progressing.
81 | - Creation of the first VLAN segments can take some time whilst the Transport Zones are configured.
82 | - A number of the Ansible for NSX-T modules are not properly idempotent and report `changed` even though no change has been made.
--------------------------------------------------------------------------------
/var-examples/tanzu/vsphere-vds-alb/README.md:
--------------------------------------------------------------------------------
1 | # Tanzu Kubernetes for vSphere with Distributed Switching
2 | This will deploy using the standard 2-network topology.
3 |
4 | ## Tested Versions
5 | - vSphere 7.0 U3i and 8.0 U1
6 | - NSX ALB Controller 22.1.5
7 |
8 | ## Dependencies
9 | In addition to the base dependencies, you will need to download and store the NSX-ALB OVA file in your software directory:
10 | - [Tanzu download page](https://my.vmware.com/en/group/vmware/downloads/info/slug/infrastructure_operations_management/vmware_tanzu_kubernetes_grid/1_x)
11 |
12 |
13 | ## Architecture
14 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
15 |
16 | ```mermaid
17 | flowchart LR
18 | router_net("Routed\nNetwork")
19 | esxi_host["Physical\nESXi Host"]
20 | base_pg("Base\nPort Group")
21 | trunk_pg("Trunk\nPort Group")
22 | nested_host["Nested\nESXi Host"]
23 | vcenter["vCenter"]
24 | nsx_alb_cont[NSX-ALB Controllers]
25 | base_vss("VM network\nStandard Switch")
26 | trunk_vds("Trunk\nDistributed Switch")
27 | nsx_seg["NSX-ALB\nSE Group"]
28 | tkg_vms["TKG VMs"]
29 |
30 | router_net --- esxi_host
31 | esxi_host --- base_pg & trunk_pg
32 | base_pg -- ESXi MGMT\n&\nVM Network ---- nested_host
33 | trunk_pg -- Trunked Node\n& VIP VLANs --- nested_host
34 | base_pg --- vcenter & nsx_alb_cont
35 | nested_host --- base_vss & trunk_vds
36 | base_vss & trunk_vds --- nsx_seg
37 | trunk_vds --- tkg_vms
38 |
39 | linkStyle 2,4,8,10,11 stroke:#00f
40 |
41 | style router_net fill:#aaa
42 | style base_pg fill:#aaa
43 | style trunk_pg fill:#aaa
44 | style base_vss fill:#aaa
45 | style trunk_vds fill:#aaa
46 | style esxi_host fill:#0ff
47 | style nested_host fill:#0c0
48 | style vcenter fill:#0c0
49 | style nsx_alb_cont fill:#0c0
50 | style nsx_seg fill:#FBCEB1
51 | style tkg_vms fill:#FBCEB1
52 | ```
53 |
54 |
55 |
56 | - A single vCenter will be added.
57 | - 2 networks are required.
58 | - The base network must be a standard port group, where VMs can attach. This will appear as `vm-network` in the nested cluster.
59 | - The workload network can be on a standard port group or a trunk port group, where the nested host will add a VLAN tag. This will appear as `workload-pg` in the nested cluster.
60 | - NSX-ALB Controllers and Service Engine management interfaces will be added to `vm-network` on the 2nd and 3rd IPs after the starting address.
61 |
62 | ## IP Assignment on opinionated deployment
63 |
64 | vCenter = `hosting_network.base.starting_addr`
65 | Avi Controller = `hosting_network.base.starting_addr + 1`
66 | First ESXi host = `hosting_network.base.starting_addr + 8`
67 |
68 | ## Troubleshooting
69 | - During creation the API will return errors for an extended period. The module will tolerate up to 150 seconds of errors; if the playbook ends with an error, check the UI to see if the action is progressing.
70 |
71 | ## Roadmap
72 | - Add multi host option
73 | - Add functionality to check and apply updates
74 | - Add ability to shrink to 2 supervisors
75 | - https://www.virtuallyghetto.com/2020/04/deploying-a-minimal-vsphere-with-kubernetes-environment.html
76 |
--------------------------------------------------------------------------------
/var-examples/tanzu/vsphere-vds-haproxy/README.md:
--------------------------------------------------------------------------------
1 | # Tanzu Kubernetes for vSphere with Distributed Switching
2 | This will deploy using the standard 2-network topology.
3 |
4 | ## Dependencies
5 | In addition to the base dependencies, the following files need to be downloaded and stored in the software directory:
6 | - [VMware HA Proxy OVA](https://github.com/haproxytech/vmware-haproxy/releases/tag/v0.1.8)
7 |
8 | ## Architecture
9 | Below is the layout of the opinionated deployment, which can be customized by editing the vars file.
10 |
11 |
12 |
13 | ```mermaid
14 | flowchart LR
15 | router_net("Routed\nNetwork")
16 | esxi_host["Physical\nESXi Host"]
17 | base_pg("Base\nPort Group")
18 | trunk_pg("Trunk\nPort Group")
19 | nested_host["Nested\nESXi Host"]
20 | vcenter["vCenter"]
21 | base_vss("VM network\nStandard Switch")
22 | trunk_vds("Trunk\nDistributed Switch")
23 | nsx_seg["Haproxy\nVM"]
24 | tkg_vms["TKG VMs"]
25 |
26 | router_net --- esxi_host
27 | esxi_host --- base_pg & trunk_pg
28 | base_pg -- ESXi MGMT\n&\nVM Network ---- nested_host
29 | trunk_pg -- Trunked Node\n& VIP VLANs --- nested_host
30 | nested_host --- base_vss & trunk_vds
31 | base_vss & trunk_vds --- nsx_seg
32 | trunk_vds --- tkg_vms
33 |
34 | linkStyle 2,4,6,8,9 stroke:#00f
35 |
36 | style router_net fill:#aaa
37 | style base_pg fill:#aaa
38 | style trunk_pg fill:#aaa
39 | style base_vss fill:#aaa
40 | style trunk_vds fill:#aaa
41 | style esxi_host fill:#0ff
42 | style nested_host fill:#0c0
43 | style vcenter fill:#0c0
44 | style nsx_seg fill:#FBCEB1
45 | style tkg_vms fill:#FBCEB1
46 | ```
47 |
48 |
49 |
50 | - A single vCenter will be added.
51 | - 2 networks are required.
52 | - The base network must be a standard port group, where VMs can attach. This will appear as `vm-network` in the nested cluster.
53 | - The workload network can be on a standard port group or a trunk port group, where the nested host will add a VLAN tag. This will appear as `workload-pg` in the nested cluster.
54 | - The HAProxy VM's interfaces will be added to `vm-network` on the 2nd and 3rd IPs after the starting address.
55 |
56 |
57 | ## Routing
58 | By default a router will be created to bridge the workload and management networks. To access the deployed resources, you will need to add a static route to the router uplink, which is the next IP after `hosting_network.base.starting_addr` (a sketch follows below).
59 | If you want to provide your own routing, you can remove the `router` section under `tkgs` and then set up your own routing for the network defined in `tkgs_workload_cidr`.
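
A minimal sketch of that static route, assuming a Linux-based machine acts as your gateway; the workload CIDR and uplink IP are illustrative placeholders for your `tkgs_workload_cidr` and `starting_addr + 1` values:
```
sudo ip route add 10.10.10.0/24 via 192.168.1.211
```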
60 |
61 | ## IP Assignment on opinionated deployment
62 |
63 | vCenter = `hosting_network.base.starting_addr`
64 | Router uplink = `hosting_network.base.starting_addr + 1`
65 | First ESXi host = `hosting_network.base.starting_addr + 8`
66 |
67 | ## Troubleshooting
68 | - During creation the API will return errors for an extended period. The module will tolerate up to 150 seconds of errors; if the playbook ends with an error, check the UI to see if the action is progressing.
69 |
70 | ## Roadmap
71 | - Add multi host option
72 | - Add functionality to check and apply updates
73 | - Add ability to shrink to 2 supervisors
74 | - https://www.virtuallyghetto.com/2020/04/deploying-a-minimal-vsphere-with-kubernetes-environment.html
75 |
--------------------------------------------------------------------------------
/var-examples/vsphere-community-testing/wip.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # SOFTWARE_DIR must contain all required software
3 | vc_iso: "{{ lookup('env', 'SOFTWARE_DIR') }}/VMware-VCSA-all-7.0.1-17004997.iso"
4 | esxi_ova: "{{ lookup('env', 'SOFTWARE_DIR') }}/Nested_ESXi7.0.1_Appliance_Template_v1.ova"
5 |
6 | environment_tag: "vsphere-community" # Prepended to object names in the hosting vCenter
7 | dns_server: "192.168.0.110"
8 | domain: "home.local"
9 | ntp_server_ip: time1.google.com
10 | disk_mode: thin # How all disks should be deployed
11 | nested_host_password: "VMware1!"
12 |
13 | hosting_vcenter: # This is the vCenter which will be the target for nested vCenters and ESXi hosts
14 | ip: "vcsa.lab"
15 | user: "{{ lookup('env', 'PARENT_VCENTER_USERNAME') }}"
16 | password: "{{ lookup('env', 'PARENT_VCENTER_PASSWORD') }}"
17 | datacenter: "Home" # Target for all VM deployment
18 |
19 | nested_vcenter: # the vCenter appliance that will be deployed
20 | ip: "192.168.0.131" # vCenter ip address
21 | mask: "22"
22 | gw: "192.168.0.1"
23 | host_name: "192.168.0.131" # FQDN if there is a working DNS server, otherwise use the IP as the name
24 | user: "administrator@vsphere.local"
25 | password: "VMware1!"
26 | datacenter: "Lab" # DC to create after deployment
27 | # Below are properties of parent cluster
28 | hosting_network: "Net1" # Parent port group where the vCenter VM will be deployed
29 | hosting_cluster: "Physical" # Parent cluster where the vCenter VM will be deployed
30 | hosting_datastore: "NVME" # Parent datastore where the vCenter VM will be deployed
31 |
32 | nested_clusters: # You can add clusters in this section by duplicating the existing cluster
33 | vsphere-community: # This will be the name of the cluster in the nested vCenter. Below are the minimum settings.
34 | # Below are properties of the hosting cluster
35 | hosting_cluster: "Physical" # The nested ESXi VMs will be deployed here
36 | hosting_datastore: "NVME" # Datastore target for nested ESXi VMs
37 | # Settings below are assigned to each host in the cluster
38 | cpu_cores: 6 # CPU count
39 | ram_in_gb: 16 # memory
40 | # In vmnic order, these port groups must exist on the physical host
41 | # Must specify at least 2 port groups, up to a maximum of 10
42 | vmnic_physical_portgroup_assignment:
43 | - name: "Nest"
44 | - name: "Nest"
45 |
46 | # You can add nested ESXi hosts below
47 | nested_hosts:
48 | esx1: # environment_tag is prepended to this name to create the VM name
49 | ip: 192.168.0.132 # This will also be used as the hostname in the nested vCenter
50 | mask: 255.255.252.0
51 | gw: 192.168.0.1
52 | nested_cluster: vsphere-community # the nested vcenter cluster, which is defined in the clusters section.
53 | esx2: # environment_tag is prepended to this name to create the VM name
54 | ip: 192.168.0.133 # This will also be used as the hostname in the nested vCenter
55 | mask: 255.255.252.0
56 | gw: 192.168.0.1
57 | nested_cluster: vsphere-community # the nested vcenter cluster, which is defined in the clusters section.
58 |
--------------------------------------------------------------------------------