├── .gitignore
├── docker_image
│   ├── .dockerignore
│   ├── .gitattributes
│   ├── nsx-t-install-250.tar
│   ├── Dockerfile
│   └── run.sh
├── .lfsconfig
├── .gitattributes
├── tasks
│   ├── .DS_Store
│   ├── config-nsx-t-extras
│   │   ├── .DS_Store
│   │   ├── task.sh
│   │   ├── task.yml
│   │   └── client.py
│   ├── uninstall-nsx-t
│   │   ├── task.sh
│   │   └── task.yml
│   ├── add-nsx-t-routers
│   │   ├── task.yml
│   │   └── task.sh
│   └── install-nsx-t
│       ├── task.yml
│       ├── copy_and_customize_ovas.sh
│       ├── task.sh
│       ├── modify_options.py
│       ├── turn_off_reservation.py
│       └── get_mo_ref_id.py
├── sample_parameters
│   ├── raw
│   │   ├── combine_param_files.sh
│   │   ├── pks.yml
│   │   ├── nsx.yml
│   │   ├── pas.yml
│   │   └── pas_pks.yml
│   ├── PKS_only
│   │   └── nsx_pipeline_config.yml
│   ├── PAS_only
│   │   └── nsx_pipeline_config.yml
│   └── PAS_and_PKS
│       └── nsx_pipeline_config.yml
├── NOTICE
├── functions
│   ├── generate-keys.sh
│   ├── copy_ovas.sh
│   ├── set_default_params.py
│   └── create_hosts.sh
├── LICENSE
├── nsxt_yaml
│   ├── vars.yml
│   ├── basic_resources.yml
│   └── basic_topology.yml
├── doc
│   ├── Utilities-and-troubleshooting.md
│   ├── Homepage.md
│   ├── Network-prerequisites.md
│   ├── Deployment.md
│   └── Parameter-file.md
├── docker_compose
│   └── docker-compose.yml
├── CONTRIBUTING.md
├── README.md
└── pipelines
    └── nsx-t-install.yml
/.gitignore:
--------------------------------------------------------------------------------
1 | .DS_Store
2 | .idea/
3 |
--------------------------------------------------------------------------------
/docker_image/.dockerignore:
--------------------------------------------------------------------------------
1 | .git
2 |
--------------------------------------------------------------------------------
/.lfsconfig:
--------------------------------------------------------------------------------
1 | [lfs]
2 | fetchexclude = "*"
3 |
--------------------------------------------------------------------------------
/docker_image/.gitattributes:
--------------------------------------------------------------------------------
1 | *.tar filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | docker_image/nsx-t-install-250.tar filter=lfs diff=lfs merge=lfs -text
2 |
--------------------------------------------------------------------------------
/tasks/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vmware/nsx-t-datacenter-ci-pipelines/HEAD/tasks/.DS_Store
--------------------------------------------------------------------------------
/tasks/config-nsx-t-extras/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vmware/nsx-t-datacenter-ci-pipelines/HEAD/tasks/config-nsx-t-extras/.DS_Store
--------------------------------------------------------------------------------
/docker_image/nsx-t-install-250.tar:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:7d5b77cbaf4687db6d9f8bb8623f756cf2436d2f4f6e138dbce8b9e5d42139ff
3 | size 730155008
4 |
--------------------------------------------------------------------------------
/tasks/uninstall-nsx-t/task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 | export ROOT_DIR=`pwd`
5 |
6 | export TASKS_DIR=$(dirname $BASH_SOURCE)
7 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
8 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
9 |
10 | echo "Unimplemented !!"
11 |
12 | STATUS=$?
13 | popd >/dev/null 2>&1
14 |
15 | exit $STATUS
16 |
--------------------------------------------------------------------------------
/tasks/add-nsx-t-routers/task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | platform: linux
4 |
5 | image_resource:
6 | type: docker-image
7 | source:
8 | repository: projects.registry.vmware.com/nsxt_gen_pipeline/nsx-t-gen-worker
9 | tag: py3
10 |
11 | inputs:
12 | - name: nsx-t-gen-pipeline
13 | - name: nsxt-ansible
14 |
15 | run:
16 | path: nsx-t-gen-pipeline/tasks/add-nsx-t-routers/task.sh
17 |
--------------------------------------------------------------------------------
/tasks/install-nsx-t/task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | platform: linux
4 |
5 | image_resource:
6 | type: docker-image
7 | source:
8 | repository: projects.registry.vmware.com/nsxt_gen_pipeline/nsx-t-gen-worker
9 | tag: py3
10 |
11 | inputs:
12 | - name: nsx-t-gen-pipeline
13 | - name: nsxt-ansible
14 | - name: ovftool
15 |
16 | run:
17 | path: nsx-t-gen-pipeline/tasks/install-nsx-t/task.sh
18 |
--------------------------------------------------------------------------------
/tasks/config-nsx-t-extras/task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -e
3 |
4 |
5 | export ROOT_DIR=`pwd`
6 |
7 | export TASKS_DIR=$(dirname $BASH_SOURCE)
8 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
9 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
10 | export SCRIPT_DIR=$(dirname $0)
11 |
12 | python $TASKS_DIR/nsx_t_gen.py --router_config true --generate_cert false
13 |
14 | STATUS=$?
15 |
16 | exit $STATUS
17 |
--------------------------------------------------------------------------------
/sample_parameters/raw/combine_param_files.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | nsx_yaml="nsx.yml"
3 | nsx_yaml_tmp="nsx.yml.tmp"
4 | param_file="nsx_pipeline_config.yml"
5 | cp $nsx_yaml $nsx_yaml_tmp
6 | echo "" >> $nsx_yaml_tmp
7 | echo "" >> $nsx_yaml_tmp
8 | cat $nsx_yaml_tmp pks.yml > ../PKS_only/${param_file}
9 | cat $nsx_yaml_tmp pas.yml > ../PAS_only/${param_file}
10 | cat $nsx_yaml_tmp pas_pks.yml > ../PAS_and_PKS/${param_file}
11 | rm $nsx_yaml_tmp
12 |
--------------------------------------------------------------------------------
/tasks/uninstall-nsx-t/task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | platform: linux
4 |
5 | image_resource:
6 | type: docker-image
7 | source:
8 | repository: projects.registry.vmware.com/nsxt_gen_pipeline/nsx-t-gen-worker
9 | tag: py3
10 |
11 | # params:
12 | # VCENTER_HOST:
13 | # VCENTER_USR:
14 | # VCENTER_PWD:
15 |
16 |
17 | inputs:
18 | - name: nsx-t-gen-pipeline
19 |
20 | run:
21 | path: nsx-t-gen-pipeline/tasks/uninstall-nsx-t/task.sh
22 |
23 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | NSX-T-Data-Center-CI-Pipeline-with-Concourse
2 |
3 | Copyright (c) 2018 VMware, Inc.
4 |
5 | This product is licensed to you under the MIT license (the "License"). You may not use this product except in compliance with the MIT License.
6 |
7 | This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file.
8 |
9 |
--------------------------------------------------------------------------------
/docker_image/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:18.04
2 | ADD run.sh /home/run.sh
3 |
4 | # https://docs.docker.com/compose/install/#install-compose
5 | RUN apt-get update && \
6 | apt-get install -y vim curl openssh-client git wget python && \
7 | curl -sSL https://get.docker.com/ | sh && \
8 | curl -L --fail https://github.com/docker/compose/releases/download/1.23.2/run.sh -o /usr/local/bin/docker-compose && \
9 | chmod +x /usr/local/bin/docker-compose && \
10 | # download and install fly CLI
11 | wget -P /usr/local/bin/ https://github.com/concourse/concourse/releases/download/v5.7.0/fly-5.7.0-linux-amd64.tgz && \
12 | tar -C /usr/local/bin/ -xvf /usr/local/bin/fly-5.7.0-linux-amd64.tgz && \
13 | chmod +x /usr/local/bin/fly
14 |
15 | ENTRYPOINT ["/home/run.sh"]
16 |
--------------------------------------------------------------------------------
/functions/generate-keys.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e -u -x
4 |
5 | mkdir -p keys/web keys/worker
6 |
7 | openssl genpkey -algorithm RSA -out ./keys/web/tsa_host_key -pkeyopt rsa_keygen_bits:4096
8 | openssl genpkey -algorithm RSA -out ./keys/web/session_signing_key -pkeyopt rsa_keygen_bits:4096
9 | openssl genpkey -algorithm RSA -out ./keys/worker/worker_key -pkeyopt rsa_keygen_bits:4096
10 |
11 | chmod 600 ./keys/ -R
12 |
13 | ssh-keygen -y -f ./keys/web/tsa_host_key > ./keys/web/tsa_host_key.pub
14 | ssh-keygen -y -f ./keys/web/session_signing_key > ./keys/web/session_signing_key.pub
15 | ssh-keygen -y -f ./keys/worker/worker_key > ./keys/worker/worker_key.pub
16 |
17 | cp ./keys/worker/worker_key.pub ./keys/web/authorized_worker_keys
18 | cp ./keys/web/tsa_host_key.pub ./keys/worker
--------------------------------------------------------------------------------
/tasks/config-nsx-t-extras/task.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | platform: linux
4 |
5 | image_resource:
6 | type: docker-image
7 | source:
8 | repository: projects.registry.vmware.com/nsxt_gen_pipeline/nsx-t-gen-worker
9 | tag: py3
10 |
11 | params:
12 | NSX_T_INSTALLER:
13 | NSX_T_MANAGER_FQDN:
14 | NSX_T_MANAGER_HOST_NAME:
15 | NSX_T_MANAGER_IP:
16 | NSX_T_MANAGER_ADMIN_USER:
17 | NSX_T_MANAGER_ROOT_PWD:
18 | NSX_T_OVERLAY_TRANSPORT_ZONE:
19 | NSX_T_PAS_NCP_CLUSTER_TAG:
20 | NSX_T_T0ROUTER_SPEC:
21 | NSX_T_T1ROUTER_LOGICAL_SWITCHES_SPEC:
22 | NSX_T_HA_SWITCHING_PROFILE_SPEC:
23 | NSX_T_CONTAINER_IP_BLOCK_SPEC:
24 | NSX_T_EXTERNAL_IP_POOL_SPEC:
25 | NSX_T_NAT_RULES_SPEC:
26 | NSX_T_CSR_REQUEST_SPEC:
27 | NSX_T_LBR_SPEC:
28 |
29 | inputs:
30 | - name: nsx-t-gen-pipeline
31 | run:
32 | path: nsx-t-gen-pipeline/tasks/config-nsx-t-extras/task.sh
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | NSX-T-Data-Center-CI-Pipeline-with-Concourse
2 |
3 | Copyright (c) 2018 VMware, Inc.
4 |
5 | The MIT license (the “License”) set forth below applies to all parts of the NSX-T-Data-Center-CI-Pipeline-with-Concourse project. You may not use this file except in compliance with the License.
6 |
7 | MIT License
8 |
9 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
10 | so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
13 |
14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
15 |
16 |
17 |
--------------------------------------------------------------------------------
/tasks/install-nsx-t/copy_and_customize_ovas.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export ROOT_DIR=`pwd`
6 |
7 | export TASKS_DIR=$(dirname $BASH_SOURCE)
8 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
9 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
10 |
11 | source $FUNCTIONS_DIR/copy_ovas.sh
12 |
13 | DEBUG=""
14 | if [ "$ENABLE_ANSIBLE_DEBUG" == "true" ]; then
15 | DEBUG="-vvv"
16 | fi
17 |
18 | NSX_T_MANAGER_OVA=$(ls $ROOT_DIR/nsx-mgr-ova)
19 | NSX_T_CONTROLLER_OVA=$(ls $ROOT_DIR/nsx-ctrl-ova)
20 | NSX_T_EDGE_OVA=$(ls $ROOT_DIR/nsx-edge-ova)
21 |
22 | cat > customize_ova_vars.yml <<-EOF
23 | ovftool_path: '/usr/bin'
24 | ova_file_path: "$OVA_ISO_PATH"
25 | nsx_manager_filename: "$NSX_T_MANAGER_OVA"
26 | nsx_controller_filename: "$NSX_T_CONTROLLER_OVA"
27 | nsx_gw_filename: "$NSX_T_EDGE_OVA"
28 |
29 | EOF
30 | cp customize_ova_vars.yml nsxt-ansible
31 |
32 | install_ovftool
33 | copy_ovas_to_OVA_ISO_PATH
34 |
35 | cd nsxt-ansible
36 | ansible-playbook $DEBUG -i localhost customize_ovas.yml -e @customize_ova_vars.yml
37 | STATUS=$?
38 |
39 | echo ""
40 |
41 | # if [ -z "$SUPPORT_NSX_VMOTION" -o "$SUPPORT_NSX_VMOTION" == "false" ]; then
42 | # echo "Skipping vmks configuration for NSX-T Mgr!!"
43 | # echo 'configure_vmks: False' >> answerfile.yml
44 |
45 | # else
46 | # echo "Allowing vmks configuration for NSX-T Mgr!!"
47 | # echo 'configure_vmks: True' >> answerfile.yml
48 | # fi
49 |
50 | # echo ""
51 |
52 |
--------------------------------------------------------------------------------
/tasks/add-nsx-t-routers/task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export ROOT_DIR=`pwd`
6 |
7 | export TASKS_DIR=$(dirname $BASH_SOURCE)
8 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
9 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
10 |
11 | source $FUNCTIONS_DIR/create_hosts.sh
12 |
13 | DEBUG=""
14 | if [ "$enable_ansible_debug_int" == "true" ]; then
15 | DEBUG="-vvv"
16 | fi
17 |
18 | # Check if NSX MGR is up or not
19 | nsx_manager_ips=($(echo "$nsx_manager_ips_int" | sed -e 's/,/ /g'))
20 | manager_ip=${nsx_manager_ips[0]}
21 | nsx_mgr_up_status=$(curl -s -o /dev/null -I -w "%{http_code}" -k https://${manager_ip}:443/login.jsp || true)
22 |
23 | # Deploy the ovas if its not up
24 | if [ $nsx_mgr_up_status -ne 200 ]; then
25 | echo "NSX Mgr not up yet, please deploy the ovas before configuring routers!!"
26 | exit -1
27 | fi
28 |
29 | create_hosts
30 |
31 | cp ${PIPELINE_DIR}/nsxt_yaml/basic_resources.yml ${PIPELINE_DIR}/nsxt_yaml/vars.yml nsxt-ansible/
32 | cp hosts nsxt-ansible/hosts_file
33 | cd nsxt-ansible
34 |
35 | NO_OF_CONTROLLERS=$(curl -k -u "admin:$nsx_manager_password_int" \
36 | https://${manager_ip}/api/v1/cluster/nodes \
37 | | jq '.results[].controller_role.type' | wc -l )
38 | if [ "$NO_OF_CONTROLLERS" -lt 2 ]; then
39 | echo "NSX Mgr and controller not configured yet, please cleanup incomplete vms and rerun base install before configuring routers!!"
40 | exit -1
41 | fi
42 |
43 | # INFO: ansible errors without disabling host_key_checking
44 | # when obtaining thumbprints
45 | echo "[defaults]" > ansible.cfg
46 | echo "host_key_checking = false" >> ansible.cfg
47 | ansible-playbook $DEBUG -i hosts_file basic_resources.yml
48 | STATUS=$?
49 |
50 | exit $STATUS
--------------------------------------------------------------------------------
/functions/copy_ovas.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function install_ovftool {
4 |
5 | # Install provided ovftool
6 | if [ ! -e "/usr/bin/ovftool" ]; then
7 | pushd $ROOT_DIR/ovftool
8 | ovftool_bundle=$(ls *)
9 | chmod +x $ovftool_bundle
10 |
11 | size_of_tool=$(ls -al $ovftool_bundle | awk '{print $5}')
12 | if [ $size_of_tool -lt 10000000 ]; then
13 | echo "ovftool downloaded is lesser than 10 MB!!"
14 | echo "Check the file name/paths. Exiting from ova copy and deploy!!"
15 | exit 1
16 | fi
17 |
18 | is_binary=$(file $ovftool_bundle | grep "executable" || true)
19 | if [ "$is_binary" == "" ]; then
20 | echo "ovftool downloaded was not a valid binary image!!"
21 | echo "Check the file name/paths. Exiting from ova copy and deploy!!"
22 | exit 1
23 | fi
24 |
25 | ./${ovftool_bundle} --eulas-agreed
26 | popd
27 | echo "Done installing ovftool"
28 | else
29 | echo "ovftool already installed!!"
30 | fi
31 | echo ""
32 | }
33 |
34 | function check_ovas {
35 | # ova_file_name_int
36 | ova_file="$ROOT_DIR/nsx-mgr-ova/$ova_file_name_int"
37 | is_tar=$(file $ova_file | grep "tar archive" || true)
38 | if [ "$is_tar" == "" ]; then
39 | echo "File $ova_file downloaded was not a valid OVA image!!"
40 | echo "Check the file name/paths. Exiting from ova copy and deploy!!"
41 | exit 1
42 | fi
43 | }
44 |
45 | function copy_ovas_to_OVA_ISO_PATH {
46 |
47 | mkdir -p $OVA_ISO_PATH
48 | check_ovas
49 |
50 | mv $ROOT_DIR/nsx-mgr-ova/$ova_file_name_int $OVA_ISO_PATH
51 |
52 | echo "Done moving ova images into $OVA_ISO_PATH"
53 | echo ""
54 | }
55 |
56 | function create_customize_ova_params {
57 |
58 | cat > customize_ova_vars.yml <<-EOF
59 | ovftool_path: '/usr/bin'
60 | ova_file_path: "$OVA_ISO_PATH"
61 | EOF
62 |
63 | if [ "$NSX_T_KEEP_RESERVATION" == "false" ]; then
64 | echo "nsx_t_keep_reservation: $NSX_T_KEEP_RESERVATION" >> customize_ova_vars.yml
65 | fi
66 |
67 | #echo "$NSX_T_SIZING_SPEC" >> customize_ova_vars.yml
68 | }
69 |
--------------------------------------------------------------------------------
/nsxt_yaml/vars.yml:
--------------------------------------------------------------------------------
1 | # This file contains variable values for internal use
2 | ovftool_bin_path: "/usr/bin"
3 | overlay_transport_zone: 'overlay-tz'
4 | overlay_host_switch: "hostswitch-overlay"
5 | vlan_transport_zone: 'vlan-tz'
6 | vlan_host_switch: "hostswitch-vlan"
7 |
8 | transportzones:
9 | - display_name: "{{overlay_transport_zone}}"
10 | transport_type: "OVERLAY"
11 | host_switch_name: "{{overlay_host_switch}}" # will create one with this name
12 | - display_name: "{{vlan_transport_zone}}"
13 | transport_type: "VLAN"
14 | host_switch_name: "{{vlan_host_switch}}"
15 |
16 | vtep_ip_pool_name: vtep-ip-pool
17 |
18 | edge_uplink_prof: edge-single-uplink-prof
19 | host_uplink_prof: host-overlay-uplink-prof
20 |
21 | uplink_1_name: "uplink-1"
22 | uplink_2_name: "uplink-2"
23 | common_teaming_spec_for_esx:
24 | active_list:
25 | - uplink_name: "{{uplink_1_name}}"
26 | uplink_type: PNIC
27 | standby_list:
28 | - uplink_name: "{{uplink_2_name}}"
29 | uplink_type: PNIC
30 | policy: FAILOVER_ORDER
31 |
32 | uplink_profiles:
33 | - display_name: "{{edge_uplink_prof}}"
34 | description: "Edge Uplink Profile"
35 | teaming:
36 | active_list:
37 | - uplink_name: "{{uplink_1_name}}"
38 | uplink_type: PNIC
39 | policy: FAILOVER_ORDER
40 | transport_vlan: "{{hostvars['localhost'].edge_uplink_profile_vlan}}"
41 | - display_name: "{{host_uplink_prof}}"
42 | description: "Host Overlay Profile"
43 | teaming: "{{common_teaming_spec_for_esx}}"
44 | transport_vlan: "{{hostvars['localhost'].esxi_uplink_profile_vlan}}"
45 |
46 | edge_cluster_name: "edge-cluster-1"
47 |
48 | vlan_logical_switch: "uplink-vlan-ls"
49 | vlan_logical_switch_vlan: 0
50 | t0_uplink_port_ip: 172.60.0.1
51 | t0_uplink_port_subnet: 24
52 |
53 | single_pnic_list:
54 | - device_name: "{{hostvars['localhost'].esx_available_vmnic[0]}}"
55 | uplink_name: "{{uplink_1_name}}"
56 |
57 | extra_pnics: "{{ [] if hostvars['localhost'].esx_available_vmnic|length == 1 else [{'device_name': hostvars['localhost'].esx_available_vmnic[1], 'uplink_name': uplink_2_name }]}}"
58 |
59 | pnic_list: "{{single_pnic_list + extra_pnics}}"
60 |
61 | compute_manager_name: "vCenter-compute-manager"
62 | compute_manager_2_name: "vCenter-compute-manager_2"
63 |
--------------------------------------------------------------------------------
/doc/Utilities-and-troubleshooting.md:
--------------------------------------------------------------------------------
1 | ## Utilities and Troubleshooting
2 |
3 | ### Updating the parameter file
4 | In case you need to modify the parameter file or fix anything and rerun the pipeline, perform the following:
5 | * Make your change to the parameter file
6 | * Hijack the container
7 | * run ``docker ps`` to find the "nsx-t-install" container ID
8 | * run ``docker exec -it <container ID> bash``
9 | * in the container, run ``fly-reset`` (this will re-run set-pipeline for the nsx-t pipeline); see the combined example below
10 |
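A minimal combined sequence (a sketch, assuming the pipeline container is named "nsx-t-install" as above):
> `docker exec -it $(docker ps -qf name=nsx-t-install) bash`
> `fly-reset`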
11 |
12 | ### Cleaning up
13 |
14 | If you want to re-run from scratch do the following to cleanup:
15 | * Stop containers - `docker stop $(docker ps -a -q)`
16 | * Remove the containers - `docker rm $(docker ps -aq)`
17 | * Delete images - `docker rmi -f $(docker images -a -q)`
18 | * clear cache - `docker system prune --all`
19 | * unregister the vCenter extension - https://docs.vmware.com/en/VMware-NSX-T/2.2/com.vmware.nsxt.admin.doc/GUID-E6E2F017-1106-48C5-ABCA-3D3E9130A863.html
20 |
21 | ### Commonly seen issues:
22 | __NSX manager OVA auto download did not work; container exited.__
23 | Solution: it's likely the myvmware credentials were misspelled and did not work. Make sure the username is an email address, and both username and password are enclosed by single quotes (e.g. `-e VMWARE_USER='abc@efg.com' -e VMWARE_PASSWORD='pwd$pecial'`)
24 |
25 |
26 | __There should be 3 containers related to concourse running (web, worker, postgres). But one or more are missing from `docker ps` output.__
27 | Solution: use `docker ps -a` to find the exited container(s). Then check the logs with `docker logs <container ID>`. If the error message signals the container exited due to insufficient disk space on the jumphost, clean up unused docker volumes with:
28 | `docker volume rm $(docker volume ls -qf dangling=true)`
29 | Then clean up the existing containers:
30 | > `docker exec -it nsx-t-install bash`
31 | > `cd /home/concourse`
32 | > `docker-compose down`
33 | > `exit`
34 | > `docker stop nginx-server nsx-t-install`
35 | > `docker rm nginx-server nsx-t-install`
36 |
37 | Finally, rerun the pipeline container.
38 |
39 |
40 | __Ovftool fails to deploy NSX manager with OVA. Error from pipeline: "transfer failed; failed to send http data".__
41 | Solution: chances are the EXTERNAL_DNS environment variable passed to the container was unable to resolve vCenter's name. Rerun the docker container with a proper DNS nameserver.
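For example (a sketch only: reuse the exact image name and flags from your original `docker run` command; the IP below is a placeholder for a nameserver that can resolve the vCenter FQDN):
> `docker run ... -e EXTERNAL_DNS='192.168.110.10' ...`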
--------------------------------------------------------------------------------
/docker_compose/docker-compose.yml:
--------------------------------------------------------------------------------
1 | # Original source: https://github.com/concourse/concourse-docker
2 | # Update or sync up with latest version
3 | # Has additional flags for Web Proxy and DNS settings
4 |
5 | version: '3'
6 |
7 | services:
8 | concourse-db:
9 | image: postgres
10 | environment:
11 | - POSTGRES_DB=concourse
12 | - POSTGRES_PASSWORD=concourse_pass
13 | - POSTGRES_USER=concourse_user
14 | - PGDATA=/database
15 |
16 | concourse-web:
17 | image: concourse/concourse
18 | command: web
19 | links: [concourse-db]
20 | depends_on: [concourse-db]
21 | ports: ["8080:8080"] # EDIT if necessary
22 | volumes: ["./keys/web:/concourse-keys"]
23 | environment:
24 | - CONCOURSE_POSTGRES_HOST=concourse-db
25 | - CONCOURSE_POSTGRES_USER=concourse_user
26 | - CONCOURSE_POSTGRES_PASSWORD=concourse_pass
27 | - CONCOURSE_POSTGRES_DATABASE=concourse
28 | - CONCOURSE_EXTERNAL_URL= # EDIT ME
29 | - CONCOURSE_ADD_LOCAL_USER=nsx:vmware
30 | - CONCOURSE_MAIN_TEAM_LOCAL_USER=nsx
31 | - CONCOURSE_GARDEN_DNS_PROXY_ENABLE=true
32 | - CONCOURSE_WORKER_GARDEN_DNS_PROXY_ENABLE=true
33 |
34 | # Edit dns server for CONCOURSE_GARDEN_DNS_SERVER
35 | # Edit the no_proxy to your env to allow direct access
36 | # like the webserver hosting the ova bits.
37 | # Ensure there are no quotes or spaces in the values
38 |
39 | concourse-worker:
40 | image: concourse/concourse
41 | command: worker
42 | privileged: true
43 | links: [concourse-web]
44 | depends_on: [concourse-web]
45 | volumes:
46 | - "./keys/worker:/concourse-keys"
47 | environment:
48 | - CONCOURSE_TSA_HOST=concourse-web:2222
49 | - CONCOURSE_GARDEN_NETWORK
50 | - CONCOURSE_GARDEN_DNS_PROXY_ENABLE=true
51 | - CONCOURSE_WORKER_GARDEN_DNS_PROXY_ENABLE=true
52 | - CONCOURSE_GARDEN_DNS_SERVER= # EDIT ME
53 | # Fill details below if env uses a web proxy
54 | # Ensure there are no quotes or spaces in the values
55 | - http_proxy_url= # EDIT ME - sample: http://192.168.10.5:3128/
56 | - https_proxy_url= # EDIT ME - sample: http://192.168.10.5:3128/
57 | - no_proxy= # EDIT ME - sample: localhost,127.0.0.1,WEBSERVER-IP,8.8.8.8,10.193.99.2
58 | - HTTP_PROXY= # EDIT ME - sample: http://192.168.10.5:3128/
59 | - HTTPS_PROXY= # EDIT ME - sample: http://192.168.10.5:3128/
60 | - NO_PROXY= # EDIT ME - sample: localhost,127.0.0.1,WEBSERVER-IP,8.8.8.8,10.193.99.2
61 |
62 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Contributing to nsx-t-datacenter-ci-pipelines
4 |
5 | The nsx-t-datacenter-ci-pipelines project team welcomes contributions from the community. Before you start working with nsx-t-datacenter-ci-pipelines, please read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch.
6 |
7 | ## Contribution Flow
8 |
9 | This is a rough outline of what a contributor's workflow looks like:
10 |
11 | - Create a topic branch from where you want to base your work
12 | - Make commits of logical units
13 | - Make sure your commit messages are in the proper format (see below)
14 | - Push your changes to a topic branch in your fork of the repository
15 | - Submit a pull request
16 |
17 | Example:
18 |
19 | ``` shell
20 | git remote add upstream https://github.com/vmware/nsx-t-datacenter-ci-pipelines.git
21 | git checkout -b my-new-feature master
22 | git commit -a
23 | git push origin my-new-feature
24 | ```
25 |
26 | ### Staying In Sync With Upstream
27 |
28 | When your branch gets out of sync with the vmware/master branch, use the following to update:
29 |
30 | ``` shell
31 | git checkout my-new-feature
32 | git fetch -a
33 | git pull --rebase upstream master
34 | git push --force-with-lease origin my-new-feature
35 | ```
36 |
37 | ### Updating pull requests
38 |
39 | If your PR fails to pass CI or needs changes based on code review, you'll most likely want to squash these changes into
40 | existing commits.
41 |
42 | If your pull request contains a single commit or your changes are related to the most recent commit, you can simply
43 | amend the commit.
44 |
45 | ``` shell
46 | git add .
47 | git commit --amend
48 | git push --force-with-lease origin my-new-feature
49 | ```
50 |
51 | If you need to squash changes into an earlier commit, you can use:
52 |
53 | ``` shell
54 | git add .
55 | git commit --fixup <commit_id>
56 | git rebase -i --autosquash master
57 | git push --force-with-lease origin my-new-feature
58 | ```
59 |
60 | Be sure to add a comment to the PR indicating your new changes are ready to review, as GitHub does not generate a
61 | notification when you git push.
62 |
63 | ### Formatting Commit Messages
64 |
65 | We follow the conventions on [How to Write a Git Commit Message](http://chris.beams.io/posts/git-commit/).
66 |
67 | Be sure to include any related GitHub issue references in the commit message. See
68 | [GFM syntax](https://guides.github.com/features/mastering-markdown/#GitHub-flavored-markdown) for referencing issues
69 | and commits.
70 |
71 | ## Reporting Bugs and Creating Issues
72 |
73 | When opening a new issue, try to roughly follow the commit message format conventions above.
74 |
--------------------------------------------------------------------------------
/functions/set_default_params.py:
--------------------------------------------------------------------------------
1 | import fileinput
2 |
3 | PARAMS_FILE = "nsx_pipeline_config.yml"
4 | INTERNAL_CONFIG_FILE = "pipeline_config_internal.yml"
5 | DEFAULT_SECTION = "Params generated with default values"
6 | MGR_PWD = 'nsx_manager_password'
7 | VC_USER = 'vcenter_username'
8 | VC_PW = 'vcenter_password'
9 |
10 |
11 | def add_default_params_if_necessary():
12 | optional_params = {
13 | 'nsx_manager_virtual_ip': '',
14 | 'nsx_manager_cluster_fqdn': '',
15 | 'nsx_license_key': '',
16 | 'nsx_manager_root_pwd': 'Admin!23Admin',
17 | 'nsx_manager_cli_pwd': 'Admin!23Admin',
18 | 'compute_manager_username': 'Administrator@vsphere.local',
19 | 'compute_manager_password': 'Admin!23',
20 | 'compute_manager_2_vcenter_ip': '',
21 | 'compute_manager_2_username': '',
22 | 'compute_manager_2_password': '',
23 | 'tier0_uplink_port_ip_2': '',
24 | 'tier0_ha_vip': '',
25 | 'esx_ips': '',
26 | 'esx_os_version': '',
27 | 'esx_root_password': '',
28 | 'esx_hostname_prefix': '',
29 | 'nsx_t_t1router_logical_switches_spec': '',
30 | 'nsx_t_ha_switching_profile_spec': '',
31 | 'nsx_t_external_ip_pool_spec': '',
32 | 'nsx_t_container_ip_block_spec': '',
33 | 'nsx_t_nat_rules_spec': '',
34 | 'nsx_t_csr_request_spec': '',
35 | 'nsx_t_lbr_spec': ''
36 | }
37 | params_to_add = sorted(optional_params.keys())
38 |
39 | with open(PARAMS_FILE, 'r') as params_file:
40 | for line in params_file:
41 | for param in params_to_add:
42 | if param in line:
43 | params_to_add.remove(param)
44 | default_infered = line.split(':')[-1].strip()
45 | if MGR_PWD in line:
46 | optional_params['nsx_manager_root_pwd'] = default_infered
47 | optional_params['nsx_manager_cli_pwd'] = default_infered
48 | elif VC_USER in line:
49 | optional_params['compute_manager_username'] = default_infered
50 | elif VC_PW in line:
51 | optional_params['compute_manager_password'] = default_infered
52 |
53 | has_default_section = False
54 | fin = fileinput.input(INTERNAL_CONFIG_FILE, inplace=1)
55 | for line in fin:
56 | if has_default_section:
57 | next(fin, None)
58 | continue
59 | if line.strip() == "### %s" % DEFAULT_SECTION:
60 | has_default_section = True
61 | print line,
62 | # Python 3 use the following line
63 | # print(line, end='')
64 |
65 | if params_to_add:
66 | with open(INTERNAL_CONFIG_FILE, 'a') as internal_params_file:
67 | if not has_default_section:
68 | internal_params_file.writelines('\n### %s\n' % DEFAULT_SECTION)
69 | for param_to_add in params_to_add:
70 | internal_params_file.writelines(
71 | '%s: %s\n' % (param_to_add, optional_params[param_to_add]))
72 |
73 |
74 | if __name__ == "__main__":
75 | add_default_params_if_necessary()
76 |
--------------------------------------------------------------------------------
/tasks/install-nsx-t/task.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | export ROOT_DIR=`pwd`
6 |
7 | export TASKS_DIR=$(dirname $BASH_SOURCE)
8 | export PIPELINE_DIR=$(cd $TASKS_DIR/../../ && pwd)
9 | export FUNCTIONS_DIR=$(cd $PIPELINE_DIR/functions && pwd)
10 |
11 | export OVA_ISO_PATH='/root/ISOs/CHGA'
12 |
13 | source $FUNCTIONS_DIR/copy_ovas.sh
14 | source $FUNCTIONS_DIR/create_hosts.sh
15 |
16 | # Default installer name to be used for tags
17 | if [ "$NSX_T_INSTALLER" == "" ]; then
18 | NSX_T_INSTALLER='nsx-t-gen'
19 | fi
20 |
21 | function check_status_up {
22 | ip_set=$1
23 | type_of_resource=$2
24 | status_up=true
25 |
26 | resources_down_count=0
27 | resources_configured=$(echo $ip_set | sed -e 's/,/ /g' | awk '{print NF}' )
28 | for resource_ip in $(echo $ip_set | sed -e 's/,/ /g' )
29 | do
30 | # no netcat on the docker image
31 | #status=$(nc -vz ${resource_ip} 22 2>&1 | grep -i succeeded || true)
32 | # following hangs on bad ports
33 |     #status=$(echo > /dev/tcp/${resource_ip}/22)
34 |     timeout 5 bash -c "(echo > /dev/tcp/${resource_ip}/22) >/dev/null 2>&1"
35 |     status=$?
36 | if [ "$status" != "0" ]; then
37 | status_up=false
38 | resources_down_count=$(expr $resources_down_count + 1)
39 | fi
40 | done
41 |
42 | if [ "$status_up" == "true" ]; then
43 | (>&2 echo "All VMs of type ${type_of_resource} up, total: ${resources_configured}")
44 | echo "true"
45 | return
46 | fi
47 |
48 | if [ "$resources_down_count" != "$resources_configured" ]; then
49 | (>&2 echo "Mismatch in number of VMs of type ${type_of_resource} that are expected to be up!!")
50 | (>&2 echo "Configured ${type_of_resource} VM total: ${resources_configured}, VM down: ${resources_down_count}")
51 | (>&2 echo "Delete pre-created vms of type ${type_of_resource} and start over!!")
52 | (>&2 echo "If the vms are up and accessible and suspect its a timing issue, restart the job again!!")
53 | (>&2 echo "Exiting now !!")
54 | exit -1
55 | else
56 | (>&2 echo "All VMs of type ${type_of_resource} down, total: ${resources_configured}")
57 | (>&2 echo " Would need to deploy ${type_of_resource} ovas")
58 | fi
59 |
60 | echo "false"
61 | return
62 | }
63 |
64 | DEBUG=""
65 | if [ "$enable_ansible_debug_int" == "true" ]; then
66 | DEBUG="-vvv"
67 | fi
68 |
69 | create_hosts
70 | cp ${PIPELINE_DIR}/tasks/install-nsx-t/get_mo_ref_id.py ./
71 | python get_mo_ref_id.py --host $vcenter_ip_int --user $vcenter_username_int --password $vcenter_password_int
72 |
73 | cp hosts.out ${PIPELINE_DIR}/nsxt_yaml/basic_topology.yml ${PIPELINE_DIR}/nsxt_yaml/vars.yml nsxt-ansible/
74 | cd nsxt-ansible
75 | cp ${PIPELINE_DIR}/tasks/install-nsx-t/modify_options.py ./
76 |
77 | if [[ "$unified_appliance_int" == "true" ]]; then
78 | # DNS server needs to be specified for static IPs
79 | python modify_options.py
80 | fi
81 |
82 | # Deploy the ovas if its not up
83 | echo "Installing ovftool"
84 | install_ovftool
85 |
86 | cp ${PIPELINE_DIR}/tasks/install-nsx-t/turn_off_reservation.py ./
87 | cp ${PIPELINE_DIR}/tasks/config-nsx-t-extras/*.py ./
88 |
89 | ansible-playbook $DEBUG -i hosts.out basic_topology.yml
90 | STATUS=$?
91 |
92 | if [[ $STATUS != 0 ]]; then
93 | echo "Deployment of NSX failed, vms failed to come up!!"
94 | echo "Check error logs"
95 | echo ""
96 | exit $STATUS
97 | else
98 | echo "Deployment of NSX is succcessfull!! Continuing with rest of configuration!!"
99 | echo ""
100 | fi
101 |
102 | echo "Successfully finished with Install!!"
103 |
104 | exit 0
105 |
--------------------------------------------------------------------------------
/tasks/config-nsx-t-extras/client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | # nsx-t-gen
4 | #
5 |
6 |
7 | from __future__ import absolute_import, division, print_function
8 |
9 | __author__ = 'Sabha Parameswaran'
10 |
11 | import sys
12 | import yaml
13 | import json
14 | import requests
15 | import time
16 | from requests.auth import HTTPDigestAuth
17 | from pprint import pprint
18 |
19 | try:
20 | # Python 3
21 | from urllib.parse import urlparse
22 | except ImportError:
23 | # Python 2
24 | from urlparse import urlparse
25 |
26 | requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)
27 |
28 |
29 | class auth(requests.auth.AuthBase):
30 |
31 | def __init__(self, context):
32 | self.context = context
33 |
34 | def __call__(self, request):
35 | username = self.context.get('admin_user')
36 | password = self.context.get('admin_passwd')
37 | return requests.auth.HTTPBasicAuth(username, password)(request)
38 |
39 |
40 | def get_context():
41 | if get_context.context is not None:
42 | return get_context.context
43 | else:
44 | raise Exception('config not loaded!!')
45 |
46 |
47 | get_context.context = None
48 |
49 |
50 | def set_context(context):
51 | get_context.context = context
52 |
53 |
54 | def get(url, stream=False, check=True):
55 | context = get_context()
56 | url = context.get('url') + url
57 | headers = {'Accept': 'application/json,text/html,application/xhtml+xml,application/xml'}
58 |
59 | response = requests.get(url, auth=auth(context), verify=False, headers=headers, stream=stream)
60 | check_response(response, check=check)
61 | return response
62 |
63 |
64 | def put(url, payload, check=True):
65 | try:
66 | context = get_context()
67 | url = context.get('url') + url
68 | response = requests.put(url, auth=auth(context), verify=False, json=payload)
69 | check_response(response, check=check)
70 | return response
71 | except:
72 | # Squelch Python error during put operations:
73 | # File "/usr/local/lib/python2.7/site-packages/requests/packages/urllib3/connectionpool.py", l
74 | # ine 314, in _raise_timeout
75 | # if 'timed out' in str(err) or 'did not complete (read)' in str(err): # Python 2.6
76 | # TypeError: __str__ returned non-string (type SysCallError)
77 | # print('Error during put')
78 | return ''
79 |
80 |
81 | def post(url, payload, check=True):
82 | context = get_context()
83 | url = context.get('url') + url
84 | response = requests.post(url, auth=auth(context), verify=False, json=payload)
85 | check_response(response, check=check)
86 | return response
87 |
88 |
89 | def delete(url, check=True):
90 | context = get_context()
91 | url = context.get('url') + url
92 | response = requests.delete(url, auth=auth(context), verify=False)
93 | check_response(response, check=check)
94 | return response
95 |
96 |
97 | def check_response(response, check=True):
98 | # pprint(vars(response))
99 | # print(response.content)
100 | if check and (response.status_code != requests.codes.ok and response.status_code > 400):
101 |
102 | print('-', response.status_code, response.request.url, file=sys.stderr)
103 | try:
104 | errors = response.json()["errors"]
105 | print('- ' + ('\n- '.join(json.dumps(errors, indent=4).splitlines())), file=sys.stderr)
106 | except:
107 | print(response.text, file=sys.stderr)
108 | sys.exit(1)
109 |
--------------------------------------------------------------------------------
/tasks/install-nsx-t/modify_options.py:
--------------------------------------------------------------------------------
1 | from tempfile import mkstemp
2 | from shutil import move, copymode
3 | import os
4 | from os import fdopen, remove
5 |
6 | TOPOLOGY_FILE = "basic_topology.yml"
7 | DNS_SERVER = "hostvars['localhost'].dns_server"
8 | PREFIX_LENGTH = "hostvars[item].prefix_length"
9 | DATA_NETWORKS = "data_networks:"
10 | MANAGEMENT_NETWORK = "management_network: \"{{hostvars[item]"
11 | COMPUTE = "compute: \"{{hostvars[item].vc_cluster_for_edge"
12 | STORAGE = "storage: \"{{hostvars[item].vc_datastore_for_edge"
13 |
14 |
15 | def add_new_line_if_absent(line):
16 | if line.endswith('\n'):
17 | return line
18 | return line + '\n'
19 |
20 |
21 | def replace_file(tmp_file_path):
22 | # Copy the file permissions from the old file to the new file
23 | copymode(TOPOLOGY_FILE, tmp_file_path)
24 | # Remove original file
25 | remove(TOPOLOGY_FILE)
26 | # Move new file
27 | move(tmp_file_path, "basic_topology.yml")
28 |
29 |
30 | def add_dns_server_option():
31 | dns_servers_spec = os.getenv('dns_server_int')
32 | fh, abs_path = mkstemp()
33 | with fdopen(fh, 'w') as new_file:
34 | with open(TOPOLOGY_FILE) as old_file:
35 | for line in old_file:
36 | if DNS_SERVER in line and ',' in dns_servers_spec:
37 | leading_spaces = len(line) - len(line.lstrip())
38 | dns_line = ' ' * leading_spaces + ("dns_server: %s\n"
39 | % dns_servers_spec.split(',')[0])
40 | line = line.replace(line, dns_line)
41 | elif PREFIX_LENGTH in line:
42 | leading_spaces = len(line) - len(line.lstrip()) - 2
43 | dns_line = ' ' * leading_spaces
44 | if ',' not in dns_servers_spec:
45 | dns_line += "dns_servers: [\"{{hostvars['localhost'].dns_server}}\"]"
46 | else:
47 | dns_servers = [s.strip() for s in dns_servers_spec.split(',')]
48 | dns_line += "dns_servers:"
49 | for server in dns_servers:
50 | dns_line += '\n' + ' ' * leading_spaces + "- %s" % server
51 | line = line.replace(line, line + dns_line)
52 | new_file.write(add_new_line_if_absent(line))
53 | replace_file(abs_path)
54 |
55 |
56 | def add_ids_in_param_if_necessary():
57 |
58 | def add_id_to_param(matched_line):
59 | leading_spaces = len(line) - len(line.lstrip())
60 | items = matched_line.lstrip().split(' ')
61 | newline = ' ' * leading_spaces + items[0][:-1] + "_id: " + items[1]
62 | return newline
63 |
64 | ansible_branch = os.getenv('nsxt_ansible_branch_int').strip()
65 | if ansible_branch and ansible_branch == 'master':
66 | fh, abs_path = mkstemp()
67 | with fdopen(fh, 'w') as new_file:
68 | with open(TOPOLOGY_FILE) as old_file:
69 | for line in old_file:
70 | if "data_networks:" in line:
71 | leading_spaces = len(line) - len(line.lstrip())
72 | line_with_id = ' ' * leading_spaces + "data_network_ids:"
73 | line = line.replace(line, line_with_id)
74 | elif MANAGEMENT_NETWORK in line or COMPUTE in line or STORAGE in line:
75 | line = line.replace(line, add_id_to_param(line))
76 | new_file.write(add_new_line_if_absent(line))
77 | replace_file(abs_path)
78 |
79 |
80 | if __name__ == "__main__":
81 | add_dns_server_option()
82 | add_ids_in_param_if_necessary()
83 |
--------------------------------------------------------------------------------
/sample_parameters/raw/pks.yml:
--------------------------------------------------------------------------------
1 | ### [OPTIONAL] For all the configs below
2 | nsx_t_t1router_logical_switches_spec: |
3 | t1_routers:
4 | # Add additional T1 Routers or collapse switches into same T1 Router as needed
5 | - name: T1-Router-PKS-Infra
6 | switches:
7 | - name: PKS-Infra
8 | logical_switch_gw: 192.168.50.1 # Last octet should be 1 rather than 0
9 | subnet_mask: 24
10 |
11 | - name: T1Router-PKS-Services
12 | switches:
13 | - name: PKS-Services
14 | logical_switch_gw: 192.168.60.1 # Last octet should be 1 rather than 0
15 | subnet_mask: 24
16 |
17 |
18 | nsx_t_container_ip_block_spec: |
19 | container_ip_blocks:
20 | - name: PKS-node-ip-block
21 | cidr: 11.4.0.0/16
22 |
23 | - name: PKS-pod-ip-block
24 | cidr: 12.4.0.0/16
25 |
26 |
27 | nsx_t_external_ip_pool_spec: |
28 | external_ip_pools:
29 |
30 | - name: snat-vip-pool-for-pks
31 | cidr: 10.208.50.0/24
32 | start: 10.208.50.10 # Should not include gateway
33 | end: 10.208.50.200 # Should not include gateway
34 |
35 | - name: tep-ip-pool2
36 | cidr: 192.168.220.0/24
37 | start: 192.168.220.10
38 | end: 192.168.220.200
39 |
40 |
41 | nsx_t_nat_rules_spec: |
42 | nat_rules:
43 | # Sample entry for PKS-Infra network
44 | - t0_router: DefaultT0Router
45 | nat_type: snat
46 | source_network: 192.168.50.0/24 # PKS Infra network cidr
47 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
48 | rule_priority: 8001 # Lower priority
49 |
50 | # Sample entry for PKS-Services network
51 | - t0_router: DefaultT0Router
52 | nat_type: snat
53 | source_network: 192.168.60.0/24 # PKS Clusters network cidr
54 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
55 | rule_priority: 8001 # Lower priority
56 |
57 | # Sample entry for allowing inbound to PKS Ops manager
58 | - t0_router: DefaultT0Router
59 | nat_type: dnat
60 | destination_network: 10.208.50.2 # External IP address for PKS opsmanager
61 | translated_network: 192.168.50.2 # Internal IP of PKS Ops manager
62 | rule_priority: 1024 # Higher priority
63 |
64 | # Sample entry for allowing outbound from PKS Ops Mgr to external
65 | - t0_router: DefaultT0Router
66 | nat_type: snat
67 | source_network: 192.168.50.2 # Internal IP of PKS Ops manager
68 | translated_network: 10.208.50.2 # External IP address for PKS opsmanager
69 | rule_priority: 1024 # Higher priority
70 |
71 | # Sample entry for allowing inbound to PKS Controller
72 | - t0_router: DefaultT0Router
73 | nat_type: dnat
74 | destination_network: 10.208.50.4 # External IP address for PKS controller
75 | translated_network: 192.168.50.11 # Internal IP of PKS Ops Controller
76 | rule_priority: 1024 # Higher priority
77 |
78 | # Sample entry for allowing outbound from PKS Controller to external
79 | - t0_router: DefaultT0Router
80 | nat_type: snat
81 | source_network: 192.168.50.4 # Internal IP of PKS controller
82 | translated_network: 10.208.50.4 # External IP address for PKS controller
83 | rule_priority: 1024 # Higher priority
84 |
85 |
86 | nsx_t_csr_request_spec: |
87 | csr_request:
88 | #common_name not required - would use nsx_t_manager_host_name
89 | org_name: Company # EDIT
90 | org_unit: net-integ # EDIT
91 | country: US # EDIT
92 | state: CA # EDIT
93 | city: SF # EDIT
94 | key_size: 2048 # Valid values: 2048 or 3072
95 | algorithm: RSA # Valid values: RSA or DSA
96 |
97 |
98 | nsx_t_lbr_spec: |
99 | loadbalancers:
--------------------------------------------------------------------------------
/sample_parameters/raw/nsx.yml:
--------------------------------------------------------------------------------
1 | ### Configs marked OPTIONAL below can be removed from the param file
2 | ### if they are N/A or not desired.
3 |
4 | ### General settings
5 | enable_ansible_debug: false # set value to true for verbose output from Ansible
6 | # format: "http://<webserver-ip>:40001"
7 | nsx_image_webserver: "http://192.168.110.11:40001"
8 |
9 | ### NSX general network settings
10 | mgmt_portgroup: 'ESXi-RegionA01-vDS-COMP'
11 | dns_server: 192.168.110.10
12 | dns_domain: corp.local.io
13 | ntp_servers: time.vmware.com
14 | default_gateway: 192.168.110.1
15 | netmask: 255.255.255.0
16 |
17 | ### NSX manager cluster configs
18 | # Three node cluster is recommended. 1 is minimum, 3 is max
19 | nsx_manager_ips: 192.168.110.33,192.168.110.34,192.168.110.35 # Manager IPs.
20 | nsx_manager_username: admin
21 | nsx_manager_password: Admin!23Admin
22 | nsx_manager_hostname_prefix: "nsxt-mgr" # Min 12 chars, upper, lower, number, special digit
23 | nsx_manager_virtual_ip: 192.168.110.36 # [OPTIONAL] Virtual IP as the access IP for the manager cluster
24 | # FQDN is required if virtual IP is configured
25 | nsx_manager_cluster_fqdn: corp.local.io # [OPTIONAL] FQDN for the manager, will be used to generate cert for VIP
26 | nsx_license_key: 11111-22222-33333-44444-55555
27 | nsx_manager_root_pwd: Admin!23Admin # [OPTIONAL] Defaults to nsx_manager_password if not set
28 | nsx_manager_cli_pwd: Admin!23Admin # [OPTIONAL] Defaults to nsx_manager_password if not set
29 | nsx_manager_deployment_size: small # Recommended for real bare-bones demo, smallest setup
30 | nsx_manager_deployment_ip_prefix_length: 23
31 | nsx_manager_ssh_enabled: true
32 | vcenter_ip: 192.168.110.22
33 | vcenter_username: administrator@corp.local
34 | vcenter_password: "VMware1!"
35 | vcenter_datacenter: RegionA01
36 | vcenter_cluster: RegionA01-MGMT
37 | vcenter_datastore: iscsi
38 | resource_reservation_off: true
39 |
40 | # Compute manager credentials should be the same as above vCenter's if
41 | # controllers and edges are to be on the same vCenter
42 | compute_manager_username: "Administrator@vsphere.local" # [OPTIONAL] Defaults to vcenter_username if not set
43 | compute_manager_password: "VMware1!" # [OPTIONAL] Defaults to vcenter_password if not set
44 | # compute manager for the compute cluster (2nd vCenter)
45 | compute_manager_2_vcenter_ip: "null" # [OPTIONAL]
46 | compute_manager_2_username: "null" # [OPTIONAL]
47 | compute_manager_2_password: "null" # [OPTIONAL]
48 |
49 | edge_uplink_profile_vlan: 0 # For outbound uplink connection used by Edge, usually keep as 0
50 | esxi_uplink_profile_vlan: 0 # For internal overlay connection used by ESXi hosts, usually transport VLAN ID
51 |
52 | # Virtual Tunnel Endpoint network ip pool
53 | vtep_ip_pool_cidr: 192.168.213.0/24
54 | vtep_ip_pool_gateway: 192.168.213.1
55 | vtep_ip_pool_start: 192.168.213.10
56 | vtep_ip_pool_end: 192.168.213.200
57 |
58 | # Tier 0 router
59 | tier0_router_name: DefaultT0Router
60 | tier0_uplink_port_ip: 192.168.100.4
61 | tier0_uplink_port_subnet: 24
62 | tier0_uplink_next_hop_ip: 192.168.100.1
63 | tier0_uplink_port_ip_2: 192.168.100.5
64 | tier0_ha_vip: 192.168.100.3
65 |
66 | ### Edge nodes
67 | edge_ips: 192.168.110.37,192.168.110.38 # Comma separated based in number of required edges
68 | edge_default_gateway: 192.168.110.1
69 | edge_ip_prefix_length: 24
70 | edge_hostname_prefix: nsx-t-edge
71 | edge_transport_node_prefix: edge-transp-node
72 | edge_cli_password: "VMware1!"
73 | edge_root_password: "VMware1!"
74 | edge_deployment_size: "large" # Large recommended for PKS deployments
75 | vc_datacenter_for_edge: RegionA01
76 | vc_cluster_for_edge: RegionA01-MGMT
77 | vc_datastore_for_edge: iscsi
78 | vc_uplink_network_for_edge: "ESXi-RegionA01-vDS-COMP"
79 | vc_overlay_network_for_edge: "VM-RegionA01-vDS-COMP"
80 | vc_management_network_for_edge: "ESXi-RegionA01-vDS-COMP"
81 |
82 | ### ESX hosts
83 | # Install NSX on vSphere clusters automatically
84 | clusters_to_install_nsx: RegionA01-MGMT,RegionA01-K8s # Comma separated
85 | per_cluster_vlans: 0,0 # Comma separated, order of VLANs applied same as order of clusters
86 |
87 | esx_ips: "" # [OPTIONAL] additional esx hosts, if any, to be individually installed
88 | esx_os_version: "6.5.0" # [OPTIONAL]
89 | esx_root_password: "ca$hc0w" # [OPTIONAL]
90 | esx_hostname_prefix: "esx-host" # [OPTIONAL]
91 |
92 | esx_available_vmnic: "vmnic1" # comma separated physical NICs; applies to both cluster installation and individual ESXi host installation
--------------------------------------------------------------------------------
/sample_parameters/raw/pas.yml:
--------------------------------------------------------------------------------
1 | ### [OPTIONAL] For all the configs below
2 | nsx_t_t1router_logical_switches_spec: |
3 | t1_routers:
4 | # Add additional T1 Routers or collapse switches into same T1 Router as needed
5 | # Remove unneeded T1 routers
6 | - name: T1-Router-PAS-Infra
7 | switches:
8 | - name: PAS-Infra
9 | logical_switch_gw: 192.168.10.1 # Last octet should be 1 rather than 0
10 | subnet_mask: 24
11 |
12 | - name: T1-Router-PAS-ERT
13 | switches:
14 | - name: PAS-ERT
15 | logical_switch_gw: 192.168.20.1 # Last octet should be 1 rather than 0
16 | subnet_mask: 24
17 | edge_cluster: true
18 |
19 | - name: T1-Router-PAS-Services
20 | switches:
21 | - name: PAS-Services
22 | logical_switch_gw: 192.168.30.1 # Last octet should be 1 rather than 0
23 | subnet_mask: 24
24 |
25 |
26 | nsx_t_ha_switching_profile_spec: |
27 | ha_switching_profiles:
28 | - name: HASwitchingProfile
29 |
30 |
31 | nsx_t_container_ip_block_spec: |
32 | container_ip_blocks:
33 | - name: PAS-container-ip-block
34 | cidr: 10.4.0.0/16
35 |
36 |
37 | nsx_t_external_ip_pool_spec: |
38 | external_ip_pools:
39 | - name: snat-vip-pool-for-pas
40 | cidr: 10.208.40.0/24
41 | start: 10.208.40.10 # Should not include gateway
42 | end: 10.208.40.200 # Should not include gateway
43 |
44 | - name: tep-ip-pool2
45 | cidr: 192.168.220.0/24
46 | start: 192.168.220.10
47 | end: 192.168.220.200
48 |
49 |
50 | # Specify NAT rules
51 | nsx_t_nat_rules_spec: |
52 | nat_rules:
53 | # Sample entry for allowing inbound to PAS Ops manager
54 | - t0_router: DefaultT0Router
55 | nat_type: dnat
56 | destination_network: 10.208.40.2 # External IP address for PAS opsmanager
57 | translated_network: 192.168.10.2 # Internal IP of PAS Ops manager
58 | rule_priority: 1024 # Higher priority
59 |
60 | # Sample entry for allowing outbound from PAS Ops Mgr to external
61 | - t0_router: DefaultT0Router
62 | nat_type: snat
63 | source_network: 192.168.10.2 # Internal IP of PAS opsmanager
64 | translated_network: 10.208.40.2 # External IP address for PAS opsmanager
65 | rule_priority: 1024 # Higher priority
66 |
67 | # Sample entry for PAS Infra network SNAT
68 | - t0_router: DefaultT0Router
69 | nat_type: snat
70 | source_network: 192.168.10.0/24 # PAS Infra network cidr
71 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
72 | rule_priority: 8000 # Lower priority
73 |
74 | # Sample entry for PAS ERT network SNAT
75 | - t0_router: DefaultT0Router
76 | nat_type: snat
77 | source_network: 192.168.20.0/24 # PAS ERT network cidr
78 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
79 | rule_priority: 8000 # Lower priority
80 |
81 | # Sample entry for PAS Services network SNAT
82 | - t0_router: DefaultT0Router
83 | nat_type: snat
84 | source_network: 192.168.30.0/24 # PAS Services network cidr
85 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
86 | rule_priority: 8001 # Lower priority
87 |
88 |
89 | nsx_t_csr_request_spec: |
90 | csr_request:
91 | #common_name not required - would use nsx_t_manager_host_name
92 | org_name: Company # EDIT
93 | org_unit: net-integ # EDIT
94 | country: US # EDIT
95 | state: CA # EDIT
96 | city: SF # EDIT
97 | key_size: 2048 # Valid values: 2048 or 3072
98 | algorithm: RSA # Valid values: RSA or DSA
99 |
100 |
101 | nsx_t_lbr_spec: |
102 | loadbalancers:
103 | # Sample entry for creating LBR for PAS ERT
104 | - name: PAS-ERT-LBR
105 | t1_router: T1-Router-PAS-ERT # Should match a previously declared T1 Router
106 | size: small # Allowed sizes: small, medium, large
107 | virtual_servers:
108 | - name: goRouter443 # Name that signifies function being exposed
109 | vip: 10.208.40.4 # Exposed VIP for LBR to listen on
110 | port: 443
111 | members:
112 | - ip: 192.168.20.11 # Internal ip of GoRouter instance 1
113 | port: 80
114 | - ip: 192.168.20.12 # Internal ip of GoRouter instance 2
115 | port: 443
116 | - name: goRouter80
117 | vip: 10.208.40.4
118 | port: 80
119 | members:
120 | - ip: 192.168.20.11 # Internal ip of GoRouter instance 1
121 | port: 80
122 | - ip: 192.168.20.12 # Internal ip of GoRouter instance 2
123 | port: 80
124 | - name: sshProxy # SSH Proxy exposed to outside
125 | vip: 10.208.40.5
126 | port: 2222 # Port 2222 for ssh proxy
127 | members:
128 | - ip: 192.168.20.41 # Internal ip of Diego Brain where ssh proxy runs
129 | port: 2222
--------------------------------------------------------------------------------
/tasks/install-nsx-t/turn_off_reservation.py:
--------------------------------------------------------------------------------
1 | # To run:
2 | # python turn_off_reservation.py --host 10.40.1.206 \
3 | # --user administrator@vsphere.local \
4 | # --password 'Admin!23' \
5 | # --vm_list ip1,ip2
6 |
7 | from pyVmomi import vim
8 |
9 | from pyVim.connect import SmartConnectNoSSL, Disconnect
10 |
11 | import argparse
12 | import atexit
13 |
14 | from tools import tasks
15 |
16 | import pdb
17 |
18 |
19 | def get_args():
20 | parser = argparse.ArgumentParser(
21 | description='Arguments for talking to vCenter')
22 |
23 | parser.add_argument('-s', '--host',
24 | required=True,
25 | action='store',
26 | help='vSphere service to connect to')
27 |
28 | parser.add_argument('-o', '--port',
29 | type=int,
30 | default=443,
31 | action='store',
32 | help='Port to connect on')
33 |
34 | parser.add_argument('-u', '--user',
35 | required=True,
36 | action='store',
37 | help='User name to use')
38 |
39 | parser.add_argument('-p', '--password',
40 | required=True,
41 | action='store',
42 | help='Password to use')
43 |
44 | parser.add_argument('-v', '--vm_list',
45 | required=True,
46 | action='store',
47 | help='Comma separated list of VM identifiers')
48 |
49 | args = parser.parse_args()
50 | return args
51 |
52 |
53 | class ResourceReservationManager(object):
54 | def __init__(self):
55 | self._init_vc_view()
56 |
57 | def _init_vc_view(self):
58 | args = get_args()
59 | si = SmartConnectNoSSL(host=args.host, user=args.user,
60 | pwd=args.password, port=args.port)
61 | if not si:
62 | print("Could not connect to the specified host using specified "
63 | "username and password")
64 | return -1
65 | self.si = si
66 | atexit.register(Disconnect, si)
67 |
68 | self.content = si.RetrieveContent()
69 | self.vm_ids = []
70 | try:
71 | self.vm_ids = [vm_id.strip() for vm_id
72 | in args.vm_list.split(',') if vm_id]
73 | except Exception:
74 | print "Error parsing vm_list: %s" % args.vm_list
75 |
76 | objview = self.content.viewManager.CreateContainerView(
77 | self.content.rootFolder, [vim.VirtualMachine], True)
78 | self.vm_obj_list = objview.view
79 | objview.Destroy()
80 |
81 | def _get_vm_by_ip(self, vm_ip):
82 | try:
83 | for vm in self.vm_obj_list:
84 | for net in vm.guest.net:
85 | for ip in net.ipAddress:
86 | if ip == vm_ip:
87 | return vm
88 | except Exception as e:
89 | print e
90 | print "No VM found for %s" % vm_ip
91 |
92 | def _get_vm_by_name(self, vm_name):
93 | try:
94 | for vm in self.vm_obj_list:
95 | if vm_name.startswith(vm.name):
96 | return vm
97 | except Exception as e:
98 | print e
99 | print "No VM found for %s" % vm_name
100 |
101 | def _power_on_vm_if_off(self, vm):
102 | if format(vm.runtime.powerState) == "poweredOff":
103 | task = vm.PowerOnVM_Task()
104 | tasks.wait_for_tasks(self.si, [task])
105 |
106 | def turn_off_vm_memory_reservation(self, vm):
107 | # first check if memory reservation is >0
108 | try:
109 | if vm.config.memoryReservationLockedToMax:
110 | print "turn off memoryReservationLockedToMax"
111 | new_config = vim.VirtualMachineConfigSpec(
112 | memoryReservationLockedToMax=False)
113 | task = vm.ReconfigVM_Task(spec=new_config)
114 | tasks.wait_for_tasks(self.si, [task])
115 |
116 | if vm.resourceConfig.memoryAllocation.reservation > 0:
117 | new_allocation = vim.ResourceAllocationInfo(reservation=0)
118 | new_config = vim.VirtualMachineConfigSpec(
119 | memoryAllocation=new_allocation)
120 | task = vm.ReconfigVM_Task(spec=new_config)
121 | tasks.wait_for_tasks(self.si, [task])
122 | else:
123 | print 'resource reservation already at 0'
124 |
125 | self._power_on_vm_if_off(vm)
126 | except Exception as e:
127 | print 'unable to turn off reservation due to error: %s' % e
128 |
129 | def process(self):
130 | for vm_id in self.vm_ids:
131 | vm = self._get_vm_by_name(vm_id)
132 | if vm:
133 | # pdb.set_trace()
134 | print "Trying to turn off reservation for VM %s" % vm.name
135 | self.turn_off_vm_memory_reservation(vm)
136 | print ''
137 |
138 |
139 | if __name__ == "__main__":
140 | man = ResourceReservationManager()
141 | man.process()
142 |
--------------------------------------------------------------------------------
/doc/Homepage.md:
--------------------------------------------------------------------------------
1 | ## Homepage
2 |
3 | Welcome to the NSX-T Concourse CI Pipeline docs!
4 | This repository provides an easy-to-use automation framework that installs and configures NSX-T on vCenter clusters where PKS and/or PAS can be deployed.
5 |
6 | ### Concourse and NSX-T
7 | Concourse is an open-source CI/CD pipeline tool used by many organizations around the world. Pivotal PAS customers in particular use it not only to deliver software in an agile manner but also to perform Day 1 and Day 2 ops on the CNA platform. Sponsored by Pivotal, Concourse is the CI/CD tool of choice for working with the PCF suite of products: PAS, PKS and FAS.
8 | NSX-T is the next-generation SDN from VMware, built from the ground up for automation; it is supported by a growing number of automation tools that can be used to deploy and manage the platform. The NSX-T Pipeline was created to give our customers a simple way to deploy NSX-T end to end at the click of a button and to create a repeatable deployment process.
9 | - For more information about Concourse check out the Concourse tutorial by Stark & Wayne and Pivotal’s Concourse page https://pivotal.io/Concourse
10 | - For more information about NSX-T see https://blogs.vmware.com/networkvirtualization/2017/08/nsx-t-2-0.html/
11 |
12 | __Demo on how to use the pipeline:__
13 | [How to deploy NSX-T Datacenter CI pipeline with Concourse (YouTube)](http://www.youtube.com/watch?v=wU6FW1eC5B8)
14 |
15 | For more information on the deployment process, go to the [Deployment page](Deployment.md).
16 | For more information on the network prerequisites to satisfy before running this pipeline, go to the [Network Prerequisites page](Network-prerequisites.md).
17 |
18 |
19 |
20 | ### Pipeline configuration scope
21 |
22 | This Concourse pipeline runs a set of tasks and jobs to deploy NSX-T. You can choose to run it on an existing Concourse server, or you can use the docker image, which deploys everything for you, including the Concourse server and the pipeline itself.
23 | In the Git repository, we also provide template parameter files to create all the necessary “plumbing” to run Pivotal PAS and PKS using the deployed NSX framework.
24 | This project uses a set of Ansible scripts created by the VMware NSBU (available at https://github.com/vmware/ansible-for-nsxt); the pipeline takes the configuration parameters defined by the user in the pipeline’s parameter file and invokes the Ansible scripts accordingly (see the sketch below).
25 | Anyone can use this pipeline, either as is or by adapting its code to your own needs. The project is open source, so feel free to submit issues, feature requests, and contributions to the Git repo.
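
To make that flow concrete, the sketch below shows roughly how the parameter file is fed into Concourse. It mirrors the `fly` commands that the nsx-t-install docker image runs in its run.sh; the target and pipeline names are the defaults that script uses, and `<host_ip>` is a placeholder.

```
# Sketch only: set and start the pipeline on an existing Concourse server,
# loading the user's parameter file as the pipeline's variable source.
fly --target nsx-concourse login -u nsx -p vmware --concourse-url http://<host_ip>:8080 -n main
fly -t nsx-concourse set-pipeline -p nsx-t-install \
    -c nsx-t-datacenter-ci-pipelines/pipelines/nsx-t-install.yml \
    -l /home/concourse/nsx_pipeline_config.yml
fly -t nsx-concourse unpause-pipeline -p nsx-t-install
```
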
26 | The pipeline achieves the following tasks:
27 | - Deploys the VMware NSX-T Manager OVA image; Controller and Edge appliances are deployed using the Manager API
28 | - Configures the Controller cluster and registers the controllers with the NSX Manager
29 | - Configures host switches, uplink profiles, transport zones (VLAN and Overlay)
30 | - Configures the Edges and ESXi hosts as transport nodes
31 | - Creates T0 Router (one per run, in HA VIP mode) with an uplink and static route
32 | - Creates and configures T1 Routers with nested logical switches and ports (sample parameters file has a template for PKS and PAS)
33 | - NAT Rules setup (sample parameters file has a template for PKS and PAS)
34 | - Creates IP Pools (usually used for routed SNAT/VIP addresses) and IP Blocks (usually used for container networks)
35 | - Creates and configures load balancers
36 | - Self-signed cert generation and registration against NSX-T Manager using FQDN
37 | - Sets NSX license key
38 | - HA Spoofguard Switching Profile creation
39 |
40 | ### Unsupported configurations at the moment
41 |
42 | The following capabilities are not supported in this version of the Pipeline and are planned for future releases.
43 | * BGP – This pipeline does not yet support BGP configuration and only supports static-route deployments. If BGP is a requirement, deploy the pipeline with a static-route configuration and configure BGP manually afterwards.
44 | * Multi-rack spine-leaf configuration – If the edges and clusters are deployed in multiple racks with separate L3 domains, the pipeline currently does not support attaching different VTEP uplink profiles to different clusters and edges. To work around this, create the additional VTEP pools and uplink profiles after the pipeline has run and assign them to the clusters and edges.
45 | * Concourse is not supported on Red Hat due to kernel compatibility issues. Use Ubuntu or an equivalent distribution.
46 |
47 |
48 | ### Parameter file for the pipeline
49 |
50 | Sample configuration parameters file can be found under the [sample_parameters](../sample_parameters) folder.
51 | __NOTE__: __If using the nsx-t-install docker image, you can safely delete all the parameters with [OPTIONAL] tags if they do not apply to your particular deployment.__ The pipeline will fill in empty values for those parameters by default, or inferred values as stated in the sample parameter files.
52 | __If running the pipeline on an existing Concourse environment and not using the nsx-t-install image__, all the parameters need to be included, e.g. 'nsx_manager_virtual_ip' and 'nsx_manager_cluster_fqdn'. If you wish to leave any of those parameters unconfigured, leave the parameter stub empty:
53 | ```
54 | ...
55 | nsx_manager_username: admin
56 | nsx_manager_virtual_ip: <-- Leave empty
57 | nsx_manager_cluster_fqdn: <-- Leave empty
58 | ...
59 | nsx_t_lbr_spec: <-- Leave empty
60 | ...
61 | ```
62 | Do not delete those lines as Concourse needs to pick up those params even if they are not set.
63 | For more information on the parameter file, go to the [Parameter file page](Parameter-file.md)
64 |
65 |
66 |
67 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 | # nsx-t-datacenter-ci-pipelines
5 | This repository provides an easy-to-use automation framework that installs and configures NSX-T on vCenter clusters where PKS and/or PAS can be deployed.
6 |
7 | ## Overview
8 | Under the hood, there is a Concourse pipeline that is set up by a Docker container the user creates. The Concourse server itself runs in three Docker containers: a DB container, a worker container, and a web container.
9 |
10 | The Concourse pipeline performs the following jobs:
11 | 1. Deploy NSX manager, controllers and edges;
12 | 2. Convert hosts from vCenter clusters specified by user to NSX transport nodes;
13 | 3. Create NSX logical resources to make the environment PAS/PKS deployment ready.
14 |
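These correspond to the install-nsx-t, add-nsx-t-routers, and config-nsx-t-extras jobs defined in pipelines/nsx-t-install.yml. Once the pipeline is set, the jobs can also be triggered from the fly CLI; a minimal sketch, assuming the default target and pipeline names created by the docker image:

```
# Sketch only: trigger the jobs by hand instead of clicking in the web UI.
# In the full-install group the two downstream jobs also trigger automatically.
fly -t nsx-concourse trigger-job -j nsx-t-install/install-nsx-t
fly -t nsx-concourse trigger-job -j nsx-t-install/add-nsx-t-routers
fly -t nsx-concourse trigger-job -j nsx-t-install/config-nsx-t-extras
```
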
15 | __For the full documentation see the [doc/](./doc) folder for this repository__
16 |
17 | ## Try it out
18 | On an Ubuntu VM with at least ~30 GB of free disk space:
19 | ```
20 | wget https://github.com/vmware/nsx-t-datacenter-ci-pipelines/raw/master/docker_image/nsx-t-install-250.tar -O nsx-t-install.tar
21 | docker load -i nsx-t-install.tar
22 | mkdir -p /home/concourse
23 | ```
24 | Create nsx_pipeline_config.yml based on a sample config file, e.g. https://github.com/vmware/nsx-t-datacenter-ci-pipelines/blob/master/sample_parameters/PAS_only/nsx_pipeline_config.yml for PAS environment, and place it under /home/concourse.
25 |
26 | ```
27 | docker run --name nsx-t-install -d \
28 | -v /var/run/docker.sock:/var/run/docker.sock \
29 | -v /home/concourse:/home/concourse \
30 | -e CONCOURSE_URL='http://<host_ip>:8080' \
31 | -e EXTERNAL_DNS='<dns_server_ip>' \
32 | -e IMAGE_WEBSERVER_PORT=40001 \
33 | -e VMWARE_USER='<myvmware_username>' \
34 | -e VMWARE_PASSWORD='<myvmware_password>' \
35 | nsx-t-install
36 | ```
37 | Set CONCOURSE_URL to http://<host_ip>:8080, where host_ip is the IP address of the primary NIC of the VM running the container (example: 10.85.99.130); it is not the loopback address. Set EXTERNAL_DNS to the DNS server (it should be able to resolve the vCenter hostname as well as public names, e.g. github.com), and IMAGE_WEBSERVER_PORT to the port number provided in the nsx_pipeline_config.yml parameter nsx_image_webserver (recommendation: 40001).
38 |
39 | The above command will automatically download the ovftool (e.g. VMware-ovftool-4.3.0-xxxxxxx-lin.x86_64.bundle) and NSX OVA (nsx-unified-appliance-2.4.0.0.0.xxxxxxx.ova) files from myvmware.com. If you have already downloaded the two files manually, place them under /home/concourse and run the above command with VMWARE_USER and VMWARE_PASSWORD omitted (see the sketch below). By default, the docker image from the master/nsxt_2.4.0 branch downloads NSX OVA version 2.4.0. If deploying an earlier version (e.g. NSX-T 2.3.0), simply add `` -e NSXT_VERSION=2.3.0 `` to the docker run command above, or use the docker image from the nsxt_2.3.0 branch.
40 |
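For example, if the OVA and ovftool bundle are already under /home/concourse, the run command can simply omit the myvmware credentials; a sketch (with `<host_ip>` and `<dns_server_ip>` as placeholders):

```
docker run --name nsx-t-install -d \
  -v /var/run/docker.sock:/var/run/docker.sock \
  -v /home/concourse:/home/concourse \
  -e CONCOURSE_URL='http://<host_ip>:8080' \
  -e EXTERNAL_DNS='<dns_server_ip>' \
  -e IMAGE_WEBSERVER_PORT=40001 \
  nsx-t-install
```
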
41 | ---
42 | __If running the pipeline on existing concourse environment and not using the nsx-t-install image, please perform following additional steps:__ in nsx_pipeline_config.yml that was created under /home/concourse, add the following two lines at the beginning, depending on which NSX-T version you are deploying:
43 |
44 | | NSX-T 2.3.0 & earlier | NSX-T 2.4.0 | NSX-T 2.5.0 |
45 | |:----------------------:|:---------------:|:-------------:|
46 | | nsxt_ansible_branch=v1.0.0 | nsxt_ansible_branch=master | nsxt_ansible_branch=dev |
47 | | nsx_t_pipeline_branch=nsxt_2.3.0 | nsx_t_pipeline_branch=nsxt_2.4.0 | nsx_t_pipeline_branch=master |
48 |
49 | Also, if ovftool and ova files were downloaded manually, add ``ova_file_name=`` and ``ovftool_file_name=`` in nsx_pipeline_config.yml as well.
50 | Ignore this if you are using the docker image provided in this repository.
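
For instance, for an NSX-T 2.5.0 deployment on an existing Concourse with manually downloaded files, the added lines might look like the sketch below. It assumes the parameter file keeps the YAML `key: value` form used by the sample parameter files (and by the internal config that run.sh writes); the file names are placeholders.

```
nsx_t_pipeline_branch: master
nsxt_ansible_branch: dev
ova_file_name: nsx-unified-appliance-2.5.0.0.0.xxxxxxx.ova          # only if downloaded manually
ovftool_file_name: VMware-ovftool-4.3.0-xxxxxxx-lin.x86_64.bundle   # only if downloaded manually
```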
51 |
52 | ---
53 |
54 | Browse to the Concourse pipeline at http://<host_ip>:8080/teams/main/pipelines/install-nsx-t/ (example: http://10.85.99.130:8080/teams/main/pipelines/install-nsx-t/) and click on the plus sign in the upper right corner to trigger a build that installs NSX-T. If you are prompted for a username and password, use 'nsx' and 'vmware'.
55 |
56 | Check out the [Troubleshooting Guide](./doc/Utilities-and-troubleshooting.md) for troubleshooting tips.
57 |
58 | ## Contributing
59 |
60 | The nsx-t-datacenter-ci-pipelines project team welcomes contributions from the community. Before you start working with nsx-t-datacenter-ci-pipelines, please read our [Developer Certificate of Origin](https://cla.vmware.com/dco). All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch. For more detailed information, refer to [CONTRIBUTING.md](CONTRIBUTING.md).
61 |
62 | ## License
63 | NSX-T-Data-Center-CI-Pipeline-with-Concourse
64 |
65 | Copyright (c) 2018 VMware, Inc.
66 |
67 | The MIT license (the “License”) set forth below applies to all parts of the NSX-T-Data-Center-CI-Pipeline-with-Concourse project. You may not use this file except in compliance with the License.
68 |
69 | MIT License
70 |
71 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do
72 | so, subject to the following conditions:
73 |
74 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
75 |
76 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
77 |
--------------------------------------------------------------------------------
/doc/Network-prerequisites.md:
--------------------------------------------------------------------------------
1 | ## Network Prerequisites
2 |
3 | ### NSX-T Physical Network Requirements
4 |
5 | __Note__: The requirements below are suitable for a basic deployment of NSX-T using the pipeline. For more elaborate guidance on design and deployment at scale and on different topologies, please refer to the official NSX-T design guide: https://communities.vmware.com/docs/DOC-37591
6 |
7 | The following are the requirements for deploying NSX-T for PAS/PKS using the pipeline:
8 |
9 | Make sure you have vCenter and clusters set up according to the requirements of the workload you plan to run. Usually, for PAS and PKS, you will need a minimum of a management cluster and a payload cluster. Production environments require a minimum of 3 clusters for availability purposes.
10 |
11 | NSX-T has its own requirements, such as resources and network configuration, that are detailed here: https://docs.vmware.com/en/VMware-NSX-T/2.2/nsxt_22_install.pdf
12 | For this guide, the assumption is that NSX-T is deployed for PKS, PAS, or both in a NATed configuration, which allows for multiple deployments with the same IP scheme. If a routed configuration is required, you will need to configure the parameter file with the required routed network configuration each time this pipeline is deployed.
13 |
14 | ### 1. Physical Network Configuration
15 |
16 | | # | Network Configuration | Description|
17 | |-------------|------------|------------|
18 | | 1.1 | Jumbo frames | The physical switches that the ESXi servers with NSX installed are connected to must support jumbo frames with a minimum MTU of 1600. The vDS port groups where the edges connect also need to support 1600 MTU (see the next section regarding the vDS for edges). |
19 | | 1.2 | Management VLAN | This VLAN will be used for the NSX Manager, NSX controllers and NSX edge management interfaces. This network is provided as regular vDS port groups, usually on a management cluster. |
20 | | 1.3 | Management IPs | The following IPs are required on the management VLAN: 1. one IP for the NSX Manager; 2. an IP for each controller (1 or 3); 3. an IP for each edge management interface. |
21 | | 1.4 | Uplink VLAN | This VLAN will be used by the edge VMs to communicate with the next-hop switch. It can be the same VLAN as the management VLAN or a separate one. If the edges are deployed in different racks with different VLANs, more than one VLAN is required. |
22 | | 1.5 | Edge Uplink IPs | Each edge will require an IP on the uplink VLAN (Tier 0 configuration). An additional IP is required for the HA VIP floating IP, for a total of 3 IPs on the uplink network. (BGP is not yet supported by the pipeline.) |
23 | | 1.6 | Transport VLAN/s | Used by the ESXi servers where the workloads will run and by the edges. The ESXi servers and edges will establish an overlay tunnel using this VLAN. If deployed in a spine-leaf configuration this will be multiple VLANs (not supported by the pipeline). |
24 | | 1.7 | VTEP IP pool | The ESXi servers and edges get the IP used to create the tunnel on the transport VLAN from this pool. The VTEP IP pool or pools (depending on the number of transport networks) need to provide connectivity between all ESXi servers and edges on the transport network. |
25 | | 1.8 | Routing – option 1 – static route | On the physical switches that are the next hop from the edges, set up a static route. This static route will point to the Tier 0 IP to reach the SNAT/VIP subnet (the T0 IP is specified in row #1.5 above; the SNAT/VIP subnet is specified in row #2.5). A worked example follows this table. |
26 | | Option 2 | Proxy ARP | Starting from version 2.4, NSX-T supports "Proxy ARP" on T0 (defined as the edge network in 1.4). With this feature, NAT, LB or any stateful service can be configured with an IP address that belongs to the network of the Tier 0 uplink, and no additional routed network, static route or BGP is required for it. |
27 | | 1.9 | Network Card | Each ESXi server that will have logical networks configured needs unused NICs that will be claimed by the NSX N-VDS. You can use the same NICs for both ESXi services and NSX-T; if you plan to use only 2 NICs for everything, you will need to set up an N-VDS on one of the NICs and migrate the ESXi services to it. |
28 |
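As a worked example of option 1 (row 1.8), using addresses from the sample parameter files (PKS SNAT/VIP pool 10.208.50.0/24, Tier 0 HA VIP 192.168.100.3): if the next hop happened to be a Linux router, the static route would look like the sketch below; on a physical switch, use its equivalent static-route command.

```
# Illustration only – addresses taken from the sample parameter files:
# reach the routable SNAT/VIP pool via the Tier 0 HA VIP on the uplink VLAN.
ip route add 10.208.50.0/24 via 192.168.100.3
```
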
29 | The following steps are required for setting up NSX for PAS and/or PKS.
30 | ### 2. PAS Subnets
31 |
32 | | # | PAS Subnets | IP Subnet |
33 | |-------------|------------|------------|
34 | |2.1|PAS Infra subnet|Non-routable (NAT) or routable. Used for BOSH and Ops Manager. e.g. 192.168.10.0/24|
35 | |2.2|PAS ERT subnet|Non-routable (NAT) or routable. Used by the ERT components (GoRouters, UAA, DB etc.). e.g. 192.168.20.0/24|
36 | |2.3|PAS Services subnet|Non-routable (NAT) or routable. Subnet to be used by the PAS services (such as MySQL). e.g. 192.168.30.0/24|
37 | |2.4|Container IP block|Non-routable (NAT) or routable IP block used to carve out subnets for the application-instance (AI) networks. The size will depend on the number of containers to be deployed. e.g. PAS containers block – 172.16.0.0/16|
38 | |2.5|SNAT/VIP IP pool|Routable, used in NATed deployments. A routable subnet to be used for the NAT addresses and LB VIPs. The static route (#1.8) will point to this network through the T0 HA VIP IP, or this network could be part of the T0 network if Proxy ARP is used. e.g. SNAT pool for PAS 10.208.40.0/24|
39 |
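These subnets surface directly in the pipeline’s parameter file; for example, rows 2.1 and 2.5 correspond to the following excerpt from sample_parameters/raw/pas_pks.yml:

```
nsx_t_t1router_logical_switches_spec: |
  t1_routers:
  - name: T1-Router-PAS-Infra
    switches:
    - name: PAS-Infra
      logical_switch_gw: 192.168.10.1   # gateway inside the PAS Infra subnet (row 2.1)
      subnet_mask: 24

nsx_t_external_ip_pool_spec: |
  external_ip_pools:
  - name: snat-vip-pool-for-pas
    cidr: 10.208.40.0/24                # SNAT/VIP pool (row 2.5)
    start: 10.208.40.10
    end: 10.208.40.200
```
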
40 | ### 3. PKS Subnets
41 | | # | PKS Subnets | IP Subnet |
42 | |-------------|------------|------------|
43 | |3.1|PKS Infra subnet|Non-routable (NAT) or routable. Used for BOSH and Ops Manager. e.g. 192.168.50.0/24|
44 | |3.2|PKS Services subnet|Non-routable (NAT) or routable. Used for the PKS API and Harbor. e.g. 192.168.60.0/24|
45 | |3.3|Node IP block|Non-routable IP block used to carve out subnets for the dynamically created node networks (where the Kubernetes masters and workers are). The size depends on the number of clusters to be deployed; a /24 is taken for each cluster, so the block should be at least a /16. e.g. PKS node block – 172.14.0.0/16|
46 | |3.4|Container IP block|Non-routable IP block used to carve out subnets for the dynamically created namespace networks. The size depends on the number of containers to be deployed; a /24 is taken for each namespace, so the block should be at least a /16. e.g. PKS containers block – 172.15.0.0/16|
47 | |3.5|SNAT/VIP IP pool|Routable, used in NATed deployments. A routable subnet to be used for the NAT addresses and LB VIPs. The static route (#1.8) will point to this network through the T0 HA VIP IP (#1.5), or this network could be part of the T0 network if Proxy ARP is used. e.g. SNAT pool for PKS 10.208.50.0/24|
48 |
49 |
50 | ### 4. DNS Records
51 | Required for later deployment of PKS and/or PAS
52 |
53 | |#| DNS Record| Attribute|
54 | |-------------|------------|------------|
55 | |4.1|NSX-T manager|The NSX-T manager FQDN pointing to its management IP|
56 | |4.2|PAS Operations manager| DNS record pointing to the PAS operations manager DNAT IP (If routed direct IP)|
57 | |4.3|*.apps.fqdn|The record for the apps wildcard name in PAS pointing to the GoRouters LB VIP IP |
58 | |4.4|*.sys.fqdn|The record for the system wildcard name in PAS pointing to the GoRouters LB VIP IP|
59 | |4.5|PKS Operations manager|Record pointing to PKS operations manager DNAT IP (If routed direct IP)|
60 | |4.6|PKS UAA service|Record pointing to PKS UAA DNAT IP e.g. uaa.pks (If routed direct IP)|
61 | |4.7|PKS API service|Record pointing to PKS API Service DNAT IP e.g. api.pks (If routed direct IP)|
62 |
63 | Note – the IPs for the PKS UAA (4.6) and PKS API (4.7) records are currently the same.
64 |
65 |
66 |
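To illustrate, for a NATed PAS deployment using the sample parameter file addresses (Ops Manager DNAT 10.208.40.2, GoRouter LB VIP 10.208.40.4), the records might look like the sketch below; the hostnames are hypothetical and should be replaced with your own domain names.

```
; Illustration only – BIND-style records with hypothetical names
opsmgr.pas.corp.local.   IN A   10.208.40.2     ; PAS Ops Manager DNAT IP (4.2)
*.apps.pas.corp.local.   IN A   10.208.40.4     ; apps wildcard -> GoRouter LB VIP (4.3)
*.sys.pas.corp.local.    IN A   10.208.40.4     ; system wildcard -> GoRouter LB VIP (4.4)
nsxt-mgr.corp.local.     IN A   192.168.110.33  ; NSX-T Manager management IP (4.1)
```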
--------------------------------------------------------------------------------
/sample_parameters/raw/pas_pks.yml:
--------------------------------------------------------------------------------
1 | ### [OPTIONAL] For all the configs below
2 | nsx_t_t1router_logical_switches_spec: |
3 | t1_routers:
4 | # Add additional T1 Routers or collapse switches into same T1 Router as needed
5 | # Remove unneeded T1 routers
6 |
7 | - name: T1-Router-PAS-Infra
8 | switches:
9 | - name: PAS-Infra
10 | logical_switch_gw: 192.168.10.1 # Last octet should be 1 rather than 0
11 | subnet_mask: 24
12 |
13 | - name: T1-Router-PAS-ERT
14 | switches:
15 | - name: PAS-ERT
16 | logical_switch_gw: 192.168.20.1 # Last octet should be 1 rather than 0
17 | subnet_mask: 24
18 | edge_cluster: true
19 |
20 | - name: T1-Router-PAS-Services
21 | switches:
22 | - name: PAS-Services
23 | logical_switch_gw: 192.168.30.1 # Last octet should be 1 rather than 0
24 | subnet_mask: 24
25 |
26 |
27 | # Comment off the following T1 Routers if there is no PKS
28 | - name: T1-Router-PKS-Infra
29 | switches:
30 | - name: PKS-Infra
31 | logical_switch_gw: 192.168.50.1 # Last octet should be 1 rather than 0
32 | subnet_mask: 24
33 |
34 | - name: T1Router-PKS-Services
35 | switches:
36 | - name: PKS-Services
37 | logical_switch_gw: 192.168.60.1 # Last octet should be 1 rather than 0
38 | subnet_mask: 24
39 |
40 |
41 | nsx_t_ha_switching_profile_spec: |
42 | ha_switching_profiles:
43 | - name: HASwitchingProfile
44 |
45 |
46 | nsx_t_container_ip_block_spec: |
47 | container_ip_blocks:
48 | - name: PAS-container-ip-block
49 | cidr: 10.4.0.0/16
50 |
51 | - name: PKS-node-ip-block
52 | cidr: 11.4.0.0/16
53 |
54 | - name: PKS-pod-ip-block
55 | cidr: 12.4.0.0/16
56 |
57 |
58 | nsx_t_external_ip_pool_spec: |
59 | external_ip_pools:
60 | - name: snat-vip-pool-for-pas
61 | cidr: 10.208.40.0/24
62 | start: 10.208.40.10 # Should not include gateway
63 | end: 10.208.40.200 # Should not include gateway
64 |
65 | - name: snat-vip-pool-for-pks
66 | cidr: 10.208.50.0/24
67 | start: 10.208.50.10 # Should not include gateway
68 | end: 10.208.50.200 # Should not include gateway
69 |
70 | - name: tep-ip-pool2
71 | cidr: 192.168.220.0/24
72 | start: 192.168.220.10
73 | end: 192.168.220.200
74 |
75 |
76 | nsx_t_nat_rules_spec: |
77 | nat_rules:
78 | # Sample entry for allowing inbound to PAS Ops manager
79 | - t0_router: DefaultT0Router
80 | nat_type: dnat
81 | destination_network: 10.208.40.2 # External IP address for PAS opsmanager
82 | translated_network: 192.168.10.2 # Internal IP of PAS Ops manager
83 | rule_priority: 1024 # Higher priority
84 |
85 | # Sample entry for allowing outbound from PAS Ops Mgr to external
86 | - t0_router: DefaultT0Router
87 | nat_type: snat
88 | source_network: 192.168.10.2 # Internal IP of PAS opsmanager
89 | translated_network: 10.208.40.2 # External IP address for PAS opsmanager
90 | rule_priority: 1024 # Higher priority
91 |
92 | # Sample entry for PAS Infra network SNAT
93 | - t0_router: DefaultT0Router
94 | nat_type: snat
95 | source_network: 192.168.10.0/24 # PAS Infra network cidr
96 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
97 | rule_priority: 8000 # Lower priority
98 |
99 | # Sample entry for PAS ERT network SNAT
100 | - t0_router: DefaultT0Router
101 | nat_type: snat
102 | source_network: 192.168.20.0/24 # PAS ERT network cidr
103 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
104 | rule_priority: 8000 # Lower priority
105 |
106 | # Sample entry for PAS Services network SNAT
107 | - t0_router: DefaultT0Router
108 | nat_type: snat
109 | source_network: 192.168.30.0/24 # PAS Services network cidr
110 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
111 | rule_priority: 8001 # Lower priority
112 |
113 |
114 | # Sample entry for PKS-Services network
115 | - t0_router: DefaultT0Router
116 | nat_type: snat
117 | source_network: 192.168.60.0/24 # PKS Clusters network cidr
118 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
119 | rule_priority: 8001 # Lower priority
120 |
121 | # Sample entry for PKS-Infra network
122 | - t0_router: DefaultT0Router
123 | nat_type: snat
124 | source_network: 192.168.50.0/24 # PKS Infra network cidr
125 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
126 | rule_priority: 8001 # Lower priority
127 |
128 | # Sample entry for allowing inbound to PKS Ops manager
129 | - t0_router: DefaultT0Router
130 | nat_type: dnat
131 | destination_network: 10.208.50.2 # External IP address for PKS opsmanager
132 | translated_network: 192.168.50.2 # Internal IP of PKS Ops manager
133 | rule_priority: 1024 # Higher priority
134 |
135 | # Sample entry for allowing outbound from PKS Ops Mgr to external
136 | - t0_router: DefaultT0Router
137 | nat_type: snat
138 |     source_network: 192.168.50.2 # Internal IP of PKS Ops manager
139 |     translated_network: 10.208.50.2 # External IP address for PKS opsmanager
140 | rule_priority: 1024 # Higher priority
141 |
142 | # Sample entry for allowing inbound to PKS Controller
143 | - t0_router: DefaultT0Router
144 | nat_type: dnat
145 |     destination_network: 10.208.50.4 # External IP address for PKS controller
146 |     translated_network: 192.168.60.2 # Internal IP of PKS controller
147 | rule_priority: 1024 # Higher priority
148 |
149 | # Sample entry for allowing outbound from PKS Controller to external
150 | - t0_router: DefaultT0Router
151 | nat_type: snat
152 | source_network: 192.168.60.2 # Internal IP of PKS controller
153 | translated_network: 10.208.50.4 # External IP address for PKS controller
154 | rule_priority: 1024 # Higher priority
155 |
156 |
157 | nsx_t_csr_request_spec: |
158 | csr_request:
159 | #common_name not required - would use nsx_t_manager_host_name
160 | org_name: Company # EDIT
161 | org_unit: net-integ # EDIT
162 | country: US # EDIT
163 | state: CA # EDIT
164 | city: SF # EDIT
165 | key_size: 2048 # Valid values: 2048 or 3072
166 | algorithm: RSA # Valid values: RSA or DSA
167 |
168 |
169 | nsx_t_lbr_spec: |
170 | loadbalancers:
171 | # Sample entry for creating LBR for PAS ERT
172 | - name: PAS-ERT-LBR
173 | t1_router: T1-Router-PAS-ERT # Should match a previously declared T1 Router
174 | size: small # Allowed sizes: small, medium, large
175 | virtual_servers:
176 | - name: goRouter443 # Name that signifies function being exposed
177 | vip: 10.208.40.4 # Exposed VIP for LBR to listen on
178 | port: 443
179 | members:
180 | - ip: 192.168.20.11 # Internal ip of GoRouter instance 1
181 | port: 443
182 | - name: goRouter80
183 | vip: 10.208.40.4
184 | port: 80
185 | members:
186 | - ip: 192.168.20.31 # Internal ip of GoRouter instance 1
187 | port: 80
188 | - ip: 192.168.20.32 # Internal ip of GoRouter instance 2
189 | port: 80
190 | - name: sshProxy # SSH Proxy exposed to outside
191 | vip: 10.208.40.5
192 | port: 2222 # Port 2222 for ssh proxy
193 | members:
194 | - ip: 192.168.20.41 # Internal ip of Diego Brain where ssh proxy runs
195 | port: 2222
--------------------------------------------------------------------------------
/functions/create_hosts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function create_manager_host {
4 | # Outer parenthesis converts string to an array
5 | nsx_manager_ips=($(echo "$nsx_manager_ips_int" | sed -e 's/,/ /g'))
6 | manager_ip=${nsx_manager_ips[0]}
7 | manager_hostname="${nsx_manager_hostname_prefix_int}-1"
8 | # The following need to be placed under [localhost:vars] section
9 | cat >> manager_host <<-EOF
10 |
11 | nsx_manager_ip="$manager_ip"
12 | nsx_license_key="$nsx_license_key_int"
13 | nsx_manager_username="$nsx_manager_username_int"
14 | nsx_manager_password="$nsx_manager_password_int"
15 | nsx_manager_assigned_hostname="$manager_hostname"
16 | nsx_manager_root_pwd="$nsx_manager_root_pwd_int"
17 | nsx_manager_cli_pwd="$nsx_manager_cli_pwd_int"
18 | nsx_manager_deployment_size="$nsx_manager_deployment_size_int"
19 | EOF
20 | }
21 |
22 | function create_controller_hosts {
23 | nsx_manager_ips=($(echo "$nsx_manager_ips_int" | sed -e 's/,/ /g'))
24 | num_controllers=${#nsx_manager_ips[@]}
25 | if [[ $num_controllers -lt 2 ]]; then
26 | echo "No additional controller-manager specified."
27 | return
28 | fi
29 |
30 | # ip_mask_fields=($(echo "$netmask_int" | sed -e 's/\./ /g'))
31 | # prefix_length=0
32 | # for ip_mask_field in ${ip_mask_fields[*]}; do
33 | # prefix_length=$(( prefix_length + $(echo "obase=2;${ip_mask_field}" | bc | tr -cd '1' | wc -c) ))
34 | # done
35 |
36 | echo "[controllers]" > ctrl_vms
37 | for ((i=1;i<$num_controllers;++i)); do
38 | controller_ip=${nsx_manager_ips[i]}
39 | count=$((i+1))
40 | hostname="${nsx_manager_hostname_prefix_int}-${count}.${dns_domain_int}"
41 | controller_host="controller-${count} ip=${controller_ip} hostname=${hostname}"
42 | echo "$controller_host" >> ctrl_vms
43 | done
44 |
45 | cat >> ctrl_vms <<-EOF
46 | [controllers:vars]
47 | prefix_length="${nsx_manager_deployment_ip_prefix_length_int}"
48 | default_gateway="${default_gateway_int}"
49 | EOF
50 |
51 | }
52 |
53 | # TODO: update this with params from https://github.com/yasensim/nsxt-ansible/blob/master/answerfile.yml
54 | function create_edge_hosts {
55 | echo "[edge_nodes]" > edge_vms
56 | edge_ips_int=($(echo "$edge_ips_int" | sed -e 's/,/ /g'))
57 | per_edge_params=("edge_deployment_size_int" "vc_datacenter_for_edge_int" "vc_cluster_for_edge_int"
58 | "vc_datastore_for_edge_int" "vc_uplink_network_for_edge_int"
59 | "vc_overlay_network_for_edge_int" "vc_management_network_for_edge_int")
60 |
61 | num_edges=${#edge_ips_int[@]}
62 |
63 | for ((i=0;i<$num_edges;++i)); do
64 | edge_ip=${edge_ips_int[i]}
65 | count=$((i+1))
66 | hostname="${edge_hostname_prefix_int}-${count}.${dns_domain_int}"
67 | edge_host="edge-${count} ip=$edge_ip hostname=${hostname} default_gateway=$edge_default_gateway_int prefix_length=$edge_ip_prefix_length_int transport_node_name=${edge_transport_node_prefix_int}-${count}"
68 | # for param in "${per_edge_params[@]}"; do
69 | # # test if a single value is provided or a list is
70 | # param_val=($(echo "${!param}" | sed -e 's/,/ /g'))
71 | # if [[ ${#param_val[@]} -gt 1 && ${#param_val[@]} -eq ${#edge_ips_int[@]} ]]; then
72 | # edge_host="${edge_host} ${param::-4}=${param_val[i]}"
73 | # fi
74 | # done
75 | echo "$edge_host" >> edge_vms
76 | done
77 |
78 | cat >> edge_vms <<-EOF
79 | [edge_nodes:vars]
80 | edge_cli_password="$edge_cli_password_int"
81 | edge_root_password="$edge_root_password_int"
82 | EOF
83 |
84 | for param in "${per_edge_params[@]}"; do
85 | # param_val=($(echo "${!param}" | sed -e 's/,/ /g'))
86 | param_val="${!param}"
87 | # if [[ ${#param_val[@]} -eq 1 ]]; then
88 | echo "${param::-4}=${param_val}" >> edge_vms
89 | # fi
90 | done
91 | }
92 |
93 | function create_esx_hosts {
94 | count=1
95 | echo "[esx_hosts]" > esx_hosts
96 | for esx_ip in $(echo "$esx_ips_int" | sed -e 's/,/ /g')
97 | do
98 | hostname="${esx_hostname_prefix_int}-${count}.${dns_domain_int}"
99 | cat >> esx_hosts <<-EOF
100 | esx-host-${count} ansible_host=$esx_ip ansible_user=root ansible_ssh_pass=$esx_root_password_int ip=$esx_ip fabric_node_name=esx-fabric-${count} transport_node_name=esx-transp-${count} hostname=${hostname}
101 | EOF
102 | (( count++ ))
103 | done
104 |
105 | cat >> esx_hosts <<-EOF
106 | [esx_hosts:vars]
107 | esx_os_version=${esx_os_version_int}
108 | EOF
109 | }
110 |
111 | function set_list_var_and_strip_whitespaces {
112 | list_var_value=${!1}
113 | if [[ $list_var_value == "" || $list_var_value == "null" ]]; then
114 | return
115 | fi
116 | list_var_value=$(echo $list_var_value | sed '
117 | s/^ *// # remove leading whitespace
118 | s/ *$// # remove trailing whitespace
119 | s/ *,/,/g # remove whitespace before commas
120 | s/, */,/g # remove whitespace after commas
121 | s/,/","/g # put quotes around
122 | s/.*/["&"]/ # bracket & quotes around everything')
123 | echo "${1::-4}=$list_var_value" >> $2
124 | }
125 |
126 | function create_hosts {
127 |
128 | # TODO: set nsx manager fqdn
129 | export NSX_T_MANAGER_SHORT_HOSTNAME=$(echo "$NSX_T_MANAGER_FQDN" | awk -F '\.' '{print $1}')
130 | #apt -qq install bc
131 |
132 | cat > hosts <<-EOF
133 | [localhost]
134 | localhost ansible_connection=local
135 |
136 | [localhost:vars]
137 | vcenter_ip="$vcenter_ip_int"
138 | vcenter_username="$vcenter_username_int"
139 | vcenter_password="$vcenter_password_int"
140 | vcenter_datacenter="$vcenter_datacenter_int"
141 | vcenter_cluster="$vcenter_cluster_int"
142 | vcenter_datastore="$vcenter_datastore_int"
143 | mgmt_portgroup="$mgmt_portgroup_int"
144 |
145 | vc_datacenter_for_deployment="$vcenter_datacenter_int"
146 | vc_cluster_for_deployment="$vcenter_cluster_int"
147 | vc_datastore_for_deployment="$vcenter_datastore_int"
148 | vc_management_network_for_deployment="$mgmt_portgroup_int"
149 |
150 | dns_server="$dns_server_int"
151 | dns_domain="$dns_domain_int"
152 | ntp_servers="$ntp_servers_int"
153 | default_gateway="$default_gateway_int"
154 | netmask="$netmask_int"
155 | nsx_image_webserver="$nsx_image_webserver_int"
156 | ova_file_name="$ova_file_name_int"
157 |
158 | compute_manager_username="$compute_manager_username_int"
159 | compute_manager_password="$compute_manager_password_int"
160 | edge_uplink_profile_vlan="$edge_uplink_profile_vlan_int"
161 | esxi_uplink_profile_vlan="$esxi_uplink_profile_vlan_int"
162 | vtep_ip_pool_cidr="$vtep_ip_pool_cidr_int"
163 | vtep_ip_pool_gateway="$vtep_ip_pool_gateway_int"
164 | vtep_ip_pool_start="$vtep_ip_pool_start_int"
165 | vtep_ip_pool_end="$vtep_ip_pool_end_int"
166 |
167 | tier0_router_name="$tier0_router_name_int"
168 | tier0_uplink_port_ip="$tier0_uplink_port_ip_int"
169 | tier0_uplink_port_subnet="$tier0_uplink_port_subnet_int"
170 | tier0_uplink_next_hop_ip="$tier0_uplink_next_hop_ip_int"
171 |
172 | resource_reservation_off="$resource_reservation_off_int"
173 | nsx_manager_ssh_enabled="$nsx_manager_ssh_enabled_int"
174 | unified_appliance="$unified_appliance_int"
175 | EOF
176 |
177 | if [[ $unified_appliance_int == "true" ]]; then
178 | echo "nsx_manager_role=NSX Manager" >> hosts
179 | else
180 | echo "nsx_manager_role=nsx-manager" >> hosts
181 | fi
182 |
183 | create_manager_host
184 | cat manager_host >> hosts
185 |
186 | set_list_var_and_strip_whitespaces esx_available_vmnic_int hosts
187 | set_list_var_and_strip_whitespaces clusters_to_install_nsx_int hosts
188 | set_list_var_and_strip_whitespaces per_cluster_vlans_int hosts
189 |
190 | optional_params=("tier0_ha_vip_int" "tier0_uplink_port_ip_2_int" "compute_manager_2_username_int"
191 | "compute_manager_2_password_int" "compute_manager_2_vcenter_ip_int")
192 | for param in "${optional_params[@]}"; do
193 | param_val="${!param}"
194 | if [[ $param_val != "" && $param_val != "null" ]]; then
195 | echo "${param::-4}=${param_val}" >> hosts
196 | fi
197 | done
198 |
199 | create_edge_hosts
200 | create_controller_hosts
201 |
202 | if [[ -f ctrl_vms ]]; then
203 | cat ctrl_vms >> hosts
204 | echo "" >> hosts
205 | rm ctrl_vms
206 | fi
207 | cat edge_vms >> hosts
208 |
209 | rm manager_host edge_vms
210 |
211 | if [[ $esx_ips_int != "" && $esx_ips_int != "null" ]]; then
212 | create_esx_hosts
213 | echo "" >> hosts
214 | cat esx_hosts >> hosts
215 | rm esx_hosts
216 | fi
217 |
218 | }
219 |
--------------------------------------------------------------------------------
/sample_parameters/PKS_only/nsx_pipeline_config.yml:
--------------------------------------------------------------------------------
1 | ### Configs marked OPTIONAL below can be removed from the param file
2 | ### if they are N/A or not desired.
3 |
4 | ### General settings
5 | enable_ansible_debug: false # set value to true for verbose output from Ansible
6 | # format: "http://:40001"
7 | nsx_image_webserver: "http://192.168.110.11:40001"
8 |
9 | ### NSX general network settings
10 | mgmt_portgroup: 'ESXi-RegionA01-vDS-COMP'
11 | dns_server: 192.168.110.10
12 | dns_domain: corp.local.io
13 | ntp_servers: time.vmware.com
14 | default_gateway: 192.168.110.1
15 | netmask: 255.255.255.0
16 |
17 | ### NSX manager cluster configs
18 | # Three node cluster is recommended. 1 is minimum, 3 is max
19 | nsx_manager_ips: 192.168.110.33,192.168.110.34,192.168.110.35 # Manager IPs.
20 | nsx_manager_username: admin
21 | nsx_manager_password: Admin!23Admin
22 | nsx_manager_hostname_prefix: "nsxt-mgr" # Min 12 chars, upper, lower, number, special digit
23 | nsx_manager_virtual_ip: 192.168.110.36 # [OPTIONAL] Virtual IP as the access IP for the manager cluster
24 | # FQDN is required if virtual IP is configured
25 | nsx_manager_cluster_fqdn: corp.local.io # [OPTIONAL] FQDN for the manager, will be used to generate cert for VIP
26 | nsx_license_key: 11111-22222-33333-44444-55555
27 | nsx_manager_root_pwd: Admin!23Admin # [OPTIONAL] Defaults to nsx_manager_password if not set
28 | nsx_manager_cli_pwd: Admin!23Admin # [OPTIONAL] Defaults to nsx_manager_password if not set
29 | nsx_manager_deployment_size: small # Recommended for real bare-bones demo, smallest setup
30 | nsx_manager_deployment_ip_prefix_length: 23
31 | nsx_manager_ssh_enabled: true
32 | vcenter_ip: 192.168.110.22
33 | vcenter_username: administrator@corp.local
34 | vcenter_password: "VMware1!"
35 | vcenter_datacenter: RegionA01
36 | vcenter_cluster: RegionA01-MGMT
37 | vcenter_datastore: iscsi
38 | resource_reservation_off: true
39 |
40 | # Compute manager credentials should be the same as above vCenter's if
41 | # controllers and edges are to be on the same vCenter
42 | compute_manager_username: "Administrator@vsphere.local" # [OPTIONAL] Defaults to vcenter_username if not set
43 | compute_manager_password: "VMware1!" # [OPTIONAL] Defaults to vcenter_password if not set
44 | # compute manager for the compute cluster (2nd vCenter)
45 | compute_manager_2_vcenter_ip: "null" # [OPTIONAL]
46 | compute_manager_2_username: "null" # [OPTIONAL]
47 | compute_manager_2_password: "null" # [OPTIONAL]
48 |
49 | edge_uplink_profile_vlan: 0 # For outbound uplink connection used by Edge, usually keep as 0
50 | esxi_uplink_profile_vlan: 0 # For internal overlay connection used by ESXi hosts, usually transport VLAN ID
51 |
52 | # Virtual Tunnel Endpoint network ip pool
53 | vtep_ip_pool_cidr: 192.168.213.0/24
54 | vtep_ip_pool_gateway: 192.168.213.1
55 | vtep_ip_pool_start: 192.168.213.10
56 | vtep_ip_pool_end: 192.168.213.200
57 |
58 | # Tier 0 router
59 | tier0_router_name: DefaultT0Router
60 | tier0_uplink_port_ip: 192.168.100.4
61 | tier0_uplink_port_subnet: 24
62 | tier0_uplink_next_hop_ip: 192.168.100.1
63 | tier0_uplink_port_ip_2: 192.168.100.5
64 | tier0_ha_vip: 192.168.100.3
65 |
66 | ### Edge nodes
67 | edge_ips: 192.168.110.37,192.168.110.38 # Comma separated based in number of required edges
68 | edge_default_gateway: 192.168.110.1
69 | edge_ip_prefix_length: 24
70 | edge_hostname_prefix: nsx-t-edge
71 | edge_transport_node_prefix: edge-transp-node
72 | edge_cli_password: "VMware1!"
73 | edge_root_password: "VMware1!"
74 | edge_deployment_size: "large" # Large recommended for PKS deployments
75 | vc_datacenter_for_edge: RegionA01
76 | vc_cluster_for_edge: RegionA01-MGMT
77 | vc_datastore_for_edge: iscsi
78 | vc_uplink_network_for_edge: "ESXi-RegionA01-vDS-COMP"
79 | vc_overlay_network_for_edge: "VM-RegionA01-vDS-COMP"
80 | vc_management_network_for_edge: "ESXi-RegionA01-vDS-COMP"
81 |
82 | ### ESX hosts
83 | # Install NSX on vSphere clusters automatically
84 | clusters_to_install_nsx: RegionA01-MGMT,RegionA01-K8s # Comma separated
85 | per_cluster_vlans: 0,0 # Comma separated, order of VLANs applied same as order of clusters
86 |
87 | esx_ips: "" # [OPTIONAL] additional esx hosts, if any, to be individually installed
88 | esx_os_version: "6.5.0" # [OPTIONAL]
89 | esx_root_password: "ca$hc0w" # [OPTIONAL]
90 | esx_hostname_prefix: "esx-host" # [OPTIONAL]
91 |
92 | esx_available_vmnic: "vmnic1" # comma separated physical NICs, applies to both cluster installation or ESXi installation
93 |
94 | ### [OPTIONAL] For all the configs below
95 | nsx_t_t1router_logical_switches_spec: |
96 | t1_routers:
97 | # Add additional T1 Routers or collapse switches into same T1 Router as needed
98 | - name: T1-Router-PKS-Infra
99 | switches:
100 | - name: PKS-Infra
101 | logical_switch_gw: 192.168.50.1 # Last octet should be 1 rather than 0
102 | subnet_mask: 24
103 |
104 | - name: T1Router-PKS-Services
105 | switches:
106 | - name: PKS-Services
107 | logical_switch_gw: 192.168.60.1 # Last octet should be 1 rather than 0
108 | subnet_mask: 24
109 |
110 |
111 | nsx_t_container_ip_block_spec: |
112 | container_ip_blocks:
113 | - name: PKS-node-ip-block
114 | cidr: 11.4.0.0/16
115 |
116 | - name: PKS-pod-ip-block
117 | cidr: 12.4.0.0/16
118 |
119 |
120 | nsx_t_external_ip_pool_spec: |
121 | external_ip_pools:
122 |
123 | - name: snat-vip-pool-for-pks
124 | cidr: 10.208.50.0/24
125 | start: 10.208.50.10 # Should not include gateway
126 | end: 10.208.50.200 # Should not include gateway
127 |
128 | - name: tep-ip-pool2
129 | cidr: 192.168.220.0/24
130 | start: 192.168.220.10
131 | end: 192.168.220.200
132 |
133 |
134 | nsx_t_nat_rules_spec: |
135 | nat_rules:
136 | # Sample entry for PKS-Infra network
137 | - t0_router: DefaultT0Router
138 | nat_type: snat
139 | source_network: 192.168.50.0/24 # PKS Infra network cidr
140 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
141 | rule_priority: 8001 # Lower priority
142 |
143 | # Sample entry for PKS-Services network
144 | - t0_router: DefaultT0Router
145 | nat_type: snat
146 | source_network: 192.168.60.0/24 # PKS Clusters network cidr
147 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
148 | rule_priority: 8001 # Lower priority
149 |
150 | # Sample entry for allowing inbound to PKS Ops manager
151 | - t0_router: DefaultT0Router
152 | nat_type: dnat
153 | destination_network: 10.208.50.2 # External IP address for PKS opsmanager
154 | translated_network: 192.168.50.2 # Internal IP of PKS Ops manager
155 | rule_priority: 1024 # Higher priority
156 |
157 | # Sample entry for allowing outbound from PKS Ops Mgr to external
158 | - t0_router: DefaultT0Router
159 | nat_type: snat
160 |     source_network: 192.168.50.2 # Internal IP of PKS Ops manager
161 |     translated_network: 10.208.50.2 # External IP address for PKS opsmanager
162 | rule_priority: 1024 # Higher priority
163 |
164 | # Sample entry for allowing inbound to PKS Controller
165 | - t0_router: DefaultT0Router
166 | nat_type: dnat
167 |     destination_network: 10.208.50.4 # External IP address for PKS controller
168 |     translated_network: 192.168.50.11 # Internal IP of PKS controller
169 | rule_priority: 1024 # Higher priority
170 |
171 | # Sample entry for allowing outbound from PKS Controller to external
172 | - t0_router: DefaultT0Router
173 | nat_type: snat
174 | source_network: 192.168.50.4 # Internal IP of PKS controller
175 | translated_network: 10.208.50.4 # External IP address for PKS controller
176 | rule_priority: 1024 # Higher priority
177 |
178 |
179 | nsx_t_csr_request_spec: |
180 | csr_request:
181 | #common_name not required - would use nsx_t_manager_host_name
182 | org_name: Company # EDIT
183 | org_unit: net-integ # EDIT
184 | country: US # EDIT
185 | state: CA # EDIT
186 | city: SF # EDIT
187 | key_size: 2048 # Valid values: 2048 or 3072
188 | algorithm: RSA # Valid values: RSA or DSA
189 |
190 |
191 | nsx_t_lbr_spec: |
192 | loadbalancers:
--------------------------------------------------------------------------------
/docker_image/run.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Set these via env vars
3 | # CONCOURSE_URL="http://10.33.75.99:8080"
4 | # EXTERNAL_DNS=
5 | # IMAGE_WEBSERVER_PORT=
6 | # VMWARE_USER
7 | # VMWARE_PASSWORD
8 | # NSXT_VERSION
9 |
10 | ROOT_WORK_DIR="/home/workspace"
11 | BIND_MOUNT_DIR="/home/concourse"
12 | CONFIG_FILE_NAME="nsx_pipeline_config.yml"
13 |
14 | if [[ ! -e ${BIND_MOUNT_DIR}/${CONFIG_FILE_NAME} ]]; then
15 | echo "Config file ${BIND_MOUNT_DIR}/${CONFIG_FILE_NAME} not found, exiting"
16 | exit 1
17 | fi
18 |
19 | # download the ovftool and OVA files
20 | cd $BIND_MOUNT_DIR
21 | ova_file_name=$(ls -l *.ova | sed 's/.* nsx/nsx/;s/ova.*/ova/' | tail -n1)
22 | ovftool_file_name=$(ls -l *.bundle | sed 's/.* VMware-ovftool/VMware-ovftool/;s/bundle.*/bundle/' | tail -n1)
23 |
24 | nsxt_version=2.5.0
25 | if [[ $ova_file_name != "" ]]; then
26 | nsxt_version=$(echo ${ova_file_name##*-} | head -c5)
27 | elif [[ $NSXT_VERSION != "" ]]; then
28 | nsxt_version=$NSXT_VERSION
29 | fi
30 |
31 | if [[ $ova_file_name == "" ]] || [[ $ovftool_file_name == "" ]]; then
32 | #ovftool_file_name="VMware-ovftool-4.3.0-7948156-lin.x86_64.bundle"
33 | #ova_file_name="nsx-unified-appliance-2.2.0.0.0.8680778.ova"
34 | set -e
35 | docker run -itd --name vmw-cli -e VMWINDEXDIR="/state" -e VMWUSER=$VMWARE_USER -e VMWPASS=$VMWARE_PASSWORD -v ${BIND_MOUNT_DIR}:/files --entrypoint=sh apnex/vmw-cli
36 |
37 | docker exec -t vmw-cli vmw-cli index OVFTOOL430
38 | ovftool_file_name=$(docker exec -t vmw-cli vmw-cli find fileType:bundle,version:4.3 | grep VMware-ovftool | sed 's/.* VMware-ovftool/VMware-ovftool/;s/bundle.*/bundle/' | tail -n1)
39 | docker exec -t vmw-cli vmw-cli get $ovftool_file_name
40 |
41 | version_no_dots=$(echo $nsxt_version | sed 's/\.//g')
42 | docker exec -t vmw-cli vmw-cli index NSX-T-${version_no_dots}
43 | ova_file_name=$(docker exec -t vmw-cli vmw-cli find fileType:ova,version:${nsxt_version} | grep nsx-unified-appliance | sed 's/.* nsx/nsx/;s/ova.*/ova/' | tail -n1)
44 | docker exec -t vmw-cli vmw-cli get $ova_file_name
45 | docker stop vmw-cli
46 | docker rm vmw-cli
47 |
48 | if [[ $ova_file_name == "" ]]; then
49 | echo "OVA not found for NSX version $nsxt_version. Please specify a supported version and recreate the container."
50 | exit 1
51 | fi
52 | set +e
53 | fi
54 |
55 | unified_appliance=true
56 | nsx_t_pipeline_branch=nsxt_2.5.0
57 | nsxt_ansible_branch=dev
58 |
59 | version_num=$(echo $nsxt_version | cut -d'.' -f1)
60 | version_sub_num=$(echo $nsxt_version | cut -d'.' -f2)
61 | # 2.4.0 deployments
62 | if [[ $version_num -eq 2 ]] && [[ $version_sub_num -eq 4 ]]; then
63 | nsx_t_pipeline_branch=nsxt_2.4.0
64 | nsxt_ansible_branch=master
65 | fi
66 | # 2.3.0 and earlier (non unified appliance) deployments
67 | if [[ $version_num -le 2 ]] && [[ $version_sub_num -le 3 ]]; then
68 | unified_appliance=false
69 | nsx_t_pipeline_branch=nsxt_2.3.0
70 | nsxt_ansible_branch=v1.0.0
71 | fi
72 |
73 | # Overwrite defaults if branches are explicitly passed in as env variables
74 | if [[ $PIPELINE_BRANCH != "" ]]; then
75 | nsx_t_pipeline_branch=$PIPELINE_BRANCH
76 | fi
77 | if [[ $ANSIBLE_BRANCH != "" ]]; then
78 | nsxt_ansible_branch=$ANSIBLE_BRANCH
79 | fi
80 |
81 | pipeline_internal_config="pipeline_config_internal.yml"
82 | echo "ovftool_file_name: $ovftool_file_name" > $pipeline_internal_config
83 | echo "ova_file_name: $ova_file_name" >> $pipeline_internal_config
84 | echo "unified_appliance: $unified_appliance" >> $pipeline_internal_config
85 | echo "nsx_t_pipeline_branch: $nsx_t_pipeline_branch" >> $pipeline_internal_config
86 | echo "nsxt_ansible_branch: $nsxt_ansible_branch" >> $pipeline_internal_config
87 |
88 | # Start a web server to host static files such as ovftool and NSX manager OVA
89 | docker run --name nginx-server -v ${BIND_MOUNT_DIR}:/usr/share/nginx/html:ro -p ${IMAGE_WEBSERVER_PORT}:80 -d nginx
90 |
91 | mkdir -p $ROOT_WORK_DIR
92 | cd $ROOT_WORK_DIR
93 |
94 | #git clone https://github.com/concourse/concourse-docker.git
95 | #concourse_docker_dir=${ROOT_WORK_DIR}/concourse-docker
96 | #cp ${concourse_docker_dir}/keys/generate $BIND_MOUNT_DIR
97 | #./generate
98 |
99 | git clone -b $nsx_t_pipeline_branch --single-branch https://github.com/vmware/nsx-t-datacenter-ci-pipelines.git
100 | pipeline_dir=${ROOT_WORK_DIR}/nsx-t-datacenter-ci-pipelines
101 | cp ${pipeline_dir}/docker_compose/docker-compose.yml $BIND_MOUNT_DIR
102 | cp ${pipeline_dir}/functions/generate-keys.sh $BIND_MOUNT_DIR
103 | cp ${pipeline_dir}/functions/set_default_params.py $BIND_MOUNT_DIR
104 |
105 | cd $BIND_MOUNT_DIR
106 | chmod +x generate-keys.sh
107 | ./generate-keys.sh
108 |
109 | # Prepare the yaml for docker compose
110 | concourse_version=5.7.0
111 | sed -i "0,/^ *- CONCOURSE_EXTERNAL_URL/ s|CONCOURSE_EXTERNAL_URL.*$|CONCOURSE_EXTERNAL_URL=${CONCOURSE_URL}|" docker-compose.yml
112 | sed -i "0,/^ *- CONCOURSE_GARDEN_DNS_SERVER/ s|CONCOURSE_GARDEN_DNS_SERVER.*$|CONCOURSE_GARDEN_DNS_SERVER=${EXTERNAL_DNS}|" docker-compose.yml
113 | sed -i "/^ *image: concourse\/concourse/ s|concourse/concourse.*$|concourse/concourse:$concourse_version|g" docker-compose.yml
114 |
115 | # If proxy env vars not set, remove the settings
116 | proxy_patterns=("http_proxy_url" "https_proxy_url" "no_proxy" "HTTP_PROXY" "HTTPS_PROXY" "NO_PROXY")
117 | if [[ -z "$HTTP_PROXY" ]] || [[ -z "$HTTPS_PROXY" ]]; then
118 | for p in "${proxy_patterns[@]}"; do
119 | sed -i "/$p/d" docker-compose.yml
120 | done
121 | else
122 | sed -i "0,/^ *- HTTP_PROXY/ s|HTTP_PROXY.*$|HTTP_PROXY=${HTTP_PROXY}|" docker-compose.yml
123 | sed -i "0,/^ *- http_proxy_url/ s|http_proxy_url.*$|http_proxy_url=${HTTP_PROXY}|" docker-compose.yml
124 |
125 | sed -i "0,/^ *- HTTPS_PROXY/ s|HTTPS_PROXY.*$|HTTPS_PROXY=${HTTPS_PROXY}|" docker-compose.yml
126 | sed -i "0,/^ *- https_proxy_url/ s|https_proxy_url.*$|https_proxy_url=${HTTPS_PROXY}|" docker-compose.yml
127 |
128 | sed -i "0,/^ *- NO_PROXY/ s|NO_PROXY.*$|NO_PROXY=${NO_PROXY}|" docker-compose.yml
129 | sed -i "0,/^ *- no_proxy/ s|no_proxy.*$|no_proxy=${NO_PROXY}|" docker-compose.yml
130 | fi
131 | #sed -i "0,/^ *- CONCOURSE_GARDEN_NETWORK/ s|- CONCOURSE_GARDEN_NETWORK.*$|#- CONCOURSE_GARDEN_NETWORK|" docker-compose.yml
132 | #sed -i "/^ *- CONCOURSE_EXTERNAL_URL/ a\ - CONCOURSE_NO_REALLY_I_DONT_WANT_ANY_AUTH=true" docker-compose.yml
133 |
134 | echo "bringing up Concourse server in a docker-compose cluster"
135 | docker-compose up -d
136 |
137 | # Wait for the concourse API server to start up
138 | while true; do
139 | curl -s -o /dev/null $CONCOURSE_URL
140 | if [[ $? -eq 0 ]]; then
141 | break
142 | fi
143 | echo "waiting for Concourse web server to be running"
144 | sleep 2
145 | done
146 | echo "brought up the Concourse cluster"
147 |
148 | # Fill in the optional parameters with default values for the param file
149 | echo "Filling optional parameters!"
150 | python set_default_params.py
151 |
152 | # Use fly to start the pipeline
153 | CONCOURSE_TARGET=nsx-concourse
154 | PIPELINE_NAME=nsx-t-install
155 | echo "logging into concourse at $CONCOURSE_URL"
156 | fly -t $CONCOURSE_TARGET sync
157 | fly --target $CONCOURSE_TARGET login -u nsx -p vmware --concourse-url $CONCOURSE_URL -n main
158 | echo "setting the NSX-t install pipeline $PIPELINE_NAME"
159 | fly_reset_cmd="fly -t $CONCOURSE_TARGET set-pipeline -p $PIPELINE_NAME -c ${pipeline_dir}/pipelines/nsx-t-install.yml -l ${BIND_MOUNT_DIR}/${pipeline_internal_config} -l ${BIND_MOUNT_DIR}/${CONFIG_FILE_NAME}"
160 | yes | $fly_reset_cmd
161 | echo "unpausing the pipepline $PIPELINE_NAME"
162 | fly -t $CONCOURSE_TARGET unpause-pipeline -p $PIPELINE_NAME
163 |
164 | # add an alias for set-pipeline command
165 | echo "alias fly-reset=\"$fly_reset_cmd\"" >> ~/.bashrc
166 | destroy_cmd="cd $BIND_MOUNT_DIR; fly -t $CONCOURSE_TARGET destroy-pipeline -p $PIPELINE_NAME; docker-compose down; docker stop nginx-server; docker rm nginx-server;"
167 | echo "alias destroy=\"$destroy_cmd\"" >> ~/.bashrc
168 | source ~/.bashrc
169 |
170 | while true; do
171 | is_worker_running=$(docker ps | grep concourse-worker)
172 | if [[ ! $is_worker_running ]]; then
173 | docker-compose restart concourse-worker
174 | echo "concourse worker is down; restarted it"
175 | break
176 | fi
177 | sleep 5
178 | done
179 |
180 | sleep 3d
181 | fly -t $CONCOURSE_TARGET destroy-pipeline -p $PIPELINE_NAME
182 | docker-compose down
183 | docker stop nginx-server
184 | docker rm nginx-server
185 | exit 0
--------------------------------------------------------------------------------
/pipelines/nsx-t-install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | nsx_t_install_params: &nsx-t-install-params
3 | nsxt_ansible_branch_int: ((nsxt_ansible_branch))
4 | enable_ansible_debug_int: ((enable_ansible_debug))
5 | vcenter_datacenter_int: ((vcenter_datacenter))
6 | vcenter_datastore_int: ((vcenter_datastore))
7 | mgmt_portgroup_int: ((mgmt_portgroup))
8 | vcenter_cluster_int: ((vcenter_cluster))
9 | vcenter_ip_int: ((vcenter_ip))
10 | vcenter_username_int: ((vcenter_username))
11 | vcenter_password_int: ((vcenter_password))
12 | dns_server_int: ((dns_server))
13 | dns_domain_int: ((dns_domain))
14 | ntp_servers_int: ((ntp_servers))
15 | default_gateway_int: ((default_gateway))
16 | netmask_int: ((netmask))
17 | nsx_image_webserver_int: ((nsx_image_webserver))
18 | ova_file_name_int: ((ova_file_name))
19 | ovftool_file_name_int: ((ovftool_file_name))
20 |
21 | unified_appliance_int: ((unified_appliance))
22 | nsx_manager_ips_int: ((nsx_manager_ips))
23 | nsx_manager_virtual_ip_int: ((nsx_manager_virtual_ip))
24 | nsx_license_key_int: ((nsx_license_key))
25 | nsx_manager_username_int: ((nsx_manager_username))
26 | nsx_manager_password_int: ((nsx_manager_password))
27 | nsx_manager_hostname_prefix_int: ((nsx_manager_hostname_prefix))
28 | nsx_manager_cluster_fqdn_int: ((nsx_manager_cluster_fqdn))
29 | nsx_manager_root_pwd_int: ((nsx_manager_root_pwd))
30 | nsx_manager_cli_pwd_int: ((nsx_manager_cli_pwd))
31 | nsx_manager_deployment_size_int: ((nsx_manager_deployment_size))
32 | nsx_manager_deployment_ip_prefix_length_int: ((nsx_manager_deployment_ip_prefix_length))
33 | nsx_manager_ssh_enabled_int: ((nsx_manager_ssh_enabled))
34 | resource_reservation_off_int: ((resource_reservation_off))
35 |
36 | compute_manager_username_int: ((compute_manager_username))
37 | compute_manager_password_int: ((compute_manager_password))
38 | compute_manager_2_username_int: ((compute_manager_2_username))
39 | compute_manager_2_password_int: ((compute_manager_2_password))
40 | compute_manager_2_vcenter_ip_int: ((compute_manager_2_vcenter_ip))
41 |
42 | edge_ips_int: ((edge_ips))
43 | edge_default_gateway_int: ((edge_default_gateway))
44 | edge_ip_prefix_length_int: ((edge_ip_prefix_length))
45 | edge_hostname_prefix_int: ((edge_hostname_prefix))
46 | edge_transport_node_prefix_int: ((edge_transport_node_prefix))
47 | edge_cli_password_int: ((edge_cli_password))
48 | edge_root_password_int: ((edge_root_password))
49 | edge_deployment_size_int: ((edge_deployment_size))
50 | vc_datacenter_for_edge_int: ((vc_datacenter_for_edge))
51 | vc_cluster_for_edge_int: ((vc_cluster_for_edge))
52 | vc_datastore_for_edge_int: ((vc_datastore_for_edge))
53 | vc_uplink_network_for_edge_int: ((vc_uplink_network_for_edge))
54 | vc_overlay_network_for_edge_int: ((vc_overlay_network_for_edge))
55 | vc_management_network_for_edge_int: ((vc_management_network_for_edge))
56 | edge_uplink_profile_vlan_int: ((edge_uplink_profile_vlan))
57 | esxi_uplink_profile_vlan_int: ((esxi_uplink_profile_vlan))
58 |
59 | vtep_ip_pool_cidr_int: ((vtep_ip_pool_cidr))
60 | vtep_ip_pool_gateway_int: ((vtep_ip_pool_gateway))
61 | vtep_ip_pool_start_int: ((vtep_ip_pool_start))
62 | vtep_ip_pool_end_int: ((vtep_ip_pool_end))
63 |
64 | add_nsx_t_routers_params: &add-nsx-t-routers-params
65 | nsx_manager_ips_int: ((nsx_manager_ips))
66 | nsx_manager_username_int: ((nsx_manager_username))
67 | nsx_manager_password_int: ((nsx_manager_password))
68 |
69 | edge_ips_int: ((edge_ips))
70 | edge_ip_prefix_length_int: ((edge_ip_prefix_length))
71 | edge_hostname_prefix_int: ((edge_hostname_prefix))
72 | edge_transport_node_prefix_int: ((edge_transport_node_prefix))
73 |
74 | clusters_to_install_nsx_int: ((clusters_to_install_nsx))
75 | per_cluster_vlans_int: ((per_cluster_vlans))
76 | esx_ips_int: ((esx_ips))
77 | esx_os_version_int: ((esx_os_version))
78 | esx_root_password_int: ((esx_root_password))
79 | esx_hostname_prefix_int: ((esx_hostname_prefix))
80 | esx_available_vmnic_int: ((esx_available_vmnic))
81 |
82 | tier0_router_name_int: ((tier0_router_name))
83 | tier0_uplink_port_ip_int: ((tier0_uplink_port_ip))
84 | tier0_uplink_port_subnet_int: ((tier0_uplink_port_subnet))
85 | tier0_uplink_next_hop_ip_int: ((tier0_uplink_next_hop_ip))
86 | tier0_uplink_port_ip_2_int: ((tier0_uplink_port_ip_2))
87 | tier0_ha_vip_int: ((tier0_ha_vip))
88 |
89 | nsx_t_extras_params: &nsx-t-extras-params
90 | nsx_manager_ips_int: ((nsx_manager_ips))
91 | nsx_manager_virtual_ip_int: ((nsx_manager_virtual_ip))
92 | nsx_manager_username_int: ((nsx_manager_username))
93 | nsx_manager_password_int: ((nsx_manager_password))
94 | nsx_manager_hostname_prefix_int: ((nsx_manager_hostname_prefix))
95 | nsx_manager_cluster_fqdn_int: ((nsx_manager_cluster_fqdn))
96 | dns_domain_int: ((dns_domain))
97 |
98 | nsx_t_t1router_logical_switches_spec_int: ((nsx_t_t1router_logical_switches_spec))
99 | nsx_t_ha_switching_profile_spec_int: ((nsx_t_ha_switching_profile_spec))
100 | nsx_t_container_ip_block_spec_int: ((nsx_t_container_ip_block_spec))
101 | nsx_t_external_ip_pool_spec_int: ((nsx_t_external_ip_pool_spec))
102 | nsx_t_nat_rules_spec_int: ((nsx_t_nat_rules_spec))
103 | nsx_t_csr_request_spec_int: ((nsx_t_csr_request_spec))
104 | nsx_t_lbr_spec_int: ((nsx_t_lbr_spec))
105 |
106 | groups:
107 |
108 | - name: full-install-config
109 | jobs:
110 | - install-nsx-t
111 | - add-nsx-t-routers
112 | - config-nsx-t-extras
113 |
114 | - name: install-nsx-t
115 | jobs:
116 | - standalone-install-nsx-t
117 |
118 | - name: add-nsx-t-routers
119 | jobs:
120 | - standalone-add-nsx-t-routers
121 |
122 | - name: config-nsx-t-extras
123 | jobs:
124 | - standalone-config-nsx-t-extras
125 |
126 | resource_types:
127 | - name: file-url
128 | type: docker-image
129 | source:
130 | repository: pivotalservices/concourse-curl-resource
131 | tag: latest
132 |
133 |
134 | resources:
135 | - name: nsx-t-gen-pipeline
136 | type: git
137 | source:
138 | uri: https://github.com/vmware/nsx-t-datacenter-ci-pipelines.git
139 | params:
140 | disable_git_lfs: true
141 | branch: ((nsx_t_pipeline_branch))
142 |
143 | - name: nsxt-ansible
144 | type: git
145 | source:
146 | uri: https://github.com/vmware/ansible-for-nsxt.git
147 | branch: ((nsxt_ansible_branch))
148 |
149 | - name: ovftool
150 | type: file-url
151 | source:
152 | url: ((nsx_image_webserver))/((ovftool_file_name))
153 | filename: ((ovftool_file_name))
154 | skip_ssl_verification: true
155 |
156 | jobs:
157 |
158 | - name: install-nsx-t
159 | plan:
160 | - in_parallel:
161 | - get: nsx-t-gen-pipeline
162 | - get: nsxt-ansible
163 | - get: ovftool
164 |
165 | - task: install-nsx-t
166 | file: nsx-t-gen-pipeline/tasks/install-nsx-t/task.yml
167 | params: *nsx-t-install-params
168 |
169 | - name: add-nsx-t-routers
170 | plan:
171 | - in_parallel:
172 | - get: nsx-t-gen-pipeline
173 | - get: nsxt-ansible
174 | params: {globs: []}
175 | passed: [install-nsx-t]
176 | trigger: true
177 |
178 | - task: add-nsx-t-routers
179 | file: nsx-t-gen-pipeline/tasks/add-nsx-t-routers/task.yml
180 | params: *add-nsx-t-routers-params
181 |
182 | - name: config-nsx-t-extras
183 | plan:
184 | - in_parallel:
185 | - get: nsx-t-gen-pipeline
186 | - get: nsxt-ansible
187 | params: {globs: []}
188 | passed: [add-nsx-t-routers]
189 | trigger: true
190 |
191 | - task: config-nsx-t-extras
192 | file: nsx-t-gen-pipeline/tasks/config-nsx-t-extras/task.yml
193 | params: *nsx-t-extras-params
194 |
195 | - name: standalone-install-nsx-t
196 | plan:
197 | - in_parallel:
198 | - get: nsx-t-gen-pipeline
199 | - get: nsxt-ansible
200 | - get: ovftool
201 |
202 | - task: install-nsx-t
203 | file: nsx-t-gen-pipeline/tasks/install-nsx-t/task.yml
204 | params: *nsx-t-install-params
205 |
206 | - name: standalone-add-nsx-t-routers
207 | plan:
208 | - in_parallel:
209 | - get: nsx-t-gen-pipeline
210 | - get: nsxt-ansible
211 | params: {globs: []}
212 |
213 | - task: add-nsx-t-routers
214 | file: nsx-t-gen-pipeline/tasks/add-nsx-t-routers/task.yml
215 | params: *add-nsx-t-routers-params
216 |
217 | - name: standalone-config-nsx-t-extras
218 | plan:
219 | - in_parallel:
220 | - get: nsx-t-gen-pipeline
221 | - get: nsxt-ansible
222 | params: {globs: []}
223 |
224 | - task: config-nsx-t-extras
225 | file: nsx-t-gen-pipeline/tasks/config-nsx-t-extras/task.yml
226 | params: *nsx-t-extras-params
227 |
--------------------------------------------------------------------------------
/tasks/install-nsx-t/get_mo_ref_id.py:
--------------------------------------------------------------------------------
1 | # To run:
2 | # python get_mo_ref_id.py --host 10.40.1.206 --user administrator@vsphere.local --password 'Admin!23'
3 |
4 | from pyVmomi import vim
5 |
6 | from pyVim.connect import SmartConnectNoSSL, Disconnect
7 |
8 | import argparse
9 | import atexit
10 | import getpass
11 | import json
12 | import sys
13 |
14 | import pdb
15 |
16 | CLUSTER = 'cluster'
17 | NETWORK = 'network'
18 | DATASTORE = 'datastore'
19 |
20 | HOST_ID_FIELDS = [
21 | 'vc_datacenter_for_edge', 'vc_cluster_for_edge',
22 | 'vc_datastore_for_edge', 'vc_uplink_network_for_edge',
23 | 'vc_overlay_network_for_edge', 'vc_management_network_for_edge',
24 | 'vc_datacenter_for_deployment', 'vc_cluster_for_deployment',
25 | 'vc_datastore_for_deployment', 'vc_management_network_for_deployment'
26 | ]
27 |
28 | class NoMoRefIdFoundError(Exception):
29 | pass
30 |
31 | def get_args():
32 | parser = argparse.ArgumentParser(
33 | description='Arguments for talking to vCenter')
34 |
35 | parser.add_argument('-s', '--host',
36 | required=True,
37 | action='store',
38 |                         help='vSphere service to connect to')
39 |
40 | parser.add_argument('-o', '--port',
41 | type=int,
42 | default=443,
43 | action='store',
44 | help='Port to connect on')
45 |
46 | parser.add_argument('-u', '--user',
47 | required=True,
48 | action='store',
49 | help='User name to use')
50 |
51 | parser.add_argument('-p', '--password',
52 | required=True,
53 | action='store',
54 | help='Password to use')
55 |
56 | args = parser.parse_args()
57 | return args
58 |
59 | class MoRefIdRetriever(object):
60 |
61 | def __init__(self):
62 | self.content = self._get_content()
63 | self.vim_resource_types = [vim.Datacenter,
64 | vim.Network, vim.Datastore]
65 |         # stores {
66 |         #   '<datacenter name>': {
67 |         #       'cluster': {'<name>': '<moRefId>'},
68 |         #       'network': {...}, 'datastore': {...} }
69 |         # }
70 | self.mapping = {}
71 | self.mo_id_set = set()
72 | self.vc_objects = self._get_container_view_for_datacenter()
73 |
74 | def _get_content(self):
75 | args = get_args()
76 | si = SmartConnectNoSSL(host=args.host, user=args.user,
77 | pwd=args.password, port=args.port)
78 | if not si:
79 | print("Could not connect to the specified host using specified "
80 | "username and password")
81 | return -1
82 |
83 | atexit.register(Disconnect, si)
84 |
85 | content = si.RetrieveContent()
86 | return content
87 |
88 | def parse_mo_ref_id_from_obj(self, vc_object):
89 | return str(vc_object).strip(" \"'").split(':')[1]
90 |
91 | def build_mapping_for_vc_obj_type(self, vc_object_list, mapping):
92 | for vc_object in vc_object_list:
93 | mo_id = self.parse_mo_ref_id_from_obj(vc_object)
94 | mapping[vc_object.name] = mo_id
95 |
96 | def _get_container_view_for_datacenter(self):
97 | # content = get_content()
98 | objview = self.content.viewManager.CreateContainerView(
99 | self.content.rootFolder, [vim.Datacenter], True)
100 | vc_objects = objview.view
101 | objview.Destroy()
102 | return vc_objects
103 |
104 | def build_mapping(self):
105 | for vc_object in self.vc_objects:
106 | try:
107 | # pdb.set_trace()
108 | datacenter_name = vc_object.name
109 | datacenter_mapping = {
110 | CLUSTER: {},
111 | NETWORK: {},
112 | DATASTORE: {}
113 | }
114 | self.mapping[datacenter_name] = datacenter_mapping
115 |
116 | # get clusters
117 | clusters = vc_object.hostFolder.childEntity
118 | cluster_mapping = datacenter_mapping[CLUSTER]
119 | self.build_mapping_for_vc_obj_type(clusters, cluster_mapping)
120 |
121 | # get network
122 | networks = vc_object.network
123 | network_mapping = datacenter_mapping[NETWORK]
124 | self.build_mapping_for_vc_obj_type(networks, network_mapping)
125 |
126 | # storage
127 | datastores = vc_object.datastore
128 | datastore_mapping = datacenter_mapping[DATASTORE]
129 | self.build_mapping_for_vc_obj_type(datastores, datastore_mapping)
130 |
131 | # if vc_object.name not in mapping:
132 | # mapping[vc_object.name] = mo_id
133 | # self.mo_id_set.add(mo_id)
134 | # else:
135 | # pass
136 | except Exception as e:
137 | print e
138 | print 'vc_object %s not written into cache' % vc_object.name
139 |
140 | from pprint import pprint
141 | print 'dumping cache:'
142 | pprint(self.mapping)
143 |
144 | def get_mo_id(self, vc_datacenter, vc_object_type, vc_object_name):
145 | if vc_object_name in self.mo_id_set:
146 | # if this is already a MoRefId
147 | return vc_object_name
148 | try:
149 | if 'folder' in vc_datacenter:
150 | vc_datacenter = vc_datacenter.split('/')[1]
151 | print 'trying to lookup %s %s %s' % (vc_datacenter, vc_object_type, vc_object_name)
152 | return self.mapping[vc_datacenter][vc_object_type][vc_object_name]
153 | except KeyError:
154 | print 'ERROR: no moRefId found for %s of type %s in datacenter %s' % (
155 | vc_object_name, vc_object_type, vc_datacenter)
156 | raise NoMoRefIdFoundError()
157 |
158 |
159 | class HostsFileWriter(object):
160 |
161 | def __init__(self, mo_id_retriever, in_file, out_file):
162 | self.in_file = in_file
163 | self.out_file = out_file
164 | self.ids_to_replace = HOST_ID_FIELDS
165 | self.mo_id_retriever = mo_id_retriever
166 | self.current_datacenter = None
167 |
168 | def modify_line_if_matched(self, line):
169 | try:
170 | id_var_name = next(id_var_name for id_var_name in
171 | self.ids_to_replace if id_var_name in line)
172 | print "found variable %s that needs to be converted to moRefId" % id_var_name
173 | except StopIteration:
174 | return line
175 |
176 | id_value = line.split('=')[-1].strip(" \"'")
177 | if id_var_name.startswith('vc_datacenter_'):
178 | self.current_datacenter = id_value
179 | print "found datacenter specified as %s" % id_value
180 | return
181 |
182 | vc_object_type = id_var_name.split('_')[-3]
183 | # pdb.set_trace()
184 | mo_id = self.mo_id_retriever.get_mo_id(self.current_datacenter, vc_object_type, id_value)
185 | new_line = '%s=%s' % (id_var_name, mo_id)
186 | return new_line
187 |
188 | def modify_ssh_enabled_if_matched(self, line):
189 | new_line = line
190 | if line and 'ssh_enabled' in line:
191 | value = line.split('=')[-1].strip(" \"'")
192 | var_name = line.split('=')[0]
193 | if value in ['true', 'false']:
194 | value = value[0].upper() + value[1:]
195 | new_line = '%s=%s' % (var_name, value)
196 | return new_line
197 |
198 | def process_hosts_file(self):
199 | lines = []
200 | with open(self.in_file, 'r') as f:
201 | for line in f:
202 | new_line = self.modify_line_if_matched(line.strip())
203 | new_line = self.modify_ssh_enabled_if_matched(new_line)
204 | if new_line:
205 | lines.append('%s\n' % new_line)
206 |
207 | with open(self.out_file, 'w') as f:
208 | f.writelines(lines)
209 |
210 |
211 | if __name__ == "__main__":
212 | mr = MoRefIdRetriever()
213 | mr.build_mapping()
214 | w = HostsFileWriter(mr, 'hosts', 'hosts.out')
215 | try:
216 |         w.process_hosts_file()
217 | except NoMoRefIdFoundError:
218 |         print 'One or more vCenter entities not found, exiting'
219 | sys.exit(1)
220 |
--------------------------------------------------------------------------------
/doc/Deployment.md:
--------------------------------------------------------------------------------
1 | ## Deploying Concourse using the Docker Image
2 |
3 | The "nsx-t-ci-pipeline" Docker image automatically performs the following tasks:
4 | * If the NSX-T and OVF tool bits are not detected in the /home/concourse folder, the Docker image downloads them automatically using the [vmw-cli](https://github.com/apnex/vmw-cli) project
5 | * Deploys NGINX to serve the bits to the pipeline
6 | * Deploys Concourse
7 | * Registers the pipeline and un-pauses it
8 |
9 | By performing these tasks automatically, the Docker image saves you time on these repetitive steps. If you have an existing Concourse server that you want to run the pipeline from, you can skip this section and go straight to [Configuring the pipeline to run on an existing Concourse server](#configuring-the-pipeline-to-run-on-an-existing-concourse-server).
10 | If you plan to let the Docker image download the bits, make sure to comment out the NSX Manager OVA and OVFTool file names in the parameter file, because the Docker image will take care of serving these files for you.
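A minimal sketch of what this looks like in nsx_pipeline_config.yml (the commented-out file names below are just the examples used elsewhere in this guide; yours will match whatever versions the image downloads):
```
nsx_image_webserver: "http://192.168.110.11:40001"
# ova_file_name: "nsx-unified-appliance-2.2.0.0.0.8680778.ova"        # left commented out: the Docker image downloads and serves the OVA
# ovftool_file_name: "VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle" # left commented out: the Docker image downloads and serves ovftool
```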
11 |
12 | ### Jumpbox requirements
13 | **Note:** Concourse is not supported on Red Hat. Please use Ubuntu or an equivalent distribution.
14 | 1. Install Docker-ce https://docs.docker.com/install/linux/docker-ce/ubuntu/
15 | 2. Create the /home/concourse directory (sudo mkdir -p /home/concourse). In this folder create the parameter file named nsx_pipeline_config.yml. This YAML file should contain the parameters required to run the pipeline. Sample config files for PAS, PKS, or both can be found at: https://github.com/vmware/nsx-t-datacenter-ci-pipelines/blob/master/pipelines/sample-params.yml
16 | 3. The Docker image downloads the NSX Manager OVA and the OVF tool automatically to /home/concourse, and always downloads the latest version of these files. For this it invokes vmw-cli (a project by Andrew Obersnel, https://github.com/apnex/vmw-cli), which downloads binaries from the my.vmware.com website. If you want to download the files manually and skip the automatic download, place the following two files in the /home/concourse directory:
17 | a. ovftool (example: VMware-ovftool-4.3.0-9158644-lin.x86_64.bundle).
18 | b. NSX Manager OVA file.
19 |
20 | ### Running the docker image
21 | 1. Load the Docker image
22 | `` docker load -i <path to the nsx-t-install image tar> ``
23 | 2. Start the docker container with the following command
24 | ```
25 | docker run --name nsx-t-install -d \
26 | -v /var/run/docker.sock:/var/run/docker.sock \
27 | -v /home/concourse:/home/concourse \
28 |    -e CONCOURSE_URL='http://<jumpbox-ip>:8080' \
29 |    -e EXTERNAL_DNS='<dns-server-ip>' \
30 |    -e IMAGE_WEBSERVER_PORT=40001 \
31 |    -e VMWARE_USER='<my-vmware-username>' \
32 |    -e VMWARE_PASSWORD='<my-vmware-password>' \
33 | nsx-t-install
34 | ```
35 | For more information on supported environment variables, check out the [Pipeline bring-up options](#pipeline-bring-up-options) section below.
36 | The nsx-t-install container starts Concourse Docker (Concourse CI running in Docker containers, https://github.com/concourse/concourse-docker), which creates three Concourse containers (one database, one web, and one worker container) plus one nginx webserver container, and registers a single Concourse pipeline.
37 | If you did not place the files manually in the /home/concourse folder, the script will download the latest versions from My VMware using the vmw-cli tool written by Andrew Obersnel.
38 | You can watch the progress of the creation of the containers by running `` watch docker ps ``
39 | It may take a few minutes to download the files and for all the containers to start; once they have, you should see the following containers running:
40 |
41 | ```
42 | nsx-t-install
43 | nginx
44 | Concourse web
45 | Concourse Worker
46 | Postgres
47 | ```
48 | 3. Browse to the Concourse page: http://<jumpbox-ip>:8080/
49 | 4. Login with the following default credentials
50 | ```
51 | user: nsx
52 | password: vmware
53 | ```
54 |
55 | ## Configuring the pipeline to run on an existing Concourse server
56 | If you are using an existing Concourse server, do the following:
57 | 1. Place the NSX-T Manager unified appliance OVA and the OVF tool on an accessible web server
58 | 2. Specify the web server and the file names in the param file as follows:
59 | ```
60 | nsx_image_webserver: "http://192.168.110.11:40001"
61 | ova_file_name: "nsx-unified-appliance-2.2.0.0.0.8680778.ova" # Uncomment this if you downloaded the file manually and placed it under /home/concourse
62 | ovftool_file_name: "VMware-ovftool-4.2.0-5965791-lin.x86_64.bundle"
63 | ```
64 | Be sure to add __required pipeline branches__ mentioned in the [Pipeline bring-up options](#pipeline-bring-up-options) section below.
65 | 3. Register the pipeline using: fly -t <target> sp -p <pipeline name> -c nsx-t-datacenter-ci-pipelines/pipelines/nsx-t-install.yml -l <parameter file>
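For example, a sketch assuming a fly target named nsx-concourse and a pipeline named install-nsx-t (both names and the parameter file path are illustrative, not fixed by the pipeline):
```
fly -t nsx-concourse login -c http://<concourse-url>:8080
fly -t nsx-concourse set-pipeline -p install-nsx-t \
    -c nsx-t-datacenter-ci-pipelines/pipelines/nsx-t-install.yml \
    -l nsx_pipeline_config.yml
fly -t nsx-concourse unpause-pipeline -p install-nsx-t
```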
66 |
67 |
68 | ## Pipeline bring-up options
69 |
70 | __If using the docker image provided in the [docker_image](../docker_image) folder of this repository:__
71 | The pipeline accepts the following environment variables:
72 |
73 | | ENV_VAR | description |
74 | |:---:|:---:|
75 | | CONCOURSE_URL | set to "http://<jumpbox-ip>:8080". The IP should be the primary NIC of the VM running the Docker container, not the loopback address |
76 | | EXTERNAL_DNS | set to a dns server that can resolve vCenter hostname, and public names e.g. github.com |
77 | | IMAGE_WEBSERVER_PORT | recommend '40001' if not taken |
78 | | VMWARE_USER (optional) | required if the NSX OVA needs to be downloaded |
79 | | VMWARE_PASSWORD (optional) | required if the NSX OVA needs to be downloaded |
80 | | NSX-T_VERSION (optional) | required to _download an earlier NSX-T version from myvmware.com_ |
81 | | PIPELINE_BRANCH (optional __*__) | branch to use for nsx-t-datacenter-ci-pipelines |
82 | | ANSIBLE_BRANCH (optional __*__) | branch to use for nsxt-ansible (https://github.com/vmware/ansible-for-nsxt) |
83 |
84 | To set those environment variables, use -e ENV_VAR='value' in the docker run command.
85 | __Note (*):__ Unless debugging, these two parameters should not be explicitly set!
86 | NSX-T_VERSION should also not be specified if you have placed the NSX-T OVA file under /home/concourse (see [README.md](../README.md)), as the pipeline will automatically determine the NSX-T version and use the compatible pipeline branches.
87 | If you do wish to specify these environment variables, make sure the NSX-T version and pipeline branches are compatible (see the matrix below).
88 |
89 |
90 | __If running the pipeline on an existing Concourse environment and not using the nsx-t-install image, perform the following additional steps:__ in the nsx_pipeline_config.yml created under /home/concourse, add the following two lines at the beginning, depending on which NSX-T version you are deploying:
91 |
92 | | NSX-T 2.3.0 & earlier | NSX-T 2.4.0 | NSX-T 2.5.0 |
93 | |:----------------------:|:---------------:|:-------------:|
94 | | nsxt_ansible_branch=v1.0.0 | nsxt_ansible_branch=master | nsxt_ansible_branch=dev |
95 | | nsx_t_pipeline_branch=nsxt_2.3.0 | nsx_t_pipeline_branch=nsxt_2.4.0 | nsx_t_pipeline_branch=master |
96 |
97 | Also, if the ovftool and OVA files were downloaded manually, add ``ova_file_name`` and ``ovftool_file_name`` entries to nsx_pipeline_config.yml as well, as mentioned in the section above.
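As a minimal sketch for an NSX-T 2.5.0 deployment on an existing Concourse server (branch values follow the matrix above; the file names are placeholders for the bundles you actually placed on the web server):
```
nsx_t_pipeline_branch: master
nsxt_ansible_branch: dev
ova_file_name: "<nsx-unified-appliance ova file name>"
ovftool_file_name: "<VMware-ovftool bundle file name>"
```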
98 |
99 | ### Running the pipeline
100 |
101 | There are four options for running the pipelines:
102 | 1. Full deployment
103 | - Deploying OVAs and setting up NSX components
104 | - Setting up logical routing, overlay and transport nodes
105 |    - Configuring extras such as NAT rules, tags, spoofguard profiles, and load balancers
106 | 2. OVAs and edge hosts deployment only
107 | 3. Routing only (T0 and T1 routers, and logical switches), overlay and transport nodes
108 | 4. Extras configuration only
109 |
110 | 
111 |
112 | The idea is that while it makes sense to run the full deployment most of the time, one might want to run only a certain portion of the pipeline, in stages.
113 | For example, it takes a while to deploy the NSX components (about 40 minutes) and much less time to deploy the logical routing and extras, so one can decide to run only the “deploy OVAs” portion and then run the other parts (or the full install) later; the pipeline will validate that the previous steps have been completed and pick up from there.
114 | For all of these options, we use a single parameter file (see the appendix for populating the param file). The following are the steps to run the pipeline.
115 |
116 | On the pipeline page, select the job you want to run; in this guide we will run the full install:
117 |
118 | Click on the first grey box representing the first task
119 | 
120 |
121 |
122 | Click on the + sign on the top right
123 |
124 | 
125 |
126 | At this point the pipeline should start; you can follow its progress from this page.
--------------------------------------------------------------------------------
/sample_parameters/PAS_only/nsx_pipeline_config.yml:
--------------------------------------------------------------------------------
1 | ### Configs marked OPTIONAL below can be removed from the param file
2 | ### if they are N/A or not desired.
3 |
4 | ### General settings
5 | enable_ansible_debug: false # set value to true for verbose output from Ansible
6 | # format: "http://:40001"
7 | nsx_image_webserver: "http://192.168.110.11:40001"
8 |
9 | ### NSX general network settings
10 | mgmt_portgroup: 'ESXi-RegionA01-vDS-COMP'
11 | dns_server: 192.168.110.10
12 | dns_domain: corp.local.io
13 | ntp_servers: time.vmware.com
14 | default_gateway: 192.168.110.1
15 | netmask: 255.255.255.0
16 |
17 | ### NSX manager cluster configs
18 | # Three node cluster is recommended. 1 is minimum, 3 is max
19 | nsx_manager_ips: 192.168.110.33,192.168.110.34,192.168.110.35 # Manager IPs.
20 | nsx_manager_username: admin
21 | nsx_manager_password: Admin!23Admin
22 | nsx_manager_hostname_prefix: "nsxt-mgr" # Min 12 chars, upper, lower, number, special digit
23 | nsx_manager_virtual_ip: 192.168.110.36 # [OPTIONAL] Virtual IP as the access IP for the manager cluster
24 | # FQDN is required if virtual IP is configured
25 | nsx_manager_cluster_fqdn: corp.local.io # [OPTIONAL] FQDN for the manager, will be used to generate cert for VIP
26 | nsx_license_key: 11111-22222-33333-44444-55555
27 | nsx_manager_root_pwd: Admin!23Admin # [OPTIONAL] Defaults to nsx_manager_password if not set
28 | nsx_manager_cli_pwd: Admin!23Admin # [OPTIONAL] Defaults to nsx_manager_password if not set
29 | nsx_manager_deployment_size: small # Recommended for real bare-bones demo, smallest setup
30 | nsx_manager_deployment_ip_prefix_length: 23
31 | nsx_manager_ssh_enabled: true
32 | vcenter_ip: 192.168.110.22
33 | vcenter_username: administrator@corp.local
34 | vcenter_password: "VMware1!"
35 | vcenter_datacenter: RegionA01
36 | vcenter_cluster: RegionA01-MGMT
37 | vcenter_datastore: iscsi
38 | resource_reservation_off: true
39 |
40 | # Compute manager credentials should be the same as above vCenter's if
41 | # controllers and edges are to be on the same vCenter
42 | compute_manager_username: "Administrator@vsphere.local" # [OPTIONAL] Defaults to vcenter_username if not set
43 | compute_manager_password: "VMware1!" # [OPTIONAL] Defaults to vcenter_password if not set
44 | # compute manager for the compute cluster (2nd vCenter)
45 | compute_manager_2_vcenter_ip: "null" # [OPTIONAL]
46 | compute_manager_2_username: "null" # [OPTIONAL]
47 | compute_manager_2_password: "null" # [OPTIONAL]
48 |
49 | edge_uplink_profile_vlan: 0 # For outbound uplink connection used by Edge, usually keep as 0
50 | esxi_uplink_profile_vlan: 0 # For internal overlay connection used by ESXi hosts, usually transport VLAN ID
51 |
52 | # Virtual Tunnel Endpoint network ip pool
53 | vtep_ip_pool_cidr: 192.168.213.0/24
54 | vtep_ip_pool_gateway: 192.168.213.1
55 | vtep_ip_pool_start: 192.168.213.10
56 | vtep_ip_pool_end: 192.168.213.200
57 |
58 | # Tier 0 router
59 | tier0_router_name: DefaultT0Router
60 | tier0_uplink_port_ip: 192.168.100.4
61 | tier0_uplink_port_subnet: 24
62 | tier0_uplink_next_hop_ip: 192.168.100.1
63 | tier0_uplink_port_ip_2: 192.168.100.5
64 | tier0_ha_vip: 192.168.100.3
65 |
66 | ### Edge nodes
67 | edge_ips: 192.168.110.37,192.168.110.38 # Comma separated, based on the number of required edges
68 | edge_default_gateway: 192.168.110.1
69 | edge_ip_prefix_length: 24
70 | edge_hostname_prefix: nsx-t-edge
71 | edge_transport_node_prefix: edge-transp-node
72 | edge_cli_password: "VMware1!"
73 | edge_root_password: "VMware1!"
74 | edge_deployment_size: "large" # Large recommended for PKS deployments
75 | vc_datacenter_for_edge: RegionA01
76 | vc_cluster_for_edge: RegionA01-MGMT
77 | vc_datastore_for_edge: iscsi
78 | vc_uplink_network_for_edge: "ESXi-RegionA01-vDS-COMP"
79 | vc_overlay_network_for_edge: "VM-RegionA01-vDS-COMP"
80 | vc_management_network_for_edge: "ESXi-RegionA01-vDS-COMP"
81 |
82 | ### ESX hosts
83 | # Install NSX on vSphere clusters automatically
84 | clusters_to_install_nsx: RegionA01-MGMT,RegionA01-K8s # Comma separated
85 | per_cluster_vlans: 0,0 # Comma separated, order of VLANs applied same as order of clusters
86 |
87 | esx_ips: "" # [OPTIONAL] additional esx hosts, if any, to be individually installed
88 | esx_os_version: "6.5.0" # [OPTIONAL]
89 | esx_root_password: "ca$hc0w" # [OPTIONAL]
90 | esx_hostname_prefix: "esx-host" # [OPTIONAL]
91 |
92 | esx_available_vmnic: "vmnic1" # comma separated physical NICs, applies to both cluster installation and individual ESXi installation
93 |
94 | ### [OPTIONAL] For all the configs below
95 | nsx_t_t1router_logical_switches_spec: |
96 | t1_routers:
97 | # Add additional T1 Routers or collapse switches into same T1 Router as needed
98 | # Remove unneeded T1 routers
99 | - name: T1-Router-PAS-Infra
100 | switches:
101 | - name: PAS-Infra
102 | logical_switch_gw: 192.168.10.1 # Last octet should be 1 rather than 0
103 | subnet_mask: 24
104 |
105 | - name: T1-Router-PAS-ERT
106 | switches:
107 | - name: PAS-ERT
108 | logical_switch_gw: 192.168.20.1 # Last octet should be 1 rather than 0
109 | subnet_mask: 24
110 | edge_cluster: true
111 |
112 | - name: T1-Router-PAS-Services
113 | switches:
114 | - name: PAS-Services
115 | logical_switch_gw: 192.168.30.1 # Last octet should be 1 rather than 0
116 | subnet_mask: 24
117 |
118 |
119 | nsx_t_ha_switching_profile_spec: |
120 | ha_switching_profiles:
121 | - name: HASwitchingProfile
122 |
123 |
124 | nsx_t_container_ip_block_spec: |
125 | container_ip_blocks:
126 | - name: PAS-container-ip-block
127 | cidr: 10.4.0.0/16
128 |
129 |
130 | nsx_t_external_ip_pool_spec: |
131 | external_ip_pools:
132 | - name: snat-vip-pool-for-pas
133 | cidr: 10.208.40.0/24
134 | start: 10.208.40.10 # Should not include gateway
135 | end: 10.208.40.200 # Should not include gateway
136 |
137 | - name: tep-ip-pool2
138 | cidr: 192.168.220.0/24
139 | start: 192.168.220.10
140 | end: 192.168.220.200
141 |
142 |
143 | # Specify NAT rules
144 | nsx_t_nat_rules_spec: |
145 | nat_rules:
146 | # Sample entry for allowing inbound to PAS Ops manager
147 | - t0_router: DefaultT0Router
148 | nat_type: dnat
149 | destination_network: 10.208.40.2 # External IP address for PAS opsmanager
150 | translated_network: 192.168.10.2 # Internal IP of PAS Ops manager
151 | rule_priority: 1024 # Higher priority
152 |
153 | # Sample entry for allowing outbound from PAS Ops Mgr to external
154 | - t0_router: DefaultT0Router
155 | nat_type: snat
156 | source_network: 192.168.10.2 # Internal IP of PAS opsmanager
157 | translated_network: 10.208.40.2 # External IP address for PAS opsmanager
158 | rule_priority: 1024 # Higher priority
159 |
160 | # Sample entry for PAS Infra network SNAT
161 | - t0_router: DefaultT0Router
162 | nat_type: snat
163 | source_network: 192.168.10.0/24 # PAS Infra network cidr
164 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
165 | rule_priority: 8000 # Lower priority
166 |
167 | # Sample entry for PAS ERT network SNAT
168 | - t0_router: DefaultT0Router
169 | nat_type: snat
170 | source_network: 192.168.20.0/24 # PAS ERT network cidr
171 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
172 | rule_priority: 8000 # Lower priority
173 |
174 | # Sample entry for PAS Services network SNAT
175 | - t0_router: DefaultT0Router
176 | nat_type: snat
177 | source_network: 192.168.30.0/24 # PAS Services network cidr
178 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
179 | rule_priority: 8001 # Lower priority
180 |
181 |
182 | nsx_t_csr_request_spec: |
183 | csr_request:
184 | #common_name not required - would use nsx_t_manager_host_name
185 | org_name: Company # EDIT
186 | org_unit: net-integ # EDIT
187 | country: US # EDIT
188 | state: CA # EDIT
189 | city: SF # EDIT
190 | key_size: 2048 # Valid values: 2048 or 3072
191 | algorithm: RSA # Valid values: RSA or DSA
192 |
193 |
194 | nsx_t_lbr_spec: |
195 | loadbalancers:
196 | # Sample entry for creating LBR for PAS ERT
197 | - name: PAS-ERT-LBR
198 | t1_router: T1-Router-PAS-ERT # Should match a previously declared T1 Router
199 | size: small # Allowed sizes: small, medium, large
200 | virtual_servers:
201 | - name: goRouter443 # Name that signifies function being exposed
202 | vip: 10.208.40.4 # Exposed VIP for LBR to listen on
203 | port: 443
204 | members:
205 | - ip: 192.168.20.11 # Internal ip of GoRouter instance 1
206 | port: 80
207 |           - ip: 192.168.20.12 # Internal ip of GoRouter instance 2
208 | port: 443
209 | - name: goRouter80
210 | vip: 10.208.40.4
211 | port: 80
212 | members:
213 | - ip: 192.168.20.11 # Internal ip of GoRouter instance 1
214 | port: 80
215 | - ip: 192.168.20.12 # Internal ip of GoRouter instance 2
216 | port: 80
217 | - name: sshProxy # SSH Proxy exposed to outside
218 | vip: 10.208.40.5
219 | port: 2222 # Port 2222 for ssh proxy
220 | members:
221 | - ip: 192.168.20.41 # Internal ip of Diego Brain where ssh proxy runs
222 | port: 2222
--------------------------------------------------------------------------------
/sample_parameters/PAS_and_PKS/nsx_pipeline_config.yml:
--------------------------------------------------------------------------------
1 | ### Configs marked OPTIONAL below can be removed from the param file
2 | ### if they are N/A or not desired.
3 |
4 | ### General settings
5 | enable_ansible_debug: false # set value to true for verbose output from Ansible
6 | # format: "http://:40001"
7 | nsx_image_webserver: "http://192.168.110.11:40001"
8 |
9 | ### NSX general network settings
10 | mgmt_portgroup: 'ESXi-RegionA01-vDS-COMP'
11 | dns_server: 192.168.110.10
12 | dns_domain: corp.local.io
13 | ntp_servers: time.vmware.com
14 | default_gateway: 192.168.110.1
15 | netmask: 255.255.255.0
16 |
17 | ### NSX manager cluster configs
18 | # Three node cluster is recommended. 1 is minimum, 3 is max
19 | nsx_manager_ips: 192.168.110.33,192.168.110.34,192.168.110.35 # Manager IPs.
20 | nsx_manager_username: admin
21 | nsx_manager_password: Admin!23Admin
22 | nsx_manager_hostname_prefix: "nsxt-mgr" # Min 12 chars, upper, lower, number, special digit
23 | nsx_manager_virtual_ip: 192.168.110.36 # [OPTIONAL] Virtual IP as the access IP for the manager cluster
24 | # FQDN is required if virtual IP is configured
25 | nsx_manager_cluster_fqdn: corp.local.io # [OPTIONAL] FQDN for the manager, will be used to generate cert for VIP
26 | nsx_license_key: 11111-22222-33333-44444-55555
27 | nsx_manager_root_pwd: Admin!23Admin # [OPTIONAL] Defaults to nsx_manager_password if not set
28 | nsx_manager_cli_pwd: Admin!23Admin # [OPTIONAL] Defaults to nsx_manager_password if not set
29 | nsx_manager_deployment_size: small # Recommended for real bare-bones demo, smallest setup
30 | nsx_manager_deployment_ip_prefix_length: 23
31 | nsx_manager_ssh_enabled: true
32 | vcenter_ip: 192.168.110.22
33 | vcenter_username: administrator@corp.local
34 | vcenter_password: "VMware1!"
35 | vcenter_datacenter: RegionA01
36 | vcenter_cluster: RegionA01-MGMT
37 | vcenter_datastore: iscsi
38 | resource_reservation_off: true
39 |
40 | # Compute manager credentials should be the same as above vCenter's if
41 | # controllers and edges are to be on the same vCenter
42 | compute_manager_username: "Administrator@vsphere.local" # [OPTIONAL] Defaults to vcenter_username if not set
43 | compute_manager_password: "VMware1!" # [OPTIONAL] Defaults to vcenter_password if not set
44 | # compute manager for the compute cluster (2nd vCenter)
45 | compute_manager_2_vcenter_ip: "null" # [OPTIONAL]
46 | compute_manager_2_username: "null" # [OPTIONAL]
47 | compute_manager_2_password: "null" # [OPTIONAL]
48 |
49 | edge_uplink_profile_vlan: 0 # For outbound uplink connection used by Edge, usually keep as 0
50 | esxi_uplink_profile_vlan: 0 # For internal overlay connection used by ESXi hosts, usually transport VLAN ID
51 |
52 | # Virtual Tunnel Endpoint network ip pool
53 | vtep_ip_pool_cidr: 192.168.213.0/24
54 | vtep_ip_pool_gateway: 192.168.213.1
55 | vtep_ip_pool_start: 192.168.213.10
56 | vtep_ip_pool_end: 192.168.213.200
57 |
58 | # Tier 0 router
59 | tier0_router_name: DefaultT0Router
60 | tier0_uplink_port_ip: 192.168.100.4
61 | tier0_uplink_port_subnet: 24
62 | tier0_uplink_next_hop_ip: 192.168.100.1
63 | tier0_uplink_port_ip_2: 192.168.100.5
64 | tier0_ha_vip: 192.168.100.3
65 |
66 | ### Edge nodes
67 | edge_ips: 192.168.110.37,192.168.110.38 # Comma separated, based on the number of required edges
68 | edge_default_gateway: 192.168.110.1
69 | edge_ip_prefix_length: 24
70 | edge_hostname_prefix: nsx-t-edge
71 | edge_transport_node_prefix: edge-transp-node
72 | edge_cli_password: "VMware1!"
73 | edge_root_password: "VMware1!"
74 | edge_deployment_size: "large" # Large recommended for PKS deployments
75 | vc_datacenter_for_edge: RegionA01
76 | vc_cluster_for_edge: RegionA01-MGMT
77 | vc_datastore_for_edge: iscsi
78 | vc_uplink_network_for_edge: "ESXi-RegionA01-vDS-COMP"
79 | vc_overlay_network_for_edge: "VM-RegionA01-vDS-COMP"
80 | vc_management_network_for_edge: "ESXi-RegionA01-vDS-COMP"
81 |
82 | ### ESX hosts
83 | # Install NSX on vSphere clusters automatically
84 | clusters_to_install_nsx: RegionA01-MGMT,RegionA01-K8s # Comma separated
85 | per_cluster_vlans: 0,0 # Comma separated, order of VLANs applied same as order of clusters
86 |
87 | esx_ips: "" # [OPTIONAL] additional esx hosts, if any, to be individually installed
88 | esx_os_version: "6.5.0" # [OPTIONAL]
89 | esx_root_password: "ca$hc0w" # [OPTIONAL]
90 | esx_hostname_prefix: "esx-host" # [OPTIONAL]
91 |
92 | esx_available_vmnic: "vmnic1" # comma separated physical NICs, applies to both cluster installation and individual ESXi installation
93 |
94 | ### [OPTIONAL] For all the configs below
95 | nsx_t_t1router_logical_switches_spec: |
96 | t1_routers:
97 | # Add additional T1 Routers or collapse switches into same T1 Router as needed
98 | # Remove unneeded T1 routers
99 |
100 | - name: T1-Router-PAS-Infra
101 | switches:
102 | - name: PAS-Infra
103 | logical_switch_gw: 192.168.10.1 # Last octet should be 1 rather than 0
104 | subnet_mask: 24
105 |
106 | - name: T1-Router-PAS-ERT
107 | switches:
108 | - name: PAS-ERT
109 | logical_switch_gw: 192.168.20.1 # Last octet should be 1 rather than 0
110 | subnet_mask: 24
111 | edge_cluster: true
112 |
113 | - name: T1-Router-PAS-Services
114 | switches:
115 | - name: PAS-Services
116 | logical_switch_gw: 192.168.30.1 # Last octet should be 1 rather than 0
117 | subnet_mask: 24
118 |
119 |
120 |     # Comment out the following T1 Routers if there is no PKS
121 | - name: T1-Router-PKS-Infra
122 | switches:
123 | - name: PKS-Infra
124 | logical_switch_gw: 192.168.50.1 # Last octet should be 1 rather than 0
125 | subnet_mask: 24
126 |
127 | - name: T1Router-PKS-Services
128 | switches:
129 | - name: PKS-Services
130 | logical_switch_gw: 192.168.60.1 # Last octet should be 1 rather than 0
131 | subnet_mask: 24
132 |
133 |
134 | nsx_t_ha_switching_profile_spec: |
135 | ha_switching_profiles:
136 | - name: HASwitchingProfile
137 |
138 |
139 | nsx_t_container_ip_block_spec: |
140 | container_ip_blocks:
141 | - name: PAS-container-ip-block
142 | cidr: 10.4.0.0/16
143 |
144 | - name: PKS-node-ip-block
145 | cidr: 11.4.0.0/16
146 |
147 | - name: PKS-pod-ip-block
148 | cidr: 12.4.0.0/16
149 |
150 |
151 | nsx_t_external_ip_pool_spec: |
152 | external_ip_pools:
153 | - name: snat-vip-pool-for-pas
154 | cidr: 10.208.40.0/24
155 | start: 10.208.40.10 # Should not include gateway
156 | end: 10.208.40.200 # Should not include gateway
157 |
158 | - name: snat-vip-pool-for-pks
159 | cidr: 10.208.50.0/24
160 | start: 10.208.50.10 # Should not include gateway
161 | end: 10.208.50.200 # Should not include gateway
162 |
163 | - name: tep-ip-pool2
164 | cidr: 192.168.220.0/24
165 | start: 192.168.220.10
166 | end: 192.168.220.200
167 |
168 |
169 | nsx_t_nat_rules_spec: |
170 | nat_rules:
171 | # Sample entry for allowing inbound to PAS Ops manager
172 | - t0_router: DefaultT0Router
173 | nat_type: dnat
174 | destination_network: 10.208.40.2 # External IP address for PAS opsmanager
175 | translated_network: 192.168.10.2 # Internal IP of PAS Ops manager
176 | rule_priority: 1024 # Higher priority
177 |
178 | # Sample entry for allowing outbound from PAS Ops Mgr to external
179 | - t0_router: DefaultT0Router
180 | nat_type: snat
181 | source_network: 192.168.10.2 # Internal IP of PAS opsmanager
182 | translated_network: 10.208.40.2 # External IP address for PAS opsmanager
183 | rule_priority: 1024 # Higher priority
184 |
185 | # Sample entry for PAS Infra network SNAT
186 | - t0_router: DefaultT0Router
187 | nat_type: snat
188 | source_network: 192.168.10.0/24 # PAS Infra network cidr
189 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
190 | rule_priority: 8000 # Lower priority
191 |
192 | # Sample entry for PAS ERT network SNAT
193 | - t0_router: DefaultT0Router
194 | nat_type: snat
195 | source_network: 192.168.20.0/24 # PAS ERT network cidr
196 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
197 | rule_priority: 8000 # Lower priority
198 |
199 | # Sample entry for PAS Services network SNAT
200 | - t0_router: DefaultT0Router
201 | nat_type: snat
202 | source_network: 192.168.30.0/24 # PAS Services network cidr
203 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
204 | rule_priority: 8001 # Lower priority
205 |
206 |
207 | # Sample entry for PKS-Services network
208 | - t0_router: DefaultT0Router
209 | nat_type: snat
210 | source_network: 192.168.60.0/24 # PKS Clusters network cidr
211 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
212 | rule_priority: 8001 # Lower priority
213 |
214 | # Sample entry for PKS-Infra network
215 | - t0_router: DefaultT0Router
216 | nat_type: snat
217 | source_network: 192.168.50.0/24 # PKS Infra network cidr
218 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
219 | rule_priority: 8001 # Lower priority
220 |
221 | # Sample entry for allowing inbound to PKS Ops manager
222 | - t0_router: DefaultT0Router
223 | nat_type: dnat
224 | destination_network: 10.208.50.2 # External IP address for PKS opsmanager
225 | translated_network: 192.168.50.2 # Internal IP of PKS Ops manager
226 | rule_priority: 1024 # Higher priority
227 |
228 | # Sample entry for allowing outbound from PKS Ops Mgr to external
229 | - t0_router: DefaultT0Router
230 | nat_type: snat
231 |     source_network: 192.168.50.2 # Internal IP of PKS opsmanager
232 |     translated_network: 10.208.50.2 # External IP address for PKS opsmanager
233 | rule_priority: 1024 # Higher priority
234 |
235 | # Sample entry for allowing inbound to PKS Controller
236 | - t0_router: DefaultT0Router
237 | nat_type: dnat
238 | destination_network: 10.208.50.4 # External IP address for PKS opsmanager
239 | translated_network: 192.168.60.2 # Internal IP of PKS Ops Controller
240 | rule_priority: 1024 # Higher priority
241 |
242 | # Sample entry for allowing outbound from PKS Controller to external
243 | - t0_router: DefaultT0Router
244 | nat_type: snat
245 | source_network: 192.168.60.2 # Internal IP of PKS controller
246 | translated_network: 10.208.50.4 # External IP address for PKS controller
247 | rule_priority: 1024 # Higher priority
248 |
249 |
250 | nsx_t_csr_request_spec: |
251 | csr_request:
252 | #common_name not required - would use nsx_t_manager_host_name
253 | org_name: Company # EDIT
254 | org_unit: net-integ # EDIT
255 | country: US # EDIT
256 | state: CA # EDIT
257 | city: SF # EDIT
258 | key_size: 2048 # Valid values: 2048 or 3072
259 | algorithm: RSA # Valid values: RSA or DSA
260 |
261 |
262 | nsx_t_lbr_spec: |
263 | loadbalancers:
264 | # Sample entry for creating LBR for PAS ERT
265 | - name: PAS-ERT-LBR
266 | t1_router: T1-Router-PAS-ERT # Should match a previously declared T1 Router
267 | size: small # Allowed sizes: small, medium, large
268 | virtual_servers:
269 | - name: goRouter443 # Name that signifies function being exposed
270 | vip: 10.208.40.4 # Exposed VIP for LBR to listen on
271 | port: 443
272 | members:
273 | - ip: 192.168.20.11 # Internal ip of GoRouter instance 1
274 | port: 443
275 | - name: goRouter80
276 | vip: 10.208.40.4
277 | port: 80
278 | members:
279 | - ip: 192.168.20.31 # Internal ip of GoRouter instance 1
280 | port: 80
281 | - ip: 192.168.20.32 # Internal ip of GoRouter instance 2
282 | port: 80
283 | - name: sshProxy # SSH Proxy exposed to outside
284 | vip: 10.208.40.5
285 | port: 2222 # Port 2222 for ssh proxy
286 | members:
287 | - ip: 192.168.20.41 # Internal ip of Diego Brain where ssh proxy runs
288 | port: 2222
--------------------------------------------------------------------------------
/nsxt_yaml/basic_resources.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get ESX nodes information
3 | hosts: esx_hosts
4 | gather_facts: True
5 | tasks:
6 | - name: Get SHA-256 SSL thumbprint
7 | command: openssl x509 -in /etc/vmware/ssl/rui.crt -fingerprint -sha256 -noout
8 | when:
9 | - ansible_distribution == "VMkernel"
10 | register: thumb
11 | - name: Set ssl thumbprint fact
12 | set_fact:
13 | sslthumb: "{{ thumb.stdout|regex_findall('Fingerprint=(.*)') }}"
14 | when:
15 | - ansible_distribution == "VMkernel"
16 |
17 | - hosts: 127.0.0.1
18 | connection: local
19 | become: yes
20 | vars_files:
21 | - vars.yml
22 | tasks:
23 |
24 | ###############################################################################
25 | # Edge cluster
26 | ###############################################################################
27 | - name: Define edge cluster members
28 | set_fact:
29 | edge_members: "{{edge_members|default([]) + [ { 'transport_node_name': hostvars[item].transport_node_name } ] }}"
30 | with_items:
31 | - "{{groups['edge_nodes']}}"
32 |
33 | - name: NSX-T Edge Cluster
34 | nsxt_edge_clusters:
35 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
36 | username: "{{hostvars['localhost'].nsx_manager_username}}"
37 | password: "{{hostvars['localhost'].nsx_manager_password}}"
38 | display_name: "{{ edge_cluster_name }}"
39 | validate_certs: False
40 | members: "{{ edge_members }}"
41 | state: present
42 | register: edge_cluster
43 |
44 | ###############################################################################
45 | # Configure compute clusters for auto NSX install
46 | ###############################################################################
47 | - name: Enable auto install of NSX for specified clusters
48 | nsxt_compute_collection_fabric_templates:
49 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
50 | username: "{{hostvars['localhost'].nsx_manager_username}}"
51 | password: "{{hostvars['localhost'].nsx_manager_password}}"
52 | validate_certs: False
53 | display_name: "{{item}}-fabric_template"
54 | compute_manager_name: "{{compute_manager_name}}"
55 | cluster_name: "{{item}}"
56 | auto_install_nsx: True
57 | state: present
58 | with_items:
59 | - "{{hostvars['localhost'].clusters_to_install_nsx}}"
60 | register: auto_install_nsx_result
61 | when:
62 | - hostvars['localhost'].clusters_to_install_nsx is defined
63 |
64 | - name: Create uplink profiles for the clusters to be auto installed
65 | nsxt_uplink_profiles:
66 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
67 | username: "{{hostvars['localhost'].nsx_manager_username}}"
68 | password: "{{hostvars['localhost'].nsx_manager_password}}"
69 | validate_certs: False
70 | resource_type: UplinkHostSwitchProfile
71 | display_name: "{{item.0}}-profile"
72 | mtu: 1600
73 | teaming: "{{common_teaming_spec_for_esx}}"
74 | transport_vlan: "{{item.1}}"
75 | state: "present"
76 | with_together:
77 | - "{{hostvars['localhost'].clusters_to_install_nsx}}"
78 | - "{{hostvars['localhost'].per_cluster_vlans}}"
79 | when:
80 | - hostvars['localhost'].clusters_to_install_nsx is defined
81 | - hostvars['localhost'].per_cluster_vlans is defined
82 | - auto_install_nsx_result.changed == true
83 |
84 | - name: Create transport node profile for cluster
85 | nsxt_transport_node_profiles:
86 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
87 | username: "{{hostvars['localhost'].nsx_manager_username}}"
88 | password: "{{hostvars['localhost'].nsx_manager_password}}"
89 | validate_certs: False
90 | display_name: "{{item}}-transport_node_profile"
91 | resource_type: "TransportNodeProfile"
92 | host_switch_spec:
93 | resource_type: StandardHostSwitchSpec
94 | host_switches:
95 | - host_switch_profiles:
96 | - name: "{{item}}-profile"
97 | type: UplinkHostSwitchProfile
98 | host_switch_name: "{{overlay_host_switch}}"
99 | host_switch_mode: "STANDARD"
100 | pnics: "{{pnic_list}}"
101 | ip_assignment_spec:
102 | resource_type: StaticIpPoolSpec
103 | ip_pool_name: "{{vtep_ip_pool_name}}"
104 | transport_zone_endpoints:
105 | - transport_zone_name: "{{overlay_transport_zone}}"
106 | state: present
107 | with_items:
108 | - "{{hostvars['localhost'].clusters_to_install_nsx}}"
109 | when:
110 | - hostvars['localhost'].clusters_to_install_nsx is defined
111 | - hostvars['localhost'].per_cluster_vlans is defined
112 | register: transport_node_profile_result
113 |
114 | - name: Create transport node collection for the cluster to enable auto creation of TNs
115 | nsxt_transport_node_collections:
116 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
117 | username: "{{hostvars['localhost'].nsx_manager_username}}"
118 | password: "{{hostvars['localhost'].nsx_manager_password}}"
119 | validate_certs: False
120 | display_name: "{{item}}-transport_node_collection"
121 | description: Transport node collection
122 | resource_type: "TransportNodeCollection"
123 | compute_manager_name: "{{compute_manager_name}}"
124 | cluster_name: "{{item}}"
125 | transport_node_profile_name: "{{item}}-transport_node_profile"
126 | state: present
127 | with_items:
128 | - "{{hostvars['localhost'].clusters_to_install_nsx}}"
129 | when:
130 | - hostvars['localhost'].clusters_to_install_nsx is defined
131 | - hostvars['localhost'].per_cluster_vlans is defined
132 | - transport_node_profile_result.changed == true
133 |
134 | ###############################################################################
135 | # Install NSX on individual ESX hosts, if any
136 | ###############################################################################
137 | - name: Create transport nodes for ESX hosts (2.4.0 and later)
138 | nsxt_transport_nodes:
139 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
140 | username: "{{hostvars['localhost'].nsx_manager_username}}"
141 | password: "{{hostvars['localhost'].nsx_manager_password}}"
142 | validate_certs: False
143 | resource_type: TransportNode
144 | display_name: "{{hostvars[item].transport_node_name}}"
145 | description: Transport Node for {{hostvars[item].ip}}
146 | host_switch_spec:
147 | resource_type: StandardHostSwitchSpec
148 | host_switches:
149 | - host_switch_profiles:
150 | - name: "{{host_uplink_prof}}"
151 | type: UplinkHostSwitchProfile
152 | host_switch_name: "{{overlay_host_switch}}"
153 | pnics: "{{pnic_list}}"
154 | ip_assignment_spec:
155 | resource_type: StaticIpPoolSpec
156 | ip_pool_name: "{{vtep_ip_pool_name}}"
157 | transport_zone_endpoints:
158 | - transport_zone_name: "{{overlay_transport_zone}}"
159 | node_deployment_info:
160 | resource_type: "HostNode"
161 | display_name: "{{hostvars[item].fabric_node_name}}"
162 | ip_addresses:
163 | - "{{hostvars[item].ip}}"
164 | os_type: "ESXI"
165 | os_version: "{{hostvars[item].esx_os_version}}"
166 | host_credential:
167 | username: "root"
168 | password: "{{hostvars[item].ansible_ssh_pass}}"
169 | thumbprint: "{{hostvars[item].sslthumb[0]}}"
170 | state: present
171 | with_items:
172 | - "{{groups['esx_hosts']}}"
173 | when:
174 | - groups['esx_hosts'] is defined
175 |
176 | ###############################################################################
177 | # Tier 0 Router
178 | ###############################################################################
179 | - name: NSX-T T0 Logical Router
180 | nsxt_logical_routers:
181 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
182 | username: "{{hostvars['localhost'].nsx_manager_username}}"
183 | password: "{{hostvars['localhost'].nsx_manager_password}}"
184 | validate_certs: False
185 | resource_type: LogicalRouter
186 | description: "NSX-T T0 Logical Router"
187 | display_name: "{{hostvars['localhost'].tier0_router_name}}"
188 | edge_cluster_name: "{{ edge_cluster_name }}"
189 | router_type: TIER0
190 | high_availability_mode: ACTIVE_STANDBY
191 | state: present
192 | register: t0
193 |
194 | - name: Add static routes
195 | nsxt_logical_router_static_routes:
196 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
197 | username: "{{hostvars['localhost'].nsx_manager_username}}"
198 | password: "{{hostvars['localhost'].nsx_manager_password}}"
199 | validate_certs: False
200 | logical_router_name: "{{hostvars['localhost'].tier0_router_name}}"
201 | display_name: "tier0 static route"
202 | next_hops:
203 | - administrative_distance: '1'
204 | ip_address: "{{hostvars['localhost'].tier0_uplink_next_hop_ip}}"
205 | network: 0.0.0.0/0
206 | state: present
207 |
208 | - name: Create VLAN logical switch
209 | nsxt_logical_switches:
210 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
211 | username: "{{hostvars['localhost'].nsx_manager_username}}"
212 | password: "{{hostvars['localhost'].nsx_manager_password}}"
213 | validate_certs: False
214 | display_name: "{{vlan_logical_switch}}"
215 | replication_mode: SOURCE
216 | admin_state: UP
217 | transport_zone_name: "{{vlan_transport_zone}}"
218 | vlan: "{{vlan_logical_switch_vlan}}"
219 | state: present
220 | when:
221 | - t0.changed == true
222 |
223 | - name: Logical Switch Port for uplink_1
224 | nsxt_logical_ports:
225 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
226 | username: "{{hostvars['localhost'].nsx_manager_username}}"
227 | password: "{{hostvars['localhost'].nsx_manager_password}}"
228 | validate_certs: False
229 | display_name: lsp_for_uplink_1
230 | logical_switch_name: "{{vlan_logical_switch}}"
231 | admin_state: UP
232 | state: present
233 | register: vlan_lsp
234 | when:
235 | - t0.changed == true
236 |
237 | - name: Create logical router port for uplink1
238 | nsxt_logical_router_ports:
239 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
240 | username: "{{hostvars['localhost'].nsx_manager_username}}"
241 | password: "{{hostvars['localhost'].nsx_manager_password}}"
242 | validate_certs: False
243 | display_name: t0_uplink_1
244 | resource_type: LogicalRouterUpLinkPort
245 | logical_router_name: "{{hostvars['localhost'].tier0_router_name}}"
246 | linked_logical_switch_port_id:
247 | target_type: LogicalPort
248 | target_id: "{{vlan_lsp.id}}"
249 | subnets:
250 | - ip_addresses:
251 | - "{{hostvars['localhost'].tier0_uplink_port_ip}}"
252 | prefix_length: "{{hostvars['localhost'].tier0_uplink_port_subnet}}"
253 | edge_cluster_member_index:
254 | - 0
255 | state: present
256 | register: uplink_lrp
257 | when:
258 | - t0.changed == true
259 |
260 | - name: Logical Switch Port for uplink_2
261 | nsxt_logical_ports:
262 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
263 | username: "{{hostvars['localhost'].nsx_manager_username}}"
264 | password: "{{hostvars['localhost'].nsx_manager_password}}"
265 | validate_certs: False
266 | display_name: lsp_for_uplink_2
267 | logical_switch_name: "{{vlan_logical_switch}}"
268 | admin_state: UP
269 | state: present
270 | register: vlan_lsp_2
271 | when:
272 | - hostvars['localhost'].tier0_uplink_port_ip_2 is defined
273 | - t0.changed == true
274 |
275 | - name: Create logical router port for uplink2
276 | nsxt_logical_router_ports:
277 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
278 | username: "{{hostvars['localhost'].nsx_manager_username}}"
279 | password: "{{hostvars['localhost'].nsx_manager_password}}"
280 | validate_certs: False
281 | display_name: t0_uplink_2
282 | resource_type: LogicalRouterUpLinkPort
283 | logical_router_name: "{{hostvars['localhost'].tier0_router_name}}"
284 | linked_logical_switch_port_id:
285 | target_type: LogicalPort
286 | target_id: "{{vlan_lsp_2.id}}"
287 | subnets:
288 | - ip_addresses:
289 | - "{{hostvars['localhost'].tier0_uplink_port_ip_2}}"
290 | prefix_length: "{{hostvars['localhost'].tier0_uplink_port_subnet}}"
291 | edge_cluster_member_index:
292 | - 1
293 | state: present
294 | register: uplink_lrp_2
295 | when:
296 | - hostvars['localhost'].tier0_uplink_port_ip_2 is defined
297 | - t0.changed == true
298 |
299 | - name: HA VIP for T0 Router
300 | nsxt_logical_routers:
301 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
302 | username: "{{hostvars['localhost'].nsx_manager_username}}"
303 | password: "{{hostvars['localhost'].nsx_manager_password}}"
304 | validate_certs: False
305 | resource_type: LogicalRouter
306 | description: "NSX-T T0 Logical Router"
307 | display_name: "{{hostvars['localhost'].tier0_router_name}}"
308 | edge_cluster_name: "{{ edge_cluster_name }}"
309 | router_type: TIER0
310 | high_availability_mode: ACTIVE_STANDBY
311 | advanced_config:
312 | ha_vip_configs:
313 | - enabled: True
314 | ha_vip_subnets:
315 | - active_vip_addresses:
316 | - "{{hostvars['localhost'].tier0_ha_vip}}"
317 | prefix_length: "{{hostvars['localhost'].tier0_uplink_port_subnet}}"
318 | redundant_uplink_port_ids:
319 | - "{{uplink_lrp.id}}"
320 | - "{{uplink_lrp_2.id}}"
321 | state: present
322 | when:
323 | - hostvars['localhost'].tier0_uplink_port_ip_2 is defined
324 | - hostvars['localhost'].tier0_ha_vip is defined
325 | - t0.changed == true
326 |
--------------------------------------------------------------------------------
/doc/Parameter-file.md:
--------------------------------------------------------------------------------
1 | ## Parameter file
2 |
3 | The following are the sections of the parameter file for the pipeline (if you are running it using the Docker image, it should be placed at /home/concourse/nsx_pipeline_config.yml):
4 |
5 | ### General:
6 |
7 | **nsx_t_pipeline_branch: master**
8 | Use 'master' for 2.5.0 and later deployments.
9 | DO NOT specify this parameter if using the nsx-t-install Docker image.
10 | **nsxt_ansible_branch: dev**
11 | Use 'master' for 2.4.0 deployments; use 'v1.0.0' for earlier deployments.
12 | DO NOT specify this parameter if using the nsx-t-install Docker image.
13 | **enable_ansible_debug: false**
14 | Set the value to true for verbose output from Ansible (see the example below).
15 |
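Taken together, the general section at the top of nsx_pipeline_config.yml might look like the following minimal sketch; set the two branch parameters only when running on your own Concourse server, as noted above:
```
nsx_t_pipeline_branch: master   # omit when using the nsx-t-install Docker image
nsxt_ansible_branch: dev        # omit when using the nsx-t-install Docker image
enable_ansible_debug: false
```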
16 | ### NSX parameters:
17 |
18 | ###### The following configuration is for the vCenter where the NSX Manager and controllers will be deployed
19 | _**vcenter_ip:**_ 192.168.110.22
20 | _**vcenter_username:**_ administrator@corp.local
21 | _**vcenter_password:**_ "VMware1!"
22 | _**vcenter_datacenter:**_ RegionA01
23 | _**vcenter_cluster:**_ RegionA01-MGMT # management cluster
24 | _**vcenter_datastore:**_ iscsi
25 |
26 | ###### Configuration for the NSX manager networking
27 |
28 | _**mgmt_portgroup:**_ 'ESXi-RegionA01-vDS-COMP'
29 | _**dns_server:**_ 192.168.110.10
30 | _**dns_domain:**_ corp.local.io
31 | _**ntp_servers:**_ time.vmware.com
32 | _**default_gateway:**_ 192.168.110.1
33 | _**netmask:**_ 255.255.255.0
34 |
35 | ###### The nsx_image_webserver is where you placed the files. It can be either the nginx server auto-deployed by the Docker image, or any other web server where you placed the files.
36 | **nsx_image_webserver:** "http://192.168.110.11:40001"
37 |
38 | ###### Here you can specify the management components configuration.
39 | (Note: if resource_reservation_off is set to false and the pipeline fails after deploying the manager and controllers because of resource constraints, the next run will remove the memory reservations of these VMs. This is useful for lab purposes only.)
40 | **nsx_manager_ips:** 192.168.110.33
41 | **nsx_manager_username:** admin
42 | **nsx_manager_password:** Admin!23Admin # Min 8 chars, with upper case, lower case, number, and special character
43 | **nsx_manager_hostname_prefix:** "nsxt-mgr"
44 | **[OPTIONAL] nsx_manager_root_pwd:** Admin!23Admin
45 | **[OPTIONAL] nsx_manager_cli_pwd:** Admin!23Admin
46 | **nsx_manager_deployment_size:** small # small, medium, large
47 | **nsx_manager_deployment_ip_prefix_length:** 23
48 | **nsx_manager_ssh_enabled:** true
49 | **resource_reservation_off:** true
51 | **[OPTIONAL] nsx_manager_virtual_ip:** 192.168.110.36
52 | **[OPTIONAL] nsx_manager_cluster_fqdn:** corp.local.io
53 | FQDN for the manager; it is used to generate the certificate for the VIP. If the VIP is set, this field is required.
54 | **[OPTIONAL] nsx_license_key:** 12345-12345-12345-12345-12345
55 |
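A condensed sketch of the manager section, including the optional cluster VIP (illustrative values from this page; nsx_manager_cluster_fqdn is required whenever nsx_manager_virtual_ip is set):
```
nsx_manager_ips: 192.168.110.33
nsx_manager_username: admin
nsx_manager_password: Admin!23Admin
nsx_manager_hostname_prefix: "nsxt-mgr"
nsx_manager_deployment_size: small
nsx_manager_virtual_ip: 192.168.110.36    # optional cluster VIP
nsx_manager_cluster_fqdn: corp.local.io   # required when the VIP is set
```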
56 |
57 | ###### The compute manager configuration serves two purposes: deploying the Edges through the NSX Manager API and auto-installing NSX on the vSphere clusters. The pipeline currently supports up to two vCenters (see the sketch below).
58 |
59 | **compute_manager_username:** "Administrator@corp.local"
60 | **compute_manager_password:** "VMware1!"
61 | **[OPTIONAL] compute_manager_2_username:** admin
62 | **[OPTIONAL] compute_manager_2_vcenter_ip:** 192.168.110.99
63 | **[OPTIONAL] compute_manager_2_password:** Admin!23Admin
64 |
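A sketch of this block with the optional second vCenter included (illustrative values; drop the three compute_manager_2_* keys if only one vCenter is managed):
```
compute_manager_username: "Administrator@corp.local"
compute_manager_password: "VMware1!"
compute_manager_2_vcenter_ip: 192.168.110.99
compute_manager_2_username: admin
compute_manager_2_password: Admin!23Admin
```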
65 | ###### Do not configure the following parameters if you plan to use the cluster auto-install. This section is used for per-ESXi-server/edge installation.
66 | **edge_uplink_profile_vlan:** 0
67 | For the outbound uplink connection used by the Edge; usually keep as 0.
68 | **esxi_uplink_profile_vlan:** 0
69 | For the internal overlay connection used by ESXi hosts; usually the transport VLAN ID.
70 |
71 | ###### Configuration of the VTEP pool to be used by the ESXi servers and edges.
72 | **vtep_ip_pool_name:** tep-ip-pool
73 | **vtep_ip_pool_cidr:** 192.168.213.0/24
74 | **vtep_ip_pool_gateway:** 192.168.213.1
75 | **vtep_ip_pool_start:** 192.168.213.10
76 | **vtep_ip_pool_end:** 192.168.213.200
77 |
78 | ###### Tier 0 configuration. Note that only the HA VIP and static routing are currently supported by the pipeline (see the sketch below).
79 | **tier0_router_name:** DefaultT0Router
80 | **tier0_uplink_port_ip:** 192.168.100.4
81 | **tier0_uplink_port_subnet:** 24
82 | **tier0_uplink_next_hop_ip:** 192.168.100.1
83 | **tier0_uplink_port_ip_2:** 192.168.100.5
84 | **tier0_ha_vip:** 192.168.100.3
85 |
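Taken together, a minimal Tier 0 sketch with the HA VIP looks like this (illustrative values from this page; the VIP floats across the two uplink ports created from tier0_uplink_port_ip and tier0_uplink_port_ip_2, so all three addresses should sit in the same uplink subnet):
```
tier0_router_name: DefaultT0Router
tier0_uplink_port_ip: 192.168.100.4      # uplink port 1
tier0_uplink_port_ip_2: 192.168.100.5    # uplink port 2 (enables the HA VIP)
tier0_uplink_port_subnet: 24
tier0_uplink_next_hop_ip: 192.168.100.1  # next hop for the static default route
tier0_ha_vip: 192.168.100.3              # VIP shared by the two uplink ports
```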
86 | ###### Edge nodes
87 | **edge_ips:** 192.168.110.37, 192.168.110.38
88 | Comma-separated; list one IP per required edge.
89 | Note: if more than one IP is specified in edge_ips, the pipeline deploys multiple edges (see the example below).
90 | **edge_default_gateway:** 192.168.110.1
91 | **edge_ip_prefix_length:** 24
92 | **edge_hostname_prefix:** nsx-t-edge
93 | **edge_fabric_name_prefix:** EdgeNode
94 | **edge_transport_node_prefix:** edge-transp-node
95 | **edge_cli_password:** "VMware1!"
96 | **edge_root_password:** "VMware1!"
97 | **edge_deployment_size:** "large"
98 | Large is recommended for PKS deployments.
99 | **vc_datacenter_for_edge:** RegionA01
100 | **vc_cluster_for_edge:** RegionA01-MGMT
101 | **vc_datastore_for_edge:** iscsi
102 | **vc_uplink_network_for_edge:** "ESXi-RegionA01-vDS-COMP"
103 | **vc_overlay_network_for_edge:** "VM-RegionA01-vDS-COMP"
104 | **vc_management_network_for_edge:** "ESXi-RegionA01-vDS-COMP"
105 |
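For instance, a two-edge deployment using the values above would be declared as follows (a sketch; one edge VM is deployed per IP listed in edge_ips):
```
edge_ips: 192.168.110.37, 192.168.110.38   # two IPs -> two edge nodes
edge_default_gateway: 192.168.110.1
edge_ip_prefix_length: 24
edge_hostname_prefix: nsx-t-edge
edge_deployment_size: "large"              # large is recommended for PKS
```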
106 | ###### This configuration installs NSX on vSphere clusters automatically. Specify a comma-separated list of clusters to install NSX on and the VLANs they will use (see the example below).
107 | **clusters_to_install_nsx:** RegionA01-MGMT, RegionA01-K8s #Comma separated
108 | **per_cluster_vlans:** 0, 0
109 | Note: assigning multiple uplink profiles is not yet supported.
110 |
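The two lists are read positionally, so the first VLAN is applied to the first cluster, the second VLAN to the second cluster, and so on (a sketch with the illustrative values above):
```
clusters_to_install_nsx: RegionA01-MGMT, RegionA01-K8s
per_cluster_vlans: 0, 0   # VLAN for RegionA01-MGMT, VLAN for RegionA01-K8s
```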
111 |
112 | ###### Per-ESXi installation. If you are not using the auto cluster install, or if additional ESXi hosts are needed, specify them here. Otherwise leave esx_ips empty.
113 | **[OPTIONAL] esx_ips:**
114 | **[OPTIONAL] esx_os_version:** "6.5.0"
115 | **[OPTIONAL] esx_root_password:** "VMware1!"
116 | **[OPTIONAL] esx_hostname_prefix:** "esx-host"
117 |
118 | ###### Specify which physical NICs will be claimed on the ESXi servers; a comma-separated list works (see the example below). Note that this applies to both auto-installed clusters and individual ESXi hosts.
119 | **esx_available_vmnic: "vmnic1"**
120 |
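For example, to claim two physical NICs per host you could list them comma-separated (hypothetical NIC names for illustration; verify which vmnics are actually unused on your hosts before claiming them):
```
esx_available_vmnic: "vmnic1,vmnic6"
```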
121 |
122 |
123 |
124 |
125 | ### Logical parameters:
126 |
127 | ##### The following sections of the parameter file can be customized for your deployment. You can set the configuration according to your requirements, whether PAS, PKS, plain VMs, or anything else. The example below covers both PAS and PKS. As mentioned above, if you are using the nsx-t-install docker image, simply delete the sections that do not apply to your deployment. For existing Concourse servers, leave the parameter spec names in place.
128 | ###### Tier-1 distributed routers and switches configuration
129 | ```
130 | nsx_t_t1router_logical_switches_spec: |
131 | t1_routers:
132 | # Add additional T1 Routers or collapse switches into same T1 Router as needed
133 | # Remove unneeded T1 routers
134 |
135 | - name: T1-Router-PAS-Infra
136 | switches:
137 | - name: PAS-Infra
138 | logical_switch_gw: 192.168.10.1 # Last octet should be 1 rather than 0
139 | subnet_mask: 24
140 |
141 | - name: T1-Router-PAS-ERT
142 | switches:
143 | - name: PAS-ERT
144 | logical_switch_gw: 192.168.20.1 # Last octet should be 1 rather than 0
145 | subnet_mask: 24
146 | edge_cluster: true
147 |
148 | - name: T1-Router-PAS-Services
149 | switches:
150 | - name: PAS-Services
151 | logical_switch_gw: 192.168.30.1 # Last octet should be 1 rather than 0
152 | subnet_mask: 24
153 |
154 | # Comment off the following T1 Routers if there is no PKS
155 | - name: T1-Router-PKS-Infra
156 | switches:
157 | - name: PKS-Infra
158 | logical_switch_gw: 192.168.50.1 # Last octet should be 1 rather than 0
159 | subnet_mask: 24
160 |
161 | - name: T1Router-PKS-Services
162 | switches:
163 | - name: PKS-Services
164 | logical_switch_gw: 192.168.60.1 # Last octet should be 1 rather than 0
165 | subnet_mask: 24
166 | ```
167 |
168 | ###### This configuration creates an HA switching profile (required by PAS).
169 | ```
170 | nsx_t_ha_switching_profile_spec: |
171 | ha_switching_profiles:
172 | - name: HASwitchingProfile
173 | tags:
174 | ```
175 |
176 | ###### This configuration is for the IPAM IP blocks
177 | ```
178 | nsx_t_container_ip_block_spec: |
179 | container_ip_blocks:
180 | - name: PAS-container-ip-block
181 | cidr: 10.4.0.0/16
182 | tags:
183 | - name: PKS-node-ip-block
184 | cidr: 11.4.0.0/16
185 | - name: PKS-pod-ip-block
186 | cidr: 12.4.0.0/16
187 | ```
188 |
189 | ###### The following configuration is for the IP pools. In the example below we create two SNAT/VIP pools, one for PAS and one for PKS. If additional TEP pools are required, you can also create them here.
190 | ```
191 | nsx_t_external_ip_pool_spec: |
192 | external_ip_pools:
193 | - name: snat-vip-pool-for-pas
194 | cidr: 10.208.40.0/24
195 | start: 10.208.40.10 # Should not include gateway
196 | end: 10.208.40.200 # Should not include gateway
197 |
198 | - name: snat-vip-pool-for-pks
199 | cidr: 10.208.50.0/24
200 | start: 10.208.50.10 # Should not include gateway
201 | end: 10.208.50.200 # Should not include gateway
202 | ```
203 |
204 | ###### NAT rules (these can be pre-configured if the IPs are known)
205 | ```
206 | nsx_t_nat_rules_spec: |
207 | nat_rules:
208 | # Sample entry for allowing inbound to PAS Ops manager
209 | - t0_router: DefaultT0Router
210 | nat_type: dnat
211 | destination_network: 10.208.40.2 # External IP address for PAS opsmanager
212 | translated_network: 192.168.10.2 # Internal IP of PAS Ops manager
213 | rule_priority: 1024 # Higher priority
214 |
215 | # Sample entry for allowing outbound from PAS Ops Mgr to external
216 | - t0_router: DefaultT0Router
217 | nat_type: snat
218 | source_network: 192.168.10.2 # Internal IP of PAS opsmanager
219 | translated_network: 10.208.40.2 # External IP address for PAS opsmanager
220 | rule_priority: 1024 # Higher priority
221 |
222 | # Sample entry for PAS Infra network SNAT
223 | - t0_router: DefaultT0Router
224 | nat_type: snat
225 | source_network: 192.168.10.0/24 # PAS Infra network cidr
226 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
227 | rule_priority: 8000 # Lower priority
228 |
229 | # Sample entry for PAS ERT network SNAT
230 | - t0_router: DefaultT0Router
231 | nat_type: snat
232 | source_network: 192.168.20.0/24 # PAS ERT network cidr
233 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
234 | rule_priority: 8000 # Lower priority
235 |
236 | # Sample entry for PAS Services network SNAT
237 | - t0_router: DefaultT0Router
238 | nat_type: snat
239 | source_network: 192.168.30.0/24 # PAS Services network cidr
240 | translated_network: 10.208.40.3 # SNAT External Address for PAS networks
241 | rule_priority: 8001 # Lower priority
242 |
243 |
244 | # Sample entry for PKS-Services network
245 | - t0_router: DefaultT0Router
246 | nat_type: snat
247 | source_network: 192.168.60.0/24 # PKS Clusters network cidr
248 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
249 | rule_priority: 8001 # Lower priority
250 |
251 | # Sample entry for PKS-Infra network
252 | - t0_router: DefaultT0Router
253 | nat_type: snat
254 | source_network: 192.168.50.0/24 # PKS Infra network cidr
255 | translated_network: 10.208.50.3 # SNAT External Address for PKS networks
256 | rule_priority: 8001 # Lower priority
257 |
258 | # Sample entry for allowing inbound to PKS Ops manager
259 | - t0_router: DefaultT0Router
260 | nat_type: dnat
261 | destination_network: 10.208.50.2 # External IP address for PKS opsmanager
262 | translated_network: 192.168.50.2 # Internal IP of PKS Ops manager
263 | rule_priority: 1024 # Higher priority
264 |
265 | # Sample entry for allowing outbound from PKS Ops Mgr to external
266 | - t0_router: DefaultT0Router
267 | nat_type: snat
268 |     source_network: 192.168.50.2 # Internal IP of PKS opsmanager
269 |     translated_network: 10.208.50.2 # External IP address for PKS opsmanager
270 | rule_priority: 1024 # Higher priority
271 |
272 | # Sample entry for allowing inbound to PKS Controller
273 | - t0_router: DefaultT0Router
274 | nat_type: dnat
275 |     destination_network: 10.208.50.4 # External IP address for PKS controller
276 |     translated_network: 192.168.60.2 # Internal IP of PKS controller
277 | rule_priority: 1024 # Higher priority
278 |
279 | # Sample entry for allowing outbound from PKS Controller to external
280 | - t0_router: DefaultT0Router
281 | nat_type: snat
282 | source_network: 192.168.60.2 # Internal IP of PKS controller
283 | translated_network: 10.208.50.4 # External IP address for PKS controller
284 | rule_priority: 1024 # Higher priority
285 | ```
286 |
287 | ###### Specify the self-signed certificate config for NSX-T.
288 | ```
289 | nsx_t_csr_request_spec: |
290 | csr_request:
291 | #common_name not required - would use nsx_t_manager_host_name
292 | org_name: Company # EDIT
293 | org_unit: net-integ # EDIT
294 | country: US # EDIT
295 | state: CA # EDIT
296 | city: SF # EDIT
297 | key_size: 2048 # Valid values: 2048 or 3072
298 | algorithm: RSA
299 | ```
300 |
301 | ###### Load balancing config. Required only for PAS. If deploying PKS only, comment out everything from '- name' down.
302 | ```
303 | nsx_t_lbr_spec: |
304 | loadbalancers:
305 | # Sample entry for creating LBR for PAS ERT
306 | - name: PAS-ERT-LBR
307 | t1_router: T1-Router-PAS-ERT # Should match a previously declared T1 Router
308 | size: small # Allowed sizes: small, medium, large
309 | virtual_servers:
310 | - name: goRouter443 # Name that signifies function being exposed
311 | vip: 10.208.40.4 # Exposed VIP for LBR to listen on
312 | port: 443
313 | members:
314 | - ip: 192.168.20.11 # Internal ip of GoRouter instance 1
315 | port: 443
316 | - name: goRouter80
317 | vip: 10.208.40.4
318 | port: 80
319 | members:
320 | - ip: 192.168.20.31 # Internal ip of GoRouter instance 1
321 | port: 80
322 | - ip: 192.168.20.32 # Internal ip of GoRouter instance 2
323 | port: 80
324 | - name: sshProxy # SSH Proxy exposed to outside
325 | vip: 10.208.40.5
326 | port: 2222 # Port 2222 for ssh proxy
327 | members:
328 | - ip: 192.168.20.41 # Internal ip of Diego Brain where ssh proxy runs
329 | port: 2222
330 | ```
331 |
332 |
333 |
334 |
335 |
336 |
337 |
338 |
339 |
340 |
--------------------------------------------------------------------------------
/nsxt_yaml/basic_topology.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: 127.0.0.1
3 | connection: local
4 | become: yes
5 | vars_files:
6 | - vars.yml
7 | tasks:
8 | - name: deploy NSX Manager OVA
9 | nsxt_deploy_ova:
10 | ovftool_path: "{{ovftool_bin_path}}"
11 | datacenter: "{{hostvars['localhost'].vcenter_datacenter}}"
12 | datastore: "{{hostvars['localhost'].vcenter_datastore}}"
13 | portgroup: "{{hostvars['localhost'].mgmt_portgroup}}"
14 | cluster: "{{hostvars['localhost'].vcenter_cluster}}"
15 | vmname: "{{hostvars['localhost'].nsx_manager_assigned_hostname}}"
16 | hostname: "{{hostvars['localhost'].nsx_manager_assigned_hostname}}.{{hostvars['localhost'].dns_domain}}"
17 | dns_server: "{{hostvars['localhost'].dns_server}}"
18 | dns_domain: "{{hostvars['localhost'].dns_domain}}"
19 | ntp_server: "{{hostvars['localhost'].ntp_servers}}"
20 | gateway: "{{hostvars['localhost'].default_gateway}}"
21 | ip_address: "{{hostvars['localhost'].nsx_manager_ip}}"
22 | netmask: "{{hostvars['localhost'].netmask}}"
23 | admin_password: "{{hostvars['localhost'].nsx_manager_password}}"
24 | cli_password: "{{hostvars['localhost'].nsx_manager_cli_pwd}}"
25 | path_to_ova: "{{hostvars['localhost'].nsx_image_webserver}}"
26 | ova_file: "{{hostvars['localhost'].ova_file_name}}"
27 | vcenter: "{{hostvars['localhost'].vcenter_ip}}"
28 | vcenter_user: "{{hostvars['localhost'].vcenter_username}}"
29 | vcenter_passwd: "{{hostvars['localhost'].vcenter_password}}"
30 | deployment_size: "{{hostvars['localhost'].nsx_manager_deployment_size}}"
31 | role: "{{hostvars['localhost'].nsx_manager_role}}"
32 | ssh_enabled: "{{hostvars['localhost'].nsx_manager_ssh_enabled}}"
33 | allow_ssh_root_login: "{{hostvars['localhost'].nsx_manager_ssh_enabled}}"
34 | register: ova_deploy_status
35 |
36 | - name: Check manager status
37 | nsxt_manager_status:
38 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
39 | username: "{{hostvars['localhost'].nsx_manager_username}}"
40 | password: "{{hostvars['localhost'].nsx_manager_password}}"
41 | validate_certs: False
42 | wait_time: 60
43 |
44 |     - name: Wait for NSX appliances to come up
45 | pause:
46 | minutes: "{{hostvars['localhost'].appliance_ready_wait}}"
47 | when:
48 | - ova_deploy_status.changed == true
49 |
50 |     - name: Generate NSX self-signed certificate if it doesn't exist
51 | command: python nsx_t_gen.py --router_config false --generate_cert true
52 |
53 |     - name: Wait for NSX appliances to come up
54 | pause:
55 | minutes: "{{hostvars['localhost'].appliance_ready_wait}}"
56 |
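# The next task keeps curling the trust-management certificates endpoint and only
# exits once curl returns 0, i.e. once the NSX Manager REST API is reachable and answering.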
57 | - name: Wait for Manager API server to be up
58 | shell: "last=1; while [ $last -ne 0 ]; do curl -s -o /dev/null -ku '{{hostvars['localhost'].nsx_manager_username}}:{{hostvars['localhost'].nsx_manager_password}}' https://{{hostvars['localhost'].nsx_manager_ip}}/api/v1/trust-management/certificates --connect-timeout 5; last=$?; echo \"waiting for NSX Manager to come up\"; done"
59 |
60 | - name: Add license for NSX-T manager if applicable
61 | nsxt_licenses:
62 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
63 | username: "{{hostvars['localhost'].nsx_manager_username}}"
64 | password: "{{hostvars['localhost'].nsx_manager_password}}"
65 | validate_certs: False
66 | license_key: "{{hostvars['localhost'].nsx_license_key}}"
67 | state: present
68 | when:
69 | - hostvars['localhost'].nsx_license_key != "null"
70 |
71 | # this should've been enabled already
72 | - name: Enable install-upgrade service
73 | uri:
74 | method: PUT
75 | url: https://{{hostvars['localhost'].nsx_manager_ip}}/api/v1/node/services/install-upgrade
76 | user: "{{hostvars['localhost'].nsx_manager_username}}"
77 | password: "{{hostvars['localhost'].nsx_manager_password}}"
78 | body:
79 | service_name: "install-upgrade"
80 | service_properties:
81 | enabled: True
82 | body_format: json
83 | force_basic_auth: yes
84 | validate_certs: no
85 |
86 | - name: Get install-upgrade service status
87 | uri:
88 | method: GET
89 | url: https://{{hostvars['localhost'].nsx_manager_ip}}/api/v1/node/services/install-upgrade/status
90 | user: "{{hostvars['localhost'].nsx_manager_username}}"
91 | password: "{{hostvars['localhost'].nsx_manager_password}}"
92 | force_basic_auth: yes
93 | validate_certs: no
94 | register: iu_status
95 |
96 | - name: Restart install-upgrade service if it's not running
97 | uri:
98 | method: POST
99 | url: https://{{hostvars['localhost'].nsx_manager_ip}}/api/v1/node/services/install-upgrade?action=restart
100 | user: "{{hostvars['localhost'].nsx_manager_username}}"
101 | password: "{{hostvars['localhost'].nsx_manager_password}}"
102 | force_basic_auth: yes
103 | validate_certs: no
104 | when: iu_status.json.runtime_state != "running"
105 |
106 | - name: Wait 2 minutes before deploying compute manager
107 | pause: minutes=2
108 |
109 | - name: Deploy compute manager
110 | nsxt_fabric_compute_managers:
111 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
112 | username: "{{hostvars['localhost'].nsx_manager_username}}"
113 | password: "{{hostvars['localhost'].nsx_manager_password}}"
114 | validate_certs: False
115 | display_name: "{{compute_manager_name}}"
116 | server: "{{hostvars['localhost'].vcenter_ip}}" # vCenter IP
117 | origin_type: vCenter
118 | credential:
119 | credential_type: UsernamePasswordLoginCredential
120 | username: "{{hostvars['localhost'].compute_manager_username}}"
121 | password: "{{hostvars['localhost'].compute_manager_password}}"
122 | state: present
123 | retries: 3
124 | delay: 10
125 | register: compute_manager
126 | until: compute_manager is not failed
127 |
128 | # TODO: change var names
129 | - name: Deploy compute manager 2
130 | nsxt_fabric_compute_managers:
131 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
132 | username: "{{hostvars['localhost'].nsx_manager_username}}"
133 | password: "{{hostvars['localhost'].nsx_manager_password}}"
134 | validate_certs: False
135 | display_name: "{{compute_manager_2_name}}"
136 | server: "{{hostvars['localhost'].compute_manager_2_vcenter_ip}}"
137 | origin_type: vCenter
138 | credential:
139 | credential_type: UsernamePasswordLoginCredential
140 | username: "{{hostvars['localhost'].compute_manager_2_username}}"
141 | password: "{{hostvars['localhost'].compute_manager_2_password}}"
142 | state: present
143 | register: compute_manager_2_compute_cluster
144 | when:
145 | - hostvars['localhost'].compute_manager_2_vcenter_ip is defined
146 | - hostvars['localhost'].compute_manager_2_username is defined
147 | - hostvars['localhost'].compute_manager_2_password is defined
148 |
149 | - name: Install pyvmomi tools dependency
150 | shell: "cd /tmp; git clone https://github.com/vmware/pyvmomi-community-samples.git; cp -r pyvmomi-community-samples/samples/tools /usr/local/lib/python2.7/dist-packages"
151 | when:
152 | - hostvars['localhost'].resource_reservation_off == "true"
153 |
154 | - name: Turn off memory reservation for additional controller-managers and power them on
155 | command: python turn_off_reservation.py --host {{hostvars['localhost'].vcenter_ip}} --user {{hostvars['localhost'].vcenter_username}} --password {{hostvars['localhost'].vcenter_password}} --vm_list {{hostvars[item].hostname}}
156 | with_items:
157 | - "{{groups['controllers']}}"
158 | when:
159 | - groups['controllers'] is defined
160 | - hostvars['localhost'].resource_reservation_off == "true"
161 |
162 | - name: Turn off memory reservation for edge nodes and power them on
163 | command: python turn_off_reservation.py --host {{hostvars['localhost'].vcenter_ip}} --user {{hostvars['localhost'].vcenter_username}} --password {{hostvars['localhost'].vcenter_password}} --vm_list {{hostvars[item].hostname}}
164 | with_items:
165 | - "{{groups['edge_nodes']}}"
166 | when:
167 | - hostvars['localhost'].resource_reservation_off == "true"
168 |
169 | - name: Deploy additional controller-managers
170 | nsxt_manager_auto_deployment:
171 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
172 | username: "{{hostvars['localhost'].nsx_manager_username}}"
173 | password: "{{hostvars['localhost'].nsx_manager_password}}"
174 | validate_certs: False
175 | deployment_requests:
176 | - roles: [CONTROLLER, MANAGER]
177 | form_factor: "{{hostvars['localhost'].nsx_manager_deployment_size.upper()}}"
178 | user_settings:
179 | cli_password: "{{hostvars['localhost'].nsx_manager_cli_pwd}}"
180 | root_password: "{{hostvars['localhost'].nsx_manager_root_pwd}}"
181 | deployment_config:
182 | placement_type: VsphereClusterNodeVMDeploymentConfig
183 | vc_name: "{{compute_manager_name}}"
184 | management_network: "{{hostvars['localhost'].vc_management_network_for_deployment}}"
185 | hostname: "{{hostvars[item].hostname}}"
186 | compute: "{{hostvars['localhost'].vc_cluster_for_deployment}}"
187 | storage: "{{hostvars['localhost'].vc_datastore_for_deployment}}"
188 | default_gateway_addresses:
189 | - "{{hostvars[item].default_gateway}}"
190 | management_port_subnets:
191 | - ip_addresses:
192 | - "{{hostvars[item].ip}}"
193 | prefix_length: "{{hostvars[item].prefix_length}}"
194 | enable_ssh: "{{hostvars['localhost'].nsx_manager_ssh_enabled}}"
195 | state: present
196 | with_items: "{{groups['controllers']}}"
197 | when: groups['controllers'] is defined
198 | register: controller_response
199 |
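# The shell loop below pings each controller once with a 2-second timeout and retries
# up to 60 times, which is where the "120 seconds" in the task name comes from.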
200 | - name: Wait 120 seconds for controllers to become reachable
201 | shell: I=0; while ! ping -c 1 -W 2 {{hostvars[item].ip}}; do I=$(( I + 1 )); [ $I -gt 60 ] && exit 1; done; exit 0
202 | with_items: "{{groups['controllers']}}"
203 | when: groups['controllers'] is defined
204 |
205 | # TODO: does not support tag
206 | - name: Create Transport Zones
207 | nsxt_transport_zones:
208 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
209 | username: "{{hostvars['localhost'].nsx_manager_username}}"
210 | password: "{{hostvars['localhost'].nsx_manager_password}}"
211 | validate_certs: False
212 | resource_type: "TransportZone"
213 | display_name: "{{item.display_name}}"
214 | description: "NSX {{item.transport_type}} Transport Zone"
215 | transport_type: "{{item.transport_type}}"
216 | host_switch_name: "{{item.host_switch_name}}"
217 | state: "present"
218 | with_items:
219 | - "{{transportzones}}"
220 |
221 | - name: Create uplink profile
222 | nsxt_uplink_profiles:
223 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
224 | username: "{{hostvars['localhost'].nsx_manager_username}}"
225 | password: "{{hostvars['localhost'].nsx_manager_password}}"
226 | validate_certs: False
227 | resource_type: UplinkHostSwitchProfile
228 | display_name: "{{item.display_name}}"
229 | mtu: 1600
230 | teaming: "{{item.teaming}}"
231 | transport_vlan: "{{item.transport_vlan}}"
232 | state: "present"
233 | with_items:
234 | - "{{uplink_profiles}}"
235 |
236 | # TODO: support tags
237 | - name: Create VTEP IP pool
238 | nsxt_ip_pools:
239 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
240 | username: "{{hostvars['localhost'].nsx_manager_username}}"
241 | password: "{{hostvars['localhost'].nsx_manager_password}}"
242 | validate_certs: False
243 | display_name: "{{vtep_ip_pool_name}}"
244 | subnets:
245 | - cidr: "{{hostvars['localhost'].vtep_ip_pool_cidr}}"
246 | allocation_ranges:
247 | - start: "{{ hostvars['localhost'].vtep_ip_pool_start }}"
248 | end: "{{ hostvars['localhost'].vtep_ip_pool_end }}"
249 | gateway_ip: "{{hostvars['localhost'].vtep_ip_pool_gateway}}"
250 | state: present
251 | register: vtep_pool_object
252 |
253 | - name: Add Edge VM as transport node
254 | nsxt_transport_nodes:
255 | hostname: "{{hostvars['localhost'].nsx_manager_ip}}"
256 | username: "{{hostvars['localhost'].nsx_manager_username}}"
257 | password: "{{hostvars['localhost'].nsx_manager_password}}"
258 | validate_certs: False
259 | display_name: "{{hostvars[item].transport_node_name}}"
260 | host_switch_spec:
261 | resource_type: StandardHostSwitchSpec
262 | host_switches:
263 | - host_switch_profiles:
264 | - name: "{{edge_uplink_prof}}"
265 | type: UplinkHostSwitchProfile
266 | host_switch_name: "{{overlay_host_switch}}"
267 | pnics:
268 | - device_name: fp-eth0
269 | uplink_name: "{{uplink_1_name}}"
270 | ip_assignment_spec:
271 | resource_type: StaticIpPoolSpec
272 | ip_pool_name: "{{vtep_ip_pool_name}}"
273 | - host_switch_profiles:
274 | - name: "{{edge_uplink_prof}}"
275 | type: UplinkHostSwitchProfile
276 | host_switch_name: "{{vlan_host_switch}}"
277 | pnics:
278 | - device_name: fp-eth1
279 | uplink_name: "{{uplink_1_name}}"
280 | transport_zone_endpoints:
281 | - transport_zone_name: "{{overlay_transport_zone}}"
282 | - transport_zone_name: "{{vlan_transport_zone}}"
283 | node_deployment_info:
284 | resource_type: "EdgeNode"
285 | display_name: "{{hostvars[item].hostname}}"
286 | ip_addresses:
287 | - "{{hostvars[item].ip}}"
288 | deployment_config:
289 | form_factor: "{{hostvars[item].edge_deployment_size.upper()}}"
290 | node_user_settings:
291 | cli_password: "{{hostvars[item].edge_cli_password}}"
292 | root_password: "{{hostvars[item].edge_root_password}}"
293 | vm_deployment_config:
294 | placement_type: VsphereDeploymentConfig
295 | vc_name: "{{compute_manager_name}}"
296 | data_networks:
297 | - "{{hostvars[item].vc_overlay_network_for_edge}}"
298 | - "{{hostvars[item].vc_uplink_network_for_edge}}"
299 | management_network: "{{hostvars[item].vc_management_network_for_edge}}"
300 | hostname: "{{hostvars[item].hostname}}"
301 | compute: "{{hostvars[item].vc_cluster_for_edge}}"
302 | storage: "{{hostvars[item].vc_datastore_for_edge}}"
303 | default_gateway_addresses:
304 | - "{{hostvars[item].default_gateway}}"
305 | management_port_subnets:
306 | - ip_addresses:
307 | - "{{hostvars[item].ip}}"
308 | prefix_length: "{{hostvars[item].prefix_length}}"
309 | enable_ssh: "{{hostvars['localhost'].nsx_manager_ssh_enabled}}"
310 | state: present
311 | with_items:
312 | - "{{groups['edge_nodes']}}"
313 |
314 | - name: Wait 120 seconds for edge nodes to become reachable
315 | shell: I=0; while ! ping -c 1 -W 2 {{hostvars[item].ip}}; do I=$(( I + 1 )); [ $I -gt 60 ] && exit 1; done; exit 0
316 | with_items: "{{groups['edge_nodes']}}"
317 |
--------------------------------------------------------------------------------