├── .dockerignore ├── .gitignore ├── Dockerfile ├── Dockerfile.mTLS ├── LICENSE ├── Makefile ├── README.md ├── ansible ├── .gitignore ├── README.md ├── ansible.cfg ├── playbook.retry ├── playbook.yml └── roles │ ├── cloudinit │ ├── files │ │ ├── etc │ │ │ ├── cloud │ │ │ │ └── cloud.cfg.d │ │ │ │ │ ├── 10-enable-manage-etc-hosts.cfg │ │ │ │ │ └── 99-DataSourceVMware.cfg │ │ │ └── systemd │ │ │ │ └── system │ │ │ │ ├── net-postconfig.service │ │ │ │ └── ovf-to-cloud-init.service │ │ ├── usr │ │ │ └── lib │ │ │ │ └── python3.7 │ │ │ │ └── site-packages │ │ │ │ └── cloudinit │ │ │ │ └── distros │ │ │ │ └── photon.py │ │ └── var │ │ │ └── lib │ │ │ └── vmware │ │ │ ├── metadata.txt │ │ │ ├── net-postconfig.sh │ │ │ ├── ovf-to-cloud-init.sh │ │ │ ├── retry.sh │ │ │ └── userdata.txt │ └── tasks │ │ └── main.yml │ ├── common │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── main.yml │ │ ├── photon.yml │ │ └── rpm_repos.yml │ ├── haproxy │ ├── files │ │ └── etc │ │ │ ├── haproxy │ │ │ ├── dataplaneapi.cfg │ │ │ ├── haproxy.cfg │ │ │ └── haproxy.cfg.mtls │ │ │ └── systemd │ │ │ └── system │ │ │ ├── dataplaneapi.service │ │ │ ├── dataplaneapi.slice │ │ │ ├── haproxy.service.d │ │ │ ├── cloud-init.conf │ │ │ └── slice.conf │ │ │ └── haproxy.slice │ └── tasks │ │ └── main.yml │ ├── pki │ ├── files │ │ └── usr │ │ │ └── local │ │ │ └── bin │ │ │ ├── new-ca.sh │ │ │ └── new-cert.sh │ └── tasks │ │ └── main.yml │ ├── sysprep │ ├── files │ │ └── etc │ │ │ └── hosts │ └── tasks │ │ └── main.yml │ └── vmware │ ├── files │ ├── etc │ │ ├── systemd │ │ │ └── system │ │ │ │ ├── anyip-routes.service │ │ │ │ └── route-tables.service │ │ └── vmware │ │ │ ├── anyip-routes.cfg │ │ │ └── route-tables.cfg │ ├── usr │ │ ├── lib │ │ │ └── systemd │ │ │ │ └── network │ │ │ │ ├── 10-frontend.link │ │ │ │ ├── 10-management.link │ │ │ │ └── 10-workload.link │ │ └── local │ │ │ └── bin │ │ │ └── haproxy-support │ └── var │ │ └── lib │ │ └── vmware │ │ ├── anyiproutectl.sh │ │ └── routetablectl.sh │ 
└── tasks │ └── main.yml ├── docs ├── how-to-build-ova.md ├── how-to-container.md ├── upgrade.md └── virtual-ip-config.md ├── example ├── README.md ├── ca.crt ├── ca.key ├── client.crt ├── client.key ├── id_rsa ├── id_rsa.pub ├── meta-data ├── server.crt ├── server.key └── user-data ├── hack ├── image-build-ova.py ├── image-govc-cloudinit.sh ├── image-post-create-config.sh ├── image-ssh.sh ├── image-tools.sh ├── image-upload.py └── test-route-programs.sh ├── kickstart.json └── packer.json /.dockerignore: -------------------------------------------------------------------------------- 1 | .dockerignore 2 | .gitignore 3 | output/ 4 | packer_cache/ 5 | hack/ -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | output/ 2 | packer_cache/ -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ################################################################################ 16 | ## BUILD ARGS ## 17 | ################################################################################ 18 | # The golang image is used to build the DataPlane API. 
19 | ARG GOLANG_IMAGE=golang:1.14.1 20 | 21 | 22 | ################################################################################ 23 | ## BUILD DATAPLANE API STAGE ## 24 | ################################################################################ 25 | FROM ${GOLANG_IMAGE} as builder 26 | 27 | # The Git repo used to build the DataPlane API binary. 28 | ARG DATAPLANEAPI_URL 29 | ENV DATAPLANEAPI_URL ${DATAPLANEAPI_URL:-https://github.com/haproxytech/dataplaneapi.git} 30 | 31 | # The Git ref used to build the DataPlane API binary. 32 | ARG DATAPLANEAPI_REF 33 | ENV DATAPLANEAPI_REF ${DATAPLANEAPI_REF:-494f9b817842d9e28f7b75c4c32a59395794636c} 34 | 35 | WORKDIR / 36 | 37 | RUN git clone "${DATAPLANEAPI_URL}" && \ 38 | cd dataplaneapi && \ 39 | git checkout -b build-me "${DATAPLANEAPI_REF}" && \ 40 | make build 41 | 42 | 43 | ################################################################################ 44 | ## MAIN STAGE ## 45 | ################################################################################ 46 | FROM photon:3.0 as main 47 | LABEL "maintainer" "Andrew Kutz " 48 | 49 | WORKDIR / 50 | 51 | COPY --from=builder /dataplaneapi/build/dataplaneapi /usr/local/bin/dataplaneapi 52 | RUN chmod 0755 /usr/local/bin/dataplaneapi 53 | 54 | RUN tdnf install -y \ 55 | awk \ 56 | curl \ 57 | inotify-tools\ 58 | iproute2 \ 59 | iputils \ 60 | lsof \ 61 | pcre \ 62 | rpm \ 63 | shadow \ 64 | systemd \ 65 | vim 66 | 67 | RUN tdnf install -y haproxy && \ 68 | useradd --system --home-dir=/var/lib/haproxy --user-group haproxy && \ 69 | mkdir -p /var/lib/haproxy && \ 70 | chown -R haproxy:haproxy /var/lib/haproxy 71 | COPY ansible/roles/haproxy/files/etc/haproxy/haproxy.cfg \ 72 | example/ca.crt example/server.crt example/server.key \ 73 | /etc/haproxy/ 74 | RUN chmod 0640 /etc/haproxy/haproxy.cfg /etc/haproxy/*.crt && \ 75 | chmod 0440 /etc/haproxy/*.key 76 | 77 | RUN mkdir -p /etc/vmware /var/lib/vmware 78 | COPY ansible/roles/vmware/files/etc/vmware/*.cfg \ 79 | 
/etc/vmware/ 80 | COPY ansible/roles/vmware/files/var/lib/vmware/*.sh \ 81 | /var/lib/vmware/ 82 | RUN chmod 0644 /etc/vmware/*.cfg && \ 83 | chmod 0744 /var/lib/vmware/*.sh 84 | 85 | CMD [ "-f", "/etc/haproxy/haproxy.cfg" ] 86 | ENTRYPOINT [ "/usr/sbin/haproxy" ] 87 | -------------------------------------------------------------------------------- /Dockerfile.mTLS: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | ################################################################################ 16 | ## BUILD ARGS ## 17 | ################################################################################ 18 | # The golang image is used to build the DataPlane API. 19 | ARG GOLANG_IMAGE=golang:1.14.1 20 | 21 | 22 | ################################################################################ 23 | ## BUILD DATAPLANE API STAGE ## 24 | ################################################################################ 25 | FROM ${GOLANG_IMAGE} as builder 26 | 27 | # The Git repo used to build the DataPlane API binary. 28 | ARG DATAPLANEAPI_URL 29 | ENV DATAPLANEAPI_URL ${DATAPLANEAPI_URL:-https://github.com/haproxytech/dataplaneapi.git} 30 | 31 | # The Git ref used to build the DataPlane API binary. 
32 | ARG DATAPLANEAPI_REF 33 | ENV DATAPLANEAPI_REF ${DATAPLANEAPI_REF:-494f9b817842d9e28f7b75c4c32a59395794636c} 34 | 35 | WORKDIR / 36 | 37 | RUN git clone "${DATAPLANEAPI_URL}" && \ 38 | cd dataplaneapi && \ 39 | git checkout -b build-me "${DATAPLANEAPI_REF}" && \ 40 | make build 41 | 42 | 43 | ################################################################################ 44 | ## MAIN STAGE ## 45 | ################################################################################ 46 | FROM photon:3.0 as main 47 | LABEL "maintainer" "Andrew Kutz " 48 | 49 | WORKDIR / 50 | 51 | COPY --from=builder /dataplaneapi/build/dataplaneapi /usr/local/bin/dataplaneapi 52 | RUN chmod 0755 /usr/local/bin/dataplaneapi 53 | 54 | RUN tdnf install -y \ 55 | awk \ 56 | curl \ 57 | inotify-tools\ 58 | iproute2 \ 59 | iputils \ 60 | lsof \ 61 | pcre \ 62 | rpm \ 63 | shadow \ 64 | systemd \ 65 | vim 66 | 67 | RUN tdnf install -y haproxy && \ 68 | useradd --system --home-dir=/var/lib/haproxy --user-group haproxy && \ 69 | mkdir -p /var/lib/haproxy && \ 70 | chown -R haproxy:haproxy /var/lib/haproxy 71 | COPY ansible/roles/haproxy/files/etc/haproxy/haproxy.cfg \ 72 | example/ca.crt example/server.crt example/server.key \ 73 | /etc/haproxy/ 74 | RUN chmod 0640 /etc/haproxy/haproxy.cfg /etc/haproxy/*.crt && \ 75 | chmod 0440 /etc/haproxy/*.key 76 | 77 | RUN mkdir -p /etc/vmware /var/lib/vmware 78 | COPY ansible/roles/vmware/files/var/lib/vmware/*.sh \ 79 | /var/lib/vmware 80 | RUN chmod 0744 /var/lib/vmware/*.sh 81 | 82 | CMD [ "-f", "/etc/haproxy/haproxy.cfg" ] 83 | ENTRYPOINT [ "/usr/sbin/haproxy" ] 84 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | # If you update this file, please follow 16 | # https://suva.sh/posts/well-documented-makefiles 17 | 18 | # Ensure Make is run with bash shell as some syntax below is bash-specific 19 | SHELL := /usr/bin/env bash 20 | 21 | .DEFAULT_GOAL := help 22 | 23 | ## -------------------------------------- 24 | ## Help 25 | ## -------------------------------------- 26 | 27 | help: ## Display this help 28 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z0-9_-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 29 | 30 | ## -------------------------------------- 31 | ## Variables 32 | ## -------------------------------------- 33 | 34 | # Initialize the version with the Git version. 35 | VERSION ?= $(shell git describe --always --dirty) 36 | 37 | # The output directory for produced assets. 38 | OUTPUT_DIR ?= ./output 39 | 40 | # DataPlane API version to build 41 | DATAPLANEAPI_REF ?= v2.1.0 42 | 43 | # DataPlane API URL to build 44 | DATAPLANEAPI_URL ?= https://github.com/haproxytech/dataplaneapi 45 | 46 | # The location of the DataPlane API binary. 47 | DATAPLANEAPI_BIN := $(OUTPUT_DIR)/dataplaneapi-$(DATAPLANEAPI_REF).linux_amd64 48 | 49 | # The locations of the DataPlane API specifications. 50 | DATAPLANEAPI_OPENAPI_JSON := $(OUTPUT_DIR)/dataplaneapi-$(DATAPLANEAPI_REF)-openapi.json 51 | 52 | # Enable mTLS on the dataplane API 53 | DATAPLANEAPI_WITH_MTLS ?= false 54 | 55 | 56 | ## -------------------------------------- 57 | ## Packer flags 58 | ## -------------------------------------- 59 | PACKER_FLAGS += -var='version=$(VERSION)' 60 | PACKER_FLAGS += -var='output_directory=$(OUTPUT_DIR)/ova' 61 | PACKER_FLAGS += -var='dataplaneapi_ref=$(DATAPLANEAPI_REF)' 62 | 63 | # If FOREGROUND=1 then Packer will set headless to false, causing local builds 64 | # to build in the foreground, with a UI. 
This is very useful when debugging new 65 | # platforms or issues with existing ones. 66 | ifeq (1,$(strip $(FOREGROUND))) 67 | PACKER_FLAGS += -var="headless=false" 68 | endif 69 | 70 | # A list of variable files given to Packer. 71 | PACKER_VAR_FILES := $(strip $(foreach f,$(abspath $(PACKER_VAR_FILES)),-var-file="$(f)" )) 72 | 73 | # Initialize a list of flags to pass to Packer. This includes any existing flags 74 | # specified by PACKER_FLAGS, as well as prefixing the list with the variable 75 | # files from PACKER_VAR_FILES, with each file prefixed by -var-file=. 76 | # 77 | # Any existing values from PACKER_FLAGS take precendence over variable files. 78 | PACKER_FLAGS := $(PACKER_VAR_FILES) $(PACKER_FLAGS) 79 | 80 | 81 | ## -------------------------------------- 82 | ## OVA 83 | ## -------------------------------------- 84 | .PHONY: clean-ova 85 | clean-ova: ## Cleans the generated HAProxy load balancer OVA 86 | rm -rf $(OUTPUT_DIR)/ova 87 | 88 | .PHONY: build-ova 89 | build-ova: ## Builds the HAProxy load balancer OVA 90 | build-ova: clean-ova 91 | PACKER_LOG=1 packer build $(PACKER_FLAGS) packer.json 92 | 93 | .PHONY: verify 94 | verify: ## Verifies the packer config 95 | packer validate $(PACKER_FLAGS) packer.json 96 | 97 | 98 | ## -------------------------------------- 99 | ## Docker 100 | ## -------------------------------------- 101 | .PHONY: build-image 102 | build-image: ## Builds the container image 103 | ifeq ($(DATAPLANEAPI_WITH_MTLS),true) 104 | docker build --build-arg "DATAPLANEAPI_REF=$(DATAPLANEAPI_REF)" --build-arg "DATAPLANEAPI_URL=$(DATAPLANEAPI_URL)" -f Dockerfile.mTLS -t haproxy . 105 | else 106 | docker build --build-arg "DATAPLANEAPI_REF=$(DATAPLANEAPI_REF)" --build-arg "DATAPLANEAPI_URL=$(DATAPLANEAPI_URL)" -t haproxy . 
107 | endif 108 | 109 | 110 | ## -------------------------------------- 111 | ## DataPlane API Binary 112 | ## -------------------------------------- 113 | .PHONY: build-api-bin 114 | build-api-bin: build-image 115 | build-api-bin: ## Builds the DataPlane API binary 116 | @mkdir -p $(dir $(DATAPLANEAPI_BIN)) 117 | CONTAINER=$$(docker run -d --rm haproxy) && \ 118 | docker cp $${CONTAINER}:/usr/local/bin/dataplaneapi $(DATAPLANEAPI_BIN) && \ 119 | docker kill $${CONTAINER} 120 | @echo $(DATAPLANEAPI_BIN) 121 | 122 | 123 | ## -------------------------------------- 124 | ## DataPlane API OpenAPI spec 125 | ## -------------------------------------- 126 | .PHONY: build-api-spec 127 | build-api-spec: build-image 128 | build-api-spec: ## Builds the DataPlane API spec 129 | @mkdir -p $(dir $(DATAPLANEAPI_OPENAPI_JSON)) 130 | CONTAINER=$$(docker run -d --rm -p 5556:5556 haproxy) && \ 131 | while ! curl \ 132 | --cacert example/ca.crt \ 133 | --cert example/client.crt --key example/client.key \ 134 | --user client:cert \ 135 | "https://localhost:5556/v2/specification_openapiv3" >$(DATAPLANEAPI_OPENAPI_JSON); do \ 136 | sleep 1; \ 137 | done && \ 138 | docker kill $${CONTAINER} 139 | 140 | ## -------------------------------------- 141 | ## Testing 142 | ## -------------------------------------- 143 | .PHONY: test-anyiproutectl 144 | test-anyiproutectl: build-image 145 | test-anyiproutectl: ## Run anyiproutectl tests 146 | hack/test-route-programs.sh -a 147 | 148 | .PHONY: test-routetablectl 149 | test-routetablectl: build-image 150 | test-routetablectl: ## Run routetablectl tests 151 | hack/test-route-programs.sh -r 152 | 153 | ## -------------------------------------- 154 | ## Clean 155 | ## -------------------------------------- 156 | .PHONY: clean 157 | clean: ## Cleans artifacts 158 | rm -fr $(OUTPUT_DIR) 159 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # VMware + HAProxy 2 | 3 | This project enables customers to build an OSS virtual appliance with HAProxy and its [Data Plane API](https://www.haproxy.com/documentation/dataplaneapi/latest) designed to enable Kubernetes workload management with Project Pacific on vSphere 7. 4 | 5 | * [Download](#download) 6 | * [Deploy](#deploy) 7 | * [Build](#build) 8 | * [Test](#test) 9 | * [Upgrade](#upgrade) 10 | 11 | ## Download 12 | 13 | The latest version of the appliance OVA is always available from the [releases](https://github.com/haproxytech/vmware-haproxy/releases) page: 14 | 15 | ### NOTE 16 | If running on or upgrading to vSphere 7.0.1 or later, you _must_ upgrade to version v0.1.9 or later. 17 | 18 | | Version | SHA256 | 19 | |---|---| 20 | | [v0.2.0](https://cdn.haproxy.com/download/haproxy/vsphere/ova/haproxy-v0.2.0.ova) | `07fa35338297c591f26b6c32fb6ebcb91275e36c677086824f3fd39d9b24fb09` | 21 | | [v0.1.10](https://cdn.haproxy.com/download/haproxy/vsphere/ova/haproxy-v0.1.10.ova) | `81f2233b3de75141110a7036db2adabe4d087c2a6272c4e03e2924bff3dccc33` | 22 | | [v0.1.9](https://cdn.haproxy.com/download/haproxy/vsphere/ova/haproxy-v0.1.9.ova) | `f3d0c88e7181af01b2b3e6a318ae03a77ffb0e1949ef16b2e39179dc827c305a` | 23 | | [v0.1.8](https://cdn.haproxy.com/download/haproxy/vsphere/ova/vmware-haproxy-v0.1.8.ova) | `eac73c1207c05aeeece6d17dd1ac1dde0e557d94812f19082751cfb6925ad082` | 24 | 25 | ## Deploy 26 | 27 | Refer to the [system requirements](https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-tanzu/GUID-C86B9028-2701-40FE-BA05-519486E010F4.html) and the [installation documentation](https://docs.vmware.com/en/VMware-vSphere/7.0/vmware-vsphere-with-tanzu/GUID-5673269F-C147-485B-8706-65E4A87EB7F0.html). 
28 | 29 | For a tutorial on deploying and using the HAProxy load balancer in vSphere with Tanzu, check out the [vSphere with Tanzu Quick Start Guide](https://core.vmware.com/resource/vsphere-tanzu-quick-start-guide). 30 | 31 | ## Build 32 | 33 | Documentation on how to build the appliance is available [here](./docs/how-to-build-ova.md). 34 | 35 | ## Test 36 | 37 | Documentation on how to test the components in the appliance with Docker containers is available [here](./docs/how-to-container.md). 38 | 39 | ## Configure 40 | 41 | Documentation on how to configure the Virtual IPs managed by the appliance is available [here](./docs/virtual-ip-config.md). 42 | 43 | ## Upgrade 44 | 45 | Documentation on recommended upgrade procedures can be found [here](./docs/upgrade.md). 46 | 47 | -------------------------------------------------------------------------------- /ansible/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc -------------------------------------------------------------------------------- /ansible/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Configuration for the HAProxy OVA 2 | 3 | The OVA guest is customized using Ansible. As such, some of the playbooks are opinionated to the Guest OS selected. 4 | Configuration parameters for the Ansible scripts are defined in ../packer.json 5 | 6 | ## Playbooks 7 | 8 | The Ansible playbooks are indexed in playbook.yml here in the root and each playbook has its own folder in /roles 9 | 10 | ### cloudinit 11 | 12 | Cloud-init is the way in which the OVA is configured on first boot. 13 | This playbook adds the cloud-init packages and VMware datasource. 
14 | It then runs a configuration script and cleans up 15 | 16 | ### common 17 | 18 | Common defines the core set of OS dependencies to add to the distribution and allows for some tweaks to the guest config 19 | If additional OS dependencies should be installed, they can be added to /common/defaults/main.yaml 20 | 21 | ### haproxy 22 | 23 | The haproxy playlist provides the following functions: 24 | 25 | - Provides the default haproxy.cfg to be copied to the appliance 26 | - Ensure that haproxy starts after the cloud-init boot stage 27 | - Install the dataplane API (see ../packer.json) 28 | - Configure and enable haproxy as a systemd service 29 | 30 | ### pki 31 | 32 | The pki playlist copies two scripts into the guest at /usr/local/bin: 33 | 34 | - new-ca.sh will create a new self-signed certificate authority 35 | - new-cert.sh will create a new certificate 36 | 37 | ### sysprep 38 | 39 | The sysprep playbook configures a bunch of guest OS internals 40 | It's mostly concerned with restoring the OVA to a pristine state following the prior configuration steps 41 | 42 | - Sets /etc/hostname and resets /etc/hosts 43 | - Reset the IP tables to remove any firewall settings 44 | - Reset /etc/machine-id and audit logs 45 | - Remove SSH host keys and authorized users 46 | - Clean up caches created by installing OS dependencies 47 | - Clean cloud-init dependencies and temp files 48 | - Clean shell history and /var/log 49 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | [defaults] 16 | remote_tmp = /tmp/.ansible/ 17 | filter_plugins = ./filter_plugins 18 | retry_files_enabled = False 19 | 20 | [ssh_connection] 21 | pipelining = True 22 | -------------------------------------------------------------------------------- /ansible/playbook.retry: -------------------------------------------------------------------------------- 1 | default 2 | -------------------------------------------------------------------------------- /ansible/playbook.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | --- 15 | - hosts: all 16 | become: yes 17 | roles: 18 | - role: common 19 | - role: cloudinit 20 | - role: haproxy 21 | - role: vmware 22 | - role: pki 23 | - role: sysprep 24 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/etc/cloud/cloud.cfg.d/10-enable-manage-etc-hosts.cfg: -------------------------------------------------------------------------------- 1 | manage_etc_hosts: true -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/etc/cloud/cloud.cfg.d/99-DataSourceVMware.cfg: -------------------------------------------------------------------------------- 1 | datasource_list: [ "VMware" ] 2 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/etc/systemd/system/net-postconfig.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=net-postconfig.service 3 | 4 | # This service *must* run after the ovfenv has been parsed and before haproxy starts 5 | After=network-online.target cloud-final.service 6 | Wants=network-online.target 7 | Before=haproxy.service 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | 12 | [Service] 13 | Type=oneshot 14 | TimeoutSec=0 15 | WorkingDirectory=/var/lib/vmware 16 | 17 | # Create the log directory. 
18 | ExecStartPre=/bin/mkdir -p /var/log/vmware 19 | 20 | # The script that sets up the anyip routes 21 | ExecStart=/bin/sh -c '/var/lib/vmware/net-postconfig.sh 2>&1 | tee -a /var/log/vmware/net-postconfig.log' 22 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/etc/systemd/system/ovf-to-cloud-init.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=ovf-to-cloud-init.service 3 | 4 | # This service *must* run before the first cloud-init service 5 | Before=cloud-init-local.service 6 | 7 | # Always run this service. It has the appropriate logic to exit if nothing needs doing 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | 12 | [Service] 13 | Type=oneshot 14 | RemainAfterExit=yes 15 | TimeoutSec=0 16 | WorkingDirectory=/var/lib/vmware 17 | 18 | # Ensure the VMware directories exist 19 | ExecStartPre=/bin/mkdir -p /etc/vmware /var/log/vmware 20 | 21 | # The script that transforms the OVF data into cloud-init data and sets it in guest info 22 | ExecStart=/bin/sh -c '/var/lib/vmware/ovf-to-cloud-init.sh 2>&1 | tee -a /var/log/vmware/ovf-to-cloud-init.log' 23 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/usr/lib/python3.7/site-packages/cloudinit/distros/photon.py: -------------------------------------------------------------------------------- 1 | # vi: ts=4 expandtab 2 | # 3 | # Copyright (C) 2020 VMware Inc. 
4 | # Author: Mahmoud Bassiouny 5 | # 6 | 7 | import os 8 | import fnmatch 9 | 10 | from cloudinit import distros 11 | from cloudinit import helpers 12 | from cloudinit import log as logging 13 | from cloudinit import util 14 | from cloudinit.distros import net_util 15 | from cloudinit.distros import rhel_util as rhutil 16 | from cloudinit.distros.parsers.hostname import HostnameConf 17 | from cloudinit.net.network_state import mask_to_net_prefix 18 | 19 | from cloudinit.settings import PER_INSTANCE 20 | from pwd import getpwnam 21 | from grp import getgrnam 22 | 23 | LOG = logging.getLogger(__name__) 24 | 25 | 26 | class Distro(distros.Distro): 27 | hostname_conf_fn = "/etc/hostname" 28 | locale_conf_fn = "/etc/default/locale" 29 | systemd_locale_conf_fn = '/etc/locale.conf' 30 | network_conf_dir = "/etc/systemd/network/" 31 | resolve_conf_fn = "/etc/systemd/resolved.conf" 32 | init_cmd = ['systemctl'] # init scripts 33 | 34 | def __init__(self, name, cfg, paths): 35 | distros.Distro.__init__(self, name, cfg, paths) 36 | # This will be used to restrict certain 37 | # calls from repeatly happening (when they 38 | # should only happen say once per instance...) 
39 | self._runner = helpers.Runners(paths) 40 | self.osfamily = 'photon' 41 | 42 | def apply_locale(self, locale, out_fn=None): 43 | if self.uses_systemd(): 44 | if not out_fn: 45 | out_fn = self.systemd_locale_conf_fn 46 | else: 47 | if not out_fn: 48 | out_fn = self.locale_conf_fn 49 | locale_cfg = { 50 | 'LANG': locale, 51 | } 52 | rhutil.update_sysconfig_file(out_fn, locale_cfg) 53 | 54 | # rhutil will modify /etc/locale.conf 55 | # For locale change to take effect, reboot is needed or we can restart systemd-localed 56 | # This is equivalent of localectl 57 | cmd = ['systemctl', 'restart', 'systemd-localed'] 58 | LOG.debug("Attempting to restart localed using command %s", cmd) 59 | try: 60 | (_out, err) = util.subp(cmd) 61 | if len(err): 62 | LOG.warn("Running %s resulted in stderr output: %s", cmd, err) 63 | except util.ProcessExecutionError: 64 | util.logexc(LOG, "Restart of localed using command %s failed", cmd) 65 | 66 | def install_packages(self, pkglist): 67 | # self.update_package_sources() 68 | self.package_command('install', pkgs=pkglist) 69 | 70 | def _write_network_config(self, netconfig): 71 | ifindex = 10 72 | 73 | LOG.debug("Setting Networking Config Version 2") 74 | 75 | for k, v in netconfig.items(): 76 | if (k == "ethernets"): 77 | for key, val in v.items(): 78 | link = key 79 | 80 | conf = "[Match]\n" 81 | # Generate [Match] section 82 | if ('match' in val): 83 | match = val['match'] 84 | 85 | for i in match: 86 | if (i == 'macaddress'): 87 | conf += "MACAddress=%s\n\n" % ( 88 | match['macaddress']) 89 | elif (i == 'driver'): 90 | conf += "Driver=%s\n" % (match['driver']) 91 | conf += "Name=%s\n\n" % (link) 92 | elif (i == 'name'): 93 | conf += "Name=%s\n\n" % (match['name']) 94 | else: 95 | conf += "Name=%s\n\n" % link 96 | 97 | # Generate [Link] section 98 | if ('mtu' in val): 99 | conf += "[Link]\nMTUBytes=%s\n\n" % (val['mtu']) 100 | 101 | # Generate [Network] section 102 | conf += "[Network]\n" 103 | 104 | if ('dhcp4' in val and 'dhcp6' 
in val): 105 | if (val['dhcp4'] == True and val['dhcp6'] == True): 106 | conf += "DHCP=yes\n" 107 | if (val['dhcp4'] == True and val['dhcp6'] == False): 108 | conf += "DHCP=ipv4\n" 109 | if (val['dhcp4'] == False and val['dhcp6'] == True): 110 | conf += "DHCP=ipv6\n" 111 | if (val['dhcp4'] == False and val['dhcp6'] == False): 112 | conf += "DHCP=no\n" 113 | elif ('dhcp4' in val): 114 | if val['dhcp4'] == True: 115 | conf += "DHCP=ipv4\n" 116 | elif ('dhcp6' in val): 117 | if (val['dhcp6'] == True): 118 | conf += "DHCP=ipv6\n" 119 | 120 | if ('nameservers' in val): 121 | nameservers = val['nameservers'] 122 | 123 | if ('search' in nameservers): 124 | search = nameservers['search'] 125 | s = ' '.join(search) 126 | conf += "Domains=%s\n" % s 127 | if ('addresses' in nameservers): 128 | s = nameservers['addresses'] 129 | conf += "DNS=%s\n" % ' '.join(s) 130 | 131 | # Generate [DHCP] section 132 | if ('dhcp4domain' in val): 133 | if val['dhcp4domain'] == True: 134 | conf += "\n[DHCP]\nUseDomains=yes\n\n" 135 | else: 136 | conf += "\n[DHCP]\nUseDomains=no\n\n" 137 | 138 | # Generate [Address] section 139 | if ('addresses' in val): 140 | for i in val['addresses']: 141 | conf += "\n[Address]\nAddress=%s\n" % (i) 142 | 143 | # Generate [Route] section 144 | if ('gateway4' in val): 145 | conf += "\n[Route]\nGateway=%s\n" % (val['gateway4']) 146 | 147 | if ('gateway6' in val): 148 | conf += "\n[Route]\nGateway=%s\n" % (val['gateway6']) 149 | 150 | if ('routes' in val): 151 | routes = val['routes'] 152 | conf += "\n[Route]\n" 153 | 154 | for i in routes: 155 | if (i['via']): 156 | conf += "Gateway=%s\n" % (i['via']) 157 | if (i['to']): 158 | conf += "Destination=%s\n" % (i['to']) 159 | if (i['metric']): 160 | conf += "Metric=%s\n" % (i['metric']) 161 | 162 | net_fn = network_file_name(self.network_conf_dir, link) 163 | if not net_fn: 164 | net_fn = self.network_conf_dir + \ 165 | str(ifindex) + '-' + link + '.network' 166 | else: 167 | net_fn = self.network_conf_dir + net_fn 
168 | 169 | util.write_file(net_fn, conf) 170 | os.chown(net_fn, getpwnam('systemd-network') 171 | [2], getgrnam('systemd-network')[2]) 172 | 173 | def _write_network(self, settings): 174 | entries = net_util.translate_network(settings) 175 | LOG.debug("Translated ubuntu style network settings %s into %s", 176 | settings, entries) 177 | route_entries = [] 178 | route_entries = translate_routes(settings) 179 | dev_names = entries.keys() 180 | dev_index = 10 181 | nameservers = [] 182 | searchdomains = [] 183 | # Format for systemd 184 | for (dev, info) in entries.items(): 185 | if 'dns-nameservers' in info: 186 | nameservers.extend(info['dns-nameservers']) 187 | if 'dns-search' in info: 188 | searchdomains.extend(info['dns-search']) 189 | if dev == 'lo': 190 | continue 191 | net_fn = network_file_name(self.network_conf_dir, dev) 192 | if not net_fn: 193 | net_fn = self.network_conf_dir + \ 194 | str(dev_index) + '-' + dev + '.network' 195 | else: 196 | net_fn = self.network_conf_dir + net_fn 197 | 198 | dhcp_enabled = 'no' 199 | if info.get('bootproto') == 'dhcp': 200 | if settings.find('inet dhcp') >= 0 and settings.find('inet6 dhcp') >= 0: 201 | dhcp_enabled = 'yes' 202 | else: 203 | dhcp_enabled = 'ipv6' if info.get( 204 | 'inet6') == True else 'ipv4' 205 | 206 | net_cfg = { 207 | 'Name': dev, 208 | 'DHCP': dhcp_enabled, 209 | } 210 | 211 | if info.get('hwaddress'): 212 | net_cfg['MACAddress'] = info.get('hwaddress') 213 | if info.get('address'): 214 | net_cfg['Address'] = "%s" % (info.get('address')) 215 | if info.get('netmask'): 216 | net_cfg['Address'] += "/%s" % ( 217 | mask_to_net_prefix(info.get('netmask'))) 218 | if info.get('gateway'): 219 | net_cfg['Gateway'] = info.get('gateway') 220 | if info.get('dns-nameservers'): 221 | net_cfg['DNS'] = str( 222 | tuple(info.get('dns-nameservers'))).replace(',', '') 223 | if info.get('dns-search'): 224 | net_cfg['Domains'] = str( 225 | tuple(info.get('dns-search'))).replace(',', '') 226 | route_entry = [] 227 | if 
dev in route_entries: 228 | route_entry = route_entries[dev] 229 | route_index = 0 230 | found = True 231 | while found: 232 | route_name = 'routes.' + str(route_index) 233 | if route_name in route_entries[dev]: 234 | val = str(tuple(route_entries[dev][route_name])).replace( 235 | ',', '') 236 | if val: 237 | net_cfg[route_name] = val 238 | else: 239 | found = False 240 | route_index += 1 241 | 242 | if info.get('auto'): 243 | self._write_interface_file(net_fn, net_cfg, route_entry) 244 | 245 | resolve_data = [] 246 | new_resolve_data = [] 247 | with open(self.resolve_conf_fn, "r") as rf: 248 | resolve_data = rf.readlines() 249 | LOG.debug("Old Resolve Data\n") 250 | LOG.debug("%s", resolve_data) 251 | for item in resolve_data: 252 | if (nameservers and ('DNS=' in item)) or (searchdomains and ('Domains=' in item)): 253 | continue 254 | else: 255 | new_resolve_data.append(item) 256 | 257 | new_resolve_data = new_resolve_data + \ 258 | convert_resolv_conf(nameservers, searchdomains) 259 | LOG.debug("New resolve data\n") 260 | LOG.debug("%s", new_resolve_data) 261 | if nameservers or searchdomains: 262 | util.write_file(self.resolve_conf_fn, ''.join(new_resolve_data)) 263 | 264 | return dev_names 265 | 266 | def _write_interface_file(self, net_fn, net_cfg, route_entry): 267 | if not net_cfg['Name']: 268 | return 269 | content = "[Match]\n" 270 | content += "Name=%s\n" % (net_cfg['Name']) 271 | if 'MACAddress' in net_cfg: 272 | content += "MACAddress=%s\n" % (net_cfg['MACAddress']) 273 | content += "[Network]\n" 274 | 275 | if 'DHCP' in net_cfg and net_cfg['DHCP'] in {'yes', 'ipv4', 'ipv6'}: 276 | content += "DHCP=%s\n" % (net_cfg['DHCP']) 277 | else: 278 | if 'Address' in net_cfg: 279 | content += "Address=%s\n" % (net_cfg['Address']) 280 | if 'Gateway' in net_cfg: 281 | content += "Gateway=%s\n" % (net_cfg['Gateway']) 282 | if 'DHCP' in net_cfg and net_cfg['DHCP'] == 'no': 283 | content += "DHCP=%s\n" % (net_cfg['DHCP']) 284 | 285 | route_index = 0 286 | found = True 
287 | if route_entry: 288 | while found: 289 | route_name = 'routes.' + str(route_index) 290 | if route_name in route_entry: 291 | content += "[Route]\n" 292 | if len(route_entry[route_name]) != 2: 293 | continue 294 | content += "Gateway=%s\n" % ( 295 | route_entry[route_name][0]) 296 | content += "Destination=%s\n" % ( 297 | route_entry[route_name][1]) 298 | else: 299 | found = False 300 | route_index += 1 301 | 302 | util.write_file(net_fn, content) 303 | 304 | def _bring_up_interfaces(self, device_names): 305 | cmd = ['systemctl', 'restart', 'systemd-networkd', 'systemd-resolved'] 306 | LOG.debug("Attempting to run bring up interfaces using command %s", 307 | cmd) 308 | try: 309 | (_out, err) = util.subp(cmd) 310 | if len(err): 311 | LOG.warn("Running %s resulted in stderr output: %s", cmd, err) 312 | return True 313 | except util.ProcessExecutionError: 314 | util.logexc(LOG, "Running network bringup command %s failed", cmd) 315 | return False 316 | 317 | def _select_hostname(self, hostname, fqdn): 318 | # We should attempt to use the FQDN when possible. This makes it 319 | # possible to use the hostname command after the system is booted 320 | # to get the short, full, and domain names of the host. 321 | if fqdn: 322 | return fqdn 323 | return hostname 324 | 325 | def _write_hostname(self, your_hostname, out_fn): 326 | conf = None 327 | try: 328 | # Try to update the previous one 329 | # so lets see if we can read it first. 
330 | conf = self._read_hostname_conf(out_fn) 331 | except IOError: 332 | pass 333 | if not conf: 334 | conf = HostnameConf('') 335 | conf.set_hostname(your_hostname) 336 | util.write_file(out_fn, str(conf), mode=0o644) 337 | 338 | def _read_system_hostname(self): 339 | sys_hostname = self._read_hostname(self.hostname_conf_fn) 340 | return (self.hostname_conf_fn, sys_hostname) 341 | 342 | def _read_hostname_conf(self, filename): 343 | conf = HostnameConf(util.load_file(filename)) 344 | conf.parse() 345 | return conf 346 | 347 | def _read_hostname(self, filename, default=None): 348 | hostname = None 349 | try: 350 | conf = self._read_hostname_conf(filename) 351 | hostname = conf.hostname 352 | except IOError: 353 | pass 354 | if not hostname: 355 | return default 356 | return hostname 357 | 358 | def _get_localhost_ip(self): 359 | return "127.0.1.1" 360 | 361 | def set_timezone(self, tz): 362 | distros.set_etc_timezone(tz=tz, tz_file=self._find_tz_file(tz)) 363 | 364 | def package_command(self, command, args=None, pkgs=None): 365 | if pkgs is None: 366 | pkgs = [] 367 | 368 | cmd = ['tdnf'] 369 | # Determines whether or not tdnf prompts for confirmation 370 | # of critical actions. We don't want to prompt... 
371 | cmd.append("-y") 372 | 373 | if args and isinstance(args, str): 374 | cmd.append(args) 375 | elif args and isinstance(args, list): 376 | cmd.extend(args) 377 | 378 | cmd.append(command) 379 | 380 | pkglist = util.expand_package_list('%s-%s', pkgs) 381 | cmd.extend(pkglist) 382 | 383 | # Allow the output of this to flow outwards (ie not be captured) 384 | util.subp(cmd, capture=False) 385 | 386 | def update_package_sources(self): 387 | self._runner.run("update-sources", self.package_command, 388 | ["makecache"], freq=PER_INSTANCE) 389 | 390 | 391 | def convert_resolv_conf(nameservers, searchdomains): 392 | """Returns a string formatted for resolv.conf.""" 393 | result = [] 394 | if nameservers: 395 | nslist = "DNS=" 396 | for ns in nameservers: 397 | nslist = nslist + '%s ' % ns 398 | nslist = nslist + '\n' 399 | result.append(str(nslist)) 400 | if searchdomains: 401 | sdlist = "Domains=" 402 | for sd in searchdomains: 403 | sdlist = sdlist + '%s ' % sd 404 | sdlist = sdlist + '\n' 405 | result.append(str(sdlist)) 406 | return result 407 | 408 | 409 | def translate_routes(settings): 410 | entries = [] 411 | for line in settings.splitlines(): 412 | line = line.strip() 413 | if not line or line.startswith("#"): 414 | continue 415 | split_up = line.split(None, 1) 416 | if len(split_up) <= 1: 417 | continue 418 | entries.append(split_up) 419 | consume = {} 420 | ifaces = [] 421 | for (cmd, args) in entries: 422 | if cmd == 'iface': 423 | if consume: 424 | ifaces.append(consume) 425 | consume = {} 426 | consume[cmd] = args 427 | else: 428 | consume[cmd] = args 429 | 430 | absorb = False 431 | for (cmd, args) in consume.items(): 432 | if cmd == 'iface': 433 | absorb = True 434 | if absorb: 435 | ifaces.append(consume) 436 | out_ifaces = {} 437 | for info in ifaces: 438 | if 'iface' not in info: 439 | continue 440 | iface_details = info['iface'].split(None) 441 | dev_name = None 442 | if len(iface_details) >= 1: 443 | dev = iface_details[0].strip().lower() 444 | if 
dev: 445 | dev_name = dev 446 | if not dev_name: 447 | continue 448 | route_info = {} 449 | route_index = 0 450 | found = True 451 | while found: 452 | route_name = 'routes.' + str(route_index) 453 | if route_name in info: 454 | val = info[route_name].split() 455 | if val: 456 | route_info[route_name] = val 457 | else: 458 | found = False 459 | route_index += 1 460 | if dev_name in out_ifaces: 461 | out_ifaces[dev_name].update(route_info) 462 | else: 463 | out_ifaces[dev_name] = route_info 464 | return out_ifaces 465 | 466 | 467 | def network_file_name(dirname, dev_name): 468 | network_file_pattern = "*" + dev_name + ".network*" 469 | for node in os.listdir(dirname): 470 | node_fullpath = os.path.join(dirname, node) 471 | if fnmatch.fnmatch(node, network_file_pattern): 472 | util.del_file(node_fullpath) 473 | return node 474 | return "" 475 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/var/lib/vmware/metadata.txt: -------------------------------------------------------------------------------- 1 | local-hostname: HOSTNAME 2 | network: 3 | version: 2 4 | ethernets: 5 | MGMT_CONFIG 6 | WORKLOAD_CONFIG 7 | FRONTEND_CONFIG 8 | cleanup-guestinfo: 9 | - userdata 10 | - vendordata 11 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/var/lib/vmware/net-postconfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | # Network post-configuration actions that should be run on every boot should be appended here 5 | # Used by net-postconfig systemd service 6 | 7 | route_table_cfg_file="/etc/vmware/route-tables.cfg" 8 | 9 | # Appends an entry to the route-table service's config file. 
10 | # Input values: 11 | # - 1 Routing Table ID 12 | # - 2 Routing Table Name = Network/Interface Name 13 | # - 3 Interface MAC 14 | # - 4 Gateway IP (If unset, assumes acquired using DHCP) 15 | writeRouteTableConfig() { 16 | id="${1}" 17 | network="${2}" 18 | mac="${3}" 19 | gateway="${4}" 20 | if [ "${gateway}" == "" ] || [ "${gateway}" == "null" ]; then 21 | # Assume DHCP. Get it from DHCP lease file. 22 | ifindex=$(cat "/sys/class/net/${network}/ifindex") 23 | gateway=$(grep ROUTER "/var/run/systemd/netif/leases/${ifindex}" | cut -d= -f2) 24 | if [ "${gateway}" == "" ] || [ "${gateway}" == "null" ]; then 25 | return 0 26 | fi 27 | fi 28 | ip=$(ip -4 address show "${network}" | grep 'scope global' | awk '{print $2}') 29 | # Set default gateway route if not already set in route-tables.cfg. 30 | default_gw_route="${id},${network},${mac},${ip},${gateway}" 31 | if ! grep -Fxq "$default_gw_route" "$route_table_cfg_file"; then 32 | echo "$default_gw_route" >> "$route_table_cfg_file" 33 | fi 34 | 35 | # Set linked scope route if not already set in route-tables.cfg. 36 | linked_scoped_route="${id},${network},${mac},${ip}" 37 | if ! grep -Fxq "$linked_scoped_route" "$route_table_cfg_file"; then 38 | echo "$linked_scoped_route" >> "$route_table_cfg_file" 39 | fi 40 | } 41 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/var/lib/vmware/ovf-to-cloud-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | set -e 18 | set -x 19 | 20 | # The path to the Data Plane API configuration file. 21 | data_plane_api_cfg=/etc/haproxy/dataplaneapi.cfg 22 | 23 | # These PCI slots are hard-coded in the OVF config 24 | # This is the reliable way of determining which network is which 25 | # Link files are used so that prescriptive naming behavior can 26 | # be given to udevd. This keeps systemd-networkd from racing during 27 | # early discovery/initialization of these devices. 28 | # management_pci="0000:03:00.0" 160 eth0 29 | # workload_pci="0000:0b:00.0" 192 eth1 30 | # frontend_pci="0000:13:00.0" 224 eth2 31 | 32 | # These keys are hardcoded to match the data from OVF config 33 | hostname_key="network.hostname" 34 | management_ip_key="network.management_ip" 35 | workload_ip_key="network.workload_ip" 36 | frontend_ip_key="network.frontend_ip" 37 | management_gw_key="network.management_gateway" 38 | workload_gw_key="network.workload_gateway" 39 | frontend_gw_key="network.frontend_gateway" 40 | 41 | workload_networks_key="network.additional_workload_networks" 42 | 43 | # These are the display names for the nics 44 | management_net_name="management" 45 | workload_net_name="workload" 46 | frontend_net_name="frontend" 47 | 48 | # The script persists the encoded userdata and metadata to the filesystem 49 | # This is both for post-mortem analysis and so that they can be refreshed on boot 50 | encoded_userdata_path="/var/lib/vmware/encoded_userdata.txt" 51 | encoded_metadata_path="/var/lib/vmware/encoded_metadata.txt" 52 | 53 | 
ca_crt_path="/etc/haproxy/ca.crt" 54 | ca_key_path="/etc/haproxy/ca.key" 55 | anyip_cfg_path="/etc/vmware/anyip-routes.cfg" 56 | net_postconfig_path="/var/lib/vmware/net-postconfig.sh" 57 | first_boot_path="/var/lib/vmware/.ovf_to_cloud_init.done" 58 | 59 | # Ensure that metadata exists in guestinfo for correct networking 60 | # On first boot, the persisted metadata is written. On subsequent boots, it is read. 61 | ensureMetadata () { 62 | if [ "$(ovf-rpctool get metadata)" == "" ]; then 63 | if [ -f "$encoded_metadata_path" ]; then 64 | encoded_metadata=$(cat $encoded_metadata_path) 65 | ovf-rpctool set metadata "$encoded_metadata" 66 | ovf-rpctool set metadata.encoding "base64" 67 | else 68 | echo "Error: Metadata is missing from $encoded_metadata_path" 69 | fi 70 | fi 71 | } 72 | 73 | # If there is no ovfenv, there's nothing to process 74 | checkForExistingOvfenv () { 75 | val=$(ovf-rpctool get ovfenv) 76 | if [ "$val" == "" ]; then 77 | echo "Exiting due to no ovfenv to process" 78 | return 1 79 | fi 80 | } 81 | 82 | # Need to ensure that special characters are properly escaped for Sed, including forward slashes 83 | # Input arg is string to escape 84 | escapeString () { 85 | escaped=$(printf "%q" "$1" | sed 's/\//\\\//g') 86 | echo "$escaped" 87 | } 88 | 89 | # Extract the additional workload networks and store them in the appropriate file. 90 | # These CIDRs will be picked up by the route-tables service and the appropriate routes will be created. 
91 | writeWorkloadNetworks() { 92 | networks=$(ovf-rpctool get.ovf "${workload_networks_key}") 93 | if [ -n "${networks}" ]; then 94 | echo "${networks//,/$'\n'}" > /etc/vmware/workload-networks.cfg 95 | fi 96 | } 97 | 98 | # Persist a string to a file 99 | # Input values: 100 | # - The string to write 101 | # - The file to write to 102 | # - The permissions to set 103 | writeCertFile () { 104 | echo "$1" > "$2" 105 | formatCertificate "$2" 106 | chmod "$3" "$2" 107 | } 108 | 109 | getRootPwd () { 110 | val=$(ovf-rpctool get.ovf appliance.root_pwd) 111 | salt=$(openssl passwd -1 -salt SaltSalt "$val") 112 | escapeString "$salt" 113 | } 114 | 115 | setDataPlaneAPIPort() { 116 | port=$(ovf-rpctool get.ovf "loadbalance.dataplane_port") 117 | if [ "${port}" == "" ] || [ "${port}" == "0" ] || [ "${port}" == "null" ]; then 118 | port=5556 119 | fi 120 | sed -i -e 's/TLS_PORT=5556/TLS_PORT='"${port}"'/' "${data_plane_api_cfg}" 121 | echo "Data Plane API port set to ${port}" 122 | } 123 | 124 | setHAProxyUserPass() { 125 | user="$(ovf-rpctool get.ovf "loadbalance.haproxy_user")" 126 | pass="$(ovf-rpctool get.ovf "loadbalance.haproxy_pwd")" 127 | if [ "${user}" == "" ] || [ "${user}" == "null" ]; then 128 | user="admin" 129 | fi 130 | if [ "${pass}" == "" ] || [ "${pass}" == "null" ]; then 131 | pass="haproxy" 132 | fi 133 | pass="$(openssl passwd -1 "${pass}")" 134 | sed -i -e '/^userlist controller/a\ \user '"${user}"' password '"${pass}"'' /etc/haproxy/haproxy.cfg 135 | } 136 | 137 | # If the certificate is copy/pasted into OVF, \ns are turned into spaces so it needs to be formatted 138 | # Input value is a certificate file. 
It is modified in place 139 | # This should be idempotent 140 | formatCertificate () { 141 | sed -i \ 142 | -e 's/BEGIN /BEGIN_/g' \ 143 | -e 's/RSA /RSA_/g' \ 144 | -e 's/PRIVATE /PRIVATE_/g' \ 145 | -e 's/END /END_/g' \ 146 | -e 's/ /\n/g' \ 147 | -e 's/BEGIN_/BEGIN /g' \ 148 | -e 's/RSA_/RSA /g' \ 149 | -e 's/PRIVATE_/PRIVATE /g' \ 150 | -e 's/END_/END /g' \ 151 | "$1" 152 | } 153 | 154 | # Returns the FQDN for the host. 155 | getHostFQDN() { 156 | host_fqdn=$(ovf-rpctool get.ovf "${hostname_key}") 157 | if [ "${host_fqdn}" == "" ] || [ "${host_fqdn}" == "null" ]; then 158 | host_fqdn="haproxy.local" 159 | fi 160 | echo "${host_fqdn}" 161 | } 162 | 163 | permitRootViaSSH() { 164 | permit_root_login=$(ovf-rpctool get.ovf appliance.permit_root_login) 165 | # Force a lower-case comparison since the value is True on vCenter and true 166 | # when coming from ESX. 167 | if [ "${permit_root_login,,}" == "true" ]; then 168 | permit_root_login="yes" 169 | else 170 | permit_root_login="no" 171 | fi 172 | sed -i -e '/^PermitRootLogin/s/^.*$/PermitRootLogin '"${permit_root_login}"'/' /etc/ssh/sshd_config 173 | } 174 | 175 | # Produces the necessary metadata config for an interface 176 | # Input values: 177 | # - interface name 178 | # - mac address 179 | # - static IP (CIDR notation) 180 | # If static IP is not defined, DHCP is assumed 181 | getNetworkInterfaceYamlConfig () { 182 | cfg1=" $1:\n match:\n macaddress: $2\n wakeonlan: true\n" 183 | cfg2="" 184 | if [ "$3" == "" ] || [ "$3" == "null" ]; then 185 | cfg2=" dhcp4: true" 186 | else 187 | cfg2=" dhcp4: false\n addresses:\n - "$3 188 | fi 189 | echo "$cfg1$cfg2" 190 | } 191 | 192 | # Given a network, find the mac address associated with it 193 | getMacForNetwork () { 194 | if [ ! 
-f "/sys/class/net/$1/address" ]; then 195 | return 1 196 | fi 197 | cat "/sys/class/net/$1/address" 198 | } 199 | 200 | # Writes out the config for the management network 201 | getManagementNetworkConfig () { 202 | mac=$(getMacForNetwork "$management_net_name") 203 | ip=$(ovf-rpctool get.ovf "$management_ip_key") 204 | config="$(getNetworkInterfaceYamlConfig "$management_net_name" "$mac" "$ip")" 205 | gateway=$(ovf-rpctool get.ovf "$management_gw_key") 206 | if [ "$gateway" != "" ] && [ "$gateway" != "null" ]; then 207 | config="$config\n gateway4: $gateway" 208 | fi 209 | nameservers=$(ovf-rpctool get.ovf network.nameservers) 210 | if [ "$nameservers" == "" ] || [ "$nameservers" == "null" ]; then 211 | nameservers="1.1.1.1, 1.0.0.1" 212 | fi 213 | config="$config\n nameservers:" 214 | config="$config\n addresses: [${nameservers}]" 215 | echo -e "$(escapeString "$config")" 216 | } 217 | 218 | # Writes out the config for the backend network 219 | getWorkloadNetworkConfig () { 220 | mac=$(getMacForNetwork "$workload_net_name") 221 | ip=$(ovf-rpctool get.ovf "$workload_ip_key") 222 | echo -e "$(escapeString "$(getNetworkInterfaceYamlConfig "$workload_net_name" "$mac" "$ip")")" 223 | } 224 | 225 | # Writes out the config for the frontend network 226 | # Note that this is conditional on there being a third network device that is 227 | # the device connected to the frontend network. 228 | # If there is no third device, then this function returns gracefully with a 229 | # successful return code. 230 | getFrontendNetworkConfig () { 231 | if ! 
mac=$(getMacForNetwork "$frontend_net_name"); then 232 | return 0 233 | fi 234 | ip=$(ovf-rpctool get.ovf "$frontend_ip_key") 235 | echo -e "$(escapeString "$(getNetworkInterfaceYamlConfig "$frontend_net_name" "$mac" "$ip")")" 236 | } 237 | 238 | # Get all values from OVF and insert them into the userdata template 239 | publishUserdata () { 240 | encoded_userdata=$(sed \ 241 | -e 's/ROOT_PWD_FROM_OVFENV/'"$(getRootPwd)"'/' \ 242 | -e 's/CREATE_DEFAULT_CA/'"$(getCreateDefaultCA)"'/' \ 243 | -e 's/MGMT_IFACE_NAME/'"${management_net_name}"'/' \ 244 | userdata.txt | base64) 245 | 246 | echo "$encoded_userdata" > "$encoded_userdata_path" 247 | ovf-rpctool set userdata "$encoded_userdata" 248 | ovf-rpctool set userdata.encoding "base64" 249 | } 250 | 251 | # Generate entries for cloud-init metadata and append them to the template 252 | publishMetadata () { 253 | encoded_metadata=$(sed \ 254 | -e 's/HOSTNAME/'"$(getHostFQDN)"'/' \ 255 | -e 's/MGMT_CONFIG/'"$(getManagementNetworkConfig)"'/' \ 256 | -e 's/WORKLOAD_CONFIG/'"$(getWorkloadNetworkConfig)"'/' \ 257 | -e 's/FRONTEND_CONFIG/'"$(getFrontendNetworkConfig)"'/' \ 258 | metadata.txt | base64) 259 | 260 | echo "$encoded_metadata" > "$encoded_metadata_path" 261 | ovf-rpctool set metadata "$encoded_metadata" 262 | ovf-rpctool set metadata.encoding "base64" 263 | } 264 | 265 | # If both ca.crt and ca.key are not defined, create a default one 266 | getCreateDefaultCA () { 267 | ca_cert=$(ovf-rpctool get.ovf appliance.ca_cert) 268 | ca_cert_key=$(ovf-rpctool get.ovf appliance.ca_cert_key) 269 | if [ "$ca_cert" != "" ] && [ "$ca_cert" != "null" ] && \ 270 | [ "$ca_cert_key" != "" ] && [ "$ca_cert_key" != "null" ]; then 271 | echo "false" 272 | else 273 | echo "true" 274 | fi 275 | } 276 | 277 | # Don't write these to cloud-init as it's visible in the VM's guestinfo 278 | # If either ca.crt or ca.key are missing, write out a default ca 279 | writeCAfiles () { 280 | if [ "$(getCreateDefaultCA)" == "false" ]; then 281 | 
ca_cert=$(ovf-rpctool get.ovf appliance.ca_cert) 282 | ca_cert_key=$(ovf-rpctool get.ovf appliance.ca_cert_key) 283 | writeCertFile "$ca_cert" "$ca_crt_path" "644" 284 | writeCertFile "$ca_cert_key" "$ca_key_path" "644" 285 | fi 286 | } 287 | 288 | # Persist service CIDRs to a configuration file that's picked up by the anyip-routes service 289 | writeAnyipConfig () { 290 | cidrs=$(ovf-rpctool get.ovf "loadbalance.service_ip_range") 291 | if [ "$cidrs" != "" ]; then 292 | echo -e "${cidrs//,/\\n}" >> "$anyip_cfg_path" 293 | fi 294 | } 295 | 296 | # If a network is DHCP, remove the default gateway for it 297 | # Input values: 298 | # - OVF key for the network IP 299 | # - Interface name 300 | disableDefaultRoute () { 301 | ip=$(ovf-rpctool get.ovf "$1") 302 | if [ "$ip" == "" ] || [ "$ip" == "null" ]; then 303 | # DHCP. 304 | network="${2}" 305 | mac=$(getMacForNetwork "$network") 306 | net_dropin_dir="/usr/lib/systemd/network/10-${network}.network.d" 307 | mkdir -p "$net_dropin_dir" 308 | echo -e "[DHCP]\nUseGateway=false" > "${net_dropin_dir}/10-dhcp.conf" 309 | 310 | # Ensure systemd-networkd can read the drop-in conf file. 311 | chmod a+rx "$net_dropin_dir" 312 | chmod a+r "${net_dropin_dir}/10-dhcp.conf" 313 | fi 314 | } 315 | 316 | # Appends an entry to the route-table service's config file if a gateway was 317 | # specified for this network. 318 | # Input values: 319 | # - 1 Table ID 320 | # - 2 Table Name 321 | # - 3 Gateway Key 322 | writeRouteTableConfig() { 323 | id="${1}" 324 | network="${2}" 325 | gateway=$(ovf-rpctool get.ovf "${3}") 326 | mac=$(getMacForNetwork "$network") 327 | echo "writeRouteTableConfig ${id} ${network} ${mac} ${gateway}" >>"${net_postconfig_path}" 328 | } 329 | 330 | # Disable default routes created automatically when using DHCP. 
331 | disableDefaultRoutes () { 332 | disableDefaultRoute "${workload_ip_key}" "${workload_net_name}" 333 | if getMacForNetwork "${frontend_net_name}"; then 334 | disableDefaultRoute "${frontend_ip_key}" "${frontend_net_name}" 335 | fi 336 | } 337 | 338 | # Write network postconfig actions to the script run by the net-postconfig service 339 | writeNetPostConfig () { 340 | writeRouteTableConfig 2 "${workload_net_name}" "${workload_gw_key}" 341 | if getMacForNetwork "${frontend_net_name}"; then 342 | writeRouteTableConfig 3 "${frontend_net_name}" "${frontend_gw_key}" 343 | fi 344 | } 345 | 346 | if [ ! -f "$first_boot_path" ]; then 347 | checkForExistingOvfenv # Exit if there is no ovfenv to process 348 | touch "$first_boot_path" 349 | publishUserdata 350 | publishMetadata 351 | permitRootViaSSH 352 | setHAProxyUserPass 353 | setDataPlaneAPIPort 354 | writeCAfiles 355 | writeAnyipConfig 356 | writeWorkloadNetworks 357 | disableDefaultRoutes 358 | writeNetPostConfig 359 | else 360 | ensureMetadata 361 | fi 362 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/var/lib/vmware/retry.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | # Runs a target script n times with a fixed sleep between retries 18 | 19 | DEFAULT_SLEEP_TIME_SECS=1 20 | DEFAULT_RETRY_ATTEMPTS=3 21 | 22 | USAGE=" 23 | usage: ${0} [FLAGS] COMMAND 24 | Runs a command n times with a configurable delay after each attempt 25 | 26 | COMMAND 27 | Any valid bash command. Use quotes around the command for parameters 28 | 29 | FLAGS 30 | -h show this help and exit 31 | -s sleep time in seconds (defaults to $DEFAULT_SLEEP_TIME_SECS) 32 | -r retry attempts (defaults to $DEFAULT_RETRY_ATTEMPTS) 33 | " 34 | 35 | function error() { 36 | local exit_code="${?}" 37 | echo "${@}" 1>&2 38 | return "${exit_code}" 39 | } 40 | 41 | function fatal() { 42 | error "${@}" || exit 1 43 | } 44 | 45 | while getopts ":hs:r:" opt; do # -h is a boolean flag: no trailing ":" or it would demand an argument 46 | case ${opt} in 47 | h) 48 | error "${USAGE}" && exit 1 49 | ;; 50 | s) 51 | SLEEP_TIME_SECS="${OPTARG}" 52 | ;; 53 | r) 54 | RETRY_ATTEMPTS="${OPTARG}" 55 | ;; 56 | \?) 57 | error "invalid option: -${OPTARG} ${USAGE}" && exit 1 58 | ;; 59 | :) 60 | error "option -${OPTARG} requires an argument" && exit 1 61 | ;; 62 | esac 63 | done 64 | shift $((OPTIND-1)) 65 | 66 | SLEEP_TIME_SECS=${SLEEP_TIME_SECS:-$DEFAULT_SLEEP_TIME_SECS} 67 | RETRY_ATTEMPTS=${RETRY_ATTEMPTS:-$DEFAULT_RETRY_ATTEMPTS} 68 | 69 | if [ "${#}" -lt "1" ]; then 70 | fatal "COMMAND is required ${USAGE}" 71 | fi 72 | COMMAND="${1}" 73 | 74 | COMMAND_NAME=$(basename "$COMMAND" | cut -d ' ' -f 1) 75 | 76 | rc=0 77 | for i in $(seq 1 "$RETRY_ATTEMPTS"); do 78 | # Run as a new bash process as it may have environment variables preceding the command 79 | bash -c "$COMMAND"; rc=$? 80 | if [ $rc -eq 0 ]; then 81 | exit 0 82 | else 83 | echo "Retrying $COMMAND_NAME $i. Last return code=$rc" 84 | sleep "$SLEEP_TIME_SECS" 85 | fi 86 | done 87 | echo "WARNING: $COMMAND_NAME failed all retry attempts.
Last return code=$rc" 88 | exit $rc -------------------------------------------------------------------------------- /ansible/roles/cloudinit/files/var/lib/vmware/userdata.txt: -------------------------------------------------------------------------------- 1 | ## template: jinja 2 | #cloud-config 3 | chpasswd: 4 | list: 5 | - root:ROOT_PWD_FROM_OVFENV 6 | 7 | # Generate certificates using pre-seeded entropy provided by haveged. 8 | runcmd: 9 | - dd if=/dev/random of=/tmp/.random bs=256 count=1 10 | - if true; then RANDFILE=/tmp/.random /var/lib/vmware/retry.sh "new-ca.sh -n '{{ ds.meta_data.local_ipv4 }}' /etc/haproxy"; fi 11 | - dd if=/dev/random of=/tmp/.random bs=256 count=1 12 | - RANDFILE=/tmp/.random /var/lib/vmware/retry.sh "new-cert.sh -n -1 /etc/haproxy/ca.crt -2 /etc/haproxy/ca.key -3 "127.0.0.1,{{ ds.meta_data.network.interfaces.by_ipv4.keys()|join(',') }}" -4 "localhost" "{{ ds.meta_data.hostname }}" /etc/haproxy" 2>&1 | tee /var/log/vmware/new_cert.log 13 | - vmware-rpctool "info-set guestinfo.dataplaneapi.cacert $(base64 -w0 /etc/haproxy/ca.crt)" 14 | - ip -4 address show MGMT_IFACE_NAME | grep 'scope global' | awk '{print $2}' | cut -d/ -f1 | xargs -I{} sed -i -e 's/#ListenAddress 0.0.0.0/ListenAddress {}/' /etc/ssh/sshd_config; systemctl restart sshd 15 | - ip -4 address show MGMT_IFACE_NAME | grep 'scope global' | awk '{print $2}' | cut -d/ -f1 | xargs -I{} sed -i -e 's/TLS_HOST=0.0.0.0/TLS_HOST={}/' /etc/haproxy/dataplaneapi.cfg 16 | -------------------------------------------------------------------------------- /ansible/roles/cloudinit/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | --- 15 | - name: Install cloud-init packages 16 | command: tdnf install -y cloud-init cloud-utils python3-netifaces 17 | register: cloudinit 18 | until: cloudinit is not failed 19 | retries: 50 20 | 21 | - name: Remove cloud-init /etc/cloud/cloud.cfg.d/99-disable-networking-config.cfg 22 | file: 23 | path: /etc/cloud/cloud.cfg.d/99-disable-networking-config.cfg 24 | state: absent 25 | when: ansible_os_family == "VMware Photon OS" 26 | 27 | - name: Enable management of /etc/hosts 28 | copy: 29 | src: files/etc/cloud/cloud.cfg.d/10-enable-manage-etc-hosts.cfg 30 | dest: /etc/cloud/cloud.cfg.d/10-enable-manage-etc-hosts.cfg 31 | owner: root 32 | group: root 33 | mode: "0644" 34 | when: ansible_os_family == "VMware Photon OS" 35 | 36 | - name: Patch the cloud-init Photon distro source 37 | copy: 38 | force: true 39 | src: files/usr/lib/python3.7/site-packages/cloudinit/distros/photon.py 40 | dest: /usr/lib/python3.7/site-packages/cloudinit/distros/photon.py 41 | owner: root 42 | group: root 43 | mode: "0644" 44 | when: ansible_os_family == "VMware Photon OS" 45 | 46 | - name: Add ovf-to-cloud-init service 47 | copy: 48 | src: files/etc/systemd/system/ovf-to-cloud-init.service 49 | dest: /etc/systemd/system/ovf-to-cloud-init.service 50 | owner: root 51 | group: root 52 | mode: "0644" 53 | 54 | - name: Set VMware as cloud-init datasource 55 | copy: 56 | src: files/etc/cloud/cloud.cfg.d/99-DataSourceVMware.cfg 57 | dest: /etc/cloud/cloud.cfg.d/99-DataSourceVMware.cfg 58 | owner: root 59 | group: root 60 | mode: "0644" 61 | when: 
ansible_os_family == "VMware Photon OS" 62 | 63 | - name: Add net-postconfig service 64 | copy: 65 | src: files/etc/systemd/system/net-postconfig.service 66 | dest: /etc/systemd/system/net-postconfig.service 67 | owner: root 68 | group: root 69 | mode: "0644" 70 | 71 | - name: Add ovf-to-cloud-init conversion script 72 | copy: 73 | src: files/var/lib/vmware/ovf-to-cloud-init.sh 74 | dest: /var/lib/vmware/ovf-to-cloud-init.sh 75 | owner: root 76 | group: root 77 | mode: "0744" 78 | 79 | - name: Add cloud-init userdata template 80 | copy: 81 | src: files/var/lib/vmware/userdata.txt 82 | dest: /var/lib/vmware/userdata.txt 83 | owner: root 84 | group: root 85 | mode: "0444" 86 | 87 | - name: Add cloud-init metadata template 88 | copy: 89 | src: files/var/lib/vmware/metadata.txt 90 | dest: /var/lib/vmware/metadata.txt 91 | owner: root 92 | group: root 93 | mode: "0444" 94 | 95 | - name: Add net-postconfig script 96 | copy: 97 | src: files/var/lib/vmware/net-postconfig.sh 98 | dest: /var/lib/vmware/net-postconfig.sh 99 | owner: root 100 | group: root 101 | mode: "0744" 102 | 103 | - name: Add retry.sh 104 | copy: 105 | src: files/var/lib/vmware/retry.sh 106 | dest: /var/lib/vmware/retry.sh 107 | owner: root 108 | group: root 109 | mode: "0744" 110 | 111 | - name: Enable ovf-to-cloud-init service 112 | service: 113 | name: ovf-to-cloud-init 114 | enabled: yes 115 | 116 | - name: Enable net-postconfig service 117 | service: 118 | name: net-postconfig 119 | enabled: yes 120 | 121 | - name: Install ovf-rpctool 122 | get_url: 123 | url: '{{ ovf_rpctool_url }}' 124 | dest: /usr/sbin/ovf-rpctool 125 | mode: "0755" 126 | -------------------------------------------------------------------------------- /ansible/roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in
compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | --- 15 | common_photon_rpms: 16 | - conntrack-tools 17 | - distrib-compat 18 | - ebtables 19 | - haproxy 20 | - haveged 21 | - inotify-tools 22 | - iputils 23 | - iproute2 24 | - jq 25 | - lsof 26 | - net-tools 27 | - ntp 28 | - openssl-c_rehash 29 | - open-vm-tools 30 | - pcre 31 | - psmisc 32 | - python-netifaces 33 | - python3-pip 34 | - python-requests 35 | - rpm 36 | - sed 37 | - socat 38 | - tar 39 | - tcpdump 40 | - traceroute 41 | - unzip 42 | - vim 43 | 44 | disable_public_repos: false 45 | extra_rpms: "" 46 | extra_repos: "" 47 | # photon does not have backward compatibility for legacy distro behavior for sysctl.conf by default 48 | # as it uses systemd-sysctl. set this var so we can use for sysctl conf file value. 49 | sysctl_conf_file: "{{ '/etc/sysctl.d/99-sysctl.conf' if ansible_os_family == 'VMware Photon OS' else '/etc/sysctl.conf' }}" 50 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | --- 15 | - import_tasks: photon.yml 16 | when: ansible_os_family == "VMware Photon OS" 17 | 18 | - name: Ensure net.ipv4.ip_forward sysctl is present 19 | sysctl: 20 | name: net.ipv4.ip_forward 21 | value: "1" 22 | state: present 23 | sysctl_set: yes 24 | reload: yes 25 | sysctl_file: "{{ sysctl_conf_file }}" 26 | 27 | - name: Ensure net.ipv6.conf.all.forwarding sysctl is present 28 | sysctl: 29 | name: net.ipv6.conf.all.forwarding 30 | value: "0" 31 | state: present 32 | sysctl_set: yes 33 | reload: yes 34 | sysctl_file: "{{ sysctl_conf_file }}" 35 | 36 | - name: Ensure IPv6 is disabled for all interfaces 37 | sysctl: 38 | name: net.ipv6.conf.all.disable_ipv6 39 | value: "1" 40 | state: present 41 | sysctl_set: yes 42 | reload: yes 43 | sysctl_file: "{{ sysctl_conf_file }}" 44 | 45 | - name: Ensure IPv6 is disabled as the default 46 | sysctl: 47 | name: net.ipv6.conf.default.disable_ipv6 48 | value: "1" 49 | state: present 50 | sysctl_set: yes 51 | reload: yes 52 | sysctl_file: "{{ sysctl_conf_file }}" 53 | 54 | - name: Bind SSH only to IPv4 55 | replace: 56 | path: /etc/ssh/sshd_config 57 | regexp: '^.*?AddressFamily.*$' 58 | replace: 'AddressFamily inet' 59 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/photon.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in 
compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | --- 15 | - import_tasks: rpm_repos.yml 16 | 17 | - name: Perform a tdnf distro-sync 18 | command: tdnf distro-sync -y --refresh 19 | register: distro 20 | changed_when: '"Nothing to do" not in distro.stderr' 21 | until: distro is not failed 22 | retries: 50 23 | 24 | # When the linux package is upgraded on Photon, the current /lib/modules 25 | # folder is removed, causing future package installs to fail if they involve 26 | # kmods, which a lot of our future packages do. A reboot fixes this. In the 27 | # future it might be nice to only reboot if that package was upgraded, but 28 | # a reboot of Photon is generally very fast so this is fine for now.
29 | - name: Reboot after distro sync 30 | reboot: {} 31 | when: distro.changed 32 | 33 | - name: Concatenate the Photon RPMs 34 | set_fact: 35 | photon_rpms: "{{ common_photon_rpms | join(' ') }}" 36 | 37 | - name: install baseline dependencies 38 | command: tdnf install {{ photon_rpms }} -y 39 | register: baseline 40 | until: baseline is not failed 41 | retries: 50 42 | 43 | - name: install extra RPMs 44 | command: tdnf install {{ extra_rpms }} -y 45 | when: extra_rpms != "" 46 | register: extrarpms 47 | until: extrarpms is not failed 48 | retries: 50 49 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/rpm_repos.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | --- 15 | - name: Find existing repo files 16 | find: 17 | depth: 1 18 | paths: 19 | - /etc/yum.repos.d 20 | patterns: '*.repo' 21 | register: repo_files 22 | when: disable_public_repos|bool 23 | 24 | - name: Disable repos 25 | command: "mv {{ item.path }} {{ item.path }}.disabled" 26 | loop: "{{ repo_files.files }}" 27 | when: disable_public_repos|bool 28 | 29 | - name: Install extra repos 30 | copy: 31 | src: "{{ item }}" 32 | dest: "/etc/yum.repos.d/{{ item | basename }}" 33 | loop: "{{ extra_repos.split() }}" 34 | when: extra_repos != "" 35 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/files/etc/haproxy/dataplaneapi.cfg: -------------------------------------------------------------------------------- 1 | # This file configures the Data Plane API service. 2 | LOG_LEVEL=warning 3 | LOG_TO=stdout 4 | SCHEME=https 5 | HAPROXY_BIN=/usr/sbin/haproxy 6 | CONFIG_FILE=/etc/haproxy/haproxy.cfg 7 | RELOAD_CMD=/usr/bin/systemctl reload haproxy 8 | RELOAD_DELAY=30 9 | TLS_HOST=0.0.0.0 10 | TLS_PORT=5556 11 | TLS_CERTIFICATE=/etc/haproxy/server.crt 12 | TLS_KEY=/etc/haproxy/server.key 13 | USERLIST=controller 14 | ADDITIONAL_FLAGS=--update-map-files 15 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/files/etc/haproxy/haproxy.cfg: -------------------------------------------------------------------------------- 1 | global 2 | log stdout format raw local0 debug 3 | chroot /var/lib/haproxy 4 | stats socket /run/haproxy.sock user haproxy group haproxy mode 660 level admin expose-fd listeners 5 | stats timeout 30s 6 | user haproxy 7 | group haproxy 8 | master-worker 9 | # Setting maxconn in the global section is what successfully sets the ulimit on the 10 | # host, otherwise we run out of file descriptors (defaulted at 4096). 
11 | maxconn 60000 12 | # This property indicates the number of maximum number of reloads a worker 13 | # will survive before being forcefully killed. This number is required to control 14 | # the rate at which processes can scale due to the number of reloads outscaling 15 | # the rate processes are reaped when all of their connections have been cleaned up. 16 | # This number was derived by taking the average virtual memory consumption for a 17 | # single HA Proxy process under load, ~28MB, and allocating HA Proxy 3GB out of 4GB 18 | # of the total virtual memory space. 19 | mworker-max-reloads 100 20 | 21 | # Default SSL material locations 22 | ca-base /etc/ssl/certs 23 | crt-base /etc/ssl/private 24 | 25 | # Default ciphers to use on SSL-enabled listening sockets. 26 | # For more information, see ciphers(1SSL). This list is from: 27 | # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ 28 | # An alternative list with additional directives can be obtained from 29 | # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy 30 | ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS 31 | ssl-default-bind-options no-sslv3 32 | 33 | defaults 34 | mode tcp 35 | log global 36 | option tcplog 37 | option dontlognull 38 | option tcp-smart-accept 39 | timeout check 5s 40 | timeout connect 9s 41 | timeout client 10s 42 | timeout queue 5m 43 | timeout server 10s 44 | # tunnel timeout needs to be set at a lowish value to deal with the frequent 45 | # reloads invoked by dataplaneapi at scale. With a higher value set, established 46 | # connections will hang around and prevent haproxy from killing all off older processes 47 | # because those old processes won't terminate those established connections unless 48 | # it is told to do so. 
Having these processes linger for too long can eventually 49 | # starve the system of resources as the spawn rate of processes exceeds the death rate. 50 | timeout tunnel 5m 51 | timeout client-fin 10s 52 | 53 | # Stats are disabled by default because enabling them on a non-local IP address 54 | # would result in allocating a port that could result in a conflict with one 55 | # of the binds programmed at runtime. 56 | # 57 | # To enable stats, uncomment the following section and replace SYSTEM_IP_ADDR 58 | # with the IP address of the HAProxy host. 59 | #frontend stats 60 | # mode http 61 | # bind SYSTEM_IP_ADDR:8404 62 | # stats enable 63 | # stats uri /stats 64 | # stats refresh 500ms 65 | # stats hide-version 66 | # stats show-legends 67 | 68 | userlist controller 69 | user client insecure-password cert 70 | 71 | # Please do not remove this section; it is used when performing integration 72 | # testing with the HAProxy Docker image. 73 | # 74 | # This section, along with the default user above, will be removed by 75 | # Ansible when building the HAProxy appliance's OVA.
76 | program api 77 | command dataplaneapi --log-level=debug --scheme=https --haproxy-bin=/usr/sbin/haproxy --config-file=/etc/haproxy/haproxy.cfg --reload-cmd="kill -SIGUSR2 1" --reload-delay=5 --tls-host=0.0.0.0 --tls-port=5556 --tls-certificate=/etc/haproxy/server.crt --tls-key=/etc/haproxy/server.key --userlist=controller --update-map-files 78 | no option start-on-reload 79 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/files/etc/haproxy/haproxy.cfg.mtls: -------------------------------------------------------------------------------- 1 | global 2 | log stdout format raw local0 debug 3 | chroot /var/lib/haproxy 4 | stats socket /run/haproxy.sock user haproxy group haproxy mode 660 level admin expose-fd listeners 5 | stats timeout 30s 6 | user haproxy 7 | group haproxy 8 | master-worker 9 | # Setting maxconn in the global section is what successfully sets the ulimit on the 10 | # host, otherwise we run out of file descriptors (defaulted at 4096). 11 | maxconn 60000 12 | # This property indicates the number of maximum number of reloads a worker 13 | # will survive before being forcefully killed. This number is required to control 14 | # the rate at which processes can scale due to the number of reloads outscaling 15 | # the rate processes are reaped when all of their connections have been cleaned up. 16 | # This number was derived by taking the average virtual memory consumption for a 17 | # single HA Proxy process under load, ~28MB, and allocating HA Proxy 3GB out of 4GB 18 | # of the total virtual memory space. 19 | mworker-max-reloads 100 20 | 21 | # Default SSL material locations 22 | ca-base /etc/ssl/certs 23 | crt-base /etc/ssl/private 24 | 25 | # Default ciphers to use on SSL-enabled listening sockets. 26 | # For more information, see ciphers(1SSL). 
This list is from: 27 | # https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/ 28 | # An alternative list with additional directives can be obtained from 29 | # https://mozilla.github.io/server-side-tls/ssl-config-generator/?server=haproxy 30 | ssl-default-bind-ciphers ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:RSA+AESGCM:RSA+AES:!aNULL:!MD5:!DSS 31 | ssl-default-bind-options no-sslv3 32 | 33 | defaults 34 | mode tcp 35 | log global 36 | option tcplog 37 | option dontlognull 38 | option tcp-smart-accept 39 | timeout check 5s 40 | timeout connect 9s 41 | timeout client 10s 42 | timeout queue 5m 43 | timeout server 10s 44 | # tunnel timeout needs to be set at a lowish value to deal with the frequent 45 | # reloads invoked by dataplaneapi at scale. With a higher value set, established 46 | # connections will hang around and prevent haproxy from killing all off older processes 47 | # because those old processes won't terminate those established connections unless 48 | # it is told to do so. Having these processes linger for too long can eventually 49 | # starve the system of resources as the spawn rate of processes exceeds the death rate. 50 | timeout tunnel 5m 51 | timeout client-fin 10s 52 | 53 | # Stats are disabled by default because enabling them on a non-local IP address 54 | # would result in allocating a port that could result in a conflict with one 55 | # of the binds programmed at runtime. 56 | # 57 | # To enable stats, uncomment the following section and replace SYSTEM_IP_ADDR 58 | # with the IP address of the HAProxy host. 
59 | #frontend stats 60 | # mode http 61 | # bind SYSTEM_IP_ADDR:8404 62 | # stats enable 63 | # stats uri /stats 64 | # stats refresh 500ms 65 | # stats hide-version 66 | # stats show-legends 67 | 68 | userlist controller 69 | user client insecure-password cert 70 | 71 | program api 72 | command dataplaneapi --log-level=debug --scheme=https --haproxy-bin=/usr/sbin/haproxy --config-file=/etc/haproxy/haproxy.cfg --reload-cmd="kill -SIGUSR2 1" --reload-delay=5 --tls-host=0.0.0.0 --tls-port=5556 --tls-ca=/etc/haproxy/ca.crt --tls-certificate=/etc/haproxy/server.crt --tls-key=/etc/haproxy/server.key --userlist=controller --update-map-files 73 | no option start-on-reload 74 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/files/etc/systemd/system/dataplaneapi.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=dataplaneapi.service 3 | 4 | # This service should run after haproxy.service 5 | After=haproxy.service 6 | Wants=haproxy.service 7 | 8 | [Install] 9 | WantedBy=multi-user.target 10 | 11 | [Service] 12 | TimeoutSec=0 13 | Restart=always 14 | EnvironmentFile=/etc/haproxy/dataplaneapi.cfg 15 | WorkingDirectory=/var/lib/haproxy 16 | Slice=dataplaneapi.slice 17 | 18 | ExecStart=/usr/local/bin/dataplaneapi \ 19 | --log-level=${LOG_LEVEL} \ 20 | --log-to=${LOG_TO} \ 21 | --scheme=${SCHEME} \ 22 | --haproxy-bin=${HAPROXY_BIN} \ 23 | --config-file=${CONFIG_FILE} \ 24 | --reload-cmd=${RELOAD_CMD} \ 25 | --reload-delay=${RELOAD_DELAY} \ 26 | --tls-host=${TLS_HOST} \ 27 | --tls-port=${TLS_PORT} \ 28 | --tls-certificate=${TLS_CERTIFICATE} \ 29 | --tls-key=${TLS_KEY} \ 30 | --userlist=${USERLIST} ${ADDITIONAL_FLAGS} 31 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/files/etc/systemd/system/dataplaneapi.slice: -------------------------------------------------------------------------------- 1 | [Slice] 
2 | CPUWeight=20 3 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/files/etc/systemd/system/haproxy.service.d/cloud-init.conf: -------------------------------------------------------------------------------- 1 | [Unit] 2 | # Start haproxy after cloud-init so the latter may be used to configure 3 | # the former before haproxy and its dataplane API server come online. 4 | # 5 | # Please see the following link for more information about the 6 | # cloud-init boot stage managed by the cloud-config.service: 7 | # https://cloudinit.readthedocs.io/en/latest/topics/boot.html#final 8 | After=cloud-final.service 9 | Wants=cloud-final.service 10 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/files/etc/systemd/system/haproxy.service.d/slice.conf: -------------------------------------------------------------------------------- 1 | # Assign HAProxy to the haproxy slice. 2 | [Service] 3 | Slice=haproxy.slice 4 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/files/etc/systemd/system/haproxy.slice: -------------------------------------------------------------------------------- 1 | [Slice] 2 | CPUWeight=80 3 | -------------------------------------------------------------------------------- /ansible/roles/haproxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | --- 15 | - name: Install DataPlane API 16 | get_url: 17 | url: '{{ dataplane_api_url }}' 18 | dest: /usr/local/bin/dataplaneapi 19 | mode: "0755" 20 | 21 | - name: Download HAProxy 2.2.2 22 | get_url: 23 | url: '{{ haproxy_rpm_url }}' 24 | dest: /root/haproxy-2.2.2.rpm 25 | when: haproxy_rpm_url != '' 26 | 27 | - name: Install HAProxy 2.2.2 28 | command: sh -c 'rpm --upgrade -vh /root/haproxy-2.2.2.rpm || true' 29 | when: haproxy_rpm_url != '' 30 | 31 | - name: Create HAProxy service drop-in directory 32 | file: 33 | path: /etc/systemd/system/haproxy.service.d 34 | state: directory 35 | 36 | - name: Create HAProxy cloud-init drop-in file 37 | copy: 38 | src: files/etc/systemd/system/haproxy.service.d/cloud-init.conf 39 | dest: /etc/systemd/system/haproxy.service.d/cloud-init.conf 40 | owner: root 41 | group: root 42 | mode: "0644" 43 | 44 | - name: Create HAProxy slice drop-in file 45 | copy: 46 | src: files/etc/systemd/system/haproxy.service.d/slice.conf 47 | dest: /etc/systemd/system/haproxy.service.d/slice.conf 48 | owner: root 49 | group: root 50 | mode: "0644" 51 | 52 | - name: Create Dataplane API systemd file 53 | copy: 54 | src: files/etc/systemd/system/dataplaneapi.service 55 | dest: /etc/systemd/system/dataplaneapi.service 56 | owner: root 57 | group: root 58 | mode: "0644" 59 | 60 | - name: Create Dataplane API systemd slice file 61 | copy: 62 | src: files/etc/systemd/system/dataplaneapi.slice 63 | dest: /etc/systemd/system/dataplaneapi.slice 64 | owner: root 65 | group: root 66 | mode: "0644" 67 | 68 | - name: 
Create Data Plane API configuration file 69 | copy: 70 | src: files/etc/haproxy/dataplaneapi.cfg 71 | dest: /etc/haproxy/dataplaneapi.cfg 72 | owner: root 73 | group: root 74 | mode: "0644" 75 | 76 | - name: Create HAProxy configuration file 77 | copy: 78 | src: files/etc/haproxy/haproxy.cfg 79 | dest: /etc/haproxy/haproxy.cfg 80 | owner: root 81 | group: root 82 | mode: "0644" 83 | 84 | - name: Remove default user and the Data Plane API program from HAProxy config file 85 | replace: 86 | path: /etc/haproxy/haproxy.cfg 87 | after: 'userlist controller' 88 | regexp: '^(.+)$' 89 | replace: '\n' 90 | 91 | - name: Update HAProxy log level 92 | replace: 93 | path: /etc/haproxy/haproxy.cfg 94 | regexp: 'log stdout format raw local0 debug' 95 | replace: 'log stdout format raw local0 info' 96 | 97 | - name: Enable HAProxy service 98 | systemd: 99 | name: haproxy 100 | enabled: yes 101 | daemon_reload: yes 102 | 103 | - name: Enable Dataplane API service 104 | systemd: 105 | name: dataplaneapi 106 | enabled: yes 107 | daemon_reload: yes 108 | -------------------------------------------------------------------------------- /ansible/roles/pki/files/usr/local/bin/new-ca.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
set -o errexit
set -o nounset
set -o pipefail

USAGE="
usage: ${0} [FLAGS] COMMON_NAME [OUT_DIR]
Creates a self-signed certificate authority and writes its public
and private keys as two PEM-encoded files, ca.crt and ca.key.

COMMON_NAME
  The certificate's common name. This is a required argument.

OUT_DIR
  An optional argument that specifies the directory to which to write
  the public and private keys. If omitted, the files are written to
  the working directory.

FLAGS
  -h  show this help and exit
  -c  country (defaults to US)
  -s  state or province (defaults to California)
  -l  locality (defaults to Palo Alto)
  -o  organization (defaults to VMware)
  -u  organizational unit (defaults to CAPV)
  -b  bit size (defaults to 2048)
  -d  days until expiry (defaults to 3650)
  -f  file name prefix (defaults to ca)
  -n  skip overwriting a certificate and key if one already exists.
"

# error stores the current exit code, writes its arguments to STDERR, and
# returns the stored exit code.
function error() {
  local exit_code="${?}"
  echo "${@}" 1>&2
  return "${exit_code}"
}

# fatal is like error except it always exits the program with a non-zero code.
function fatal() {
  error "${@}" || exit 1
}

# Start of main script
while getopts ":hvnc:s:l:o:u:b:d:f:" opt; do
  case ${opt} in
    h)
      error "${USAGE}" && exit 1
      ;;
    c)
      TLS_COUNTRY_NAME="${OPTARG}"
      ;;
    s)
      TLS_STATE_OR_PROVINCE_NAME="${OPTARG}"
      ;;
    l)
      TLS_LOCALITY_NAME="${OPTARG}"
      ;;
    o)
      TLS_ORG_NAME="${OPTARG}"
      ;;
    u)
      TLS_OU_NAME="${OPTARG}"
      ;;
    b)
      TLS_DEFAULT_BITS="${OPTARG}"
      ;;
    d)
      TLS_DEFAULT_DAYS="${OPTARG}"
      ;;
    f)
      TLS_FILE_PREFIX="${OPTARG}"
      ;;
    n)
      NO_OVERWRITE=1
      ;;
    v)
      VERBOSE=1
      set -x
      ;;
    \?)
      error "invalid option: -${OPTARG} ${USAGE}" && exit 1
      ;;
    :)
      error "option -${OPTARG} requires an argument" && exit 1
      ;;
  esac
done
shift $((OPTIND-1))

# Verbose mode
VERBOSE="${VERBOSE-}"
NO_OVERWRITE="${NO_OVERWRITE-}"

# The strength of the generated certificate
TLS_DEFAULT_BITS=${TLS_DEFAULT_BITS:-2048}

# The number of days until the certificate expires. The default
# value is 10 years.
TLS_DEFAULT_DAYS=${TLS_DEFAULT_DAYS:-3650}

# The components that make up the certificate's distinguished name.
TLS_COUNTRY_NAME=${TLS_COUNTRY_NAME:-US}
TLS_STATE_OR_PROVINCE_NAME=${TLS_STATE_OR_PROVINCE_NAME:-California}
TLS_LOCALITY_NAME=${TLS_LOCALITY_NAME:-Palo Alto}
TLS_ORG_NAME=${TLS_ORG_NAME:-VMware}
TLS_OU_NAME=${TLS_OU_NAME:-CAPV}

# The file name prefix for the public and private keys.
TLS_FILE_PREFIX=${TLS_FILE_PREFIX:-ca}

# The directory to which to write the public and private keys.
{ [ "${#}" -gt "1" ] && OUT_DIR="${2}"; } || OUT_DIR="$(pwd)"
mkdir -p "${OUT_DIR}"

# NOTE: fixed fragile test -- the original "[ $NO_OVERWRITE ]" relied on word
# splitting of an unquoted, possibly-empty variable; -n makes the intent
# (skip generation when -n was passed and a cert/key already exists) explicit.
if { [ -f "${OUT_DIR}/${TLS_FILE_PREFIX}.crt" ] || [ -f "${OUT_DIR}/${TLS_FILE_PREFIX}.key" ]; } && [ -n "${NO_OVERWRITE}" ]; then
  echo "Existing ${TLS_FILE_PREFIX}.crt or ${TLS_FILE_PREFIX}.key "\
    "exists in ${OUT_DIR}. Skipping cert generation."
  exit 0
fi

# The certificate's common name.
if [ "${#}" -lt "1" ]; then
  fatal "COMMON_NAME is required ${USAGE}"
fi
TLS_COMMON_NAME="${1}"

# Make a temporary directory and switch to it.
OLD_DIR="$(pwd)"
pushd "$(mktemp -d)"
TLS_TMP_DIR="$(pwd)"

# Returns the absolute path of the provided argument.
abspath() {
  { [ "$(printf %.1s "${1}")" = "/" ] && echo "${1}"; } || echo "${OLD_DIR}/${1}"
}

# Write the SSL config file to disk.
152 | cat >ssl.conf <&2 56 | return "${exit_code}" 57 | } 58 | 59 | function fatal() { 60 | error "${@}" || exit 1 61 | } 62 | 63 | # Start of main script 64 | while getopts ":hvn1:2:3:4:c:s:l:o:u:b:d:k:e:f:" opt; do 65 | case ${opt} in 66 | h) 67 | error "${USAGE}" && exit 1 68 | ;; 69 | 1) 70 | TLS_CA_CRT="${OPTARG}" 71 | ;; 72 | 2) 73 | TLS_CA_KEY="${OPTARG}" 74 | ;; 75 | 3) 76 | TLS_IP_SANS="${OPTARG}" 77 | ;; 78 | 4) 79 | TLS_DNS_SANS="${OPTARG}" 80 | ;; 81 | c) 82 | TLS_COUNTRY_NAME="${OPTARG}" 83 | ;; 84 | s) 85 | TLS_STATE_OR_PROVINCE_NAME="${OPTARG}" 86 | ;; 87 | l) 88 | TLS_LOCALITY_NAME="${OPTARG}" 89 | ;; 90 | o) 91 | TLS_ORG_NAME="${OPTARG}" 92 | ;; 93 | u) 94 | TLS_OU_NAME="${OPTARG}" 95 | ;; 96 | b) 97 | TLS_DEFAULT_BITS="${OPTARG}" 98 | ;; 99 | d) 100 | TLS_DEFAULT_DAYS="${OPTARG}" 101 | ;; 102 | k) 103 | TLS_KEY_USAGE="${OPTARG}" 104 | ;; 105 | e) 106 | TLS_EXT_KEY_USAGE="${OPTARG}" 107 | ;; 108 | f) 109 | TLS_FILE_PREFIX="${OPTARG}" 110 | ;; 111 | n) 112 | NO_OVERWRITE=1 113 | ;; 114 | v) 115 | VERBOSE=1 116 | set -x 117 | ;; 118 | \?) 119 | error "invalid option: -${OPTARG} ${USAGE}" && exit 1 120 | ;; 121 | :) 122 | error "option -${OPTARG} requires an argument" && exit 1 123 | ;; 124 | esac 125 | done 126 | shift $((OPTIND-1)) 127 | 128 | # Verbose mode 129 | VERBOSE="${VERBOSE-}" 130 | NO_OVERWRITE="${NO_OVERWRITE-}" 131 | 132 | # The strength of the generated certificate 133 | TLS_DEFAULT_BITS=${TLS_DEFAULT_BITS:-2048} 134 | 135 | # The number of days until the certificate expires. The default 136 | # value is 10 years. 137 | TLS_DEFAULT_DAYS=${TLS_DEFAULT_DAYS:-3650} 138 | 139 | # The components that make up the certificate's distinguished name. 
140 | TLS_COUNTRY_NAME=${TLS_COUNTRY_NAME:-US} 141 | TLS_STATE_OR_PROVINCE_NAME=${TLS_STATE_OR_PROVINCE_NAME:-California} 142 | TLS_LOCALITY_NAME=${TLS_LOCALITY_NAME:-Palo Alto} 143 | TLS_ORG_NAME=${TLS_ORG_NAME:-VMware} 144 | TLS_OU_NAME=${TLS_OU_NAME:-CAPV} 145 | 146 | # The certificate's key usage. 147 | TLS_KEY_USAGE=${TLS_KEY_USAGE:-digitalSignature, keyEncipherment} 148 | 149 | # The certificate's extended key usage string. 150 | TLS_EXT_KEY_USAGE=${TLS_EXT_KEY_USAGE:-clientAuth, serverAuth} 151 | 152 | # The file name prefix for the public and private keys. 153 | TLS_FILE_PREFIX=${TLS_FILE_PREFIX:-server} 154 | 155 | # The certificate's common name. 156 | [ "${#}" -ge "1" ] || fatal "COMMON_NAME is required ${USAGE}" 157 | TLS_COMMON_NAME="${1}" 158 | 159 | # The signing CA. 160 | [ -e "${TLS_CA_CRT-}" ] || fatal "the public key of the CA must be specified with -1 ${USAGE}" 161 | [ -e "${TLS_CA_KEY-}" ] || fatal "the private key of the CA must be specified with -2 ${USAGE}" 162 | 163 | # The directory to which to write the public and private keys. 164 | { [ "${#}" -gt "1" ] && OUT_DIR="${2}"; } || OUT_DIR="$(pwd)" 165 | mkdir -p "${OUT_DIR}" 166 | 167 | if { [ -f "${OUT_DIR}/${TLS_FILE_PREFIX}.crt" ] || [ -f "${OUT_DIR}/${TLS_FILE_PREFIX}.key" ]; } && [ $NO_OVERWRITE ]; then 168 | echo "Existing ${TLS_FILE_PREFIX}.crt or ${TLS_FILE_PREFIX}.key "\ 169 | "exists in ${OUT_DIR}. Skipping cert generation." 170 | exit 0 171 | fi 172 | 173 | # Make a temporary directory and switch to it. 174 | OLD_DIR="$(pwd)" 175 | pushd "$(mktemp -d)" 176 | TLS_TMP_DIR="$(pwd)" 177 | 178 | # Returns the absolute path of the provided argument. 179 | abspath() { 180 | { [ "$(printf %.1s "${1}")" = "/" ] && echo "${1}"; } || echo "${OLD_DIR}/${1}" 181 | } 182 | 183 | # Write the SSL config file to disk. 184 | cat >ssl.conf <> ssl.conf <>ssl.conf && i="$(( i+1 ))" 225 | done 226 | 227 | # Append any IP SANs to the SSL config file. 
228 | i=1 && for j in $(echo "${TLS_IP_SANS-}" | tr ',' ' '); do 229 | echo "IP.${i} = ${j}" >>ssl.conf && i="$(( i+1 ))" 230 | done 231 | fi 232 | 233 | [ -z "${VERBOSE}" ] || cat ssl.conf 234 | 235 | # Generate a private key file. 236 | openssl genrsa -out "${TLS_FILE_PREFIX}.key" "${TLS_DEFAULT_BITS}" 237 | 238 | # Generate a certificate CSR. 239 | openssl req -config ssl.conf \ 240 | -new \ 241 | -key "${TLS_FILE_PREFIX}.key" \ 242 | -days "${TLS_DEFAULT_DAYS}" \ 243 | -out "${TLS_FILE_PREFIX}.csr" 244 | 245 | # Sign the CSR with the provided CA. 246 | openssl x509 -extfile ssl.conf \ 247 | -extensions ext \ 248 | -days "${TLS_DEFAULT_DAYS}" \ 249 | -req \ 250 | -in "${TLS_FILE_PREFIX}.csr" \ 251 | -CA "$(abspath "${TLS_CA_CRT}")" \ 252 | -CAkey "$(abspath "${TLS_CA_KEY}")" \ 253 | -CAcreateserial \ 254 | -out "${TLS_FILE_PREFIX}.crt" 255 | 256 | 257 | if [[ ! -f "${TLS_FILE_PREFIX}.crt" || ! -f "${TLS_FILE_PREFIX}.key" ]]; then 258 | echo "failed to output certificate and key" 259 | exit 1 260 | fi 261 | 262 | # Copy the files to OUT_DIR 263 | cp -f "${TLS_FILE_PREFIX}.crt" "${TLS_FILE_PREFIX}.key" "$(abspath "${OUT_DIR}")" 264 | 265 | # Print the certificate's information if requested. 266 | [ -z "${VERBOSE}" ] || { echo && openssl x509 -noout -text <"${TLS_FILE_PREFIX}.crt"; } 267 | 268 | # Return to the original directory and cleanup the temporary TLS dir. 269 | popd 270 | rm -fr "${TLS_TMP_DIR}" 271 | 272 | -------------------------------------------------------------------------------- /ansible/roles/pki/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | --- 15 | - name: Create new-ca.sh file 16 | copy: 17 | src: files/usr/local/bin/new-ca.sh 18 | dest: /usr/local/bin/new-ca.sh 19 | owner: root 20 | group: root 21 | mode: "0755" 22 | 23 | - name: Create new-cert.sh file 24 | copy: 25 | src: files/usr/local/bin/new-cert.sh 26 | dest: /usr/local/bin/new-cert.sh 27 | owner: root 28 | group: root 29 | mode: "0755" 30 | -------------------------------------------------------------------------------- /ansible/roles/sysprep/files/etc/hosts: -------------------------------------------------------------------------------- 1 | ::1 localhost ip6-localhost ip6-loopback 2 | 127.0.0.1 localhost localhost.local -------------------------------------------------------------------------------- /ansible/roles/sysprep/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http:#www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | --- 15 | - name: Define file modes 16 | set_fact: 17 | last_log_mode: "0644" 18 | machine_id_mode: "0444" 19 | 20 | - name: Set hostname 21 | command: hostnamectl set-hostname localhost.local 22 | 23 | - name: Reset hosts file 24 | copy: 25 | src: files/etc/hosts 26 | dest: /etc/hosts 27 | owner: root 28 | group: root 29 | mode: "0644" 30 | 31 | - name: Truncate hostname file 32 | file: 33 | state: "{{ item.state }}" 34 | path: "{{ item.path }}" 35 | owner: root 36 | group: root 37 | mode: "{{ item.mode }}" 38 | loop: 39 | - { path: /etc/hostname, state: absent, mode: "0644" } 40 | - { path: /etc/hostname, state: touch, mode: "0644" } 41 | 42 | # Only the virtual services are listening on non-management interfaces, so 43 | # there is no need for a stateful firewall. Not to mention that iptables 44 | # reliance on conntrack contributes to performance degredation. 45 | - name: Disable IP tables 46 | service: 47 | name: iptables 48 | enabled: no 49 | 50 | - name: Remove the kickstart log 51 | file: 52 | state: absent 53 | path: /root/anaconda-ks.cfg 54 | 55 | - name: Remove tdnf package caches 56 | command: /usr/bin/tdnf -y clean all 57 | 58 | - name: Truncate machine id 59 | file: 60 | state: "{{ item.state }}" 61 | path: "{{ item.path }}" 62 | owner: root 63 | group: root 64 | mode: "{{ item.mode }}" 65 | loop: 66 | - { path: /etc/machine-id, state: absent, mode: "{{ machine_id_mode }}" } 67 | - { path: /etc/machine-id, state: touch, mode: "{{ machine_id_mode }}" } 68 | 69 | - name: Truncate audit logs 70 | file: 71 | state: "{{ item.state }}" 72 | path: "{{ item.path }}" 73 | owner: root 74 | group: utmp 75 | mode: "{{ item.mode }}" 76 | loop: 77 | - { path: /var/log/wtmp, state: absent, mode: "0664" } 78 | - { path: /var/log/lastlog, state: absent, mode: "{{ last_log_mode }}" } 79 | - { path: /var/log/wtmp, state: touch, mode: "0664" } 80 | - { path: /var/log/lastlog, state: touch, mode: "{{ last_log_mode }}" } 81 | 82 | - name: Remove cloud-init lib dir and 
logs 83 | file: 84 | state: absent 85 | path: "{{ item }}" 86 | loop: 87 | - /var/lib/cloud 88 | - /var/log/cloud-init.log 89 | - /var/log/cloud-init-output.log 90 | - /var/run/cloud-init 91 | 92 | # A shallow search in /tmp and /var/tmp is used to declare which files or 93 | # directories will be removed as part of resetting temp space. The reason 94 | # a state absent->directory task isn't used is because Ansible's own data 95 | # directory on the remote host(s) is /tmp/.ansible. Thus, by removing /tmp, 96 | # Ansible can no longer access the remote host. 97 | - name: Find temp files 98 | find: 99 | depth: 1 100 | file_type: any 101 | paths: 102 | - /tmp 103 | - /var/tmp 104 | pattern: '*' 105 | register: temp_files 106 | 107 | - name: Reset temp space 108 | file: 109 | state: absent 110 | path: "{{ item.path }}" 111 | loop: "{{ temp_files.files }}" 112 | 113 | - name: Find SSH host keys 114 | find: 115 | paths: 116 | - /etc/ssh 117 | pattern: 'ssh_host_*' 118 | register: ssh_host_keys 119 | 120 | - name: Remove SSH host keys 121 | file: 122 | state: absent 123 | path: "{{ item.path }}" 124 | loop: "{{ ssh_host_keys.files }}" 125 | 126 | - name: Remove SSH authorized users 127 | file: 128 | state: absent 129 | path: "{{ item.path }}" 130 | loop: 131 | - { path: /root/.ssh/authorized_keys } 132 | - { path: "/home/{{ ansible_env.SUDO_USER }}/.ssh/authorized_keys" } 133 | 134 | - name: Remove and recreate /var/log 135 | file: 136 | state: "{{ item.state }}" 137 | path: "{{ item.path }}" 138 | owner: root 139 | group: root 140 | mode: 0755 141 | loop: 142 | - { path: /var/log, state: absent } 143 | - { path: /var/log, state: directory } 144 | 145 | - name: Truncate shell history 146 | file: 147 | state: absent 148 | path: "{{ item.path }}" 149 | loop: 150 | - { path: /root/.bash_history } 151 | - { path: "/home/{{ ansible_env.SUDO_USER }}/.bash_history" } 152 | -------------------------------------------------------------------------------- 
/ansible/roles/vmware/files/etc/systemd/system/anyip-routes.service: -------------------------------------------------------------------------------- 1 | # Creates Any IP routes for each network CIDR defined in 2 | # in /etc/vmware/anyip-routes.cfg. 3 | 4 | [Unit] 5 | Description=anyip-routes.service 6 | 7 | # This service *must* run after cloud-init has completed and networking is 8 | # online (which it is by the time cloud-final has executed), but before HAProxy 9 | # has started. 10 | After=cloud-final.service 11 | Before=haproxy.service 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | 16 | [Service] 17 | Type=simple 18 | TimeoutSec=0 19 | WorkingDirectory=/var/lib/vmware 20 | 21 | # Create the log directory. 22 | ExecStartPre=/bin/mkdir -p /var/log/vmware 23 | 24 | # Run the up command once. 25 | ExecStartPre=/var/lib/vmware/anyiproutectl.sh up 26 | 27 | # Watch the config file for changes. 28 | ExecStart=/var/lib/vmware/anyiproutectl.sh watch 29 | 30 | # Remove the Any IP routes. 31 | ExecStopPost=/var/lib/vmware/anyiproutectl.sh down 32 | -------------------------------------------------------------------------------- /ansible/roles/vmware/files/etc/systemd/system/route-tables.service: -------------------------------------------------------------------------------- 1 | # Creates default gateways on new route tables for each network configured 2 | # in /etc/vmware/route-tables.cfg. 3 | 4 | [Unit] 5 | Description=route-tables.service 6 | 7 | # This service *must* run after cloud-init has completed and networking is 8 | # online (which it is by the time cloud-final has executed), but before HAProxy 9 | # has started. 10 | After=cloud-final.service net-postconfig.service 11 | Before=anyip-routes.service haproxy.service 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | 16 | [Service] 17 | Type=simple 18 | TimeoutSec=0 19 | WorkingDirectory=/var/lib/vmware 20 | 21 | # Create the log directory. 
22 | ExecStartPre=/bin/mkdir -p /var/log/vmware 23 | 24 | # Run the up command once. 25 | ExecStartPre=/var/lib/vmware/routetablectl.sh up 26 | 27 | # Watch the config file for changes. 28 | ExecStart=/var/lib/vmware/routetablectl.sh watch 29 | 30 | # Remove the Any IP routes. 31 | ExecStopPost=/var/lib/vmware/routetablectl.sh down 32 | 33 | Restart=always 34 | -------------------------------------------------------------------------------- /ansible/roles/vmware/files/etc/vmware/anyip-routes.cfg: -------------------------------------------------------------------------------- 1 | # 2 | # Configuration file that contains a line-delimited list of CIDR values 3 | # that define the network ranges used to bind the load balancer's frontends 4 | # to virtual IP addresses. 5 | # 6 | # * Lines beginning with a comment character, #, are ignored 7 | # * This file is used by the anyip-routes service 8 | # 9 | -------------------------------------------------------------------------------- /ansible/roles/vmware/files/etc/vmware/route-tables.cfg: -------------------------------------------------------------------------------- 1 | # 2 | # Configuration file that contains a line-delimited list of values used to 3 | # create route tables on which default gateways are defined. This enables 4 | # the use of IP policy to ensure traffic to interfaces that do not use the 5 | # default gateway is routed correctly. 6 | # 7 | # * Lines beginning with a comment character, #, are ignored 8 | # * This file is used by the route-tables service 9 | # 10 | # Each line that contains a value must adhere to the following, 11 | # comma-separated format: 12 | # 13 | # ,,,, 14 | # 15 | # The fields in the above format are as follows: 16 | # 17 | # * TableID The route table ID. This value should be an integer between 18 | # 2-250. Please see /etc/iproute2/rt_tables for a list of the 19 | # route table IDs currently in use, including reserved IDs. 20 | # 21 | # * TableName The name of the route table. 
This value will be appended 22 | # to a constant prefix, used to identify route tables managed 23 | # by the route-tables service. 24 | # 25 | # * MACAddress The MAC address of the interface connected to the network 26 | # specified by NetworkCIDR 27 | # 28 | # * NetworkCIDR The CIDR of the network to which the interface by MACAddress 29 | # is connected 30 | # 31 | # * Gateway4 The IPv4 address of the gateway used by the network specified 32 | # by NetworkCIDR 33 | # 34 | # For example, the following lines are valid values: 35 | # 36 | # 2,frontend,00:00:00:ab:cd:ef,192.168.1.0/24,192.168.1.1 37 | # 3,workload,00:00:00:12:34:56,192.168.2.0/24,192.168.2.1 38 | # 39 | -------------------------------------------------------------------------------- /ansible/roles/vmware/files/usr/lib/systemd/network/10-frontend.link: -------------------------------------------------------------------------------- 1 | [Match] 2 | Path=pci-0000:13:00.0 3 | 4 | [Link] 5 | Name=frontend -------------------------------------------------------------------------------- /ansible/roles/vmware/files/usr/lib/systemd/network/10-management.link: -------------------------------------------------------------------------------- 1 | [Match] 2 | Path=pci-0000:03:00.0 3 | 4 | [Link] 5 | Name=management -------------------------------------------------------------------------------- /ansible/roles/vmware/files/usr/lib/systemd/network/10-workload.link: -------------------------------------------------------------------------------- 1 | [Match] 2 | Path=pci-0000:0b:00.0 3 | 4 | [Link] 5 | Name=workload -------------------------------------------------------------------------------- /ansible/roles/vmware/files/usr/local/bin/haproxy-support: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # Collects support bundles for HAProxy and records networking information for debugging. 
#

# Collect HAProxy service logs and version information for the support bundle.
function save_haproxy_info {
  # Collect HAProxy logs
  journalctl -xu haproxy > /var/log/haproxy.log

  # Collect the HAProxy version.
  { { rpm -qa haproxy || true; } &&
    { command -v haproxy >/dev/null 2>&1 && haproxy -vv || /usr/sbin/haproxy -vv; }; \
  } > /etc/haproxy/haproxy-version

  # Collect DPAPI version
  { command -v dataplaneapi >/dev/null 2>&1 && dataplaneapi --version || /usr/local/bin/dataplaneapi --version; } \
    > /etc/haproxy/dataplaneapi-version
}

# Record firewall rules, addresses, routes, custom route tables, and listening
# ports to /var/log/network-info.log for debugging.
function save_network_info {
  { echo '--- IP TABLES ---' && \
    iptables -S && \
    echo '--- IP ADDRS ---' && \
    ip a && \
    echo '--- IP ROUTES ---' && \
    ip r && \
    echo '--- IP ROUTE TABLE LOCAL ---' && \
    ip r show table local && \
    echo '--- IP ROUTE TABLES ---' && \
    grep 'rtctl_' /etc/iproute2/rt_tables | awk '{print $2}' | while IFS= read -r table_name
    do
      echo "${table_name}" && ip route show table "${table_name}"
    done && \
    echo '--- OPEN PORTS ---' && lsof -noP | grep LISTEN; } > /var/log/network-info.log
}

# Capture the route-tables service journal.
function save_route_table_info {
  journalctl -xu route-tables > /var/log/routes.log
}

# Capture the anyip-routes service journal.
function save_anyip_info {
  # NOTE: fixed unit name -- the systemd unit is "anyip-routes.service"
  # (see etc/systemd/system/anyip-routes.service); "journalctl -xu anyip"
  # matched no unit and always produced an empty log.
  journalctl -xu anyip-routes > /var/log/anyip.log
}


if [ "$(id -u)" != 0 ]; then
  echo "This script must be executed as root"
  exit 1
fi

echo "Starting support bundle collection"

save_haproxy_info
save_network_info
save_anyip_info
save_route_table_info

support_output="${HOME}/haproxy-support.tgz"

tar -C / -czf "${support_output}" \
  /etc/haproxy \
  /var/log/ \
  /var/log/vmware \
  /etc/vmware/ > /dev/null 2>&1

echo "Support bundle collected at ${support_output}"
--------------------------------------------------------------------------------
/ansible/roles/vmware/files/var/lib/vmware/anyiproutectl.sh:
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | ################################################################################ 18 | # usage: anyiproutectl [FLAGS] 19 | # This program is used to control Any IP routes on this host. 20 | ################################################################################ 21 | 22 | set -o errexit 23 | set -o nounset 24 | set -o pipefail 25 | 26 | ################################################################################ 27 | ## usage 28 | ################################################################################ 29 | 30 | USAGE="usage: ${0} [FLAGS] CMD 31 | Controls Any IP routes on this host 32 | 33 | CMD 34 | up enables the routes 35 | down disables the routes 36 | watch runs in the foreground while watching the config file for changes 37 | 38 | FLAGS 39 | -h show this help and exit 40 | 41 | Globals 42 | CONFIG_FILE 43 | path to this program's config file. default: /etc/vmware/anyip-routes.cfg 44 | " 45 | 46 | ################################################################################ 47 | ## args 48 | ################################################################################ 49 | 50 | # The path to the config file used by this program. 
51 | CONFIG_FILE="${CONFIG_FILE:-/etc/vmware/anyip-routes.cfg}" 52 | 53 | 54 | ################################################################################ 55 | ## funcs 56 | ################################################################################ 57 | 58 | # error stores exit code, writes arguments to STDERR, and returns stored exit code 59 | # fatal is like error except it will exit program if exit code >0 60 | function error() { 61 | local exit_code="${?}" 62 | echo "${@}" 1>&2 63 | return "${exit_code}" 64 | } 65 | function fatal() { 66 | error "${@}" 67 | exit 1 68 | } 69 | function echo2() { 70 | echo "${@}" 1>&2 71 | } 72 | 73 | # Disable any custom routes. 74 | function down_routes() { 75 | while IFS= read -r line; do 76 | # Skip empty and commented lines. 77 | if [ -z "${line}" ] || [ "${line::1}" == "#" ]; then 78 | continue 79 | fi 80 | 81 | ip="${line}" 82 | # When doing the grep, remove a possible /32 since the "ip route" 83 | # command will normalize /32 IP addresses by removing the /32. 84 | if ! ip route show table local | grep -qF "local ${ip%/32} dev lo scope host"; then 85 | echo2 "route already removed for ${ip}" 86 | else 87 | echo2 "removing route for ${ip}" 88 | ip route del table local "${ip}" dev lo 89 | fi 90 | done <"${CONFIG_FILE}" 91 | } 92 | 93 | # Enables the custom routes. 94 | function up_routes() { 95 | while IFS= read -r line; do 96 | # Skip empty and commented lines. 97 | if [ -z "${line}" ] || [ "${line::1}" == "#" ]; then 98 | continue 99 | fi 100 | 101 | ip="${line}" 102 | # When doing the grep, remove a possible /32 since the "ip route" 103 | # command will normalize /32 IP addresses by removing the /32. 
104 | if ip route show table local | grep -qF "local ${ip%/32} dev lo scope host"; then 105 | echo2 "route already exists for ${ip}" 106 | else 107 | echo2 "adding route for ${ip}" 108 | ip route add local "${ip}" dev lo 109 | fi 110 | done <"${CONFIG_FILE}" 111 | } 112 | 113 | # Watches the config file and acts on any detected changes. 114 | function watch_routes() { 115 | echo2 "watching configuration file for changes" 116 | inotifywait -m -e modify "${CONFIG_FILE}" | while read -r; do up_routes; done 117 | } 118 | 119 | ################################################################################ 120 | ## main 121 | ################################################################################ 122 | 123 | # Parse the command-line arguments. 124 | while getopts ":h" opt; do 125 | case ${opt} in 126 | h) 127 | fatal "${USAGE}" 128 | ;; 129 | \?) 130 | fatal "invalid option: -${OPTARG} ${USAGE}" 131 | ;; 132 | :) 133 | fatal "option -${OPTARG} requires an argument" 134 | ;; 135 | esac 136 | done 137 | shift $((OPTIND - 1)) 138 | 139 | CMD="${1-}" 140 | case "${CMD}" in 141 | up) 142 | up_routes 143 | ;; 144 | down) 145 | down_routes 146 | ;; 147 | watch) 148 | watch_routes 149 | ;; 150 | *) 151 | error "${USAGE}" 152 | ;; 153 | esac 154 | -------------------------------------------------------------------------------- /ansible/roles/vmware/files/var/lib/vmware/routetablectl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | ################################################################################ 18 | # usage: routetablectl [FLAGS] 19 | # This program is used to control custom route tables on this host. 20 | ################################################################################ 21 | 22 | set -o errexit 23 | set -o nounset 24 | set -o pipefail 25 | 26 | ################################################################################ 27 | ## usage 28 | ################################################################################ 29 | 30 | USAGE="usage: ${0} [FLAGS] CMD 31 | Controls custom route tables on this host 32 | 33 | CMD 34 | up enables the routes 35 | down disables the routes 36 | watch runs in the foreground while watching the config file for changes 37 | 38 | FLAGS 39 | -h show this help and exit 40 | 41 | Globals 42 | CONFIG_FILE 43 | path to this program's config file. default: /etc/vmware/route-tables.cfg 44 | RT_TABLES_FILE 45 | path to the rt_tables file. default: /etc/iproute2/rt_tables 46 | " 47 | 48 | ################################################################################ 49 | ## const 50 | ################################################################################ 51 | 52 | # The prefix for the names of route tables created with this program. 
53 | RT_TABLE_NAME_PREFIX="rtctl_" 54 | 55 | ################################################################################ 56 | ## args 57 | ################################################################################ 58 | 59 | # The path to the config file used by this program. 60 | CONFIG_FILE="${CONFIG_FILE:-/etc/vmware/route-tables.cfg}" 61 | 62 | # The path to the file with the route table identifiers. 63 | RT_TABLES_FILE="${RT_TABLES_FILE:-/etc/iproute2/rt_tables}" 64 | 65 | # Path to the file for additional workload networks. 66 | WORKLOAD_NETWORKS_FILE="${WORKLOAD_NETWORKS_FILE:-/etc/vmware/workload-networks.cfg}" 67 | 68 | # Name of the route table for workload networks. 69 | WORKLOAD_RT="${WORKLOAD_RT:-${RT_TABLE_NAME_PREFIX}workload}" 70 | 71 | ################################################################################ 72 | ## funcs 73 | ################################################################################ 74 | 75 | # error stores exit code, writes arguments to STDERR, and returns stored exit code 76 | # fatal is like error except it will exit program if exit code >0 77 | function error() { 78 | local exit_code="${?}" 79 | echo "${@}" 1>&2 80 | return "${exit_code}" 81 | } 82 | 83 | function fatal() { 84 | error "${@}" 85 | exit 1 86 | } 87 | 88 | function echo2() { 89 | echo "${@}" 2>&1 90 | } 91 | 92 | function call() { 93 | echo2 "${@}" 94 | eval "${@}" 95 | } 96 | 97 | # Returns the name of the device that has the provided MAC address. 98 | function dev_from_mac() { 99 | ip -o link | awk -F': ' -vIGNORECASE=1 '/'"${1}"'/ { print $2 }' | awk -F'@' '{print $1}' 100 | } 101 | 102 | # Disable any custom route tables. 103 | function down_routes() { 104 | printf '' >"${RT_TABLES_FILE}.tmp" 105 | while IFS= read -r line; do 106 | # Copy the line only if it is not a rtctl_ table. 
107 | if [[ "${line}" != *"${RT_TABLE_NAME_PREFIX}"* ]]; then 108 | echo "${line}" >>"${RT_TABLES_FILE}.tmp" 109 | # Otherwise, get the name of the table and disable it. 110 | else 111 | # Get the name of the route table. 112 | route_table_name="$(echo "${line}" | awk -F' ' '{print $2}')" 113 | echo2 "discovered route table ${route_table_name}" 114 | 115 | # Remove the rules for this route table. 116 | while ip call "rule del from 0/0 to 0/0 table ${route_table_name} 2>/dev/null"; do true; done 117 | 118 | # Remove any existing routes from our route tables. 119 | while IFS= read -r route; do 120 | call "ip route del table ${route_table_name} ${route}" 121 | done < <(ip route show table "${route_table_name}") 122 | fi 123 | done < "${RT_TABLES_FILE}" 124 | mv -f "${RT_TABLES_FILE}.tmp" "${RT_TABLES_FILE}" 125 | } 126 | 127 | # Adds route tables to the route tables file. Prevents duplicates from being added. 128 | function add_route_tables() { 129 | tables=$(grep -E '^\w' "${CONFIG_FILE}" || echo "" | cut -d, -f1,2 | uniq) 130 | for table in ${tables}; do 131 | IFS=, read -ra line <<< "${table}" 132 | route_table_id="${line[0]}" 133 | route_table_name="${RT_TABLE_NAME_PREFIX}${line[1]}" 134 | echo2 "create new route table id=${route_table_id} name=${route_table_name}" 135 | printf '%d\t%s\n' "${route_table_id}" "${route_table_name}" >>"${RT_TABLES_FILE}" 136 | done 137 | } 138 | 139 | # Adds lookup rules for workload routes. The net result is that additional workloads can be reached 140 | # via the default gateway of the workload network route table. 141 | function add_workload_network_rules() { 142 | if [ ! -f "${WORKLOAD_NETWORKS_FILE}" ]; then 143 | echo2 "no additional workload networks detected" 144 | return 145 | fi 146 | 147 | while IFS= read -r cfg_cidr; do 148 | # Skip empty and commented lines. 
149 | if [ -z "${cfg_cidr}" ] || [ "${cfg_cidr::1}" == "#" ]; then 150 | continue 151 | fi 152 | call "ip rule add to ${cfg_cidr} lookup ${WORKLOAD_RT}" 153 | done < "${WORKLOAD_NETWORKS_FILE}" 154 | } 155 | 156 | # Enables the custom route tables. 157 | function up_routes() { 158 | # Enabling the custom route tables first requires tearing down any existing custom route 159 | # tables and rules so that this operation is idempotent. 160 | down_routes 161 | 162 | if [ ! -f "${CONFIG_FILE}" ]; then 163 | echo2 "missing config file ${CONFIG_FILE}" 164 | return 0 165 | fi 166 | 167 | add_route_tables 168 | add_workload_network_rules 169 | 170 | while IFS= read -r line; do 171 | # Skip empty and commented lines. 172 | if [ -z "${line}" ] || [ "${line::1}" == "#" ]; then 173 | continue 174 | fi 175 | 176 | # Split the CSV line into its parts: <table-id>,<table-name>,<mac-addr>,<cidr>[,<gateway>]. 177 | IFS=, read -ra line_parts <<<"${line}" 178 | 179 | # Store route table configuration's parts. 180 | cfg_table_name="${line_parts[1]}" 181 | cfg_mac_addr="${line_parts[2]}" 182 | cfg_cidr="${line_parts[3]}" 183 | cfg_gateway="" 184 | 185 | if [[ ${#line_parts[@]} == 5 ]]; then 186 | cfg_gateway="${line_parts[4]}" 187 | fi 188 | 189 | cfg_dev="$(dev_from_mac "${cfg_mac_addr}")" 190 | route_table_name="${RT_TABLE_NAME_PREFIX}${cfg_table_name}" 191 | 192 | if [[ "${cfg_gateway}" == "" ]]; then 193 | cfg_destination=$(python3 -c "import sys; import ipaddress; print(ipaddress.ip_network(sys.argv[1], strict=False))" "${cfg_cidr}") 194 | host="$(echo "${cfg_cidr}" | cut -d/ -f 1)" 195 | call "ip route add table ${route_table_name} ${cfg_destination} dev ${cfg_dev} proto kernel scope link src ${host}" 196 | else 197 | # Create default route for new route table. 198 | call "ip route add table ${route_table_name} default via ${cfg_gateway} dev ${cfg_dev} proto static" 199 | # Create IP rule for new route table. 200 | call "ip rule add from ${cfg_cidr} lookup ${route_table_name}" 201 | fi 202 | 203 | done <"${CONFIG_FILE}" 204 | } 205 | 206 | # Watches the config file and acts on any detected changes.
207 | function watch_routes() { 208 | echo2 "watching configuration file for changes" 209 | inotifywait -m -e modify "${CONFIG_FILE}" | while read -r; do up_routes; done 210 | } 211 | 212 | ################################################################################ 213 | ## main 214 | ################################################################################ 215 | 216 | # Parse the command-line arguments; only -h (print usage) is supported. 217 | while getopts ":h" opt; do 218 | case ${opt} in 219 | h) 220 | fatal "${USAGE}" 221 | ;; 222 | \?) 223 | fatal "invalid option: -${OPTARG} ${USAGE}" 224 | ;; 225 | :) 226 | fatal "option -${OPTARG} requires an argument" 227 | ;; 228 | esac 229 | done 230 | shift $((OPTIND - 1)) 231 | 232 | CMD="${1-}" 233 | case "${CMD}" in 234 | up) 235 | up_routes 236 | ;; 237 | down) 238 | down_routes 239 | ;; 240 | watch) 241 | watch_routes 242 | ;; 243 | *) 244 | error "${USAGE}" 245 | ;; 246 | esac 247 | -------------------------------------------------------------------------------- /ansible/roles/vmware/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Copyright 2020 HAProxy Technologies 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | --- 15 | - name: Create /etc/vmware 16 | file: 17 | path: /etc/vmware 18 | owner: root 19 | group: root 20 | mode: "0755" 21 | state: directory 22 | 23 | - name: Create /var/lib/vmware 24 | file: 25 | path: /var/lib/vmware 26 | owner: root 27 | group: root 28 | mode: "0755" 29 | state: directory 30 | 31 | - name: Copy management network link configuration 32 | copy: 33 | src: files/usr/lib/systemd/network/10-management.link 34 | dest: /usr/lib/systemd/network/10-management.link 35 | owner: systemd-network 36 | group: systemd-network 37 | mode: "0644" 38 | 39 | - name: Copy workload network link configuration 40 | copy: 41 | src: files/usr/lib/systemd/network/10-workload.link 42 | dest: /usr/lib/systemd/network/10-workload.link 43 | owner: systemd-network 44 | group: systemd-network 45 | mode: "0644" 46 | 47 | - name: Copy frontend network link configuration 48 | copy: 49 | src: files/usr/lib/systemd/network/10-frontend.link 50 | dest: /usr/lib/systemd/network/10-frontend.link 51 | owner: systemd-network 52 | group: systemd-network 53 | mode: "0644" 54 | 55 | - name: Copy support bundle collection script 56 | copy: 57 | src: files/usr/local/bin/haproxy-support 58 | dest: /usr/local/bin/haproxy-support 59 | owner: root 60 | group: root 61 | mode: "0544" 62 | 63 | - name: Generate new initrd image containing our link files 64 | shell: 65 | cmd: /usr/bin/dracut --add network --rebuild /boot/initrd.img-$(uname -r) 66 | 67 | - name: Create anyip-routes service configuration file 68 | copy: 69 | src: files/etc/vmware/anyip-routes.cfg 70 | dest: /etc/vmware/anyip-routes.cfg 71 | owner: root 72 | group: root 73 | mode: "0644" 74 | 75 | - name: Create anyiproutectl script 76 | copy: 77 | src: files/var/lib/vmware/anyiproutectl.sh 78 | dest: /var/lib/vmware/anyiproutectl.sh 79 | owner: root 80 | group: root 81 | mode: "0744" 82 | 83 | - name: Create anyip-routes service 84 | copy: 85 | src: files/etc/systemd/system/anyip-routes.service 86 | dest: 
/etc/systemd/system/anyip-routes.service 87 | owner: root 88 | group: root 89 | mode: "0644" 90 | 91 | - name: Enable anyip-routes service 92 | systemd: 93 | name: anyip-routes 94 | enabled: yes 95 | daemon_reload: yes 96 | state: stopped 97 | 98 | - name: Create route-table service configuration file 99 | copy: 100 | src: files/etc/vmware/route-tables.cfg 101 | dest: /etc/vmware/route-tables.cfg 102 | owner: root 103 | group: root 104 | mode: "0644" 105 | 106 | - name: Create routetablectl script 107 | copy: 108 | src: files/var/lib/vmware/routetablectl.sh 109 | dest: /var/lib/vmware/routetablectl.sh 110 | owner: root 111 | group: root 112 | mode: "0744" 113 | 114 | - name: Create route-tables service 115 | copy: 116 | src: files/etc/systemd/system/route-tables.service 117 | dest: /etc/systemd/system/route-tables.service 118 | owner: root 119 | group: root 120 | mode: "0644" 121 | 122 | - name: Enable route-tables service 123 | systemd: 124 | name: route-tables 125 | enabled: yes 126 | daemon_reload: yes 127 | state: stopped 128 | -------------------------------------------------------------------------------- /docs/how-to-build-ova.md: -------------------------------------------------------------------------------- 1 | # Building the Appliance 2 | 3 | This page describes how to build the appliance OVA. 4 | 5 | ## Requirements 6 | 7 | Building the OVA requires: 8 | 9 | * VMware Fusion or Workstation 10 | * Packer 1.4.1 11 | * Ansible 2.8+ 12 | 13 | ## Build the OVA 14 | 15 | To build the OVA please run the following the command: 16 | 17 | ```shell 18 | export PATH=/Applications/VMware\ Fusion.app/Contents/Library/:$PATH ## add `vmware-vdiskmanager` to path 19 | make build-ova 20 | ``` 21 | 22 | The above command build the OVA with Packer in _headless_ mode, meaning that VMware Fusion/Workstation will not display the virtual machine (VM) as it is being built. 
If the build process fails or times out, please use the following command to build the OVA in the foreground: 23 | 24 | ```shell 25 | export PATH=/Applications/VMware\ Fusion.app/Contents/Library/:$PATH ## add `vmware-vdiskmanager` to path 26 | FOREGROUND=1 make build-ova 27 | ``` 28 | 29 | Once the OVA is built, it should be located at `./output/ova/haproxy.ova` and be around `500MiB`. 30 | -------------------------------------------------------------------------------- /docs/how-to-container.md: -------------------------------------------------------------------------------- 1 | # Testing HAProxy & DataPlane API with a Container 2 | 3 | Illustrating the utility of haproxy as a load-balancer is best accomplished using a container: 4 | 5 | 1. Build the image: 6 | 7 | ```shell 8 | make build-image 9 | ``` 10 | 11 | 2. Start the haproxy image in detached mode and map its secure, dataplane API port (`5556`) and the port used by the load balancer (`8085`) to the local host: 12 | 13 | ```shell 14 | docker run -it --name haproxy -d --rm -p 5556:5556 -p 8085:8085 haproxy 15 | ``` 16 | 17 | 3. Create a [frontend configuration](https://www.haproxy.com/documentation/dataplaneapi/latest/#tag/Frontend): 18 | 19 | ```shell 20 | $ curl -X POST \ 21 | --cacert example/ca.crt \ 22 | --cert example/client.crt --key example/client.key \ 23 | --user client:cert \ 24 | -H "Content-Type: application/json" \ 25 | -d '{"name": "lb-frontend", "mode": "tcp"}' \ 26 | "https://localhost:5556/v2/services/haproxy/configuration/frontends?version=1" 27 | {"mode":"tcp","name":"lb-frontend"} 28 | ``` 29 | 30 | 4. 
[Bind](https://www.haproxy.com/documentation/dataplaneapi/latest/#tag/Bind) the frontend configuration to `*:8085`: 31 | 32 | ```shell 33 | $ curl -X POST \ 34 | --cacert example/ca.crt \ 35 | --cert example/client.crt --key example/client.key \ 36 | --user client:cert \ 37 | -H "Content-Type: application/json" \ 38 | -d '{"name": "lb-frontend", "address": "*", "port": 8085}' \ 39 | "https://localhost:5556/v2/services/haproxy/configuration/binds?frontend=lb-frontend&version=2" 40 | {"address":"*","name":"lb-frontend","port":8085} 41 | ``` 42 | 43 | 5. At this point it is possible to curl the load balancer, even if there is no one on the backend answering the query: 44 | 45 | ```shell 46 | $ curl http://localhost:8085 47 |

503 Service Unavailable

48 | No server is available to handle this request. 49 | 50 | ``` 51 | 52 | 6. Create a [backend configuration](https://www.haproxy.com/documentation/dataplaneapi/latest/#tag/Backend) and bind it to the frontend configuration: 53 | 54 | ```shell 55 | $ curl -X POST \ 56 | --cacert example/ca.crt \ 57 | --cert example/client.crt --key example/client.key \ 58 | --user client:cert \ 59 | -H "Content-Type: application/json" \ 60 | -d '{"name": "lb-backend", "mode":"tcp", "balance": {"algorithm":"roundrobin"}, "adv_check": "tcp-check"}' \ 61 | "https://localhost:5556/v2/services/haproxy/configuration/backends?version=3" 62 | {"adv_check":"tcp-check","balance":{"algorithm":"roundrobin","arguments":null},"mode":"tcp","name":"lb-backend"} 63 | ``` 64 | 65 | 7. Update the frontend to use the backend: 66 | 67 | ```shell 68 | $ curl -X PUT \ 69 | --cacert example/ca.crt \ 70 | --cert example/client.crt --key example/client.key \ 71 | --user client:cert \ 72 | -H "Content-Type: application/json" \ 73 | -d '{"name": "lb-frontend", "mode": "tcp", "default_backend": "lb-backend"}' \ 74 | "https://localhost:5556/v2/services/haproxy/configuration/frontends/lb-frontend?version=4" 75 | {"default_backend":"lb-backend","mode":"tcp","name":"lb-frontend"} 76 | ``` 77 | 78 | 8. Run two simple web servers in detached mode named `http1` and `http2`: 79 | 80 | ```shell 81 | docker run --rm -d -p 8086:80 --name "http1" nginxdemos/hello:plain-text && \ 82 | docker run --rm -d -p 8087:80 --name "http2" nginxdemos/hello:plain-text 83 | ``` 84 | 85 | 9. 
Add the first web [server](https://www.haproxy.com/documentation/dataplaneapi/latest/#tag/Server) to the backend configuration: 86 | 87 | ```shell 88 | $ curl -X POST \ 89 | --cacert example/ca.crt \ 90 | --cert example/client.crt --key example/client.key \ 91 | --user client:cert \ 92 | -H "Content-Type: application/json" \ 93 | -d '{"name": "lb-backend-server-1", "address": "'"$(docker inspect http1 -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}')"'", "port": 80, "check": "enabled", "maxconn": 30, "verify": "none", "weight": 100}' \ 94 | "https://localhost:5556/v2/services/haproxy/configuration/servers?backend=lb-backend&version=5" 95 | {"address":"172.17.0.2","check":"enabled","maxconn":30,"name":"lb-backend-server-1","port":80,"weight":100} 96 | ``` 97 | 98 | 10. With the first web server attached to the load balancer's backend configuration, it should now be possible to query the load balancer and get more than an empty reply: 99 | 100 | ```shell 101 | $ curl http://localhost:8085 102 | Server address: 172.17.0.2:80 103 | Server name: 456dbfd57701 104 | Date: 21/Dec/2019:22:22:22 +0000 105 | URI: / 106 | Request ID: 7bcabcecb553bcee5ed7efb4b8725f96 107 | ``` 108 | 109 | Sure enough, the server address `172.17.0.2` is the same as the reported IP address of `lb-backend-server-1` above! 110 | 111 | 11. 
Add the second web server to the backend configuration: 112 | 113 | ```shell 114 | $ curl -X POST \ 115 | --cacert example/ca.crt \ 116 | --cert example/client.crt --key example/client.key \ 117 | --user client:cert \ 118 | -H "Content-Type: application/json" \ 119 | -d '{"name": "lb-backend-server-2", "address": "'"$(docker inspect http2 -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}')"'", "port": 80, "check": "enabled", "maxconn": 30, "verify": "none", "weight": 100}' \ 120 | "https://localhost:5556/v2/services/haproxy/configuration/servers?backend=lb-backend&version=6" 121 | {"address":"172.17.0.3","check":"enabled","maxconn":30,"name":"lb-backend-server-2","port":80,"weight":100} 122 | ``` 123 | 124 | 12. Now that both web servers are connected to the load-balancer, use `curl` to query the load-balanced endpoint a few times to validate that both backend servers are being used: 125 | 126 | ```shell 127 | $ for i in {1..4}; do curl http://localhost:8085 && echo; done 128 | Server address: 172.17.0.2:80 129 | Server name: 456dbfd57701 130 | Date: 21/Dec/2019:22:26:51 +0000 131 | URI: / 132 | Request ID: 77918aee58dd1eb7ba068b081d843a7c 133 | 134 | Server address: 172.17.0.3:80 135 | Server name: 877362812ed9 136 | Date: 21/Dec/2019:22:26:51 +0000 137 | URI: / 138 | Request ID: 097ccb892b565193f334fb544239fca6 139 | 140 | Server address: 172.17.0.2:80 141 | Server name: 456dbfd57701 142 | Date: 21/Dec/2019:22:26:51 +0000 143 | URI: / 144 | Request ID: 61022aa3a8a37cdf37541ec1c24b383e 145 | 146 | Server address: 172.17.0.3:80 147 | Server name: 877362812ed9 148 | Date: 21/Dec/2019:22:26:51 +0000 149 | URI: / 150 | Request ID: 2b2b9a0ef2e4eba53f6c5c118c10e1d8 151 | ``` 152 | 153 | It's alive! 154 | 155 | 13. 
Stop haproxy and kill the web servers: 156 | 157 | ```shell 158 | docker kill haproxy http1 http2 159 | ``` -------------------------------------------------------------------------------- /docs/upgrade.md: -------------------------------------------------------------------------------- 1 | # Upgrade 2 | 3 | This document describes the recommended upgrade process. The upgrade process generally works by swapping out the currently deployed VM with a new VM. The cluster will automatically reconcile the routes assuming all current configuration is identical. 4 | 5 | ## Restrictions 6 | 7 | - If upgrading to vSphere 7.0.1, then HAProxy _must_ be updated to v0.1.9 or later. 8 | - Upgrade without downtime is currently not supported. 9 | - Migration between network topologies during upgrade is not supported. 10 | 11 | ## Prerequisites 12 | 13 | - A currently running HAProxy VM 14 | - A target version greater than the current version 15 | - The ability to deploy VMs 16 | 17 | ## Steps 18 | 19 | - Confirm you have enough CPU and disk space for the new VM and the old VM. 20 | - Find and copy `/etc/haproxy/server.crt` and `/etc/haproxy/server.key` out of the instance. Optionally copy the CA files if you need them. 21 | - If you have made any customizations to `/etc/haproxy/haproxy.cfg` or `/etc/haproxy/dataplaneapi.cfg` then make backups or notes of those customizations at this time. 22 | - Deploy the new VM with an _identical_ configuration as the currently running instance using the existing HAProxy `server.crt` and `server.key` as inputs. 23 | - Power down the old instance. You can revert back to this instance if something goes wrong with the upgrade. 24 | - Optionally add resource reservations for the new VM. 25 | - Power on the new instance. 26 | - Verify the configuration on the new VM. See [verification](#verification) below on the minimum recommended checks to perform. 27 | - If you're confident the upgrade has succeeded, remove the old VM. 
28 | 29 | ## Verification 30 | 31 | After a short time to deploy, you should be able to log into the new instance. Verify services are up and running and the configuration is correct. 32 | 33 | Minimum verification steps should include the following: 34 | 35 | Verify haproxy and dataplaneapi services have started and are running: 36 | 37 | ``` 38 | systemctl status haproxy 39 | systemctl status dataplaneapi 40 | ``` 41 | 42 | Re-apply any custom changes to the `haproxy.cfg` or `dataplaneapi.cfg` files and restart the services. 43 | 44 | On the service side, ensure service-type load balancer external IPs are reachable on both the supervisor and Tanzu clusters. 45 | 46 | Finally, ensure the cluster starts programming routes via dataplaneapi into `haproxy.cfg` and spawning new HAProxy processes. If this happens, you should see the `haproxy.cfg` grow in size as routes are added. This may take some time if the cluster operator is in an exponential backoff loop. As a rule of thumb, if you don't have routes within 5 minutes then something is probably wrong. Double check dataplane api logs to ensure it is processing traffic. 47 | 48 | ``` 49 | journalctl -xeu dataplaneapi 50 | ``` 51 | 52 | ## Recovery 53 | 54 | If things don't go according to plan and the upgrade cannot continue, then power off the new appliance and power on the old appliance. It is _very_ important to not run both appliances on the same network because they will ARP for the same IP addresses causing flaky connections. 55 | -------------------------------------------------------------------------------- /docs/virtual-ip-config.md: -------------------------------------------------------------------------------- 1 | # A Guide to Virtual IP management in the HAProxy Appliance 2 | 3 | ## Summary 4 | 5 | HAProxy setups are often configured to load-balance traffic on a single 6 | endpoint, optionally using a floating IP in a Highly Available configuration.
7 | 8 | This HAProxy appliance is designed to allow HAProxy to load balance traffic 9 | across a range of Virtual IPs (VIP). This uses a capability of Linux called 10 | AnyIP which allows the appliance to respond to all IP addresses within 11 | specified ranges. 12 | 13 | This capability works in symbiosis with HAProxy and the external control plane 14 | managing HAProxy configuration. In the case of vSphere with Tanzu, the 15 | Supervisor Cluster is that control plane. The external control plane manages 16 | a pool of IPs that it can allocate to various services. When a new service is 17 | defined, the external control plane configures HAProxy with a frontend 18 | definition containing an IP it allocates from the pool, which should - if 19 | everything is correctly configured - fall within a VIP range defined using 20 | AnyIP. 21 | 22 | ## How does AnyIP work? 23 | 24 | AnyIP works using the local routing table in Linux, defining Virtual IPs the 25 | appliance can use. You can view the routing table with the following command: 26 | 27 | ```shell 28 | ip route list table local 29 | ``` 30 | 31 | By adding an entry to the routing table containing an IP range in CIDR notation 32 | assigned to the loopback interface, the appliance will immediately start 33 | responding to all IPs in the range. You can experiment with this in a Linux VM using: 34 | 35 | ```shell 36 | ip route add local <CIDR> dev lo 37 | ping <IP within CIDR> 38 | ``` 39 | 40 | Removing the range is achieved by: 41 | 42 | ```shell 43 | ip route delete local <CIDR> 44 | ``` 45 | 46 | Note that these examples are purely for illustration. You don't have to manually 47 | configure the routing tables in the appliance. That is managed for you 48 | (see below). 49 | 50 | ## How do I configure VIP ranges in the appliance?
51 | 52 | When you deploy the appliance, it will ask you for one or more VIP ranges which 53 | it will then persist in a configuration file 54 | [`/etc/vmware/anyip-routes.cfg`](../ansible/roles/vmware/files/etc/vmware/anyip-routes.cfg) 55 | (see [video clip here](https://youtu.be/wfYDDbBJHfM?t=920)). The appliance will 56 | then automatically ensure that the local routing table in the appliance is 57 | kept in sync with this file. It does this using a simple utility running 58 | as a systemd service which you can view using `systemctl status anyip-routes`. 59 | 60 | Once the appliance is up and running, you can test that the AnyIP is working 61 | by pinging IP addresses in the configured VIP ranges. You can do this from 62 | within the appliance or outside. It should respond to all of them. 63 | 64 | If you want to extend the VIP ranges of the appliance, you can modify the 65 | [`/etc/vmware/anyip-routes.cfg`](../ansible/roles/vmware/files/etc/vmware/anyip-routes.cfg) 66 | file and that will automatically update the routing tables. However, please 67 | note that the external control plane managing the configuration of the HAProxy 68 | instance *also* needs to be told that it can allocate IPs out of this new 69 | range (see above). 70 | 71 | ## What precautions do I need to consider? 72 | 73 | You need to be careful using AnyIP because the appliance will immediately 74 | respond to *every* IP in the range. That means that anything else on the 75 | network that has been assigned an IP in that range could be impacted. A 76 | common misconfiguration is to define a range that overlaps with a gateway 77 | or nameserver. 78 | 79 | It also means that you cannot assume that just because a VIP is pingable, 80 | that HAProxy has a frontend configured and defined for it. The way to test 81 | that HAProxy is serving traffic is to use a utility like `curl`. 82 | 83 | Note that HAProxy itself has no knowledge of the AnyIP configuration, but 84 | does have a dependency on it. 
HAProxy will not start if it has a frontend 85 | defined with an IP that it cannot bind. In other words, all frontend IPs 86 | configured in HAProxy need to be within a defined VIP range. 87 | 88 | ## Troubleshooting 89 | 90 | AnyIP is a fairly simple concept, so there's not too many things to check 91 | 92 | - Check that you can ping VIPs from a shell within the appliance 93 | 94 | - Ensure that the VIP ranges you want are in 95 | [`/etc/vmware/anyip-routes.cfg`](../ansible/roles/vmware/files/etc/vmware/anyip-routes.cfg) 96 | and there are no typos 97 | 98 | - Check that the VIP ranges you want are present in the local routing table: 99 | 100 | ```shell 101 | ip route list table local 102 | 103 | # you should see an entry that looks like this: 104 | local 192.168.20.128/25 dev lo scope host 105 | ``` 106 | 107 | - Make sure that DHCP is not assigning IP addresses in the same range 108 | 109 | - Make sure the VIP ranges don't overlap with any infrastructure IPs, such as a gateway or nameserver 110 | 111 | - Make sure that the anyip-routes service is enabled and running: 112 | 113 | ```shell 114 | systemctl status anyip-routes 115 | ``` 116 | 117 | - Check the output of the anyip-routes service to see if anything unexpected happened: 118 | 119 | ```shell 120 | journalctl -xeu anyip-routes 121 | ``` 122 | 123 | - Make sure that the configuration on the control plane managing HAProxy 124 | frontends agrees with the configuration in the appliance. In the case of 125 | vSphere with Tanzu Workload Management, it's the field marked "IP Address 126 | Ranges for Virtual Servers" (see [video clip here](https://youtu.be/wfYDDbBJHfM?t=1947)). 127 | The ranges in here should be the same or a subset of the ranges in 128 | [`/etc/vmware/anyip-routes.cfg`](../ansible/roles/vmware/files/etc/vmware/anyip-routes.cfg). 
129 | 130 | ## Advanced 131 | 132 | If you want to dive deeper into the plumbing of the appliance VIP management, 133 | you can look at [`/var/lib/vmware/anyiproutectl.sh`](../ansible/roles/vmware/files/var/lib/vmware/anyiproutectl.sh) 134 | and [`/var/lib/vmware/routetablectl.sh`](../ansible/roles/vmware/files/var/lib/vmware/routetablectl.sh). -------------------------------------------------------------------------------- /example/README.md: -------------------------------------------------------------------------------- 1 | # Example data 2 | 3 | The files in this directory: 4 | 5 | * **Are** example data used for testing 6 | * Are **not** included in any of the images 7 | * Should **not** be used in production systems 8 | -------------------------------------------------------------------------------- /example/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDjzCCAnegAwIBAgIJAPuZr7nL/tRlMA0GCSqGSIb3DQEBBQUAMGUxCzAJBgNV 3 | BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8x 4 | DzANBgNVBAoMBlZNd2FyZTENMAsGA1UECwwEQ0FQVjENMAsGA1UEAwwEY2FwdjAe 5 | Fw0xOTEyMjMxODQzMzdaFw0yOTEyMjAxODQzMzdaMGUxCzAJBgNVBAYTAlVTMRMw 6 | EQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8xDzANBgNVBAoM 7 | BlZNd2FyZTENMAsGA1UECwwEQ0FQVjENMAsGA1UEAwwEY2FwdjCCASIwDQYJKoZI 8 | hvcNAQEBBQADggEPADCCAQoCggEBANfNCeDtgvR5LukAmIR7FSw+vk9D9EagLoJz 9 | p/PbCsOzB4HK2kPMPa3co+PRAUP0hZZzyKhhK8FZEUwm8Zy6a7SI9pGCyzi2JKo6 10 | fIzWXGRqKshkwJSWFkboAMwHQNpL78blplM4RRUZHsoXvslwStkohB2/b2qcK9+X 11 | N3zjccffECBNmQXuVST7dnINYl1c4VDYUwHPMwVNleeNIDXSYuUs2zlKpcIjSeLq 12 | 5KKAed7iew75Gs+vvBbjeMqVJNFZVBUE/VUo5Ah2BLMsnp0Ho8kZGJIAW+QYLY4c 13 | wbrYBpnjIVFv07eia60OwhjWvGlopINfuPHhWVkreUNkyvDuV60CAwEAAaNCMEAw 14 | DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFA8Q016e 15 | 3JD5NZGyQqU54xDV2uPLMA0GCSqGSIb3DQEBBQUAA4IBAQDMHxLDpfBEQTr8mpCI 16 | MrSU7lsoOCjrJplDOscM94xa8GTwW9Qw8nLBW8fTs7jxoUVfOrfGKXVHI1K+cH2d 17 | 
9hfZrcpFb7irjyMpzst16tQdPR2WBOb8FBa2NeUlpK8Ij3WsJydSD8wAD0uIj/l2 18 | NCkwLmH4LL04fhZxC5Gk+haNf6mXhqmX9/S9Eddm1P7gfkEvaP8mQR6INIpg2ahN 19 | FO7c6GMD86bZqrcnggT4onWwD7zYFQ21x545XcpoYGyI4rRRTmUeX/AMrx6m3oM2 20 | jAfels+RGMoo+WgNTzFBZztoY4ZFLH8FeQlWPgD4ZwAT/3L7gMl6M9QJ30rVX52J 21 | 4a2Y 22 | -----END CERTIFICATE----- 23 | -------------------------------------------------------------------------------- /example/ca.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEA180J4O2C9Hku6QCYhHsVLD6+T0P0RqAugnOn89sKw7MHgcra 3 | Q8w9rdyj49EBQ/SFlnPIqGErwVkRTCbxnLprtIj2kYLLOLYkqjp8jNZcZGoqyGTA 4 | lJYWRugAzAdA2kvvxuWmUzhFFRkeyhe+yXBK2SiEHb9vapwr35c3fONxx98QIE2Z 5 | Be5VJPt2cg1iXVzhUNhTAc8zBU2V540gNdJi5SzbOUqlwiNJ4urkooB53uJ7Dvka 6 | z6+8FuN4ypUk0VlUFQT9VSjkCHYEsyyenQejyRkYkgBb5BgtjhzButgGmeMhUW/T 7 | t6JrrQ7CGNa8aWikg1+48eFZWSt5Q2TK8O5XrQIDAQABAoIBACOjWdlKgBDtnmCe 8 | V5GxXerDpdwjRckQFP44KWltKBbvjvLRVEBUD2+R+4LY9lOJozIYhu+/tGEm22Nv 9 | HwGaC8VxxP580iDYe6+dHwqHMBTpL42Ojfs72gv1roQDQqOKXNvE+zXNGiOE1X/c 10 | cgaEQ+ge98qN3dGGXvx61ZALY7P1BcHzEhZl21sGBi7/qCcPAFRIFAEvLUdZbAcN 11 | PAE3FDAdFeRjqGywWrdYVpzR0FE2+xITUj73+jOKkn03diZVxB0hJ1EugWHnGc7X 12 | c10zUwpM2VoBrmr5R/fhcgpRfET8vA0EJkWmAgxmtwY64dU/TBB2PhHLS2noD8Q/ 13 | X0XVYYECgYEA+sxAVVkoVB/d6HGHH7kKifnc0qXmiC8RVLcrXTTLsJE7L1gOBMfM 14 | J2z7EEps0lRAvdzruZefqoVpd8Mh44zekBCy1JNMzl5y4OwrLLXxlySXMnLfETvA 15 | MW9fGnyAIPG4/a633zG2WGAm3EFgVoxTTSKok3y7lmOGgPDpH8mqzOECgYEA3Ebz 16 | sbkG9jyk/R/2qEl3LxM6ynt7VKfDfteOs6Rx7mce01KF9WNv9gXv4LYEiWgRNd+c 17 | c38EeXcPqG3AhY853721nGP1zl0CXbSrDt/bB1RZUWqhdYB+tOtzXCxm1QR1te/3 18 | MoX2fXJm5/jEwojHofxxEpTVmHMjQ0vDIvntuE0CgYBXlZL1+1/pGQPfFB6TRoTW 19 | sIqcidFbR8yuoBUlxLVJoT5hB0hGBRxXvGhlRQiB32iIpakwtDHPVC4D5AJmvCBR 20 | gXNiZ1qQS02lHPTq9VM8bEvdE16xXwN8gB9fWZFJcAEhnq2Z5Xt/m3yWuMITF4hT 21 | zMHAV/QOzgz/5KIVNtFOIQKBgQCjDr6cQ4wcwL2dRojvABsCtOhjNM8R1nIHtgdD 22 | gap4wMr3wXG6OVaKttBf9j0bffane5SzhkXIqFLl6gCGnYRI1ITYdMJjdUQoG3I4 
23 | u4rGPTE07IsCkRC6WkR16cRhUUDVYgIJ21KggAwfEW6NVnT4uwb0q0oF5M0opq+X 24 | d4z6TQKBgBXSWaR2zk12Z7wxFy4W04nrZlyr3eMX8xnyhlpmaYbkF17R26cAY5Gs 25 | x6DRp6vI4VvtQdh0FVVBtwoUVsudOHQCFaASLaIHb5S7p6WorTnGpI0szo6RCAqN 26 | yFMRD/+V5NxtoYaHKR/Poqdj3f22Oko8rKYxpU0j/mGET+tPaF5s 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /example/client.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIDpTCCAo2gAwIBAgIJAMXCj3EmByuLMA0GCSqGSIb3DQEBBQUAMGUxCzAJBgNV 3 | BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8x 4 | DzANBgNVBAoMBlZNd2FyZTENMAsGA1UECwwEQ0FQVjENMAsGA1UEAwwEY2FwdjAe 5 | Fw0xOTEyMjMyMjA0MDZaFw0yOTEyMjAyMjA0MDZaMGUxCzAJBgNVBAYTAlVTMRMw 6 | EQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8xDzANBgNVBAoM 7 | BlZNd2FyZTENMAsGA1UECwwEQ0FQVjENMAsGA1UEAwwEY2FwdjCCASIwDQYJKoZI 8 | hvcNAQEBBQADggEPADCCAQoCggEBAOIWfe9L7nOMcBNdmWWvtyVB/nKrVujVP9yO 9 | ahs9dG5avWrSKp6TZqQqHyQunz2P1j/3eUNBVsB7q2iac7lvEas8i8gvM6cnZjCy 10 | sPunOpcHTtmZ7cklW2xJk/67g7CderMHUFlEKZ2Gg0dKy1GHT0K60xBpbbcr7ULH 11 | bAsSWfpQqr3IoLJVIz0VIkyZoFDc8Mq8yWFxTFpdM53tIQ5tRJbFpXVq1FKR0QnM 12 | fSgpl3BXHf9hqoiEwtt0NLaTa8oj33gfAd9JdkJyQGMxinRFNGITlS63I3rdAr2E 13 | bTZ6pV1XnGbOwfQjDMPB4eR7LH97IF+nrpPyp+Idz0whGMrFATUCAwEAAaNYMFYw 14 | CQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsG 15 | AQUFBwMBMB0GA1UdDgQWBBRPgsHYMwkkUtbI3emNlA8M/GJ6VTANBgkqhkiG9w0B 16 | AQUFAAOCAQEAbfyRDthXiLECmCI9cQe6Q9wMSSupqwuRfYZjMPcWfKiqTSlzug2z 17 | K5i0DaksZczkSabZQY4C2Dhc4IY2WvDZE6CErMmMvWgbC68Uy3fJiyyxYZslA79R 18 | 7tBqNyjZ/uD/3hlxC+tj6W6K01g8pZnftJLqm1PbobPTOzn4OObPfb8rUrWUvN+N 19 | mICeqNzl9NaOylKo6KtpZrd6w0+AEBhN0O7/3VB2smu/iwCvusSAX0kqiK5r0m6f 20 | M1H3ksI6jzuHbl4DzhiOGyUpPKczHsG9KWid58Z3/JWl86J4jE1yt8zdAP7fk+dO 21 | 4OET19pMmMHYg9NKRW1HpQhUbx3oOhEv1g== 22 | -----END CERTIFICATE----- 23 | 
-------------------------------------------------------------------------------- /example/client.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEpAIBAAKCAQEA4hZ970vuc4xwE12ZZa+3JUH+cqtW6NU/3I5qGz10blq9atIq 3 | npNmpCofJC6fPY/WP/d5Q0FWwHuraJpzuW8RqzyLyC8zpydmMLKw+6c6lwdO2Znt 4 | ySVbbEmT/ruDsJ16swdQWUQpnYaDR0rLUYdPQrrTEGlttyvtQsdsCxJZ+lCqvcig 5 | slUjPRUiTJmgUNzwyrzJYXFMWl0zne0hDm1ElsWldWrUUpHRCcx9KCmXcFcd/2Gq 6 | iITC23Q0tpNryiPfeB8B30l2QnJAYzGKdEU0YhOVLrcjet0CvYRtNnqlXVecZs7B 7 | 9CMMw8Hh5Hssf3sgX6euk/Kn4h3PTCEYysUBNQIDAQABAoIBAQCNtbdd5GQjvOUK 8 | 3mIl4IuVKNZKHact7WxH3GQZit2NxgZwDCd2mcF+KIC4dxiMx7ltArrZMv0jTODV 9 | geoDUuDqSdr7sMpZfVKKN5bDRcBtpcEAl4D50RaKu1uuEO6sJykfSfhM23KSMBvc 10 | 9b6W7Y76rotZABwq8beXYdQQ5IHNaNDTvNvrmtOpVnzHjwcw0ZWMLqwipcmuhLot 11 | vm9RriOcIJqAT13LAkp4+29fpP5RMTmNseTYZryN9UKg1BToeP8jVALxgx48yxLE 12 | tYVHN3TIq5pNk7D1CJ/HAcMhxEL3D5St0RGCv7GzGWC5+Wk486V/tnhpvKIJPI+5 13 | nShBsuxBAoGBAPXDP9/8qDVOFuhwdo9a+T3rOCN2iXNlAndJgCLdv8Kf1u92o8hu 14 | mKLGvnSbnt67zVWIZWSSmstCn8zSYNj96BkCmHiIXrhAeeBQPJsf60kMBnrYgs/Z 15 | gOqzH9JOw1NcnIkyhZVSjInn39rWFiKTWI7CRyeRdC4IMxQU/vQLw+JNAoGBAOuB 16 | b12UcQtF5EzK5nxweA55IwCff5VTiwRoHX2FyTUSahn9K6S8P/FN+VQMu5fsauek 17 | 60gHHfLuXAYijQ1Km4bdgZyJhVLN+WzNHmDJGQsuXPRJ+idHTy0R39MCSxcNSonQ 18 | +TisMyg3EEIKVJ24J4OlgPusH7NPv6yhm85tS36JAoGBAOLb6QqJ33vVKbBGoCqU 19 | f554ksmpkhfDFhOm9XE54Nl3UqCZk3ZhIOShMQ3S2UQhd9mMnovICLu4NGqNiHjF 20 | aIotqzEYMNdELTyy1D8dp8M2JoUfdyEGVcpQrv8jVYqN4rGCwWylVrW2JR2MocIo 21 | 4YZmL+iGjAgx6XSQLQh6E8fBAoGAbQW/i1/DsUdKt+4aIzNhsLmNZaVwx60kJwcX 22 | 19sOWV5L9foIsTtgkpHZQXqfgWY120SykuaQi7yip0hpaeTG+PkkHlZffQTTWfXf 23 | AUk3KcDt0T1J69MMKT4kEqf2IRbLEd/G7+Bv0kcjZJ8pqtXsnPoKKvf0uOrLPdyW 24 | p0pbb5kCgYBTs+/RbqdeTpbkKXBcaYYcIpr9+HOiaGLl1LHhuOPzU8AqV5mOXg4d 25 | jidtLwlOklv6wUvMVJMchUjJJHpY9kn6E++3QvB29BcfK85fP9oKf0WB0Lp0z13G 26 | t+d3Y+TAco6lzgo1lIUn/LxxPodeLiCN04saLy7jGlUmWQF53lSN2Q== 27 | -----END RSA PRIVATE KEY----- 28 | 
-------------------------------------------------------------------------------- /example/id_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN OPENSSH PRIVATE KEY----- 2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn 3 | NhAAAAAwEAAQAAAQEAxSCrM09uuR2LocFidwsKMwilyN8veG0KKHEH9fxEau15Fiee1t65 4 | G6TF7TNJdSCn4Y55EcxZFw2uVoEWBbwKLIevC3ipmIaCWDip8r8f52AU/2mFnS7v3nYL/E 5 | /BmChtTIlOkXTb7BrFyACoZJwbXLJk3Edp4hMDpl0OTPz96DLMFpPuFiHFsiVOgVgEGyv2 6 | pVZ+ZCEUBWrOO5Oj5JtU4O0yr/jDl+iT87/IDqsb6nFxSqBHE7YQtibCr5kcADrS6wR4eQ 7 | TMzilfRvgp5oM5jP6eqlNZ33m7wzbgbc04VET9JU3CRpn/oOVrWyfL3VHfjABWRqb6AEZd 8 | b9bOFG5HnQAAA8BPyNvFT8jbxQAAAAdzc2gtcnNhAAABAQDFIKszT265HYuhwWJ3CwozCK 9 | XI3y94bQoocQf1/ERq7XkWJ57W3rkbpMXtM0l1IKfhjnkRzFkXDa5WgRYFvAosh68LeKmY 10 | hoJYOKnyvx/nYBT/aYWdLu/edgv8T8GYKG1MiU6RdNvsGsXIAKhknBtcsmTcR2niEwOmXQ 11 | 5M/P3oMswWk+4WIcWyJU6BWAQbK/alVn5kIRQFas47k6Pkm1Tg7TKv+MOX6JPzv8gOqxvq 12 | cXFKoEcTthC2JsKvmRwAOtLrBHh5BMzOKV9G+CnmgzmM/p6qU1nfebvDNuBtzThURP0lTc 13 | JGmf+g5WtbJ8vdUd+MAFZGpvoARl1v1s4UbkedAAAAAwEAAQAAAQAR4DeGLKLWyJYb8gRy 14 | 1R50qEkYYRzV59Vu+2kEZn7xz10WpDskMwhIOHX2X5s+stpmetwBwC0oCQaRM52CoZ2ukh 15 | NOj/+ZJEF3rJPEvo4vFihxTOlf6py36K6Hj9f3a1sWALGOQTGcRIVA8MZUcU+N5WN+Ej/I 16 | z36aPIAKfTqtLp1CdaAuw/9l48+e71yE4ESiaXBhTGlaTzATqLcN2URhjYwsJZverxlEtE 17 | qWQmQ+vwm0JT1hsU5OEm/K7rtg9h0DZh1fLZlkbOSDX01nOsNnx4eAtuq+TLOuCvcc7Fl9 18 | FT4oYbGlhad/xLtxbcXrukEMLI3x4cMMfTVxJIQ5PcThAAAAgDu0Jmj+bWT4HhCTmfL8jG 19 | g14j3HxyDx7fXCF7VJvZfpffxhkmHJEJL5+450XWm5XVE/4yYPF0Gghyh0wHcdXlB8IMHl 20 | D7u9eWBOGNCSxv0uZAVKnRRlRsxF6kZ4uN+T+BGfIHTOBwli2xYv7tAAyryShbcf1iMKUh 21 | SY31U9BsokAAAAgQDttiLUC4wjwlqEwAWg7FAnmz7QQ7wX6SxVWhbWm5XTkX3NfF2n4Ukg 22 | qLoZh63RT3akrVtrWj0jG/Ozu/csSSNz0lgh1NbkWe9/wwiJWyKZEpHv5TIn6FhBSp4Txk 23 | jBD68gkV80ys3Uw0SwI7eIUkKkPgUkMZMlYkeglXiDxHVjlQAAAIEA1Es12D9jRRHmNJgk 24 | XOI8XObwfXZL+QVY8n2afliO3T3zDAwzbILY1Fn9DdSc87cjnadZ5lL3GPamxBzrdBnBx+ 25 | 
DBU4b5oeUHkCh+bvDxdrbc4hsiVqgcRGD2p7M2001tcjJUCcqSA5r6wDTYO+WvAoMcZurr 26 | NoSRtzHmev+i0ekAAAAEY2FwaQECAwQFBgc= 27 | -----END OPENSSH PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /example/id_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFIKszT265HYuhwWJ3CwozCKXI3y94bQoocQf1/ERq7XkWJ57W3rkbpMXtM0l1IKfhjnkRzFkXDa5WgRYFvAosh68LeKmYhoJYOKnyvx/nYBT/aYWdLu/edgv8T8GYKG1MiU6RdNvsGsXIAKhknBtcsmTcR2niEwOmXQ5M/P3oMswWk+4WIcWyJU6BWAQbK/alVn5kIRQFas47k6Pkm1Tg7TKv+MOX6JPzv8gOqxvqcXFKoEcTthC2JsKvmRwAOtLrBHh5BMzOKV9G+CnmgzmM/p6qU1nfebvDNuBtzThURP0lTcJGmf+g5WtbJ8vdUd+MAFZGpvoARl1v1s4Ubked capi 2 | -------------------------------------------------------------------------------- /example/meta-data: -------------------------------------------------------------------------------- 1 | instance-id: haproxy 2 | local-hostname: haproxy.local 3 | network: 4 | version: 2 5 | ethernets: 6 | nics: 7 | match: 8 | name: * 9 | dhcp4: no 10 | dhcp6: no 11 | wait-on-network: 12 | ipv4: yes 13 | cleanup-guestinfo: 14 | - userdata 15 | - vendordata 16 | -------------------------------------------------------------------------------- /example/server.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIID0zCCArugAwIBAgIJAMXCj3EmByuJMA0GCSqGSIb3DQEBBQUAMGUxCzAJBgNV 3 | BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8x 4 | DzANBgNVBAoMBlZNd2FyZTENMAsGA1UECwwEQ0FQVjENMAsGA1UEAwwEY2FwdjAe 5 | Fw0xOTEyMjMxOTA4NDBaFw0yOTEyMjAxOTA4NDBaMG4xCzAJBgNVBAYTAlVTMRMw 6 | EQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8xDzANBgNVBAoM 7 | BlZNd2FyZTENMAsGA1UECwwEQ0FQVjEWMBQGA1UEAwwNZGF0YXBsYW5lLWFwaTCC 8 | ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKhgCJ7uGBbAqjiNHLZjrHBd 9 | ztmLJ69PBh6IBHNX2cWxWlUl40wMcHzxQtC00lfNDShjZL6+cX6rgG4cdG7rXMwe 10 | 
7lmJXgYubSN2cTQCkgajHDkxKOJLuS0gNVyCKiH/ZYkdjm4Q8KNN9ESyfSHpag0H 11 | WXwdixxgxd7hw1nkU2M5WSAwQuonmo4L/grC0ZEykVnYeIl/CFyfC+z2etNnoFtT 12 | TjeUZgYMp/lV4z5ILQX0kGGibbHLVRdxOxeeawarbMZN5q8hg4F/XAz6MkVePpjT 13 | l7FbUIgMTPf8VIlVYYoRHDurzcu6sQAZcTp86li4F/v58toHdrlmRFShNArS1kMC 14 | AwEAAaN9MHswCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB 15 | BQUHAwIGCCsGAQUFBwMBMB0GA1UdDgQWBBTwpC5N5rqYLLctGKKO4+KtApKY4TAj 16 | BgNVHREEHDAagg1kYXRhcGxhbmUtYXBpgglsb2NhbGhvc3QwDQYJKoZIhvcNAQEF 17 | BQADggEBAISh6MEwIY0oM0ASOXahvJwAHsQrvBY7AskpT/Ajt2S5Z9tU6uCoo6t+ 18 | 8XVxYqN3QZcOKhm79uZVcEbx8WqU+MSILbHNnCXOPnClQL5pRwocCChiCBQ3s+NC 19 | cMV48VDr9Th6lgcRLW7qStym8X3/5TgagfpZYoBQxtdvK51+UcKe02JxrlXs9vxj 20 | tXxXWRlERkvx7xRTivvgTl0EhrDvnre3FZOpBf0GxcbzSGJzChRde4JMmbruHvPJ 21 | cGWHNycwxNn6SJs5NbKD1TSRkhtLJxFYKKWPMlea0cZ1eI06AXyxO16WXiMblcVz 22 | 5ufvE4rLautw1icbL6uFPvqfw6G4yuw= 23 | -----END CERTIFICATE----- 24 | -------------------------------------------------------------------------------- /example/server.key: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEogIBAAKCAQEAqGAInu4YFsCqOI0ctmOscF3O2Ysnr08GHogEc1fZxbFaVSXj 3 | TAxwfPFC0LTSV80NKGNkvr5xfquAbhx0butczB7uWYleBi5tI3ZxNAKSBqMcOTEo 4 | 4ku5LSA1XIIqIf9liR2ObhDwo030RLJ9IelqDQdZfB2LHGDF3uHDWeRTYzlZIDBC 5 | 6ieajgv+CsLRkTKRWdh4iX8IXJ8L7PZ602egW1NON5RmBgyn+VXjPkgtBfSQYaJt 6 | sctVF3E7F55rBqtsxk3mryGDgX9cDPoyRV4+mNOXsVtQiAxM9/xUiVVhihEcO6vN 7 | y7qxABlxOnzqWLgX+/ny2gd2uWZEVKE0CtLWQwIDAQABAoIBADiCOoJ0cBs6wCZI 8 | nclMvHPd1+1E+aWphOk8Rxoa8tiZhDKti5JbXmlgY0c+WJXlGLRkX0xxCBA2KN1L 9 | qXwMe9F51A6yvp6eeDx9pNKKxk158p/zMsB4H4bYl3Abs15Do+UXyVSBVPHJ2OAx 10 | czOtbc0/mTXBiI2WoPIDnRePPLlA23Gyh1tCL/2qUKlIm97UlA3lfEY720IvOppx 11 | cKynnRFqR0BSc0cSSGzWdaaDogJoPI0QryVil89uqtpLRjSZWQSyxPgYsUqmZ2Qv 12 | ELZ24sEyt5I1ET0AR7Zo2j9R+0fv/4og9S1T181iZV/1cPO1QiOSQuaAECLG2JVc 13 | GwOJN7ECgYEA2nRiMgDL0YN3aYwSh6o8QjG5sqK+89oUZ6NJxUJyPEdhTv9fIgvS 14 | 
3Go7kp6jVtbQpOQ4ujWG8XrMyg0OocMZNI0TfEQwlpW2R61qYqlXn5NrnzgRy7Kp 15 | k5Nh7N3biySGfCsGqDQacBqPb+Hw2Vz6LnHkwjHvahfAT2ilk6hNzksCgYEAxVA9 16 | veGfuCSo69azHXuNWZdk/TyKOH9M3Ua0yQyA11O5GQQpcekZJ1HNLXNpkJIdlrgv 17 | 6TXUteY96/8Go0+Emz9pNVhtaWtmK7nXGeIqzebuAhOr1BI0f1RfLZPYmGWG0FGh 18 | rMOw6ExAQ2efLbrgDR8veiLutjmtXdhXAvupvOkCgYB6QI+DdDab9XqqMp1cFsTK 19 | NpG5iqJaTT0GEreCEQFlAn1OfFN89ij2+OyFzQmT23vAC4hDjRkLorFgkZYSqOA+ 20 | LIUvEeovG3F7A2158VL8FsAvxm0PFdkYoW435KCsabBpZJrBHwd8nSCQeF0VB5dq 21 | zoo6sz9MnCYfSmz/CT0hqQKBgARGdNAEb32B7dcOU9szriBe02VZRnVq7LNusMQa 22 | bUA9JCmSmYNKVa0wbI2rjoH/aHy3iPI4pF88meLfJ7mqI2lpulHZlKVQGmqgIDUD 23 | XYIQmt+YpYR20IRhw54tMeg7auxgmGwx4UdnP402VZCcF/aw0kqHLzzriwIZieka 24 | otbhAoGAYk5LT8KkiHfxoZvU634U0RV4iEi98OK0Zld+zZVeE9LA9awEJ5UUFGrU 25 | bXuJ+Gw46ZDqOh3Tu3vOSb5sZqSkImNEi8p6+E6rCGDOXqqAvFyjeoOBFOAtIIVI 26 | PV9I1CbM8DgGMoUI/PGTIU/PENfGkAMiZ4pZR6Yv+HUc0+VCaH8= 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /example/user-data: -------------------------------------------------------------------------------- 1 | ## template: jinja 2 | #cloud-config 3 | 4 | write_files: 5 | - path: /etc/haproxy/ca.crt 6 | owner: haproxy:haproxy 7 | permissions: "0640" 8 | content: | 9 | -----BEGIN CERTIFICATE----- 10 | MIIDjzCCAnegAwIBAgIJAPuZr7nL/tRlMA0GCSqGSIb3DQEBBQUAMGUxCzAJBgNV 11 | BAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8x 12 | DzANBgNVBAoMBlZNd2FyZTENMAsGA1UECwwEQ0FQVjENMAsGA1UEAwwEY2FwdjAe 13 | Fw0xOTEyMjMxODQzMzdaFw0yOTEyMjAxODQzMzdaMGUxCzAJBgNVBAYTAlVTMRMw 14 | EQYDVQQIDApDYWxpZm9ybmlhMRIwEAYDVQQHDAlQYWxvIEFsdG8xDzANBgNVBAoM 15 | BlZNd2FyZTENMAsGA1UECwwEQ0FQVjENMAsGA1UEAwwEY2FwdjCCASIwDQYJKoZI 16 | hvcNAQEBBQADggEPADCCAQoCggEBANfNCeDtgvR5LukAmIR7FSw+vk9D9EagLoJz 17 | p/PbCsOzB4HK2kPMPa3co+PRAUP0hZZzyKhhK8FZEUwm8Zy6a7SI9pGCyzi2JKo6 18 | fIzWXGRqKshkwJSWFkboAMwHQNpL78blplM4RRUZHsoXvslwStkohB2/b2qcK9+X 19 | 
N3zjccffECBNmQXuVST7dnINYl1c4VDYUwHPMwVNleeNIDXSYuUs2zlKpcIjSeLq 20 | 5KKAed7iew75Gs+vvBbjeMqVJNFZVBUE/VUo5Ah2BLMsnp0Ho8kZGJIAW+QYLY4c 21 | wbrYBpnjIVFv07eia60OwhjWvGlopINfuPHhWVkreUNkyvDuV60CAwEAAaNCMEAw 22 | DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFA8Q016e 23 | 3JD5NZGyQqU54xDV2uPLMA0GCSqGSIb3DQEBBQUAA4IBAQDMHxLDpfBEQTr8mpCI 24 | MrSU7lsoOCjrJplDOscM94xa8GTwW9Qw8nLBW8fTs7jxoUVfOrfGKXVHI1K+cH2d 25 | 9hfZrcpFb7irjyMpzst16tQdPR2WBOb8FBa2NeUlpK8Ij3WsJydSD8wAD0uIj/l2 26 | NCkwLmH4LL04fhZxC5Gk+haNf6mXhqmX9/S9Eddm1P7gfkEvaP8mQR6INIpg2ahN 27 | FO7c6GMD86bZqrcnggT4onWwD7zYFQ21x545XcpoYGyI4rRRTmUeX/AMrx6m3oM2 28 | jAfels+RGMoo+WgNTzFBZztoY4ZFLH8FeQlWPgD4ZwAT/3L7gMl6M9QJ30rVX52J 29 | 4a2Y 30 | -----END CERTIFICATE----- 31 | - path: /etc/haproxy/ca.key 32 | owner: haproxy:haproxy 33 | permissions: "0440" 34 | content: | 35 | -----BEGIN RSA PRIVATE KEY----- 36 | MIIEowIBAAKCAQEA180J4O2C9Hku6QCYhHsVLD6+T0P0RqAugnOn89sKw7MHgcra 37 | Q8w9rdyj49EBQ/SFlnPIqGErwVkRTCbxnLprtIj2kYLLOLYkqjp8jNZcZGoqyGTA 38 | lJYWRugAzAdA2kvvxuWmUzhFFRkeyhe+yXBK2SiEHb9vapwr35c3fONxx98QIE2Z 39 | Be5VJPt2cg1iXVzhUNhTAc8zBU2V540gNdJi5SzbOUqlwiNJ4urkooB53uJ7Dvka 40 | z6+8FuN4ypUk0VlUFQT9VSjkCHYEsyyenQejyRkYkgBb5BgtjhzButgGmeMhUW/T 41 | t6JrrQ7CGNa8aWikg1+48eFZWSt5Q2TK8O5XrQIDAQABAoIBACOjWdlKgBDtnmCe 42 | V5GxXerDpdwjRckQFP44KWltKBbvjvLRVEBUD2+R+4LY9lOJozIYhu+/tGEm22Nv 43 | HwGaC8VxxP580iDYe6+dHwqHMBTpL42Ojfs72gv1roQDQqOKXNvE+zXNGiOE1X/c 44 | cgaEQ+ge98qN3dGGXvx61ZALY7P1BcHzEhZl21sGBi7/qCcPAFRIFAEvLUdZbAcN 45 | PAE3FDAdFeRjqGywWrdYVpzR0FE2+xITUj73+jOKkn03diZVxB0hJ1EugWHnGc7X 46 | c10zUwpM2VoBrmr5R/fhcgpRfET8vA0EJkWmAgxmtwY64dU/TBB2PhHLS2noD8Q/ 47 | X0XVYYECgYEA+sxAVVkoVB/d6HGHH7kKifnc0qXmiC8RVLcrXTTLsJE7L1gOBMfM 48 | J2z7EEps0lRAvdzruZefqoVpd8Mh44zekBCy1JNMzl5y4OwrLLXxlySXMnLfETvA 49 | MW9fGnyAIPG4/a633zG2WGAm3EFgVoxTTSKok3y7lmOGgPDpH8mqzOECgYEA3Ebz 50 | sbkG9jyk/R/2qEl3LxM6ynt7VKfDfteOs6Rx7mce01KF9WNv9gXv4LYEiWgRNd+c 51 | c38EeXcPqG3AhY853721nGP1zl0CXbSrDt/bB1RZUWqhdYB+tOtzXCxm1QR1te/3 52 | 
MoX2fXJm5/jEwojHofxxEpTVmHMjQ0vDIvntuE0CgYBXlZL1+1/pGQPfFB6TRoTW 53 | sIqcidFbR8yuoBUlxLVJoT5hB0hGBRxXvGhlRQiB32iIpakwtDHPVC4D5AJmvCBR 54 | gXNiZ1qQS02lHPTq9VM8bEvdE16xXwN8gB9fWZFJcAEhnq2Z5Xt/m3yWuMITF4hT 55 | zMHAV/QOzgz/5KIVNtFOIQKBgQCjDr6cQ4wcwL2dRojvABsCtOhjNM8R1nIHtgdD 56 | gap4wMr3wXG6OVaKttBf9j0bffane5SzhkXIqFLl6gCGnYRI1ITYdMJjdUQoG3I4 57 | u4rGPTE07IsCkRC6WkR16cRhUUDVYgIJ21KggAwfEW6NVnT4uwb0q0oF5M0opq+X 58 | d4z6TQKBgBXSWaR2zk12Z7wxFy4W04nrZlyr3eMX8xnyhlpmaYbkF17R26cAY5Gs 59 | x6DRp6vI4VvtQdh0FVVBtwoUVsudOHQCFaASLaIHb5S7p6WorTnGpI0szo6RCAqN 60 | yFMRD/+V5NxtoYaHKR/Poqdj3f22Oko8rKYxpU0j/mGET+tPaF5s 61 | -----END RSA PRIVATE KEY----- 62 | 63 | runcmd: 64 | - "new-cert.sh -1 /etc/haproxy/ca.crt -2 /etc/haproxy/ca.key -3 \"127.0.0.1,{{ ds.meta_data.local_ipv4 }}\" -4 \"localhost\" \"{{ ds.meta_data.hostname }}\" /etc/haproxy" 65 | 66 | users: 67 | - name: builder 68 | sudo: ALL=(ALL) NOPASSWD:ALL 69 | ssh_authorized_keys: 70 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDFIKszT265HYuhwWJ3CwozCKXI3y94bQoocQf1/ERq7XkWJ57W3rkbpMXtM0l1IKfhjnkRzFkXDa5WgRYFvAosh68LeKmYhoJYOKnyvx/nYBT/aYWdLu/edgv8T8GYKG1MiU6RdNvsGsXIAKhknBtcsmTcR2niEwOmXQ5M/P3oMswWk+4WIcWyJU6BWAQbK/alVn5kIRQFas47k6Pkm1Tg7TKv+MOX6JPzv8gOqxvqcXFKoEcTthC2JsKvmRwAOtLrBHh5BMzOKV9G+CnmgzmM/p6qU1nfebvDNuBtzThURP0lTcJGmf+g5WtbJ8vdUd+MAFZGpvoARl1v1s4Ubked capi 71 | -------------------------------------------------------------------------------- /hack/image-govc-cloudinit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 
7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | ################################################################################ 18 | # usage: image-govc-cloudinit.sh VM 19 | # This program updates a remote VM with the cloud-init data to ready it for 20 | # testing. This program requires a configured govc. 21 | ################################################################################ 22 | 23 | set -o errexit 24 | set -o nounset 25 | set -o pipefail 26 | 27 | if [ "${#}" -lt "1" ]; then 28 | echo "usage: ${0} VM" 1>&2 29 | exit 1 30 | fi 31 | 32 | if ! command -v govc >/dev/null 2>&1; then 33 | echo "govc binary must be in \$PATH" 1>&2 34 | exit 1 35 | fi 36 | 37 | export GOVC_VM="${1-}" 38 | 39 | create_snapshot() { 40 | snapshots="$(govc snapshot.tree)" 41 | if [[ ${snapshots} = *${1-}* ]]; then 42 | echo "image-govc-cloudinit: skip snapshot '${1-}'; already exists" 43 | else 44 | echo "image-post-cloudinit: create snapshot '${1-}'" 45 | vmrun snapshot "${VMX_FILE}" "${1-}" 46 | fi 47 | } 48 | 49 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 50 | 51 | # If the VM has a "new" snapshot then revert to it and delete all other 52 | # snapshots. 53 | snapshots="$(govc snapshot.tree 2>/dev/null)" || true 54 | if [[ ${snapshots} = *new* ]]; then 55 | echo "image-govc-cloudinit: reverting to snapshot 'new'" 56 | govc snapshot.revert new 57 | for s in ${snapshots}; do 58 | if [ "${s}" != "new" ] && [ "${s}" != "." 
] ; then 59 | echo "image-govc-cloudinit: removing snapshot '${s}'" 60 | govc snapshot.remove "${s}" 61 | fi 62 | done 63 | else 64 | echo "image-govc-cloudinit: creating snapshot 'new'" 65 | govc snapshot.create new 66 | fi 67 | 68 | echo "image-govc-cloudinit: initializing cloud-init data" 69 | govc vm.change \ 70 | -e "guestinfo.userdata.encoding=base64" \ 71 | -e "guestinfo.metadata.encoding=base64" \ 72 | -e "guestinfo.userdata='$(base64 -w0 &2 32 | exit 1 33 | fi 34 | 35 | VM_RUN="${VM_RUN:-$(command -v vmrun 2>/dev/null)}" 36 | if [ ! -e "${VM_RUN}" ] || [ ! -x "${VM_RUN}" ]; then 37 | echo "vmrun must be in \$PATH or specified by \$VM_RUN" 1>&2 38 | exit 1 39 | fi 40 | VM_RUN_DIR="$(dirname "${VM_RUN}")" 41 | export PATH="${VM_RUN_DIR}:${PATH}" 42 | 43 | # Get the path of the VMX file. 44 | VMX_FILE=$(/bin/ls "${1-}"/*.vmx) 45 | 46 | create_snapshot() { 47 | snapshots="$(vmrun listSnapshots "${VMX_FILE}" 2>/dev/null)" 48 | if [[ ${snapshots} = *${1-}* ]]; then 49 | echo "image-post-create-config: skip snapshot '${1-}'; already exists" 50 | else 51 | echo "image-post-create-config: create snapshot '${1-}'" 52 | vmrun snapshot "${VMX_FILE}" "${1-}" 53 | fi 54 | } 55 | 56 | create_snapshot new 57 | 58 | if grep -q 'guestinfo.userdata' "${VMX_FILE}"; then 59 | echo "image-post-create-config: skipping cloud-init data; already exists" 60 | else 61 | echo "image-post-create-config: insert cloud-init data" 62 | CIDATA_DIR="$(dirname "${BASH_SOURCE[0]}")/../example" 63 | cat <>"${VMX_FILE}" 64 | guestinfo.userdata = "$({ base64 -w0 || base64; } 2>/dev/null <"${CIDATA_DIR}/user-data")" 65 | guestinfo.userdata.encoding = "base64" 66 | guestinfo.metadata = "$({ base64 -w0 || base64; } 2>/dev/null <"${CIDATA_DIR}/meta-data")" 67 | guestinfo.metadata.encoding = "base64" 68 | EOF 69 | create_snapshot cloudinit 70 | fi 71 | -------------------------------------------------------------------------------- /hack/image-ssh.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | ################################################################################ 18 | # usage: image-ssh.sh BUILD_DIR 19 | # This program uses SSH to connect to an image running locally in VMware 20 | # Workstation or VMware Fusion. 21 | ################################################################################ 22 | 23 | set -o errexit 24 | set -o nounset 25 | set -o pipefail 26 | 27 | if [ "${#}" -lt "1" ]; then 28 | echo "usage: ${0} BUILD_DIR" 1>&2 29 | exit 1 30 | fi 31 | 32 | VM_RUN="${VM_RUN:-$(command -v vmrun 2>/dev/null)}" 33 | if [ ! -e "${VM_RUN}" ] || [ ! -x "${VM_RUN}" ]; then 34 | echo "vmrun must be in \$PATH or specified by \$VM_RUN" 1>&2 35 | exit 1 36 | fi 37 | VM_RUN_DIR="$(dirname "${VM_RUN}")" 38 | export PATH="${VM_RUN_DIR}:${PATH}" 39 | 40 | # Get the path of the VMX file. 41 | VMX_FILE=$(/bin/ls "${1-}"/*.vmx) 42 | 43 | # Get the SSH user. 44 | SSH_USER="builder" 45 | 46 | # Get the VM's IP address. 47 | IP_ADDR="$(vmrun getGuestIPAddress "${VMX_FILE}")" 48 | 49 | # SSH into the VM with the provided user. 
50 | SSH_KEY="$(dirname "${BASH_SOURCE[0]}")/../example/id_rsa" 51 | echo "image-ssh: ssh -i ${SSH_KEY} ${SSH_USER}@${IP_ADDR}" 52 | exec ssh -o UserKnownHostsFile=/dev/null -i "${SSH_KEY}" "${SSH_USER}"@"${IP_ADDR}" 53 | -------------------------------------------------------------------------------- /hack/image-tools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | ################################################################################ 18 | # usage: image-tools.sh [FLAGS] 19 | # This program ensures the tools required for building images are available 20 | # in the expected locations. 21 | ################################################################################ 22 | 23 | set -o errexit 24 | set -o nounset 25 | set -o pipefail 26 | 27 | if ! command -v go >/dev/null 2>&1; then 28 | echo "Golang binary must be in \$PATH" 1>&2 29 | exit 1 30 | fi 31 | 32 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 
33 | mkdir -p hack/.bin && cd hack/.bin 34 | 35 | HOSTOS=$(go env GOHOSTOS) 36 | HOSTARCH=$(go env GOHOSTARCH) 37 | 38 | checksum_sha256() { 39 | if command -v shasum >/dev/null 2>&1; then 40 | shasum --ignore-missing -a 256 -c "${1}" 41 | elif command -v sha256sum >/dev/null 2>&1; then 42 | sha256sum --ignore-missing -c "${1}" 43 | else 44 | echo "missing shasum tool" 1>&2 45 | return 1 46 | fi 47 | } 48 | 49 | ensure_ansible() { 50 | if [ -L "ansible" ]; then return; fi 51 | if _bin="$(command -v ansible 2>/dev/null)"; then 52 | ln -s "${_bin}" ansible; return 53 | fi 54 | if ! command -v python >/dev/null 2>&1; then 55 | echo "Python binary must be in \$PATH" 1>&2 56 | return 1 57 | fi 58 | if ! command -v pip >/dev/null 2>&1; then 59 | curl -L https://bootstrap.pypa.io/get-pip.py -o get-pip.py 60 | python get-pip.py --user 61 | rm -f get-pip.py 62 | fi 63 | _version="2.8.0" 64 | python -m pip install --user "ansible==${_version}" 65 | if _bin="$(command -v ansible 2>/dev/null)"; then 66 | ln -s "${_bin}" ansible; return 67 | fi 68 | echo "User's Python binary directory must bin in \$PATH" 1>&2 69 | return 1 70 | } 71 | 72 | ensure_jq() { 73 | if [ -L "jq" ]; then return; fi 74 | if [ -f "jq" ]; then 75 | { [ -x "jq" ] && return; } || rm -f "jq" 76 | fi 77 | if _bin="$(command -v jq 2>/dev/null)"; then 78 | ln -s "${_bin}" jq; return 79 | fi 80 | _version="1.6" # earlier versions don't follow the same OS/ARCH patterns 81 | case "${HOSTOS}" in 82 | linux) 83 | _binfile="jq-linux64" 84 | ;; 85 | darwin) 86 | _binfile="jq-osx-amd64" 87 | ;; 88 | *) 89 | echo "unsupported HOSTOS=${HOSTOS}" 1>&2 90 | return 1 91 | ;; 92 | esac 93 | _bin_url="https://github.com/stedolan/jq/releases/download/jq-${_version}/${_binfile}" 94 | curl -L "${_bin_url}" -o jq 95 | chmod 0755 jq 96 | } 97 | 98 | ensure_packer() { 99 | if [ -L "packer" ]; then return; fi 100 | if [ -f "packer" ]; then 101 | { [ -x "packer" ] && return; } || rm -f "packer" 102 | fi 103 | if _bin="$(command -v 
packer 2>/dev/null)"; then 104 | ln -s "${_bin}" packer; return 105 | fi 106 | _version="1.4.1" 107 | _chkfile="packer_${_version}_SHA256SUMS" 108 | _chk_url="https://releases.hashicorp.com/packer/${_version}/${_chkfile}" 109 | _zipfile="packer_${_version}_${HOSTOS}_${HOSTARCH}.zip" 110 | _zip_url="https://releases.hashicorp.com/packer/${_version}/${_zipfile}" 111 | curl -LO "${_chk_url}" 112 | curl -LO "${_zip_url}" 113 | checksum_sha256 "${_chkfile}" 114 | unzip "${_zipfile}" 115 | rm -f "${_chkfile}" "${_zipfile}" 116 | } 117 | 118 | ensure_packer_goss() { 119 | _binfile="${HOME}/.packer.d/plugins/packer-provisioner-goss" 120 | if [ -f "${_binfile}" ]; then 121 | { [ -x "${_binfile}" ] && return; } || rm -f "${_binfile}" 122 | fi 123 | _bin_dir="$(dirname "${_binfile}")" 124 | mkdir -p "${_bin_dir}" && cd "${_bin_dir}" 125 | case "${HOSTOS}" in 126 | linux) 127 | _sha256="28be39d0ddf9ad9c14e432818261abed2f2bd83257cfba213e19d5c59b710d03" 128 | ;; 129 | darwin) 130 | _sha256="7ae43b5dbd26a166c8673fc7299e91d1c2244c7d2b3b558ce04e2e53acfa6f88" 131 | ;; 132 | *) 133 | echo "unsupported HOSTOS=${HOSTOS}" 1>&2 134 | return 1 135 | ;; 136 | esac 137 | _version="0.3.0" 138 | _bin_url="https://github.com/YaleUniversity/packer-provisioner-goss/releases/download/v${_version}/packer-provisioner-goss-v${_version}-${HOSTOS}-${HOSTARCH}" 139 | curl -L "${_bin_url}" -o "${_binfile}" 140 | printf "%s *${_binfile}" "${_sha256}" >"${_binfile}.sha256" 141 | if ! 
checksum_sha256 "${_binfile}.sha256"; then 142 | _exit_code="${?}" 143 | rm -f "${_binfile}.sha256" 144 | return "${_exit_code}" 145 | fi 146 | rm -f "${_binfile}.sha256" 147 | chmod 0755 "${_binfile}" 148 | } 149 | 150 | #ensure_ansible 151 | #ensure_jq 152 | #ensure_packer 153 | ensure_packer_goss 154 | -------------------------------------------------------------------------------- /hack/image-upload.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | ################################################################################ 18 | # usage: image-upload.py [FLAGS] ARGS 19 | # This program uploads an OVA created from a Packer build 20 | ################################################################################ 21 | 22 | import argparse 23 | import atexit 24 | import hashlib 25 | import json 26 | import os 27 | import re 28 | import requests 29 | import subprocess 30 | import sys 31 | 32 | 33 | def main(): 34 | parser = argparse.ArgumentParser( 35 | description="Uploads an OVA created from a Packer build") 36 | parser.add_argument(dest='build_dir', 37 | nargs='?', 38 | metavar='BUILD_DIR', 39 | default='.', 40 | help='The Packer build directory') 41 | parser.add_argument('--key-file', 42 | dest='key_file', 43 | required=True, 44 | nargs='?', 45 | metavar='KEY_FILE', 46 | help='The GCS key file') 47 | args = parser.parse_args() 48 | 49 | # Get the absolute path to the GCS key file. 50 | key_file = os.path.abspath(args.key_file) 51 | 52 | # Change the working directory if one is specified. 53 | os.chdir(args.build_dir) 54 | print("image-build-ova: cd %s" % args.build_dir) 55 | 56 | # Load the packer manifest JSON 57 | data = None 58 | with open('packer-manifest.json', 'r') as f: 59 | data = json.load(f) 60 | 61 | # Get the first build. 62 | build = data['builds'][0] 63 | build_data = build['custom_data'] 64 | print("image-upload-ova: loaded %s-%s" % (build['name'], 65 | build_data['version'])) 66 | 67 | # Get the OVA and its checksum. 68 | ova = "%s.ova" % build['name'] 69 | ova_sum = "%s.sha256" % ova 70 | 71 | # Get the name of the remote OVA and its checksum. 72 | rem_ova = "%s-%s.ova" % (build['name'], 73 | build_data['version']) 74 | 75 | # Determine whether or not this is a release or CI image. 76 | upload_dir = 'ci' 77 | if re.match(r'^v?\d+\.\d+\.\d(-\d+)?$', build_data['version']): 78 | upload_dir = 'release' 79 | 80 | # Get the path to the GCS OVA and its checksum. 
81 | gcs_ova = "gs://load-balancer-api/ova/%s/%s/%s" % ( 82 | upload_dir, build_data['version'], rem_ova) 83 | gcs_ova_sum = "%s.sha256" % gcs_ova 84 | 85 | # Get the URL of the OVA and its checksum. 86 | url_ova = "http://storage.googleapis.com/load-balancer-api/ova/%s/%s/%s" % ( 87 | upload_dir, build_data['version'], rem_ova) 88 | url_ova_sum = "%s.sha256" % url_ova 89 | 90 | # Compare the remote checksum with the local checksum. 91 | lcl_ova_sum_val = get_local_checksum(ova_sum) 92 | print("image-upload-ova: local sha256 %s" % lcl_ova_sum_val) 93 | rem_ova_sum_val = get_remote_checksum(url_ova_sum) 94 | print("image-upload-ova: remote sha256 %s" % rem_ova_sum_val) 95 | if lcl_ova_sum_val == rem_ova_sum_val: 96 | print("image-upload-ova: skipping upload") 97 | print("image-upload-ova: download from %s" % url_ova) 98 | return 99 | 100 | # Activate the GCS service account. 101 | activate_service_account(key_file) 102 | atexit.register(deactivate_service_account) 103 | 104 | # Upload the OVA and its checksum. 
105 | print("image-upload-ova: upload %s" % gcs_ova) 106 | subprocess.check_call(['gsutil', 'cp', ova, gcs_ova]) 107 | print("image-upload-ova: upload %s" % gcs_ova_sum) 108 | subprocess.check_call(['gsutil', 'cp', ova_sum, gcs_ova_sum]) 109 | 110 | print("image-upload-ova: download from %s" % url_ova) 111 | 112 | 113 | def activate_service_account(path): 114 | args = [ 115 | "gcloud", "auth", 116 | "activate-service-account", 117 | "--key-file", path, 118 | ] 119 | subprocess.check_call(args) 120 | 121 | 122 | def deactivate_service_account(): 123 | subprocess.call(["gcloud", "auth", "revoke"]) 124 | 125 | 126 | def get_remote_checksum(url): 127 | r = requests.get(url) 128 | if r.status_code >= 200 and r.status_code <= 299: 129 | return r.text.strip() 130 | return None 131 | 132 | 133 | def get_local_checksum(path): 134 | with open(path, 'r') as f: 135 | return f.readline().strip() 136 | 137 | 138 | if __name__ == "__main__": 139 | main() 140 | -------------------------------------------------------------------------------- /hack/test-route-programs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2020 HAProxy Technologies 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http:#www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 
16 | 17 | ################################################################################ 18 | # usage: test-route-programs 19 | # Deploys a local test environment for testing anyiproutectl and routetablectl 20 | ################################################################################ 21 | 22 | set -o errexit # Exits immediately on unexpected errors (does not bypass traps) 23 | set -o nounset # Errors if variables are used without first being defined 24 | set -o pipefail # Non-zero exit codes in piped commands causes pipeline to fail 25 | # with that code 26 | 27 | # Change directories to the parent directory of the one in which this script is 28 | # located. 29 | cd "$(dirname "${BASH_SOURCE[0]}")/.." 30 | 31 | ################################################################################ 32 | ## usage 33 | ################################################################################ 34 | 35 | USAGE="usage: ${0} [FLAGS] 36 | Deploys a local test environment for testing anyiproutectl and routetablectl 37 | 38 | FLAGS 39 | -h show this help and exit 40 | -a test anyiproutectl 41 | -r test routetablectl 42 | 43 | Globals 44 | HAPROXY_IMAGE 45 | name of the HAProxy image to use; otherwise builds locally 46 | HAPROXY_CONTAINER 47 | name of the HAProxy container. 
default: haproxy 48 | " 49 | 50 | ################################################################################ 51 | ## const 52 | ################################################################################ 53 | DOCKER_NET_1=test-routes-1 54 | DOCKER_NET_2=test-routes-2 55 | DOCKER_NET_3=test-routes-3 56 | 57 | ################################################################################ 58 | ## args 59 | ################################################################################ 60 | 61 | HAPROXY_IMAGE="${HAPROXY_IMAGE-}" 62 | HAPROXY_CONTAINER="${HAPROXY_CONTAINER:-haproxy}" 63 | 64 | ################################################################################ 65 | ## require 66 | ################################################################################ 67 | 68 | function check_dependencies() { 69 | command -v docker >/dev/null 2>&1 || fatal "docker is required" 70 | } 71 | 72 | ################################################################################ 73 | ## funcs 74 | ################################################################################ 75 | 76 | # error stores exit code, writes arguments to STDERR, and returns stored exit code 77 | # fatal is like error except it will exit program if exit code >0 78 | function error() { 79 | local exit_code="${?}" 80 | echo "${@}" 1>&2 81 | return "${exit_code}" 82 | } 83 | function fatal() { 84 | error "${@}" 85 | exit 1 86 | } 87 | 88 | # Gets the CIDR of the provided Docker network. 89 | function net_cidr() { 90 | docker network inspect --format='{{range .IPAM.Config}}{{.Subnet}}{{end}}' "${1}" 91 | } 92 | 93 | # Gets the gateway of the provided Docker network. 94 | function net_gateway() { 95 | docker network inspect --format='{{range .IPAM.Config}}{{.Gateway}}{{end}}' "${1}" 96 | } 97 | 98 | # Gets the MAC address of the HAProxy container connnected to the provided 99 | # network. 100 | # Will fail unless the HAProxy container is running. 
101 | function net_mac_addr() { 102 | docker network inspect --format='{{range .Containers}}{{.MacAddress}}{{end}}' "${1}" 103 | } 104 | 105 | function net_ip() { 106 | docker network inspect --format='{{range .Containers}}{{.IPv4Address}}{{end}}' "${1}" 107 | } 108 | 109 | # Creates a Docker network if it does not exist. 110 | function net_create() { 111 | if [ -z "$(docker network ls -qf "Name=${1}")" ]; then 112 | echo "creating docker network ${1}" 113 | docker network create --attachable --driver=bridge "${1}" 114 | fi 115 | } 116 | 117 | # Deletes a Docker network. 118 | function net_delete() { 119 | echo "deleting docker network ${1}" 120 | docker network rm "${1}" 2>/dev/null || true 121 | } 122 | 123 | # Creates the Docker networks used with the tests. 124 | function net_up() { 125 | net_create "${DOCKER_NET_1}" 126 | net_create "${DOCKER_NET_2}" 127 | net_create "${DOCKER_NET_3}" 128 | 129 | # Store the CIDR for the Docker networks. 130 | DOCKER_NET_1_CIDR="$(net_cidr "${DOCKER_NET_1}")" 131 | DOCKER_NET_2_CIDR="$(net_cidr "${DOCKER_NET_2}")" 132 | DOCKER_NET_3_CIDR="$(net_cidr "${DOCKER_NET_3}")" 133 | 134 | # Store the Gateways for the Docker networks. 135 | #DOCKER_NET_1_GATEWAY="$(net_cidr "${DOCKER_NET_1}")" 136 | DOCKER_NET_2_GATEWAY="$(net_gateway "${DOCKER_NET_2}")" 137 | DOCKER_NET_3_GATEWAY="$(net_gateway "${DOCKER_NET_3}")" 138 | } 139 | 140 | # Deletes the Docker networks used with the tests. 141 | function net_down() { 142 | net_delete "${DOCKER_NET_1}" 143 | net_delete "${DOCKER_NET_2}" 144 | net_delete "${DOCKER_NET_3}" 145 | } 146 | 147 | # Stops the HAProxy container. 148 | function stop_haproxy() { 149 | docker kill "${HAPROXY_CONTAINER}" || true 150 | } 151 | 152 | # Called before this program exits. 153 | function on_exit() { 154 | stop_haproxy 155 | net_down 156 | [ -z "${TEMP_TEST:-}" ] || rm -f "${TEMP_TEST}" 157 | } 158 | trap on_exit EXIT 159 | 160 | # Builds the HAProxy image if necessary. 
function build_haproxy() {
  # If no image is specified then build it locally and use the local tag.
  if [ -z "${HAPROXY_IMAGE}" ]; then
    make build-image
    export HAPROXY_IMAGE=haproxy
  fi
}

# Starts HAProxy if it is not running. Records the container's MAC and IPv4
# addresses on networks 2 and 3 in global variables used by the tests.
function start_haproxy() {
  # NOTE: docker filter keys are lowercase; "Name=" is rejected by the CLI.
  if [ -z "$(docker ps -qf "name=${HAPROXY_CONTAINER}")" ]; then

    echo "creating haproxy container: ${HAPROXY_CONTAINER}"
    # The container is created in privileged mode to enable the use of AnyIP.
    docker create \
      --name="${HAPROXY_CONTAINER}" \
      --network="${DOCKER_NET_1}" \
      --privileged \
      --rm \
      "${HAPROXY_IMAGE}"

    # Connect the two, additional networks.
    docker network connect "${DOCKER_NET_2}" "${HAPROXY_CONTAINER}"
    docker network connect "${DOCKER_NET_3}" "${HAPROXY_CONTAINER}"

    echo "starting haproxy container: ${HAPROXY_CONTAINER}"
    docker start "${HAPROXY_CONTAINER}"

    # Store the container ID.
    #HAPROXY_CONTAINER_ID="$(docker inspect --format='{{.Id}}' "${HAPROXY_CONTAINER}")"

    # Store the MAC addresses for the container for each network.
    #DOCKER_NET_1_MAC="$(net_mac_addr "${DOCKER_NET_1}")"
    DOCKER_NET_2_MAC="$(net_mac_addr "${DOCKER_NET_2}")"
    DOCKER_NET_3_MAC="$(net_mac_addr "${DOCKER_NET_3}")"

    # Store the container's IPv4 addresses on networks 2 and 3.
    DOCKER_IP_NET_2="$(net_ip "${DOCKER_NET_2}")"
    DOCKER_IP_NET_3="$(net_ip "${DOCKER_NET_3}")"
  fi
}

# Ensures everything the tests need is in place: dependencies, image,
# networks, and a running container.
function test_prereqs() {
  check_dependencies

  # Build the HAProxy image if necessary.
  build_haproxy

  # Create the networks.
  net_up

  # Start HAProxy.
  start_haproxy
}

# Exercises /var/lib/vmware/anyiproutectl.sh inside the HAProxy container by
# generating a test script locally and executing it via docker exec.
function test_anyiproutectl() {
  test_prereqs

  # Get the AnyIP ranges for the Docker networks.
  DOCKER_NET_1_ANYIP_SLASH_32="${DOCKER_NET_1_CIDR%.*/*}.128/32"
  DOCKER_NET_1_ANYIP_CIDR_1="${DOCKER_NET_1_CIDR%.*/*}.128/25"
  DOCKER_NET_2_ANYIP_CIDR_1="${DOCKER_NET_2_CIDR%.*/*}.128/25"
  DOCKER_NET_3_ANYIP_CIDR_1="${DOCKER_NET_3_CIDR%.*/*}.128/25"

  # Define a random IP address in each of the AnyIP ranges.
  ANYIP_IP_SLASH_32="${DOCKER_NET_1_ANYIP_SLASH_32%/32}"
  ANYIP_IP_1="${DOCKER_NET_1_ANYIP_CIDR_1%.*/*}.$(shuf -i 128-254 -n 1)"
  ANYIP_IP_2="${DOCKER_NET_2_ANYIP_CIDR_1%.*/*}.$(shuf -i 128-254 -n 1)"
  ANYIP_IP_3="${DOCKER_NET_3_ANYIP_CIDR_1%.*/*}.$(shuf -i 128-254 -n 1)"

  # Create the temp test file.
  # FIX: the here-doc redirection was lost in transit; "cat <FILE" attempted
  # to READ the not-yet-existing temp file instead of writing the script.
  TEMP_TEST=".$(date "+%s")"
  cat <<EOF >"${TEMP_TEST}"
#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail
set -v

echo "starting test" >&2

# Ping each of the IP addresses and expect an error for each one.
! ping -c2 -W1 "${ANYIP_IP_SLASH_32}"
! ping -c2 -W1 "${ANYIP_IP_1}"
! ping -c2 -W1 "${ANYIP_IP_2}"
! ping -c2 -W1 "${ANYIP_IP_3}"

# Run the program with an empty config file and expect no errors.
/var/lib/vmware/anyiproutectl.sh up

# Create the config file.
cat <<EOD >/etc/vmware/anyip-routes.cfg
${DOCKER_NET_1_ANYIP_SLASH_32}
EOD

# Run the program with a populated config file and expect no errors.
/var/lib/vmware/anyiproutectl.sh up

# Run the program with a populated config file again and expect no errors.
/var/lib/vmware/anyiproutectl.sh up

# Ping the /32 address.
ping -c2 "${ANYIP_IP_SLASH_32}"

# Disable the routes and expect no errors.
/var/lib/vmware/anyiproutectl.sh down

# Disable the AnyIP routes again and expect no errors.
/var/lib/vmware/anyiproutectl.sh down

# Ping the /32 address and expect an error.
! ping -c2 -W1 "${ANYIP_IP_SLASH_32}"

# Recreate the config file.
cat <<EOD >/etc/vmware/anyip-routes.cfg
${DOCKER_NET_1_ANYIP_CIDR_1}
${DOCKER_NET_2_ANYIP_CIDR_1}
EOD

# Run the program with a populated config file and expect no errors.
/var/lib/vmware/anyiproutectl.sh up

# Run the program with a populated config file again and expect no errors.
/var/lib/vmware/anyiproutectl.sh up

# Ping each of the IP addresses and expect no errors.
ping -c2 "${ANYIP_IP_1}"
ping -c2 "${ANYIP_IP_2}"

# Disable the routes and expect no errors.
/var/lib/vmware/anyiproutectl.sh down

# Disable the AnyIP routes again and expect no errors.
/var/lib/vmware/anyiproutectl.sh down

# Ping each of the IP addresses and expect an error for each one.
! ping -c2 -W1 "${ANYIP_IP_1}"
! ping -c2 -W1 "${ANYIP_IP_2}"

# Watch the config file for changes.
/var/lib/vmware/anyiproutectl.sh watch &

# Sleep for a moment to give the watch a chance to take.
sleep 1

# Update the config file.
echo "${DOCKER_NET_3_ANYIP_CIDR_1}" >>/etc/vmware/anyip-routes.cfg

# Ping the new IP address and expect no errors.
ping -c2 "${ANYIP_IP_3}"
EOF

  # Copy the test script to the container.
  docker cp "${TEMP_TEST}" "${HAPROXY_CONTAINER}":/test.sh

  # Execute the test script inside the container.
  docker exec "${HAPROXY_CONTAINER}" bash /test.sh
}

# Exercises /var/lib/vmware/routetablectl.sh inside the HAProxy container.
function test_routetablectl() {
  test_prereqs

  # Create the config file.
324 | # ,,,, 325 | TEMP_TEST=".$(date "+%s")" 326 | cat <"${TEMP_TEST}" 327 | 2,frontend,${DOCKER_NET_2_MAC},${DOCKER_NET_2_CIDR},${DOCKER_NET_2_GATEWAY} 328 | 3,workload,${DOCKER_NET_3_MAC},${DOCKER_NET_3_CIDR},${DOCKER_NET_3_GATEWAY} 329 | EOF 330 | 331 | cat <"${TEMP_TEST}" 332 | #!/bin/bash 333 | 334 | set -o errexit 335 | set -o nounset 336 | set -o pipefail 337 | set -v 338 | 339 | # Run the program with an empty config file and expect no errors. 340 | /var/lib/vmware/routetablectl.sh up 341 | 342 | # Create the config file. 343 | # ,,,, 344 | cat </etc/vmware/route-tables.cfg 345 | 2,frontend,${DOCKER_NET_2_MAC},${DOCKER_IP_NET_2},${DOCKER_NET_2_GATEWAY} 346 | 3,workload,${DOCKER_NET_3_MAC},${DOCKER_IP_NET_3},${DOCKER_NET_3_GATEWAY} 347 | 2,frontend,${DOCKER_NET_2_MAC},${DOCKER_IP_NET_2} 348 | 3,workload,${DOCKER_NET_3_MAC},${DOCKER_IP_NET_3} 349 | EOD 350 | 351 | # Create the networks file. 352 | # It's a newline-delimited list of CIDRs. 353 | echo "10.169.10.0/24" > /etc/vmware/workload-networks.cfg 354 | echo "10.169.20.0/24" >> /etc/vmware/workload-networks.cfg 355 | 356 | # Run the program with a populated config file and expect no errors. 357 | /var/lib/vmware/routetablectl.sh up 358 | 359 | # Assert the file /etc/iproute2/rt_tables has the expected tables in it. 360 | grep $'2\trtctl_frontend' /etc/iproute2/rt_tables 361 | grep $'3\trtctl_workload' /etc/iproute2/rt_tables 362 | 363 | # Assert the expected IP rules exist. 364 | ip rule show table rtctl_frontend 365 | ip rule show table rtctl_workload 366 | 367 | # Assert that rules for our workload networks exist. 368 | ip rule show table rtctl_workload | grep "10.169.10.0 /24" 369 | ip rule show table rtctl_workload | grep "10.169.20.0 /24" 370 | 371 | # Assert the expected default gateways exist. 372 | ip route show table rtctl_frontend | grep default 373 | ip route show table rtctl_workload | grep default 374 | 375 | # Disable the routes and expect no errors. 
376 | /var/lib/vmware/routetablectl.sh down 377 | 378 | # Assert the file /etc/iproute2/rt_tables DOES NOT have the expected tables in it. 379 | ! grep $'2\trtctl_frontend' /etc/iproute2/rt_tables 380 | ! grep $'3\trtctl_workload' /etc/iproute2/rt_tables 381 | 382 | # Assert the expected IP rules DO NOT exist. This also applies to workload networks. 383 | ! ip rule | grep rtctl_frontend' 384 | ! ip rule | grep rtctl_workload' 385 | 386 | # Assert the expected default gateways DO NOT exist. 387 | ! ip route show table rtctl_frontend 2>/dev/null 388 | ! ip route show table rtctl_workload 2>/dev/null 389 | 390 | # Assert that rules for our workload networks DO NOT exist. 391 | ! ip rule show table rtctl_workload | grep "10.169.10.0 /24" 2>/dev/null 392 | ! ip rule show table rtctl_workload | grep "10.169.20.0 /24" 2>/dev/null 393 | 394 | # Truncate the config file. 395 | printf '' >/etc/vmware/route-tables.cfg 396 | 397 | # Watch the config file for changes. 398 | /var/lib/vmware/routetablectl.sh watch & 399 | 400 | # Sleep for a moment to give the watch a chance to take. 401 | sleep 1 402 | 403 | # Update the config file and assert the appropriate actions occur. 404 | echo "2,workload,${DOCKER_NET_3_MAC},${DOCKER_NET_3_CIDR},${DOCKER_NET_3_GATEWAY}" >/etc/vmware/route-tables.cfg 405 | sleep 2 406 | grep $'2\trtctl_workload' /etc/iproute2/rt_tables 407 | ip rule | grep rtctl_workload 408 | ip route show table rtctl_workload | grep default 409 | 410 | # Update the config file and assert the appropriate actions occur. 411 | echo "3,frontend,${DOCKER_NET_2_MAC},${DOCKER_NET_2_CIDR},${DOCKER_NET_2_GATEWAY}" >> /etc/vmware/route-tables.cfg 412 | sleep 2 413 | grep $'3\trtctl_frontend' /etc/iproute2/rt_tables 414 | ip rule | grep rtctl_frontend 415 | ip route show table rtctl_frontend | grep default 416 | 417 | EOF 418 | 419 | # Copy the test script to the container. 
420 | docker cp "${TEMP_TEST}" "${HAPROXY_CONTAINER}":/test.sh 421 | 422 | # Execute the test script inside the container. 423 | docker exec "${HAPROXY_CONTAINER}" bash /test.sh 424 | } 425 | 426 | ################################################################################ 427 | ## main 428 | ################################################################################ 429 | 430 | # Parse the command-line arguments. 431 | while getopts ":har" opt; do 432 | case ${opt} in 433 | h) 434 | fatal "${USAGE}" 435 | ;; 436 | a) 437 | test_anyiproutectl 438 | exit "${?}" 439 | ;; 440 | r) 441 | test_routetablectl 442 | exit "${?}" 443 | ;; 444 | \?) 445 | fatal "invalid option: -${OPTARG} ${USAGE}" 446 | ;; 447 | :) 448 | fatal "option -${OPTARG} requires an argument" 449 | ;; 450 | esac 451 | done 452 | shift $((OPTIND - 1)) 453 | error "${USAGE}" 454 | -------------------------------------------------------------------------------- /kickstart.json: -------------------------------------------------------------------------------- 1 | { 2 | "hostname": "localhost", 3 | "password": { 4 | "crypted": true, 5 | "text": "*", 6 | "age": -1 7 | }, 8 | "disk": "/dev/sda", 9 | "partitions": [ 10 | { 11 | "mountpoint": "/boot", 12 | "size": 512, 13 | "filesystem": "ext4" 14 | }, 15 | { 16 | "mountpoint": "/", 17 | "size": 0, 18 | "filesystem": "ext4", 19 | "lvm": { 20 | "vg_name": "root_vg", 21 | "lv_name": "root" 22 | } 23 | }, 24 | { 25 | "size": 2048, 26 | "filesystem": "swap", 27 | "lvm": { 28 | "vg_name": "swap_vg", 29 | "lv_name": "swap1" 30 | } 31 | } 32 | ], 33 | "packages": [ 34 | "bash", 35 | "linux", 36 | "initramfs", 37 | "lvm2", 38 | "minimal", 39 | "openssh-server", 40 | "open-vm-tools", 41 | "shadow", 42 | "sudo" 43 | ], 44 | "postinstall": [ 45 | "#!/bin/bash", 46 | "useradd -U -d /home/builder -m --groups wheel builder && echo 'builder:builder' | chpasswd", 47 | "echo 'builder ALL=(ALL) NOPASSWD: ALL' >/etc/sudoers.d/builder", 48 | "chmod 440 
/etc/sudoers.d/builder", 49 | "useradd --system --no-create-home --home-dir=/var/lib/haproxy --user-group haproxy", 50 | "mkdir -p /var/lib/haproxy && chown -R haproxy:haproxy /var/lib/haproxy", 51 | "systemctl disable docker.service && systemctl mask docker.service", 52 | "systemctl enable sshd.service", 53 | "echo '\n\n[DHCP]\nClientIdentifier=mac' >> /etc/systemd/network/99-dhcp-en.network" 54 | ] 55 | } -------------------------------------------------------------------------------- /packer.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "ansible_common_vars": "", 4 | "ansible_extra_vars": "dataplane_api_url={{user `dataplane_api_url`}} haproxy_rpm_url={{user `haproxy_rpm_url`}} ovf_rpctool_url={{user `ovf_rpctool_url`}}", 5 | "build_name": "haproxy", 6 | "build_timestamp": "{{timestamp}}", 7 | "build_version": "{{user `build_name`}}-{{user `version`}}", 8 | "version": "", 9 | "dataplane_api_url": "https://storage.googleapis.com/load-balancer-api/dataplaneapi/v2.1.0/dataplaneapi", 10 | "disable_public_repos": "false", 11 | "disk_type_id": "0", 12 | "distro_name": "photon", 13 | "existing_ansible_ssh_args": "{{env `ANSIBLE_SSH_ARGS`}}", 14 | "extra_repos": "", 15 | "extra_rpms": "", 16 | "guest_os_type": "vmware-photon-64", 17 | "haproxy_rpm_url": "https://storage.googleapis.com/load-balancer-api/haproxy/v2.2.2/haproxy-2.2.2-1.ph3.x86_64.rpm", 18 | "headless": "true", 19 | "iso_url": "https://packages.vmware.com/photon/3.0/Rev2/iso/Update2/photon-minimal-3.0-a0f216d.iso", 20 | "iso_checksum": "sha1:a5acf94d564f63a174a9de200e04ab6cfe2451f2", 21 | "os_display_name": "VMware Photon OS 64-bit", 22 | "output_directory": "./output/ova", 23 | "reenable_public_repos": "true", 24 | "remove_extra_repos": "false", 25 | "ovf_rpctool_url": "https://storage.googleapis.com/load-balancer-api/ovf-rpctool/v0.0.1/ovf-rpctool", 26 | "shutdown_command": "shutdown now", 27 | "skip_compaction": "false", 28 | 
"ssh_username": "builder", 29 | "ssh_password": "builder", 30 | "vmx_version": "13", 31 | "vnc_bind_address": "127.0.0.1", 32 | "vnc_disable_password": "false", 33 | "vnc_port_min": "5900", 34 | "vnc_port_max": "6000" 35 | }, 36 | "builders": [ 37 | { 38 | "name": "haproxy", 39 | "vm_name": "haproxy", 40 | "vmdk_name": "haproxy", 41 | "output_directory": "{{user `output_directory`}}", 42 | "type": "vmware-iso", 43 | "version": "{{user `vmx_version`}}", 44 | "cpus": 1, 45 | "cores": 1, 46 | "memory": 2048, 47 | "disk_size": 20480, 48 | "disk_type_id": "0", 49 | "boot_wait": "5s", 50 | "http_directory": "{{pwd}}", 51 | "guest_os_type": "{{user `guest_os_type`}}", 52 | "headless": "{{user `headless`}}", 53 | "iso_url": "{{user `iso_url`}}", 54 | "iso_checksum": "{{user `iso_checksum`}}", 55 | "ssh_username": "{{user `ssh_username`}}", 56 | "ssh_password": "{{user `ssh_password`}}", 57 | "ssh_wait_timeout": "60m", 58 | "boot_command": [ 59 | "", 60 | "vmlinuz initrd=initrd.img root=/dev/ram0 loglevel=3 ", 61 | "ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/kickstart.json ", 62 | "photon.media=cdrom" 63 | ], 64 | "shutdown_command": "echo '{{user `ssh_password`}}' | sudo -S -E sh -c 'usermod -L {{user `ssh_username`}} && {{user `shutdown_command`}}'", 65 | "skip_compaction": "{{user `skip_compaction`}}", 66 | "vnc_bind_address": "{{user `vnc_bind_address`}}", 67 | "vnc_port_min": "{{user `vnc_port_min`}}", 68 | "vnc_port_max": "{{user `vnc_port_max`}}", 69 | "vnc_disable_password": "{{user `vnc_disable_password`}}" 70 | } 71 | ], 72 | "provisioners": [ 73 | { 74 | "type": "ansible", 75 | "playbook_file": "./ansible/playbook.yml", 76 | "ansible_env_vars": [ 77 | "ANSIBLE_SSH_ARGS='{{user `existing_ansible_ssh_args`}} -o IdentitiesOnly=yes'", 78 | "ANSIBLE_REMOTE_TEMP='/tmp/.ansible/'" 79 | ], 80 | "extra_arguments": [ 81 | "--extra-vars", 82 | "{{user `ansible_common_vars`}}", 83 | "--extra-vars", 84 | "{{user `ansible_extra_vars`}}" 85 | ] 86 | } 87 | ], 88 | 
"post-processors": [ 89 | { 90 | "type": "manifest", 91 | "output": "{{user `output_directory`}}/packer-manifest.json", 92 | "strip_path": true, 93 | "custom_data": { 94 | "build_timestamp": "{{user `build_timestamp`}}", 95 | "build_date": "{{isotime}}", 96 | "version": "{{user `version`}}", 97 | "iso_checksum": "{{user `iso_checksum`}}", 98 | "iso_checksum_type": "{{user `iso_checksum_type`}}", 99 | "iso_url": "{{user `iso_url`}}" 100 | } 101 | }, 102 | { 103 | "type": "shell-local", 104 | "command": "./hack/image-build-ova.py --vmx {{user `vmx_version`}} {{user `output_directory`}}" 105 | }, 106 | { 107 | "type": "shell-local", 108 | "command": "./hack/image-post-create-config.sh {{user `output_directory`}}" 109 | } 110 | ] 111 | } 112 | --------------------------------------------------------------------------------