├── .gitattributes
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   └── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── Dockerfile
├── LICENSE.txt
├── Makefile
├── NOTICE.txt
├── README.md
├── ansible-windows
│   ├── defaults
│   │   └── main.yml
│   ├── tasks
│   │   ├── common.yml
│   │   ├── containerd.yml
│   │   ├── main.yml
│   │   └── registry.yml
│   └── templates
│       ├── containerd
│       │   └── config.toml
│       └── registry
│           └── config.yml
├── ansible
│   ├── defaults
│   │   └── main.yml
│   ├── files
│   │   ├── etc
│   │   │   ├── crictl.yaml
│   │   │   ├── hosts
│   │   │   ├── sysctl.d
│   │   │   │   └── 80-bridged-net-traffic.conf
│   │   │   ├── systemd
│   │   │   │   └── system
│   │   │   │       ├── containerd.service
│   │   │   │       └── vmtoolsd.service.d
│   │   │   │           ├── afterdbus.conf
│   │   │   │           └── autorestart.conf
│   │   │   └── vmware-tools
│   │   │       └── tools.conf
│   │   ├── ip6tables.rules
│   │   ├── iptables.rules
│   │   ├── scripts
│   │   │   ├── docker_registry_ops.py
│   │   │   ├── image_retag.py
│   │   │   ├── ova-custom-patch.sh
│   │   │   └── utkg_download_carvel_packages.py
│   │   ├── usr
│   │   │   ├── lib
│   │   │   │   └── vmware-vmservice
│   │   │   │       ├── defer-cloud-init-generator
│   │   │   │       ├── disable-cloud-init-networking
│   │   │   │       └── disable-cloud-init-networking.service
│   │   │   └── local
│   │   │       └── bin
│   │   │           └── kubeadm
│   │   └── va_hardening
│   │       ├── OverriddenPDIs
│   │       └── vasecurity.spec
│   ├── tasks
│   │   ├── common.yml
│   │   ├── disable_pwd_rotation.yml
│   │   ├── iptables.yml
│   │   ├── main.yml
│   │   ├── photon.yml
│   │   ├── registry.yml
│   │   ├── retag_images.yml
│   │   ├── ubuntu.yml
│   │   ├── ubuntu_hack.yml
│   │   └── va_hardening.yml
│   └── templates
│       └── etc
│           ├── containerd
│           │   └── config_v2.toml
│           ├── kubernetes
│           │   └── manifests
│           │       └── registry.yml
│           ├── modprobe.d
│           │   └── blocklist-nouveau.conf
│           └── systemd
│               └── journald.conf
├── build-ova.sh
├── docs
│   ├── examples
│   │   ├── README.md
│   │   ├── customizations
│   │   │   ├── README.md
│   │   │   ├── adding_os_pkg_repos.md
│   │   │   ├── changing_hardware_version.md
│   │   │   └── prometheus_node_exporter.md
│   │   └── tutorial_building_an_image.md
│   ├── files
│   │   └── demo.gif
│   └── windows.md
├── goss
│   ├── goss-command.yaml
│   ├── goss-files.yaml
│   ├── goss-kernel-params.yaml
│   ├── goss-package.yaml
│   ├── goss-service.yaml
│   ├── goss-vars.yaml
│   └── goss.yaml
├── hack
│   ├── make-helpers
│   │   ├── build-image-builder-container.sh
│   │   ├── build-node-image.sh
│   │   ├── clean-containers.sh
│   │   ├── clean-image-artifacts.sh
│   │   ├── list-supported-os.sh
│   │   ├── make-build-all.sh
│   │   ├── make-help.sh
│   │   ├── run-artifacts-container.sh
│   │   └── utils.sh
│   ├── tkgs-image-build-ova.py
│   └── tkgs_ovf_template.xml
├── packer-variables
│   ├── default-args.j2
│   ├── goss-args.j2
│   ├── packer-http-config.j2
│   ├── photon-3
│   │   └── default-args-photon-3.j2
│   ├── photon-5
│   │   └── default-args-photon-5.j2
│   ├── ubuntu-2004-efi
│   │   └── default-args-ubuntu-2004-efi.j2
│   ├── ubuntu-2204-efi
│   │   └── default-args-ubuntu-2204-efi.j2
│   ├── vsphere.j2
│   └── windows
│       ├── default-args-windows.j2
│       ├── goss-args-windows.j2
│       └── vsphere-windows.j2
├── scripts
│   ├── tkg_byoi.py
│   └── utkg_custom_ovf_properties.py
├── supported-context.json
└── supported-version.txt
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Never modify line endings of these files.
2 | LICENSE.txt -crlf
3 | NOTICE.txt -crlf
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | name: Bug report
4 | about: Tell us about a problem you are experiencing
5 |
6 | ---
7 |
8 |
12 |
13 | **What steps did you take and what happened:**
14 |
15 |
16 | **What did you expect to happen:**
17 |
18 |
19 | **Is there anything else you would like to add?**
20 |
21 |
22 | **Please tell us about your environment.**
23 |
24 |
25 |
26 |
27 |
28 | | Value | How to Obtain |
29 | | ----- | ------------- |
30 | | Commit ID | Run `git log -1` |
31 | | Kubernetes Version | Kubernetes version that you are trying to build the image |
32 | | OS Type | OS Type and version that you are trying to build the image |
33 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | name: Feature request
4 | about: Suggest an idea for this project
5 |
6 | ---
7 |
8 |
12 |
13 | **Is your feature request related to a problem? Please describe.**
14 |
15 |
16 | **Describe the solution you'd like**
17 |
18 |
19 | **Additional context**
20 |
22 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 |
5 |
6 | **What does this PR do, and why is it needed?**
7 |
8 |
9 |
10 | **Which issue(s) is/are addressed by this PR?** *(optional, in `fixes #<issue number>(, fixes #<issue number>, ...)` format, will close the issue(s) when PR gets merged)*:
11 |
12 | Fixes #
13 |
14 | **Testing Done**:
15 |
16 |
17 | **Are there any special notes for your reviewer**:
18 |
19 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Except this file
2 | .vscode
3 | !.gitignore
4 | scripts/__pycache__
5 | output*
6 | .idea
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | We as members, contributors, and leaders pledge to make participation in the vsphere-tanzu-kubernetes-grid-image-builder project and our
6 | community a harassment-free experience for everyone, regardless of age, body
7 | size, visible or invisible disability, ethnicity, sex characteristics, gender
8 | identity and expression, level of experience, education, socio-economic status,
9 | nationality, personal appearance, race, religion, or sexual identity
10 | and orientation.
11 |
12 | We pledge to act and interact in ways that contribute to an open, welcoming,
13 | diverse, inclusive, and healthy community.
14 |
15 | ## Our Standards
16 |
17 | Examples of behavior that contributes to a positive environment for our
18 | community include:
19 |
20 | * Demonstrating empathy and kindness toward other people
21 | * Being respectful of differing opinions, viewpoints, and experiences
22 | * Giving and gracefully accepting constructive feedback
23 | * Accepting responsibility and apologizing to those affected by our mistakes,
24 | and learning from the experience
25 | * Focusing on what is best not just for us as individuals, but for the
26 | overall community
27 |
28 | Examples of unacceptable behavior include:
29 |
30 | * The use of sexualized language or imagery, and sexual attention or
31 | advances of any kind
32 | * Trolling, insulting or derogatory comments, and personal or political attacks
33 | * Public or private harassment
34 | * Publishing others' private information, such as a physical or email
35 | address, without their explicit permission
36 | * Other conduct which could reasonably be considered inappropriate in a
37 | professional setting
38 |
39 | ## Enforcement Responsibilities
40 |
41 | Community leaders are responsible for clarifying and enforcing our standards of
42 | acceptable behavior and will take appropriate and fair corrective action in
43 | response to any behavior that they deem inappropriate, threatening, offensive,
44 | or harmful.
45 |
46 | Community leaders have the right and responsibility to remove, edit, or reject
47 | comments, commits, code, wiki edits, issues, and other contributions that are
48 | not aligned to this Code of Conduct, and will communicate reasons for moderation
49 | decisions when appropriate.
50 |
51 | ## Scope
52 |
53 | This Code of Conduct applies within all community spaces, and also applies when
54 | an individual is officially representing the community in public spaces.
55 | Examples of representing our community include using an official e-mail address,
56 | posting via an official social media account, or acting as an appointed
57 | representative at an online or offline event.
58 |
59 | ## Enforcement
60 |
61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
62 | reported to the community leaders responsible for enforcement at oss-coc@vmware.com.
63 | All complaints will be reviewed and investigated promptly and fairly.
64 |
65 | All community leaders are obligated to respect the privacy and security of the
66 | reporter of any incident.
67 |
68 | ## Enforcement Guidelines
69 |
70 | Community leaders will follow these Community Impact Guidelines in determining
71 | the consequences for any action they deem in violation of this Code of Conduct:
72 |
73 | ### 1. Correction
74 |
75 | **Community Impact**: Use of inappropriate language or other behavior deemed
76 | unprofessional or unwelcome in the community.
77 |
78 | **Consequence**: A private, written warning from community leaders, providing
79 | clarity around the nature of the violation and an explanation of why the
80 | behavior was inappropriate. A public apology may be requested.
81 |
82 | ### 2. Warning
83 |
84 | **Community Impact**: A violation through a single incident or series
85 | of actions.
86 |
87 | **Consequence**: A warning with consequences for continued behavior. No
88 | interaction with the people involved, including unsolicited interaction with
89 | those enforcing the Code of Conduct, for a specified period of time. This
90 | includes avoiding interactions in community spaces as well as external channels
91 | like social media. Violating these terms may lead to a temporary or
92 | permanent ban.
93 |
94 | ### 3. Temporary Ban
95 |
96 | **Community Impact**: A serious violation of community standards, including
97 | sustained inappropriate behavior.
98 |
99 | **Consequence**: A temporary ban from any sort of interaction or public
100 | communication with the community for a specified period of time. No public or
101 | private interaction with the people involved, including unsolicited interaction
102 | with those enforcing the Code of Conduct, is allowed during this period.
103 | Violating these terms may lead to a permanent ban.
104 |
105 | ### 4. Permanent Ban
106 |
107 | **Community Impact**: Demonstrating a pattern of violation of community
108 | standards, including sustained inappropriate behavior, harassment of an
109 | individual, or aggression toward or disparagement of classes of individuals.
110 |
111 | **Consequence**: A permanent ban from any sort of public interaction within
112 | the community.
113 |
114 | ## Attribution
115 |
116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage],
117 | version 2.0, available at
118 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
119 |
120 | Community Impact Guidelines were inspired by [Mozilla's code of conduct
121 | enforcement ladder](https://github.com/mozilla/diversity).
122 |
123 | [homepage]: https://www.contributor-covenant.org
124 |
125 | For answers to common questions about this code of conduct, see the FAQ at
126 | https://www.contributor-covenant.org/faq. Translations are available at
127 | https://www.contributor-covenant.org/translations.
128 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to vSphere Tanzu Kubernetes Grid Image Builder
2 |
3 | We welcome contributions from the community and first want to thank you for taking the time to contribute!
4 |
5 | Please familiarize yourself with the [Code of Conduct][code-of-conduct] before contributing.
6 |
7 | Before you start working with vSphere Tanzu Kubernetes Grid Image Builder, please read our [Developer Certificate of Origin][dco]. All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch.
8 |
9 | ## Ways to contribute
10 |
11 | We welcome many different types of contributions and not all of them need a Pull request. Contributions may include:
12 |
13 | * New features and proposals
14 | * Documentation
15 | * Bug fixes
16 | * Issue Triage
17 | * Answering questions and giving feedback
18 | * Helping to onboard new contributors
19 | * Other related activities
20 |
21 | ## Contribution Flow
22 |
23 | This is a rough outline of what a contributor's workflow looks like:
24 |
25 | * Make a fork of the repository within your GitHub account
26 | * Create a topic branch in your fork from where you want to base your work
27 | * Make commits of logical units
28 | * Make sure your commit messages are properly formatted, descriptive, and of good quality (see below)
29 | * Push your changes to the topic branch in your fork
30 | * Create a pull request containing that commit
31 |
32 | We follow the GitHub workflow and you can find more details on the [GitHub flow documentation][github-flow].
33 |
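A typical command sequence for the flow above might look like the following (a sketch only; the fork URL, branch name, and commit message are placeholders):

```bash
# Clone your fork and create a topic branch
git clone https://github.com/<your-github-user>/vsphere-tanzu-kubernetes-grid-image-builder.git
cd vsphere-tanzu-kubernetes-grid-image-builder
git checkout -b my-topic-branch

# Make your changes, then commit with a DCO sign-off (-s) and a descriptive message
git commit -s -m "Short, descriptive summary of the change"

# Push the branch to your fork and open a pull request on GitHub
git push origin my-topic-branch
```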
34 | ## Reporting Bugs and Creating Issues
35 |
36 | For specifics on what to include in your report, please follow the guidelines in the issue and pull request templates when available.
37 |
38 | ### Pull Request Checklist
39 |
40 | Before submitting your pull request, we advise you to use the following:
41 |
42 | 1. Check if your code changes will pass both code linting checks and unit tests.
43 | 2. Ensure your commit messages are descriptive. We follow the conventions on [How to Write a Git Commit Message][git-commit]. Be sure to include any related GitHub issue references in the commit message. See [GFM syntax][github-markdown] for referencing issues and commits.
44 | 3. Check the commits and commit messages and ensure they are free from typos.
45 |
46 | ## Ask for Help
47 |
48 | Please use GitHub Pull Requests and GitHub Issues as a means to start a conversation with the team.
49 |
50 | [//]: Links
51 |
52 | [code-of-conduct]: CODE_OF_CONDUCT.md
53 | [dco]: https://cla.vmware.com/dco
54 | [git-commit]: http://chris.beams.io/posts/git-commit/
55 | [github-flow]: https://docs.github.com/en/get-started/quickstart/github-flow
56 | [github-markdown]: https://guides.github.com/features/mastering-markdown/#GitHub-flavored-markdown
57 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | ARG BASE_IMAGE=library/photon:5.0
6 | FROM ${BASE_IMAGE}
7 |
8 | ARG IMAGE_BUILDER_COMMIT_ID=""
9 | ARG ANSIBLE_VERSION=2.15.13
10 | ARG IMAGE_BUILDER_REPO="https://github.com/kubernetes-sigs/image-builder.git"
11 | ARG IMAGE_BUILDER_REPO_NAME=image-builder
12 | ARG PACKER_GITHUB_API_TOKEN=""
13 |
14 | ENV PATH=${PATH}:/ovftool
15 | ENV LANG=en_US.UTF-8
16 |
17 | SHELL ["/bin/bash", "-c"]
18 |
19 | RUN tdnf -y update
20 | RUN tdnf -y upgrade
21 |
22 | # Install required packages
23 | RUN for package in unzip git wget build-essential python3-pip jq coreutils openssh-server xorriso grep ; do tdnf -y install "$package" --refresh; done
24 |
25 | # Install semver, jinja2 and jinja2-time
26 | RUN pip3 install semver jinja2 jinja2-time
27 |
28 | # Install Windows Remote Management
29 | RUN pip3 install pywinrm
30 |
31 | # Install ovftool
32 | # TODO: this URL might change or expire so need to look at better way to install it on the container.
33 | RUN wget https://vdc-download.vmware.com/vmwb-repository/dcr-public/2ee5a010-babf-450b-ab53-fb2fa4de79af/2a136212-2f83-4f5d-a419-232f34dc08cf/VMware-ovftool-4.4.3-18663434-lin.x86_64.zip
34 | RUN unzip VMware-ovftool-4.4.3-18663434-lin.x86_64.zip -d /
35 |
36 | # Setup image Builder code
37 | RUN git clone $IMAGE_BUILDER_REPO $IMAGE_BUILDER_REPO_NAME
38 | WORKDIR $IMAGE_BUILDER_REPO_NAME
39 |
40 | RUN git checkout $IMAGE_BUILDER_COMMIT_ID
41 |
42 | # Install Ansible
43 | RUN pip3 install ansible-core==$ANSIBLE_VERSION
44 | # Set the environment variable where packer will be installed
45 | ENV PATH=${PATH}:/image-builder/images/capi/.local/bin
46 |
47 | # Running deps-ova to setup packer, goss provisioner and other ansible galaxy collections
48 | WORKDIR images/capi
49 |
50 | # This version of ansible requires locale to be set explicitly.
51 | ENV LANG=en_US.UTF-8
52 | ENV LC_ALL=en_US.UTF-8
53 |
54 | RUN make deps-ova
55 |
56 | # Make sure packer, ansible and ovftool are installed properly
57 | RUN command -v packer
58 | RUN command -v ansible
59 | RUN command -v ovftool
60 |
61 | # Copy the image build script
62 | COPY build-ova.sh .
63 | RUN chmod +x build-ova.sh
64 |
65 | CMD ["./build-ova.sh"]
66 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | Mozilla Public License
2 | Version 2.0
3 |
4 | 1. Definitions
5 |
6 | 1.1. “Contributor”
7 | means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software.
8 |
9 | 1.2. “Contributor Version”
10 | means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor’s Contribution.
11 |
12 | 1.3. “Contribution”
13 | means Covered Software of a particular Contributor.
14 |
15 | 1.4. “Covered Software”
16 | means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof.
17 |
18 | 1.5. “Incompatible With Secondary Licenses”
19 | means
20 |
21 | that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or
22 |
23 | that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License.
24 |
25 | 1.6. “Executable Form”
26 | means any form of the work other than Source Code Form.
27 |
28 | 1.7. “Larger Work”
29 | means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software.
30 |
31 | 1.8. “License”
32 | means this document.
33 |
34 | 1.9. “Licensable”
35 | means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License.
36 |
37 | 1.10. “Modifications”
38 | means any of the following:
39 |
40 | any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or
41 |
42 | any new file in Source Code Form that contains any Covered Software.
43 |
44 | 1.11. “Patent Claims” of a Contributor
45 | means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version.
46 |
47 | 1.12. “Secondary License”
48 | means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses.
49 |
50 | 1.13. “Source Code Form”
51 | means the form of the work preferred for making modifications.
52 |
53 | 1.14. “You” (or “Your”)
54 | means an individual or a legal entity exercising rights under this License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity.
55 |
56 | 2. License Grants and Conditions
57 |
58 | 2.1. Grants
59 |
60 | Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license:
61 |
62 | under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and
63 |
64 | under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version.
65 |
66 | 2.2. Effective Date
67 |
68 | The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution.
69 |
70 | 2.3. Limitations on Grant Scope
71 |
72 | The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor:
73 |
74 | for any code that a Contributor has removed from Covered Software; or
75 |
76 | for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or
77 |
78 | under Patent Claims infringed by Covered Software in the absence of its Contributions.
79 |
80 | This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4).
81 |
82 | 2.4. Subsequent Licenses
83 |
84 | No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3).
85 |
86 | 2.5. Representation
87 |
88 | Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License.
89 |
90 | 2.6. Fair Use
91 |
92 | This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents.
93 |
94 | 2.7. Conditions
95 |
96 | Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1.
97 |
98 | 3. Responsibilities
99 |
100 | 3.1. Distribution of Source Form
101 |
102 | All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients’ rights in the Source Code Form.
103 |
104 | 3.2. Distribution of Executable Form
105 |
106 | If You distribute Covered Software in Executable Form then:
107 |
108 | such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and
109 |
110 | You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients’ rights in the Source Code Form under this License.
111 |
112 | 3.3. Distribution of a Larger Work
113 |
114 | You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s).
115 |
116 | 3.4. Notices
117 |
118 | You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies.
119 |
120 | 3.5. Application of Additional Terms
121 |
122 | You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction.
123 |
124 | 4. Inability to Comply Due to Statute or Regulation
125 |
126 | If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it.
127 |
128 | 5. Termination
129 |
130 | 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice.
131 |
132 | 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate.
133 |
134 | 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination.
135 |
136 | 6. Disclaimer of Warranty
137 |
138 | Covered Software is provided under this License on an “as is” basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer.
139 |
140 | 7. Limitation of Liability
141 |
142 | Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party’s negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You.
143 |
144 | 8. Litigation
145 |
146 | Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party’s ability to bring cross-claims or counter-claims.
147 |
148 | 9. Miscellaneous
149 |
150 | This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor.
151 |
152 | 10. Versions of the License
153 |
154 | 10.1. New Versions
155 |
156 | Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number.
157 |
158 | 10.2. Effect of New Versions
159 |
160 | You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward.
161 |
162 | 10.3. Modified Versions
163 |
164 | If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License).
165 |
166 | 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
167 |
168 | If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached.
169 |
170 | Exhibit A - Source Code Form License Notice
171 |
172 | This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
173 |
174 | If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice.
175 |
176 | You may add additional accurate notices of copyright ownership.
177 |
178 | Exhibit B - “Incompatible With Secondary Licenses” Notice
179 |
180 | This Source Code Form is “Incompatible With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | SHELL := /usr/bin/env bash -o errexit -o pipefail -o nounset
6 | MAKEFLAGS += -s
7 | .DEFAULT_GOAL := help
8 | .EXPORT_ALL_VARIABLES:
9 |
10 | # Default variables
11 | DEFAULT_ARTIFACTS_CONTAINER_PORT = 8081
12 | DEFAULT_PACKER_HTTP_PORT = 8082
13 | DEFAULT_IMAGE_BUILDER_BASE_IMAGE = library/photon:5.0
14 | MAKE_HELPERS_PATH = $(shell pwd)/hack/make-helpers
15 | SUPPORTED_VERSION_TEXT = $(shell pwd)/supported-version.txt
16 | DEFAULT_SUPPORTED_CONTEXT_JSON = $(shell pwd)/supported-context.json
17 |
18 | ifndef SUPPORTED_CONTEXT_JSON
19 | override SUPPORTED_CONTEXT_JSON = $(DEFAULT_SUPPORTED_CONTEXT_JSON)
20 | endif
21 |
22 | KUBERNETES_VERSION := $(shell cat ${SUPPORTED_VERSION_TEXT} | xargs)
23 |
24 | # Terminal colors
25 | clear=\033[0m
26 | green=\033[0;32m
27 |
28 | define LIST_SUPPORTED_OS_HELP_INFO
29 | # To list supported OS targets
30 | #
31 | # Example:
32 | # make list-supported-os
33 | endef
34 | .PHONY: list-supported-os
35 | ifeq ($(PRINT_HELP),y)
36 | list-supported-os:
37 | printf "$$green$$LIST_VERSIONS_HELP_INFO$$clear\n"
38 | else
39 | list-supported-os:
40 | $(MAKE_HELPERS_PATH)/list-supported-os.sh
41 | endif
42 |
43 | define RUN_ARTIFACTS_CONTAINER_HELP_INFO
44 | # Runs the artifacts container for a Kubernetes version.
45 | #
46 | # Arguments:
47 | # ARTIFACTS_CONTAINER_PORT: [Optional] container port, if not provided
48 | # defaults to $(DEFAULT_ARTIFACTS_CONTAINER_PORT)
49 | # Example:
50 | # make run-artifacts-container
51 | # make run-artifacts-container ARTIFACTS_CONTAINER_PORT=9090
52 | endef
53 | .PHONY: run-artifacts-container
54 | ifeq ($(PRINT_HELP),y)
55 | run-artifacts-container:
56 | printf "$$green$$RUN_ARTIFACTS_CONTAINER_HELP_INFO$$clear\n"
57 | else
58 | run-artifacts-container:
59 | $(MAKE_HELPERS_PATH)/run-artifacts-container.sh -p $(ARTIFACTS_CONTAINER_PORT) -k $(KUBERNETES_VERSION)
60 | endif
61 |
62 | define BUILD_IMAGE_BUILDER_CONTAINER_HELP_INFO
63 | # Creates a docker container image for building the TKGS OVA with dependencies
64 | # like packer, ansible and kubernetes image builder code.
65 | #
66 | # Example:
67 | # make build-image-builder-container
68 | endef
69 | .PHONY: build-image-builder-container
70 | ifeq ($(PRINT_HELP),y)
71 | build-image-builder-container:
72 | printf "$$green$$BUILD_IMAGE_BUILDER_CONTAINER_HELP_INFO$$clear\n"
73 | else
74 | build-image-builder-container:
75 | $(MAKE_HELPERS_PATH)/build-image-builder-container.sh
76 | endif
77 |
78 | define BUILD_NODE_IMAGE
79 | # To build vSphere Tanzu Kubernetes Grid compliant node images
80 | #
81 | # Arguments:
82 | # OS_TARGET: [Required] Node Image OS target, to see list of supported OS target
83 | # use "make list-supported-os".
84 | # TKR_SUFFIX: [Required] TKR suffix for the generated Node image, this can be used to
85 | # distinguish different node images.
86 | # IMAGE_ARTIFACTS_PATH: [Required] Node image OVA and packer logs output folder.
87 | # HOST_IP: [Required] IP Address of host where artifact container is running.
88 | # ARTIFACTS_CONTAINER_PORT: [Optional] Artifacts container port, defaults to $(DEFAULT_ARTIFACTS_CONTAINER_PORT)
89 | # PACKER_HTTP_PORT: [Optional] Port used by Packer HTTP server for hosting the Preseed/Autoinstall files,
90 | # defaults to $(DEFAULT_PACKER_HTTP_PORT).
91 | #
92 | # Example:
93 | # make build-node-image OS_TARGET=photon-3 TKR_SUFFIX=byoi HOST_IP=1.2.3.4 IMAGE_ARTIFACTS_PATH=$(HOME)/image
94 | # make build-node-image OS_TARGET=photon-3 TKR_SUFFIX=byoi HOST_IP=1.2.3.4 IMAGE_ARTIFACTS_PATH=$(HOME)/image ARTIFACTS_CONTAINER_PORT=9090 PACKER_HTTP_PORT=9091
95 | endef
96 | .PHONY: build-node-image
97 | ifeq ($(PRINT_HELP),y)
98 | build-node-image:
99 | printf "$$green$$BUILD_NODE_IMAGE$$clear\n"
100 | else
101 | build-node-image: build-image-builder-container
102 | $(MAKE_HELPERS_PATH)/build-node-image.sh
103 | endif
104 |
105 | define CLEAN_CONTAINERS_HELP_INFO
106 | # To stop and remove BYOI-related docker containers
107 | #
108 | # Arguments:
109 | # LABEL: [Optional] To delete containers selectively based on labels
110 | # When docker containers are created they are labeled with the below keys: both artifacts and
111 | # image builder containers get the "byoi" label key. Artifacts containers additionally get the
112 | # "byoi_artifacts" and Kubernetes version label keys. Image builder containers additionally get
113 | # the "byoi_image_builder", Kubernetes version, and OS target label keys.
114 | # Example:
115 | # make clean-containers
116 | # make clean-containers LABEL=byoi
117 | # make clean-containers LABEL=byoi_artifacts
118 | # make clean-containers LABEL=byoi_image_builder
119 | # make clean-containers LABEL=v1.23.15+vmware.1
120 | # make clean-containers LABEL=photon-3
121 | endef
122 | .PHONY: clean-containers
123 | ifeq ($(PRINT_HELP),y)
124 | clean-containers:
125 | printf "$$green$$CLEAN_CONTAINERS_HELP_IFO$$clear\n"
126 | else
127 | clean-containers:
128 | $(MAKE_HELPERS_PATH)/clean-containers.sh
129 | endif
130 |
131 | define CLEAN_IMAGE_ARTIFACTS_HELP_INFO
132 | # To clean the artifacts generated by the image builder container
133 | #
134 | # Arguments:
135 | # IMAGE_ARTIFACTS_PATH: [Required] Node image OVA and packer logs output folder.
136 | #
137 | # Example:
138 | # make clean-image-artifacts IMAGE_ARTIFACTS_PATH=$(HOME)/image-artifacts
139 | endef
140 | .PHONY: clean-image-artifacts
141 | ifeq ($(PRINT_HELP),y)
142 | clean-image-artifacts:
143 | printf "$$green$$CLEAN_IMAGE_ARTIFACTS_HELP_INFO$$clear\n"
144 | else
145 | clean-image-artifacts:
146 | $(MAKE_HELPERS_PATH)/clean-image-artifacts.sh
147 | endif
148 |
149 | define CLEAN_HELP_INFO
150 | # To clean the artifacts and containers generated or created when building the image
151 | #
152 | # Arguments:
153 | # IMAGE_ARTIFACTS_PATH: [Required] Node image OVA and packer logs output folder.
154 | # LABEL: [Optional] To delete containers selectively based on labels
155 | # When docker containers are created they are labeled with the below keys: both artifacts and
156 | # image builder containers get the "byoi" label key. Artifacts containers additionally get the
157 | # "byoi_artifacts" and Kubernetes version label keys. Image builder containers additionally get
158 | # the "byoi_image_builder", Kubernetes version, and OS target label keys.
159 | #
160 | # Example:
161 | # make clean IMAGE_ARTIFACTS_PATH=$(HOME)/image-artifacts
162 | # make clean IMAGE_ARTIFACTS_PATH=$(HOME)/image-artifacts LABEL=byoi
163 | endef
164 | .PHONY: clean
165 | ifeq ($(PRINT_HELP),y)
166 | clean:
167 | printf "$$green$$CLEAN_HELP_INFO$$clear\n"
168 | else
169 | clean: clean-containers clean-image-artifacts
170 | endif
171 |
172 | define HELP_INFO
173 | # Use to list supported Kubernetes versions and the corresponding supported OS targets
174 | #
175 | # Example:
176 | # make
177 | # make help
178 | endef
179 | .PHONY: help
180 | ifeq ($(PRINT_HELP),y)
181 | help:
182 | printf "$$green$$HELP_INFO$$clear\n"
183 | else
184 | help: ## help
185 | $(MAKE_HELPERS_PATH)/make-help.sh
186 | endif
--------------------------------------------------------------------------------
/NOTICE.txt:
--------------------------------------------------------------------------------
1 | Copyright 2022 VMware, Inc.
2 |
3 | This product is licensed to you under the Mozilla Public License, V2.0 (the "License"). You may not use this product except in compliance with the License.
4 |
5 | This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # vSphere Tanzu Kubernetes Grid Image Builder
2 |
3 | vSphere Tanzu Kubernetes Grid Image Builder provides tooling that can be used to build node images for use with [vSphere with Tanzu][vsphere-with-tanzu].
4 |
5 | ## Content
6 |
7 | - [Prerequisites](#prerequisites)
8 | - [Building Images](#building-images)
9 | - [Make Targets](#make-targets)
10 | - [Customization Examples](#customizations-examples)
11 | - [Debugging](#debugging)
12 | - [Contributing](#contributing)
13 | - [License](#license)
14 | - [Support](#support)
15 |
16 | ## Prerequisites
17 |
18 | Below are the prerequisites for building the node images
19 |
20 | - vSphere Environment version >= 8.0
21 | - DHCP configured for vCenter (required by Packer)
22 | - `jq` version >= `1.6`
23 | - `make` version >= `4.2.1`
24 | - `docker` version >= `20.10.21`
25 | - Linux environment should have the below utilities available on the system
26 | - [Docker](https://www.docker.com/)
27 | - [GNU Make](https://www.gnu.org/software/make/)
28 | - [jq](https://stedolan.github.io/jq/)
29 |
30 | ## Building Images
31 |
32 | ![Demo of building a node image](docs/files/demo.gif)
33 |
34 | - Clone this repository on the Linux environment used for building the image.
35 | - Update the vSphere environment details like vCenter IP, Username, Password, etc. in [vsphere.j2](packer-variables/vsphere.j2)
36 | - For details on the permissions required for the user please refer to the packer [vsphere-iso documentation](https://developer.hashicorp.com/packer/plugins/builders/vsphere/vsphere-iso#required-vsphere-privileges).
37 | - To identify the Kubernetes version supported by the branch, check the version information provided in the [supported version file][supported-version].
38 | - Run the artifacts container using `make run-artifacts-container`.
39 |   - The default port used by the artifacts container is `8081`, but this can be configured using the `ARTIFACTS_CONTAINER_PORT` parameter.
40 | - Run the image-builder container to build the node image (use the `make build-node-image` target).
41 |   - The default port used by the image-builder container is `8082`, but this can be configured using the `PACKER_HTTP_PORT` parameter.
42 | - Once the OVA is generated upload the OVA to a [content library][vm-admin-guide] used by the supervisor.
43 | - To clean the containers and artifacts use the `make clean` target.
44 |
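Putting the steps above together, a minimal end-to-end run might look like the following (a sketch only; the OS target, host IP, ports, and output path are placeholders for your environment):

```bash
# Serve the Kubernetes artifacts (defaults to port 8081)
make run-artifacts-container

# Build the node image; HOST_IP must be reachable from the vSphere environment
make build-node-image OS_TARGET=photon-5 TKR_SUFFIX=byoi \
    HOST_IP=1.2.3.4 IMAGE_ARTIFACTS_PATH=$HOME/image

# After uploading the generated OVA to a content library, clean up
make clean IMAGE_ARTIFACTS_PATH=$HOME/image
```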
45 | ## Supported Kubernetes Version
46 |
47 | [supported-version.txt](supported-version.txt) holds information about the supported Kubernetes release version.
48 |
49 | [supported-context.json](supported-context.json) holds information about the context for the supported Kubernetes versions which includes supported OS targets along with the artifacts container image URL. This file will be updated when a new Kubernetes version is supported by the **vSphere Tanzu** team.
50 |
51 | ## Make targets
52 |
53 | ### Help
54 |
55 | - `make help` Provides help information about different make targets
56 |
57 | ```bash
58 | make
59 | make help
60 | ```
61 |
62 | ### Clean
63 |
64 | There are three different clean targets to clean the containers or artifacts generated during the process or both.
65 |
66 | - `make clean-containers` is used to stop and remove the artifacts container, the image builder container, or both.
67 |   - During container creation, all containers related to BYOI are labelled with `byoi`.
68 |   - The artifacts container additionally has `byoi_artifacts` and the Kubernetes version as labels.
69 |   - The image builder container additionally has `byoi_image_builder`, the Kubernetes version, and the OS target as labels.
70 |
71 | ```bash
72 | make clean-containers PRINT_HELP=y # To show the help information for this target
73 | make clean-containers # To clean all the artifacts and image-builder containers
74 | make clean-containers LABEL=byoi_artifacts # To remove artifact containers
75 | ```
76 |
77 | - `make clean-image-artifacts` is used to remove the image artifacts like OVAs and packer log files.
78 |
79 | ```bash
80 | make clean-image-artifacts PRINT_HELP=y # To show help information for this target
81 | make clean-image-artifacts IMAGE_ARTIFACTS_PATH=/root/artifacts/ # To clean the image artifacts in a folder
82 | ```
83 |
84 | - `make clean` is a combination of `clean-containers` and `clean-image-artifacts` that cleans both containers and image artifacts
85 |
86 | ```bash
87 | make clean PRINT_HELP=y # To show the help information for this target
88 | make clean IMAGE_ARTIFACTS_PATH=/root/artifacts/ # To clean image artifacts and containers
89 | make clean IMAGE_ARTIFACTS_PATH=/root/artifacts/ LABEL=byoi_image_builder # To clean image artifacts and image builder containers
90 | ```
91 |
92 | ### Image Building
93 |
94 | - `make run-artifacts-container` is used to run the artifacts container for a Kubernetes version on a particular port.
95 |   - The artifacts image URL is fetched from [supported-context.json](supported-context.json).
96 |   - By default the artifacts container uses port `8081`, but this can be configured through the `ARTIFACTS_CONTAINER_PORT` parameter.
97 |
98 | ```bash
99 | make run-artifacts-container PRINT_HELP=y # To show the help information for this target
100 | make run-artifacts-container ARTIFACTS_CONTAINER_PORT=9090 # To run the supported Kubernetes version's artifacts container on port 9090
101 | ```
102 |
103 | - `make build-image-builder-container` is used to build the image builder container locally with all the dependencies like `Packer`, `Ansible`, and `OVF Tool`.
104 |
105 | ```bash
106 | make build-image-builder-container PRINT_HELP=y # To show the help information for this target.
107 | make build-image-builder-container # To create the image builder container.
108 | ```
109 |
110 | - `make build-node-image` is used to build the vSphere Tanzu compatible node image for a Kubernetes version.
111 |   - The host IP is required to pull the required Carvel packages during the image build process. The default artifacts container port is `8081`, which can be configured through `ARTIFACTS_CONTAINER_PORT`.
112 |   - The TKR (Tanzu Kubernetes Release) suffix is used to distinguish images built on the same version for different purposes. The maximum suffix length is 8 characters.
113 |
114 | ```bash
115 | make build-node-image PRINT_HELP=y # To show the help information for this target.
116 | make build-node-image OS_TARGET=photon-5 TKR_SUFFIX=byoi HOST_IP=1.2.3.4 IMAGE_ARTIFACTS_PATH=/Users/image ARTIFACTS_CONTAINER_PORT=9090 # Create photon-5 based Kubernetes node image
117 | ```
118 |
119 | ## Customizations Examples
120 |
121 | Sample customization examples can be found [here](docs/examples/README.md).
122 |
123 | For Windows support, refer to the [Windows tutorial](docs/windows.md).
124 |
125 | ## Debugging
126 |
127 | - To enable debugging for the [make file scripts](hack/make-helpers/), export `DEBUGGING=true`.
128 | - Debug logs are enabled by default on the image builder container and can be viewed through the `docker logs -f <container>` command.
129 | - Packer logs can be found at `/logs/packer-<...>.log`, which is helpful when debugging issues.
130 |
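For example (a sketch only; the container name is a placeholder):

```bash
# Verbose output from the hack/make-helpers scripts
export DEBUGGING=true

# Find the BYOI containers by label, then follow the image builder container's logs
docker ps --filter label=byoi
docker logs -f <image-builder-container>
```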
131 | ## Contributing
132 |
133 | The vSphere Tanzu Kubernetes Grid Image Builder project team welcomes contributions from the community. Before you start working with VMware Image Builder, please read our [Developer Certificate of Origin][dco]. All contributions to this repository must be signed as described on that page. Your signature certifies that you wrote the patch or have the right to pass it on as an open-source patch. For more detailed information, please refer to [CONTRIBUTING][contributing].
134 |
135 | ## License
136 |
137 | This project is available under the [Mozilla Public License, V2.0][project-license].
138 |
139 | ## Support
140 |
141 | VMware will support issues with the vSphere Tanzu Kubernetes Grid Image Builder, but you are responsible for any issues relating to your image customizations and custom applications. You can open VMware Support cases for TKG clusters built with a custom Tanzu Kubernetes release image, however, VMware Support will be limited to best effort only, with VMware Support having full discretion over how much effort to put in to troubleshooting. On opening a case with VMware Support regarding any issue with a cluster built with a custom Tanzu Kubernetes release image, VMware Support asks that you provide support staff with the exact changes made to the base image.
142 |
143 | [//]: Links
144 |
145 | [contributing]: CONTRIBUTING.md
146 | [dco]: https://cla.vmware.com/dco
147 | [project-license]: LICENSE.txt
148 | [vsphere-with-tanzu]: https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere-supervisor/8-0/vsphere-supervisor-services-and-workloads-8-0.html
149 | [supported-version]: ./supported-version.txt
150 | [vm-admin-guide]: https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere/8-0/vsphere-virtual-machine-administration-guide-8-0.html
151 |
--------------------------------------------------------------------------------
/ansible-windows/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2024 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 | ---
4 | registry_install_path: "C:/registry"
5 | registry_root_directory: "C:/storage/container-registry"
6 | registry_log_directory: "C:/var/log/registry"
7 | goss_download_path: "C:\\tmp"
8 |
--------------------------------------------------------------------------------
/ansible-windows/tasks/common.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2024 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 |
4 | - name: Copy goss binary
5 | win_shell: cp "{{ additional_executables_destination_path }}\\goss.exe" "{{ goss_download_path }}"
6 |
--------------------------------------------------------------------------------
/ansible-windows/tasks/containerd.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2024 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 |
4 | - name: Ensure containerd conf.d
5 | ansible.windows.win_file:
6 | path: "{{ item }}"
7 | state: directory
8 | with_items:
9 | - "{{ programfiles.stdout | trim }}\\containerd\\conf.d"
10 |
11 | - name: Copy containerd config file {{ containerd_config_file }}
12 | ansible.windows.win_template:
13 | dest: "{{ programfiles.stdout | trim }}\\containerd\\config.toml"
14 | src: "containerd/config.toml"
15 | vars:
16 | allusersprofile: "{{ alluserprofile.stdout | trim }}"
17 | plugin_bin_dir: "{{ systemdrive.stdout | trim }}/opt/cni/bin"
18 | plugin_conf_dir: "{{ systemdrive.stdout | trim }}/etc/cni/net.d"
19 | # programfiles is C:\Program Files, but should be C:\\Program Files
20 | # otherwise task Register Containerd fails with "invalid escape sequence: \P"
21 | containerd_conf_dir: '{{ programfiles.stdout | trim | regex_replace("\\", "\\\\") }}\\\\containerd'
22 |
23 | - name: Remove crictl.exe
24 | ansible.windows.win_file:
25 | path: "{{ programfiles.stdout | trim }}\\containerd\\crictl.exe"
26 | state: absent
27 |
--------------------------------------------------------------------------------
/ansible-windows/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2024 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 |
4 | - import_tasks: common.yml
5 |
6 | - import_tasks: containerd.yml
7 |
8 | - import_tasks: registry.yml
9 |
--------------------------------------------------------------------------------
/ansible-windows/tasks/registry.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2024 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 |
4 | # - name: Generate random password
5 | # win_random_string:
6 | # length: 16
7 | # special: no
8 | # numeric: yes
9 | # upper: yes
10 | # lower: yes
11 | # param: registry_secret
12 | # register: registry_secret_result
13 |
14 | - name: Create registry directory structure
15 | ansible.windows.win_file:
16 | path: "{{ item }}"
17 | state: directory
18 | mode: 0644
19 | with_items:
20 | - "{{ registry_install_path | trim }}"
21 | - "{{ registry_root_directory | trim }}"
22 | - "{{ registry_log_directory | trim }}"
23 |
24 | - name: Copy registry binary to registry directory
25 | win_shell: cp "{{ additional_executables_destination_path | trim }}\\registry.exe" "{{ registry_install_path | trim }}\\registry.exe"
26 |
27 | - name: Create registry config
28 | ansible.windows.win_template:
29 | src: registry/config.yml
30 | dest: "{{ registry_install_path | trim }}\\config.yml"
31 |
32 | - name: Install registry via nssm
33 | community.windows.win_nssm:
34 | name: registry
35 | start_mode: auto
36 | state: started
37 | application: "{{ registry_install_path | trim }}\\registry.exe"
38 | arguments: "serve {{ registry_install_path | trim }}\\config.yml"
39 | app_rotate_bytes: 10485760
40 | stderr_file: "{{ registry_log_directory | trim }}\\registry.err.log"
41 | stdout_file: "{{ registry_log_directory | trim }}\\registry.log"
42 | app_rotate_online: 1
43 |
44 | - name: Download registry store tar archive
45 | ansible.windows.win_get_url:
46 | url: "{{ registry_store_archive_url | trim }}"
47 | dest: "{{ registry_root_directory | trim }}/registry.tar.gz"
48 | retries: 5
49 | delay: 3
50 |
51 | - name: Unpack registry root directory
52 | ansible.windows.win_command: cmd /c tar -zxvf "{{ registry_root_directory | trim }}/registry.tar.gz" -C "{{ registry_root_directory | trim }}"
53 |
54 | - name: Remove registry store tar archive
55 | ansible.windows.win_file:
56 | path: "{{ registry_root_directory | trim }}/registry.tar.gz"
57 | state: absent
58 |
--------------------------------------------------------------------------------
/ansible-windows/templates/containerd/config.toml:
--------------------------------------------------------------------------------
1 | # Copyright 2020 The Kubernetes Authors.
2 |
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 |
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 |
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | root = "{{ allusersprofile }}\\containerd\\root"
16 | state = "{{ allusersprofile }}\\containerd\\state"
17 | version = 2
18 |
19 | {% if 'imports' not in containerd_additional_settings | b64decode %}
20 | imports = ["{{ containerd_conf_dir }}\\conf.d\\*.toml"]
21 | {% endif %}
22 |
23 | [grpc]
24 | address = "\\\\.\\pipe\\containerd-containerd"
25 |
26 | [plugins]
27 | [plugins."io.containerd.grpc.v1.cri"]
28 | sandbox_image = "{{ pause_image }}"
29 | [plugins."io.containerd.grpc.v1.cri".cni]
30 | bin_dir = "{{ plugin_bin_dir }}"
31 | conf_dir = "{{ plugin_conf_dir }}"
32 | [plugins."io.containerd.grpc.v1.cri".registry]
33 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
34 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
35 | endpoint = ["https://registry-1.docker.io"]
36 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
37 | endpoint = ["http://localhost:5000"]
38 | {% if packer_builder_type.startswith('azure') %}
39 | [plugins."io.containerd.grpc.v1.cri".registry.headers]
40 | X-Meta-Source-Client = ["azure/capz"]
41 | {% endif %}
42 |
43 | {{containerd_additional_settings | b64decode}}
44 |
--------------------------------------------------------------------------------
/ansible-windows/templates/registry/config.yml:
--------------------------------------------------------------------------------
1 | version: 0.1
2 | log:
3 | level: debug
4 | fields:
5 | service: registry
6 | storage:
7 | filesystem:
8 | rootdirectory: "{{ registry_root_directory }}" # Linux node uses /var/lib/registry
9 | maintenance:
10 | uploadpurging:
11 | enabled: false
12 | delete:
13 | enabled: true
14 | http:
15 | addr: :5000
16 | secret: placeholder
17 | debug:
18 | addr: localhost:5001
19 |
--------------------------------------------------------------------------------
/ansible/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 | ---
5 | # Photon OS does not provide backward compatibility with the legacy distro behavior for sysctl.conf
6 | # by default, because it uses systemd-sysctl. Set this variable so we can pick the right sysctl conf file path.
7 | sysctl_conf_file: "{{ '/etc/sysctl.d/99-sysctl.conf' if ansible_os_family == 'VMware Photon OS' else '/etc/sysctl.conf' }}"
8 |
9 | photon_va_hardening_url: "{{ artifacts_container_url }}/artifacts/photon_hardening.tar.gz"
10 | va_hardening_rpm_version: "3.0"
11 | va_hardening_rpm_release: "{{ imageVersion }}"
12 | carvel_tools: "/tmp/carvel-tools"
13 | registry_root_directory: "/storage/container-registry"
14 | systemd_networkd_update_initramfs: >-
15 | {%- if ansible_os_family == 'VMware Photon OS' -%}
16 | dracut -f
17 | {%- elif ansible_os_family == 'Debian' -%}
18 | update-initramfs -u
19 | {%- endif -%}
20 | enable_ubuntu_hwe: "False"
21 | journald_system_max_use: 2G
22 |
--------------------------------------------------------------------------------
/ansible/files/etc/crictl.yaml:
--------------------------------------------------------------------------------
1 | runtime-endpoint: unix:///run/containerd/containerd.sock
2 | image-endpoint: unix:///run/containerd/containerd.sock
3 | timeout: 10
4 | debug: false
5 |
--------------------------------------------------------------------------------
/ansible/files/etc/hosts:
--------------------------------------------------------------------------------
1 | ::1 localhost ip6-localhost ip6-loopback
2 | 127.0.0.1 localhost localhost.local
--------------------------------------------------------------------------------
/ansible/files/etc/sysctl.d/80-bridged-net-traffic.conf:
--------------------------------------------------------------------------------
1 | # Bridged network traffic.
2 | # Pass bridged network traffic onto iptables chains (so that iptables proxy
3 | # functionality, relied on by kube-proxy, can work), see more at:
4 | # https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/#network-plugin-requirements
5 | net.bridge.bridge-nf-call-iptables=1
6 |
7 | # enable IP Forwarding by default
8 | net.ipv4.ip_forward=1
9 |
10 | # GCM-3801 antrea optimization
11 | net.ipv4.tcp_limit_output_bytes=524290
12 |
--------------------------------------------------------------------------------
/ansible/files/etc/systemd/system/containerd.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=containerd container runtime
3 | Documentation=https://containerd.io
4 | After=remote-fs.target
5 |
6 | [Service]
7 | ExecStartPre=/sbin/modprobe overlay
8 | ExecStart=/usr/local/bin/containerd
9 | Restart=always
10 | RestartSec=5
11 | Delegate=yes
12 | KillMode=process
13 | OOMScoreAdjust=-999
14 | LimitNOFILE=1048576
15 | # Having non-zero Limit*s causes performance problems due to accounting overhead
16 | # in the kernel. We recommend using cgroups to do container-local accounting.
17 | LimitNPROC=infinity
18 | LimitCORE=infinity
19 |
20 | [Install]
21 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/ansible/files/etc/systemd/system/vmtoolsd.service.d/afterdbus.conf:
--------------------------------------------------------------------------------
1 | [Unit]
2 | After=dbus.service
3 |
--------------------------------------------------------------------------------
/ansible/files/etc/systemd/system/vmtoolsd.service.d/autorestart.conf:
--------------------------------------------------------------------------------
1 | [Service]
2 | Restart=on-failure
--------------------------------------------------------------------------------
/ansible/files/etc/vmware-tools/tools.conf:
--------------------------------------------------------------------------------
1 | [deployPkg]
2 | enable-custom-scripts = true
3 |
4 | [guestinfo]
5 | exclude-nics=antrea-gw*
6 |
--------------------------------------------------------------------------------
/ansible/files/ip6tables.rules:
--------------------------------------------------------------------------------
1 | *filter
2 | -P INPUT DROP
3 | -P FORWARD DROP
4 | -P OUTPUT DROP
5 | -A INPUT -i lo -j ACCEPT
6 | -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
7 | -A INPUT -p tcp -m tcp --dport 22 -j ACCEPT
8 | -A INPUT -p ipv6-icmp -j ACCEPT
9 | -A INPUT -p udp -m udp --sport 547 --dport 546 -j ACCEPT
10 | -A OUTPUT -j ACCEPT
11 | COMMIT
12 |
--------------------------------------------------------------------------------
/ansible/files/iptables.rules:
--------------------------------------------------------------------------------
1 | # init
2 | *filter
3 | :INPUT DROP [0:0]
4 | :FORWARD DROP [0:0]
5 | :OUTPUT DROP [0:0]
6 | # Allow local-only connections
7 | -A INPUT -i lo -j ACCEPT
8 | -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
9 | -A INPUT -p tcp -m tcp --dport 22 -j ACCEPT
10 | -A OUTPUT -j ACCEPT
11 | -P INPUT DROP
12 | -A INPUT -p tcp -m multiport --dports 30000:32767 -j ACCEPT
13 | -A INPUT -p tcp -m multiport --dports 6443,10250,2379,2380,179,22,10349,10350,10351,10100 -j ACCEPT
14 | -A INPUT -p udp -m udp --dport 6081 -j ACCEPT
15 | COMMIT
16 |
--------------------------------------------------------------------------------
/ansible/files/scripts/docker_registry_ops.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import json
3 | import subprocess
4 |
5 | IMAGE_NAME = "docker.io/vmware/docker-registry"
6 | PAUSE_IMAGE_NAME = "localhost:5000/vmware.io/pause"
7 | LABEL = "io.cri-containerd.pinned=pinned"
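# CLI usage (see main() below):
#   python3 docker_registry_ops.py --version   # print the tag of the local docker-registry image
#   python3 docker_registry_ops.py --pin       # pin the registry and pause images in containerd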
8 |
9 | def get_image_version(image_name):
10 | cp = subprocess.run(["crictl", "images", "-o", "json"], capture_output=True, text=True)
11 | cp.check_returncode()
12 | images = json.loads(cp.stdout)["images"]
13 | for image in images:
14 | for repo_tag in image["repoTags"]:
15 | if repo_tag.startswith(image_name):
16 | return repo_tag.split(":")[-1]
17 | else:
18 | raise Exception(f"No image with name {image_name} found")
19 |
20 | def get_registry_version():
21 | return get_image_version(IMAGE_NAME)
22 |
23 | def get_pause_version():
24 | return get_image_version(PAUSE_IMAGE_NAME)
25 |
26 | def apply_label(image):
27 | subprocess.run(["ctr", "-n", "k8s.io", "images", "label", image, LABEL], check=True)
28 |
29 | def pin_image():
30 | image_name_with_version = IMAGE_NAME + ":" + get_registry_version()
31 | apply_label(image_name_with_version)
32 |
33 | pause_image_name_with_version = PAUSE_IMAGE_NAME + ":" + get_pause_version()
34 | apply_label(pause_image_name_with_version)
35 |
36 | def main():
37 | parser = argparse.ArgumentParser(
38 |         description='Operations on the local docker-registry image (print version or pin images)')
39 | parser.add_argument('--version',
40 | action='store_true',
41 | help='Print version of docker-registry image')
42 | parser.add_argument('--pin',
43 | action='store_true',
44 | help='Pin image by applying label io.cri-containerd.pinned=pinned')
45 |
46 | args = parser.parse_args()
47 | if args.version:
48 | print(get_registry_version())
49 | elif args.pin:
50 | pin_image()
51 |
52 |
53 | if __name__ == '__main__':
54 | main()
55 |
--------------------------------------------------------------------------------
/ansible/files/scripts/image_retag.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import subprocess
3 | import time
4 | import logging
5 | import re
6 |
7 | logging.basicConfig(format='%(message)s', level=logging.DEBUG)
8 |
9 |
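# Overall flow implemented below: look up the vmware/docker-registry image, run it
# as a temporary local registry via ctr, retag the core Kubernetes images under
# localhost:5000/vmware.io/ and push them to that registry, then stop the registry
# and remove the old tags.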
10 | class Retag():
11 | def __init__(self, k8sSemver, dockerVersion, family):
12 | self.k8sSemver = k8sSemver
13 |         self.k8sSeries = re.match(r'^([0-9]+\.[0-9]+)', k8sSemver[1:]).group(1)
14 | self.dockerVersion = dockerVersion
15 | logging.info(f"dockerVersion: {dockerVersion}")
16 |
17 | self.ctrPrefix = "ctr -n k8s.io images "
18 | self.newImagePrefix = ["localhost:5000/vmware.io/"]
19 | self.target = "ubuntu" if family == "Debian" else "photon"
20 | self.imageList = self.listImages()
21 | logging.info(f"Existing images: {self.imageList}")
22 |
23 | self.docker()
24 | registry = localRegistry(self.dockerVersion)
25 | registry.start()
26 |
27 | self.k8s()
28 |
29 | registry.stop()
30 |
31 | logging.info("Retagged images list:")
32 | logging.info(self.listImages())
33 |
34 | # gccp & gcauth are considered specialPrefix
35 | # ex. `localhost:5000/vmware.io/guest-cluster-cloud-provider:0.1-93-gb26e653`
36 | # All other images are not
37 | # ex. `vmware.io/csi-attacher:v3.2.1_vmware.1`
38 | def getImageInfo(self, imageHint, specialPrefix=False):
39 | for image in self.imageList:
40 | if imageHint in image:
41 | imageVersion = image.split(':')[-1]
42 | if specialPrefix:
43 | imagePrefix = ":".join(image.split(':')[0:-1])
44 | else:
45 | imagePrefix = image.split(':')[0]
46 | logging.info(f"ImageInfo: {image}, {imagePrefix}, {imageVersion}")
47 | return image, imagePrefix, imageVersion
48 |
49 | logging.info(f"No image found for {imageHint}")
50 |
51 | def listImages(self):
52 | cmd = f"{self.ctrPrefix} ls -q"
53 | output = subprocess.run(cmd, check=True, shell=True, capture_output=True)
54 | imageList = output.stdout.decode().split()
55 | return imageList
56 |
57 | def retagAndPush(self, oldTag, newTag, push=True):
58 | retag = f"{self.ctrPrefix} tag --force {oldTag} {newTag}"
59 | subprocess.run(retag, check=True, shell=True)
60 | logging.info(f"Retagged {oldTag} -> {newTag}")
61 |
62 | if push:
63 | pushCmd = f"{self.ctrPrefix} push --plain-http {newTag} {oldTag}"
64 | subprocess.run(pushCmd, check=True, shell=True)
65 | logging.info(f"Pushed {newTag}")
66 |
67 | def removeImage(self, oldTag):
68 | cmd = f"{self.ctrPrefix} rm {oldTag}"
69 | subprocess.run(cmd, check=True, shell=True)
70 | logging.info(f"Removed {oldTag}")
71 |
72 | def docker(self):
73 | dockerImages = [f"docker.io/vmware/docker-registry:{self.dockerVersion}"]
74 |
75 | for image in dockerImages:
76 | oldImage, oldPrefix, imageVersion = self.getImageInfo(image)
77 | newTag = f'{oldPrefix}:{self.dockerVersion}'
78 | self.retagAndPush(oldImage, newTag, push=False)
79 | self.removeImage(oldImage)
80 |
81 | def k8s(self):
82 | k8sImages = [
83 | "coredns", "etcd", "kube-apiserver", "pause",
84 | "kube-controller-manager", "kube-proxy", "kube-scheduler"
85 | ]
86 |
87 | for image in k8sImages:
88 | for prefix in self.newImagePrefix:
89 | oldImage, oldPrefix, imageVersion = self.getImageInfo(image)
90 | newTag = f'{prefix}{"/".join(oldPrefix.split("/")[2:])}:{imageVersion}'
91 | self.retagAndPush(oldImage, newTag)
92 | self.removeImage(oldImage)
93 |
94 |
95 | class localRegistry():
96 | def __init__(self, dockerVersion):
97 | self.dockerVersion = dockerVersion
98 | self.ctrPrefix = "ctr -n k8s.io "
99 | self.ctrRunOptions = "run -d --null-io --net-host "
100 |
101 | self.registryImage = f"docker.io/vmware/docker-registry:{self.dockerVersion}"
102 |
103 | self.imageName = "docker-registry"
104 | self.mountSrc = "/storage/container-registry"
105 | self.mountDst = "/var/lib/registry"
106 | self.mountOptions = "rbind:rw"
107 | self.imageCmd = "/bin/registry serve /etc/docker/registry/config.yaml"
108 |
109 | def start(self):
110 | cmd = f"{self.ctrPrefix} {self.ctrRunOptions} " \
111 | f"--mount type=bind,src={self.mountSrc},dst={self.mountDst},options={self.mountOptions} " \
112 | f"{self.registryImage} {self.imageName} " \
113 | f"{self.imageCmd}"
114 | subprocess.run(cmd, check=True, shell=True)
115 | logging.info(f"Docker registry started with {self.registryImage}")
116 |
117 | def stop(self):
118 | taskKill = f"{self.ctrPrefix} task kill {self.imageName}"
119 | rmContainer = f"{self.ctrPrefix} containers rm {self.imageName}"
120 |
121 | subprocess.run(taskKill, check=True, shell=True)
122 | time.sleep(3)
123 | subprocess.run(rmContainer, check=False, shell=True)
124 | logging.info("Docker registry stopped")
125 |
126 |
127 | def main():
128 | parser = argparse.ArgumentParser()
129 | parser.add_argument('--k8sSemver')
130 | parser.add_argument('--dockerVersion')
131 | parser.add_argument('--family')
132 | args = parser.parse_args()
133 |
134 | Retag(args.k8sSemver, args.dockerVersion, args.family)
135 |
136 |
137 | if __name__ == "__main__":
138 | main()
139 |
--------------------------------------------------------------------------------
/ansible/files/scripts/ova-custom-patch.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 | set -x
7 |
8 | add_panic_mount_option() {
9 | # Adding disk mount option errors=panic to root disk mount options
10 | # so that if there was a disk failure, a panic will be triggered,
11 | # and we also set kernel.panic to 10 which will cause the VM to restart
12 | # automatically after disk failure after 10 seconds.
13 | # More specifically, this function makes changes to /etc/fstab where
14 | # PARTUUID=47c08993-cc75-4650-b36d-b2c3c5738d66 / ext4 defaults,barrier,noatime,noacl,data=ordered 1 1
15 | # becomes
16 | # PARTUUID=47c08993-cc75-4650-b36d-b2c3c5738d66 / ext4 defaults,barrier,noatime,noacl,data=ordered,errors=panic 1 1
17 | panic_option="errors=panic"
18 | # grep for "/{tab}ext4"
19 | current_options=$(cat /etc/fstab | grep -G "/[ $(printf '\t')]*ext4" | head -1 | awk -F' ' '{print $4}')
20 | # somehow there is no root disk mounted
21 | [[ "$current_options" == "" ]] && return
22 | # if panic_option already exists, return
23 | echo $current_options | grep $panic_option >/dev/null 2>&1 && [[ "$?" == "0" ]] && return
24 | new_options="$current_options,$panic_option"
25 | sed -i "s/$current_options/$new_options/g" /etc/fstab
26 | }
27 |
28 | add_panic_mount_option
29 |
--------------------------------------------------------------------------------
/ansible/files/scripts/utkg_download_carvel_packages.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import subprocess
3 |
4 |
5 | def download_image_from_artifactory(image_path, localhost_image_path):
6 | image_path_list = image_path.split(',')
7 | localhost_image_path_list = localhost_image_path.split(',')
8 | try:
9 | for i in range(len(image_path_list)):
10 | image_path_list[i] = image_path_list[i].strip()
11 | localhost_image_path_list[i] = localhost_image_path_list[i].strip()
12 | download_tar = 'wget ' + image_path_list[i]
13 | subprocess.check_output(['bash', '-c', download_tar])
14 | tar_file_name = image_path_list[i].split('/')[-1]
15 | carvel_tools = '/tmp/carvel-tools/imgpkg copy --tar ' + tar_file_name + ' --to-repo ' + \
16 | localhost_image_path_list[i]
17 | subprocess.check_output(['bash', '-c', carvel_tools])
18 | except Exception as e:
19 | raise Exception("Unable to download carvel package", str(e))
20 |
21 | if len(image_path_list) == 0:
22 | raise Exception("Could not find carvel package")
23 |
24 |
25 | def main():
26 | parser = argparse.ArgumentParser(
27 | description='Script to copy carvel packages')
28 | parser.add_argument('--addonImageList',
29 | help='List of addon package images')
30 | parser.add_argument('--addonLocalImageList',
31 | help='List of addon package local images',
32 | default=None)
33 |
34 | args = parser.parse_args()
35 | download_image_from_artifactory(args.addonImageList, args.addonLocalImageList)
36 |
37 |
38 | if __name__ == '__main__':
39 | main()
40 |
--------------------------------------------------------------------------------
/ansible/files/usr/lib/vmware-vmservice/defer-cloud-init-generator:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 |
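# systemd generator: when the guestinfo key below is set to "enabled", mask
# cloud-init for this boot, enable the disable-cloud-init-networking unit, and
# flip the key to "done" so the deferral happens only on that single boot.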
4 | # Arguments from systemd
5 | SYSTEMD_NORMAL_D=$1
6 | SYSTEMD_EARLY_D=$2
7 | SYSTEMD_LATE_D=$3
8 |
9 | GUESTINFO_KEY="guestinfo.vmservice.defer-cloud-init"
10 | ENABLED_VAL="enabled"
11 | DONE_VAL="done"
12 |
13 | # Ensure vmware-rpctool
14 | export PATH=/sbin:/usr/sbin:/bin:/usr/bin:$PATH
15 | command -v vmware-rpctool >/dev/null 2>&1 || exit 1
16 |
17 | GUESTINFO_VAL=$(vmware-rpctool "info-get $GUESTINFO_KEY" 2>&1)
18 | if [ "$GUESTINFO_VAL" = "$ENABLED_VAL" ]; then
19 |
20 | # Mask cloud-init
21 | mkdir -p /run/systemd/transient
22 | ln -sf /dev/null /run/systemd/transient/cloud-init.target
23 | ln -sf /dev/null /run/systemd/transient/cloud-init.service
24 |
25 | # Enable disable-cloud-init-networking
26 | mkdir -p /run/systemd/transient/multi-user.target.wants
27 | ln -sf /usr/lib/vmware-vmservice/disable-cloud-init-networking.service /run/systemd/transient/disable-cloud-init-networking.service
28 | ln -sf /usr/lib/vmware-vmservice/disable-cloud-init-networking.service /run/systemd/transient/multi-user.target.wants/disable-cloud-init-networking.service
29 |
30 | # Set flag to done, so we don't run again
31 | vmware-rpctool "info-set $GUESTINFO_KEY $DONE_VAL"
32 | fi
--------------------------------------------------------------------------------
/ansible/files/usr/lib/vmware-vmservice/disable-cloud-init-networking:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | set -e
4 | echo "network: {config: disabled}" > /etc/cloud/cloud.cfg.d/99_vmservice.cfg
5 | rm -rf /etc/netplan/*cloud-init*.yaml
--------------------------------------------------------------------------------
/ansible/files/usr/lib/vmware-vmservice/disable-cloud-init-networking.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=VMware VM Service - Disable Cloud-init networking
3 | After=systemd-remount-fs.service
4 | Before=open-vm-tools.service
5 |
6 | [Service]
7 | Type=oneshot
8 | RemainAfterExit=yes
9 | ExecStart=/usr/lib/vmware-vmservice/disable-cloud-init-networking
10 |
11 | [Install]
12 | WantedBy=multi-user.target
--------------------------------------------------------------------------------
/ansible/files/usr/local/bin/kubeadm:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Wrapper script to pass additional cmdline parameters to kubeadm that CABPK doesn't allow for.
4 |
5 | # Use a crictl command to verify a successful run of the container runtime
6 | counter=0
7 | until [ "$counter" -ge 15 ] && echo "WARNING: crictl ps failed to run successfully. Containerd may not be running"
8 | do
9 | crictl ps > /dev/null 2>&1 && break
10 | counter=$((counter+1))
11 | sleep 1s
12 | done
13 |
14 | /bin/kubeadm -v 1 "$@"
15 |
--------------------------------------------------------------------------------
/ansible/files/va_hardening/OverriddenPDIs:
--------------------------------------------------------------------------------
1 | # this module disables install of bridge interfaces
2 | GEN003619
3 | # this disables sshd service
4 | GEN001121
5 | # disables root login for ssh
6 | GEN001120
7 | # disables account after 3 unsuccessful attempts (needs to be removed later)
8 | GEN000460
9 | # enables aes ciphers
10 | GEN005505
11 | # enables wheel group
12 | GEN005521
13 | # makes all unknown directories to be owned by root
14 | GEN001160
15 |
--------------------------------------------------------------------------------
/ansible/files/va_hardening/vasecurity.spec:
--------------------------------------------------------------------------------
1 | Name: photon_vasecurity
2 | Summary: VA Security Hardening scripts for VMware
3 | Version: %{version}
4 | Release: %{release}
5 | License: VMware License
6 | Vendor: VMware, Inc.
7 | BuildRoot: %{_topdir}/INSTALL
8 | Group: Applications/System
9 | BuildArch: noarch
10 | AutoReqProv: no
11 | #Obsoletes: tcpdump
12 |
13 | %description
14 | Virtual Appliance Security Hardening for Photon.
15 |
16 | %prep
17 |
18 |
19 | %clean
20 |
21 | %install
22 | mkdir -p %{buildroot}/vasecurity
23 | cp -rf %{_topdir}/SOURCES/vasecurity/* %{buildroot}/vasecurity/
24 |
25 | %files
26 | %defattr(0700,root,root)
27 | /vasecurity
28 |
29 | # ----------------------------------------------------------------------
30 | # This is the pre install script. It is also used for updates.
31 | # ----------------------------------------------------------------------
32 | %pre
33 |
34 | # ----------------------------------------------------------------------
35 | # This is the post install/update script
36 | # ----------------------------------------------------------------------
37 | %post
38 | /vasecurity/postinstall
39 |
40 | # ----------------------------------------------------------------------
41 | # This is the pre uninstall script
42 | # ----------------------------------------------------------------------
43 | %preun
44 |
45 | # ----------------------------------------------------------------------
46 | # This is the post uninstall script, need to specify all dirs as
47 | # there may be other packages installing under /opt/maas.
48 | # ----------------------------------------------------------------------
49 | %postun
50 |
--------------------------------------------------------------------------------
/ansible/tasks/common.yml:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 | ---
5 | - name: Configure /etc/sysctl.d/80-bridged-net-traffic.conf
6 | copy:
7 | src: files/etc/sysctl.d/80-bridged-net-traffic.conf
8 | dest: /etc/sysctl.d/80-bridged-net-traffic.conf
9 | mode: 0644
10 |
11 | - name: add errors=panic to fstab for / partition
12 | script: files/scripts/ova-custom-patch.sh
13 |
14 | - name: Create vmtoolsd.service.d
15 | file:
16 | path: /etc/systemd/system/vmtoolsd.service.d/
17 | state: directory
18 | mode: 0755
19 |
20 | - name: Configure /etc/vmware-tools/tools.conf
21 | copy:
22 | src: files/etc/vmware-tools/tools.conf
23 | dest: /etc/vmware-tools/tools.conf
24 | mode: 0644
25 |
26 | - name: Create autorestart.conf
27 | copy:
28 | src: files/etc/systemd/system/vmtoolsd.service.d/autorestart.conf
29 | dest: /etc/systemd/system/vmtoolsd.service.d/autorestart.conf
30 | mode: 0644
31 |
32 | - name: Configure /usr/local/bin/kubeadm
33 | copy:
34 | src: files/usr/local/bin/kubeadm
35 | dest: /usr/local/bin/kubeadm
36 | mode: 0755
37 |
38 | # TODO(KK) Investigate difference between upstream yaml and this
39 | - name: Configure /etc/crictl.yaml
40 | copy:
41 | src: files/etc/crictl.yaml
42 | dest: /etc/crictl.yaml
43 | mode: 0644
44 |
45 | - name: Replace containerd systemd file
46 | copy:
47 | src: etc/systemd/system/containerd.service
48 | dest: /etc/systemd/system/containerd.service
49 | mode: 0644
50 |
51 | - name: Symlink docker.service to containerd.service
52 | file:
53 | src: "/etc/systemd/system/containerd.service"
54 | dest: "/usr/lib/systemd/system/docker.service"
55 | state: link
56 |
57 | - name: Create audit log directory
58 | file:
59 | path: "{{ item }}"
60 | state: directory
61 | mode: 0755
62 | loop:
63 | - "/var/log/kubernetes"
64 | - "/var/log/kubernetes/audit"
65 |
66 | - name: Create /usr/lib/vmware-wcpgc-manifests directory
67 | file:
68 | path: /usr/lib/vmware-wcpgc-manifests/
69 | state: directory
70 | mode: 0644
71 |
72 | - name: Add guest cluster artifacts
73 | get_url:
74 | url: "{{ item }}"
75 | dest: /usr/lib/vmware-wcpgc-manifests/
76 | mode: 0644
77 | ignore_errors: yes
78 | loop: "{{ gcAuthArtifacts.split(',') }}"
79 | retries: 5
80 | delay: 3
81 |
82 | - name: Ensure /etc/kubernetes/manifests dir exists
83 | file:
84 | path: /etc/kubernetes/manifests
85 | state: directory
86 |
87 | - name: Copy docker-registry static pod config yaml
88 | template:
89 | src: "etc/kubernetes/manifests/registry.yml"
90 | dest: "/etc/kubernetes/manifests/registry.yml"
91 | mode: 0644
92 |
93 | - name: Copy containerd config.toml
94 | template:
95 | src: "etc/containerd/config_v2.toml"
96 | dest: "/etc/containerd/config.toml"
97 | mode: 0644
98 |
99 | - name: Restart containerd service with custom config.toml
100 | systemd:
101 | name: containerd
102 | daemon_reload: yes
103 | enabled: True
104 | state: restarted
105 |
106 | - name: Ensure localRegistry storage dir exists
107 | file:
108 | path: /storage/container-registry
109 | state: directory
110 |
111 | # The VM will be unable to reach GitHub after the iptables rules are configured.
112 | # Hence, the goss binary is pre-downloaded to a new tmp space, as /tmp gets cleaned.
113 | # /tmp/tmp.fstab is used because Ansible cannot mount a non-persistent mount point.
114 | - name: Create /tkgs-tmp for goss
115 | mount:
116 | path: /tkgs-tmp
117 | src: "tmpfs"
118 | fstype: tmpfs
119 | state: mounted
120 | fstab: /tmp/tmp.fstab
121 |
122 | - name: Download goss binary
123 | ansible.builtin.get_url:
124 | url: "https://github.com/aelsabbahy/goss/releases/download/v0.3.16/goss-linux-amd64"
125 | dest: /tkgs-tmp/goss-linux-amd64
126 | mode: 0755
127 | retries: 5
128 | delay: 3
129 | when: kubernetes_semver is version('v1.31.0', '<')
130 |
131 | - name: Disable containerd service
132 | systemd:
133 | name: "{{ item }}"
134 | enabled: no
135 | loop:
136 | - "containerd"
137 |
138 | - name: Remove cloud-init boot-order conf files
139 | file:
140 | path: "{{ item }}"
141 | state: absent
142 | loop:
143 | - "/etc/systemd/system/cloud-config.service.d/boot-order.conf"
144 | - "/etc/systemd/system/cloud-final.service.d/boot-order.conf"
145 |
146 | # TODO: Consume caymanized imgpkg rather than open source GCM-6191
147 | - name: Download carvel install.sh
148 | get_url:
149 | url: https://carvel.dev/install.sh
150 | dest: /tmp/install.sh
151 | mode: "0777"
152 |
153 | - name: Create a directory for carvel tools if it does not exist
154 | ansible.builtin.file:
155 | path: "{{ carvel_tools }}"
156 | state: directory
157 | mode: "0755"
158 |
159 | - name: Install wget for Carvel tools Installation
160 | command: tdnf install wget -y
161 | when: ansible_os_family == "VMware Photon OS"
162 |
163 | - name: Set up carvel tools inside OVA
164 | ansible.builtin.shell: K14SIO_INSTALL_BIN_DIR={{ carvel_tools }} /tmp/install.sh
165 | args:
166 | executable: /bin/bash
167 |
168 | - name: Unload nouveau
169 | template:
170 | src: "etc/modprobe.d/blocklist-nouveau.conf"
171 | dest: "/etc/modprobe.d/blocklist-nouveau.conf"
172 | mode: 0644
173 | ignore_errors: true
174 |
175 | - name: Update initramfs
176 | command: "{{ systemd_networkd_update_initramfs }}"
177 | when: (systemd_networkd_update_initramfs is defined) and (systemd_networkd_update_initramfs | length > 0)
178 |
179 | - name: Copy /etc/systemd/journald.conf
180 | ansible.builtin.template:
181 | src: "etc/systemd/journald.conf"
182 | dest: "/etc/systemd/journald.conf"
183 | owner: root
184 | group: root
185 | mode: 0644
186 |
--------------------------------------------------------------------------------
/ansible/tasks/disable_pwd_rotation.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 | - name: Disable password expiry
4 | ansible.builtin.shell: sed -i 's/^PASS_MAX_DAYS.*$/PASS_MAX_DAYS -1/g' /etc/login.defs
5 | args:
6 | executable: /bin/bash
7 |
--------------------------------------------------------------------------------
/ansible/tasks/iptables.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 | ---
4 | - name: Copy iptables rules file
5 | copy:
6 | src: iptables.rules
7 | dest: "{{ '/etc/systemd/scripts/ip4save' if ansible_os_family == 'VMware Photon OS' else '/etc/iptables/rules.v4' }}"
8 | mode: 0644
9 |
10 | - name: Copy ip6tables rules file
11 | copy:
12 | src: ip6tables.rules
13 | dest: "{{ '/etc/systemd/scripts/ip6save' if ansible_os_family == 'VMware Photon OS' else '/etc/iptables/rules.v6' }}"
14 | mode: 0644
15 |
16 | - name: Save to persist iptables rules
17 | shell: "iptables-restore < /etc/iptables/rules.v4"
18 | when: ansible_os_family == "Debian"
19 |
20 | - name: Save to persist ip6tables rules
21 | shell: "ip6tables-restore < /etc/iptables/rules.v6"
22 | when: ansible_os_family == "Debian"
--------------------------------------------------------------------------------
/ansible/tasks/main.yml:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 | ---
5 | - import_tasks: common.yml
6 |
7 | - import_tasks: photon.yml
8 | when: ansible_os_family == "VMware Photon OS"
9 |
10 | - import_tasks: ubuntu.yml
11 | when: ansible_os_family == "Debian"
12 |
13 | - import_tasks: ubuntu_hack.yml
14 | when: ansible_os_family == "Debian"
15 |
16 | - name: Check if local registry store exists
17 | uri:
18 | url: "{{ registry_store_archive_url | trim }}"
19 | method: GET
20 | status_code: [200, 201, 302]
21 | register: registry_store_url_check
22 | ignore_errors: true
23 |
24 | # Used in Windows OVA builds
25 | - import_tasks: registry.yml
26 | when: registry_store_url_check.status == 200
27 |
28 | # Used in Linux OVA builds
29 | - import_tasks: retag_images.yml
30 | when: registry_store_url_check.status != 200
31 |
32 | - import_tasks: iptables.yml
33 |
34 | # va_hardening step in photon overrides the audit conf, so change the audit
35 | # conf after va_hardening is completed.
36 | - name: Change auditd configuration to rotate audit log files
37 | ansible.builtin.shell: grep -qF "max_log_file_action" /etc/audit/auditd.conf && sed -i '/max_log_file_action/c\max_log_file_action = ROTATE' /etc/audit/auditd.conf || echo "max_log_file_action = ROTATE" >> /etc/audit/auditd.conf
38 | args:
39 | executable: /bin/bash
40 |
41 | - import_tasks: disable_pwd_rotation.yml
--------------------------------------------------------------------------------
/ansible/tasks/photon.yml:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 | ---
5 | - name: Create afterdbus.conf
6 | copy:
7 | src: files/etc/systemd/system/vmtoolsd.service.d/afterdbus.conf
8 | dest: /etc/systemd/system/vmtoolsd.service.d/afterdbus.conf
9 | mode: 0644
10 |
11 | - name: Disable DHCP on the network interface
12 | file:
13 | path: "/etc/systemd/network/99-dhcp-en.network"
14 | state: absent
15 |
16 | - name: Enable Apparmor service
17 | systemd:
18 | name: apparmor
19 | daemon_reload: yes
20 | enabled: true
21 | state: restarted
22 |
23 | - name: Enable Apparmor in kernel
24 | replace:
25 | path: /boot/photon.cfg
26 | regexp: "apparmor=0"
27 | replace: "apparmor=1"
28 |
29 | - name: Enable POS flag in kernel
30 | lineinfile:
31 | path: /boot/photon.cfg
32 | backrefs: yes
33 | regexp: "^(?!.*pos=1)(photon_cmdline.*)"
34 | line: '\1 pos=1'
35 |
36 | - name: Disabling cgroups kernel memory accounting and disabling cgroups v2
37 | lineinfile:
38 | path: /boot/photon.cfg
39 | backrefs: yes
40 | regexp: "^(photon_cmdline.*)$"
41 | line: '\1 cgroup.memory=nokmem systemd.legacy_systemd_cgroup_controller=yes'
42 |
43 | - name: Disable tdnf cache timer
44 | systemd:
45 | name: tdnf-cache-updateinfo.timer
46 | enabled: no
47 | state: stopped
48 |
49 | - name: Mask systemd-timesyncd unit
50 | ansible.builtin.systemd:
51 | masked: true
52 | name: systemd-timesyncd.service
53 |
--------------------------------------------------------------------------------
/ansible/tasks/registry.yml:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | - name: Create registry directory structure
6 | file:
7 | path: "{{ registry_root_directory | trim }}"
8 | state: directory
9 | mode: 0644
10 |
11 | - name: Download registry store tar archive
12 | get_url:
13 | url: "{{ registry_store_archive_url | trim }}"
14 | dest: "{{ registry_root_directory | trim }}/registry.tar.gz"
15 | mode: 0644
16 | retries: 5
17 | delay: 3
18 |
19 | - name: Unpack registry root directory
20 | unarchive:
21 | src: "{{ registry_root_directory | trim }}/registry.tar.gz"
22 | dest: "{{ registry_root_directory | trim }}"
23 | remote_src: yes
24 |
25 | - name: Remove registry store tar archive
26 | file:
27 | path: "{{ registry_root_directory | trim }}/registry.tar.gz"
28 | state: absent
29 |
30 | - name: Retag Container Images
31 | script: files/scripts/image_retag.py --k8sSemver {{ kubernetes_semver }} --dockerVersion {{ dockerVersion }} --family "{{ ansible_os_family }}"
32 | args:
33 | executable: python3
34 |
35 | - name: Pin container images
36 | ansible.builtin.script: files/scripts/docker_registry_ops.py --pin
37 | register: registry_version
38 | args:
39 | executable: python3
40 |
41 | - name: List images
42 | shell: 'CONTAINERD_NAMESPACE="k8s.io" ctr --address=/var/run/containerd/containerd.sock images ls -q'
43 | register: container_list
44 |
45 | - name: Print containers
46 | debug:
47 | var: container_list.stdout_lines
48 |
--------------------------------------------------------------------------------
/ansible/tasks/retag_images.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 | ---
4 | - name: Retag Container Images
5 | script: files/scripts/image_retag.py --k8sSemver {{ kubernetes_semver }} --dockerVersion {{ dockerVersion }} --family "{{ ansible_os_family }}"
6 | args:
7 | executable: python3
8 |
9 | - name: Start docker registry
10 | shell: 'ctr -n k8s.io run -d --null-io --net-host --mount type=bind,src=/storage/container-registry,dst=/var/lib/registry,options=rbind:rw docker.io/vmware/docker-registry:{{ dockerVersion }} docker-registry /bin/registry serve /etc/docker/registry/config.yaml'
11 |
12 | - name: Copy carvel packages and images to Embedded registry
13 | script: files/scripts/utkg_download_carvel_packages.py --addonImageList {{ addon_image_list }} --addonLocalImageList {{ localhost_addon_image_list }}
14 | args:
15 | executable: python3
16 |
17 | - name: Stop docker registry
18 | shell: 'ctr -n k8s.io task kill docker-registry'
19 |
20 | - name: List images
21 | shell: 'CONTAINERD_NAMESPACE="k8s.io" ctr --address=/var/run/containerd/containerd.sock images ls -q'
22 | register: container_list
23 |
24 | - name: Print containers
25 | debug:
26 | var: container_list.stdout_lines
--------------------------------------------------------------------------------
/ansible/tasks/ubuntu.yml:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 | ---
5 | - name: Modify /bin/sh to point to bash instead of dash
6 | shell: "{{ item }}"
7 | with_items:
8 | - echo "dash dash/sh boolean false" | debconf-set-selections
9 | - DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash
10 |
11 | - name: Mask systemd-timesyncd unit
12 | ansible.builtin.systemd:
13 | masked: true
14 | name: systemd-timesyncd.service
15 |
--------------------------------------------------------------------------------
/ansible/tasks/ubuntu_hack.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 | ---
4 | # Conditionally defer cloud-init to second boot, allowing vmtools customization on first boot and reboot
5 | - name: Set disable_vmware_customization to true in /etc/cloud/cloud.cfg
6 | lineinfile:
7 | line: "disable_vmware_customization: true"
8 | path: /etc/cloud/cloud.cfg
9 |
10 | - name: Create directory for services
11 | file:
12 | path: /usr/lib/vmware-vmservice
13 | state: directory
14 | mode: 0755
15 |
16 | - name: Create disable-cloud-init-networking file
17 | copy:
18 | src: files/usr/lib/vmware-vmservice/disable-cloud-init-networking
19 | dest: /usr/lib/vmware-vmservice/disable-cloud-init-networking
20 | mode: 0755
21 |
22 | - name: Create disable-cloud-init-networking.service file
23 | copy:
24 | src: files/usr/lib/vmware-vmservice/disable-cloud-init-networking.service
25 | dest: /usr/lib/vmware-vmservice/disable-cloud-init-networking.service
26 |
27 | - name: Create defer-cloud-init-generator file
28 | copy:
29 | src: files/usr/lib/vmware-vmservice/defer-cloud-init-generator
30 | dest: /usr/lib/vmware-vmservice/defer-cloud-init-generator
31 | mode: 0755
32 |
33 | - name: Create directory for system generators
34 | file:
35 | path: /etc/systemd/system-generators
36 | state: directory
37 | mode: 0755
38 |
39 | - name: Create a symlink between two files
40 | file:
41 | src: /usr/lib/vmware-vmservice/defer-cloud-init-generator
42 | dest: /etc/systemd/system-generators/vmware-vmservice-defer-cloud-init-generator
43 | state: link
44 |
45 | - name: Create a symlink between two machine-id files
46 | file:
47 | src: /etc/machine-id
48 | dest: /var/lib/dbus/machine-id
49 | state: link
50 |
51 | - name: Clean up added directories and files
52 | file:
53 | path: "{{ vm_files }}"
54 | state: absent
55 | vars:
56 | vm_files:
57 | - ssh_host_*
58 | - vmware*
59 | - cloud*
60 |
61 | - name: Clean up cloud-init
62 | command:
63 | cmd: cloud-init clean --seed --logs
64 |
65 | # Upstream image-builder creates this config, but because we run cloud-init clean again
66 | # the file gets removed, so re-create ds-identify.cfg for Ubuntu 22.04 or above.
67 | - name: Create cloud-init datasource config file
68 | copy:
69 | dest: /etc/cloud/ds-identify.cfg
70 | force: true
71 | content: |
72 | datasource: VMware
73 | when: ansible_distribution_version is version('22.04', '>=')
74 |
75 | # sudo apt-get install --install-recommends linux-generic-hwe-22.04
76 | - name: Enabling Ubuntu HWE kernel
77 | ansible.builtin.apt:
78 | install_recommends: true
79 | name: linux-generic-hwe-{{ ansible_distribution_version }}
80 | when: ( enable_ubuntu_hwe is defined ) and ( enable_ubuntu_hwe|bool == True )
81 |
--------------------------------------------------------------------------------
/ansible/tasks/va_hardening.yml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 | ---
4 | - name: Install rpm-build package
5 | command: tdnf install rpm-build -y
6 |
7 | - name: Create rpm build directories
8 | file:
9 | path: "{{ item }}"
10 | state: directory
11 | mode: 0755
12 | loop:
13 | - "/tmp/va_rpm"
14 | - "/tmp/va_rpm/BUILD"
15 | - "/tmp/va_rpm/RPMS"
16 | - "/tmp/va_rpm/SOURCES"
17 | - "/tmp/va_rpm/SOURCES/vasecurity"
18 | - "/tmp/va_rpm/SPECS"
19 | - "/tmp/va_rpm/INSTALL"
20 |
21 | - name: Download va_hardening tar
22 | unarchive:
23 | src: "{{ photon_va_hardening_url }}"
24 | dest: /tmp/va_rpm/SOURCES/vasecurity
25 | copy: no
26 | mode: 0755
27 | retries: 5
28 | delay: 3
29 |
30 | - name: Copy va_hardening files
31 | copy:
32 | src: "{{ item.src }}"
33 | dest: "{{ item.dst }}"
34 | mode: 0755
35 | loop:
36 | - { src: 'files/va_hardening/OverriddenPDIs', dst: '/tmp/va_rpm/SOURCES/vasecurity/vahardening' }
37 | - { src: 'files/va_hardening/vasecurity.spec', dst: '/tmp/va_rpm' }
38 |
39 | - name: Build va_hardening RPM
40 | shell: "rpmbuild --buildroot INSTALL --define='_topdir /tmp/va_rpm' --define 'version {{ va_hardening_rpm_version }} ' --define 'release {{ va_hardening_rpm_release }}' --target noarch -bb vasecurity.spec"
41 | args:
42 | chdir: "/tmp/va_rpm"
43 |
44 | - name: Install va_hardening RPM
45 | command: "rpm -ihv photon_vasecurity-{{ va_hardening_rpm_version }}-{{ va_hardening_rpm_release }}.noarch.rpm"
46 | args:
47 | chdir: "/tmp/va_rpm/RPMS/noarch"
48 |
49 | - name: Remove va_hardening build dir
50 | file:
51 | state: absent
52 | path: "/tmp/va_rpm"
53 |
54 | - name: Remove rpm-build package
55 | command: tdnf remove rpm-build -y
56 |
--------------------------------------------------------------------------------
/ansible/templates/etc/containerd/config_v2.toml:
--------------------------------------------------------------------------------
1 | version = 2
2 | root = "/var/lib/containerd"
3 | state = "/run/containerd"
4 | plugin_dir = ""
5 | disabled_plugins = []
6 | required_plugins = []
7 | oom_score = 0
8 |
9 | [grpc]
10 | address = "/run/containerd/containerd.sock"
11 | uid = 0
12 | gid = 0
13 | max_recv_message_size = 16777216
14 | max_send_message_size = 16777216
15 |
16 | [debug]
17 | address = ""
18 | uid = 0
19 | gid = 0
20 | level = ""
21 |
22 | [metrics]
23 | address = ""
24 | grpc_histogram = false
25 |
26 | [cgroup]
27 | path = ""
28 |
29 | [plugins]
30 | [plugins."io.containerd.monitor.v1.cgroups"]
31 | no_prometheus = false
32 | [plugins."io.containerd.grpc.v1.cri"]
33 | stream_server_address = "127.0.0.1"
34 | stream_server_port = "0"
35 | enable_selinux = false
36 | sandbox_image = "{{ pause_image }}"
37 | stats_collect_period = 10
38 | enable_tls_streaming = false
39 | max_container_log_line_size = 16384
40 | disable_proc_mount = false
41 | [plugins."io.containerd.grpc.v1.cri".containerd]
42 | snapshotter = "overlayfs"
43 | no_pivot = false
44 | default_runtime_name = "runc"
45 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes]
46 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
47 | runtime_type = "io.containerd.runc.v2"
48 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
49 | SystemdCgroup = true
50 | [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime]
51 | runtime_type = ""
52 | runtime_engine = ""
53 | runtime_root = ""
54 | [plugins."io.containerd.grpc.v1.cri".cni]
55 | bin_dir = "/opt/cni/bin"
56 | conf_dir = "/etc/cni/net.d"
57 | conf_template = ""
58 | [plugins."io.containerd.grpc.v1.cri".registry]
59 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
60 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
61 | endpoint = ["https://registry-1.docker.io"]
62 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."localhost:5000"]
63 | endpoint = ["http://localhost:5000"]
64 | [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming]
65 | tls_cert_file = ""
66 | tls_key_file = ""
67 | [plugins."io.containerd.service.v1.diff-service"]
68 | default = ["walking"]
69 | [plugins."io.containerd.runtime.v1.linux"]
70 | shim = "containerd-shim"
71 | runtime = "runc"
72 | runtime_root = ""
73 | no_shim = false
74 | shim_debug = false
75 | [plugins."io.containerd.internal.v1.opt"]
76 | path = "/opt/containerd"
77 | [plugins."io.containerd.internal.v1.restart"]
78 | interval = "10s"
79 | [plugins."io.containerd.gc.v1.scheduler"]
80 | pause_threshold = 0.02
81 | deletion_threshold = 0
82 | mutation_threshold = 100
83 | schedule_delay = "0s"
84 | startup_delay = "100ms"
85 |
--------------------------------------------------------------------------------
/ansible/templates/etc/kubernetes/manifests/registry.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: docker-registry
5 | namespace: kube-system
6 | labels:
7 | app: docker-registry
8 | spec:
9 | containers:
10 | - command:
11 | - /bin/registry
12 | - serve
13 | - /etc/docker/registry/config.yaml
14 | image: docker.io/vmware/docker-registry:{{ dockerVersion }}
15 | name: docker-registry
16 | imagePullPolicy: IfNotPresent
17 | volumeMounts:
18 | - mountPath: /var/lib/registry
19 | name: storage-container-registry
20 | readOnly: false
21 | hostNetwork: true
22 | priorityClassName: system-node-critical
23 | volumes:
24 | - hostPath:
25 | path: /storage/container-registry
26 | type: Directory
27 | name: storage-container-registry
--------------------------------------------------------------------------------
/ansible/templates/etc/modprobe.d/blocklist-nouveau.conf:
--------------------------------------------------------------------------------
1 | # blacklist nouveau -- keeps it from being loaded
2 | blacklist nouveau
3 | # options nouveau modeset=0 -- If the module is built into the kernel, then this disables it
4 | # This is more of a future-proofing measure against future kernels
5 | options nouveau modeset=0
6 | # Prevent the module from loading even if something tries to load it (manually or via another mechanism)
7 | install nouveau /bin/false
--------------------------------------------------------------------------------
/ansible/templates/etc/systemd/journald.conf:
--------------------------------------------------------------------------------
1 | # This file is part of systemd.
2 | #
3 | # systemd is free software; you can redistribute it and/or modify it under the
4 | # terms of the GNU Lesser General Public License as published by the Free
5 | # Software Foundation; either version 2.1 of the License, or (at your option)
6 | # any later version.
7 | #
8 | # Entries in this file show the compile time defaults. Local configuration
9 | # should be created by either modifying this file, or by creating "drop-ins" in
10 | # the journald.conf.d/ subdirectory. The latter is generally recommended.
11 | # Defaults can be restored by simply deleting this file and all drop-ins.
12 | #
13 | # Use 'systemd-analyze cat-config systemd/journald.conf' to display the full config.
14 | #
15 | # See journald.conf(5) for details.
16 |
17 | [Journal]
18 | #Storage=auto
19 | #Compress=yes
20 | #Seal=yes
21 | #SplitMode=uid
22 | #SyncIntervalSec=5m
23 | #RateLimitIntervalSec=30s
24 | #RateLimitBurst=10000
25 | SystemMaxUse={{ journald_system_max_use }}
26 | #SystemKeepFree=
27 | #SystemMaxFileSize=
28 | #SystemMaxFiles=100
29 | #RuntimeMaxUse=
30 | #RuntimeKeepFree=
31 | #RuntimeMaxFileSize=
32 | #RuntimeMaxFiles=100
33 | #MaxRetentionSec=
34 | #MaxFileSec=1month
35 | #ForwardToSyslog=no
36 | #ForwardToKMsg=no
37 | #ForwardToConsole=no
38 | #ForwardToWall=yes
39 | #TTYPath=/dev/console
40 | #MaxLevelStore=debug
41 | #MaxLevelSyslog=debug
42 | #MaxLevelKMsg=notice
43 | #MaxLevelConsole=info
44 | #MaxLevelWall=emerg
45 | #LineMax=48K
46 | #ReadKMsg=yes
47 | #Audit=yes
48 |
--------------------------------------------------------------------------------
/build-ova.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # © Broadcom. All Rights Reserved.
3 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
4 | # SPDX-License-Identifier: MPL-2.0
5 |
6 | set -e
7 | set -x
8 |
9 | # Default variables
10 | image_builder_root=${IB_ROOT:-"/image-builder/images/capi"}
11 | default_packer_variables=${image_builder_root}/image/packer-variables/
12 | packer_configuration_folder=${image_builder_root}
13 | tkr_metadata_folder=${image_builder_root}/tkr-metadata/
14 | custom_ovf_properties_file=${image_builder_root}/custom_ovf_properties.json
15 | artifacts_output_folder=${image_builder_root}/artifacts
16 | ova_destination_folder=${artifacts_output_folder}/ovas
17 | ova_ts_suffix=$(date +%Y%m%d%H%M%S)
18 |
19 | function copy_custom_image_builder_files() {
20 | cp image/hack/tkgs-image-build-ova.py hack/image-build-ova.py
21 | cp image/hack/tkgs_ovf_template.xml hack/ovf_template.xml
22 | }
23 |
24 | function download_configuration_files() {
25 | # Download kubernetes configuration file
26 | wget -q http://${HOST_IP}:${ARTIFACTS_CONTAINER_PORT}/artifacts/metadata/kubernetes_config.json
27 |
28 | wget -q http://${HOST_IP}:${ARTIFACTS_CONTAINER_PORT}/artifacts/metadata/unified-tkr-vsphere.tar.gz
29 | mkdir ${tkr_metadata_folder}
30 | tar xzf unified-tkr-vsphere.tar.gz -C ${tkr_metadata_folder}
31 |
32 | # Download compatibility files
33 | wget -q http://${HOST_IP}:${ARTIFACTS_CONTAINER_PORT}/artifacts/metadata/compatibility/vmware-system.compatibilityoffering.json
34 | wget -q http://${HOST_IP}:${ARTIFACTS_CONTAINER_PORT}/artifacts/metadata/compatibility/vmware-system.guest.kubernetes.distribution.image.version.json
35 | }
36 |
37 | # Generate packer input variables based on the packer-variables folder
38 | function generate_packager_configuration() {
39 | mkdir -p $ova_destination_folder
40 | TKR_SUFFIX_ARG=
41 | [[ -n "$TKR_SUFFIX" ]] && TKR_SUFFIX_ARG="--tkr_suffix ${TKR_SUFFIX}"
42 |
43 | # additional_packer_variables
44 | ADDITIONAL_PACKER_VAR_FILES_LIST=
45 | [[ -n "$ADDITIONAL_PACKER_VARIABLE_FILES" ]] && ADDITIONAL_PACKER_VAR_FILES_LIST="--additional_packer_variables ${ADDITIONAL_PACKER_VARIABLE_FILES}"
46 |
47 | # override_package_repositories
48 | OVERRIDE_PACKAGE_REPO_FILE_LIST=
49 | [[ -n "${OVERRIDE_PACKAGE_REPOS}" ]] && OVERRIDE_PACKAGE_REPO_FILE_LIST="--override_package_repositories ${OVERRIDE_PACKAGE_REPOS}"
50 |
51 | python3 image/scripts/tkg_byoi.py setup \
52 | --host_ip ${HOST_IP} \
53 | --artifacts_container_port ${ARTIFACTS_CONTAINER_PORT} \
54 | --packer_http_port ${PACKER_HTTP_PORT} \
55 | --default_config_folder ${default_packer_variables} \
56 | --dest_config ${packer_configuration_folder} \
57 | --tkr_metadata_folder ${tkr_metadata_folder} \
58 | ${TKR_SUFFIX_ARG} \
59 | --kubernetes_config ${image_builder_root}/kubernetes_config.json \
60 | --ova_destination_folder ${ova_destination_folder} \
61 | --os_type ${OS_TARGET} \
62 | --ova_ts_suffix ${ova_ts_suffix} \
63 | ${ADDITIONAL_PACKER_VAR_FILES_LIST} \
64 | ${OVERRIDE_PACKAGE_REPO_FILE_LIST}
65 |
66 | echo "Image Builder Packer Variables"
67 | cat ${packer_configuration_folder}/packer-variables.json
68 | }
69 |
70 | function generate_custom_ovf_properties() {
71 | python3 image/scripts/utkg_custom_ovf_properties.py \
72 | --kubernetes_config ${image_builder_root}/kubernetes_config.json \
73 | --outfile ${custom_ovf_properties_file}
74 | }
75 |
76 |
77 | function download_stig_files() {
78 | if [[ "$OS_TARGET" != "photon-3" && "$OS_TARGET" != "photon-5" && "$OS_TARGET" != "ubuntu-2204-efi" ]]; then
79 | echo "Skipping STIG setup as '${OS_TARGET}' is not STIG Compliant"
80 | return
81 | fi
82 |
83 | stig_compliance_dir="${image_builder_root}/image/compliance"
84 | if [ -d "$stig_compliance_dir" ]
85 | then
86 | rm -rf "${stig_compliance_dir}"
87 | fi
88 | mkdir -p "${image_builder_root}/image/tmp"
89 | if [ ${OS_TARGET} == "photon-3" ]
90 | then
91 | wget -q http://${HOST_IP}:${ARTIFACTS_CONTAINER_PORT}/artifacts/photon-3-stig-hardening.tar.gz
92 | tar -xvf photon-3-stig-hardening.tar.gz -C "${image_builder_root}/image/tmp/"
93 | mv ${image_builder_root}/image/tmp/photon-3-stig-hardening-* "${stig_compliance_dir}"
94 | rm -rf photon-3-stig-hardening.tar.gz
95 | elif [ ${OS_TARGET} == "photon-5" ]
96 | then
97 | wget -q http://${HOST_IP}:${ARTIFACTS_CONTAINER_PORT}/artifacts/vmware-photon-5.0-stig-ansible-hardening.tar.gz
98 | tar -xvf vmware-photon-5.0-stig-ansible-hardening.tar.gz -C "${image_builder_root}/image/tmp/"
99 | mv ${image_builder_root}/image/tmp/vmware-photon-5.0-stig-ansible-hardening-* "${stig_compliance_dir}"
100 | rm -rf vmware-photon-5.0-stig-ansible-hardening.tar.gz
101 | elif [ ${OS_TARGET} == "ubuntu-2204-efi" ]
102 | then
103 | wget -q http://${HOST_IP}:${ARTIFACTS_CONTAINER_PORT}/artifacts/vmware-ubuntu-22.04-stig-ansible-hardening.tar.gz
104 | tar -xvf vmware-ubuntu-22.04-stig-ansible-hardening.tar.gz -C "${image_builder_root}/image/tmp/"
105 | mv ${image_builder_root}/image/tmp/vmware-ubuntu-22.04-stig-ansible-hardening-* "${stig_compliance_dir}"
106 | rm -rf vmware-ubuntu-22.04-stig-ansible-hardening.tar.gz
107 | fi
108 | }
109 |
110 | # Enable packer debug logging to the log file
111 | function packer_logging() {
112 | mkdir /image-builder/packer_cache
113 | mkdir -p $artifacts_output_folder/logs
114 | export PACKER_LOG=10
115 | datetime=$(date '+%Y%m%d%H%M%S')
116 | export PACKER_LOG_PATH="${artifacts_output_folder}/logs/packer-$datetime-$RANDOM.log"
117 | echo "Generating packer logs to $PACKER_LOG_PATH"
118 | }
119 |
120 | # Invokes kubernetes image builder for the corresponding OS target
121 | function trigger_image_builder() {
122 | EXTRA_ARGS=""
123 | ON_ERROR_ASK=1 PATH=$PATH:/home/imgbuilder-ova/.local/bin PACKER_CACHE_DIR=/image-builder/packer_cache \
124 | PACKER_VAR_FILES="${image_builder_root}/packer-variables.json" \
125 | OVF_CUSTOM_PROPERTIES=${custom_ovf_properties_file} \
126 | IB_OVFTOOL=1 ANSIBLE_TIMEOUT=180 IB_OVFTOOL_ARGS="--allowExtraConfig" \
127 | make build-node-ova-vsphere-${OS_TARGET}
128 | }
129 |
130 | # Packer generates OVA with a different name so change the OVA name to OSImage/VMI and
131 | # copy to the destination folder.
132 | function copy_ova() {
133 | TKR_SUFFIX_ARG=
134 | [[ -n "$TKR_SUFFIX" ]] && TKR_SUFFIX_ARG="--tkr_suffix ${TKR_SUFFIX}"
135 | python3 image/scripts/tkg_byoi.py copy_ova \
136 | --kubernetes_config ${image_builder_root}/kubernetes_config.json \
137 | --tkr_metadata_folder ${tkr_metadata_folder} \
138 | ${TKR_SUFFIX_ARG} \
139 | --os_type ${OS_TARGET} \
140 | --ova_destination_folder ${ova_destination_folder} \
141 | --ova_ts_suffix ${ova_ts_suffix}
142 | }
143 |
144 | function main() {
145 | copy_custom_image_builder_files
146 | download_configuration_files
147 | generate_packager_configuration
148 | generate_custom_ovf_properties
149 | download_stig_files
150 | packer_logging
151 | trigger_image_builder
152 | copy_ova
153 | }
154 |
155 | main
156 |
--------------------------------------------------------------------------------
/docs/examples/README.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | This document describes how to build a Kubernetes Release on vSphere Node image and provides different examples for customizing the Node image.
4 |
5 | - [Tutorial for Using the vSphere Tanzu Kubernetes Grid Image Builder](tutorial_building_an_image.md)
6 | - [Changing VM Hardware Version (VMX version)](./customizations/changing_hardware_version.md)
7 | - [Adding new OS packages and configuring the repositories or sources](./customizations/adding_os_pkg_repos.md)
8 | - [Running Prometheus node exporter service on the nodes](./customizations/prometheus_node_exporter.md)
9 |
--------------------------------------------------------------------------------
/docs/examples/customizations/README.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | This document describes different examples for customizing a Kubernetes Release on vSphere Node image.
4 |
5 | - [Changing VM Hardware Version (VMX version)][hardware-version]
6 | - [Adding new OS packages and configuring the repositories or sources][custom-package-repos]
7 | - [Running Prometheus node exporter service on the nodes][prometheus-node-exporter]
8 |
9 | [//]: Links
10 | [hardware-version]: changing_hardware_version.md
11 | [custom-package-repos]: adding_os_pkg_repos.md
12 | [prometheus-node-exporter]: prometheus_node_exporter.md
13 |
--------------------------------------------------------------------------------
/docs/examples/customizations/adding_os_pkg_repos.md:
--------------------------------------------------------------------------------
1 | # Adding new OS packages and configuring the repositories or sources
2 |
3 | ## Use case
4 |
5 | As a user, I want to:
6 |
7 | 1. Add new OS packages such as `pkg1` and `pkg2`.
8 | 2. Configure sources/repositories to an internal mirror to build the node images in an air-gapped scenario. Configuring new sources/repositories is also useful when we want to install internally built software.
9 |
10 | ## Customization
11 |
12 | Configuration of OS packages and sources/repositories is exposed as packer variables. To view a list of packer variables exposed by [kubernetes image builder][kubernetes-image-builder] please refer to this [page][customizations-doc].
13 |
14 | ### Adding new packages
15 |
16 | To add new packages [kubernetes image builder][kubernetes-image-builder] provides `extra_rpms` and `extra_debs` packer variables for Photon and Ubuntu respectively.
17 |
18 | To add new packages to Ubuntu 22.04, add the packages to the packer variables in the [default-args-ubuntu-2204-efi.j2][default-args-ubuntu-2204-efi] file as shown below.
19 |
20 | ```jinja
21 | "extra_debs": "existing_packages pkg1 pkg2"
22 | ```
23 |
24 | To add new packages to Photon 5, add the packages to the packer variables in the [default-args-photon-5.j2][default-args-photon-5] file as shown below.
25 |
26 | ```jinja
27 | "extra_rpms": "existing_packages pkg1 pkg2"
28 | ```
29 |
30 | _**Note**: The location of the OS-specific default configuration follows the path nomenclature `packer-variables/<os>-<version>/default-args-<os>-<version>.j2`. For Ubuntu, include the `-efi` suffix along with the version._
31 |
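After a build completes, you can optionally confirm that the extra packages landed in the node image by querying the package manager on a VM deployed from it. A minimal sketch, reusing the placeholder package names `pkg1` and `pkg2` from the examples above:

```bash
# Run on a node deployed from the custom image.
# Ubuntu:
dpkg -l | grep -E 'pkg1|pkg2'
# Photon:
rpm -qa | grep -E 'pkg1|pkg2'
```
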
32 | ### Configuring new sources/repositories
33 |
34 | [kubernetes image builder][kubernetes-image-builder] provides the `extra_repos` packer variable through which sources/repositories can be configured for both Photon and Ubuntu. Because Ubuntu and Photon configure sources differently, separate source files are needed for each.
35 |
36 | - Create a new folder `repos` in the [ansible files][ansible-files] folder
37 | - Depending upon the Linux OS flavour, use either of the below steps
38 | - For **Photon** sources, create a new file called `photon.repo` in the `repos` folder. Refer below for sample content and refer to the official Photon [document][photon-repo-doc] for more information.
39 |
40 | ```text
41 | [photon]
42 | name=VMware Photon Linux $releasever ($basearch)
43 | baseurl=<internal-mirror-url>/$releasever/photon_release_$releasever_$basearch
44 | gpgkey=
45 | gpgcheck=1
46 | enabled=1
47 |
48 | [photon-updates]
49 | name=VMware Photon Linux $releasever ($basearch) Updates
50 | baseurl=<internal-mirror-url>/$releasever/photon_updates_$releasever_$basearch
51 | gpgkey=
52 | gpgcheck=1
53 | enabled=1
54 |
55 | [photon-extras]
56 | name=VMware Photon Extras $releasever ($basearch)
57 | baseurl=<internal-mirror-url>/$releasever/photon_extras_$releasever_$basearch
58 | gpgkey=
59 | gpgcheck=1
60 | enabled=1
61 | ```
62 |
63 | - For **Ubuntu** sources, create a new file called `ubuntu.list` in the `repos` folder. Refer to the official ubuntu [documentation][ubuntu-sources-doc] for more information.
64 | - _**Note**: `jammy` corresponds to Ubuntu 22.04, so this needs to change if the Ubuntu version changes; for example, for Ubuntu 20.04 it is `focal`._
65 |
66 | ```text
67 | deb <internal-mirror-url> jammy main restricted universe
68 | deb <internal-mirror-url> jammy-security main restricted
69 | deb <internal-mirror-url> jammy-updates main restricted
70 | ```
71 |
72 | - Create a new file `repos.j2` in the [packer-variables][packer-variables] folder to configure the `extra_repos` packer variable (uses [Jinja][jinja] templating); an optional sanity check for this template is shown after the example below.
73 |
74 | ```jinja
75 | {
76 | {% if os_type == "photon-5" %}
77 | "extra_repos": "/image-builder/images/capi/image/ansible/files/repos/photon.repo"
78 | {% elif os_type == "ubuntu-2204-efi" %}
79 | "extra_repos": "/image-builder/images/capi/image/ansible/files/repos/ubuntu.list"
80 | {% endif %}
81 | }
82 | ```
83 |
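Before kicking off a full build, you can optionally sanity-check that `repos.j2` renders to valid JSON. This is only a sketch and assumes `python3` with the `jinja2` package is available on the build host; run it from the repository root and adjust the `os_type` argument to your target OS:

```bash
# Renders packer-variables/repos.j2 for the given os_type and fails if the result is not valid JSON.
python3 -c 'import json, jinja2, sys; print(json.loads(jinja2.Template(open("packer-variables/repos.j2").read()).render(os_type=sys.argv[1])))' ubuntu-2204-efi
```
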
84 | ### Disabling public repositories/sources
85 |
86 | To disable the public repositories/sources, set the `disable_public_repos` packer variable to `true` in the `repos.j2` file.
87 |
88 | ### Removing the extra repositories/sources
89 |
90 | To remove the extra repositories/sources that were configured during the image build process, set the `remove_extra_repos` packer variable to `true`, as shown in the combined example below.
91 |
92 | ```jinja
93 | {
94 | "disable_public_repos": true,
95 | "remove_extra_repos": true
96 | {% if os_type == "photon-5" %}
97 | "extra_repos": "/image-builder/images/capi/image/ansible/files/repos/photon.repo"
98 | {% elif os_type == "ubuntu-2204-efi" %}
99 | "extra_repos": "/image-builder/images/capi/image/ansible/files/repos/ubuntu.list"
100 | {% endif %}
101 | }
102 | ```
103 |
104 | [//]: Links
105 |
106 | [ansible-files]: ./../../../ansible/files/
107 | [customizations-doc]: https://image-builder.sigs.k8s.io/capi/capi.html#customization
108 | [jinja]: https://jinja.palletsprojects.com/en/3.1.x/
109 | [kubernetes-image-builder]: https://github.com/kubernetes-sigs/image-builder/
110 | [photon-repo-doc]: https://vmware.github.io/photon/assets/files/html/3.0/photon_admin/adding-a-new-repository.html
111 | [ubuntu-sources-doc]: https://manpages.ubuntu.com/manpages/focal/man5/sources.list.5.html
112 | [default-args-ubuntu-2204-efi]: ../../../packer-variables/ubuntu-2204-efi/default-args-ubuntu-2204-efi.j2
113 | [default-args-photon-5]: ../../../packer-variables/photon-5/default-args-photon-5.j2
114 | [packer-variables]: ./../../../packer-variables/
115 |
--------------------------------------------------------------------------------
/docs/examples/customizations/changing_hardware_version.md:
--------------------------------------------------------------------------------
1 | # Changing the Hardware version
2 |
3 | ## Use case
4 |
5 | As a customer, I want to change the Hardware version of the node image to use the latest hardware functionalities.
6 |
7 | ## Background
8 |
9 | By default, node images use hardware version (`VMX`) 17, defined in [default-args.j2][default-args] (Windows uses hardware version 18 by default, defined in [default-args-windows.j2][default-args-windows]). Please refer to the below documents to learn more about the hardware version and its compatibility with the vSphere environment.
10 |
11 | - [Hardware Features Available with Virtual Machine Compatibility Settings][vm-admin-guide]
12 | - [ESXi/ESX hosts and compatible virtual machine hardware versions list][kb-vm-hardware-version-list]
13 |
14 | ## Customization
15 |
16 | [Kubernetes Image Builder][kubernetes-image-builder] has a `vmx_version` packer variable through which the hardware version can be configured. Edit the `vmx_version` field in [default-args.j2][default-args], present in the [packer variables](./../../../packer-variables/) folder, with the appropriate hardware version and build the image. An optional way to verify the resulting OVA is shown at the end of this section.
17 |
18 | ```text
19 | "vmx_version": "17",
20 | ```
21 |
22 | - _**Note**: For Windows, update the `vmx_version` in the [default-args-windows.j2][default-args-windows] file._
23 |
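If you want to double-check the result, the hardware version is recorded in the OVF descriptor inside the generated OVA. A minimal sketch, assuming GNU tar on the build host; the OVA path and file name are illustrative:

```bash
# Extract only the OVF descriptor to stdout and print the vmx-NN hardware version string(s).
tar -xOf /home/ubuntu/image/ovas/<node-image>.ova --wildcards '*.ovf' | grep -o 'vmx-[0-9]*' | sort -u
```
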
24 | [//]: Links
25 |
26 | [default-args]: ./../../../packer-variables/default-args.j2
27 | [kubernetes-image-builder]: https://github.com/kubernetes-sigs/image-builder/
28 | [default-args-windows]: ../../../packer-variables/windows/default-args-windows.j2
29 | [vm-admin-guide]: https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere/8-0/vsphere-virtual-machine-administration-guide-8-0.html
30 | [kb-vm-hardware-version-list]: https://knowledge.broadcom.com/external/article?legacyId=2007240
31 |
--------------------------------------------------------------------------------
/docs/examples/customizations/prometheus_node_exporter.md:
--------------------------------------------------------------------------------
1 | # Running Prometheus node exporter service on the nodes
2 |
3 | ## Use case
4 |
5 | I want to monitor the nodes using the hardware and OS metrics exposed by Prometheus [Node Exporter](https://prometheus.io/docs/guides/node-exporter/).
6 |
7 | ## Customization
8 |
9 | To do this, we will create a new Ansible task that installs and configures the node exporter service.
10 |
11 | - Create a unit file for the new service, `node_exporter.service`, in the [ansible files](./../../../ansible/files/) folder.
12 |
13 | ```text
14 | [Unit]
15 | Description=Prometheus Node Exporter
16 | Documentation=https://github.com/prometheus/node_exporter
17 | After=network-online.target
18 |
19 | [Service]
20 | User=root
21 | ExecStart=/opt/node_exporter/node_exporter
22 | Restart=on-failure
23 | RestartSec=5
24 |
25 | [Install]
26 | WantedBy=multi-user.target
27 |
28 | ```
29 |
30 | - Create new variables in [main.yml](./../../../ansible/defaults/main.yml) to configure where to pull the service binary from and which version to use.
31 |
32 | ```yaml
33 | node_exporter_url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_version }}/node_exporter-{{ node_exporter_version }}.linux-amd64.tar.gz"
34 | node_exporter_binary: "node_exporter-{{ node_exporter_version }}.linux-amd64"
35 | node_exporter_location: /opt/node_exporter
36 | node_exporter_tar: /tmp/node_exporter.tar.gz
37 | ```
38 |
39 | - `node_exporter_version` is an Ansible variable that can be passed through the `ansible_user_vars` packer variable present in [default-args.j2](./../../../packer-variables/default-args.j2)
40 |
41 | ```text
42 | "ansible_user_vars": " node_exporter_version=1.4.0 ",
43 | ```
44 |
45 | - Open port `9100` for the service (so external services can pull the metrics) by editing [iptables.rules](./../../../ansible/files/iptables.rules)
46 |
47 | ```text
48 | -A INPUT -p tcp -m multiport --dports 6443,10250,2379,2380,179,22,10349,10350,10351,10100,9100 -j ACCEPT
49 | ```
50 |
51 | - Create the Ansible task file (`node_exporter.yml`) for creating the service in the [ansible tasks](./../../../ansible/tasks/) folder
52 |
53 | ```yaml
54 | - name: Download Prometheus node exporter tar file
55 | get_url:
56 | url: "{{ node_exporter_url }}"
57 | dest: "{{ node_exporter_tar }}"
58 |
59 | - name: Extracting the Node Exporter binary
60 | unarchive:
61 | src: "{{ node_exporter_tar }}"
62 | remote_src: yes
63 | dest: /tmp/
64 |
65 | - name: Create node exporter directory
66 | file:
67 | path: "{{ node_exporter_location }}"
68 | state: directory
69 |
70 | - name: Renaming node exporter binary
71 | command: mv "/tmp/{{ node_exporter_binary }}/node_exporter" "{{ node_exporter_location }}/"
72 |
73 | - name: Create node exporter unit file
74 | copy:
75 | src: files/node_exporter.service
76 | dest: /etc/systemd/system/node_exporter.service
77 | mode: 0644
78 |
79 | - name: Enable node exporter service
80 | systemd:
81 | name: node_exporter
82 | daemon_reload: yes
83 | enabled: yes
84 |
85 | - name: Start node_exporter, if not started
86 | service:
87 | name: node_exporter
88 | state: started
89 | ```
90 |
91 | - Edit [main.yml](./../../../ansible/tasks/main.yml) to add the new task to the Ansible role. A quick verification step is shown after this list.
92 |
93 | ```yaml
94 | # At the end of the file
95 | - import_tasks: node_exporter.yml
96 | ```
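
After building a node image with this customization and deploying a node from it, a quick check that the exporter is serving metrics might look like the following; `<node-ip>` is a placeholder for the address of a cluster node:

```bash
# node_exporter listens on port 9100 by default.
curl -s http://<node-ip>:9100/metrics | head
```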
97 |
--------------------------------------------------------------------------------
/docs/examples/tutorial_building_an_image.md:
--------------------------------------------------------------------------------
1 | # Tutorial for Using the vSphere Tanzu Kubernetes Grid Image Builder
2 |
3 | This tutorial describes how to use the vSphere Tanzu Kubernetes Grid Image Builder to build a custom TKR for use on Supervisor in vSphere.
4 |
5 | The vSphere Tanzu Kubernetes Grid Image Builder uses HashiCorp Packer to generate images. Packer invokes vCenter APIs to create the VM from which the TKR is built.
6 |
7 | ## Requirements
8 |
9 | - vCenter Server 8; this can be any vCenter 8 instance and does not have to be the same vCenter that manages your vSphere with Tanzu environment
10 | - Packer requires the vSphere environment to have DHCP configured; you cannot use static IP address management
11 | - This tutorial uses an Ubuntu 22.04 based Linux VM to generate the image
12 |
13 | ## Clone the Repository
14 |
15 | Clone the vSphere Tanzu Kubernetes Grid Image Builder repository on the Linux VM where you are building the image.
16 |
17 | ```bash
18 | git clone https://github.com/vmware-tanzu/vsphere-tanzu-kubernetes-grid-image-builder.git
19 | ```
20 |
21 | ## Install Docker
22 |
23 | The vSphere Tanzu Kubernetes Grid Image Builder runs components as Docker images to generate VMs.
24 |
25 | Refer to the [Docker installation guide][docker-installation] for setting up Docker Engine.
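
For example, on an Ubuntu 22.04 build VM one simple option is the `docker.io` package from the distribution repositories; the official guide linked above covers Docker CE and other installation methods:

```bash
sudo apt update && sudo apt install -y docker.io
# Optional: run docker without sudo (requires logging out and back in).
sudo usermod -aG docker "$USER"
docker --version
```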
26 |
27 | ## Install JQ
28 |
29 | Install:
30 |
31 | ```bash
32 | sudo apt install -y jq
33 | ```
34 |
35 | Verify:
36 |
37 | ```bash
38 | jq --version
39 | ```
40 |
41 | ## Install Make
42 |
43 | Install:
44 |
45 | ```bash
46 | sudo apt install make
47 | ```
48 |
49 | Verify:
50 |
51 | ```bash
52 | make --version
53 | ```
54 |
55 | ## Update vsphere.j2 with vSphere Environment Details
56 |
57 | The `vsphere.j2` file is a packer configuration file with vSphere environment details.
58 |
59 | CD to the `vsphere-tanzu-kubernetes-grid-image-builder/packer-variables/` directory.
60 |
61 | Update the vsphere.j2 environment variables with details for your vCenter 8 instance.
62 |
63 | ```bash
64 | vi vsphere.j2
65 | ```
66 |
67 | For example:
68 |
69 | ```bash
70 | {
71 | {# vCenter server IP or FQDN #}
72 | "vcenter_server":"192.2.2.2",
73 | {# vCenter username #}
74 | "username":"user@vsphere.local",
75 | {# vCenter user password #}
76 | "password":"ADMIN-PASSWORD",
77 | {# Datacenter name where packer creates the VM for customization #}
78 | "datacenter":"Datacenter",
79 | {# Datastore name for the VM #}
80 | "datastore":"datastore22",
81 | {# [Optional] Folder name #}
82 | "folder":"",
83 | {# Cluster name where packer creates the VM for customization #}
84 | "cluster": "Management-Cluster",
85 | {# Packer VM network #}
86 | "network": "PG-MGMT-VLAN-1050",
87 | {# To use insecure connection with vCenter #}
88 | "insecure_connection": "true",
90 | {# To create a linked clone of the Packer VM after customization #}
90 | "linked_clone": "true",
91 | {# To create a snapshot of the Packer VM after customization #}
92 | "create_snapshot": "true",
93 | {# To destroy Packer VM after Image Build is completed #}
94 | "destroy": "true"
95 | }
96 | ```
97 |
98 | ## Identify the Kubernetes version
99 |
100 | To identify the version of the Kubernetes Release supported by the current branch, refer to [supported-version.txt](../../supported-version.txt).
101 |
102 | Usage:
103 |
104 | ```bash
105 | cat supported-version.txt
106 | ```
107 |
108 | ## Run the Artifacts Server Container for the Selected Kubernetes Version
109 |
110 | Running the `run-artifacts-container` Makefile target will pull the Artifacts Server container image corresponding to the selected Kubernetes Release.
111 |
112 | Usage:
113 |
114 | ```bash
115 | make run-artifacts-container
116 | ```
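
Optionally, confirm the artifacts container is up and reachable before starting a build. This is only a sketch; it assumes the helper scripts label their containers `byoi` (the label used by the clean-up targets) and that you use port 8081 as in the examples below:

```bash
# List containers started by the make targets.
docker ps --filter label=byoi
# Basic reachability check against the artifacts server port.
curl -sI http://localhost:8081/ | head -n 1
```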
117 |
118 | ## Run the Image Builder Application
119 |
120 | Usage:
121 |
122 | ```bash
123 | make build-node-image OS_TARGET=<os-target> TKR_SUFFIX=<tkr-suffix> HOST_IP=<host-ip> IMAGE_ARTIFACTS_PATH=<image-artifacts-path> ARTIFACTS_CONTAINER_PORT=8081
124 | ```
125 |
126 | NOTE: The HOST_IP must be reachable from the vCenter.
127 |
128 | Example:
129 |
130 | ```bash
131 | make build-node-image OS_TARGET=ubuntu-2204-efi TKR_SUFFIX=byoi HOST_IP=192.2.2.3 IMAGE_ARTIFACTS_PATH=/home/ubuntu/image ARTIFACTS_CONTAINER_PORT=8081
132 | ```
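
The make target starts the build in a detached container, so the easiest way to follow progress is through its logs. The exact container name is printed by the make target; the name below is a placeholder:

```bash
# Find the image-builder container (the helper scripts label containers "byoi").
docker ps --filter label=byoi
# Follow the build logs; replace the name with the one reported by the make target.
docker logs -f <node-image-builder-container-name>
```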
133 |
134 | ## Verify the Custom Image
135 |
136 | Locally, the image is stored in the `ovas` directory under the `IMAGE_ARTIFACTS_PATH` that you specified. For example, `/home/ubuntu/image/ovas`.
137 |
138 | The `logs` directory under the `IMAGE_ARTIFACTS_PATH` contains the `packer-xxxx.log` file that you can use to troubleshoot image building errors.
139 |
140 | To verify that the image is built successfully, check vCenter Server.
141 |
142 | You should see the image being built in the datacenter, cluster, and folder that you specified in the vsphere.j2 file.
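
For a quick local check, list the generated OVA and tail the packer log; the paths below follow the example `IMAGE_ARTIFACTS_PATH` used above:

```bash
ls -lh /home/ubuntu/image/ovas/
tail -n 50 /home/ubuntu/image/logs/packer-*.log
```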
143 |
144 | ## Customize the image
145 |
146 | Refer to the [customization examples][customizations].
147 |
148 | ## Upload the Image to Generate a Custom Kubernetes Release
149 |
150 | Download the custom image from local storage or from the vCenter Server.
151 |
152 | In your vSphere environment, create a local content library and upload the custom image there.
153 |
154 | Refer to the documentation for [creating a local content library][create-local-content-library] for use with Supervisor.
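
If you have govc installed and configured against your vSphere environment, one possible way to create the library and import the OVA from the command line is sketched below; the library name, datastore, and OVA file name are placeholders:

```bash
# Create a local content library backed by a datastore and import the custom TKR OVA into it.
govc library.create -ds=<datastore> local-tkr-images
govc library.import local-tkr-images /home/ubuntu/image/ovas/<custom-tkr>.ova
```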
155 |
156 | To use the custom TKR, configure the vSphere Namespace to use the local content library.
157 |
158 | [//]: Links
159 | [docker-installation]: https://docs.docker.com/engine/install/
160 | [create-local-content-library]: https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere-supervisor/8-0/using-tkg-service-with-vsphere-supervisor.html
161 | [customizations]: ./customizations/README.md
162 |
--------------------------------------------------------------------------------
/docs/files/demo.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/vmware-tanzu/vsphere-tanzu-kubernetes-grid-image-builder/24818693bffffe8690078ed9994bdc91c509723a/docs/files/demo.gif
--------------------------------------------------------------------------------
/docs/windows.md:
--------------------------------------------------------------------------------
1 | # Building Windows Image Using the vSphere Tanzu Kubernetes Grid Image Builder
2 |
3 | This tutorial describes how to use the vSphere Tanzu Kubernetes Grid Image Builder to build a Windows OVA image for use with [vSphere Kubernetes Service 3.3][vsphere-kubernetes-service-release-notes] and above. Windows container workload support is only available in Kubernetes release v1.31 and above.
4 |
5 | ## Use case
6 |
7 | I want to build a Windows Node Image to deploy a node pool for Windows container workloads in my guest cluster.
8 |
9 | ## Requirements
10 |
11 | - Check the [prerequisites][prerequisites]
12 | - vCenter Server 8; this can be any vCenter 8 instance and does not have to be the same vCenter that manages your vSphere with Tanzu environment
13 | - Packer requires the vSphere environment to have DHCP configured; you cannot use static IP address management
14 | - A recent Windows Server 22H2 ISO image. Download through your Microsoft Developer Network (MSDN) or Volume Licensing (VL) account. The use of evaluation media is not supported or recommended.
15 | - VMware Tools ISO Image
16 | - A datastore on your vCenter that can accommodate your custom Windows VM template, which can start at a size greater than 10 GB (thin provisioned).
17 |
18 | ## Prepare for Image Builder
19 |
20 | Follow the [standard tutorial][tutorials-base] to prepare the environment for vSphere Tanzu Kubernetes Grid Image Builder.
21 |
22 | ## Get Windows Server and VMware Tools ISO
23 |
24 | Windows Server 22H2 ISO image can be downloaded from Microsoft through your Microsoft Developer Network (MSDN) or Volume Licensing (VL) account.
25 |
26 | VMware Tools can be downloaded via the [Broadcom Knowledge Base][broadcom-kb].
27 |
28 | In this tutorial, we use Windows Server 22H2 `en-us_windows_server_2022_x64_dvd_620d7eac.iso` and `VMware-tools-windows-12.5.0-23800621.iso`.
29 |
30 | ### Install govc (Optional)
31 |
32 | You may follow the [govc documentation][govc-doc] to install govc on the Linux VM where you're building the image.
33 |
34 | You may use the below example bash commands to upload Windows Server 22H2 ISO and the VMware Tools Windows ISO to your vCenter instance.
35 |
36 | ```bash
37 | export GOVC_URL=[VC_URL]
38 | export GOVC_USERNAME=[VC_USERNAME]
39 | export GOVC_PASSWORD=[VC_PASSWORD]
40 | export GOVC_INSECURE=1
41 | export GOVC_DATACENTER=Datacenter
42 | export GOVC_CLUSTER=Management-Cluster
43 | export GOVC_DATASTORE=datastore22
44 |
45 | govc datastore.upload --ds="$GOVC_DATASTORE" --dc="$GOVC_DATACENTER" en-us_windows_server_2022_x64_dvd_620d7eac.iso windows2022.iso
46 | govc datastore.upload --ds="$GOVC_DATASTORE" --dc="$GOVC_DATACENTER" VMware-tools-windows-12.5.0-23800621.iso vmtools.iso
47 | ```
48 |
49 | Alternatively, you may use the vCenter UI to upload the ISOs to the datastore.
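
Either way, you can verify the ISOs are present in the datastore before moving on, for example with govc, reusing the environment variables exported above:

```bash
# List the uploaded ISOs in the target datastore.
govc datastore.ls --ds="$GOVC_DATASTORE" --dc="$GOVC_DATACENTER" | grep -i '\.iso'
```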
50 |
51 | ## Prepare Windows setup answer file
52 |
53 | You may customize the Windows Node Image with a [Windows setup answer file][windows-setup-ans-file]
54 |
55 | The upstream Windows setup answer file can be found at [Image Builder][ib-windows-unattended-xml].
56 |
57 | ### Provision Administrative Account for Log Collection
58 |
59 | In order for the Windows nodes to work with the [vSphere Kubernetes Service support bundle tool][gather-logs], you need to add an administrative account in the answer file.
60 |
61 | The following snippet shows how to add an administrative account in the answer file.
62 |
63 | ```bash
64 | curl https://raw.githubusercontent.com/kubernetes-sigs/image-builder/refs/heads/main/images/capi/packer/ova/windows/windows-2022-efi/autounattend.xml -o /home/image-builder/windows_autounattend.xml
65 | vi /home/image-builder/windows_autounattend.xml
66 | ```
67 |
68 | Locate the `LocalAccounts` in the xml and add a new `LocalAccount` to this section.
69 |
70 | ```xml
71 | <LocalAccounts>
72 |   <LocalAccount wcm:action="add">
73 |     <Description>Administrator</Description>
74 |     <DisplayName>Administrator</DisplayName>
75 |     <Group>Administrators</Group>
76 |     <Name>Administrator</Name>
77 |   </LocalAccount>
78 |   <LocalAccount wcm:action="add">
79 |     <Password>
80 |       <Value>MyAdminPassw0rd</Value>
81 |       <PlainText>true</PlainText>
82 |     </Password>
83 |     <Description>For log collection</Description>
84 |     <DisplayName>Admin Account</DisplayName>
85 |     <Name>WindowsAdmin</Name>
86 |     <Group>Administrators</Group>
87 |   </LocalAccount>
88 | </LocalAccounts>
89 | ```
90 |
91 | You should alter the user name and password to conform to your organizational policies.
92 |
93 | ## Update vsphere.j2 with vSphere Environment Details
94 |
95 | The `vsphere.j2` file is a packer configuration file with vSphere environment details.
96 |
97 | CD to the `vsphere-tanzu-kubernetes-grid-image-builder/packer-variables/` directory.
98 |
99 | Update the `vsphere.j2` and `packer-variables/windows/vsphere-windows.j2` environment variables with details for your vCenter 8 instance.
100 |
101 | ```bash
102 | $ vi vsphere.j2
103 |
104 | {
105 | {# vCenter server IP or FQDN #}
106 | "vcenter_server":"192.2.2.2",
107 | {# vCenter username #}
108 | "username":"user@vsphere.local",
109 | {# vCenter user password #}
110 | "password":"ADMIN-PASSWORD",
111 | {# Datacenter name where packer creates the VM for customization #}
112 | "datacenter":"Datacenter",
113 | {# Datastore name for the VM #}
114 | "datastore":"datastore22",
115 | {# [Optional] Folder name #}
116 | "folder":"",
117 | {# Cluster name where packer creates the VM for customization #}
118 | "cluster": "Management-Cluster",
119 | {# Packer VM network #}
120 | "network": "PG-MGMT-VLAN-1050",
121 | {# To use insecure connection with vCenter #}
122 | "insecure_connection": "true",
123 | {# To create a linked clone of the Packer VM after customization #}
124 | "linked_clone": "true",
125 | {# To create a snapshot of the Packer VM after customization #}
126 | "create_snapshot": "true",
127 | {# To destroy Packer VM after Image Build is completed #}
128 | "destroy": "true"
129 | }
130 | ```
131 |
132 | ```bash
133 | vi packer-variables/windows/vsphere-windows.j2
134 |
135 | {
136 | "os_iso_path": "[datastore22] windows2022.iso",
137 | "vmtools_iso_path": "[datastore22] vmtools.iso"
138 | }
139 | ```
140 |
141 | NOTE: You need to match the ISO image file names in the datastore.
142 |
143 | ## Run the Artifacts Container for the Selected Kubernetes Version
144 |
145 | Usage:
146 |
147 | ```bash
148 | make run-artifacts-container
149 | ```
150 |
151 | ## Run the Image Builder Application
152 |
153 | Usage:
154 |
155 | ```bash
156 | make build-node-image OS_TARGET=<os-target> TKR_SUFFIX=<tkr-suffix> HOST_IP=<host-ip> IMAGE_ARTIFACTS_PATH=<image-artifacts-path> ARTIFACTS_CONTAINER_PORT=8081
157 | ```
158 |
159 | NOTE:
160 |
161 | - The HOST_IP must be reachable from the vCenter.
162 |
163 | - You may list the Kubernetes releases in your Supervisor cluster to get the version suffix.
164 |
165 | ```bash
166 | $ kubectl get kr
167 |
168 | NAME VERSION READY COMPATIBLE CREATED TYPE
169 | kubernetesrelease.kubernetes.vmware.com/v1.32.0---vmware.6-fips-vkr.2 v1.32.0+vmware.6-fips-vkr.2 True True 3h8m
170 | ```
171 |
172 | Example:
173 |
174 | ```bash
175 | make build-node-image OS_TARGET=windows-2022-efi TKR_SUFFIX=vkr.4 HOST_IP=192.2.2.3 IMAGE_ARTIFACTS_PATH=/home/image-builder/image ARTIFACTS_CONTAINER_PORT=8081 AUTO_UNATTEND_ANSWER_FILE_PATH=/home/image-builder/windows_autounattend.xml
176 | ```
177 |
178 | ## Verify the Custom Image
179 |
180 | Locally, the image is stored in the `ovas` directory under the `IMAGE_ARTIFACTS_PATH` that you specified. For example, `/home/image-builder/image/ovas`.
181 |
182 | The `logs` directory under the `IMAGE_ARTIFACTS_PATH` contains the `packer-xxxx.log` file that you can use to troubleshoot image building errors.
183 |
184 | To verify that the image is built successfully, check vCenter Server.
185 |
186 | You should see the image being built in the datacenter, cluster, and folder that you specified in the vsphere.j2 file.
187 |
188 | ## Upload the Image to the vSphere Kubernetes Service Environment
189 |
190 | Download the custom image from local storage or from the vCenter Server.
191 |
192 | In your vSphere with Tanzu environment, create a local content library and upload the custom image there.
193 |
194 | Refer to the documentation for [creating a local content library][tkgs-service-with-supervisor] for use with vSphere Kubernetes Service.
195 |
196 | You need to upload both the Linux and Windows node images to the local content library, as the Linux node image will be
197 | used to deploy VMs for the Kubernetes control plane and for Linux node pools (if any).
198 |
199 | Note: You should disable the Security Policy for this content library for the Windows image.
200 |
201 | ## Create a cluster with Windows Node Pool
202 |
203 | You may refer to [vSphere Kubernetes Service 3.3 documentation][vsphere-kubernetes-service-release-notes] for more information on how to deploy a cluster with Windows Node Pool with vSphere Kubernetes Service 3.3 and above.
204 |
205 | ## Known Issues for Windows Container Workload Support
206 |
207 | ### Kubernetes Release v1.32
208 |
209 | - The minimum VM class should be best-effort-large for a Windows worker node
210 |
211 | When a Windows worker node is configured with a VM class that has a resource configuration lower than best-effort-large, some of the management pods may not run due to loss of network connectivity.
212 |
213 | Resolution:
214 | Switch to a VM class configured with more resources.
215 |
216 | - After a node reboot, stateful Windows application pods can be in a failed (Unknown) state.
217 |
218 | Symptoms: The Windows stateful pod description shows a failed mount with an error like the following:
219 |
220 | ```bash
221 | Warning FailedMount 23m kubelet MountVolume.MountDevice failed for volume "pvc-63a2bde4-8183-40ac-b115-247ae64b6cb4" : rpc error: code = Internal desc = error mounting volume. Parameters: {7e1b7769-d86d-4b8a-b9a6-a1a303021b43-63a2bde4-8183-40ac-b115-247ae64b6cb4 ntfs
222 | ```
223 |
224 | Relevant log location: the vsphere-csi-node pod logs (`kubectl logs $pod_name`) or `kubectl describe` on the application pod itself.
225 |
226 | Workaround: After the restart, if the pod is in the Unknown state, follow these steps:
227 |
228 | 1. Cordon the node: `kubectl cordon <node>`
229 |
230 | 2. Delete the pod, let the pod schedule on another node, and wait until the pod is running
231 |
232 | 3. Uncordon the node: `kubectl uncordon <node>`
233 |
234 | - Upgrade of some Linux pods will not complete when using a configuration with 1 control plane node (Linux) and 1 worker node (Windows).
235 |
236 | Reason: Some of the Linux pods are configured to use system resources like a nodePort, are configured with node affinity to Linux nodes, and use a rolling upgrade strategy. When there is a single Linux node in the cluster and pods are being upgraded, the previous version of the pod holds system resources like the nodePort, which blocks the scheduling and starting of the new version.
237 |
238 | Symptom: The pod will be stuck in the Pending state with an error message similar to the following:
239 |
240 | ```bash
241 | Warning FailedScheduling 3m5s (x38 over 3h9m) default-scheduler 0/2 nodes are available: 1 node(s) didn't have free ports for the requested pod ports, 1 node(s) had untolerated taint {os: windows}. preemption: 0/2 nodes are available: 1 Preemption is not helpful for scheduling, 1 node(s) didn't have free ports for the requested pod ports.
242 | ```
243 |
244 | Workaround: Configure additional control plane nodes or another node pool that has Linux nodes.
245 |
246 | ### Generic Known Issues
247 |
248 | You may refer to [this link][supervisor-8-release-notes] for generic known issues for vSphere Kubernetes Service.
249 |
250 | [//]: Links
251 |
252 | [vsphere-kubernetes-service-release-notes]: https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere-supervisor/8-0/release-notes/vmware-tanzu-kubernetes-grid-service-release-notes.html
253 | [prerequisites]: https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere-supervisor/8-0/installing-and-configuring-vsphere-supervisor.html
254 | [tutorials-base]: examples/tutorial_building_an_image.md
255 | [broadcom-kb]: https://knowledge.broadcom.com/external/article/315363/how-to-install-vmware-tools.html
256 | [govc-doc]: https://github.com/vmware/govmomi/blob/main/govc/README.md
257 | [windows-setup-ans-file]: https://learn.microsoft.com/en-us/windows-hardware/manufacture/desktop/update-windows-settings-and-scripts-create-your-own-answer-file-sxs?view=windows-11
258 | [ib-windows-unattended-xml]: https://raw.githubusercontent.com/kubernetes-sigs/image-builder/refs/heads/main/images/capi/packer/ova/windows/windows-2022-efi/autounattend.xml
259 | [gather-logs]: https://knowledge.broadcom.com/external/article/345464/gathering-logs-for-vsphere-with-tanzu.html
260 | [tkgs-service-with-supervisor]: https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere-supervisor/8-0/using-tkg-service-with-vsphere-supervisor.html
261 | [supervisor-8-release-notes]:https://techdocs.broadcom.com/us/en/vmware-cis/vsphere/vsphere-supervisor/8-0/release-notes/vmware-tkrs-release-notes.html
262 |
--------------------------------------------------------------------------------
/goss/goss-command.yaml:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | command:
6 | {{ if ne .Vars.OS "windows" }} # Linux Only
7 | containerd --version | awk -F' ' '{print substr($3,2); }':
8 | exit-status: 0
9 | stdout: []
10 | stderr: []
11 | timeout: 0
12 | crictl ps:
13 | exit-status: 0
14 | timeout: 0
15 | ctr -n k8s.io image ls 2>/dev/null | grep docker-registry | grep 'io.cri-containerd.pinned=pinned':
16 | exit-status: 0
17 | stderr: []
18 | timeout: 10000 # in ms
19 | ctr -n k8s.io image ls 2>/dev/null | grep pause | grep 'io.cri-containerd.pinned=pinned':
20 | exit-status: 0
21 | stderr: []
22 | timeout: 10000 # in ms
23 | {{if ne .Vars.containerd_wasm_shims_runtimes ""}}
24 | containerd-shim-lunatic-v1:
25 | exit-status: 1
26 | stdout: [ ]
27 | stderr: ["io.containerd.lunatic.v1: InvalidArgument(\"Shim namespace cannot be empty\")"]
28 | timeout: 0
29 | containerd-shim-slight-v1:
30 | exit-status: 1
31 | stdout: [ ]
32 | stderr: ["io.containerd.slight.v1: InvalidArgument(\"Shim namespace cannot be empty\")"]
33 | timeout: 0
34 | containerd-shim-spin-v2:
35 | exit-status: 1
36 | stdout: [ ]
37 | stderr: ["io.containerd.spin.v2: InvalidArgument(\"Shim namespace cannot be empty\")"]
38 | timeout: 0
39 | containerd-shim-wws-v1:
40 | exit-status: 1
41 | stdout: [ ]
42 | stderr: ["io.containerd.wws.v1: InvalidArgument(\"Shim namespace cannot be empty\")"]
43 | timeout: 0
44 | grep -E 'io\.containerd\.(lunatic|slight|spin|wws)\.v' /etc/containerd/config.toml:
45 | exit-status: 0
46 | stdout: [ ]
47 | stderr: [ ]
48 | timeout: 0
49 | {{end}}
50 | {{if eq .Vars.kubernetes_source_type "pkg"}}
51 | {{if eq .Vars.kubernetes_cni_source_type "pkg"}}
52 | # Upstream IB uses 'print $2' for fetching the actual image name, but in our case this will differ since we tag the image to the localhost registry
53 | crictl images | grep -v 'IMAGE ID' | awk -F'[ /]' '{print $3}' | sed 's/-{{ .Vars.arch }}//g' | sort:
54 | exit-status: 0
55 | stderr: []
56 | timeout: 0
57 | stdout: ["coredns", "etcd", "kube-apiserver", "kube-controller-manager", "kube-proxy", "kube-scheduler", "pause"]
58 | {{end}}
59 | {{end}}
60 | {{if and (eq .Vars.kubernetes_source_type "http") (eq .Vars.kubernetes_cni_source_type "http") (not .Vars.kubernetes_load_additional_imgs)}}
61 | # The second last pipe of awk is to take out arch from kube-apiserver-amd64 (i.e. amd64 or any other arch)
62 | # Upstream IB uses 'print $2' for fetching the actual image name, but in our case this will differ since we tag the image to the localhost registry
63 | crictl images | grep -v 'IMAGE ID' | awk -F'[ /]' '{print $3}' | sed 's/-{{ .Vars.arch }}//g' | sort:
64 | exit-status: 0
65 | stderr: []
66 | timeout: 0
67 | stdout: ["kube-apiserver", "kube-controller-manager", "kube-proxy", "kube-scheduler"]
68 | {{end}}
69 | {{if and (eq .Vars.kubernetes_source_type "http") (eq .Vars.kubernetes_cni_source_type "http") (.Vars.kubernetes_load_additional_imgs)}}
70 | # The second last pipe of awk is to take out arch from kube-apiserver-amd64 (i.e. amd64 or any other arch)
71 | # Upstream IB uses 'print $2' for fetching the actual image name, but in our case this will differ since we tag the image to the localhost registry
72 | crictl images | grep -v 'IMAGE ID' | awk -F'[ /]' '{print $3}' | sed 's/-{{ .Vars.arch }}//g' | sort:
73 | exit-status: 0
74 | stderr: []
75 | timeout: 0
76 | stdout: ["coredns", "etcd", "kube-apiserver", "kube-controller-manager", "kube-proxy", "kube-scheduler", "pause"]
77 | {{end}}
78 | {{if eq .Vars.kubernetes_source_type "http"}}
79 | kubectl version --client -o json | jq .clientVersion.gitVersion | tr -d '"' | awk '{print substr($1,2); }':
80 | exit-status: 0
81 | stdout: [{{ .Vars.kubernetes_version }}]
82 | stderr: []
83 | timeout: 0
84 | kubeadm version -o json | jq .clientVersion.gitVersion | tr -d '"' | awk '{print substr($1,2); }':
85 | exit-status: 0
86 | stdout: [{{ .Vars.kubernetes_version }}]
87 | stderr: []
88 | timeout: 0
89 | kubelet --version | awk -F' ' '{print $2}' | tr -d '"' | awk '{print substr($1,2); }':
90 | exit-status: 0
91 | stdout: [{{ .Vars.kubernetes_version }}]
92 | stderr: []
93 | timeout: 0
94 | {{end}}
95 | {{if eq .Vars.kubernetes_cni_source_type "http"}}
96 | /opt/cni/bin/host-device 2>&1 | awk -F' ' '{print substr($4,2); }':
97 | exit-status: 0
98 | stdout: [{{ .Vars.kubernetes_cni_version }}]
99 | stderr: []
100 | timeout: 0
101 | {{end}}
102 | {{if eq .Vars.OS "photon"}}
103 | cat /sys/kernel/mm/transparent_hugepage/enabled:
104 | exit-status: 0
105 | stdout: ["always [madvise] never"]
106 | stderr: []
107 | timeout: 0
108 | {{end}}
109 | {{range $name, $vers := index .Vars .Vars.OS .Vars.PROVIDER "command"}}
110 | {{ $name }}:
111 | {{range $key, $val := $vers}}
112 | {{$key}}: {{$val}}
113 | {{end}}
114 | {{end}}
115 | {{end}} #End linux only
116 |
117 | {{ if eq .Vars.OS "windows" }} # Windows
118 | automatic updates set to notify:
119 | exit-status: 0
120 | exec: powershell -command "(Get-ItemPropertyValue 'HKLM:\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate\AU' -name AUOptions) -eq '2'"
121 | stdout:
122 | - "True"
123 | timeout: 30000
124 | automatic updates set to notify with correct type:
125 | exit-status: 0
126 | exec: powershell -command "(Get-ItemPropertyValue 'HKLM:\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate\AU' -name AUOptions).GetType().Name -eq 'Int32'"
127 | stdout:
128 | - "True"
129 | timeout: 30000
130 | automatic updates are disabled:
131 | exit-status: 0
132 | exec: powershell -command "(Get-ItemPropertyValue 'HKLM:\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate\AU' -name NoAutoUpdate) -eq '1'"
133 | stdout:
134 | - "True"
135 | timeout: 30000
136 | automatic updates are disabled with correct type:
137 | exit-status: 0
138 | exec: powershell -command "(Get-ItemPropertyValue 'HKLM:\SOFTWARE\Policies\Microsoft\Windows\WindowsUpdate\AU' -name NoAutoUpdate).GetType().Name -eq 'Int32'"
139 | stdout:
140 | - "True"
141 | timeout: 30000
142 | kubectl version --client -o json:
143 | exit-status: 0
144 | stdout:
145 | - {{.Vars.kubernetes_version}}
146 | - "windows"
147 | - {{.Vars.arch}}
148 | timeout: 30000
149 | kubeadm version:
150 | exit-status: 0
151 | stdout:
152 | - {{.Vars.kubernetes_version}}
153 | - "windows"
154 | - {{.Vars.arch}}
155 | timeout: 30000
156 | kubelet --version:
157 | exit-status: 0
158 | stdout:
159 | - {{.Vars.kubernetes_version}}
160 | timeout: 10000
161 | {{ if eq .Vars.distribution_version "2019" }}
162 | Windows build version is high enough:
163 | exit-status: 0
164 | exec: powershell -command "(Get-ItemProperty 'HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name UBR).UBR -ge 1817"
165 | stdout:
166 | - "True"
167 | timeout: 30000
168 | Check HNS Control Flag:
169 | exit-status: 0
170 | exec: powershell -command "(Get-ItemProperty -Path "HKLM:\SYSTEM\CurrentControlSet\Services\hns\State" -Name HNSControlFlag).HNSControlFlag -eq 80"
171 | stdout:
172 | - True
173 | timeout: 30000
174 | Check WCIFS Flag:
175 | exit-status: 0
176 | exec: powershell -command "(Get-ItemProperty 'HKLM:\SYSTEM\CurrentControlSet\Services\wcifs' -Name WcifsSOPCountDisabled).WcifsSOPCountDisabled -eq 0"
177 | stdout:
178 | - True
179 | timeout: 30000
180 | {{end}}
181 | {{ if eq .Vars.runtime "containerd" }}
182 | Correct Containerd Version:
183 | exec: "\"/Program Files/containerd/containerd.exe\" --version"
184 | exit-status: 0
185 | stdout:
186 | - "{{.Vars.containerd_version}}"
187 | timeout: 30000
188 | Correct Containerd config:
189 | exec: "\"/Program Files/containerd/containerd.exe\" config dump"
190 | exit-status: 0
191 | stdout:
192 | - "sandbox_image = \"{{.Vars.pause_image}}\""
193 | - "conf_dir = \"C:/etc/cni/net.d\""
194 | - "bin_dir = \"C:/opt/cni/bin\""
195 | - "root = \"C:\\\\ProgramData\\\\containerd\\\\root\""
196 | - "state = \"C:\\\\ProgramData\\\\containerd\\\\state\""
197 | timeout: 30000
198 | Check Windows Defender Exclusions are in place:
199 | exit-status: 0
200 | exec: powershell -command "(Get-MpPreference | select ExclusionProcess)"
201 | stdout:
202 | - \Program Files\containerd\containerd.exe,
203 | - \Program Files\containerd\ctr.exe
204 | Check SMB CompartmentNamespace Flag:
205 | exit-status: 0
206 | exec: powershell -command "(Get-ItemProperty 'HKLM:\SYSTEM\CurrentControlSet\Services\hns\State' -Name EnableCompartmentNamespace).EnableCompartmentNamespace -eq 1"
207 | stdout:
208 | - True
209 | timeout: 30000
210 | Windows Port Range is Expanded:
211 | exit-status: 0
212 | exec: netsh int ipv4 show dynamicportrange tcp
213 | stdout:
214 | - "Start Port : 34000"
215 | - "Number of Ports : 31536"
216 | timeout: 30000
217 | {{end}}
218 |
219 | {{if eq .Vars.PROVIDER "azure"}}
220 | Verify firewall rule to block 168.63.129.16:80 for cve-2021-27075:
221 | exit-status: 0
222 | exec: powershell -command "(Get-NetFirewallRule -ErrorAction Stop -DisplayName 'Block-Outbound-168.63.129.16-port-80-for-cve-2021-27075').Enabled"
223 | stdout:
224 | - True
225 | stderr: []
226 | timeout: 30000
227 |
228 | # this could be moved to place for other providers if they want to install it
229 | Key Vault gMSA binary is installed:
230 | exec: powershell -command "Test-Path -Path C:\Windows\System32\CCGAKVPlugin.dll"
231 | exit-status: 0
232 | stdout:
233 | - "True"
234 | timeout: 30000
235 | Key Vault gMSA binary COM is registered:
236 | exec: powershell -command "(Get-Item 'HKLM:SYSTEM\CurrentControlSet\Control\CCG\COMClasses\{CCC2A336-D7F3-4818-A213-272B7924213E}') | Ft -autosize -wrap"
237 | exit-status: 0
238 | stdout:
239 | - "CCC2A336-D7F3-4818-A213-272B7924213E"
240 | timeout: 30000
241 | Key Vault gMSA binary is registered:
242 | exec: powershell -command "Get-ItemProperty -Path 'HKLM:SOFTWARE\CLASSES\CLSID\{CCC2A336-D7F3-4818-A213-272B7924213E}\InprocServer32\'"
243 | exit-status: 0
244 | stdout:
245 | - "C:\\Windows\\System32\\CCGAKVPlugin.dll"
246 | timeout: 30000
247 | Key Vault gMSA CCG interface is registered:
248 | exec: powershell -command "(Get-Item 'HKLM:SOFTWARE\Classes\Interface\{6ECDA518-2010-4437-8BC3-46E752B7B172}') | Ft -autosize -wrap"
249 | exit-status: 0
250 | stdout:
251 | - "ICcgDomainAuthCredentials"
252 | timeout: 30000
253 | Check Azure CLI is installed via alias:
254 | exec: powershell -command "az"
255 | exit-status: 0
256 | timeout: 30000
257 | {{end}}
258 |
259 | {{ if ne .Vars.ssh_source_url "" }}
260 | Check permission of OpenSSH directory for SYSTEM:
261 | exec: powershell -command "((Get-Acl 'C:\Program Files\OpenSSH').Access | Where-Object{$_.IdentityReference -eq 'NT AUTHORITY\SYSTEM' -and $_.FileSystemRights -eq 'FullControl'}) -ne $null"
262 | exit-status: 0
263 | stdout:
264 | - True
265 | timeout: 30000
266 | Check permission of OpenSSH directory for Administrators:
267 | exec: powershell -command "((Get-Acl 'C:\Program Files\OpenSSH').Access | Where-Object{$_.IdentityReference -eq 'BUILTIN\Administrators' -and $_.FileSystemRights -eq 'FullControl'}) -ne $null"
268 | exit-status: 0
269 | stdout:
270 | - True
271 | timeout: 30000
272 | Check permission of OpenSSH directory for Users:
273 | exec: powershell -command "((Get-Acl 'C:\Program Files\OpenSSH').Access | Where-Object{$_.IdentityReference -eq 'BUILTIN\Users' -and $_.FileSystemRights -eq 'ReadAndExecute, Synchronize'}) -eq $null"
274 | exit-status: 0
275 | stdout:
276 | - True
277 | timeout: 30000
278 | Check if SSH server port is open:
279 | exec: powershell -command "(Get-NetFirewallRule -ErrorAction Stop -DisplayName 'sshd').Enabled"
280 | exit-status: 0
281 | stdout:
282 | - True
283 | timeout: 30000
284 | {{end}}
285 | {{end}} #end windows
286 |
--------------------------------------------------------------------------------
/goss/goss-files.yaml:
--------------------------------------------------------------------------------
1 | # © Broadcom. All Rights Reserved.
2 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | file:
6 | {{range $name, $vers := index .Vars .Vars.OS "common-files"}}
7 | {{ $name }}:
8 | exists: {{ $vers.exists }}
9 | filetype: {{ $vers.filetype }}
10 | contains: {{ range $vers.contains}}
11 | - {{.}}
12 | {{end}}
13 | {{end}}
14 | {{range $name, $vers := index .Vars .Vars.OS .Vars.PROVIDER "files"}}
15 | {{ $name }}:
16 | exists: {{ $vers.exists }}
17 | filetype: {{ $vers.filetype }}
18 | contains: {{ range $vers.contains}}
19 | - {{.}}
20 | {{end}}
21 | {{end}}
22 | # TKG specific changes.
23 | {{range $name, $vers := index .Vars.tkg .Vars.OS "files"}}
24 | {{ $name }}:
25 | exists: {{ $vers.exists }}
26 | filetype: {{ $vers.filetype }}
27 | contains: {{ range $vers.contains}}
28 | - {{.}}
29 | {{end}}
30 | {{end}}
31 |
--------------------------------------------------------------------------------
/goss/goss-kernel-params.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 |
4 | {{ if ne .Vars.OS "windows" }}
5 | kernel-param:
6 | net.bridge.bridge-nf-call-iptables:
7 | value: "1"
8 | net.bridge.bridge-nf-call-ip6tables:
9 | value: "1"
10 | {{if and (contains "1.24" .Vars.kubernetes_version ) (contains "photon" .Vars.OS )}}
11 | net.ipv4.ip_forward:
12 | value: "0"
13 | net.ipv6.conf.all.forwarding:
14 | value: "0"
15 | {{else}}
16 | net.ipv4.ip_forward:
17 | value: "1"
18 | net.ipv6.conf.all.forwarding:
19 | value: "1"
20 | net.ipv4.tcp_congestion_control:
21 | value: "bbr"
22 | {{end}}
23 | net.ipv6.conf.all.disable_ipv6:
24 | value: "0"
25 | {{range $name, $vers := index .Vars .Vars.OS "common-kernel-param"}}
26 | {{ $name }}:
27 | {{range $key, $val := $vers}}
28 | {{$key}}: "{{$val}}"
29 | {{end}}
30 | {{end}}
31 | {{range $name, $vers := index .Vars.tkg .Vars.OS "kernel-param"}}
32 | {{ $name }}:
33 | {{range $key, $val := $vers}}
34 | {{$key}}: "{{$val}}"
35 | {{end}}
36 | {{end}}
37 | {{range $name, $vers := index .Vars.tkg .Vars.OS .Vars.PROVIDER "kernel-param"}}
38 | {{ $name }}:
39 | {{range $key, $val := $vers}}
40 | {{$key}}: "{{$val}}"
41 | {{end}}
42 | {{end}}
43 | {{end}}
--------------------------------------------------------------------------------
/goss/goss-package.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 |
4 | {{ if ne .Vars.OS "windows"}}
5 | kubernetes_version: &kubernetes_version
6 | versions:
7 | or:
8 | - contain-element:
9 | match-regexp: "^\\Q{{ .Vars.kubernetes_deb_version }}\\E$"
10 | - contain-element:
11 | match-regexp: "^\\Q{{ .Vars.kubernetes_rpm_version }}\\E$"
12 |
13 | kubernetes_cni_version: &kubernetes_cni_version
14 | versions:
15 | {{ if or .Vars.kubernetes_cni_deb_version .Vars.kubernetes_cni_rpm_version }}
16 | or:
17 | - contain-element:
18 | match-regexp: "^\\Q{{ .Vars.kubernetes_cni_deb_version }}\\E$"
19 | - contain-element:
20 | match-regexp: "^\\Q{{ .Vars.kubernetes_cni_rpm_version }}\\E$"
21 | {{ end }}
22 |
23 | package:
24 | # Flatcar uses Ignition instead of cloud-init
25 | {{if ne .Vars.OS "flatcar"}}
26 | cloud-init:
27 | installed: true
28 | {{end}}
29 | ntp:
30 | installed: false
31 | {{if eq .Vars.kubernetes_source_type "pkg"}}
32 | kubeadm:
33 | installed: true
34 | <<: *kubernetes_version
35 | kubelet:
36 | installed: true
37 | <<: *kubernetes_version
38 | kubectl:
39 | installed: true
40 | <<: *kubernetes_version
41 | {{end}}
42 | {{if eq .Vars.kubernetes_cni_source_type "pkg"}}
43 | kubernetes-cni:
44 | installed: true
45 | <<: *kubernetes_cni_version
46 | {{end}}
47 | # Looping over common packages for an OS
48 | {{range $name, $vers := index .Vars .Vars.OS "common-package"}}
49 | {{$name}}:
50 | installed: true
51 | {{range $key, $val := $vers}}
52 | {{$key}}: {{$val}}
53 | {{end}}
54 | {{end}}
55 | # Looping over provider specific packages for an OS
56 | {{range $name, $vers := index .Vars .Vars.OS .Vars.PROVIDER "package"}}
57 | {{$name}}:
58 | installed: true
59 | {{range $key, $val := $vers}}
60 | {{$key}}: {{$val}}
61 | {{end}}
62 | {{end}}
63 |
64 | # Iterate thru different OS Versions like RHEL7/8, Photon 3/4(future) etc.
65 | {{$distro_version := .Vars.OS_VERSION}}
66 | {{range $component := index .Vars .Vars.OS .Vars.PROVIDER "os_version"}}
67 | {{if eq $distro_version (index $component "distro_version")}}
68 | {{ range $name, $vers := index $component "package"}}
69 | {{$name}}:
70 | installed: true
71 | {{range $key, $val := $vers}}
72 | {{$key}}: {{$val}}
73 | {{end}}
74 | {{end}}
75 | {{end}}
76 | {{end}}
77 | {{end}}
78 |
79 | {{ if eq .Vars.OS "windows"}} # Windows
80 | # Workaround until windows features are added to goss
81 | command:
82 | {{range $name, $vers := index .Vars .Vars.OS "common-windows-features"}}
83 | "Windows Feature - {{ $name }}":
84 | exec: powershell -command "(Get-WindowsFeature {{ $name }} | select *)"
85 | exit-status: 0
86 | stdout: {{range $vers.expected}}
87 | - {{.}}
88 | timeout: 60000
89 | {{end}}
90 | {{end}}
91 | {{end}}
92 |
93 | # TKG specific changes.
94 | {{ if ne .Vars.OS "windows"}}
95 |
96 | {{range $name, $vers := index .Vars.tkg .Vars.OS "package"}}
97 | {{$name}}:
98 | installed: true
99 | {{range $key, $val := $vers}}
100 | {{$key}}: {{$val}}
101 | {{end}}
102 | {{end}}
103 |
104 | # Iterate thru different OS Versions like RHEL7/8, Photon 3/4(future) etc.
105 | {{$distro_version := .Vars.OS_VERSION}}
106 | {{range $component := index .Vars.tkg .Vars.OS .Vars.PROVIDER "os_version"}}
107 | {{if eq $distro_version (index $component "distro_version")}}
108 | {{ range $name, $vers := index $component "package"}}
109 | {{$name}}:
110 | installed: true
111 | {{range $key, $val := $vers}}
112 | {{$key}}: {{$val}}
113 | {{end}}
114 | {{end}}
115 | {{end}}
116 | {{end}}
117 | {{end}}
--------------------------------------------------------------------------------
/goss/goss-service.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 |
4 | service:
5 | {{ if ne .Vars.OS "windows"}} # Linux
6 | containerd:
7 | enabled: false
8 | running: true
9 | dockerd:
10 | enabled: false
11 | running: false
12 | kubelet:
13 | enabled: true
14 | running: false
15 | conntrackd:
16 | enabled: false
17 | running: false
18 | auditd:
19 | enabled: true
20 | running: true
21 | {{if ne .Vars.OS "flatcar"}}
22 | # Flatcar uses systemd-timesyncd instead of chronyd.
23 | chronyd:
24 | enabled: true
25 | running: true
26 | {{end}}
27 | {{range $name, $vers := index .Vars .Vars.OS "common-service"}}
28 | {{ $name }}:
29 | {{range $key, $val := $vers}}
30 | {{$key}}: {{$val}}
31 | {{end}}
32 | {{end}}
33 | {{range $name, $vers := index .Vars .Vars.OS .Vars.PROVIDER "service"}}
34 | {{ $name }}:
35 | {{range $key, $val := $vers}}
36 | {{$key}}: {{$val}}
37 | {{end}}
38 | {{end}}
39 | {{range $name, $vers := index .Vars.tkg .Vars.OS "service"}}
40 | {{ $name }}:
41 | {{range $key, $val := $vers}}
42 | {{$key}}: {{$val}}
43 | {{end}}
44 | {{end}}
45 | {{end}}
46 |
47 | {{ if eq .Vars.OS "windows"}} # Windows
48 | # Workaround until windows services are added to goss
49 | command:
50 | {{range $name, $vers := index .Vars .Vars.OS "common-windows-service"}}
51 | "Windows Service - {{ $name }}":
52 | exec: powershell -command "(Get-Service {{ $name }} | select *)"
53 | exit-status: 0
54 | stdout: {{range $vers.expected}}
55 | - {{.}}
56 | {{end}}
57 | {{end}}
58 | {{range $name, $vers := index .Vars .Vars.OS .Vars.PROVIDER "windows-service"}}
59 | "Windows Service - {{ $name }}":
60 | exec: powershell -command "(Get-Service {{ $name }} | select *)"
61 | exit-status: 0
62 | stdout: {{range $vers.expected}}
63 | - {{.}}
64 | {{end}}
65 | {{end}}
66 |
67 | {{ if eq .Vars.runtime "containerd"}}
68 | "Windows Service - containerd":
69 | exec: powershell -command "(Get-Service containerd | select *)"
70 | exit-status: 0
71 | stdout:
72 | - Automatic
73 | - Running
74 | {{end}}
75 |
76 | {{end}}
77 |
--------------------------------------------------------------------------------
/goss/goss.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2023 VMware, Inc.
2 | # SPDX-License-Identifier: MPL-2.0
3 |
4 | gossfile:
5 | goss-command.yaml: {}
6 | goss-kernel-params.yaml: {}
7 | goss-service.yaml: {}
8 | goss-package.yaml: {}
9 | goss-files.yaml: {}
10 |
--------------------------------------------------------------------------------
/hack/make-helpers/build-image-builder-container.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # © Broadcom. All Rights Reserved.
3 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
4 | # SPDX-License-Identifier: MPL-2.0
5 |
6 | set -e
7 |
8 | source $(dirname "${BASH_SOURCE[0]}")/utils.sh
9 | enable_debugging
10 |
11 | is_argument_set "KUBERNETES_VERSION argument is required" $KUBERNETES_VERSION
12 |
13 |
14 | if [ -z "$IMAGE_BUILDER_BASE_IMAGE" ]; then
15 | # Makefile creates this environment variable
16 | IMAGE_BUILDER_BASE_IMAGE=$DEFAULT_IMAGE_BUILDER_BASE_IMAGE
17 | echo "Using default image builder base image $IMAGE_BUILDER_BASE_IMAGE"
18 | fi
19 |
20 | docker_build_args=$(jq -r '.docker_build_args | keys[]' $SUPPORTED_CONTEXT_JSON)
21 | build_variables=""
22 | for docker_arg in $docker_build_args;
23 | do
24 | docker_arg_value=$(jq -r '.docker_build_args."'$docker_arg'"' $SUPPORTED_CONTEXT_JSON)
25 | build_variables="${build_variables} --build-arg ${docker_arg}=${docker_arg_value}"
26 | done
27 |
28 | # by default don't show docker output
29 | docker_debug_flags="-q"
30 | if [ ! -z ${DEBUGGING+x} ]; then
31 | docker_debug_flags="--progress plain"
32 | fi
33 |
34 | docker build --platform=linux/amd64 $docker_debug_flags \
35 | -t $(get_image_builder_container_image_name $KUBERNETES_VERSION) \
36 | $build_variables $(dirname "${BASH_SOURCE[0]}")/../../.
37 |
--------------------------------------------------------------------------------
/hack/make-helpers/build-node-image.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # © Broadcom. All Rights Reserved.
3 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
4 | # SPDX-License-Identifier: MPL-2.0
5 |
6 | set -e
7 |
8 | source $(dirname "${BASH_SOURCE[0]}")/utils.sh
9 | ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
10 |
11 | enable_debugging
12 |
13 | is_argument_set "KUBERNETES_VERSION argument is required" $KUBERNETES_VERSION
14 | is_argument_set "OS_TARGET argument is required" $OS_TARGET
15 | # if [[ $OS_TARGET != windows-* ]]; then
16 | # is_argument_set "TKR_SUFFIX argument is required" $TKR_SUFFIX
17 | # fi
18 | is_argument_set "HOST_IP argument is required" $HOST_IP
19 | is_argument_set "IMAGE_ARTIFACTS_PATH argument is required" $IMAGE_ARTIFACTS_PATH
20 |
21 | KUBERNETES_VERSION=$(cat supported-version.txt | xargs)
22 |
23 | if [ -z "$ARTIFACTS_CONTAINER_PORT" ]; then
24 | # Makefile creates this environment variable
25 | ARTIFACTS_CONTAINER_PORT=$DEFAULT_ARTIFACTS_CONTAINER_PORT
26 | echo "Using default port for artifacts container $DEFAULT_ARTIFACTS_CONTAINER_PORT"
27 | fi
28 |
29 | if [ -z "$PACKER_HTTP_PORT" ]; then
30 | # Makefile creates this environment variable
31 | PACKER_HTTP_PORT=$DEFAULT_PACKER_HTTP_PORT
32 | echo "Using default Packer HTTP port $PACKER_HTTP_PORT"
33 | fi
34 |
35 | function build_node_image() {
36 | docker rm -f $(get_node_image_builder_container_name "$KUBERNETES_VERSION" "$OS_TARGET")
37 |
38 | # allow the unattended answer file to be overridden
39 | AUTO_UNATTEND_ANSWER_FILE_BIND=
40 | [ -n "$AUTO_UNATTEND_ANSWER_FILE_PATH" ] && AUTO_UNATTEND_ANSWER_FILE_BIND="-v ${AUTO_UNATTEND_ANSWER_FILE_PATH}:/image-builder/images/capi/packer/ova/windows/${OS_TARGET}/autounattend.xml"
41 |
42 | # additional_jinja_args
43 | ADDITIONAL_PACKER_VAR_FILES_MOUNTS=
44 | INCONTAINER_ADDITIONAL_PACKER_VAR_ENV=
45 | if [ -n "$ADDITIONAL_PACKER_VARIABLE_FILES" ]; then
46 | for i in ${ADDITIONAL_PACKER_VARIABLE_FILES//,/ }
47 | do
48 | FILENAME=$(basename -- "${i}")
49 | INCONTAINER_PATH="/image-builder/images/capi/image/customizations/${FILENAME}"
50 | ADDITIONAL_PACKER_VAR_FILES_MOUNTS="${ADDITIONAL_PACKER_VAR_FILES_MOUNTS} -v ${i}:${INCONTAINER_PATH}"
51 | if [[ "${#INCONTAINER_ADDITIONAL_PACKER_VAR_ENV}" == 0 ]];then
52 | INCONTAINER_ADDITIONAL_PACKER_VAR_ENV="-e ADDITIONAL_PACKER_VARIABLE_FILES=${INCONTAINER_PATH}"
53 | else
54 | INCONTAINER_ADDITIONAL_PACKER_VAR_ENV="${INCONTAINER_ADDITIONAL_PACKER_VAR_ENV},${INCONTAINER_PATH}"
55 | fi
56 | done
57 | fi
58 |
59 | # override_package_repositories
60 | OVERRIDE_REPO_MOUNTS=
61 | INCONTAINER_OVERRIDE_REPO_ENV=
62 | if [ -n "$OVERRIDE_PACKAGE_REPOS" ]; then
63 | for i in ${OVERRIDE_PACKAGE_REPOS//,/ }
64 | do
65 | FILENAME=$(basename -- "${i}")
66 | INCONTAINER_PATH="/image-builder/images/capi/image/custom-repos/${FILENAME}"
67 | OVERRIDE_REPO_MOUNTS="${OVERRIDE_REPO_MOUNTS} -v ${i}:${INCONTAINER_PATH}"
68 | if [[ "${#INCONTAINER_OVERRIDE_REPO_ENV}" == 0 ]];then
69 | INCONTAINER_OVERRIDE_REPO_ENV="-e OVERRIDE_PACKAGE_REPOS=${INCONTAINER_PATH}"
70 | else
71 | INCONTAINER_OVERRIDE_REPO_ENV="${INCONTAINER_OVERRIDE_REPO_ENV},${INCONTAINER_PATH}"
72 | fi
73 | done
74 | fi
75 |
76 | docker run -d \
77 | --name $(get_node_image_builder_container_name "$KUBERNETES_VERSION" "$OS_TARGET") \
78 | $(get_node_image_builder_container_labels "$KUBERNETES_VERSION" "$OS_TARGET") \
79 | -v $ROOT/ansible:/image-builder/images/capi/image/ansible \
80 | -v $ROOT/ansible-windows:/image-builder/images/capi/image/ansible-windows \
81 | -v $ROOT/goss:/image-builder/images/capi/image/goss \
82 | -v $ROOT/hack:/image-builder/images/capi/image/hack \
83 | -v $ROOT/packer-variables:/image-builder/images/capi/image/packer-variables \
84 | -v $ROOT/scripts:/image-builder/images/capi/image/scripts \
85 | -v $IMAGE_ARTIFACTS_PATH:/image-builder/images/capi/artifacts \
86 | ${ADDITIONAL_PACKER_VAR_FILES_MOUNTS} \
87 | ${OVERRIDE_REPO_MOUNTS} \
88 | ${INCONTAINER_ADDITIONAL_PACKER_VAR_ENV} \
89 | ${INCONTAINER_OVERRIDE_REPO_ENV} \
90 | ${AUTO_UNATTEND_ANSWER_FILE_BIND} \
91 | -w /image-builder/images/capi/ \
92 | -e HOST_IP=$HOST_IP -e ARTIFACTS_CONTAINER_PORT=$ARTIFACTS_CONTAINER_PORT -e OS_TARGET=$OS_TARGET \
93 | -e TKR_SUFFIX=$TKR_SUFFIX -e KUBERNETES_VERSION=$KUBERNETES_VERSION \
94 | -e PACKER_HTTP_PORT=$PACKER_HTTP_PORT \
95 | -p $PACKER_HTTP_PORT:$PACKER_HTTP_PORT \
96 | --platform linux/amd64 \
97 | $(get_image_builder_container_image_name $KUBERNETES_VERSION)
98 | }
99 |
100 | supported_os_list=$(jq -r '.supported_os' $SUPPORTED_CONTEXT_JSON)
101 | if [ "$supported_os_list" == "null" ]; then
102 | print_error 'Use supported KUBERNETES_VERSION, run "make list-versions" to list the supported kubernetes versions'
103 | exit 1
104 | fi
105 |
106 | supported_os=false
107 | while read SUPPORTED_OS_TARGET; do
108 | if [ $SUPPORTED_OS_TARGET == $OS_TARGET ]; then
109 | build_node_image
110 | echo ""
111 | next_hint_msg "Use \"docker logs -f $(get_node_image_builder_container_name $KUBERNETES_VERSION $OS_TARGET)\" to see logs and status"
112 | next_hint_msg "Node Image OVA can be found at $IMAGE_ARTIFACTS_PATH/ovas/"
113 | supported_os=true
114 | fi
115 | done < <(jq -r '.supported_os[]' "$SUPPORTED_CONTEXT_JSON")
116 |
117 | if [ "$supported_os" == false ]; then
118 | print_error 'Use supported OS_TARGET, run "make list-supported-os" to list the supported OS targets'
119 | exit 1
120 | fi
121 |
--------------------------------------------------------------------------------
/hack/make-helpers/clean-containers.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2023 VMware, Inc.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | set -e
6 | source $(dirname "${BASH_SOURCE[0]}")/utils.sh
7 | enable_debugging
8 |
9 | if [ -z "$LABEL" ]; then
10 | LABEL=byoi
11 | fi
12 |
13 | if [ "$(docker ps -a -q -f "label=$LABEL")" != '' ]; then
14 | docker rm -f $(docker ps -a -q -f "label=$LABEL")
15 | fi
--------------------------------------------------------------------------------
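For illustration, the cleanup above can also be run standalone from the repository root; LABEL is optional and defaults to byoi, so narrowing to the image-builder label defined in utils.sh is just an override:

    ./hack/make-helpers/clean-containers.sh
    LABEL=byoi_image_builder ./hack/make-helpers/clean-containers.sh
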
/hack/make-helpers/clean-image-artifacts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2023 VMware, Inc.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | set -e
6 | source $(dirname "${BASH_SOURCE[0]}")/utils.sh
7 | enable_debugging
8 |
9 | is_argument_set "IMAGE_ARTIFACTS_PATH argument is required" $IMAGE_ARTIFACTS_PATH
10 |
11 | log_folder=$IMAGE_ARTIFACTS_PATH/logs
12 | ovas_folder=$IMAGE_ARTIFACTS_PATH/ovas
13 |
14 | rm -r -f $log_folder
15 | rm -r -f $ovas_folder
--------------------------------------------------------------------------------
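A quick usage sketch, run from the repository root; the artifacts path below is a placeholder, and only its logs/ and ovas/ sub-folders are removed:

    IMAGE_ARTIFACTS_PATH=/home/user/image-artifacts ./hack/make-helpers/clean-image-artifacts.sh
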
/hack/make-helpers/list-supported-os.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # © Broadcom. All Rights Reserved.
3 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
4 | # SPDX-License-Identifier: MPL-2.0
5 |
6 | set -e
7 | source $(dirname "${BASH_SOURCE[0]}")/utils.sh
8 | enable_debugging
9 |
10 | printf "%20s\n" "Supported OS"
11 | jq -r '.supported_os[]' $SUPPORTED_CONTEXT_JSON | xargs printf "%20s\n"
12 |
13 | echo ""
14 | next_hint_msg "Use \"make run-artifacts-container \" to run the artifacts container."
15 |
--------------------------------------------------------------------------------
/hack/make-helpers/make-build-all.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # © Broadcom. All Rights Reserved.
3 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
4 | # SPDX-License-Identifier: MPL-2.0
5 |
6 | set -e
7 | source $(dirname "${BASH_SOURCE[0]}")/utils.sh
8 | enable_debugging
9 |
10 | ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
11 |
12 | # First make sure there are no containers running
13 | make clean-containers -C "${ROOT}"
14 |
15 | artifact_container_port_counter=9090
16 | packer_port_counter=8081
17 | output_counter=1
18 | KUBERNETES_VERSION=$(cat $SUPPORTED_VERSION_TEXT | xargs)
19 |
20 | echo "Running artifact container for '${KUBERNETES_VERSION}' exposing port at '${artifact_container_port_counter}'"
21 | make run-artifacts-container -C "${ROOT}" ARTIFACTS_CONTAINER_PORT="${artifact_container_port_counter}"
22 |
23 | while read supported_os; do
24 | echo "Building node image for '${KUBERNETES_VERSION} | ${supported_os}' using packer port '${packer_port_counter}'"
25 | make build-node-image -C "${ROOT}" OS_TARGET="${supported_os}" TKR_SUFFIX="demo" HOST_IP=$(hostname -I | awk '{print $1}') IMAGE_ARTIFACTS_PATH="${PWD}/output${output_counter}" DEBUGGING=1 ARTIFACTS_CONTAINER_PORT="${artifact_container_port_counter}" PACKER_HTTP_PORT="${packer_port_counter}"
26 |
27 | packer_port_counter=$((packer_port_counter+1))
28 | output_counter=$((output_counter+1))
29 | done < <(cat "${SUPPORTED_CONTEXT_JSON}" | jq -c -r '.supported_os[]')
30 |
--------------------------------------------------------------------------------
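With the supported-context.json shipped at the repository root (see the end of this listing), the loop above runs one build per OS target, bumping the packer port and output folder each time; the first iteration is roughly equivalent to:

    make build-node-image OS_TARGET=photon-5 TKR_SUFFIX=demo \
        HOST_IP=$(hostname -I | awk '{print $1}') \
        IMAGE_ARTIFACTS_PATH="${PWD}/output1" DEBUGGING=1 \
        ARTIFACTS_CONTAINER_PORT=9090 PACKER_HTTP_PORT=8081

The next iterations use ubuntu-2204-efi with output2 and port 8082, then windows-2022-efi with output3 and port 8083.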
/hack/make-helpers/make-help.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # Copyright 2023 VMware, Inc.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | set -o errexit
6 | set -o nounset
7 | set -o pipefail
8 |
9 | set -e
10 | source $(dirname "${BASH_SOURCE[0]}")/utils.sh
11 |
12 | enable_debugging
13 |
14 | red=$(tput setaf 1)
15 | reset=$(tput sgr0)
16 | readonly red reset
17 |
18 | ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
19 | ALL_TARGETS=$(make -C "${ROOT}" PRINT_HELP=y -rpn | sed -n -e '/^$/ { n ; /^[^ .#][^ ]*:/ { s/:.*$// ; p ; } ; }' | sort)
20 |
21 | echo "--------------------------------------------------------------------------------"
22 | for tar in ${ALL_TARGETS}; do
23 | echo -e "${red}${tar}${reset}"
24 | make -C "${ROOT}" "${tar}" PRINT_HELP=y
25 | echo "---------------------------------------------------------------------------------"
26 | done
--------------------------------------------------------------------------------
/hack/make-helpers/run-artifacts-container.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # © Broadcom. All Rights Reserved.
3 | # The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
4 | # SPDX-License-Identifier: MPL-2.0
5 |
6 | set -e
7 | source $(dirname "${BASH_SOURCE[0]}")/utils.sh
8 |
9 | enable_debugging
10 |
11 | is_argument_set "KUBERNETES_VERSION argument is required" $KUBERNETES_VERSION
12 |
13 | if [ -z "$ARTIFACTS_CONTAINER_PORT" ]; then
14 | # The Makefile sets this environment variable
15 | ARTIFACTS_CONTAINER_PORT=$DEFAULT_ARTIFACTS_CONTAINER_PORT
16 | echo "Using default port for artifacts container $DEFAULT_ARTIFACTS_CONTAINER_PORT"
17 | fi
18 |
19 | artifacts_container_image_url=$(jq -r '.artifacts_image' $SUPPORTED_CONTEXT_JSON)
20 | if [ "$artifacts_container_image_url" == "null" ]; then
21 | print_error 'Missing artifact server container image url'
22 | exit 1
23 | fi
24 |
25 | container_name=$(get_artifacts_container_name "$KUBERNETES_VERSION")
26 |
27 | docker rm -f $container_name 2> /dev/null || true
28 | docker run -d --name $container_name $(get_artifacts_container_labels $KUBERNETES_VERSION) -p $ARTIFACTS_CONTAINER_PORT:80 --platform linux/amd64 $artifacts_container_image_url
29 |
30 | next_hint_msg "Use \"make build-node-image OS_TARGET= KUBERNETES_VERSION=${KUBERNETES_VERSION} TKR_SUFFIX= HOST_IP= IMAGE_ARTIFACTS_PATH= ARTIFACTS_CONTAINER_PORT=${ARTIFACTS_CONTAINER_PORT} PACKER_HTTP_PORT=${DEFAULT_PACKER_HTTP_PORT}\" to build node image"
31 | next_hint_msg "Change PACKER_HTTP_PORT if the ${DEFAULT_PACKER_HTTP_PORT} port is already in use or not opened"
--------------------------------------------------------------------------------
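The script relies on variables normally provided by the Makefile (SUPPORTED_CONTEXT_JSON and the default ports). A hedged sketch of a direct run from the repository root, with the version taken from supported-version.txt and the context file path set explicitly:

    KUBERNETES_VERSION=v1.32.0+vmware.6-fips \
    ARTIFACTS_CONTAINER_PORT=9090 \
    SUPPORTED_CONTEXT_JSON=./supported-context.json \
    ./hack/make-helpers/run-artifacts-container.sh
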
/hack/make-helpers/utils.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright 2023 VMware, Inc.
3 | # SPDX-License-Identifier: MPL-2.0
4 |
5 | # Terminal colors
6 | red='\033[0;31m'
7 | green='\033[0;32m'
8 | clear='\033[0m'
9 |
10 | function enable_debugging() {
11 | if [ ! -z ${DEBUGGING+x} ]; then
12 | set -x
13 | fi
14 | }
15 |
16 | function print_error() {
17 | printf "${red}${1}\n${clear}"
18 | }
19 |
20 | function is_argument_set() {
21 | variable=$2
22 | error_msg=$1
23 | if [ -z "$variable" ]; then
24 | printf "${red}${error_msg}\n${clear}"
25 | exit 1
26 | fi
27 | }
28 |
29 | function next_hint_msg() {
30 | printf "${green} Hint: ${1}\n${clear}"
31 | }
32 |
33 | function get_artifacts_container_name() {
34 | kubernetes_version=$1
35 | echo "$(echo $kubernetes_version | sed -e 's/+/---/' )-artifacts-server"
36 | }
37 |
38 | function get_artifacts_container_labels() {
39 | kubernetes_version=$1
40 | echo "-l byoi -l byoi_artifacts -l $kubernetes_version"
41 | }
42 |
43 | function get_node_image_builder_container_name() {
44 | kubernetes_version=$1
45 | os_target=$2
46 | echo $(echo $kubernetes_version | sed -e 's/+/---/' )-$os_target-image-builder
47 | }
48 |
49 | function get_node_image_builder_container_labels() {
50 | kubernetes_version=$1
51 | os_target=$2
52 | echo "-l byoi -l byoi_image_builder -l $kubernetes_version -l $os_target"
53 | }
54 |
55 | function get_image_builder_container_image_name() {
56 | kubernetes_version=$1
57 | default_image_name="vsphere-tanzu-byoi"
58 | image_name=$default_image_name
59 |
60 | # Docker image names don't support `+`, so replace it with `---`
61 | kubernetes_version=${kubernetes_version//+/---}
62 | image_name=$image_name-$kubernetes_version
63 | echo "$image_name"
64 | }
--------------------------------------------------------------------------------
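As a quick worked example of the naming helpers above, using the Kubernetes version from supported-version.txt and a photon-5 target (expected output shown in comments):

    source hack/make-helpers/utils.sh
    get_node_image_builder_container_name "v1.32.0+vmware.6-fips" "photon-5"
    # v1.32.0---vmware.6-fips-photon-5-image-builder
    get_image_builder_container_image_name "v1.32.0+vmware.6-fips"
    # vsphere-tanzu-byoi-v1.32.0---vmware.6-fips
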
/hack/tkgs-image-build-ova.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright 2023 VMware, Inc.
4 | # SPDX-License-Identifier: MPL-2.0
5 |
6 | ################################################################################
7 | # usage: image-build-ova.py [FLAGS] ARGS
8 | # This program builds an OVA file from a VMDK and manifest file generated as a
9 | # result of a Packer build.
10 | ################################################################################
11 |
12 | import argparse
13 | import hashlib
14 | import io
15 | import json
16 | import os
17 | import subprocess
18 | from string import Template
19 | import tarfile
20 |
21 | def main():
22 | parser = argparse.ArgumentParser(
23 | description="Builds an OVA using the artifacts from a Packer build")
24 | parser.add_argument('--stream_vmdk',
25 | dest='stream_vmdk',
26 | action='store_true',
27 | help='Compress vmdk file')
28 | parser.add_argument('--vmx',
29 | dest='vmx_version',
30 | default='17',
31 | help='The virtual hardware version')
32 | parser.add_argument('--eula_file',
33 | nargs='?',
34 | metavar='EULA',
35 | default='./ovf_eula.txt',
36 | help='Text file containing EULA')
37 | parser.add_argument('--ovf_template',
38 | nargs='?',
39 | metavar='OVF_TEMPLATE',
40 | default='./ovf_template.xml',
41 | help='XML template to build OVF')
42 | parser.add_argument('--vmdk_file',
43 | nargs='?',
44 | metavar='FILE',
45 | default=None,
46 | help='Use FILE as VMDK instead of reading from manifest. '
47 | 'Must be in BUILD_DIR')
48 | parser.add_argument(dest='build_dir',
49 | nargs='?',
50 | metavar='BUILD_DIR',
51 | default='.',
52 | help='The Packer build directory')
53 | args = parser.parse_args()
54 |
55 | # Read in the EULA
56 | eula = ""
57 | with io.open(args.eula_file, 'r', encoding='utf-8') as f:
58 | eula = f.read()
59 |
60 | # Read in the OVF template
61 | ovf_template = ""
62 | with io.open(args.ovf_template, 'r', encoding='utf-8') as f:
63 | ovf_template = f.read()
64 |
65 | # Change the working directory if one is specified.
66 | os.chdir(args.build_dir)
67 | print("image-build-ova: cd %s" % args.build_dir)
68 |
69 | # Load the packer manifest JSON
70 | data = None
71 | with open('packer-manifest.json', 'r') as f:
72 | data = json.load(f)
73 |
74 | # Get the first build.
75 | build = data['builds'][0]
76 | build_data = build['custom_data']
77 |
78 | print("image-build-ova: loaded %s-kube-%s" % (build_data['build_name'],
79 | build_data['kubernetes_semver']))
80 |
81 | if args.vmdk_file is None:
82 | # Get a list of the VMDK files from the packer manifest.
83 | vmdk_files = get_vmdk_files(build['files'])
84 | else:
85 | vmdk_files = [{"name": args.vmdk_file, "size": os.path.getsize(args.vmdk_file)}]
86 |
87 | # Create stream-optimized versions of the VMDK files.
88 | if args.stream_vmdk is True:
89 | stream_optimize_vmdk_files(vmdk_files)
90 | else:
91 | for f in vmdk_files:
92 | f['stream_name'] = f['name']
93 | f['stream_size'] = os.path.getsize(f['name'])
94 |
95 | vmdk = vmdk_files[0]
96 |
97 | OS_id_map = {"vmware-photon-64": {"id": "36", "version": "", "type": "vmwarePhoton64Guest"},
98 | "centos7-64": {"id": "107", "version": "7", "type": "centos7-64"},
99 | "centos8-64": {"id": "107", "version": "8", "type": "centos8-64"},
100 | "rhel7-64": {"id": "80", "version": "7", "type": "rhel7_64guest"},
101 | "rhel8-64": {"id": "80", "version": "8", "type": "rhel8_64guest"},
102 | "ubuntu-64": {"id": "94", "version": "", "type": "ubuntu64Guest"},
103 | "flatcar-64": {"id": "100", "version": "", "type": "linux-64"},
104 | "Windows2019Server-64": {"id": "112", "version": "", "type": "windows2019srv_64Guest"},
105 | "Windows2022Server-64": {"id": "112", "version": "", "type": "windows2019srvNext_64Guest"},
106 | }
107 |
108 | # Create the OVF file.
109 | data = {
110 | 'BUILD_DATE': build_data['build_date'],
111 | 'ARTIFACT_ID': build['artifact_id'],
112 | 'BUILD_TIMESTAMP': build_data['build_timestamp'],
113 | 'EULA': eula,
114 | 'OS_NAME': build_data['os_name'],
115 | 'OS_ID': OS_id_map[build_data['guest_os_type']]['id'],
116 | 'OS_TYPE': OS_id_map[build_data['guest_os_type']]['type'],
117 | 'OS_VERSION': OS_id_map[build_data['guest_os_type']]['version'],
118 | 'IB_VERSION': build_data['ib_version'],
119 | 'DISK_NAME': vmdk['stream_name'],
120 | 'DISK_SIZE': build_data['disk_size'],
121 | 'POPULATED_DISK_SIZE': vmdk['size'],
122 | 'STREAM_DISK_SIZE': vmdk['stream_size'],
123 | 'VMX_VERSION': args.vmx_version,
124 | 'DISTRO_NAME': build_data['distro_name'],
125 | 'DISTRO_VERSION': build_data['distro_version'],
126 | 'DISTRO_ARCH': build_data['distro_arch'],
127 | 'NESTEDHV': "false",
128 | 'FIRMWARE': build_data['firmware'],
129 | 'DISTRO_TYPE': 'windows' if 'windows' in OS_id_map[build_data['guest_os_type']]['type'] else 'linux'
130 | }
131 |
132 | data['CNI_VERSION'] = build_data['kubernetes_cni_semver']
133 | data['CONTAINERD_VERSION'] = build_data['containerd_version']
134 | data['KUBERNETES_SEMVER'] = build_data['kubernetes_semver']
135 | data['KUBERNETES_SOURCE_TYPE'] = build_data['kubernetes_source_type']
136 | data['PRODUCT'] = "%s and Kubernetes %s" % (build_data['os_name'], build_data['kubernetes_semver'])
137 | data['ANNOTATION'] = "Cluster API vSphere image - %s" % (data['PRODUCT'])
138 | data['WAKEONLANENABLED'] = "false"
139 | data['TYPED_VERSION'] = build_data['kubernetes_typed_version']
140 |
141 | data['PROPERTIES'] = Template('''
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 | \n''').substitute(data)
150 |
151 | # Check if OVF_CUSTOM_PROPERTIES environment Variable is set.
152 | # If so, load the json file & add the properties to the OVF
153 |
154 | if os.environ.get("OVF_CUSTOM_PROPERTIES"):
155 | with open(os.environ.get("OVF_CUSTOM_PROPERTIES"), 'r') as f:
156 | custom_properties = json.loads(f.read())
157 | if custom_properties:
158 | for k, v in custom_properties.items():
159 | data['PROPERTIES'] = data['PROPERTIES'] + f''' \n'''
160 |
161 | if "windows" in OS_id_map[build_data['guest_os_type']]['type']:
162 | if build_data['disable_hypervisor'] != "true":
163 | data['NESTEDHV'] = "true"
164 |
165 | k8s_version = data['KUBERNETES_SEMVER'].replace('+','---')
166 | ovf = "%s-%s.ovf" % (build_data['build_name'], k8s_version)
167 | mf = "%s-%s.mf" % (build_data['build_name'], k8s_version)
168 | ova = "%s-%s.ova" % (build_data['build_name'], k8s_version)
169 |
170 | # Create OVF
171 | create_ovf(ovf, data, ovf_template)
172 |
173 | if os.environ.get("IB_OVFTOOL"):
174 | # Create the OVA.
175 | create_ova(ova, ovf, ovftool_args=os.environ.get("IB_OVFTOOL_ARGS", ""))
176 |
177 | else:
178 | # Create the OVA manifest.
179 | create_ova_manifest(mf, [ovf, vmdk['stream_name']])
180 |
181 | # Create the OVA
182 | create_ova(ova, ovf, ova_files=[mf, vmdk['stream_name']])
183 |
184 |
185 | def sha256(path):
186 | m = hashlib.sha256()
187 | with open(path, 'rb') as f:
188 | while True:
189 | data = f.read(65536)
190 | if not data:
191 | break
192 | m.update(data)
193 | return m.hexdigest()
194 |
195 |
196 | def create_ova(ova_path, ovf_path, ovftool_args=None, ova_files=None):
197 | if ova_files is None:
198 | cmd = f"ovftool {ovftool_args} {ovf_path} {ova_path}"
199 |
200 | print("image-build-ova: creating OVA from %s using ovftool" %
201 | ovf_path)
202 | subprocess.run(cmd.split(), check=True)
203 | else:
204 | infile_paths = [ovf_path]
205 | infile_paths.extend(ova_files)
206 | print("image-build-ova: creating OVA using tar")
207 | with open(ova_path, 'wb') as f:
208 | with tarfile.open(fileobj=f, mode='w|') as tar:
209 | for infile_path in infile_paths:
210 | tar.add(infile_path)
211 |
212 | chksum_path = "%s.sha256" % ova_path
213 | print("image-build-ova: create ova checksum %s" % chksum_path)
214 | with open(chksum_path, 'w') as f:
215 | f.write(sha256(ova_path))
216 |
217 |
218 | def create_ovf(path, data, ovf_template):
219 | print("image-build-ova: create ovf %s" % path)
220 | with io.open(path, 'w', encoding='utf-8') as f:
221 | f.write(Template(ovf_template).substitute(data))
222 |
223 |
224 | def create_ova_manifest(path, infile_paths):
225 | print("image-build-ova: create ova manifest %s" % path)
226 | with open(path, 'w') as f:
227 | for i in infile_paths:
228 | f.write('SHA256(%s)= %s\n' % (i, sha256(i)))
229 |
230 |
231 | def get_vmdk_files(inlist):
232 | outlist = []
233 | for f in inlist:
234 | if f['name'].endswith('.vmdk'):
235 | outlist.append(f)
236 | return outlist
237 |
238 |
239 | def stream_optimize_vmdk_files(inlist):
240 | for f in inlist:
241 | infile = f['name']
242 | outfile = infile.replace('.vmdk', '.ova.vmdk', 1)
243 | if os.path.isfile(outfile):
244 | os.remove(outfile)
245 | args = [
246 | 'vmware-vdiskmanager',
247 | '-r', infile,
248 | '-t', '5',
249 | outfile
250 | ]
251 | print("image-build-ova: stream optimize %s --> %s (1-2 minutes)" %
252 | (infile, outfile))
253 | subprocess.check_call(args)
254 | f['stream_name'] = outfile
255 | f['stream_size'] = os.path.getsize(outfile)
256 |
257 |
258 | if __name__ == "__main__":
259 | main()
260 |
--------------------------------------------------------------------------------
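A hedged example of calling the script above against a Packer build directory; the EULA file and build directory are placeholders, while the OVF template is the tkgs_ovf_template.xml from this repository (the EULA and template are read before the script changes into BUILD_DIR, so these paths are relative to the invocation directory):

    python3 hack/tkgs-image-build-ova.py \
        --stream_vmdk \
        --vmx 17 \
        --eula_file ./ovf_eula.txt \
        --ovf_template hack/tkgs_ovf_template.xml \
        /path/to/packer/build/dir

The build directory must contain the packer-manifest.json produced by the Packer run; when IB_OVFTOOL is not set, the OVA is assembled with tar along with a SHA256 manifest.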
/hack/tkgs_ovf_template.xml:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
8 |
9 |
10 |
11 | Virtual disk information
12 |
13 |
14 |
15 | The list of logical networks
16 |
17 | Please select a network
18 |
19 |
20 |
21 | A virtual machine
22 | ${ARTIFACT_ID}
23 |
24 | A human-readable annotation
25 | This Virtual Machine is a VMware agent implementing support for the Tanzu Kubernetes Grid Service. Its lifecycle operations are managed by VMware vCenter Server
26 |
27 |
28 | The operating system installed
29 |
30 |
31 | Virtual hardware requirements
32 |
33 | Virtual Hardware Family
34 | 0
35 | ${ARTIFACT_ID}
36 | vmx-${VMX_VERSION}
37 |
38 | -
39 | hertz * 10^6
40 | Number of Virtual CPUs
41 | 2 virtual CPU(s)
42 | 1
43 | 3
44 | 2
45 | 2
46 |
47 | -
48 | byte * 2^20
49 | Memory Size
50 | 2048MB of memory
51 | 2
52 | 4
53 | 2048
54 |
55 | -
56 | 0
57 | SCSI Controller
58 | SCSI controller 0
59 | 3
60 | VirtualSCSI
61 | 6
62 |
63 |
64 | -
65 | 1
66 | IDE Controller
67 | IDE 1
68 | 4
69 | 5
70 |
71 | -
72 | 0
73 | IDE Controller
74 | IDE 0
75 | 5
76 | 5
77 |
78 | -
79 | false
80 | Video card
81 | 6
82 | 24
83 |
84 |
85 |
86 |
87 |
88 |
89 | -
90 | false
91 | VMCI device
92 | 7
93 | vmware.vmci
94 | 1
95 |
96 |
97 |
98 | -
99 | 0
100 | Hard disk 1
101 | ovf:/disk/vmdisk1
102 | 8
103 | 3
104 | 17
105 |
106 |
107 | -
108 | 7
109 | true
110 | nic0
111 | VmxNet3 ethernet adapter
112 | Network adapter 1
113 | 9
114 | VmxNet3
115 | 10
116 |
117 |
118 |
119 |
120 | -
121 | 0
122 | false
123 | CD/DVD drive 1
124 | 10
125 | 5
126 | vmware.cdrom.remotepassthrough
127 | 15
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
153 | An end-user license agreement
154 |
155 | ${EULA}
156 |
157 |
158 |
159 | Information about the installed software
160 | Tanzu Kubernetes Grid Service - Tanzu Kubernetes cluster Image
161 | VMware Inc.
162 | ${TYPED_VERSION}
163 | ${TYPED_VERSION}
164 | https://vmware.com
165 | Cluster API Provider (CAPI)
166 |
167 |
168 |
169 | ${PROPERTIES}
170 |
171 |
172 |
173 |
--------------------------------------------------------------------------------
/packer-variables/default-args.j2:
--------------------------------------------------------------------------------
1 | {
2 | {# Don't modify the default templating values unless required, as all the
3 | values are automatically populated during the image build process #}
4 | {# TODO: we might not need this unless we use vsphere-clone builder #}
5 | "template": "base-{{ os_type }}",
6 | "load_additional_components": "true",
7 | "additional_url_images": "true",
8 | {# docker registry image for local registry to host container images #}
9 | "additional_url_images_list": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/linux/amd64/docker_distribution_container_image.tar",
10 | {# additional variables passed to the ansible role; may be overridden by the OS-specific default-args-os.j2 #}
11 | {# please also update the OS-specific ansible_user_vars if any changes are made here #}
12 | "ansible_user_vars": "artifacts_container_url=http://{{ host_ip }}:{{ artifacts_container_port }} dockerVersion={{ docker_distribution }} imageVersion={{ image_version|replace('-', '.') }} ansible_python_interpreter=/usr/bin/python3 addon_image_list=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/calico.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/metrics-server.tar{% if capabilities_package_present %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/capabilities.tar{% endif %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/guest-cluster-auth-service.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/pinniped.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/antrea.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/vsphere-cpi.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/vsphere-pv-csi.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/secretgen-controller.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/kapp-controller.tar{% if gateway_package_present %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/gateway-api.tar{% endif %} localhost_addon_image_list={{ calico_package_localhost_path }},{{ metrics_server_package_localhost_path }}{% if capabilities_package_present %},{{ capabilities_package_localhost_path }}{% endif %},{{ guest_cluster_auth_service_package_localhost_path }},{{ pinniped_package_localhost_path }},{{ antrea_package_localhost_path }},{{ vsphere_cpi_package_localhost_path }},{{ vsphere_pv_csi_package_localhost_path }},{{ secretgen_controller_package_localhost_path }},{{ kapp_controller_localhost_path }}{% if gateway_package_present %},{{ gateway_api_package_localhost_path }}{% endif %} networkd_dispatcher_download_url=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/networkd-dispatcher-2.1.tar.bz2 registry_store_archive_url=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/registries/{{ registry_store_path }}",
13 | "kubernetes_typed_version": "{{ image_version }}",
14 | "vmx_version": "17",
15 | "cpu": "2",
16 | "memory": "4096",
17 | "cpu_cores": "2",
18 | {# kubernetes related information will be populated based on
19 | information pulled from artifacts container #}
20 | "containerd_version": "{{ containerd }}",
21 | "pause_image": "localhost:5000/vmware.io/pause:{{ pause }}",
22 | "containerd_url": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/linux/amd64/cri-containerd.tar",
23 | "containerd_sha256": "{{ containerd_sha256 }}",
24 | "kubernetes_series": "{{ kubernetes_series }}",
25 | "kubernetes_semver": "{{ kubernetes_version }}",
26 | "kubernetes_container_registry": "projects.registry.vmware.com/tkg",
27 | "kubernetes_source_type": "http",
28 | {# kubernetes binaries location for CNI, kubelet, kubectl #}
29 | "kubernetes_http_source": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts",
30 | "kubernetes_load_additional_imgs": "true",
31 | "kubeadm_template": "etc/kubeadm.yml",
32 | "kubernetes_cni_source_type": "http",
33 | "kubernetes_cni_semver": "{{ cni_plugins }}",
34 | "kubernetes_cni_http_source": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/cni_plugins",
35 | "kubernetes_cni_http_checksum": "",
36 | "kubernetes_enable_automatic_resource_sizing": "true",
37 | "disable_public_repos": "false",
38 | "remove_extra_repos": "true",
39 | {# Packer VM name #}
40 | "build_version": "{{ os_type }}-kube-{{ kubernetes_series }}-{{ ova_ts_suffix }}",
41 | "custom_role": "true"
42 | }
--------------------------------------------------------------------------------
/packer-variables/goss-args.j2:
--------------------------------------------------------------------------------
1 | {
2 | "goss_entry_file": "goss/goss.yaml",
3 | "goss_inspect_mode": "false",
4 | "goss_tests_dir": "/image-builder/images/capi/image/goss",
5 | "goss_vars_file": "/image-builder/images/capi/image/goss/goss-vars.yaml",
6 | "goss_download_path": "/tkgs-tmp/goss-linux-amd64",
7 | "goss_remote_folder": "/tkgs-tmp",
8 | "goss_remote_path": "/tkgs-tmp/goss",
9 | {% if use_artifact_server_goss %}
10 | "goss_skip_install": "false",
11 | "goss_url": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/linux/amd64/goss"
12 | {% else %}
13 | "goss_skip_install": "true"
14 | {% endif %}
15 | }
--------------------------------------------------------------------------------
/packer-variables/packer-http-config.j2:
--------------------------------------------------------------------------------
1 | {
2 | "http_port_max": "{{ packer_http_port }}",
3 | "http_port_min": "{{ packer_http_port }}",
4 | "http_ip": "{{ host_ip }}"
5 | }
--------------------------------------------------------------------------------
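To make the substitution concrete: with hypothetical values host_ip=10.0.0.5 and packer_http_port=8081 (min and max set to the same value to pin Packer's HTTP server to a single port), the template above renders to:

    {
        "http_port_max": "8081",
        "http_port_min": "8081",
        "http_ip": "10.0.0.5"
    }
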
/packer-variables/photon-3/default-args-photon-3.j2:
--------------------------------------------------------------------------------
1 | {
2 | {# If a custom role is created in a different location, append that
3 | location after mounting that path using docker #}
4 | "custom_role_names": "/image-builder/images/capi/image/ansible /image-builder/images/capi/image/compliance/roles/photon3",
5 | {# Update the required packer variables based on the OS being consumed #}
6 | "distro_version": "3.0",
7 | "extra_rpms": "glibc zlib filesystem ethtool pkg-config bash bzip2 shadow procps-ng iana-etc coreutils bc libtool findutils xz iproute2 util-linux kmod linux linux-devel iptables Linux-PAM systemd dbus file e2fsprogs rpm gawk cloud-utils gptfdisk nfs-utils openssh gdbm photon-release photon-repos haveged sed grep cpio gzip vim libdb tdnf less iputils bindutils diffutils bridge-utils cri-tools apparmor-utils apparmor-profiles krb5 which tzdata motd sudo iotop lsof traceroute ethtool dstat ltrace ipset netcat tcpdump wget net-tools curl tar open-vm-tools libseccomp cloud-init sysstat jq conntrack",
8 | {# additional variables passed to the ansible role, if stig is enabled #}
9 | {# variables passed to the ansible role are appended to this list at the end #}
10 | "ansible_user_vars": "artifacts_container_url=http://{{ host_ip }}:{{ artifacts_container_port }} dockerVersion={{ docker_distribution }} imageVersion={{ image_version|replace('-', '.') }} ansible_python_interpreter=/usr/bin/python3 addon_image_list=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/calico.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/metrics-server.tar{% if capabilities_package_present %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/capabilities.tar{% endif %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/guest-cluster-auth-service.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/pinniped.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/antrea.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/vsphere-cpi.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/vsphere-pv-csi.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/secretgen-controller.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/kapp-controller.tar{% if gateway_package_present %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/gateway-api.tar{% endif %} localhost_addon_image_list={{ calico_package_localhost_path }},{{ metrics_server_package_localhost_path }}{% if capabilities_package_present %},{{ capabilities_package_localhost_path }}{% endif %},{{ guest_cluster_auth_service_package_localhost_path }},{{ pinniped_package_localhost_path }},{{ antrea_package_localhost_path }},{{ vsphere_cpi_package_localhost_path }},{{ vsphere_pv_csi_package_localhost_path }},{{ secretgen_controller_package_localhost_path }},{{ kapp_controller_localhost_path }}{% if gateway_package_present %},{{ gateway_api_package_localhost_path }}{% endif %} networkd_dispatcher_download_url=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/networkd-dispatcher-2.1.tar.bz2 registry_store_archive_url=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/registries/{{ registry_store_path }} run_sysctl_net_ipv6_conf_all_mc_forwarding=false run_sysctl_net_ipv6_conf_default_mc_forwarding=false run_sysctl_net_ipv6_conf_eth0_mc_forwarding=false run_sshd_banner=false"
11 | }
--------------------------------------------------------------------------------
/packer-variables/photon-5/default-args-photon-5.j2:
--------------------------------------------------------------------------------
1 | {
2 | {# If a custom role is created in a different location, append that
3 | location after mounting that path using docker #}
4 | "custom_role_names": "/image-builder/images/capi/image/ansible /image-builder/images/capi/image/compliance",
5 | "distro_version": "5.0",
6 | "extra_rpms": "glibc zlib filesystem ethtool pkg-config bash bzip2 shadow procps-ng iana-etc coreutils bc libtool findutils xz iproute2 util-linux kmod linux linux-devel iptables Linux-PAM systemd dbus file e2fsprogs rpm gawk cloud-utils gptfdisk nfs-utils openssh gdbm photon-release photon-repos haveged sed grep cpio gzip vim tdnf less iputils bindutils diffutils bridge-utils cri-tools apparmor-utils apparmor-profiles krb5 which tzdata motd sudo iotop lsof traceroute ethtool dstat ltrace ipset netcat tcpdump wget net-tools curl tar open-vm-tools libseccomp cloud-init sysstat jq conntrack dkms",
7 | {# additional variables passed to the ansible role, if stig is enabled #}
8 | {# variables passed to the ansible role are appended to this list at the end #}
9 | "ansible_user_vars": "artifacts_container_url=http://{{ host_ip }}:{{ artifacts_container_port }} dockerVersion={{ docker_distribution }} imageVersion={{ image_version|replace('-', '.') }} ansible_python_interpreter=/usr/bin/python3 addon_image_list=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/calico.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/metrics-server.tar{% if capabilities_package_present %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/capabilities.tar{% endif %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/guest-cluster-auth-service.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/pinniped.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/antrea.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/vsphere-cpi.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/vsphere-pv-csi.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/secretgen-controller.tar,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/kapp-controller.tar{% if gateway_package_present %},http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/packages/gateway-api.tar{% endif %} localhost_addon_image_list={{ calico_package_localhost_path }},{{ metrics_server_package_localhost_path }}{% if capabilities_package_present %},{{ capabilities_package_localhost_path }}{% endif %},{{ guest_cluster_auth_service_package_localhost_path }},{{ pinniped_package_localhost_path }},{{ antrea_package_localhost_path }},{{ vsphere_cpi_package_localhost_path }},{{ vsphere_pv_csi_package_localhost_path }},{{ secretgen_controller_package_localhost_path }},{{ kapp_controller_localhost_path }}{% if gateway_package_present %},{{ gateway_api_package_localhost_path }}{% endif %} networkd_dispatcher_download_url=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/networkd-dispatcher-2.1.tar.bz2 registry_store_archive_url=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/registries/{{ registry_store_path }} run_sysctl_net_ipv6_conf_all_mc_forwarding=false run_sysctl_net_ipv6_conf_default_mc_forwarding=false run_sysctl_net_ipv6_conf_eth0_mc_forwarding=false run_kernel_net_ip_forward=false run_modprobe_conf=false run_fips_boot_enable=false run_openssl_fips=false run_sshd_banner=false"
10 | }
--------------------------------------------------------------------------------
/packer-variables/ubuntu-2004-efi/default-args-ubuntu-2004-efi.j2:
--------------------------------------------------------------------------------
1 | {
2 | "custom_role_names": "/image-builder/images/capi/image/ansible",
3 | "distro_version": "20.04",
4 | "extra_debs": "unzip iptables-persistent nfs-common",
5 | "boot_disable_ipv6": "1"
6 | }
--------------------------------------------------------------------------------
/packer-variables/ubuntu-2204-efi/default-args-ubuntu-2204-efi.j2:
--------------------------------------------------------------------------------
1 | {
2 | "custom_role_names": "/image-builder/images/capi/image/ansible",
3 | "distro_version": "22.04",
4 | "extra_debs": "unzip iptables-persistent nfs-common dkms",
5 | "boot_disable_ipv6": "1",
6 | "ip_settle_timeout": "15m"
7 | }
8 |
--------------------------------------------------------------------------------
/packer-variables/vsphere.j2:
--------------------------------------------------------------------------------
1 | {
2 | {# vCenter server IP or FQDN #}
3 | "vcenter_server":"",
4 | {# vCenter username #}
5 | "username":"",
6 | {# vCenter user password #}
7 | "password":"",
8 | {# Datacenter name where packer creates the VM for customization #}
9 | "datacenter":"",
10 | {# Datastore name for the VM #}
11 | "datastore":"",
12 | {# [Optional] Folder name #}
13 | "folder":"",
14 | {# Cluster name where packer creates the VM for customization #}
15 | "cluster": "",
16 | {# Packer VM network #}
17 | "network": "VM Network",
18 | {# To use an insecure connection to vCenter #}
19 | "insecure_connection": "true",
20 | {# To create a clone of the Packer VM after customization #}
21 | "linked_clone": "true",
22 | {# To create a snapshot of the Packer VM after customization #}
23 | "create_snapshot": "true",
24 | {# To destroy Packer VM after Image Build is completed #}
25 | "destroy": "true"
26 | }
27 |
--------------------------------------------------------------------------------
/packer-variables/windows/default-args-windows.j2:
--------------------------------------------------------------------------------
1 | {
2 | "additional_executables_destination_path": "C:\\ProgramData\\Temp",
3 | "additional_executables_list": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/windows/amd64/registry.exe,http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/windows/amd64/goss.exe",
4 | "additional_executables": "true",
5 | "additional_url_images": "false",
6 | "additional_url_images_list": "",
7 | "additional_prepull_images": "",
8 | "build_version": "{{ os_type }}-kube-{{ kubernetes_series }}-{{ ova_ts_suffix }}",
9 | "cloudbase_init_url": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/windows/amd64/CloudbaseInitSetup_x64.msi",
10 | "cloudbase_real_time_clock_utc": "true",
11 | "containerd_url": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/windows/amd64/cri-containerd.tar",
12 | "containerd_sha256_windows": "{{ containerd_sha256_windows_amd64 }}",
13 | "containerd_version": "{{ containerd }}",
14 | "convert_to_template": "true",
15 | "create_snapshot": "false",
16 | "disable_hypervisor": "false",
17 | "disk_size": "40960",
18 | "kubernetes_base_url": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/windows/amd64",
19 | "kubernetes_series": "{{ kubernetes_series }}",
20 | "kubernetes_semver": "{{ kubernetes_version }}",
21 | "kubernetes_typed_version": "{{ image_version }}",
22 | "load_additional_components": "true",
23 | "netbios_host_name_compatibility": "false",
24 | "nssm_url": "http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/bin/windows/amd64/nssm.exe",
25 | "prepull": "false",
26 | "pause_image": "localhost:5000/vmware.io/pause:{{ pause }}",
27 | "runtime": "containerd",
28 | "template": "",
29 | "unattend_timezone": "Pacific Standard Time",
30 | "windows_updates_categories": "",
31 | "windows_updates_kbs": "",
32 | "wins_url": "",
33 | "custom_role": "true",
34 | "custom_role_names": "/image-builder/images/capi/image/ansible-windows",
35 | "ansible_user_vars": "artifacts_container_url=http://{{ host_ip }}:{{ artifacts_container_port }} imageVersion={{ image_version|replace('-', '.') }} registry_store_archive_url=http://{{ host_ip }}:{{ artifacts_container_port }}/artifacts/{{ kubernetes_version }}/registries/{{ registry_store_path }}",
36 | "vmx_version": "18",
37 | "debug_tools": "false"
38 | }
39 |
--------------------------------------------------------------------------------
/packer-variables/windows/goss-args-windows.j2:
--------------------------------------------------------------------------------
1 | {
2 | "goss_entry_file": "goss/goss.yaml",
3 | "goss_inspect_mode": "true",
4 | "goss_skip_install": "true",
5 | "goss_download_path": "C:\\ProgramData\\Temp\\goss.exe",
6 | "goss_remote_folder": "C:\\goss",
7 | "goss_remote_path": "C:\\goss\\goss"
8 | }
9 |
--------------------------------------------------------------------------------
/packer-variables/windows/vsphere-windows.j2:
--------------------------------------------------------------------------------
1 | {
2 | {# [Optional] Windows only: Windows OS Image #}
3 | "os_iso_path": "[sharedVmfs-0] windows.iso",
4 | {# [Optional] Windows only: VMware Tools Image #}
5 | "vmtools_iso_path": "[sharedVmfs-0] vmtools.iso"
6 | }
7 |
--------------------------------------------------------------------------------
/scripts/utkg_custom_ovf_properties.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | # Copyright 2023 VMware, Inc.
4 | # SPDX-License-Identifier: MPL-2.0
5 |
6 | import argparse
7 | import base64
8 | import json
9 | import re
10 | import string
11 | from pathlib import Path
12 | from xml.dom.minidom import Text
13 | from os.path import join
14 | import os
15 | import io
16 | import gzip
17 | import yaml
18 |
19 | custom_ovf_properties = {}
20 | version_maps = {}
21 | componentList = ""
22 | config_data_list = {}
23 | tkg_core_directory = "/image-builder/images/capi/tkr-metadata"
24 | config_directory = join(tkg_core_directory, 'config')
25 | packages_directory = join(tkg_core_directory, 'packages')
26 | localhost_path = 'localhost:5000'
27 |
28 |
29 | def set_versions(args):
30 | global version_maps
31 |
32 | kubernetes_config = {}
33 | with open(args.kubernetes_config, 'r') as fp:
34 | kubernetes_config = json.loads(fp.read())
35 |
36 | version_maps = {
37 | "image": kubernetes_config["image_version"],
38 | "k8s": kubernetes_config["kubernetes"],
39 | "cloudInit": "22.4.2",
40 | "coredns": kubernetes_config["coredns"],
41 | "etcd": kubernetes_config["etcd"],
42 | }
43 |
44 |
45 | def substitute_data(value, tkr_version):
46 | subMap = {
47 | 'IMAGE_VERSION': version_maps['image'],
48 | 'CLOUD_INIT_VERSION': version_maps['cloudInit'],
49 | 'ETCD_VERSION': version_maps['etcd'].replace("+", "_"),
50 | 'COREDNS_VERSION': version_maps['coredns'].replace("+", "_"),
51 | 'KUBERNETES_VERSION': version_maps['k8s'],
52 | 'COMPATIBILITY_7_0_0_10100': 'true',
53 | 'COMPATIBILITY_7_0_0_10200': 'true',
54 | 'COMPATIBILITY_7_0_0_10300': 'true',
55 | 'COMPATIBILITY_VC_7_0_0_1_MP3': 'true',
56 | 'DIST_VERSION': tkr_version
57 | }
58 | value = string.Template(value).substitute(subMap)
59 | value = json.dumps(json.loads(value))
60 | return value
61 |
62 |
63 | def fetch_addon_packages():
64 | addon_packages = []
65 |
66 | for root, subdirectories, _ in os.walk(packages_directory):
67 | for subdirectory in subdirectories:
68 | addon_packages.append(join(packages_directory, subdirectory))
69 |
70 | return addon_packages
71 |
72 |
73 | def downloadUtkgAddonFiles():
74 | addon_packages = fetch_addon_packages()
75 | return addon_packages
76 |
77 |
78 | def convert_to_xml(data):
79 | t = Text()
80 | t.data = data
81 | return t.toxml()
82 |
83 |
84 | def create_non_addon_ovf_properties():
85 | tkr_version, _ = fetch_tkr_data()
86 | filenames = ["/image-builder/images/capi/vmware-system.guest.kubernetes.distribution.image.version.json",
87 | "/image-builder/images/capi/vmware-system.compatibilityoffering.json"]
88 |
89 | for file in filenames:
90 | with open(file) as f:
91 | data = json.dumps(json.load(f))
92 | data = substitute_data(data, tkr_version)
93 | key = Path(file).stem
94 | custom_ovf_properties[key] = convert_to_xml(data)
95 |
96 |
97 | # fetch tkr apiversion and tkr version
98 | def fetch_tkr_data():
99 | tkr_filename = "TanzuKubernetesRelease.yml"
100 | with open(join(config_directory, tkr_filename), 'r') as file:
101 | info = yaml.safe_load(file)
102 | tkr_version = info["spec"]["version"]
103 | api_version = info["apiVersion"]
104 | file.close()
105 | return tkr_version, api_version
106 |
107 |
108 | def fetch_addon_image_name(info, image_repo, tkg_core_package, package_name):
109 | image_path = ""
110 | addon_name = package_name.split(".")[0]
111 | # for "capabilities" addon, component is named as capabilities-package
112 | if "capabilities" in package_name:
113 | addon_name = addon_name + "-package"
114 |
115 | # Fetch kapp-controller container image instead of bundle
116 | if "kapp-controller" in package_name:
117 | package_name = "kappControllerImage"
118 |
119 | if package_name in tkg_core_package:
120 | image_path = image_repo + "/" + tkg_core_package[package_name]['imagePath'] + ':' + \
121 | tkg_core_package[package_name]['tag']
122 | elif addon_name in info['components']:
123 | # if package is not present in tkg_core_packages,
124 | # check in components
125 | image_info = info['components'][addon_name][0]['images'][package_name]
126 | image_path = image_repo + "/" + image_info['imagePath'] + ':' + image_info['tag']
127 | else:
128 | raise Exception("Could not find package")
129 |
130 | return image_path
131 |
132 |
133 | # As kapp-controller is an inline package, the container image
134 | # needs to be pulled instead of the package bundle and pushed to the
135 | # same inline repo path
136 | def fetch_kapp_controller_localhost_image(addon_package):
137 | data, info = fetch_file_contents(addon_package)
138 | images_yaml_string = info['spec']['template']['spec']['fetch'][0]['inline']['paths']['.imgpkg/images.yml']
139 | images_yaml = yaml.safe_load(images_yaml_string)
140 | localhost_path = images_yaml['images'][0]['image']
141 |
142 | return localhost_path.split('@')[0]
143 |
144 |
145 | # fetch images path from the package CR
146 | def fetch_image_path():
147 | _, repo_url = fetch_tkr_data()
148 | image_repo = ""
149 | image_path_list = []
150 | localhost_image_path_list = []
151 | addon_packages = fetch_addon_packages()
152 |
153 | with open("/image-builder/images/capi/tkr-bom.yaml", 'r') as file:
154 | info = yaml.safe_load(file)
155 | image_repo = info['imageConfig']['imageRepository']
156 | tkg_core_package = info['components']['tkg-core-packages'][0]['images']
157 |
158 | for addon_package in addon_packages:
159 | package_name = addon_package.split("/")[-1]
160 | image_path = fetch_addon_image_name(info, image_repo, tkg_core_package, package_name)
161 | image_path_list.append(image_path)
162 | if "kapp-controller" in addon_package:
163 | localhost_image = fetch_kapp_controller_localhost_image(addon_package)
164 | localhost_image_path_list.append(localhost_image)
165 | continue
166 | localhost_image = image_path.replace(image_path.split('/')[0], localhost_path)
167 | localhost_image_path_list.append(":".join(localhost_image.split(":")[:-1]))
168 |
169 | return image_path_list, localhost_image_path_list
170 |
171 |
172 | # parse the data from package and packageMetadata CR for each addon
173 | def fetch_file_contents(addon_package):
174 | data = ""
175 | info = {}
176 | for filename in os.listdir(addon_package):
177 | with open(join(addon_package, filename), 'r') as file:
178 | content = file.read()
179 | if not content.endswith("\n"):
180 | content += "\n"
181 | data = data + "---\n" + content
182 |
183 | if "metadata" not in filename:
184 | info = yaml.safe_load(content)
185 |
186 | return data, info
187 |
188 |
189 | # parse the data from config CR for each addon
190 | def append_addon_config(data, addon_package):
191 | if addon_package in config_data_list:
192 | filename = config_data_list[addon_package]
193 | config_data_list.pop(addon_package)
194 | with open(filename, 'r') as file:
195 | content = file.read()
196 | if not content.endswith("\n"):
197 | content += "\n"
198 | data = data + "---\n" + content
199 |
200 | return data
201 |
202 |
203 | # validate that the key length does not exceed 62 characters, DO NOT CHANGE THIS LIMIT
204 | # as this limitation comes from the VirtualMachineImage name
205 | def validate_addon_key_length(key):
206 | if len(key) > 62:
207 | raise Exception("key length is too long, hence skipping", key)
208 | return True
209 |
210 |
211 | # compress the addon value yamls and encode to base64
212 | def compress_and_base64_encode(text):
213 | data = bytes(text, 'utf-8')
214 | with io.BytesIO() as buff:
215 | g = gzip.GzipFile(fileobj=buff, mode='wb')
216 | g.write(data)
217 | g.close()
218 |
219 | return str(base64.b64encode(buff.getvalue()), 'utf-8')
220 |
221 |
222 | def set_inner_data(data, name, version):
223 | inner_data = {}
224 |
225 | encoded_data = compress_and_base64_encode(data)
226 | inner_data["name"] = name
227 | inner_data["type"] = "inline"
228 | inner_data["version"] = version
229 | inner_data["value"] = encoded_data
230 | inner_data = convert_to_xml(json.dumps(inner_data))
231 |
232 | return inner_data
233 |
234 |
235 | def create_utkg_tkr_metadata_ovf_properties():
236 | addon_packages = downloadUtkgAddonFiles()
237 |
238 | # map config CR with corresponding Addon Package
239 | for filename in os.listdir(config_directory):
240 | is_addon = False
241 | for addon_package in addon_packages:
242 | addon_name = re.sub('[^A-Za-z0-9]+', '', Path(addon_package).stem.split(".")[0])
243 | if "pv-csi" in addon_package:
244 | addon_name = "csi"
245 |
246 | if addon_name in filename.lower():
247 | config_data_list[addon_package] = join(config_directory, filename)
248 | is_addon = True
249 | break
250 | if not is_addon:
251 | config_data_list[filename] = join(config_directory, filename)
252 |
253 | # fetch TKR version
254 | tkr_version, _ = fetch_tkr_data()
255 |
256 | # add the custom_ovf_property for given list of addons
257 | for addon_package in addon_packages:
258 | data, info = fetch_file_contents(addon_package)
259 | data = append_addon_config(data, addon_package)
260 | add_on_version = info["spec"]["version"]
261 |
262 | addon_name = Path(addon_package).stem.split(".")[0]
263 | inner_data = set_inner_data(data, addon_name, add_on_version)
264 |
265 | # Rename guest-cluster-auth-service to gc-auth-service, as the add-on name otherwise becomes
266 | # more than 63 chars, which is not permissible for the VirtualMachineImage name
267 | if addon_name == "guest-cluster-auth-service":
268 | addon_name = "gc-auth-service"
269 |
270 | if validate_addon_key_length("vmware-system.guest.kubernetes.addons." + addon_name):
271 | custom_ovf_properties[f"vmware-system.guest.kubernetes.addons.{addon_name}"] = inner_data
272 |
273 | # add OSImage, ClusterBootstrapTemplate and TanzuKubernetesRelease
274 | osi_images_list = []
275 | isOsimage = False
276 |
277 | for filename in config_data_list.values():
278 | data = ""
279 |
280 | with open(filename, 'r') as file:
281 | content = file.read()
282 | if not content.endswith("\n"):
283 | content += "\n"
284 | data = data + "---\n" + content
285 |
286 | info = yaml.safe_load(content)
287 | if "OSImage" in filename:
288 | osi_content = {}
289 | osi_content["name"] = info["metadata"]["name"]
290 | osi_content["value"] = compress_and_base64_encode(data)
291 | osi_images_list.append(osi_content)
292 | isOsimage = True
293 | continue
294 |
295 | else:
296 | metadata_version = tkr_version
297 | if "ClusterBootstrapTemplate" in filename:
298 | split_version = info["metadata"]["name"]
299 | # Fetching the short version in order to maintain the max key limit(63 characters) in the ovf
300 | # property
301 | metadata_name = "tkr.cbt"
302 | else:
303 | metadata_name = "tkr"
304 |
305 | inner_data = set_inner_data(data, info["metadata"]["name"], metadata_version)
306 |
307 | if validate_addon_key_length("vmware-system." + metadata_name):
308 | custom_ovf_properties[f"vmware-system.{metadata_name}"] = inner_data
309 |
310 | if isOsimage:
311 | custom_ovf_properties[f"vmware-system.tkr.osi"] = convert_to_xml(json.dumps(osi_images_list))
312 |
313 |
314 | def write_properties_to_file(filename):
315 | with open(filename, 'w') as f:
316 | f.write(json.dumps(custom_ovf_properties))
317 |
318 |
319 | def main():
320 | parser = argparse.ArgumentParser(
321 | description='Script to generate OVF properties')
322 | parser.add_argument('--kubernetes_config', required=True,
323 | help='Kubernetes related configuration JSON')
324 | parser.add_argument('--outfile',
325 | help='Path to output file')
326 | args = parser.parse_args()
327 |
328 | set_versions(args)
329 | create_utkg_tkr_metadata_ovf_properties()
330 | create_non_addon_ovf_properties()
331 | write_properties_to_file(args.outfile)
332 | print(custom_ovf_properties)
333 |
334 |
335 | if __name__ == '__main__':
336 | main()
337 |
--------------------------------------------------------------------------------
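A hedged sketch of invoking the script above (file names are placeholders): --kubernetes_config must point to a JSON file providing at least the image_version, kubernetes, coredns and etcd fields read by set_versions, and the generated OVF properties are written to --outfile as JSON:

    python3 scripts/utkg_custom_ovf_properties.py \
        --kubernetes_config /path/to/kubernetes_config.json \
        --outfile /path/to/custom_ovf_properties.json

Note that the script also expects the TKR metadata tree (/image-builder/images/capi/tkr-metadata) and tkr-bom.yaml to be present, as laid out inside the image-builder container.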
/supported-context.json:
--------------------------------------------------------------------------------
1 | {
2 | "supported_os": [
3 | "photon-5",
4 | "ubuntu-2204-efi",
5 | "windows-2022-efi"
6 | ],
7 | "artifacts_image": "projects.packages.broadcom.com/vsphere/iaas/kubernetes-release/1.32.0/vkr-artifact-server:v1.32.0_vmware.6-fips-vkr.2",
8 | "docker_build_args": {
9 | "IMAGE_BUILDER_COMMIT_ID": "826a2d7e05288b72870ae45797e9d64efc101d07"
10 | }
11 | }
--------------------------------------------------------------------------------
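The helper scripts above read this file through jq (via the SUPPORTED_CONTEXT_JSON variable); for example, from the repository root:

    jq -r '.supported_os[]' supported-context.json
    # photon-5
    # ubuntu-2204-efi
    # windows-2022-efi
    jq -r '.artifacts_image' supported-context.json
    # projects.packages.broadcom.com/vsphere/iaas/kubernetes-release/1.32.0/vkr-artifact-server:v1.32.0_vmware.6-fips-vkr.2
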
/supported-version.txt:
--------------------------------------------------------------------------------
1 | v1.32.0+vmware.6-fips
2 |
--------------------------------------------------------------------------------