├── .gitattributes ├── .github ├── CODEOWNERS ├── mergify.yml ├── settings.yml └── workflows │ ├── ci-non-go.sh │ ├── ci.yaml │ └── tags.yaml ├── .gitignore ├── CONTRIBUTING.md ├── DCO.md ├── LICENSE ├── README.md ├── capt ├── README.md ├── Taskfile.yaml ├── config.yaml ├── scripts │ ├── create_vms.sh │ ├── generate_bmc.sh │ ├── generate_hardware.sh │ ├── generate_secret.sh │ ├── generate_state.sh │ ├── sushy-tools.conf │ ├── update_state.sh │ └── virtualbmc.sh ├── tasks │ ├── Taskfile-capi.yaml │ ├── Taskfile-create.yaml │ ├── Taskfile-delete.yaml │ └── Taskfile-vbmc.yaml └── templates │ ├── bmc-machine.tmpl │ ├── bmc-secret.tmpl │ ├── clusterctl.tmpl │ ├── hardware.tmpl │ ├── kustomization-iso.tmpl │ └── kustomization-netboot.tmpl ├── contrib └── tag-release.sh ├── shell.nix └── stack ├── README.md ├── docs └── quickstarts │ ├── KUBERNETES.md │ ├── VAGRANTLVIRT.md │ └── VAGRANTVBOX.md └── vagrant ├── .env ├── Vagrantfile ├── hardware.yaml ├── setup.sh ├── template.yaml ├── ubuntu-download.yaml └── workflow.yaml /.gitattributes: -------------------------------------------------------------------------------- 1 | # Use Unix line endings for scripts 2 | *.sh text eol=lf 3 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | /.github/settings.yml @jacobweinstock @chrisdoherty4 2 | /.github/CODEOWNERS @jacobweinstock @chrisdoherty4 3 | -------------------------------------------------------------------------------- /.github/mergify.yml: -------------------------------------------------------------------------------- 1 | queue_rules: 2 | - name: default 3 | queue_conditions: 4 | - base=main 5 | - "#approved-reviews-by>=1" 6 | - "#changes-requested-reviews-by=0" 7 | - "#review-requested=0" 8 | - check-success=DCO 9 | - check-success=build 10 | - label!=do-not-merge 11 | - label=ready-to-merge 12 | merge_conditions: 13 | # Conditions to 
get out of the queue (= merged) 14 | - check-success=DCO 15 | - check-success=build 16 | commit_message_template: | 17 | {{ title }} (#{{ number }}) 18 | 19 | {{ body }} 20 | merge_method: merge 21 | 22 | pull_request_rules: 23 | - name: Automatic merge on approval 24 | conditions: [] 25 | actions: 26 | queue: 27 | -------------------------------------------------------------------------------- /.github/settings.yml: -------------------------------------------------------------------------------- 1 | # Collaborators: give specific users access to this repository. 2 | # See https://docs.github.com/en/rest/reference/repos#add-a-repository-collaborator for available options 3 | collaborators: 4 | # Maintainers, should also be added to the .github/CODEOWNERS file as owners of this settings.yml file. 5 | - username: jacobweinstock 6 | permission: maintain 7 | - username: chrisdoherty4 8 | permission: maintain 9 | # Approvers 10 | - username: displague 11 | permission: push 12 | # Reviewers 13 | 14 | # Note: `permission` is only valid on organization-owned repositories. 15 | # The permission to grant the collaborator. Can be one of: 16 | # * `pull` - can pull, but not push to or administer this repository. 17 | # * `push` - can pull and push, but not administer this repository. 18 | # * `admin` - can pull, push and administer this repository. 19 | # * `maintain` - Recommended for project managers who need to manage the repository without access to sensitive or destructive actions. 20 | # * `triage` - Recommended for contributors who need to proactively manage issues and pull requests without write access. 21 | -------------------------------------------------------------------------------- /.github/workflows/ci-non-go.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env nix-shell 2 | #!nix-shell -i bash ../../shell.nix 3 | # shellcheck shell=bash 4 | 5 | set -eux 6 | 7 | failed=0 8 | 9 | if ! 
git ls-files '*.md' '*.yaml' '*.yml' | xargs prettier --list-different --write; then 10 | failed=1 11 | fi 12 | 13 | if ! git ls-files '*.json' | xargs -I '{}' sh -c 'jq --sort-keys . {} > {}.t && mv {}.t {}'; then 14 | failed=1 15 | fi 16 | 17 | if ! shfmt -f . | xargs shfmt -s -l -d; then 18 | failed=1 19 | fi 20 | 21 | if ! rufo stack/vagrant/Vagrantfile; then 22 | failed=1 23 | fi 24 | 25 | if ! git diff | (! grep .); then 26 | failed=1 27 | fi 28 | 29 | exit "$failed" 30 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: For each commit and PR 2 | on: 3 | push: 4 | pull_request: 5 | 6 | jobs: 7 | build: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout code 11 | uses: actions/checkout@v2 12 | with: 13 | lfs: true 14 | - name: Install nix 15 | uses: cachix/install-nix-action@v12 16 | with: 17 | nix_path: nixpkgs=channel:nixpkgs-unstable 18 | - name: Run ci-non-go.sh 19 | run: ./.github/workflows/ci-non-go.sh 20 | -------------------------------------------------------------------------------- /.github/workflows/tags.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | tags: 4 | - "v*" 5 | name: Create release 6 | jobs: 7 | release: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Checkout code 11 | uses: actions/checkout@v2 12 | - name: Generate Release Notes 13 | run: | 14 | release_notes=$(gh api repos/{owner}/{repo}/releases/generate-notes -F tag_name=${{ github.ref }} --jq .body) 15 | echo 'RELEASE_NOTES<<EOF' >> $GITHUB_ENV 16 | echo "${release_notes}" >> $GITHUB_ENV 17 | echo 'EOF' >> $GITHUB_ENV 18 | env: 19 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 20 | OWNER: ${{ github.repository_owner }} 21 | REPO: ${{ github.event.repository.name }} 22 | - name: Create Release 23 | id: create_release 24 | uses: actions/create-release@v1 25 | env: 26 |
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 27 | with: 28 | tag_name: ${{ github.ref }} 29 | release_name: Release ${{ github.ref }} 30 | body: ${{ env.RELEASE_NOTES }} 31 | draft: false 32 | prerelease: false 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | error.log 3 | .task 4 | .state 5 | capt/output/ 6 | .vscode/ 7 | sushy.cert 8 | sushy.key 9 | htpasswd -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | ## Hello Contributors! 2 | 3 | Thanks for your interest! 4 | We're so glad you're here. 5 | 6 | ### Important Resources 7 | 8 | #### bugs: [https://github.com/tinkerbell/playground/issues](https://github.com/tinkerbell/playground/issues) 9 | 10 | ### Code of Conduct 11 | 12 | Please read and understand the code of conduct found [here](https://github.com/tinkerbell/.github/blob/main/CODE_OF_CONDUCT.md). 13 | 14 | ### DCO Sign Off 15 | 16 | Please read and understand the DCO found [here](DCO.md). 17 | 18 | ### Environment Details 19 | 20 | Building is handled by `make`, please see the [Makefile](Makefile) for available targets. 21 | 22 | #### Nix 23 | 24 | This repo's build environment can be reproduced using `nix`. 25 | 26 | ##### Install Nix 27 | 28 | Follow the [Nix installation](https://nixos.org/download.html) guide to setup Nix on your box. 29 | 30 | ##### Load Dependencies 31 | 32 | Loading build dependencies is as simple as running `nix-shell` or using [lorri](https://github.com/nix-community/lorri). 33 | If you have `direnv` installed the included `.envrc` will make that step automatic. 34 | 35 | ### How to Submit Change Requests 36 | 37 | Please submit change requests and / or features via [Issues](https://github.com/tinkerbell/playground/issues). 
38 | There's no guarantee it'll be changed, but you never know until you try. 39 | We'll try to add comments as soon as possible, though. 40 | 41 | ### How to Report a Bug 42 | 43 | Bugs are problems in code, in the functionality of an application or in its UI design; you can submit them through [Issues](https://github.com/tinkerbell/playground/issues). 44 | 45 | ## Code Style Guides 46 | 47 | ## Implementation Details 48 | 49 | The Playground is organized into two high level components: `infrastructure` and `stack`. 50 | 51 | - The `infrastructure` component is responsible for provisioning the infrastructure required to run the Tinkerbell stack. `Vagrant` and `Terraform` are the supported infrastructure tools. 52 | - The `stack` component is responsible for provisioning the Tinkerbell stack itself. `Docker Compose` and `Helm` are the supported stack tools. 53 | -------------------------------------------------------------------------------- /DCO.md: -------------------------------------------------------------------------------- 1 | # DCO Sign Off 2 | 3 | All authors to the project retain copyright to their work. However, to ensure 4 | that they are only submitting work that they have rights to, we are requiring 5 | everyone to acknowledge this by signing their work. 6 | 7 | Since this signature indicates your rights to the contribution and 8 | certifies the statements below, it must contain your real name and 9 | email address. Various forms of noreply email address must not be used. 10 | 11 | Any copyright notices in this repository should specify the authors as "The 12 | project authors". 13 | 14 | To sign your work, just add a line like this at the end of your commit message: 15 | 16 | ```text 17 | Signed-off-by: Jess Owens <jess.owens@example.com> 18 | ``` 19 | 20 | This can easily be done with the `--signoff` option to `git commit`. 
21 | 22 | By doing this you state that you can certify the following (from [https://developercertificate.org/][1]): 23 | 24 | ```text 25 | Developer Certificate of Origin 26 | Version 1.1 27 | 28 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 29 | 1 Letterman Drive 30 | Suite D4700 31 | San Francisco, CA, 94129 32 | 33 | Everyone is permitted to copy and distribute verbatim copies of this 34 | license document, but changing it is not allowed. 35 | 36 | 37 | Developer's Certificate of Origin 1.1 38 | 39 | By making a contribution to this project, I certify that: 40 | 41 | (a) The contribution was created in whole or in part by me and I 42 | have the right to submit it under the open source license 43 | indicated in the file; or 44 | 45 | (b) The contribution is based upon previous work that, to the best 46 | of my knowledge, is covered under an appropriate open source 47 | license and I have the right under that license to submit that 48 | work with modifications, whether created in whole or in part 49 | by me, under the same open source license (unless I am 50 | permitted to submit under a different license), as indicated 51 | in the file; or 52 | 53 | (c) The contribution was provided directly to me by some other 54 | person who certified (a), (b) or (c) and I have not modified 55 | it. 56 | 57 | (d) I understand and agree that this project and the contribution 58 | are public and that a record of the contribution (including all 59 | personal information I submit with it, including my sign-off) is 60 | maintained indefinitely and may be redistributed consistent with 61 | this project or the open source license(s) involved. 
62 | ``` 63 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2020 Packet Host, Inc. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Playground 2 | 3 | Welcome to the Tinkerbell Playground! This playground repository holds example deployments for use in learning and testing. 4 | The following playgrounds are available: 5 | 6 | - [Tinkerbell stack playground](stack/README.md) 7 | - [Cluster API Provider Tinkerbell (CAPT) playground](capt/README.md) 8 | -------------------------------------------------------------------------------- /capt/README.md: -------------------------------------------------------------------------------- 1 | # Cluster API Provider Tinkerbell (CAPT) Playground 2 | 3 | The Cluster API Provider Tinkerbell (CAPT) is a Kubernetes Cluster API provider that uses Tinkerbell to provision machines. You can find more information about CAPT [here](https://github.com/tinkerbell/cluster-api-provider-tinkerbell). The CAPT playground is an example deployment for use in learning and testing. It is not a production reference architecture. 
4 | 5 | ## Getting Started 6 | 7 | The CAPT playground is a tool that will create a local CAPT deployment and a single workload cluster. This includes creating and installing a Kubernetes cluster (KinD), the Tinkerbell stack, all CAPI and CAPT components, Virtual machines that will be used to create the workload cluster, and a Virtual BMC server to manage the VMs. 8 | 9 | Start by reviewing and installing the [prerequisites](#prerequisites) and understanding and customizing the [configuration file](./config.yaml) as needed. 10 | 11 | ## Prerequisites 12 | 13 | ### Operating System 14 | 15 | This playground has only been tested on Ubuntu 22.04 LTS. If you are using a virtual machine, ensure that you have hardware virtualization enabled. 16 | 17 | ### Binaries 18 | 19 | - [Libvirtd](https://wiki.debian.org/KVM) >= libvirtd (libvirt) 8.0.0 20 | - [Docker](https://docs.docker.com/engine/install/) >= 24.0.7 21 | - [Helm](https://helm.sh/docs/intro/install/) >= v3.13.1 22 | - [KinD](https://kind.sigs.k8s.io/docs/user/quick-start/#installation) >= v0.20.0 23 | - [clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start#install-clusterctl) >= v1.6.0 24 | - [kubectl](https://www.downloadkubernetes.com/) >= v1.28.2 25 | - [virt-install](https://virt-manager.org/) >= 4.0.0 26 | - [yq](https://github.com/mikefarah/yq/#install) >= v4.44.2 27 | - [task](https://taskfile.dev/installation/) >= 3.37.2 28 | 29 | ### Packages 30 | 31 | The `ovmf` package is required for the libvirt VMs to run properly. OVMF is a port of Intel's tianocore firmware to the qemu virtual machine. Install it with the following command. 32 | 33 | ```bash 34 | sudo apt install ovmf 35 | ``` 36 | 37 | ### Hardware 38 | 39 | - at least 60GB of free and very fast disk space (etcd is very disk I/O sensitive) 40 | - at least 8GB of free RAM 41 | - at least 4 CPU cores 42 | 43 | ## Usage 44 | 45 | Start by looking at the [`config.yaml`](./config.yaml) file. 
This file contains the configuration for the playground. You can customize the playground by changing the values in this file. We recommend you start with the defaults to get familiar with the playground before customizing. 46 | 47 | Create the CAPT playground: 48 | 49 | ```bash 50 | # Run the creation process and follow the outputted next steps at the end of the process. 51 | task create-playground 52 | ``` 53 | 54 | Delete the CAPT playground: 55 | 56 | ```bash 57 | task delete-playground 58 | ``` 59 | 60 | ## Next Steps 61 | 62 | With the playground up and running and a workload cluster created, you can run through a few CAPI lifecycle operations. 63 | 64 | ### Move/pivot the Tinkerbell stack and CAPI/CAPT components to a workload cluster 65 | 66 | To be written. 67 | 68 | ### Upgrade the management cluster 69 | 70 | To be written. 71 | 72 | ### Upgrade the workload cluster 73 | 74 | To be written. 75 | 76 | ### Scale out the workload cluster 77 | 78 | To be written. 79 | 80 | ### Scale in the workload cluster 81 | 82 | To be written. 83 | 84 | ## Known Issues 85 | 86 | ### DNS issue 87 | 88 | KinD on Ubuntu has a known issue with DNS resolution in KinD pod containers. This affect the Download of HookOS in the Tink stack helm deployment. There are a few [known workarounds](https://github.com/kubernetes-sigs/kind/issues/1594#issuecomment-629509450). The recommendation for the CAPT playground is to add a DNS nameservers to Docker's `daemon.json` file. 
This can be done by adding the following to `/etc/docker/daemon.json`: 89 | 90 | ```json 91 | { 92 | "dns": ["1.1.1.1"] 93 | } 94 | ``` 95 | 96 | Then restart Docker: 97 | 98 | ```bash 99 | sudo systemctl restart docker 100 | ``` 101 | -------------------------------------------------------------------------------- /capt/Taskfile.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | includes: 4 | create: ./tasks/Taskfile-create.yaml 5 | delete: ./tasks/Taskfile-delete.yaml 6 | vbmc: ./tasks/Taskfile-vbmc.yaml 7 | capi: ./tasks/Taskfile-capi.yaml 8 | 9 | vars: 10 | OUTPUT_DIR: 11 | sh: echo $(yq eval '.outputDir' config.yaml) 12 | CURR_DIR: 13 | sh: pwd 14 | STATE_FILE: ".state" 15 | STATE_FILE_FQ_PATH: 16 | sh: echo {{joinPath .CURR_DIR .STATE_FILE}} 17 | 18 | tasks: 19 | create-playground: 20 | silent: true 21 | summary: | 22 | Create the CAPT playground. Use the config.yaml file to define things like cluster size and Kubernetes version. 23 | cmds: 24 | - task: system-deps-warnings 25 | - task: validate-binaries 26 | - task: ensure-output-dir 27 | - task: generate-state 28 | - task: create:playground-ordered 29 | - task: next-steps 30 | 31 | delete-playground: 32 | silent: true 33 | summary: | 34 | Delete the CAPT playground. 35 | cmds: 36 | - task: validate-binaries 37 | - task: delete:playground 38 | 39 | validate-binaries: 40 | silent: true 41 | summary: | 42 | Validate all required dependencies for the CAPT playground. 43 | cmds: 44 | - for: 45 | [ 46 | "virsh", 47 | "docker", 48 | "helm", 49 | "kind", 50 | "kubectl", 51 | "clusterctl", 52 | "virt-install", 53 | "yq", 54 | ] 55 | cmd: command -v {{ .ITEM }} >/dev/null || echo "'{{ .ITEM }}' was not found in the \$PATH, please ensure it is installed." 56 | # sudo apt install virtinst # for virt-install 57 | # sudo apt install bridge-utils # for brctl 58 | 59 | system-deps-warnings: 60 | summary: | 61 | Run CAPT playground system warnings. 
62 | silent: true 63 | cmds: 64 | - echo "Please ensure you have the following:" 65 | - echo "60GB of free and very fast disk space (etcd is very disk I/O sensitive)" 66 | - echo "8GB of free RAM" 67 | - echo "4 CPU cores" 68 | 69 | ensure-output-dir: 70 | summary: | 71 | Create the output directory. 72 | cmds: 73 | - mkdir -p {{.OUTPUT_DIR}} 74 | - mkdir -p {{.OUTPUT_DIR}}/xdg 75 | status: 76 | - echo ;[ -d {{.OUTPUT_DIR}} ] 77 | - echo ;[ -d {{.OUTPUT_DIR}}/xdg ] 78 | 79 | generate-state: 80 | summary: | 81 | Populate the state file. 82 | sources: 83 | - config.yaml 84 | generates: 85 | - .state 86 | cmds: 87 | - ./scripts/generate_state.sh config.yaml .state 88 | 89 | next-steps: 90 | silent: true 91 | summary: | 92 | Next steps after creating the CAPT playground. 93 | vars: 94 | NAMESPACE: 95 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 96 | NODE_BASE: 97 | sh: yq eval '.vm.baseName' {{.STATE_FILE_FQ_PATH}} 98 | CLUSTER_NAME: 99 | sh: yq eval '.clusterName' {{.STATE_FILE_FQ_PATH}} 100 | KIND_KUBECONFIG: 101 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 102 | cmds: 103 | - | 104 | echo 105 | echo The workload cluster is now being created. 106 | echo Once the cluster nodes are up and running, you will need to deploy a CNI for the cluster to be fully functional. 107 | echo The management cluster kubeconfig is located at: {{.KIND_KUBECONFIG}} 108 | echo The workload cluster kubeconfig is located at: {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig 109 | echo 110 | echo 1. Watch and wait for the first control plane node to be provisioned successfully: STATE_SUCCESS 111 | echo "KUBECONFIG={{.KIND_KUBECONFIG}} kubectl get workflows -n {{.NAMESPACE}} -w" 112 | echo 113 | echo 114 | echo 2. Watch and wait for the Kubernetes API server to be ready and responding: 115 | echo "until KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get node; do echo 'Waiting for Kube API server to respond...'; sleep 5; done" 116 | echo 117 | echo 3. 
Deploy a CNI 118 | echo Cilium 119 | echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig cilium install" 120 | echo or KUBEROUTER 121 | echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl apply -f https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/daemonset/kubeadm-kuberouter.yaml" 122 | echo 123 | echo 4. Watch and wait for all nodes to join the cluster and be ready: 124 | echo "KUBECONFIG={{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig kubectl get nodes -w" 125 | - touch {{.OUTPUT_DIR}}/.next-steps-displayed 126 | status: 127 | - echo ;[ -f {{.OUTPUT_DIR}}/.next-steps-displayed ] 128 | -------------------------------------------------------------------------------- /capt/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | clusterName: "capt-playground" 3 | outputDir: "output" 4 | namespace: "tink" 5 | counts: 6 | controlPlanes: 1 7 | workers: 1 8 | spares: 1 9 | versions: 10 | capt: v0.6.1 11 | chart: 0.6.1 12 | kube: v1.29.4 13 | os: 20.04 14 | kubevip: 0.8.7 15 | capt: 16 | providerRepository: "https://github.com/tinkerbell/cluster-api-provider-tinkerbell/releases" 17 | #providerRepository: "/home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/out/release/infrastructure-tinkerbell" 18 | chart: 19 | location: "oci://ghcr.io/tinkerbell/charts/stack" 20 | #location: "/home/tink/repos/tinkerbell/charts/tinkerbell/stack" 21 | os: 22 | registry: ghcr.io/tinkerbell/cluster-api-provider-tinkerbell 23 | distro: ubuntu 24 | sshKey: "" 25 | vm: 26 | baseName: "node" 27 | cpusPerVM: 2 28 | memInMBPerVM: 2048 29 | diskSizeInGBPerVM: 10 30 | diskPath: "/tmp" 31 | virtualBMC: 32 | containerName: "virtualbmc" 33 | image: ghcr.io/jacobweinstock/virtualbmc:latest 34 | user: "root" 35 | pass: "calvin" 36 | bootMode: netboot 37 | -------------------------------------------------------------------------------- /capt/scripts/create_vms.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | # Create VMs 6 | 7 | function main() { 8 | declare -r STATE_FILE="$1" 9 | declare -r OUTPUT_DIR=$(yq eval '.outputDir' "$STATE_FILE") 10 | declare BRIDGE_NAME="$(yq eval '.kind.bridgeName' "$STATE_FILE")" 11 | declare CPUS="$(yq eval '.vm.cpusPerVM' "$STATE_FILE")" 12 | declare MEM="$(yq eval '.vm.memInMBPerVM' "$STATE_FILE")" 13 | declare DISK_SIZE="$(yq eval '.vm.diskSizeInGBPerVM' "$STATE_FILE")" 14 | declare DISK_PATH="$(yq eval '.vm.diskPath' "$STATE_FILE")" 15 | 16 | while IFS=$',' read -r name mac; do 17 | # create the VM; disk size comes from .vm.diskSizeInGBPerVM in the state file 18 | virt-install \ 19 | --description "CAPT VM" \ 20 | --ram "$MEM" --vcpus "$CPUS" \ 21 | --os-variant "ubuntu20.04" \ 22 | --graphics "vnc" \ 23 | --boot "uefi,firmware.feature0.name=enrolled-keys,firmware.feature0.enabled=no,firmware.feature1.name=secure-boot,firmware.feature1.enabled=yes" \ 24 | --noautoconsole \ 25 | --noreboot \ 26 | --import \ 27 | --connect "qemu:///system" \ 28 | --name "$name" \ 29 | --disk "path=$DISK_PATH/$name-disk.img,bus=virtio,size=$DISK_SIZE,sparse=yes" \ 30 | --network "bridge:$BRIDGE_NAME,mac=$mac" 31 | done < <(yq e '.vm.details.[] | [key, .mac] | @csv' "$STATE_FILE") 32 | } 33 | 34 | main "$@" 35 | -------------------------------------------------------------------------------- /capt/scripts/generate_bmc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | # This script creates the BMC machine yaml files needed for the CAPT playground.
6 | 7 | function main() { 8 | declare -r STATE_FILE="$1" 9 | declare -r OUTPUT_DIR=$(yq eval '.outputDir' "$STATE_FILE") 10 | 11 | rm -f "$OUTPUT_DIR"/bmc-machine*.yaml 12 | 13 | namespace=$(yq eval '.namespace' "$STATE_FILE") 14 | bmc_ip=$(yq eval '.virtualBMC.ip' "$STATE_FILE") 15 | 16 | while IFS=$',' read -r name port; do 17 | export NODE_NAME="$name" 18 | export BMC_IP="$bmc_ip" 19 | export BMC_PORT="$port" 20 | export NAMESPACE="$namespace" 21 | envsubst "$(printf '${%s} ' $(env | cut -d'=' -f1))" < templates/bmc-machine.tmpl > "$OUTPUT_DIR"/bmc-machine-"$NODE_NAME".yaml 22 | unset NODE_NAME 23 | unset BMC_IP 24 | unset BMC_PORT 25 | unset NAMESPACE 26 | done < <(yq e '.vm.details.[] | [key, .bmc.port] | @csv' "$STATE_FILE") 27 | } 28 | 29 | main "$@" 30 | -------------------------------------------------------------------------------- /capt/scripts/generate_hardware.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Generate hardware 4 | 5 | set -euo pipefail 6 | 7 | function main() { 8 | # Generate hardware 9 | declare -r STATE_FILE="$1" 10 | declare -r OUTPUT_DIR=$(yq eval '.outputDir' "$STATE_FILE") 11 | declare -r NS=$(yq eval '.namespace' "$STATE_FILE") 12 | 13 | rm -f "$OUTPUT_DIR"/hardware*.yaml 14 | 15 | while IFS=$',' read -r name mac role ip gateway; do 16 | export NODE_NAME="$name" 17 | export NODE_MAC="$mac" 18 | export NODE_ROLE="$role" 19 | export NODE_IP="$ip" 20 | export GATEWAY_IP="$gateway" 21 | export NAMESPACE="$NS" 22 | envsubst "$(printf '${%s} ' $(env | cut -d'=' -f1))" < templates/hardware.tmpl > "$OUTPUT_DIR"/hardware-"$NODE_NAME".yaml 23 | unset NODE_ROLE 24 | unset NODE_NAME 25 | unset NODE_IP 26 | unset NODE_MAC 27 | unset GATEWAY_IP 28 | done < <(yq e '.vm.details.[] | [key, .mac, .role, .ip, .gateway] | @csv' "$STATE_FILE") 29 | 30 | } 31 | 32 | main "$@" 33 | -------------------------------------------------------------------------------- /capt/scripts/generate_secret.sh:
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Generate secret. All machines share the same secret. The only customization is the namespace, user name, and password. 4 | 5 | function main() { 6 | declare -r STATE_FILE="$1" 7 | declare -r OUTPUT_DIR=$(yq eval '.outputDir' "$STATE_FILE") 8 | export NAMESPACE=$(yq eval '.namespace' "$STATE_FILE") 9 | export BMC_USER_BASE64=$(yq eval '.virtualBMC.user' "$STATE_FILE" | tr -d '\n' | base64) 10 | export BMC_PASS_BASE64=$(yq eval '.virtualBMC.pass' "$STATE_FILE" | tr -d '\n' | base64) 11 | 12 | envsubst "$(printf '${%s} ' $(env | cut -d'=' -f1))" < templates/bmc-secret.tmpl > "$OUTPUT_DIR"/bmc-secret.yaml 13 | unset BMC_USER_BASE64 14 | unset BMC_PASS_BASE64 15 | unset NAMESPACE 16 | } 17 | 18 | main "$@" 19 | -------------------------------------------------------------------------------- /capt/scripts/generate_state.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script generates the state data needed for creating the CAPT playground.
3 | 4 | # state file spec 5 | cat <<EOF >/dev/null 6 | --- 7 | clusterName: "capt-playground" 8 | outputDir: "/home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/playground/output" 9 | namespace: "tink" 10 | counts: 11 | controlPlanes: 1 12 | workers: 1 13 | spares: 1 14 | versions: 15 | capt: 0.5.3 16 | chart: 0.5.0 17 | kube: v1.28.8 18 | os: 22.04 19 | os: 20 | registry: reg.weinstocklabs.com/tinkerbell/cluster-api-provider-tinkerbell 21 | distro: ubuntu 22 | sshKey: "" 23 | version: "2204" 24 | vm: 25 | baseName: "node" 26 | cpusPerVM: 2 27 | memInMBPerVM: 2048 28 | diskSizeInGBPerVM: 10 29 | diskPath: "/tmp" 30 | details: 31 | node1: 32 | mac: 02:7f:92:bd:2d:57 33 | bmc: 34 | port: 6231 35 | role: control-plane 36 | ip: 172.18.10.21 37 | gateway: 172.18.0.1 38 | node2: 39 | mac: 02:f3:eb:c1:aa:2b 40 | bmc: 41 | port: 6232 42 | role: worker 43 | ip: 172.18.10.22 44 | gateway: 172.18.0.1 45 | node3: 46 | mac: 02:3c:e6:70:1b:5e 47 | bmc: 48 | port: 6233 49 | role: spare 50 | ip: 172.18.10.23 51 | gateway: 172.18.0.1 52 | virtualBMC: 53 | containerName: "virtualbmc" 54 | image: ghcr.io/jacobweinstock/virtualbmc 55 | user: "root" 56 | pass: "calvin" 57 | ip: 172.18.0.3 58 | totalNodes: 3 59 | kind: 60 | kubeconfig: /home/tink/repos/tinkerbell/cluster-api-provider-tinkerbell/playground/output/kind.kubeconfig 61 | gatewayIP: 172.18.0.1 62 | nodeIPBase: 172.18.10.20 63 | bridgeName: br-d086780dac6b 64 | tinkerbell: 65 | vip: 172.18.10.74 66 | cluster: 67 | controlPlane: 68 | vip: 172.18.10.75 69 | podCIDR: 172.100.0.0/16 70 | bootMode: netboot 71 | EOF 72 | 73 | set -euo pipefail 74 | 75 | function generate_mac() { 76 | declare NODE_NAME="$1" 77 | 78 | echo "$NODE_NAME" | md5sum | sed 's/^\(..\)\(..\)\(..\)\(..\)\(..\).*$/02:\1:\2:\3:\4:\5/' 79 | } 80 | 81 | function main() { 82 | # read in the config.yaml file and populate the .state file 83 | declare CONFIG_FILE="$1" 84 | declare STATE_FILE="$2" 85 | 86 | # update outputDir to be a fully qualified path 87 | 
output_dir=$(yq eval '.outputDir' "$CONFIG_FILE") 88 | if [[ $output_dir == /* ]]; then 89 | echo 90 | else 91 | current_dir=$(pwd) 92 | output_dir="$current_dir/$output_dir" 93 | fi 94 | config_file=$(realpath "$CONFIG_FILE") 95 | state_file="$STATE_FILE" 96 | 97 | cp -a "$config_file" "$state_file" 98 | yq e -i '.outputDir = "'$output_dir'"' "$state_file" 99 | 100 | # totalNodes 101 | total_nodes=$(($(yq eval '.counts.controlPlanes' "$state_file") + $(yq eval '.counts.workers' "$state_file") + $(yq eval '.counts.spares' "$state_file"))) 102 | yq e -i ".totalNodes = $total_nodes" "$state_file" 103 | 104 | # populate vmNames 105 | base_name=$(yq eval '.vm.baseName' "$state_file") 106 | base_ipmi_port=6230 107 | for i in $(seq 1 $total_nodes); do 108 | name="$base_name$i" 109 | mac=$(generate_mac "$name") 110 | yq e -i ".vm.details.$name.mac = \"$mac\"" "$state_file" 111 | yq e -i ".vm.details.$name.bmc.port = $((base_ipmi_port + i))" "$state_file" 112 | # set the node role 113 | if [[ $i -le $(yq eval '.counts.controlPlanes' "$state_file") ]]; then 114 | yq e -i ".vm.details.$name.role = \"control-plane\"" "$state_file" 115 | elif [[ $i -le $(($(yq eval '.counts.controlPlanes' "$state_file") + $(yq eval '.counts.workers' "$state_file"))) ]]; then 116 | yq e -i ".vm.details.$name.role = \"worker\"" "$state_file" 117 | else 118 | yq e -i ".vm.details.$name.role = \"spare\"" "$state_file" 119 | fi 120 | unset name 121 | unset mac 122 | done 123 | 124 | # populate kind.kubeconfig 125 | yq e -i '.kind.kubeconfig = "'$output_dir'/kind.kubeconfig"' "$state_file" 126 | 127 | # populate the expected OS version in the raw image name (22.04 -> 2204) 128 | os_version=$(yq eval '.versions.os' "$state_file") 129 | os_version=$(echo "$os_version" | tr -d '.') 130 | yq e -i '.os.version = "'$os_version'"' "$state_file" 131 | } 132 | 133 | main "$@" 134 | -------------------------------------------------------------------------------- /capt/scripts/sushy-tools.conf: 
-------------------------------------------------------------------------------- 1 | SUSHY_EMULATOR_LISTEN_IP = u'0.0.0.0' 2 | SUSHY_EMULATOR_LISTEN_PORT = 443 3 | SUSHY_EMULATOR_OS_CLOUD = None 4 | SUSHY_EMULATOR_LIBVIRT_URI = u'qemu:///system' 5 | SUSHY_EMULATOR_IGNORE_BOOT_DEVICE = False 6 | SUSHY_EMULATOR_FEATURE_SET = u'full' 7 | SUSHY_EMULATOR_AUTH_FILE = u'/etc/sushy/htpasswd' 8 | SUSHY_EMULATOR_SSL_CERT = u'/etc/sushy/sushy.cert' 9 | SUSHY_EMULATOR_SSL_KEY = u'/etc/sushy/sushy.key' 10 | SUSHY_EMULATOR_BOOT_LOADER_MAP = { 11 | u'UEFI': { 12 | u'x86_64': u'/usr/share/OVMF/OVMF_CODE.fd' 13 | }, 14 | u'Legacy': { 15 | u'x86_64': None 16 | } 17 | } 18 | SUSHY_EMULATOR_VMEDIA_DEVICES = { 19 | u'Cd': { 20 | u'Name': 'Virtual CD', 21 | u'MediaTypes': [ 22 | u'CD', 23 | u'DVD' 24 | ] 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /capt/scripts/update_state.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | # this script updates the state file with the generated hardware data 6 | 7 | function main() { 8 | declare -r STATE_FILE="$1" 9 | declare CLUSTER_NAME=$(yq eval '.clusterName' "$STATE_FILE") 10 | declare GATEWAY_IP=$(docker inspect -f '{{ .NetworkSettings.Networks.kind.Gateway }}' "$CLUSTER_NAME"-control-plane) 11 | declare NODE_IP_BASE=$(awk -F"." '{print $1"."$2".10.20"}' <<<"$GATEWAY_IP") 12 | declare NODE_BASE=$(yq eval '.vm.baseName' "$STATE_FILE") 13 | declare IP_LAST_OCTET=$(echo "$NODE_IP_BASE" | cut -d. -f4) 14 | 15 | yq e -i '.kind.gatewayIP = "'$GATEWAY_IP'"' "$STATE_FILE" 16 | yq e -i '.kind.nodeIPBase = "'$NODE_IP_BASE'"' "$STATE_FILE" 17 | 18 | # set an ip and gateway per node 19 | idx=1 20 | while IFS=$',' read -r name; do 21 | v=$(echo "$NODE_IP_BASE" | awk -F"." 
'{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx)) 22 | ((idx++)) 23 | yq e -i ".vm.details.$name.ip = \"$v\"" "$STATE_FILE" 24 | yq e -i ".vm.details.$name.gateway = \"$GATEWAY_IP\"" "$STATE_FILE" 25 | unset v 26 | done < <(yq e '.vm.details.[] | [key] | @csv' "$STATE_FILE") 27 | 28 | # set the Tinkerbell Load Balancer IP (VIP) 29 | offset=50 30 | t_lb=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset)) 31 | yq e -i '.tinkerbell.vip = "'$t_lb'"' "$STATE_FILE" 32 | 33 | # set the cluster control plane load balancer IP (VIP) 34 | cp_lb=$(echo "$NODE_IP_BASE" | awk -F"." '{print $1"."$2"."$3}').$((IP_LAST_OCTET + idx + offset + 1)) 35 | yq e -i '.cluster.controlPlane.vip = "'$cp_lb'"' "$STATE_FILE" 36 | 37 | # set the cluster pod cidr 38 | POD_CIDR=$(awk -F"." '{print $1".100.0.0/16"}' <<<"$GATEWAY_IP") 39 | yq e -i '.cluster.podCIDR = "'$POD_CIDR'"' "$STATE_FILE" 40 | 41 | # set the KinD bridge name 42 | network_id=$(docker network inspect -f '{{.Id}}' kind) 43 | bridge_name="br-${network_id:0:12}" 44 | yq e -i '.kind.bridgeName = "'$bridge_name'"' "$STATE_FILE" 45 | 46 | } 47 | 48 | main "$@" 49 | -------------------------------------------------------------------------------- /capt/scripts/virtualbmc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | # This script will registry and start virtual bmc entries in a running virtualbmc container 6 | 7 | function main() { 8 | declare -r STATE_FILE="$1" 9 | declare -r OUTPUT_DIR=$(yq eval '.outputDir' "$STATE_FILE") 10 | 11 | username=$(yq eval '.virtualBMC.user' "$STATE_FILE") 12 | password=$(yq eval '.virtualBMC.pass' "$STATE_FILE") 13 | 14 | container_name=$(yq eval '.virtualBMC.containerName' "$STATE_FILE") 15 | while IFS=$',' read -r name port; do 16 | docker exec "$container_name" vbmc add --username "$username" --password "$password" --port "$port" "$name" 17 | docker exec "$container_name" 
vbmc start "$name" 18 | done < <(yq e '.vm.details.[] | [key, .bmc.port] | @csv' "$STATE_FILE") 19 | } 20 | 21 | main "$@" 22 | -------------------------------------------------------------------------------- /capt/tasks/Taskfile-capi.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | tasks: 4 | ordered: 5 | summary: | 6 | CAPI tasks run in order of dependency. 7 | cmds: 8 | - task: create-cluster-yaml 9 | - task: init 10 | - task: generate-cluster-yaml 11 | - task: create-kustomize-file 12 | - task: apply-kustomization 13 | 14 | create-cluster-yaml: 15 | run: once 16 | summary: | 17 | Create the cluster yaml. 18 | env: 19 | CAPT_VERSION: 20 | sh: yq eval '.versions.capt' {{.STATE_FILE_FQ_PATH}} 21 | LOCATION: 22 | sh: yq eval '.capt.providerRepository' {{.STATE_FILE_FQ_PATH}} 23 | vars: 24 | OUTPUT_DIR: 25 | sh: echo $(yq eval '.outputDir' config.yaml) 26 | cmds: 27 | - envsubst '$CAPT_VERSION,$LOCATION' < templates/clusterctl.tmpl > {{.OUTPUT_DIR}}/clusterctl.yaml 28 | status: 29 | - grep -q "$CAPT_VERSION" {{.OUTPUT_DIR}}/clusterctl.yaml 30 | 31 | init: 32 | run: once 33 | deps: [create-cluster-yaml] 34 | summary: | 35 | Initialize the cluster. 
36 | env: 37 | TINKERBELL_IP: 38 | sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}} 39 | CLUSTERCTL_DISABLE_VERSIONCHECK: true 40 | XDG_CONFIG_HOME: "{{.OUTPUT_DIR}}/xdg" 41 | XDG_CONFIG_DIRS: "{{.OUTPUT_DIR}}/xdg" 42 | XDG_STATE_HOME: "{{.OUTPUT_DIR}}/xdg" 43 | XDG_CACHE_HOME: "{{.OUTPUT_DIR}}/xdg" 44 | XDG_RUNTIME_DIR: "{{.OUTPUT_DIR}}/xdg" 45 | XDG_DATA_HOME: "{{.OUTPUT_DIR}}/xdg" 46 | XDG_DATA_DIRS: "{{.OUTPUT_DIR}}/xdg" 47 | vars: 48 | OUTPUT_DIR: 49 | sh: echo $(yq eval '.outputDir' config.yaml) 50 | KIND_GATEWAY_IP: 51 | sh: yq eval '.kind.gatewayIP' {{.STATE_FILE_FQ_PATH}} 52 | KUBECONFIG: 53 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 54 | cmds: 55 | - KUBECONFIG="{{.KUBECONFIG}}" clusterctl --config {{.OUTPUT_DIR}}/clusterctl.yaml init --infrastructure tinkerbell 56 | status: 57 | - expected=1; got=$(KUBECONFIG="{{.KUBECONFIG}}" kubectl get pods -n capt-system |grep -ce "capt-controller"); [[ "$got" == "$expected" ]] 58 | 59 | generate-cluster-yaml: 60 | run: once 61 | deps: [init] 62 | summary: | 63 | Generate the cluster yaml. 
64 | env: 65 | CONTROL_PLANE_VIP: 66 | sh: yq eval '.cluster.controlPlane.vip' {{.STATE_FILE_FQ_PATH}} 67 | POD_CIDR: 68 | sh: yq eval '.cluster.podCIDR' {{.STATE_FILE_FQ_PATH}} 69 | CLUSTERCTL_DISABLE_VERSIONCHECK: true 70 | XDG_CONFIG_HOME: "{{.OUTPUT_DIR}}/xdg" 71 | XDG_CONFIG_DIRS: "{{.OUTPUT_DIR}}/xdg" 72 | XDG_STATE_HOME: "{{.OUTPUT_DIR}}/xdg" 73 | XDG_CACHE_HOME: "{{.OUTPUT_DIR}}/xdg" 74 | XDG_RUNTIME_DIR: "{{.OUTPUT_DIR}}/xdg" 75 | XDG_DATA_HOME: "{{.OUTPUT_DIR}}/xdg" 76 | XDG_DATA_DIRS: "{{.OUTPUT_DIR}}/xdg" 77 | vars: 78 | CLUSTER_NAME: 79 | sh: yq eval '.clusterName' {{.STATE_FILE_FQ_PATH}} 80 | OUTPUT_DIR: 81 | sh: yq eval '.outputDir' config.yaml 82 | KUBE_VERSION: 83 | sh: yq eval '.versions.kube' {{.STATE_FILE_FQ_PATH}} 84 | CP_COUNT: 85 | sh: yq eval '.counts.controlPlanes' {{.STATE_FILE_FQ_PATH}} 86 | WORKER_COUNT: 87 | sh: yq eval '.counts.workers' {{.STATE_FILE_FQ_PATH}} 88 | NAMESPACE: 89 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 90 | KUBECONFIG: 91 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 92 | cmds: 93 | - KUBECONFIG="{{.KUBECONFIG}}" clusterctl generate cluster {{.CLUSTER_NAME}} --config {{.OUTPUT_DIR}}/clusterctl.yaml --kubernetes-version "{{.KUBE_VERSION}}" --control-plane-machine-count="{{.CP_COUNT}}" --worker-machine-count="{{.WORKER_COUNT}}" --target-namespace={{.NAMESPACE}} --write-to {{.OUTPUT_DIR}}/prekustomization.yaml 94 | status: 95 | - grep -q "{{.KUBE_VERSION}}" {{.OUTPUT_DIR}}/prekustomization.yaml 96 | 97 | create-kustomize-file: 98 | run: once 99 | summary: | 100 | Kustomize file for the CAPI generated config file (prekustomization.yaml). 
101 | env: 102 | NAMESPACE: 103 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 104 | OS_REGISTRY: 105 | sh: yq eval '.os.registry' {{.STATE_FILE_FQ_PATH}} 106 | OS_DISTRO: 107 | sh: yq eval '.os.distro' {{.STATE_FILE_FQ_PATH}} 108 | OS_VERSION: 109 | sh: yq eval '.os.version' {{.STATE_FILE_FQ_PATH}} 110 | VERSIONS_OS: 111 | sh: yq eval '.versions.os' {{.STATE_FILE_FQ_PATH}} 112 | SSH_AUTH_KEY: 113 | sh: yq eval '.os.sshKey' {{.STATE_FILE_FQ_PATH}} 114 | KUBE_VERSION: 115 | sh: yq eval '.versions.kube' {{.STATE_FILE_FQ_PATH}} 116 | TINKERBELL_VIP: 117 | sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}} 118 | CLUSTER_NAME: 119 | sh: yq eval '.clusterName' {{.STATE_FILE_FQ_PATH}} 120 | KUBEVIP_VERSION: 121 | sh: yq eval '.versions.kubevip' {{.STATE_FILE_FQ_PATH}} 122 | CONTROL_PLANE_VIP: 123 | sh: yq eval '.cluster.controlPlane.vip' {{.STATE_FILE_FQ_PATH}} 124 | vars: 125 | KUBE_VERSION: 126 | sh: yq eval '.versions.kube' {{.STATE_FILE_FQ_PATH}} 127 | OUTPUT_DIR: 128 | sh: yq eval '.outputDir' config.yaml 129 | BOOTMODE: 130 | sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}} 131 | KUSTOMIZE_FILE: 132 | sh: "[[ {{.BOOTMODE}} == 'iso' ]] && echo kustomization-iso.tmpl || echo kustomization-netboot.tmpl" 133 | sources: 134 | - config.yaml 135 | generates: 136 | - "{{.OUTPUT_DIR}}/kustomization.yaml" 137 | cmds: 138 | - envsubst "$(printf '${%s} ' $(env | cut -d'=' -f1))" < templates/{{.KUSTOMIZE_FILE}} > {{.OUTPUT_DIR}}/kustomization.yaml 139 | # FYI, the `preKubeadmCommands` checks for k8s v1.29 because of https://github.com/kube-vip/kube-vip/issues/684 140 | 141 | apply-kustomization: 142 | run: once 143 | deps: [generate-cluster-yaml, create-kustomize-file] 144 | summary: | 145 | Kustomize the cluster yaml. 
146 | vars: 147 | CLUSTER_NAME: 148 | sh: yq eval '.clusterName' {{.STATE_FILE_FQ_PATH}} 149 | KUBECONFIG: 150 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 151 | sources: 152 | - "{{.OUTPUT_DIR}}/kustomization.yaml" 153 | - "{{.OUTPUT_DIR}}/prekustomization.yaml" 154 | generates: 155 | - "{{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.yaml" 156 | cmds: 157 | - KUBECONFIG="{{.KUBECONFIG}}" kubectl kustomize {{.OUTPUT_DIR}} -o {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.yaml 158 | -------------------------------------------------------------------------------- /capt/tasks/Taskfile-create.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | includes: 4 | vbmc: ./Taskfile-vbmc.yaml 5 | capi: ./Taskfile-capi.yaml 6 | 7 | tasks: 8 | playground-ordered: 9 | silent: true 10 | summary: | 11 | Create the CAPT playground. 12 | cmds: 13 | - task: kind-cluster 14 | - task: update-state 15 | - task: deploy-tinkerbell-helm-chart 16 | - task: vbmc:prepare 17 | - task: vbmc:start-server 18 | - task: vbmc:update-state 19 | - task: hardware-cr 20 | - task: bmc-machine-cr 21 | - task: bmc-secret 22 | - task: vms 23 | - task: vbmc:start-vbmcs 24 | - task: default-storage-pool 25 | - task: apply-bmc-secret 26 | - task: apply-bmc-machines 27 | - task: apply-hardware 28 | - task: capi:ordered 29 | - task: allow-customization 30 | - task: create-workload-cluster 31 | - task: get-workload-cluster-kubeconfig 32 | 33 | allow-customization: 34 | prompt: The Workload cluster is ready to be provisioned. Execution is paused to allow for any User customizations. Press `y` to continue to Workload cluster creation. Press `n` to exit the whole process. 35 | cmds: 36 | - echo 'Creating Workload cluster' 37 | 38 | kind-cluster: 39 | run: once 40 | summary: | 41 | Install a KinD cluster. 
42 | vars: 43 | CLUSTER_NAME: 44 | sh: yq eval '.clusterName' {{.STATE_FILE_FQ_PATH}} 45 | KUBECONFIG: 46 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 47 | cmds: 48 | - kind create cluster --name {{.CLUSTER_NAME}} --kubeconfig "{{.KUBECONFIG}}" 49 | - until KUBECONFIG="{{.KUBECONFIG}}" kubectl wait --for=condition=ready node --all --timeout=5m; do echo "Waiting for nodes to be ready..."; sleep 1; done 50 | status: 51 | - KUBECONFIG="{{.KUBECONFIG}}" kind get clusters | grep -q {{.CLUSTER_NAME}} 52 | 53 | update-state: 54 | silent: true 55 | run: once 56 | deps: [kind-cluster] 57 | summary: | 58 | Update the state file with the KinD cluster information. Should be run only after the KinD cluster is created. 59 | cmds: 60 | - ./scripts/update_state.sh "{{.STATE_FILE_FQ_PATH}}" 61 | 62 | hardware-cr: 63 | run: once 64 | deps: [update-state] 65 | summary: | 66 | Create BMC Machine object. 67 | sources: 68 | - "{{.STATE_FILE_FQ_PATH}}" 69 | generates: 70 | - "{{.OUTPUT_DIR}}/hardware-*.yaml" 71 | cmds: 72 | - ./scripts/generate_hardware.sh {{.STATE_FILE_FQ_PATH}} 73 | 74 | bmc-machine-cr: 75 | run: once 76 | deps: [vbmc:update-state] 77 | summary: | 78 | Create BMC Machine objects. 79 | sources: 80 | - "{{.STATE_FILE_FQ_PATH}}" 81 | generates: 82 | - "{{.OUTPUT_DIR}}/bmc-machine-*.yaml" 83 | cmds: 84 | - ./scripts/generate_bmc.sh {{.STATE_FILE_FQ_PATH}} 85 | 86 | bmc-secret: 87 | run: once 88 | deps: [update-state] 89 | summary: | 90 | Create BMC Machine objects. 91 | sources: 92 | - "{{.STATE_FILE_FQ_PATH}}" 93 | generates: 94 | - "{{.OUTPUT_DIR}}/bmc-secret.yaml" 95 | cmds: 96 | - ./scripts/generate_secret.sh {{.STATE_FILE_FQ_PATH}} 97 | 98 | deploy-tinkerbell-helm-chart: 99 | run: once 100 | deps: [kind-cluster, update-state] 101 | summary: | 102 | Deploy the Tinkerbell Helm chart. 
103 | vars: 104 | KUBECONFIG: 105 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 106 | LB_IP: 107 | sh: yq eval '.tinkerbell.vip' {{.STATE_FILE_FQ_PATH}} 108 | TRUSTED_PROXIES: 109 | sh: KUBECONFIG={{.KUBECONFIG}} kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' 110 | STACK_CHART_VERSION: 111 | sh: yq eval '.versions.chart' {{.STATE_FILE_FQ_PATH}} 112 | NAMESPACE: 113 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 114 | LOCATION: 115 | sh: yq eval '.chart.location' {{.STATE_FILE_FQ_PATH}} 116 | CHART_NAME: tink-stack 117 | BOOTMODE: 118 | sh: yq eval '.bootMode' {{.STATE_FILE_FQ_PATH}} 119 | GLOBAL_VARS: 120 | - global.trustedProxies={"{{.TRUSTED_PROXIES}}"} 121 | - global.publicIP={{.LB_IP}} 122 | ISO_VARS: 123 | - stack.hook.extension=both 124 | - smee.iso.enabled=true 125 | - smee.iso.url=http://{{.LB_IP}}:8080/hook-latest-lts-x86_64-efi-initrd.iso 126 | - smee.iso.staticIPAMEnabled=true 127 | - smee.dhcp.enabled=false 128 | - stack.relay.enabled=false 129 | cmds: 130 | - KUBECONFIG="{{.KUBECONFIG}}" helm install {{.CHART_NAME}} {{.LOCATION}} --version "{{.STACK_CHART_VERSION}}" --create-namespace --namespace {{.NAMESPACE}} --wait {{range .GLOBAL_VARS}}--set "{{.}}" {{end}} {{- if eq .BOOTMODE "iso" }} {{- range .ISO_VARS }}--set "{{.}}" {{end}} {{end}} 131 | status: 132 | - KUBECONFIG="{{.KUBECONFIG}}" helm list -n {{.NAMESPACE}} | grep -q {{.CHART_NAME}} 133 | 134 | vms: 135 | run: once 136 | deps: [update-state, vbmc:update-state] 137 | summary: | 138 | Create Libvirt VMs. 
139 | vars: 140 | TOTAL_HARDWARE: 141 | sh: yq eval '.totalNodes' {{.STATE_FILE_FQ_PATH}} 142 | VM_BASE_NAME: 143 | sh: yq eval '.vm.baseName' {{.STATE_FILE_FQ_PATH}} 144 | cmds: 145 | - ./scripts/create_vms.sh "{{.STATE_FILE_FQ_PATH}}" 146 | status: 147 | - expected={{.TOTAL_HARDWARE}}; got=$(virsh --connect qemu:///system list --all --name |grep -ce "{{.VM_BASE_NAME}}*"); [[ "$got" == "$expected" ]] 148 | 149 | apply-bmc-secret: 150 | run: once 151 | deps: [kind-cluster, bmc-secret] 152 | summary: | 153 | Apply the BMC secret. 154 | vars: 155 | NAMESPACE: 156 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 157 | KUBECONFIG: 158 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 159 | cmds: 160 | - KUBECONFIG="{{.KUBECONFIG}}" kubectl apply -f {{.OUTPUT_DIR}}/bmc-secret.yaml 161 | status: 162 | - KUBECONFIG="{{.KUBECONFIG}}" kubectl get secret bmc-creds -n {{.NAMESPACE}} 163 | 164 | apply-bmc-machines: 165 | run: once 166 | deps: [kind-cluster, bmc-machine-cr] 167 | summary: | 168 | Apply the BMC machines. 169 | vars: 170 | NAMES: 171 | sh: yq e '.vm.details[] | [key] | @csv' {{.STATE_FILE_FQ_PATH}} 172 | TOTAL_HARDWARE: 173 | sh: yq eval '.totalNodes' {{.STATE_FILE_FQ_PATH}} 174 | VM_BASE_NAME: 175 | sh: yq eval '.vm.baseName' {{.STATE_FILE_FQ_PATH}} 176 | NAMESPACE: 177 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 178 | KUBECONFIG: 179 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 180 | cmds: 181 | - for: { var: NAMES } 182 | cmd: KUBECONFIG="{{.KUBECONFIG}}" kubectl apply -f {{.OUTPUT_DIR}}/bmc-machine-{{.ITEM}}.yaml 183 | status: 184 | - expected={{.TOTAL_HARDWARE}}; got=$(KUBECONFIG="{{.KUBECONFIG}}" kubectl get machines.bmc -n {{.NAMESPACE}} | grep -ce "{{.VM_BASE_NAME}}*"); [[ "$got" == "$expected" ]] 185 | 186 | apply-hardware: 187 | run: once 188 | deps: [kind-cluster, hardware-cr] 189 | summary: | 190 | Apply the hardware. 
191 | vars: 192 | NAMES: 193 | sh: yq e '.vm.details[] | [key] | @csv' {{.STATE_FILE_FQ_PATH}} 194 | TOTAL_HARDWARE: 195 | sh: yq eval '.totalNodes' {{.STATE_FILE_FQ_PATH}} 196 | VM_BASE_NAME: 197 | sh: yq eval '.vm.baseName' {{.STATE_FILE_FQ_PATH}} 198 | NAMESPACE: 199 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 200 | KUBECONFIG: 201 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 202 | cmds: 203 | - for: { var: NAMES } 204 | cmd: KUBECONFIG="{{.KUBECONFIG}}" kubectl apply -f {{.OUTPUT_DIR}}/hardware-{{.ITEM}}.yaml 205 | status: 206 | - expected={{.TOTAL_HARDWARE}}; got=$(KUBECONFIG="{{.KUBECONFIG}}" kubectl get hardware -n {{.NAMESPACE}} | grep -ce "{{.VM_BASE_NAME}}*"); [[ "$got" == "$expected" ]] 207 | 208 | create-workload-cluster: 209 | run: once 210 | deps: [kind-cluster, capi:ordered] 211 | summary: | 212 | Create the workload cluster by applying the generated manifest file. 213 | vars: 214 | CLUSTER_NAME: 215 | sh: yq eval '.clusterName' {{.STATE_FILE_FQ_PATH}} 216 | KUBECONFIG: 217 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 218 | NAMESPACE: 219 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 220 | cmds: 221 | - until KUBECONFIG="{{.KUBECONFIG}}" kubectl apply -f {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.yaml >>{{.OUTPUT_DIR}}/error.log 2>&1; do echo "Trying kubectl apply again..."; sleep 3; done 222 | - echo "Workload manifest applied to cluster." 223 | status: 224 | - KUBECONFIG="{{.KUBECONFIG}}" kubectl get -n {{.NAMESPACE}} cluster {{.CLUSTER_NAME}} 225 | 226 | get-workload-cluster-kubeconfig: 227 | run: once 228 | deps: [create-workload-cluster] 229 | summary: | 230 | Get the workload cluster's kubeconfig. 
231 | vars: 232 | KUBECONFIG: 233 | sh: yq eval '.kind.kubeconfig' {{.STATE_FILE_FQ_PATH}} 234 | NAMESPACE: 235 | sh: yq eval '.namespace' {{.STATE_FILE_FQ_PATH}} 236 | CLUSTER_NAME: 237 | sh: yq eval '.clusterName' {{.STATE_FILE_FQ_PATH}} 238 | cmds: 239 | - until KUBECONFIG="{{.KUBECONFIG}}" clusterctl get kubeconfig -n {{.NAMESPACE}} {{.CLUSTER_NAME}} >>{{.OUTPUT_DIR}}/error.log 2>&1 ; do echo "Waiting for workload cluster kubeconfig to be available..."; sleep 4; done 240 | - KUBECONFIG="{{.KUBECONFIG}}" clusterctl get kubeconfig -n {{.NAMESPACE}} {{.CLUSTER_NAME}} > {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig 241 | - echo "Workload cluster kubeconfig saved to {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig." 242 | status: 243 | - echo ; [ -f {{.OUTPUT_DIR}}/{{.CLUSTER_NAME}}.kubeconfig ] 244 | 245 | default-storage-pool: 246 | summary: | 247 | Create the default storage pool for the redfish emulator to work properly. 248 | cmds: 249 | - virsh --connect qemu:///system pool-define-as --name default --type dir --target /tmp/iso 250 | - virsh --connect qemu:///system pool-start --build default 251 | status: 252 | - virsh --connect qemu:///system pool-info default 253 | -------------------------------------------------------------------------------- /capt/tasks/Taskfile-delete.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | tasks: 4 | playground: 5 | summary: | 6 | Delete the CAPT playground. 7 | cmds: 8 | - task: kind-cluster 9 | - task: vbmc-container 10 | - task: vbmc-generated-files 11 | - task: vms 12 | - task: default-storage-pool 13 | - task: output-dir 14 | 15 | kind-cluster: 16 | summary: | 17 | Delete the KinD cluster. 
18 | vars: 19 | CLUSTER_NAME: 20 | sh: yq eval '.clusterName' {{.STATE_FILE_FQ_PATH}} 21 | cmds: 22 | - kind delete cluster --name {{.CLUSTER_NAME}} 23 | status: 24 | - got=$(kind get clusters | grep -c {{.CLUSTER_NAME}} || :); [[ "$got" == "0" ]] 25 | 26 | vms: 27 | summary: | 28 | Delete the VMs. 29 | vars: 30 | VM_NAMES: 31 | sh: yq e '.vm.details[] | [key] | @csv' {{.STATE_FILE_FQ_PATH}} 32 | VM_BASE_NAME: 33 | sh: yq eval '.vm.baseName' {{.STATE_FILE_FQ_PATH}} 34 | cmds: 35 | - for: { var: VM_NAMES } 36 | cmd: (virsh --connect qemu:///system destroy {{.ITEM}} || true) ## if the VM is already off, this will fail 37 | - for: { var: VM_NAMES } 38 | cmd: virsh --connect qemu:///system undefine --nvram --remove-all-storage {{.ITEM}} 39 | status: 40 | - got=$(virsh --connect qemu:///system list --all --name | grep -ce "{{.VM_BASE_NAME}}*" || :); [[ "$got" == "0" ]] 41 | 42 | default-storage-pool: 43 | summary: | 44 | Delete the default storage pool. 45 | cmds: 46 | - for vol in $(virsh --connect qemu:///system -q vol-list default | xargs | cut -d " " -f1,3,5,7,9); do virsh --connect qemu:///system vol-delete --pool default $vol; done 47 | - virsh --connect qemu:///system pool-destroy default || true 48 | - virsh --connect qemu:///system pool-undefine default 49 | status: 50 | - (! virsh --connect qemu:///system pool-info default ) 51 | 52 | vbmc-container: 53 | summary: | 54 | Delete the Virtual BMC container. 55 | vars: 56 | VBMC_CONTAINER_NAME: 57 | sh: yq eval '.virtualBMC.containerName' {{.STATE_FILE_FQ_PATH}} 58 | cmds: 59 | - docker rm -f {{.VBMC_CONTAINER_NAME}} 60 | status: 61 | - got=$(docker ps -a | grep -c {{.VBMC_CONTAINER_NAME}} || :); [[ "$got" == "0" ]] 62 | 63 | vbmc-generated-files: 64 | summary: | 65 | Delete the Virtual BMC generated files. 66 | cmds: 67 | - rm -f {{.CURR_DIR}}/scripts/htpasswd {{.CURR_DIR}}/scripts/sushy.key {{.CURR_DIR}}/scripts/sushy.cert 68 | status: 69 | - test ! -f {{.CURR_DIR}}/scripts/htpasswd 70 | - test ! 
-f {{.CURR_DIR}}/scripts/sushy.key 71 | - test ! -f {{.CURR_DIR}}/scripts/sushy.cert 72 | 73 | output-dir: 74 | summary: | 75 | Delete the output directory. 76 | cmds: 77 | - rm -rf {{.OUTPUT_DIR}} 78 | status: 79 | - echo ;[ ! -d {{.OUTPUT_DIR}} ] 80 | -------------------------------------------------------------------------------- /capt/tasks/Taskfile-vbmc.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | tasks: 4 | prepare: 5 | run: once 6 | summary: | 7 | Prepare the virtualbmc server. 8 | vars: 9 | VBMC_CONTAINER_IMAGE: 10 | sh: yq eval '.virtualBMC.image' {{.STATE_FILE_FQ_PATH}} 11 | USERNAME: 12 | sh: yq eval '.virtualBMC.user' {{.STATE_FILE_FQ_PATH}} 13 | PASSWORD: 14 | sh: yq eval '.virtualBMC.pass' {{.STATE_FILE_FQ_PATH}} 15 | cmds: 16 | - docker run -it --rm --entrypoint htpasswd {{.VBMC_CONTAINER_IMAGE}} -nbB "{{.USERNAME}}" "{{.PASSWORD}}" > {{.CURR_DIR}}/scripts/htpasswd 17 | - docker run -it --rm --entrypoint openssl -v {{.CURR_DIR}}/scripts:/scripts {{.VBMC_CONTAINER_IMAGE}} req -x509 -newkey rsa:2048 -keyout /scripts/sushy.key -out /scripts/sushy.cert -days 365 -nodes -subj "/C=US/ST=CA/L=Los Angeles/O=Engineering/OU=Engineering/CN=tinkerbell.org" 18 | status: 19 | - test -f {{.CURR_DIR}}/scripts/htpasswd 20 | - test -f {{.CURR_DIR}}/scripts/sushy.key 21 | - test -f {{.CURR_DIR}}/scripts/sushy.cert 22 | 23 | start-server: 24 | run: once 25 | deps: [prepare] 26 | summary: | 27 | Start the virtualbmc server. Requires the "kind" docker network to exist. 
28 | vars: 29 | VBMC_CONTAINER_NAME: 30 | sh: yq eval '.virtualBMC.containerName' {{.STATE_FILE_FQ_PATH}} 31 | VBMC_CONTAINER_IMAGE: 32 | sh: yq eval '.virtualBMC.image' {{.STATE_FILE_FQ_PATH}} 33 | cmds: 34 | - docker run -d --privileged --rm --network kind -e SUSHY_EMULATOR_CONFIG=/etc/sushy/sushy-emulator.conf -v /var/run/libvirt:/var/run/libvirt -v "${PWD}/scripts/sushy.key:/etc/sushy/sushy.key" -v "${PWD}/scripts/sushy.cert:/etc/sushy/sushy.cert" -v "${PWD}/scripts/sushy-tools.conf:/etc/sushy/sushy-emulator.conf" -v "${PWD}/scripts/htpasswd:/etc/sushy/htpasswd" --name {{.VBMC_CONTAINER_NAME}} {{.VBMC_CONTAINER_IMAGE}} 35 | status: 36 | - docker ps | grep -q {{.VBMC_CONTAINER_NAME}} 37 | 38 | start-vbmcs: 39 | run: once 40 | deps: [start-server] 41 | summary: | 42 | Register and start the virtualbmc servers. Requires that the virtual machines exist. 43 | vars: 44 | VBMC_NAME: 45 | sh: yq e '.virtualBMC.containerName' {{.STATE_FILE_FQ_PATH}} 46 | cmds: 47 | - ./scripts/virtualbmc.sh {{.STATE_FILE_FQ_PATH}} 48 | status: 49 | - expected=$(yq e '.totalNodes' {{.STATE_FILE_FQ_PATH}}); got=$(docker exec {{.VBMC_NAME}} vbmc list | grep -c "running" || :); [[ "$got" == "$expected" ]] 50 | 51 | update-state: 52 | run: once 53 | deps: [start-server] 54 | summary: | 55 | Update the state file with the virtual bmc server information. 
56 | vars: 57 | VBMC_CONTAINER_NAME: 58 | sh: yq eval '.virtualBMC.containerName' {{.STATE_FILE_FQ_PATH}} 59 | cmds: 60 | - vbmc_ip=$(docker inspect -f '{{`{{ .NetworkSettings.Networks.kind.IPAddress }}`}}' {{.VBMC_CONTAINER_NAME}}); yq e -i '.virtualBMC.ip = "'$vbmc_ip'"' {{.STATE_FILE_FQ_PATH}} 61 | status: 62 | - vbmc_ip=$(docker inspect -f '{{`{{ .NetworkSettings.Networks.kind.IPAddress }}`}}' {{.VBMC_CONTAINER_NAME}}); [[ "$(yq eval '.virtualBMC.ip' {{.STATE_FILE_FQ_PATH}})" == "$vbmc_ip" ]] 63 | -------------------------------------------------------------------------------- /capt/templates/bmc-machine.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: bmc.tinkerbell.org/v1alpha1 2 | kind: Machine 3 | metadata: 4 | name: $NODE_NAME 5 | namespace: $NAMESPACE 6 | spec: 7 | connection: 8 | authSecretRef: 9 | name: bmc-creds 10 | namespace: $NAMESPACE 11 | host: $BMC_IP 12 | insecureTLS: true 13 | port: $BMC_PORT 14 | providerOptions: 15 | preferredOrder: 16 | - ipmitool 17 | ipmitool: 18 | port: $BMC_PORT 19 | redfish: 20 | useBasicAuth: true 21 | systemName: $NODE_NAME 22 | -------------------------------------------------------------------------------- /capt/templates/bmc-secret.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | password: $BMC_PASS_BASE64 4 | username: $BMC_USER_BASE64 5 | kind: Secret 6 | metadata: 7 | name: bmc-creds 8 | namespace: $NAMESPACE 9 | type: kubernetes.io/basic-auth -------------------------------------------------------------------------------- /capt/templates/clusterctl.tmpl: -------------------------------------------------------------------------------- 1 | providers: 2 | - name: "tinkerbell" 3 | url: "$LOCATION/$CAPT_VERSION/infrastructure-components.yaml" 4 | type: "InfrastructureProvider" 5 | images: 6 | infrastructure-tinkerbell: 7 | tag: $CAPT_VERSION 
-------------------------------------------------------------------------------- /capt/templates/hardware.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: tinkerbell.org/v1alpha1 2 | kind: Hardware 3 | metadata: 4 | labels: 5 | tinkerbell.org/role: $NODE_ROLE 6 | name: $NODE_NAME 7 | namespace: $NAMESPACE 8 | spec: 9 | bmcRef: 10 | apiGroup: bmc.tinkerbell.org 11 | kind: Machine 12 | name: $NODE_NAME 13 | disks: 14 | - device: /dev/vda 15 | interfaces: 16 | - dhcp: 17 | arch: x86_64 18 | hostname: $NODE_NAME 19 | ip: 20 | address: $NODE_IP 21 | gateway: $GATEWAY_IP 22 | netmask: 255.255.0.0 23 | lease_time: 4294967294 24 | mac: $NODE_MAC 25 | uefi: true 26 | name_servers: 27 | - 8.8.8.8 28 | - 1.1.1.1 29 | netboot: 30 | allowPXE: true 31 | allowWorkflow: true 32 | metadata: 33 | instance: 34 | hostname: $NODE_NAME 35 | id: $NODE_MAC -------------------------------------------------------------------------------- /capt/templates/kustomization-iso.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: $NAMESPACE 4 | resources: 5 | - prekustomization.yaml 6 | patches: 7 | - target: 8 | group: infrastructure.cluster.x-k8s.io 9 | kind: TinkerbellMachineTemplate 10 | name: ".*control-plane.*" 11 | version: v1beta1 12 | patch: |- 13 | - op: add 14 | path: /spec/template/spec 15 | value: 16 | bootOptions: 17 | bootMode: iso 18 | isoURL: "http://$TINKERBELL_VIP:7171/iso/:macAddress/hook.iso" 19 | hardwareAffinity: 20 | required: 21 | - labelSelector: 22 | matchLabels: 23 | tinkerbell.org/role: control-plane 24 | templateOverride: | 25 | version: "0.1" 26 | name: playground-template 27 | global_timeout: 6000 28 | tasks: 29 | - name: "playground-template" 30 | worker: "{{.device_1}}" 31 | volumes: 32 | - /dev:/dev 33 | - /dev/console:/dev/console 34 | - /lib/firmware:/lib/firmware:ro 35 | actions: 36 | - 
name: "stream image" 37 | image: quay.io/tinkerbell/actions/oci2disk 38 | timeout: 1200 39 | environment: 40 | IMG_URL: $OS_REGISTRY/$OS_DISTRO-$OS_VERSION:$KUBE_VERSION.gz 41 | DEST_DISK: {{ index .Hardware.Disks 0 }} 42 | COMPRESSED: true 43 | - name: "add tink cloud-init config" 44 | image: quay.io/tinkerbell/actions/writefile 45 | timeout: 90 46 | environment: 47 | DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 48 | FS_TYPE: ext4 49 | DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg 50 | UID: 0 51 | GID: 0 52 | MODE: 0600 53 | DIRMODE: 0700 54 | CONTENTS: | 55 | datasource: 56 | Ec2: 57 | metadata_urls: ["http://$TINKERBELL_VIP:50061"] 58 | strict_id: false 59 | system_info: 60 | default_user: 61 | name: tink 62 | groups: [wheel, adm] 63 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 64 | shell: /bin/bash 65 | manage_etc_hosts: localhost 66 | warnings: 67 | dsid_missing_source: off 68 | - name: "disable cloud-init networking" 69 | image: quay.io/tinkerbell/actions/writefile 70 | timeout: 90 71 | environment: 72 | CONTENTS: 'network: {config: disabled}' 73 | DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 1 }}' 74 | DEST_PATH: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg 75 | DIRMODE: "0700" 76 | FS_TYPE: ext4 77 | GID: "0" 78 | MODE: "0600" 79 | UID: "0" 80 | - name: "create static netplan" 81 | image: quay.io/tinkerbell/actions/writefile 82 | timeout: 90 83 | environment: 84 | CONTENTS: | 85 | network: 86 | version: 2 87 | renderer: networkd 88 | ethernets: 89 | id0: 90 | match: 91 | macaddress: {{ (index .Hardware.Interfaces 0).DHCP.MAC }} 92 | addresses: 93 | - {{ (index .Hardware.Interfaces 0).DHCP.IP.Address }}/16 94 | nameservers: 95 | addresses: [{{ (index .Hardware.Interfaces 0).DHCP.NameServers | join ","}}] 96 | routes: 97 | - to: default 98 | via: {{ (index .Hardware.Interfaces 0).DHCP.IP.Gateway }} 99 | DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 1 }}' 100 | DEST_PATH: /etc/netplan/config.yaml 101 | DIRMODE: 
"0755" 102 | FS_TYPE: ext4 103 | GID: "0" 104 | MODE: "0600" 105 | UID: "0" 106 | - name: "add tink cloud-init ds-config" 107 | image: quay.io/tinkerbell/actions/writefile 108 | timeout: 90 109 | environment: 110 | DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 111 | FS_TYPE: ext4 112 | DEST_PATH: /etc/cloud/ds-identify.cfg 113 | UID: 0 114 | GID: 0 115 | MODE: 0600 116 | DIRMODE: 0700 117 | CONTENTS: | 118 | datasource: Ec2 119 | - name: "kexec image" 120 | image: ghcr.io/jacobweinstock/waitdaemon:0.2.1 121 | timeout: 90 122 | pid: host 123 | environment: 124 | BLOCK_DEVICE: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 125 | FS_TYPE: ext4 126 | IMAGE: quay.io/tinkerbell/actions/kexec 127 | WAIT_SECONDS: 5 128 | volumes: 129 | - /var/run/docker.sock:/var/run/docker.sock 130 | - target: 131 | group: infrastructure.cluster.x-k8s.io 132 | kind: TinkerbellMachineTemplate 133 | name: ".*worker.*" 134 | version: v1beta1 135 | patch: |- 136 | - op: add 137 | path: /spec/template/spec 138 | value: 139 | bootOptions: 140 | bootMode: iso 141 | isoURL: "http://$TINKERBELL_VIP:7171/iso/:macAddress/hook.iso" 142 | hardwareAffinity: 143 | required: 144 | - labelSelector: 145 | matchLabels: 146 | tinkerbell.org/role: worker 147 | templateOverride: | 148 | version: "0.1" 149 | name: playground-template 150 | global_timeout: 6000 151 | tasks: 152 | - name: "playground-template" 153 | worker: "{{.device_1}}" 154 | volumes: 155 | - /dev:/dev 156 | - /dev/console:/dev/console 157 | - /lib/firmware:/lib/firmware:ro 158 | actions: 159 | - name: "stream image" 160 | image: quay.io/tinkerbell/actions/oci2disk 161 | timeout: 1200 162 | environment: 163 | IMG_URL: $OS_REGISTRY/$OS_DISTRO-$OS_VERSION:$KUBE_VERSION.gz 164 | DEST_DISK: {{ index .Hardware.Disks 0 }} 165 | COMPRESSED: true 166 | - name: "add tink cloud-init config" 167 | image: quay.io/tinkerbell/actions/writefile 168 | timeout: 90 169 | environment: 170 | DEST_DISK: {{ formatPartition ( index 
.Hardware.Disks 0 ) 1 }} 171 | FS_TYPE: ext4 172 | DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg 173 | UID: 0 174 | GID: 0 175 | MODE: 0600 176 | DIRMODE: 0700 177 | CONTENTS: | 178 | datasource: 179 | Ec2: 180 | metadata_urls: ["http://$TINKERBELL_VIP:50061"] 181 | strict_id: false 182 | system_info: 183 | default_user: 184 | name: tink 185 | groups: [wheel, adm] 186 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 187 | shell: /bin/bash 188 | manage_etc_hosts: localhost 189 | warnings: 190 | dsid_missing_source: off 191 | - name: "disable cloud-init networking" 192 | image: quay.io/tinkerbell/actions/writefile 193 | timeout: 90 194 | environment: 195 | CONTENTS: 'network: {config: disabled}' 196 | DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 1 }}' 197 | DEST_PATH: /etc/cloud/cloud.cfg.d/99-disable-network-config.cfg 198 | DIRMODE: "0700" 199 | FS_TYPE: ext4 200 | GID: "0" 201 | MODE: "0600" 202 | UID: "0" 203 | - name: "create static netplan" 204 | image: quay.io/tinkerbell/actions/writefile 205 | timeout: 90 206 | environment: 207 | CONTENTS: | 208 | network: 209 | version: 2 210 | renderer: networkd 211 | ethernets: 212 | id0: 213 | match: 214 | macaddress: {{ (index .Hardware.Interfaces 0).DHCP.MAC }} 215 | addresses: 216 | - {{ (index .Hardware.Interfaces 0).DHCP.IP.Address }}/16 217 | nameservers: 218 | addresses: [{{ (index .Hardware.Interfaces 0).DHCP.NameServers | join ","}}] 219 | routes: 220 | - to: default 221 | via: {{ (index .Hardware.Interfaces 0).DHCP.IP.Gateway }} 222 | DEST_DISK: '{{ formatPartition ( index .Hardware.Disks 0 ) 1 }}' 223 | DEST_PATH: /etc/netplan/config.yaml 224 | DIRMODE: "0755" 225 | FS_TYPE: ext4 226 | GID: "0" 227 | MODE: "0600" 228 | UID: "0" 229 | - name: "add tink cloud-init ds-config" 230 | image: quay.io/tinkerbell/actions/writefile 231 | timeout: 90 232 | environment: 233 | DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 234 | FS_TYPE: ext4 235 | DEST_PATH: /etc/cloud/ds-identify.cfg 236 | UID: 0 237 
| GID: 0 238 | MODE: 0600 239 | DIRMODE: 0700 240 | CONTENTS: | 241 | datasource: Ec2 242 | - name: "kexec image" 243 | image: ghcr.io/jacobweinstock/waitdaemon:0.2.1 244 | timeout: 90 245 | pid: host 246 | environment: 247 | BLOCK_DEVICE: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 248 | FS_TYPE: ext4 249 | IMAGE: quay.io/tinkerbell/actions/kexec 250 | WAIT_SECONDS: 5 251 | volumes: 252 | - /var/run/docker.sock:/var/run/docker.sock 253 | - target: 254 | group: infrastructure.cluster.x-k8s.io 255 | kind: TinkerbellCluster 256 | name: ".*" 257 | version: v1beta1 258 | patch: |- 259 | - op: add 260 | path: /spec 261 | value: 262 | imageLookupBaseRegistry: "$OS_REGISTRY" 263 | imageLookupOSDistro: "$OS_DISTRO" 264 | imageLookupOSVersion: "$VERSIONS_OS" 265 | - target: 266 | group: bootstrap.cluster.x-k8s.io 267 | kind: KubeadmConfigTemplate 268 | name: "$CLUSTER_NAME-.*" 269 | version: v1beta1 270 | patch: |- 271 | - op: add 272 | path: /spec/template/spec/users 273 | value: 274 | - name: tink 275 | sudo: ALL=(ALL) NOPASSWD:ALL 276 | sshAuthorizedKeys: 277 | - $SSH_AUTH_KEY 278 | - target: 279 | group: controlplane.cluster.x-k8s.io 280 | kind: KubeadmControlPlane 281 | name: "$CLUSTER_NAME-.*" 282 | version: v1beta1 283 | patch: |- 284 | - op: add 285 | path: /spec/kubeadmConfigSpec/users 286 | value: 287 | - name: tink 288 | sudo: ALL=(ALL) NOPASSWD:ALL 289 | sshAuthorizedKeys: 290 | - $SSH_AUTH_KEY 291 | - target: 292 | group: controlplane.cluster.x-k8s.io 293 | kind: KubeadmControlPlane 294 | name: "$CLUSTER_NAME-.*" 295 | version: v1beta1 296 | patch: |- 297 | - op: add 298 | path: /spec/kubeadmConfigSpec/preKubeadmCommands 299 | value: 300 | - if [ $(cat /etc/kubernetes-version | awk -F. 
'{print $2}') -ge 29 ] && [ -f /run/kubeadm/kubeadm.yaml ]; then export KUBE_FILE=/etc/kubernetes/super-admin.conf; else export KUBE_FILE=/etc/kubernetes/admin.conf; fi && mkdir -p /etc/kubernetes/manifests && ctr images pull ghcr.io/kube-vip/kube-vip:v$KUBEVIP_VERSION && ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:v$KUBEVIP_VERSION vip /kube-vip manifest pod --arp --interface $(ip -4 -j route list default | jq -r .[0].dev) --address $CONTROL_PLANE_VIP --controlplane --leaderElection --k8sConfigPath $KUBE_FILE > /etc/kubernetes/manifests/kube-vip.yaml 301 | -------------------------------------------------------------------------------- /capt/templates/kustomization-netboot.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: $NAMESPACE 4 | resources: 5 | - prekustomization.yaml 6 | patches: 7 | - target: 8 | group: infrastructure.cluster.x-k8s.io 9 | kind: TinkerbellMachineTemplate 10 | name: ".*control-plane.*" 11 | version: v1beta1 12 | patch: |- 13 | - op: add 14 | path: /spec/template/spec 15 | value: 16 | bootOptions: 17 | bootMode: netboot 18 | hardwareAffinity: 19 | required: 20 | - labelSelector: 21 | matchLabels: 22 | tinkerbell.org/role: control-plane 23 | templateOverride: | 24 | version: "0.1" 25 | name: playground-template 26 | global_timeout: 6000 27 | tasks: 28 | - name: "playground-template" 29 | worker: "{{.device_1}}" 30 | volumes: 31 | - /dev:/dev 32 | - /dev/console:/dev/console 33 | - /lib/firmware:/lib/firmware:ro 34 | actions: 35 | - name: "stream image" 36 | image: quay.io/tinkerbell/actions/oci2disk 37 | timeout: 1200 38 | environment: 39 | IMG_URL: $OS_REGISTRY/$OS_DISTRO-$OS_VERSION:$KUBE_VERSION.gz 40 | DEST_DISK: {{ index .Hardware.Disks 0 }} 41 | COMPRESSED: true 42 | - name: "add tink cloud-init config" 43 | image: quay.io/tinkerbell/actions/writefile 44 | timeout: 90 45 | environment: 46 | DEST_DISK: {{ 
formatPartition ( index .Hardware.Disks 0 ) 1 }} 47 | FS_TYPE: ext4 48 | DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg 49 | UID: 0 50 | GID: 0 51 | MODE: 0600 52 | DIRMODE: 0700 53 | CONTENTS: | 54 | datasource: 55 | Ec2: 56 | metadata_urls: ["http://$TINKERBELL_VIP:50061"] 57 | strict_id: false 58 | system_info: 59 | default_user: 60 | name: tink 61 | groups: [wheel, adm] 62 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 63 | shell: /bin/bash 64 | manage_etc_hosts: localhost 65 | warnings: 66 | dsid_missing_source: off 67 | - name: "add tink cloud-init ds-config" 68 | image: quay.io/tinkerbell/actions/writefile 69 | timeout: 90 70 | environment: 71 | DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 72 | FS_TYPE: ext4 73 | DEST_PATH: /etc/cloud/ds-identify.cfg 74 | UID: 0 75 | GID: 0 76 | MODE: 0600 77 | DIRMODE: 0700 78 | CONTENTS: | 79 | datasource: Ec2 80 | - name: "kexec image" 81 | image: ghcr.io/jacobweinstock/waitdaemon:0.2.1 82 | timeout: 90 83 | pid: host 84 | environment: 85 | BLOCK_DEVICE: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 86 | FS_TYPE: ext4 87 | IMAGE: quay.io/tinkerbell/actions/kexec 88 | WAIT_SECONDS: 5 89 | volumes: 90 | - /var/run/docker.sock:/var/run/docker.sock 91 | - target: 92 | group: infrastructure.cluster.x-k8s.io 93 | kind: TinkerbellMachineTemplate 94 | name: ".*worker.*" 95 | version: v1beta1 96 | patch: |- 97 | - op: add 98 | path: /spec/template/spec 99 | value: 100 | bootOptions: 101 | bootMode: netboot 102 | hardwareAffinity: 103 | required: 104 | - labelSelector: 105 | matchLabels: 106 | tinkerbell.org/role: worker 107 | templateOverride: | 108 | version: "0.1" 109 | name: playground-template 110 | global_timeout: 6000 111 | tasks: 112 | - name: "playground-template" 113 | worker: "{{.device_1}}" 114 | volumes: 115 | - /dev:/dev 116 | - /dev/console:/dev/console 117 | - /lib/firmware:/lib/firmware:ro 118 | actions: 119 | - name: "stream image" 120 | image: quay.io/tinkerbell/actions/oci2disk 121 | timeout: 
1200 122 | environment: 123 | IMG_URL: $OS_REGISTRY/$OS_DISTRO-$OS_VERSION:$KUBE_VERSION.gz 124 | DEST_DISK: {{ index .Hardware.Disks 0 }} 125 | COMPRESSED: true 126 | - name: "add tink cloud-init config" 127 | image: quay.io/tinkerbell/actions/writefile 128 | timeout: 90 129 | environment: 130 | DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 131 | FS_TYPE: ext4 132 | DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg 133 | UID: 0 134 | GID: 0 135 | MODE: 0600 136 | DIRMODE: 0700 137 | CONTENTS: | 138 | datasource: 139 | Ec2: 140 | metadata_urls: ["http://$TINKERBELL_VIP:50061"] 141 | strict_id: false 142 | system_info: 143 | default_user: 144 | name: tink 145 | groups: [wheel, adm] 146 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 147 | shell: /bin/bash 148 | manage_etc_hosts: localhost 149 | warnings: 150 | dsid_missing_source: off 151 | - name: "add tink cloud-init ds-config" 152 | image: quay.io/tinkerbell/actions/writefile 153 | timeout: 90 154 | environment: 155 | DEST_DISK: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 156 | FS_TYPE: ext4 157 | DEST_PATH: /etc/cloud/ds-identify.cfg 158 | UID: 0 159 | GID: 0 160 | MODE: 0600 161 | DIRMODE: 0700 162 | CONTENTS: | 163 | datasource: Ec2 164 | - name: "kexec image" 165 | image: ghcr.io/jacobweinstock/waitdaemon:0.2.1 166 | timeout: 90 167 | pid: host 168 | environment: 169 | BLOCK_DEVICE: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 170 | FS_TYPE: ext4 171 | IMAGE: quay.io/tinkerbell/actions/kexec 172 | WAIT_SECONDS: 5 173 | volumes: 174 | - /var/run/docker.sock:/var/run/docker.sock 175 | - target: 176 | group: infrastructure.cluster.x-k8s.io 177 | kind: TinkerbellCluster 178 | name: ".*" 179 | version: v1beta1 180 | patch: |- 181 | - op: add 182 | path: /spec 183 | value: 184 | imageLookupBaseRegistry: "$OS_REGISTRY" 185 | imageLookupOSDistro: "$OS_DISTRO" 186 | imageLookupOSVersion: "$VERSIONS_OS" 187 | - target: 188 | group: bootstrap.cluster.x-k8s.io 189 | kind: KubeadmConfigTemplate 190 | 
name: "$CLUSTER_NAME-.*" 191 | version: v1beta1 192 | patch: |- 193 | - op: add 194 | path: /spec/template/spec/users 195 | value: 196 | - name: tink 197 | sudo: ALL=(ALL) NOPASSWD:ALL 198 | sshAuthorizedKeys: 199 | - $SSH_AUTH_KEY 200 | - target: 201 | group: controlplane.cluster.x-k8s.io 202 | kind: KubeadmControlPlane 203 | name: "$CLUSTER_NAME-.*" 204 | version: v1beta1 205 | patch: |- 206 | - op: add 207 | path: /spec/kubeadmConfigSpec/users 208 | value: 209 | - name: tink 210 | sudo: ALL=(ALL) NOPASSWD:ALL 211 | sshAuthorizedKeys: 212 | - $SSH_AUTH_KEY 213 | - target: 214 | group: controlplane.cluster.x-k8s.io 215 | kind: KubeadmControlPlane 216 | name: "$CLUSTER_NAME-.*" 217 | version: v1beta1 218 | patch: |- 219 | - op: add 220 | path: /spec/kubeadmConfigSpec/preKubeadmCommands 221 | value: 222 | - if [ $(cat /etc/kubernetes-version | awk -F. '{print $2}') -ge 29 ] && [ -f /run/kubeadm/kubeadm.yaml ]; then export KUBE_FILE=/etc/kubernetes/super-admin.conf; else export KUBE_FILE=/etc/kubernetes/admin.conf; fi && mkdir -p /etc/kubernetes/manifests && ctr images pull ghcr.io/kube-vip/kube-vip:v$KUBEVIP_VERSION && ctr run --rm --net-host ghcr.io/kube-vip/kube-vip:v$KUBEVIP_VERSION vip /kube-vip manifest pod --arp --interface $(ip -4 -j route list default | jq -r .[0].dev) --address $CONTROL_PLANE_VIP --controlplane --leaderElection --k8sConfigPath $KUBE_FILE > /etc/kubernetes/manifests/kube-vip.yaml 223 | -------------------------------------------------------------------------------- /contrib/tag-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit -o nounset -o pipefail 4 | 5 | if [ -z "${1-}" ]; then 6 | echo "Must specify new tag" 7 | exit 1 8 | fi 9 | 10 | new_tag=${1-} 11 | [[ $new_tag =~ ^v[0-9]*\.[0-9]*\.[0-9]*$ ]] || ( 12 | echo "Tag must be in the form of vX.Y.Z" 13 | exit 1 14 | ) 15 | 16 | if [[ $(git symbolic-ref HEAD) != refs/heads/main ]] && [[ -z 
${ALLOW_NON_MAIN-} ]]; then 17 | echo "Must be on main branch" >&2 18 | exit 1 19 | fi 20 | if [[ $(git describe --dirty) != $(git describe) ]]; then 21 | echo "Repo must be in a clean state" >&2 22 | exit 1 23 | fi 24 | 25 | git fetch --all 26 | 27 | last_tag=$(git describe --abbrev=0) 28 | last_tag_commit=$(git rev-list -n1 "$last_tag") 29 | last_specific_tag=$(git tag --contains="$last_tag_commit" | grep -E "^v[0-9]*\.[0-9]*\.[0-9]*$" | tail -n 1) 30 | last_specific_tag_commit=$(git rev-list -n1 "$last_specific_tag") 31 | if [[ $last_specific_tag_commit == $(git rev-list -n1 HEAD) ]]; then 32 | echo "No commits since last tag" >&2 33 | exit 1 34 | fi 35 | 36 | if [[ -n ${SIGN_TAG-} ]]; then 37 | git tag -s -m "${new_tag}" "${new_tag}" &>/dev/null && echo "created signed tag ${new_tag}" >&2 && exit 38 | else 39 | git tag -a -m "${new_tag}" "${new_tag}" &>/dev/null && echo "created annotated tag ${new_tag}" >&2 && exit 40 | fi 41 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let _pkgs = import <nixpkgs> { }; 2 | in { pkgs ? import (_pkgs.fetchFromGitHub { 3 | owner = "NixOS"; 4 | repo = "nixpkgs"; 5 | #branch@date: master@2022-06-02 6 | rev = "17e891b141ca8e599ebf6443d0870a67dd98f94f"; 7 | sha256 = "0qiyl04s4q0b3dhvyryz10hfdqhb2c7hk2lqn5llsb8lxsqj07l9"; 8 | }) { } }: 9 | 10 | with pkgs; 11 | 12 | mkShell { 13 | buildInputs = [ 14 | nodePackages.prettier 15 | jq 16 | shellcheck 17 | shfmt 18 | rufo 19 | ]; 20 | } 21 | -------------------------------------------------------------------------------- /stack/README.md: -------------------------------------------------------------------------------- 1 | ## Tinkerbell Stack Playground 2 | 3 | The following section contains the Tinkerbell stack playground instructions. It is not a production reference architecture.
4 | Please use the [Helm chart](https://github.com/tinkerbell/charts) for production deployments. 5 | 6 | ## Quick-Starts 7 | 8 | The following quick-start guides will walk you through standing up the Tinkerbell stack. 9 | There are a few options for this. 10 | Pick the one that works best for you. 11 | 12 | ## Options 13 | 14 | - [Vagrant and VirtualBox](docs/quickstarts/VAGRANTVBOX.md) 15 | - [Vagrant and Libvirt](docs/quickstarts/VAGRANTLVIRT.md) 16 | - [Kubernetes](docs/quickstarts/KUBERNETES.md) 17 | 18 | ## Next Steps 19 | 20 | By default the Vagrant quickstart guides automatically install Ubuntu on the VM (machine1). You can provide your own OS template. To do this: 21 | 22 | 1. Login to the stack VM 23 | 24 | ```bash 25 | vagrant ssh stack 26 | ``` 27 | 28 | 1. Add your template. An example Template object can be found [here](https://github.com/tinkerbell/tink/tree/main/config/crd/examples/template.yaml) and more Template documentation can be found [here](https://tinkerbell.org/docs/concepts/templates/). 29 | 30 | ```bash 31 | kubectl apply -f my-OS-template.yaml 32 | ``` 33 | 34 | 1. Create the workflow. An example Workflow object can be found [here](https://github.com/tinkerbell/tink/tree/main/config/crd/examples/workflow.yaml). 35 | 36 | ```bash 37 | kubectl apply -f my-custom-workflow.yaml 38 | ``` 39 | 40 | 1. Restart the machine to provision (if using the vagrant playground test machine this is done by running `vagrant destroy -f machine1 && vagrant up machine1`) 41 | -------------------------------------------------------------------------------- /stack/docs/quickstarts/KUBERNETES.md: -------------------------------------------------------------------------------- 1 | # Quick start guide for Kubernetes 2 | 3 | This option will walk through creating a light weight Kubernetes cluster, after which you will be able to deploy the Tinkerbell stack via the Helm chart, and then provision a machine. 
4 | You will need to bring your own hardware (machine) for this guide. 5 | 6 | ## Prerequisites 7 | 8 | - [Docker](https://docs.docker.com/get-docker/) 9 | - [K3D](https://k3d.io/#installation) 10 | - [Kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 11 | - [Helm](https://helm.sh/docs/intro/install/) 12 | - A machine to provision 13 | 14 | ## Steps 15 | 16 | 1. Create a Kubernetes cluster with K3D 17 | 18 | ```bash 19 | k3d cluster create --network host --no-lb --k3s-arg "--disable=traefik,servicelb,metrics-server,local-storage" 20 | # `--network host` : host network is used to allow the load balancer to advertise a layer 2 address. 21 | # `--no-lb` : the K3D built in load balancer is disabled so we don't have conflicts with the stack load balancer. 22 | # `--k3s-arg "--disable=traefik,servicelb,metrics-server,local-storage"` : disable the built in K3S load balancer, metrics server, and local storage. 23 | ``` 24 | 25 | 1. Install the Tinkerbell stack Helm chart 26 | 27 | ```bash 28 | trusted_proxies=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ',') 29 | LB_IP= 30 | STACK_CHART_VERSION=0.6.2 31 | helm install tink-stack oci://ghcr.io/tinkerbell/charts/stack --version "$STACK_CHART_VERSION" --create-namespace --namespace tink-system --wait --set "global.trustedProxies={${trusted_proxies}}" --set "global.publicIP=$LB_IP" 32 | ``` 33 | 34 | > These instructions above should be checked against the Charts repo before using. See the [README.md](https://github.com/tinkerbell/charts/tree/main/tinkerbell/stack) in the Helm chart repository for more information on how to use the Helm chart. 35 | 36 | 1. Verify the stack is up and running 37 | 38 | ```bash 39 | kubectl get pods -n tink-system # verify all pods are running 40 | kubectl get svc -n tink-system # Verify the tink-stack service has the IP you specified with $LB_IP under the EXTERNAL-IP column 41 | ``` 42 | 43 | 1.
Download and convert a cloud image to a raw image 44 | 45 | ```bash 46 | kubectl apply -n tink-system -f https://raw.githubusercontent.com/tinkerbell/playground/main/vagrant/ubuntu-download.yaml 47 | # This will download and convert the Ubuntu Jammy 22.04 cloud image. 48 | ``` 49 | 50 | 1. Create and/or customize Hardware, Template, and Workflow objects and apply them to the cluster. You can use the Hardware, Template, and Workflow in this repo, in the `vagrant/` directory, as a base from which to start. 51 | 52 | ```bash 53 | kubectl apply -n tink-system -f my-hardware.yaml 54 | kubectl apply -n tink-system -f my-template.yaml 55 | kubectl apply -n tink-system -f my-workflow.yaml 56 | ``` 57 | 58 | 1. Start the machine provision process by rebooting, into a netbooting state, the machine you have specified in the Hardware object above. 59 | 60 | 1. Watch the progress of the workflow. 61 | 62 | ```bash 63 | kubectl get workflow -n tink-system --watch 64 | # Once the workflow is state is `STATE_SUCCESS`, you can login to the machine via the console or via SSH. 65 | ``` 66 | -------------------------------------------------------------------------------- /stack/docs/quickstarts/VAGRANTLVIRT.md: -------------------------------------------------------------------------------- 1 | # Quick start guide for Vagrant and Libvirt 2 | 3 | This option will create the stack in a Libvirt VM using Vagrant. 4 | This option will also create a VM and provision an OS onto it. 5 | 6 | ## Prerequisites 7 | 8 | - [Vagrant](https://www.vagrantup.com/downloads) is installed 9 | - [Libvirt](https://ubuntu.com/server/docs/virtualization-libvirt) is installed 10 | - Vagrant Libvirt plugin is installed: `vagrant plugin install vagrant-libvirt` 11 | - A connection to the public internet (air gapped and proxied environments are not supported) 12 | 13 | ## Steps 14 | 15 | 1. 
Clone this repository 16 | 17 | ```bash 18 | git clone https://github.com/tinkerbell/playground.git 19 | cd playground 20 | ``` 21 | 22 | 1. Start the stack 23 | 24 | ```bash 25 | cd stack/vagrant 26 | vagrant up 27 | # This process will take about 5-10 minutes depending on your internet connection. 28 | # Hook is about 400MB in size and the Ubuntu jammy image is about 500MB 29 | ``` 30 | 31 |
32 | expected output 33 | 34 | ```bash 35 | Bringing machine 'stack' up with 'libvirt' provider... 36 | ==> stack: Checking if box 'generic/ubuntu2204' version '4.3.4' is up to date... 37 | ==> stack: Creating image (snapshot of base box volume). 38 | ==> stack: Creating domain with the following settings... 39 | ==> stack: -- Name: vagrant_stack 40 | ==> stack: -- Description: Source: /home/tink/repos/tinkerbell/playground/vagrant/Vagrantfile 41 | ==> stack: -- Domain type: kvm 42 | ==> stack: -- Cpus: 2 43 | ==> stack: -- Feature: acpi 44 | ==> stack: -- Feature: apic 45 | ==> stack: -- Feature: pae 46 | ==> stack: -- Clock offset: utc 47 | ==> stack: -- Memory: 2048M 48 | ==> stack: -- Base box: generic/ubuntu2204 49 | ==> stack: -- Storage pool: default 50 | ==> stack: -- Image(vda): /var/lib/libvirt/images/vagrant_stack.img, virtio, 128G 51 | ==> stack: -- Disk driver opts: cache='default' 52 | ==> stack: -- Graphics Type: vnc 53 | ==> stack: -- Video Type: cirrus 54 | ==> stack: -- Video VRAM: 256 55 | ==> stack: -- Video 3D accel: false 56 | ==> stack: -- Keymap: en-us 57 | ==> stack: -- TPM Backend: passthrough 58 | ==> stack: -- INPUT: type=mouse, bus=ps2 59 | ==> stack: Creating shared folders metadata... 60 | ==> stack: Starting domain. 61 | ==> stack: Domain launching with graphics connection settings... 62 | ==> stack: -- Graphics Port: 5900 63 | ==> stack: -- Graphics IP: 127.0.0.1 64 | ==> stack: -- Graphics Password: Not defined 65 | ==> stack: -- Graphics Websocket: 5700 66 | ==> stack: Waiting for domain to get an IP address... 67 | ==> stack: Waiting for machine to boot. This may take a few minutes... 68 | stack: SSH address: 192.168.121.127:22 69 | stack: SSH username: vagrant 70 | stack: SSH auth method: private key 71 | stack: Warning: Connection refused. Retrying... 72 | stack: Warning: Connection refused. Retrying... 73 | stack: Warning: Connection refused. Retrying... 74 | stack: Warning: Connection refused. Retrying... 
75 | stack: Warning: Connection refused. Retrying... 76 | stack: Warning: Connection refused. Retrying... 77 | stack: Warning: Connection refused. Retrying... 78 | stack: Warning: Connection refused. Retrying... 79 | stack: Warning: Connection refused. Retrying... 80 | stack: Warning: Connection refused. Retrying... 81 | stack: Warning: Connection refused. Retrying... 82 | stack: Warning: Connection refused. Retrying... 83 | stack: 84 | stack: Vagrant insecure key detected. Vagrant will automatically replace 85 | stack: this with a newly generated keypair for better security. 86 | stack: 87 | stack: Inserting generated public key within guest... 88 | stack: Removing insecure key from the guest if it's present... 89 | stack: Key inserted! Disconnecting and reconnecting using new SSH key... 90 | ==> stack: Machine booted and ready! 91 | ==> stack: Rsyncing folder: /home/tink/repos/tinkerbell/playground/vagrant/ => /playground/stack 92 | ==> stack: Configuring and enabling network interfaces... 93 | ==> stack: Running provisioner: shell... 
94 | stack: Running: /tmp/vagrant-shell20231031-285946-1krhzm0.sh 95 | stack: + main 192.168.56.4 192.168.56.43 08:00:27:9e:f5:3a /playground/stack/ 192.168.56.5 0.4.2 eth1 1.28.3 v5.6.0 '' 96 | stack: + local host_ip=192.168.56.4 97 | stack: + local worker_ip=192.168.56.43 98 | stack: + local worker_mac=08:00:27:9e:f5:3a 99 | stack: + local manifests_dir=/playground/stack/ 100 | stack: + local loadbalancer_ip=192.168.56.5 101 | stack: + local helm_chart_version=0.4.2 102 | stack: + local loadbalancer_interface=eth1 103 | stack: + local kubectl_version=1.28.3 104 | stack: + local k3d_version=v5.6.0 105 | stack: + update_apt 106 | stack: + apt-get update 107 | stack: + DEBIAN_FRONTEND=noninteractive 108 | stack: + command apt-get --allow-change-held-packages --allow-downgrades --allow-remove-essential --allow-unauthenticated --option Dpkg::Options::=--force-confdef --option Dpkg::Options::=--force-confold --yes update 109 | stack: Hit:1 https://mirrors.edge.kernel.org/ubuntu jammy InRelease 110 | stack: Get:2 https://mirrors.edge.kernel.org/ubuntu jammy-updates InRelease [119 kB] 111 | stack: Get:3 https://mirrors.edge.kernel.org/ubuntu jammy-backports InRelease [109 kB] 112 | stack: Get:4 https://mirrors.edge.kernel.org/ubuntu jammy-security InRelease [110 kB] 113 | stack: Get:5 https://mirrors.edge.kernel.org/ubuntu jammy-updates/main amd64 Packages [1,148 kB] 114 | stack: Get:6 https://mirrors.edge.kernel.org/ubuntu jammy-updates/main Translation-en [245 kB] 115 | stack: Get:7 https://mirrors.edge.kernel.org/ubuntu jammy-updates/main amd64 c-n-f Metadata [16.1 kB] 116 | stack: Get:8 https://mirrors.edge.kernel.org/ubuntu jammy-updates/restricted amd64 Packages [1,103 kB] 117 | stack: Get:9 https://mirrors.edge.kernel.org/ubuntu jammy-updates/restricted Translation-en [179 kB] 118 | stack: Get:10 https://mirrors.edge.kernel.org/ubuntu jammy-updates/restricted amd64 c-n-f Metadata [536 B] 119 | stack: Get:11 https://mirrors.edge.kernel.org/ubuntu 
jammy-updates/universe amd64 Packages [998 kB] 120 | stack: Get:12 https://mirrors.edge.kernel.org/ubuntu jammy-updates/universe Translation-en [218 kB] 121 | stack: Get:13 https://mirrors.edge.kernel.org/ubuntu jammy-updates/universe amd64 c-n-f Metadata [22.0 kB] 122 | stack: Get:14 https://mirrors.edge.kernel.org/ubuntu jammy-backports/main amd64 Packages [64.2 kB] 123 | stack: Get:15 https://mirrors.edge.kernel.org/ubuntu jammy-backports/main amd64 c-n-f Metadata [388 B] 124 | stack: Get:16 https://mirrors.edge.kernel.org/ubuntu jammy-backports/universe amd64 Packages [27.8 kB] 125 | stack: Get:17 https://mirrors.edge.kernel.org/ubuntu jammy-backports/universe amd64 c-n-f Metadata [644 B] 126 | stack: Get:18 https://mirrors.edge.kernel.org/ubuntu jammy-security/main amd64 Packages [938 kB] 127 | stack: Get:19 https://mirrors.edge.kernel.org/ubuntu jammy-security/main Translation-en [185 kB] 128 | stack: Get:20 https://mirrors.edge.kernel.org/ubuntu jammy-security/main amd64 c-n-f Metadata [11.4 kB] 129 | stack: Get:21 https://mirrors.edge.kernel.org/ubuntu jammy-security/restricted amd64 Packages [1,079 kB] 130 | stack: Get:22 https://mirrors.edge.kernel.org/ubuntu jammy-security/restricted Translation-en [175 kB] 131 | stack: Get:23 https://mirrors.edge.kernel.org/ubuntu jammy-security/restricted amd64 c-n-f Metadata [536 B] 132 | stack: Get:24 https://mirrors.edge.kernel.org/ubuntu jammy-security/universe amd64 Packages [796 kB] 133 | stack: Get:25 https://mirrors.edge.kernel.org/ubuntu jammy-security/universe Translation-en [146 kB] 134 | stack: Get:26 https://mirrors.edge.kernel.org/ubuntu jammy-security/universe amd64 c-n-f Metadata [16.8 kB] 135 | stack: Fetched 7,709 kB in 2s (4,266 kB/s) 136 | stack: Reading package lists... 137 | stack: + install_docker 138 | stack: + curl -fsSL https://download.docker.com/linux/ubuntu/gpg 139 | stack: + sudo apt-key add - 140 | stack: Warning: apt-key is deprecated. 
Manage keyring files in trusted.gpg.d instead (see apt-key(8)). 141 | stack: OK 142 | stack: ++ lsb_release -cs 143 | stack: + add-apt-repository 'deb https://download.docker.com/linux/ubuntu jammy stable' 144 | stack: Get:1 https://download.docker.com/linux/ubuntu jammy InRelease [48.8 kB] 145 | stack: Get:2 https://download.docker.com/linux/ubuntu jammy/stable amd64 Packages [22.7 kB] 146 | stack: Hit:3 https://mirrors.edge.kernel.org/ubuntu jammy InRelease 147 | stack: Hit:4 https://mirrors.edge.kernel.org/ubuntu jammy-updates InRelease 148 | stack: Hit:5 https://mirrors.edge.kernel.org/ubuntu jammy-backports InRelease 149 | stack: Hit:6 https://mirrors.edge.kernel.org/ubuntu jammy-security InRelease 150 | stack: Fetched 71.5 kB in 6s (11.8 kB/s) 151 | stack: Reading package lists... 152 | stack: W: https://download.docker.com/linux/ubuntu/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details. 153 | stack: Repository: 'deb https://download.docker.com/linux/ubuntu jammy stable' 154 | stack: Description: 155 | stack: Archive for codename: jammy components: stable 156 | stack: More info: https://download.docker.com/linux/ubuntu 157 | stack: Adding repository. 
158 | stack: Adding deb entry to /etc/apt/sources.list.d/archive_uri-https_download_docker_com_linux_ubuntu-jammy.list 159 | stack: Adding disabled deb-src entry to /etc/apt/sources.list.d/archive_uri-https_download_docker_com_linux_ubuntu-jammy.list 160 | stack: + update_apt 161 | stack: + apt-get update 162 | stack: + DEBIAN_FRONTEND=noninteractive 163 | stack: + command apt-get --allow-change-held-packages --allow-downgrades --allow-remove-essential --allow-unauthenticated --option Dpkg::Options::=--force-confdef --option Dpkg::Options::=--force-confold --yes update 164 | stack: Hit:1 https://mirrors.edge.kernel.org/ubuntu jammy InRelease 165 | stack: Hit:2 https://mirrors.edge.kernel.org/ubuntu jammy-updates InRelease 166 | stack: Hit:3 https://mirrors.edge.kernel.org/ubuntu jammy-backports InRelease 167 | stack: Hit:4 https://mirrors.edge.kernel.org/ubuntu jammy-security InRelease 168 | stack: Hit:5 https://download.docker.com/linux/ubuntu jammy InRelease 169 | stack: Reading package lists... 170 | stack: W: https://download.docker.com/linux/ubuntu/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details. 171 | stack: + apt-get install --no-install-recommends containerd.io docker-ce docker-ce-cli 172 | stack: + DEBIAN_FRONTEND=noninteractive 173 | stack: + command apt-get --allow-change-held-packages --allow-downgrades --allow-remove-essential --allow-unauthenticated --option Dpkg::Options::=--force-confdef --option Dpkg::Options::=--force-confold --yes install --no-install-recommends containerd.io docker-ce docker-ce-cli 174 | stack: Reading package lists... 175 | stack: Building dependency tree... 176 | stack: Reading state information... 
177 | stack: Suggested packages: 178 | stack: aufs-tools cgroupfs-mount | cgroup-lite 179 | stack: Recommended packages: 180 | stack: docker-ce-rootless-extras libltdl7 pigz docker-buildx-plugin 181 | stack: docker-compose-plugin 182 | stack: The following NEW packages will be installed: 183 | stack: containerd.io docker-ce docker-ce-cli 184 | stack: 0 upgraded, 3 newly installed, 0 to remove and 29 not upgraded. 185 | stack: Need to get 64.5 MB of archives. 186 | stack: After this operation, 249 MB of additional disk space will be used. 187 | stack: Get:1 https://download.docker.com/linux/ubuntu jammy/stable amd64 containerd.io amd64 1.6.24-1 [28.6 MB] 188 | stack: Get:2 https://download.docker.com/linux/ubuntu jammy/stable amd64 docker-ce-cli amd64 5:24.0.7-1~ubuntu.22.04~jammy [13.3 MB] 189 | stack: Get:3 https://download.docker.com/linux/ubuntu jammy/stable amd64 docker-ce amd64 5:24.0.7-1~ubuntu.22.04~jammy [22.6 MB] 190 | stack: Fetched 64.5 MB in 1s (77.3 MB/s) 191 | stack: Selecting previously unselected package containerd.io. 192 | (Reading database ... 76025 files and directories currently installed.) 193 | stack: Preparing to unpack .../containerd.io_1.6.24-1_amd64.deb ... 194 | stack: Unpacking containerd.io (1.6.24-1) ... 195 | stack: Selecting previously unselected package docker-ce-cli. 196 | stack: Preparing to unpack .../docker-ce-cli_5%3a24.0.7-1~ubuntu.22.04~jammy_amd64.deb ... 197 | stack: Unpacking docker-ce-cli (5:24.0.7-1~ubuntu.22.04~jammy) ... 198 | stack: Selecting previously unselected package docker-ce. 199 | stack: Preparing to unpack .../docker-ce_5%3a24.0.7-1~ubuntu.22.04~jammy_amd64.deb ... 200 | stack: Unpacking docker-ce (5:24.0.7-1~ubuntu.22.04~jammy) ... 201 | stack: Setting up containerd.io (1.6.24-1) ... 202 | stack: Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. 203 | stack: Setting up docker-ce-cli (5:24.0.7-1~ubuntu.22.04~jammy) ... 
204 | stack: Setting up docker-ce (5:24.0.7-1~ubuntu.22.04~jammy) ... 205 | stack: Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. 206 | stack: Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. 207 | stack: Processing triggers for man-db (2.10.2-1) ... 208 | stack: NEEDRESTART-VER: 3.5 209 | stack: NEEDRESTART-KCUR: 5.15.0-86-generic 210 | stack: NEEDRESTART-KEXP: 5.15.0-86-generic 211 | stack: NEEDRESTART-KSTA: 1 212 | stack: + gpasswd -a vagrant docker 213 | stack: Adding user vagrant to group docker 214 | stack: + sudo ethtool -K eth1 tx off sg off tso off 215 | stack: Actual changes: 216 | stack: tx-scatter-gather: off 217 | stack: tx-checksum-ip-generic: off 218 | stack: tx-generic-segmentation: off [not requested] 219 | stack: tx-tcp-segmentation: off 220 | stack: tx-tcp-ecn-segmentation: off 221 | stack: tx-tcp6-segmentation: off 222 | stack: + install_kubectl 1.28.3 223 | stack: + local kubectl_version=1.28.3 224 | stack: + curl -LO https://dl.k8s.io/v1.28.3/bin/linux/amd64/kubectl 225 | stack: % Total % Received % Xferd Average Speed Time Time Time Current 226 | stack: Dload Upload Total Spent Left Speed 227 | 100 138 100 138 0 0 410 0 --:--:-- --:--:-- --:--:-- 410 228 | 100 47.5M 100 47.5M 0 0 24.8M 0 0:00:01 0:00:01 --:--:-- 37.9M 229 | stack: + chmod +x ./kubectl 230 | stack: + mv ./kubectl /usr/local/bin/kubectl 231 | stack: + run_helm 192.168.56.4 192.168.56.43 08:00:27:9e:f5:3a /playground/stack/ 192.168.56.5 0.4.2 eth1 v5.6.0 232 | stack: + local host_ip=192.168.56.4 233 | stack: + local worker_ip=192.168.56.43 234 | stack: + local worker_mac=08:00:27:9e:f5:3a 235 | stack: + local manifests_dir=/playground/stack/ 236 | stack: + local loadbalancer_ip=192.168.56.5 237 | stack: + local helm_chart_version=0.4.2 238 | stack: + local loadbalancer_interface=eth1 239 | stack: + local k3d_version=v5.6.0 240 | stack: + local 
namespace=tink-system 241 | stack: + install_k3d v5.6.0 242 | stack: + local k3d_Version=v5.6.0 243 | stack: + wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh 244 | stack: + TAG=v5.6.0 245 | stack: + bash 246 | stack: Preparing to install k3d into /usr/local/bin 247 | stack: k3d installed into /usr/local/bin/k3d 248 | stack: Run 'k3d --help' to see what you can do with it. 249 | stack: + start_k3d 250 | stack: + k3d cluster create --network host --no-lb --k3s-arg --disable=traefik,servicelb --k3s-arg --kube-apiserver-arg=feature-gates=MixedProtocolLBService=true --host-pid-mode 251 | stack: INFO[0000] [SimpleConfig] Hostnetwork selected - disabling injection of docker host into the cluster, server load balancer and setting the api port to the k3s default 252 | stack: WARN[0000] No node filter specified 253 | stack: WARN[0000] No node filter specified 254 | stack: INFO[0000] [ClusterConfig] Hostnetwork selected - disabling injection of docker host into the cluster, server load balancer and setting the api port to the k3s default 255 | stack: INFO[0000] Prep: Network 256 | stack: INFO[0000] Re-using existing network 'host' (2ecf52da28c15a6bbe026b5e71f3af288fefbbb222b2762bafc29e9b1791ff8b) 257 | stack: INFO[0000] Created image volume k3d-k3s-default-images 258 | stack: INFO[0000] Starting new tools node... 259 | stack: INFO[0001] Creating node 'k3d-k3s-default-server-0' 260 | stack: INFO[0001] Pulling image 'ghcr.io/k3d-io/k3d-tools:5.6.0' 261 | stack: INFO[0002] Pulling image 'docker.io/rancher/k3s:v1.27.4-k3s1' 262 | stack: INFO[0003] Starting Node 'k3d-k3s-default-tools' 263 | stack: INFO[0010] Using the k3d-tools node to gather environment information 264 | stack: INFO[0011] Starting cluster 'k3s-default' 265 | stack: INFO[0011] Starting servers... 266 | stack: INFO[0011] Starting Node 'k3d-k3s-default-server-0' 267 | stack: INFO[0014] All agents already running. 268 | stack: INFO[0014] All helpers already running. 
269 | stack: INFO[0014] Cluster 'k3s-default' created successfully! 270 | stack: INFO[0014] You can now use it like this: 271 | stack: kubectl cluster-info 272 | stack: + mkdir -p /root/.kube/ 273 | stack: + k3d kubeconfig get -a 274 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 275 | stack: error: no matching resources found 276 | stack: + sleep 1 277 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 278 | stack: error: no matching resources found 279 | stack: + sleep 1 280 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 281 | stack: error: no matching resources found 282 | stack: + sleep 1 283 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 284 | stack: error: no matching resources found 285 | stack: + sleep 1 286 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 287 | stack: error: no matching resources found 288 | stack: + sleep 1 289 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 290 | stack: node/k3d-k3s-default-server-0 condition met 291 | stack: + install_helm 292 | stack: + helm_ver=v3.9.4 293 | stack: + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 294 | stack: + chmod 700 get_helm.sh 295 | stack: + ./get_helm.sh --version v3.9.4 296 | stack: Downloading https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz 297 | stack: Verifying checksum... Done. 
298 | stack: Preparing to install helm into /usr/local/bin 299 | stack: helm installed into /usr/local/bin/helm 300 | stack: + helm_install_tink_stack tink-system 0.4.2 eth1 192.168.56.5 301 | stack: + local namespace=tink-system 302 | stack: + local version=0.4.2 303 | stack: + local interface=eth1 304 | stack: + local loadbalancer_ip=192.168.56.5 305 | stack: + trusted_proxies= 306 | stack: + '[' '' '!=' '' ']' 307 | stack: ++ kubectl get nodes -o 'jsonpath={.items[*].spec.podCIDR}' 308 | stack: ++ tr ' ' , 309 | stack: + trusted_proxies= 310 | stack: + '[' '' '!=' '' ']' 311 | stack: ++ kubectl get nodes -o 'jsonpath={.items[*].spec.podCIDR}' 312 | stack: ++ tr ' ' , 313 | stack: + trusted_proxies=10.42.0.0/24 314 | stack: + '[' 10.42.0.0/24 '!=' '' ']' 315 | stack: + helm install tink-stack oci://ghcr.io/tinkerbell/charts/stack --version 0.4.2 --create-namespace --namespace tink-system --wait --set 'smee.trustedProxies={10.42.0.0/24}' --set 'hegel.trustedProxies={10.42.0.0/24}' --set stack.kubevip.interface=eth1 --set stack.relay.sourceInterface=eth1 --set stack.loadBalancerIP=192.168.56.5 --set smee.publicIP=192.168.56.5 316 | stack: NAME: tink-stack 317 | stack: LAST DEPLOYED: Tue Oct 31 21:56:58 2023 318 | stack: NAMESPACE: tink-system 319 | stack: STATUS: deployed 320 | stack: REVISION: 1 321 | stack: TEST SUITE: None 322 | stack: + apply_manifests 192.168.56.43 08:00:27:9e:f5:3a /playground/stack/ 192.168.56.5 tink-system 323 | stack: + local worker_ip=192.168.56.43 324 | stack: + local worker_mac=08:00:27:9e:f5:3a 325 | stack: + local manifests_dir=/playground/stack/ 326 | stack: + local host_ip=192.168.56.5 327 | stack: + local namespace=tink-system 328 | stack: + disk_device=/dev/sda 329 | stack: + lsblk 330 | stack: + grep -q vda 331 | stack: + disk_device=/dev/vda 332 | stack: + export DISK_DEVICE=/dev/vda 333 | stack: + DISK_DEVICE=/dev/vda 334 | stack: + export TINKERBELL_CLIENT_IP=192.168.56.43 335 | stack: + TINKERBELL_CLIENT_IP=192.168.56.43 336 
| stack: + export TINKERBELL_CLIENT_MAC=08:00:27:9e:f5:3a 337 | stack: + TINKERBELL_CLIENT_MAC=08:00:27:9e:f5:3a 338 | stack: + export TINKERBELL_HOST_IP=192.168.56.5 339 | stack: + TINKERBELL_HOST_IP=192.168.56.5 340 | stack: + for i in "$manifests_dir"/{hardware.yaml,template.yaml,workflow.yaml} 341 | stack: + envsubst 342 | stack: + echo -e --- 343 | stack: + for i in "$manifests_dir"/{hardware.yaml,template.yaml,workflow.yaml} 344 | stack: + envsubst 345 | stack: + echo -e --- 346 | stack: + for i in "$manifests_dir"/{hardware.yaml,template.yaml,workflow.yaml} 347 | stack: + envsubst 348 | stack: + echo -e --- 349 | stack: + kubectl apply -n tink-system -f /tmp/manifests.yaml 350 | stack: hardware.tinkerbell.org/machine1 created 351 | stack: template.tinkerbell.org/ubuntu-jammy created 352 | stack: workflow.tinkerbell.org/playground-workflow created 353 | stack: + kubectl apply -n tink-system -f /playground/stack//ubuntu-download.yaml 354 | stack: configmap/download-image created 355 | stack: job.batch/download-ubuntu-jammy created 356 | stack: + kubectl_for_vagrant_user 357 | stack: + runuser -l vagrant -c 'mkdir -p ~/.kube/' 358 | stack: + runuser -l vagrant -c 'k3d kubeconfig get -a > ~/.kube/config' 359 | stack: + chmod 600 /home/vagrant/.kube/config 360 | stack: + echo 'export KUBECONFIG="/home/vagrant/.kube/config"' 361 | stack: all done! 362 | stack: + echo 'all done!' 363 | ``` 364 | 365 |
366 | 367 | 1. Wait for HookOS and the Ubuntu image to be downloaded 368 | 369 | ```bash 370 | vagrant ssh stack 371 | kubectl get jobs -n tink-system --watch 372 | exit 373 | # There are 2 Kubernetes jobs that run to download HookOS and the Ubuntu image. 374 | # Once both jobs are complete, exit the stack VM. 375 | ``` 376 | 377 |
378 | example output 379 | 380 | ```bash 381 | NAME COMPLETIONS DURATION AGE 382 | download-hook 1/1 27s 72s 383 | download-ubuntu-jammy 0/1 49s 49s 384 | download-ubuntu-jammy 0/1 70s 70s 385 | download-ubuntu-jammy 0/1 72s 72s 386 | download-ubuntu-jammy 1/1 72s 72s 387 | ``` 388 | 389 |
390 | 391 | 1. Start the machine to be provisioned 392 | 393 | ```bash 394 | vagrant up machine1 395 | # This will start a VM that will PXE boot. 396 | # The `vagrant up machine1` command will exit quickly and show the following message. This is expected. 397 | # Once command-line control is returned to you, you can move on to the next step. 398 | ``` 399 | 400 |
401 | expected output 402 | 403 | ```bash 404 | Bringing machine 'machine1' up with 'libvirt' provider... 405 | ==> machine1: Creating domain with the following settings... 406 | ==> machine1: -- Name: vagrant_machine1 407 | ==> machine1: -- Description: Source: /home/tink/repos/tinkerbell/playground/vagrant/Vagrantfile 408 | ==> machine1: -- Domain type: kvm 409 | ==> machine1: -- Cpus: 2 410 | ==> machine1: -- Feature: acpi 411 | ==> machine1: -- Feature: apic 412 | ==> machine1: -- Feature: pae 413 | ==> machine1: -- Clock offset: utc 414 | ==> machine1: -- Memory: 4096M 415 | ==> machine1: -- Storage pool: default 416 | ==> machine1: -- Disk driver opts: cache='default' 417 | ==> machine1: -- Graphics Type: vnc 418 | ==> machine1: -- Video Type: cirrus 419 | ==> machine1: -- Video VRAM: 16384 420 | ==> machine1: -- Video 3D accel: false 421 | ==> machine1: -- Keymap: en-us 422 | ==> machine1: -- TPM Backend: passthrough 423 | ==> machine1: -- Boot device: hd 424 | ==> machine1: -- Boot device: network 425 | ==> machine1: -- Disk(vda): /var/lib/libvirt/images/vagrant_machine1-vda.qcow2, virtio, 20G 426 | ==> machine1: -- INPUT: type=mouse, bus=ps2 427 | ==> machine1: Starting domain. 428 | ==> machine1: Domain launching with graphics connection settings... 429 | ==> machine1: -- Graphics Port: 5901 430 | ==> machine1: -- Graphics IP: 0.0.0.0 431 | ==> machine1: -- Graphics Password: Not defined 432 | ==> machine1: -- Graphics Websocket: 5701 433 | ``` 434 | 435 |
435 | 436 | 1. Watch the provisioning complete 437 | 438 | ```bash 439 | # log in to the stack VM 440 | vagrant ssh stack 441 | 442 | # watch for the workflow to complete 443 | # once the workflow is complete (see the expected output below for completion), move on to the next step 444 | kubectl get -n tink-system workflow playground-workflow --watch 445 | ``` 446 | 447 |
449 | expected output 450 | 451 | ```bash 452 | NAME TEMPLATE STATE 453 | playground-workflow ubuntu-jammy STATE_PENDING 454 | playground-workflow ubuntu-jammy STATE_RUNNING 455 | playground-workflow ubuntu-jammy STATE_RUNNING 456 | playground-workflow ubuntu-jammy STATE_RUNNING 457 | playground-workflow ubuntu-jammy STATE_RUNNING 458 | playground-workflow ubuntu-jammy STATE_RUNNING 459 | playground-workflow ubuntu-jammy STATE_RUNNING 460 | playground-workflow ubuntu-jammy STATE_RUNNING 461 | playground-workflow ubuntu-jammy STATE_RUNNING 462 | playground-workflow ubuntu-jammy STATE_RUNNING 463 | playground-workflow ubuntu-jammy STATE_RUNNING 464 | playground-workflow ubuntu-jammy STATE_RUNNING 465 | playground-workflow ubuntu-jammy STATE_RUNNING 466 | playground-workflow ubuntu-jammy STATE_RUNNING 467 | playground-workflow ubuntu-jammy STATE_SUCCESS 468 | ``` 469 | 470 |
471 | 472 | 1. Log in to the machine 473 | 474 | The machine has been provisioned with Ubuntu. 475 | You can now SSH into the machine. 476 | 477 | ```bash 478 | ssh tink@192.168.56.43 # user/pass => tink/tink 479 | ``` 480 | -------------------------------------------------------------------------------- /stack/docs/quickstarts/VAGRANTVBOX.md: -------------------------------------------------------------------------------- 1 | # Quick start guide for Vagrant and VirtualBox 2 | 3 | This option will create the stack in a VirtualBox VM using Vagrant. 4 | This option will also create a VM and provision an OS onto it. 5 | 6 | ## Prerequisites 7 | 8 | - [Vagrant](https://www.vagrantup.com/downloads) is installed 9 | - [VirtualBox](https://www.virtualbox.org/) is installed 10 | - A connection to the public internet (air gapped and proxied environments are not supported) 11 | 12 | ## Steps 13 | 14 | 1. Clone this repository 15 | 16 | ```bash 17 | git clone https://github.com/tinkerbell/playground.git 18 | cd playground 19 | ``` 20 | 21 | 1. Start the stack 22 | 23 | ```bash 24 | cd stack/vagrant 25 | vagrant up 26 | # This process will take up to 10 minutes depending on your internet connection. 27 | # It will download HookOS, which is a couple hundred megabytes in size, and an Ubuntu cloud image, which is about 600MB. 28 | ``` 29 | 30 |
31 | example output 32 | 33 | ```bash 34 | Bringing machine 'stack' up with 'virtualbox' provider... 35 | ==> stack: Importing base box 'generic/ubuntu2204'... 36 | ==> stack: Matching MAC address for NAT networking... 37 | ==> stack: Checking if box 'generic/ubuntu2204' version '4.1.14' is up to date... 38 | ==> stack: Setting the name of the VM: vagrant_stack_1698780219785_94529 39 | ==> stack: Clearing any previously set network interfaces... 40 | ==> stack: Preparing network interfaces based on configuration... 41 | stack: Adapter 1: nat 42 | stack: Adapter 2: hostonly 43 | ==> stack: Forwarding ports... 44 | stack: 22 (guest) => 2222 (host) (adapter 1) 45 | ==> stack: Running 'pre-boot' VM customizations... 46 | ==> stack: Booting VM... 47 | ==> stack: Waiting for machine to boot. This may take a few minutes... 48 | stack: SSH address: 127.0.0.1:2222 49 | stack: SSH username: vagrant 50 | stack: SSH auth method: private key 51 | stack: Warning: Connection reset. Retrying... 52 | stack: 53 | stack: Vagrant insecure key detected. Vagrant will automatically replace 54 | stack: this with a newly generated keypair for better security. 55 | stack: 56 | stack: Inserting generated public key within guest... 57 | stack: Removing insecure key from the guest if it's present... 58 | stack: Key inserted! Disconnecting and reconnecting using new SSH key... 59 | ==> stack: Machine booted and ready! 60 | ==> stack: Checking for guest additions in VM... 61 | stack: The guest additions on this VM do not match the installed version of 62 | stack: VirtualBox! In most cases this is fine, but in rare cases it can 63 | stack: prevent things such as shared folders from working properly. If you see 64 | stack: shared folder errors, please make sure the guest additions within the 65 | stack: virtual machine match the version of VirtualBox you have installed on 66 | stack: your host and reload your VM. 
67 | stack: 68 | stack: Guest Additions Version: 6.1.38 69 | stack: VirtualBox Version: 7.0 70 | ==> stack: Configuring and enabling network interfaces... 71 | ==> stack: Mounting shared folders... 72 | stack: /playground/stack => ~/tinkerbell/playground/vagrant 73 | ==> stack: Running provisioner: shell... 74 | stack: Running: /var/folders/xt/8w5g0fv54tj4njvjhk_0_25r0000gr/T/vagrant-shell20231031-54683-k09nai.sh 75 | stack: + main 192.168.56.4 192.168.56.43 08:00:27:9e:f5:3a /playground/stack/ 192.168.56.5 0.4.2 eth1 1.28.3 v5.6.0 '' 76 | stack: + local host_ip=192.168.56.4 77 | stack: + local worker_ip=192.168.56.43 78 | stack: + local worker_mac=08:00:27:9e:f5:3a 79 | stack: + local manifests_dir=/playground/stack/ 80 | stack: + local loadbalancer_ip=192.168.56.5 81 | stack: + local helm_chart_version=0.4.2 82 | stack: + local loadbalancer_interface=eth1 83 | stack: + local kubectl_version=1.28.3 84 | stack: + local k3d_version=v5.6.0 85 | stack: + update_apt 86 | stack: + apt-get update 87 | stack: + DEBIAN_FRONTEND=noninteractive 88 | stack: + command apt-get --allow-change-held-packages --allow-downgrades --allow-remove-essential --allow-unauthenticated --option Dpkg::Options::=--force-confdef --option Dpkg::Options::=--force-confold --yes update 89 | stack: Hit:1 https://mirrors.edge.kernel.org/ubuntu jammy InRelease 90 | stack: Get:2 https://mirrors.edge.kernel.org/ubuntu jammy-updates InRelease [119 kB] 91 | stack: Get:3 https://mirrors.edge.kernel.org/ubuntu jammy-backports InRelease [109 kB] 92 | stack: Get:4 https://mirrors.edge.kernel.org/ubuntu jammy-security InRelease [110 kB] 93 | stack: Get:5 https://mirrors.edge.kernel.org/ubuntu jammy-updates/main amd64 Packages [1,148 kB] 94 | stack: Get:6 https://mirrors.edge.kernel.org/ubuntu jammy-updates/main Translation-en [245 kB] 95 | stack: Get:7 https://mirrors.edge.kernel.org/ubuntu jammy-updates/main amd64 c-n-f Metadata [16.1 kB] 96 | stack: Get:8 https://mirrors.edge.kernel.org/ubuntu 
jammy-updates/restricted amd64 Packages [1,103 kB] 97 | stack: Get:9 https://mirrors.edge.kernel.org/ubuntu jammy-updates/restricted Translation-en [179 kB] 98 | stack: Get:10 https://mirrors.edge.kernel.org/ubuntu jammy-updates/restricted amd64 c-n-f Metadata [536 B] 99 | stack: Get:11 https://mirrors.edge.kernel.org/ubuntu jammy-updates/universe amd64 Packages [998 kB] 100 | stack: Get:12 https://mirrors.edge.kernel.org/ubuntu jammy-updates/universe Translation-en [218 kB] 101 | stack: Get:13 https://mirrors.edge.kernel.org/ubuntu jammy-updates/universe amd64 c-n-f Metadata [22.0 kB] 102 | stack: Get:14 https://mirrors.edge.kernel.org/ubuntu jammy-updates/multiverse amd64 Packages [41.6 kB] 103 | stack: Get:15 https://mirrors.edge.kernel.org/ubuntu jammy-updates/multiverse Translation-en [9,768 B] 104 | stack: Get:16 https://mirrors.edge.kernel.org/ubuntu jammy-updates/multiverse amd64 c-n-f Metadata [472 B] 105 | stack: Get:17 https://mirrors.edge.kernel.org/ubuntu jammy-backports/main amd64 Packages [64.2 kB] 106 | stack: Get:18 https://mirrors.edge.kernel.org/ubuntu jammy-backports/main Translation-en [10.5 kB] 107 | stack: Get:19 https://mirrors.edge.kernel.org/ubuntu jammy-backports/main amd64 c-n-f Metadata [388 B] 108 | stack: Get:20 https://mirrors.edge.kernel.org/ubuntu jammy-backports/universe amd64 Packages [27.8 kB] 109 | stack: Get:21 https://mirrors.edge.kernel.org/ubuntu jammy-backports/universe Translation-en [16.4 kB] 110 | stack: Get:22 https://mirrors.edge.kernel.org/ubuntu jammy-backports/universe amd64 c-n-f Metadata [644 B] 111 | stack: Get:23 https://mirrors.edge.kernel.org/ubuntu jammy-security/main amd64 Packages [938 kB] 112 | stack: Get:24 https://mirrors.edge.kernel.org/ubuntu jammy-security/main Translation-en [185 kB] 113 | stack: Get:25 https://mirrors.edge.kernel.org/ubuntu jammy-security/main amd64 c-n-f Metadata [11.4 kB] 114 | stack: Get:26 https://mirrors.edge.kernel.org/ubuntu jammy-security/restricted amd64 Packages [1,079 
kB] 115 | stack: Get:27 https://mirrors.edge.kernel.org/ubuntu jammy-security/restricted Translation-en [175 kB] 116 | stack: Get:28 https://mirrors.edge.kernel.org/ubuntu jammy-security/restricted amd64 c-n-f Metadata [536 B] 117 | stack: Get:29 https://mirrors.edge.kernel.org/ubuntu jammy-security/universe amd64 Packages [796 kB] 118 | stack: Get:30 https://mirrors.edge.kernel.org/ubuntu jammy-security/universe Translation-en [146 kB] 119 | stack: Get:31 https://mirrors.edge.kernel.org/ubuntu jammy-security/universe amd64 c-n-f Metadata [16.8 kB] 120 | stack: Get:32 https://mirrors.edge.kernel.org/ubuntu jammy-security/multiverse amd64 Packages [36.5 kB] 121 | stack: Get:33 https://mirrors.edge.kernel.org/ubuntu jammy-security/multiverse Translation-en [7,060 B] 122 | stack: Get:34 https://mirrors.edge.kernel.org/ubuntu jammy-security/multiverse amd64 c-n-f Metadata [260 B] 123 | stack: Fetched 7,831 kB in 2s (3,321 kB/s) 124 | stack: Reading package lists... 125 | stack: + install_docker 126 | stack: + curl -fsSL https://download.docker.com/linux/ubuntu/gpg 127 | stack: + sudo apt-key add - 128 | stack: Warning: apt-key is deprecated. Manage keyring files in trusted.gpg.d instead (see apt-key(8)). 129 | stack: OK 130 | stack: ++ lsb_release -cs 131 | stack: + add-apt-repository 'deb https://download.docker.com/linux/ubuntu jammy stable' 132 | stack: Hit:1 https://mirrors.edge.kernel.org/ubuntu jammy InRelease 133 | stack: Hit:2 https://mirrors.edge.kernel.org/ubuntu jammy-updates InRelease 134 | stack: Hit:3 https://mirrors.edge.kernel.org/ubuntu jammy-backports InRelease 135 | stack: Hit:4 https://mirrors.edge.kernel.org/ubuntu jammy-security InRelease 136 | stack: Get:5 https://download.docker.com/linux/ubuntu jammy InRelease [48.8 kB] 137 | stack: Get:6 https://download.docker.com/linux/ubuntu jammy/stable amd64 Packages [22.7 kB] 138 | stack: Fetched 71.5 kB in 1s (72.5 kB/s) 139 | stack: Reading package lists... 
140 | stack: W: https://download.docker.com/linux/ubuntu/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details. 141 | stack: Repository: 'deb https://download.docker.com/linux/ubuntu jammy stable' 142 | stack: Description: 143 | stack: Archive for codename: jammy components: stable 144 | stack: More info: https://download.docker.com/linux/ubuntu 145 | stack: Adding repository. 146 | stack: Adding deb entry to /etc/apt/sources.list.d/archive_uri-https_download_docker_com_linux_ubuntu-jammy.list 147 | stack: Adding disabled deb-src entry to /etc/apt/sources.list.d/archive_uri-https_download_docker_com_linux_ubuntu-jammy.list 148 | stack: + update_apt 149 | stack: + apt-get update 150 | stack: + DEBIAN_FRONTEND=noninteractive 151 | stack: + command apt-get --allow-change-held-packages --allow-downgrades --allow-remove-essential --allow-unauthenticated --option Dpkg::Options::=--force-confdef --option Dpkg::Options::=--force-confold --yes update 152 | stack: Hit:1 https://download.docker.com/linux/ubuntu jammy InRelease 153 | stack: Hit:2 https://mirrors.edge.kernel.org/ubuntu jammy InRelease 154 | stack: Hit:3 https://mirrors.edge.kernel.org/ubuntu jammy-updates InRelease 155 | stack: Hit:4 https://mirrors.edge.kernel.org/ubuntu jammy-backports InRelease 156 | stack: Hit:5 https://mirrors.edge.kernel.org/ubuntu jammy-security InRelease 157 | stack: Reading package lists... 158 | stack: W: https://download.docker.com/linux/ubuntu/dists/jammy/InRelease: Key is stored in legacy trusted.gpg keyring (/etc/apt/trusted.gpg), see the DEPRECATION section in apt-key(8) for details. 
159 | stack: + apt-get install --no-install-recommends containerd.io docker-ce docker-ce-cli 160 | stack: + DEBIAN_FRONTEND=noninteractive 161 | stack: + command apt-get --allow-change-held-packages --allow-downgrades --allow-remove-essential --allow-unauthenticated --option Dpkg::Options::=--force-confdef --option Dpkg::Options::=--force-confold --yes install --no-install-recommends containerd.io docker-ce docker-ce-cli 162 | stack: Reading package lists... 163 | stack: Building dependency tree... 164 | stack: Reading state information... 165 | stack: Suggested packages: 166 | stack: aufs-tools cgroupfs-mount | cgroup-lite 167 | stack: Recommended packages: 168 | stack: docker-ce-rootless-extras libltdl7 pigz docker-buildx-plugin 169 | stack: docker-compose-plugin 170 | stack: The following NEW packages will be installed: 171 | stack: containerd.io docker-ce docker-ce-cli 172 | stack: 0 upgraded, 3 newly installed, 0 to remove and 195 not upgraded. 173 | stack: Need to get 64.5 MB of archives. 174 | stack: After this operation, 249 MB of additional disk space will be used. 175 | stack: Get:1 https://download.docker.com/linux/ubuntu jammy/stable amd64 containerd.io amd64 1.6.24-1 [28.6 MB] 176 | stack: Get:2 https://download.docker.com/linux/ubuntu jammy/stable amd64 docker-ce-cli amd64 5:24.0.7-1~ubuntu.22.04~jammy [13.3 MB] 177 | stack: Get:3 https://download.docker.com/linux/ubuntu jammy/stable amd64 docker-ce amd64 5:24.0.7-1~ubuntu.22.04~jammy [22.6 MB] 178 | stack: Fetched 64.5 MB in 1s (53.8 MB/s) 179 | stack: Selecting previously unselected package containerd.io. 180 | (Reading database ... 75348 files and directories currently installed.) 181 | stack: Preparing to unpack .../containerd.io_1.6.24-1_amd64.deb ... 182 | stack: Unpacking containerd.io (1.6.24-1) ... 183 | stack: Selecting previously unselected package docker-ce-cli. 184 | stack: Preparing to unpack .../docker-ce-cli_5%3a24.0.7-1~ubuntu.22.04~jammy_amd64.deb ... 
185 | stack: Unpacking docker-ce-cli (5:24.0.7-1~ubuntu.22.04~jammy) ... 186 | stack: Selecting previously unselected package docker-ce. 187 | stack: Preparing to unpack .../docker-ce_5%3a24.0.7-1~ubuntu.22.04~jammy_amd64.deb ... 188 | stack: Unpacking docker-ce (5:24.0.7-1~ubuntu.22.04~jammy) ... 189 | stack: Setting up containerd.io (1.6.24-1) ... 190 | stack: Created symlink /etc/systemd/system/multi-user.target.wants/containerd.service → /lib/systemd/system/containerd.service. 191 | stack: Setting up docker-ce-cli (5:24.0.7-1~ubuntu.22.04~jammy) ... 192 | stack: Setting up docker-ce (5:24.0.7-1~ubuntu.22.04~jammy) ... 193 | stack: Created symlink /etc/systemd/system/multi-user.target.wants/docker.service → /lib/systemd/system/docker.service. 194 | stack: Created symlink /etc/systemd/system/sockets.target.wants/docker.socket → /lib/systemd/system/docker.socket. 195 | stack: Processing triggers for man-db (2.10.2-1) ... 196 | stack: NEEDRESTART-VER: 3.5 197 | stack: NEEDRESTART-KCUR: 5.15.0-48-generic 198 | stack: NEEDRESTART-KEXP: 5.15.0-48-generic 199 | stack: NEEDRESTART-KSTA: 1 200 | stack: + gpasswd -a vagrant docker 201 | stack: Adding user vagrant to group docker 202 | stack: + sudo ethtool -K eth1 tx off sg off tso off 203 | stack: Actual changes: 204 | stack: tx-scatter-gather: off 205 | stack: tx-checksum-ip-generic: off 206 | stack: tx-generic-segmentation: off [not requested] 207 | stack: tx-tcp-segmentation: off 208 | stack: + install_kubectl 1.28.3 209 | stack: + local kubectl_version=1.28.3 210 | stack: + curl -LO https://dl.k8s.io/v1.28.3/bin/linux/amd64/kubectl 211 | stack: % Total % Received % Xferd Average Speed Time Time Time Current 212 | stack: Dload Upload Total Spent Left Speed 213 | 100 138 100 138 0 0 242 0 --:--:-- --:--:-- --:--:-- 242 214 | 100 47.5M 100 47.5M 0 0 21.3M 0 0:00:02 0:00:02 --:--:-- 31.6M 215 | stack: + chmod +x ./kubectl 216 | stack: + mv ./kubectl /usr/local/bin/kubectl 217 | stack: + run_helm 192.168.56.4 
192.168.56.43 08:00:27:9e:f5:3a /playground/stack/ 192.168.56.5 0.4.2 eth1 v5.6.0 218 | stack: + local host_ip=192.168.56.4 219 | stack: + local worker_ip=192.168.56.43 220 | stack: + local worker_mac=08:00:27:9e:f5:3a 221 | stack: + local manifests_dir=/playground/stack/ 222 | stack: + local loadbalancer_ip=192.168.56.5 223 | stack: + local helm_chart_version=0.4.2 224 | stack: + local loadbalancer_interface=eth1 225 | stack: + local k3d_version=v5.6.0 226 | stack: + local namespace=tink-system 227 | stack: + install_k3d v5.6.0 228 | stack: + local k3d_Version=v5.6.0 229 | stack: + wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh 230 | stack: + TAG=v5.6.0 231 | stack: + bash 232 | stack: Preparing to install k3d into /usr/local/bin 233 | stack: k3d installed into /usr/local/bin/k3d 234 | stack: Run 'k3d --help' to see what you can do with it. 235 | stack: + start_k3d 236 | stack: + k3d cluster create --network host --no-lb --k3s-arg --disable=traefik,servicelb --k3s-arg --kube-apiserver-arg=feature-gates=MixedProtocolLBService=true --host-pid-mode 237 | stack: INFO[0000] [SimpleConfig] Hostnetwork selected - disabling injection of docker host into the cluster, server load balancer and setting the api port to the k3s default 238 | stack: WARN[0000] No node filter specified 239 | stack: WARN[0000] No node filter specified 240 | stack: INFO[0000] [ClusterConfig] Hostnetwork selected - disabling injection of docker host into the cluster, server load balancer and setting the api port to the k3s default 241 | stack: INFO[0000] Prep: Network 242 | stack: INFO[0000] Re-using existing network 'host' (0dfc7dbbdde7db0b7a7a5eba280e71248bb0cf010603bfaa0a0a09928df8d555) 243 | stack: INFO[0000] Created image volume k3d-k3s-default-images 244 | stack: INFO[0000] Starting new tools node... 
245 | stack: INFO[0001] Creating node 'k3d-k3s-default-server-0' 246 | stack: INFO[0001] Pulling image 'ghcr.io/k3d-io/k3d-tools:5.6.0' 247 | stack: INFO[0002] Pulling image 'docker.io/rancher/k3s:v1.27.4-k3s1' 248 | stack: INFO[0002] Starting Node 'k3d-k3s-default-tools' 249 | stack: INFO[0008] Using the k3d-tools node to gather environment information 250 | stack: INFO[0008] Starting cluster 'k3s-default' 251 | stack: INFO[0008] Starting servers... 252 | stack: INFO[0008] Starting Node 'k3d-k3s-default-server-0' 253 | stack: INFO[0013] All agents already running. 254 | stack: INFO[0013] All helpers already running. 255 | stack: INFO[0013] Cluster 'k3s-default' created successfully! 256 | stack: INFO[0013] You can now use it like this: 257 | stack: kubectl cluster-info 258 | stack: + mkdir -p /root/.kube/ 259 | stack: + k3d kubeconfig get -a 260 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 261 | stack: error: no matching resources found 262 | stack: + sleep 1 263 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 264 | stack: error: no matching resources found 265 | stack: + sleep 1 266 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 267 | stack: error: no matching resources found 268 | stack: + sleep 1 269 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 270 | stack: error: no matching resources found 271 | stack: + sleep 1 272 | stack: + kubectl wait --for=condition=Ready nodes --all --timeout=600s 273 | stack: node/k3d-k3s-default-server-0 condition met 274 | stack: + install_helm 275 | stack: + helm_ver=v3.9.4 276 | stack: + curl -fsSL -o get_helm.sh https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 277 | stack: + chmod 700 get_helm.sh 278 | stack: + ./get_helm.sh --version v3.9.4 279 | stack: Downloading https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz 280 | stack: Verifying checksum... Done. 
281 | stack: Preparing to install helm into /usr/local/bin 282 | stack: helm installed into /usr/local/bin/helm 283 | stack: + helm_install_tink_stack tink-system 0.4.2 eth1 192.168.56.5 284 | stack: + local namespace=tink-system 285 | stack: + local version=0.4.2 286 | stack: + local interface=eth1 287 | stack: + local loadbalancer_ip=192.168.56.5 288 | stack: + trusted_proxies= 289 | stack: + '[' '' '!=' '' ']' 290 | stack: ++ tr ' ' , 291 | stack: ++ kubectl get nodes -o 'jsonpath={.items[*].spec.podCIDR}' 292 | stack: + trusted_proxies= 293 | stack: + '[' '' '!=' '' ']' 294 | stack: + trusted_proxies= 295 | stack: + '[' '' '!=' '' ']' 296 | stack: ++ kubectl get nodes -o 'jsonpath={.items[*].spec.podCIDR}' 297 | stack: ++ tr ' ' , 298 | stack: + trusted_proxies=10.42.0.0/24 299 | stack: + '[' 10.42.0.0/24 '!=' '' ']' 300 | stack: + helm install tink-stack oci://ghcr.io/tinkerbell/charts/stack --version 0.4.2 --create-namespace --namespace tink-system --wait --set 'smee.trustedProxies={10.42.0.0/24}' --set 'hegel.trustedProxies={10.42.0.0/24}' --set stack.kubevip.interface=eth1 --set stack.relay.sourceInterface=eth1 --set stack.loadBalancerIP=192.168.56.5 --set smee.publicIP=192.168.56.5 301 | stack: NAME: tink-stack 302 | stack: LAST DEPLOYED: Tue Oct 31 19:25:06 2023 303 | stack: NAMESPACE: tink-system 304 | stack: STATUS: deployed 305 | stack: REVISION: 1 306 | stack: TEST SUITE: None 307 | stack: + apply_manifests 192.168.56.43 08:00:27:9e:f5:3a /playground/stack/ 192.168.56.5 tink-system 308 | stack: + local worker_ip=192.168.56.43 309 | stack: + local worker_mac=08:00:27:9e:f5:3a 310 | stack: + local manifests_dir=/playground/stack/ 311 | stack: + local host_ip=192.168.56.5 312 | stack: + local namespace=tink-system 313 | stack: + disk_device=/dev/sda 314 | stack: + lsblk 315 | stack: + grep -q vda 316 | stack: + export DISK_DEVICE=/dev/sda 317 | stack: + DISK_DEVICE=/dev/sda 318 | stack: + export TINKERBELL_CLIENT_IP=192.168.56.43 319 | stack: + 
TINKERBELL_CLIENT_IP=192.168.56.43 320 | stack: + export TINKERBELL_CLIENT_MAC=08:00:27:9e:f5:3a 321 | stack: + TINKERBELL_CLIENT_MAC=08:00:27:9e:f5:3a 322 | stack: + export TINKERBELL_HOST_IP=192.168.56.5 323 | stack: + TINKERBELL_HOST_IP=192.168.56.5 324 | stack: + for i in "$manifests_dir"/{hardware.yaml,template.yaml,workflow.yaml} 325 | stack: + envsubst 326 | stack: + echo -e --- 327 | stack: + for i in "$manifests_dir"/{hardware.yaml,template.yaml,workflow.yaml} 328 | stack: + envsubst 329 | stack: + echo -e --- 330 | stack: + for i in "$manifests_dir"/{hardware.yaml,template.yaml,workflow.yaml} 331 | stack: + envsubst 332 | stack: + echo -e --- 333 | stack: + kubectl apply -n tink-system -f /tmp/manifests.yaml 334 | stack: hardware.tinkerbell.org/machine1 created 335 | stack: template.tinkerbell.org/ubuntu-jammy created 336 | stack: workflow.tinkerbell.org/playground-workflow created 337 | stack: + kubectl apply -n tink-system -f /playground/stack//ubuntu-download.yaml 338 | stack: configmap/download-image created 339 | stack: job.batch/download-ubuntu-jammy created 340 | stack: + kubectl_for_vagrant_user 341 | stack: + runuser -l vagrant -c 'mkdir -p ~/.kube/' 342 | stack: + runuser -l vagrant -c 'k3d kubeconfig get -a > ~/.kube/config' 343 | stack: + chmod 600 /home/vagrant/.kube/config 344 | stack: + echo 'export KUBECONFIG="/home/vagrant/.kube/config"' 345 | stack: all done! 346 | stack: + echo 'all done!' 347 | 348 | ``` 349 | 350 |
351 | 352 | 1. Wait for HookOS and Ubuntu image to be downloaded 353 | 354 | ```bash 355 | vagrant ssh stack 356 | kubectl get jobs -n tink-system --watch 357 | exit 358 | # There are 2 Kubernetes jobs that run to download HookOS and the Ubuntu image. 359 | # Once both jobs are complete exit the stack VM. 360 | ``` 361 | 362 |
363 | example output 364 | 365 | ```bash 366 | NAME COMPLETIONS DURATION AGE 367 | download-hook 1/1 27s 72s 368 | download-ubuntu-jammy 0/1 49s 49s 369 | download-ubuntu-jammy 0/1 70s 70s 370 | download-ubuntu-jammy 0/1 72s 72s 371 | download-ubuntu-jammy 1/1 72s 72s 372 | ``` 373 | 374 |
375 | 376 | 1. Start the machine to be provisioned 377 | 378 | ```bash 379 | vagrant up machine1 380 | # This will start a VM to pxe boot. A GUI window of this machines console will be opened. 381 | # The `vagrant up machine1` command will exit quickly and show the following error message. This is expected. 382 | # Once the command line control is returned to you, you can move on to the next step. 383 | ``` 384 | 385 |
386 | example output 387 | 388 | ```bash 389 | Bringing machine 'machine1' up with 'virtualbox' provider... 390 | ==> machine1: Importing base box 'jtyr/pxe'... 391 | ==> machine1: Matching MAC address for NAT networking... 392 | ==> machine1: Checking if box 'jtyr/pxe' version '2' is up to date... 393 | ==> machine1: Setting the name of the VM: vagrant_machine1_1626365105119_9800 394 | ==> machine1: Fixed port collision for 22 => 2222. Now on port 2200. 395 | ==> machine1: Clearing any previously set network interfaces... 396 | ==> machine1: Preparing network interfaces based on configuration... 397 | machine1: Adapter 1: hostonly 398 | ==> machine1: Forwarding ports... 399 | machine1: 22 (guest) => 2200 (host) (adapter 1) 400 | machine1: VirtualBox adapter #1 not configured as "NAT". Skipping port 401 | machine1: forwards on this adapter. 402 | ==> machine1: Running 'pre-boot' VM customizations... 403 | ==> machine1: Booting VM... 404 | ==> machine1: Waiting for machine to boot. This may take a few minutes... 405 | machine1: SSH address: 127.0.0.1:22 406 | machine1: SSH username: vagrant 407 | machine1: SSH auth method: private key 408 | machine1: Warning: Authentication failure. Retrying... 409 | Timed out while waiting for the machine to boot. This means that 410 | Vagrant was unable to communicate with the guest machine within 411 | the configured ("config.vm.boot_timeout" value) time period. 412 | 413 | If you look above, you should be able to see the error(s) that 414 | Vagrant had when attempting to connect to the machine. These errors 415 | are usually good hints as to what may be wrong. 416 | 417 | If you're using a custom box, make sure that networking is properly 418 | working and you're able to connect to the machine. It is a common 419 | problem that networking isn't setup properly in these boxes. 420 | Verify that authentication configurations are also setup properly, 421 | as well. 
422 | 423 | If the box appears to be booting properly, you may want to increase 424 | the timeout ("config.vm.boot_timeout") value. 425 | 426 | ``` 427 | 428 |
429 | 430 | 1. Watch the provision complete 431 | 432 | ```bash 433 | # log in to the stack VM 434 | vagrant ssh stack 435 | 436 | # watch for the workflow to complete 437 | # once the workflow is complete (see the example output below for completion), move on to the next step 438 | kubectl get -n tink-system workflow playground-workflow --watch 439 | ``` 440 | 441 |
442 | example output 443 | 444 | ```bash 445 | NAME TEMPLATE STATE 446 | playground-workflow ubuntu-jammy STATE_PENDING 447 | playground-workflow ubuntu-jammy STATE_RUNNING 448 | playground-workflow ubuntu-jammy STATE_RUNNING 449 | playground-workflow ubuntu-jammy STATE_RUNNING 450 | playground-workflow ubuntu-jammy STATE_RUNNING 451 | playground-workflow ubuntu-jammy STATE_RUNNING 452 | playground-workflow ubuntu-jammy STATE_RUNNING 453 | playground-workflow ubuntu-jammy STATE_RUNNING 454 | playground-workflow ubuntu-jammy STATE_RUNNING 455 | playground-workflow ubuntu-jammy STATE_RUNNING 456 | playground-workflow ubuntu-jammy STATE_RUNNING 457 | playground-workflow ubuntu-jammy STATE_RUNNING 458 | playground-workflow ubuntu-jammy STATE_RUNNING 459 | playground-workflow ubuntu-jammy STATE_RUNNING 460 | playground-workflow ubuntu-jammy STATE_SUCCESS 461 | ``` 462 | 463 |
464 | 465 | 1. Login to the machine 466 | 467 | The machine has been provisioned with Ubuntu. 468 | You can now SSH into the machine. 469 | 470 | ```bash 471 | ssh tink@192.168.56.43 # user/pass => tink/tink 472 | ``` 473 | -------------------------------------------------------------------------------- /stack/vagrant/.env: -------------------------------------------------------------------------------- 1 | LIBVIRT_HOST_IP=192.168.56.1 2 | PROVISIONER_IP=192.168.56.4 3 | LOADBALANCER_IP=192.168.56.40 4 | LOADBALANCER_IP_2=192.168.56.41 5 | MACHINE1_IP=192.168.56.43 6 | MACHINE1_MAC=08:00:27:9e:f5:3a 7 | 8 | # https://github.com/tinkerbell/tinkerbell/pkgs/container/charts%2Ftinkerbell 9 | HELM_CHART_VERSION=v0.17.1 10 | KUBECTL_VERSION=1.32.4 11 | K3D_VERSION=v5.8.3 12 | HELM_VERSION=v3.17.0 13 | -------------------------------------------------------------------------------- /stack/vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | open(".env", "r").readlines.each { 5 | |l| 6 | kv = l.split("=") 7 | if kv[1] != nil 8 | ENV[kv[0]] = kv[1].strip 9 | end 10 | } 11 | 12 | LIBVIRT_HOST_IP = ENV["LIBVIRT_HOST_IP"] || "192.168.56.1" 13 | PROVISIONER_IP = ENV["PROVISIONER_IP"] || "192.168.56.4" 14 | LOADBALANCER_IP = ENV["LOADBALANCER_IP"] || "192.168.56.40" 15 | LOADBALANCER_IP_2 = ENV["LOADBALANCER_IP_2"] || "192.168.56.41" 16 | GATEWAY_IP = ENV["GATEWAY_IP"] || "" 17 | MACHINE1_IP = ENV["MACHINE1_IP"] || "192.168.56.43" 18 | MACHINE1_MAC = (ENV["MACHINE1_MAC"] || "08:00:27:9E:F5:3A").downcase 19 | HELM_CHART_VERSION = ENV["HELM_CHART_VERSION"] || "0.17.0" 20 | KUBECTL_VERSION = ENV["KUBECTL_VERSION"] || "1.28.3" 21 | K3D_VERSION = ENV["K3D_VERSION"] || "v5.6.0" 22 | HELM_VERSION = ENV["HELM_VERSION"] || "v3.14.4" 23 | HELM_LOADBALANCER_INTERFACE = ENV["HELM_LOADBALANCER_INTERFACE"] || "eth1" 24 | DEST_DIR_BASE = "/playground/stack/" 25 | 26 | 
Vagrant.configure("2") do |config| 27 | config.vm.provider :libvirt do |libvirt| 28 | libvirt.qemu_use_session = false 29 | end 30 | 31 | config.vm.define "stack" do |stack| 32 | stack.vm.box = "generic/ubuntu2204" 33 | stack.vm.synced_folder "", DEST_DIR_BASE 34 | stack.vm.network "private_network", ip: "192.168.56.4", netmask: "255.255.255.0", 35 | libvirt__network_name: "tink_network", 36 | libvirt__host_ip: LIBVIRT_HOST_IP, 37 | libvirt__netmask: "255.255.255.0", 38 | libvirt__dhcp_enabled: false 39 | 40 | stack.vm.provider "virtualbox" do |v, override| 41 | v.memory = 2048 42 | v.cpus = 2 43 | end 44 | 45 | stack.vm.provider "libvirt" do |l, override| 46 | l.memory = 2048 47 | l.cpus = 2 48 | override.vm.synced_folder "", DEST_DIR_BASE, type: "rsync" 49 | end 50 | 51 | stack.vm.provision :shell, path: "setup.sh", args: [PROVISIONER_IP, MACHINE1_IP, MACHINE1_MAC, DEST_DIR_BASE, LOADBALANCER_IP, HELM_CHART_VERSION, HELM_LOADBALANCER_INTERFACE, KUBECTL_VERSION, K3D_VERSION, HELM_VERSION, LOADBALANCER_IP_2, GATEWAY_IP] 52 | end 53 | 54 | config.vm.define :machine1, autostart: false do |machine1| 55 | machine1.ssh.insert_key = false 56 | machine1.vm.boot_timeout = 10 57 | machine1.vm.synced_folder ".", "/vagrant", disabled: true 58 | machine1.vm.network :private_network, ip: MACHINE1_IP, 59 | mac: MACHINE1_MAC.gsub(/[:-]/, "").strip, 60 | adapter: 1, 61 | libvirt__network_name: "tink_network", 62 | libvirt__dhcp_enabled: false, 63 | libvirt__forward_mode: "nat" 64 | 65 | machine1.vm.provider "libvirt" do |v| 66 | v.storage :file, :size => "20G" 67 | v.memory = 4096 68 | v.cpus = 2 69 | v.boot "hd" 70 | v.boot "network" 71 | v.graphics_ip = "0.0.0.0" 72 | v.machine_arch = "x86_64" 73 | end 74 | 75 | machine1.vm.provider "virtualbox" do |v, override| 76 | override.vm.box = "jtyr/pxe" 77 | v.memory = 2048 78 | v.cpus = 2 79 | v.gui = true 80 | v.customize ["modifyvm", :id, "--nic1", "hostonlynet", "--nic2", "nat", "--boot1", "disk", "--boot2", "net"] 81 | v.customize 
["modifyvm", :id, "--macaddress1", MACHINE1_MAC.gsub(/[:-]/, "").strip] 82 | v.customize ["setextradata", :id, "GUI/ScaleFactor", "3.0"] 83 | v.check_guest_additions = false 84 | end 85 | end 86 | end 87 | -------------------------------------------------------------------------------- /stack/vagrant/hardware.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Hardware 3 | metadata: 4 | name: machine1 5 | spec: 6 | disks: 7 | - device: $DISK_DEVICE 8 | metadata: 9 | facility: 10 | facility_code: playground 11 | instance: 12 | hostname: "machine1" 13 | id: "$TINKERBELL_CLIENT_MAC" 14 | operating_system: 15 | distro: "ubuntu" 16 | os_slug: "ubuntu_20_04" 17 | version: "20.04" 18 | interfaces: 19 | - dhcp: 20 | arch: x86_64 21 | hostname: machine1 22 | ip: 23 | address: $TINKERBELL_CLIENT_IP 24 | netmask: 255.255.255.0 25 | lease_time: 86400 26 | mac: $TINKERBELL_CLIENT_MAC 27 | name_servers: 28 | - 1.1.1.1 29 | - 8.8.8.8 30 | uefi: false 31 | netboot: 32 | allowPXE: true 33 | allowWorkflow: true 34 | -------------------------------------------------------------------------------- /stack/vagrant/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | install_docker() { 4 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add - 5 | add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" 6 | update_apt 7 | apt-get install --no-install-recommends containerd.io docker-ce docker-ce-cli 8 | gpasswd -a vagrant docker 9 | } 10 | 11 | install_kubectl() { 12 | local kubectl_version=$1 13 | 14 | curl -LO https://dl.k8s.io/v"$kubectl_version"/bin/linux/amd64/kubectl 15 | chmod +x ./kubectl 16 | mv ./kubectl /usr/local/bin/kubectl 17 | } 18 | 19 | install_helm() { 20 | local helm_ver=$1 21 | 22 | curl -fsSL -o get_helm.sh 
https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3
23 | 	chmod 700 get_helm.sh
24 | 	./get_helm.sh --version "$helm_ver"
25 | }
26 | 
27 | # Wrapper so every apt-get call in this script runs non-interactive with
28 | # consistent conflict-resolution flags.
27 | apt-get() {
28 | 	DEBIAN_FRONTEND=noninteractive command apt-get \
29 | 		--allow-change-held-packages \
30 | 		--allow-downgrades \
31 | 		--allow-remove-essential \
32 | 		--allow-unauthenticated \
33 | 		--option Dpkg::Options::=--force-confdef \
34 | 		--option Dpkg::Options::=--force-confold \
35 | 		--yes \
36 | 		"$@"
37 | }
38 | 
39 | update_apt() {
40 | 	apt-get update
41 | }
42 | 
43 | install_k3d() {
44 | 	local k3d_version=$1 # fix: was "k3d_Version" (typo) — the TAG assignment below reads "$k3d_version" and only worked because the caller's variable leaked in via bash dynamic scoping
45 | 
46 | 	wget -q -O - https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh | TAG="$k3d_version" bash
47 | }
48 | 
49 | start_k3d() {
50 | 	# K3D_FIX_DNS=false is needed because host network mode won't work without it.
51 | 	K3D_FIX_DNS=false k3d cluster create --network host --no-lb --k3s-arg "--disable=traefik,servicelb,metrics-server,local-storage"
52 | 
53 | 	mkdir -p ~/.kube/
54 | 	k3d kubeconfig get -a >~/.kube/config
55 | 	until kubectl wait --for=condition=Ready nodes --all --timeout=600s; do sleep 1; done
56 | }
57 | 
58 | kubectl_for_vagrant_user() {
59 | 	runuser -l vagrant -c "mkdir -p ~/.kube/"
60 | 	runuser -l vagrant -c "k3d kubeconfig get -a > ~/.kube/config"
61 | 	chmod 600 /home/vagrant/.kube/config
62 | 	echo 'export KUBECONFIG="/home/vagrant/.kube/config"' >>~vagrant/.bashrc
63 | }
64 | 
65 | helm_install_tink_stack() {
66 | 	local namespace="$1"
67 | 	local version="$2"
68 | 	local interface="$3"
69 | 	local loadbalancer_ip="$4"
70 | 	local loadbalancer_ip_2="$5"
71 | 
72 | 	trusted_proxies=""
73 | 	until [ "$trusted_proxies" != "" ]; do
74 | 		trusted_proxies=$(kubectl get nodes -o jsonpath='{.items[*].spec.podCIDR}' | tr ' ' ',')
75 | 		sleep 5
76 | 	done
77 | 	helm install tink-stack oci://ghcr.io/tinkerbell/charts/tinkerbell \
78 | 		--version "$version" \
79 | 		--create-namespace \
80 | 		--namespace "$namespace" \
81 | 		--wait \
82 | 		--set "trustedProxies={${trusted_proxies}}" \
83 | 		--set 
"publicIP=$loadbalancer_ip" \ 84 | --set "artifactsFileServer=http://$loadbalancer_ip_2:8080" \ 85 | --set "deployment.init.sourceInterface=$interface" \ 86 | --set "kubevip.interface=$interface" 87 | } 88 | 89 | apply_manifests() { 90 | local worker_ip=$1 91 | local worker_mac=$2 92 | local manifests_dir=$3 93 | local host_ip=$4 94 | local namespace=$5 95 | 96 | disk_device="/dev/sda" 97 | if lsblk | grep -q vda; then 98 | disk_device="/dev/vda" 99 | fi 100 | export DISK_DEVICE="$disk_device" 101 | export TINKERBELL_CLIENT_IP="$worker_ip" 102 | export TINKERBELL_CLIENT_MAC="$worker_mac" 103 | export TINKERBELL_HOST_IP="$host_ip" 104 | 105 | for i in "$manifests_dir"/{hardware.yaml,template.yaml,workflow.yaml}; do 106 | envsubst <"$i" 107 | echo -e '---' 108 | done >/tmp/manifests.yaml 109 | kubectl apply -n "$namespace" -f /tmp/manifests.yaml 110 | kubectl apply -n "$namespace" -f "$manifests_dir"/ubuntu-download.yaml 111 | } 112 | 113 | run_helm() { 114 | local host_ip=$1 115 | local worker_ip=$2 116 | local worker_mac=$3 117 | local manifests_dir=$4 118 | local loadbalancer_ip=$5 119 | local helm_chart_version=$6 120 | local loadbalancer_interface=$7 121 | local k3d_version=$8 122 | local namespace="tink-system" 123 | local helm_version=$9 124 | local loadbalancer_ip_2="${10}" 125 | 126 | install_k3d "$k3d_version" 127 | start_k3d 128 | kubectl_for_vagrant_user 129 | install_helm "$helm_version" 130 | helm_install_tink_stack "$namespace" "$helm_chart_version" "$loadbalancer_interface" "$loadbalancer_ip" "$loadbalancer_ip_2" 131 | apply_manifests "$worker_ip" "$worker_mac" "$manifests_dir" "$loadbalancer_ip_2" "$namespace" 132 | } 133 | 134 | main() { 135 | local host_ip="$1" 136 | local worker_ip="$2" 137 | local worker_mac="$3" 138 | local manifests_dir="$4" 139 | local loadbalancer_ip="$5" 140 | local helm_chart_version="$6" 141 | local loadbalancer_interface="$7" 142 | local kubectl_version="$8" 143 | local k3d_version="$9" 144 | local helm_version="${10}" 
145 | local loadbalancer_ip_2="${11}" 146 | local gateway_ip="${12}" 147 | 148 | update_apt 149 | install_docker 150 | # https://github.com/ipxe/ipxe/pull/863 151 | # Needed after iPXE increased the default TCP window size to 2MB. 152 | sudo ethtool -K eth1 tx off sg off tso off 153 | install_kubectl "$kubectl_version" 154 | run_helm "$host_ip" "$worker_ip" "$worker_mac" "$manifests_dir" "$loadbalancer_ip" "$helm_chart_version" "$loadbalancer_interface" "$k3d_version" "$helm_version" "$loadbalancer_ip_2" 155 | } 156 | 157 | if [[ ${BASH_SOURCE[0]} == "$0" ]]; then 158 | set -euxo pipefail 159 | 160 | main "$@" 161 | echo "all done!" 162 | fi 163 | -------------------------------------------------------------------------------- /stack/vagrant/template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Template 3 | metadata: 4 | name: ubuntu 5 | spec: 6 | data: | 7 | version: "0.1" 8 | name: ubuntu 9 | global_timeout: 1800 10 | tasks: 11 | - name: "os installation" 12 | worker: "{{.device_1}}" 13 | volumes: 14 | - /dev:/dev 15 | - /dev/console:/dev/console 16 | - /lib/firmware:/lib/firmware:ro 17 | actions: 18 | - name: "stream ubuntu image" 19 | image: quay.io/tinkerbell/actions/image2disk:latest 20 | timeout: 600 21 | environment: 22 | DEST_DISK: {{ index .Hardware.Disks 0 }} 23 | IMG_URL: "http://$TINKERBELL_HOST_IP:8080/jammy-server-cloudimg-amd64.raw.gz" 24 | COMPRESSED: true 25 | - name: "grow-partition" 26 | image: quay.io/tinkerbell/actions/cexec:latest 27 | timeout: 90 28 | environment: 29 | BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1 30 | FS_TYPE: ext4 31 | CHROOT: y 32 | DEFAULT_INTERPRETER: "/bin/sh -c" 33 | CMD_LINE: "growpart {{ index .Hardware.Disks 0 }} 1 && resize2fs {{ index .Hardware.Disks 0 }}1" 34 | - name: "install openssl" 35 | image: quay.io/tinkerbell/actions/cexec:latest 36 | timeout: 90 37 | environment: 38 | BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1 
39 | FS_TYPE: ext4 40 | CHROOT: y 41 | DEFAULT_INTERPRETER: "/bin/sh -c" 42 | CMD_LINE: "apt -y update && apt -y install openssl" 43 | - name: "create user" 44 | image: quay.io/tinkerbell/actions/cexec:latest 45 | timeout: 90 46 | environment: 47 | BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1 48 | FS_TYPE: ext4 49 | CHROOT: y 50 | DEFAULT_INTERPRETER: "/bin/sh -c" 51 | CMD_LINE: "useradd -p $(openssl passwd -1 tink) -s /bin/bash -d /home/tink/ -m -G sudo tink" 52 | - name: "enable ssh" 53 | image: quay.io/tinkerbell/actions/cexec:latest 54 | timeout: 90 55 | environment: 56 | BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1 57 | FS_TYPE: ext4 58 | CHROOT: y 59 | DEFAULT_INTERPRETER: "/bin/sh -c" 60 | CMD_LINE: "ssh-keygen -A; systemctl enable ssh.service; echo 'PasswordAuthentication yes' > /etc/ssh/sshd_config.d/60-cloudimg-settings.conf" 61 | - name: "disable apparmor" 62 | image: quay.io/tinkerbell/actions/cexec:latest 63 | timeout: 90 64 | environment: 65 | BLOCK_DEVICE: {{ index .Hardware.Disks 0 }}1 66 | FS_TYPE: ext4 67 | CHROOT: y 68 | DEFAULT_INTERPRETER: "/bin/sh -c" 69 | CMD_LINE: "systemctl disable apparmor; systemctl disable snapd" 70 | - name: "write netplan" 71 | image: quay.io/tinkerbell/actions/writefile:latest 72 | timeout: 90 73 | environment: 74 | DEST_DISK: {{ index .Hardware.Disks 0 }}1 75 | FS_TYPE: ext4 76 | DEST_PATH: /etc/netplan/config.yaml 77 | CONTENTS: | 78 | network: 79 | version: 2 80 | renderer: networkd 81 | ethernets: 82 | id0: 83 | match: 84 | name: en* 85 | dhcp4: true 86 | UID: 0 87 | GID: 0 88 | MODE: 0644 89 | DIRMODE: 0755 90 | - name: "kexec into os" 91 | image: ghcr.io/jacobweinstock/waitdaemon:latest 92 | timeout: 90 93 | pid: host 94 | environment: 95 | BLOCK_DEVICE: {{ formatPartition ( index .Hardware.Disks 0 ) 1 }} 96 | FS_TYPE: ext4 97 | IMAGE: quay.io/tinkerbell/actions/kexec:latest 98 | WAIT_SECONDS: 10 99 | volumes: 100 | - /var/run/docker.sock:/var/run/docker.sock 101 | 
--------------------------------------------------------------------------------
/stack/vagrant/ubuntu-download.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 |   name: download-image
6 | data:
7 |   entrypoint.sh: |-
8 |     #!/usr/bin/env bash
9 |     # This script is designed to download a cloud image file (.img) and then convert it to a .raw.gz file.
10 |     # This is purpose built so non-raw cloud image files can be used with the "image2disk" action.
11 |     # See https://artifacthub.io/packages/tbaction/tinkerbell-community/image2disk.
12 |     set -euxo pipefail
13 |     if ! which pigz qemu-img &>/dev/null; then
14 |       apk add --update pigz qemu-img
15 |     fi
16 |     image_url=$1
17 |     file=$2/${image_url##*/}
18 |     file=${file%.*}.raw.gz
19 |     if [[ ! -f "$file" ]]; then
20 |       wget "$image_url" -O image.img
21 |       qemu-img convert -O raw image.img image.raw
22 |       pigz image.raw && mv image.raw.gz "$file" # fix: was `pigz "$file"` — $file does not exist yet (the guard above proved that), so the job always failed under `set -e`; compress the converted raw image and move it into place instead
23 |       rm -f image.img image.raw
24 |     fi
25 | ---
26 | apiVersion: batch/v1
27 | kind: Job
28 | metadata:
29 |   name: download-ubuntu-jammy
30 | spec:
31 |   template:
32 |     spec:
33 |       containers:
34 |         - name: download-ubuntu-jammy
35 |           image: bash:5.2.2
36 |           command: ["/script/entrypoint.sh"]
37 |           args:
38 |             [
39 |               "https://cloud-images.ubuntu.com/daily/server/jammy/current/jammy-server-cloudimg-amd64.img",
40 |               "/output",
41 |             ]
42 |           volumeMounts:
43 |             - mountPath: /output
44 |               name: hook-artifacts
45 |             - mountPath: /script
46 |               name: configmap-volume
47 |       restartPolicy: OnFailure
48 |       volumes:
49 |         - name: hook-artifacts
50 |           hostPath:
51 |             path: /tmp
52 |             type: DirectoryOrCreate
53 |         - name: configmap-volume
54 |           configMap:
55 |             defaultMode: 0700
56 |             name: download-image
57 | 
--------------------------------------------------------------------------------
/stack/vagrant/workflow.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: "tinkerbell.org/v1alpha1"
2 | kind: Workflow
3 | metadata: 4 | name: playground-workflow 5 | spec: 6 | templateRef: ubuntu 7 | hardwareRef: machine1 8 | hardwareMap: 9 | device_1: $TINKERBELL_CLIENT_MAC 10 | --------------------------------------------------------------------------------