├── .circleci └── config.yml ├── .github ├── workflows │ ├── zz_generated.add-team-labels.yaml │ ├── zz_generated.add-to-project-board.yaml │ ├── zz_generated.create_release.yaml │ ├── zz_generated.create_release_pr.yaml │ └── zz_generated.gitleaks.yaml └── zz_generated.windows-code-signing.sh ├── .gitignore ├── .golangci.yml ├── .nancy-ignore ├── .nancy-ignore.generated ├── .pre-commit-config.yaml ├── CHANGELOG.md ├── CODEOWNERS ├── CONTRIBUTING.md ├── DCO ├── Dockerfile ├── LICENSE ├── LICENSE.md ├── Makefile ├── Makefile.gen.go.mk ├── README.md ├── SECURITY.md ├── client ├── assert_test.go ├── client.go └── client_test.go ├── config.yaml.dist ├── docs ├── README.md ├── compiling.md ├── configuration.md ├── etcd_clusters.md ├── example │ └── mayu.service.dist ├── flags.md ├── image │ ├── bootstrap.png │ └── statesMayu.jpg ├── inside.md ├── ipxe.md ├── machine_state_transition.md ├── ports.md ├── qemu.md ├── release.md ├── running.md ├── security.md └── templates.md ├── files ├── conf │ ├── lacp-bonding.conf │ └── module-bonding.conf └── my-service │ └── my-service.conf ├── flag.go ├── fs ├── fake.go ├── fs.go └── os.go ├── go.mod ├── go.sum ├── hostmgr ├── cluster.go ├── host.go ├── hoststate.go └── utils.go ├── httputil └── client.go ├── logging └── log.go ├── main.go ├── main_test.go ├── pxemgr ├── config.go ├── dnsmasq.go ├── error.go ├── etcd_discovery_handlers.go ├── filemanager.go ├── ignition.go ├── iputil.go ├── iputil_test.go ├── ipxe_handlers.go ├── key.go ├── proc_utils_linux.go ├── pxemanager.go ├── pxemanager_test.go └── schema.go ├── scripts ├── fetch-flatcar-image ├── fetch-flatcar-qemu-image ├── fetch-yochu-assets └── mayu.init ├── static_html └── index.html ├── templates ├── dnsmasq_template.conf ├── ignition.yaml └── snippets │ ├── extra.yaml │ ├── extra_nics.yaml │ ├── net_bond.yaml │ └── net_singlenic.yaml ├── tftproot ├── ipxe.efi └── undionly.kpxe └── version.go /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | orbs: 4 | architect: giantswarm/architect@4.35.5 5 | 6 | workflows: 7 | build: 8 | jobs: 9 | - architect/go-build: 10 | name: go-build 11 | binary: mayu 12 | filters: 13 | tags: 14 | only: /^v.*/ 15 | 16 | - architect/push-to-registries: 17 | context: architect 18 | name: push-to-registries 19 | requires: 20 | - go-build 21 | filters: 22 | tags: 23 | only: /^v.*/ 24 | -------------------------------------------------------------------------------- /.github/workflows/zz_generated.add-team-labels.yaml: -------------------------------------------------------------------------------- 1 | name: Add appropriate labels to issue 2 | 3 | on: 4 | issues: 5 | types: [assigned] 6 | 7 | jobs: 8 | build_user_list: 9 | name: Get yaml config of GS users 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Get user-mapping 13 | run: | 14 | mkdir -p artifacts 15 | wget --header "Authorization: token ${{ secrets.ISSUE_AUTOMATION }}" \ 16 | -O artifacts/users.yaml \ 17 | https://raw.githubusercontent.com/giantswarm/github/main/tools/issue-automation/user-mapping.yaml 18 | - name: Upload Artifact 19 | uses: actions/upload-artifact@v3 20 | with: 21 | name: users 22 | path: artifacts/users.yaml 23 | retention-days: 1 24 | 25 | add_label: 26 | name: Add team label when assigned 27 | runs-on: ubuntu-latest 28 | needs: build_user_list 29 | steps: 30 | - uses: actions/download-artifact@v3 31 | id: download-users 32 | with: 33 | name: users 34 | - name: Find team label based on user names 35 | run: | 36 | 
event_assignee=$(cat $GITHUB_EVENT_PATH | jq -r .assignee.login | tr '[:upper:]' '[:lower:]') 37 | echo "Issue assigned to: ${event_assignee}" 38 | 39 | TEAMS=$(cat ${{steps.download-users.outputs.download-path}}/users.yaml | tr '[:upper:]' '[:lower:]' | yq ".${event_assignee}.teams" -o csv | tr ',' ' ') 40 | 41 | echo "LABEL<<EOF" >> $GITHUB_ENV 42 | for team in ${TEAMS}; do 43 | echo "Team: ${team} | Label: team/${team}" 44 | echo "team/${team}" >> $GITHUB_ENV 45 | done 46 | echo "EOF" >> $GITHUB_ENV 47 | - name: Apply label to issue 48 | if: ${{ env.LABEL != '' && env.LABEL != 'null' && env.LABEL != null }} 49 | uses: actions-ecosystem/action-add-labels@v1 50 | with: 51 | github_token: ${{ secrets.ISSUE_AUTOMATION }} 52 | labels: | 53 | ${{ env.LABEL }} 54 | -------------------------------------------------------------------------------- /.github/workflows/zz_generated.add-to-project-board.yaml: -------------------------------------------------------------------------------- 1 | name: Add Issue to Project when assigned 2 | 3 | on: 4 | issues: 5 | types: 6 | - assigned 7 | - labeled 8 | 9 | jobs: 10 | build_user_list: 11 | name: Get yaml config of GS users 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Get user-mapping 15 | run: | 16 | mkdir -p artifacts 17 | wget --header "Authorization: token ${{ secrets.ISSUE_AUTOMATION }}" \ 18 | -O artifacts/users.yaml \ 19 | https://raw.githubusercontent.com/giantswarm/github/main/tools/issue-automation/user-mapping.yaml 20 | - name: Upload Artifact 21 | uses: actions/upload-artifact@v3 22 | with: 23 | name: users 24 | path: artifacts/users.yaml 25 | retention-days: 1 26 | - name: Get label-mapping 27 | run: | 28 | mkdir -p artifacts 29 | wget --header "Authorization: token ${{ secrets.ISSUE_AUTOMATION }}" \ 30 | -O artifacts/labels.yaml \ 31 | https://raw.githubusercontent.com/giantswarm/github/main/tools/issue-automation/label-mapping.yaml 32 | - name: Upload Artifact 33 | uses: actions/upload-artifact@v3 34 | with: 35 | name: labels 36 | path: artifacts/labels.yaml 37 | retention-days: 1 38 | 39 | add_to_personal_board: 40 | name: Add issue to personal board 41 | runs-on: ubuntu-latest 42 | needs: build_user_list 43 | if: github.event.action == 'assigned' 44 | steps: 45 | - uses: actions/download-artifact@v3 46 | id: download-users 47 | with: 48 | name: users 49 | - name: Find personal board based on user names 50 | run: | 51 | event_assignee=$(cat $GITHUB_EVENT_PATH | jq -r .assignee.login | tr '[:upper:]' '[:lower:]') 52 | echo "Issue assigned to: ${event_assignee}" 53 | 54 | BOARD=($(cat ${{steps.download-users.outputs.download-path}}/users.yaml | tr '[:upper:]' '[:lower:]' | yq ".${event_assignee}.personalboard")) 55 | echo "Personal board URL: ${BOARD}" 56 | 57 | echo "BOARD=${BOARD}" >> $GITHUB_ENV 58 | - name: Add issue to personal board 59 | if: ${{ env.BOARD != 'null' && env.BOARD != '' && env.BOARD != null }} 60 | uses: actions/add-to-project@main 61 | with: 62 | project-url: ${{ env.BOARD }} 63 | github-token: ${{ secrets.ISSUE_AUTOMATION }} 64 | 65 | add_to_team_board: 66 | name: Add issue to team board 67 | runs-on: ubuntu-latest 68 | needs: build_user_list 69 | if: github.event.action == 'labeled' 70 | steps: 71 | - uses: actions/download-artifact@v3 72 | id: download-labels 73 | with: 74 | name: labels 75 | - name: Find team board based on label 76 | run: | 77 | event_label=$(cat $GITHUB_EVENT_PATH | jq -r .label.name | tr '[:upper:]' '[:lower:]') 78 | echo "Issue labelled with: ${event_label}" 79 | 80 | BOARD=($(cat 
${{steps.download-labels.outputs.download-path}}/labels.yaml | tr '[:upper:]' '[:lower:]' | yq ".[\"${event_label}\"].projectboard")) 81 | echo "Team board URL: ${BOARD}" 82 | 83 | echo "BOARD=${BOARD}" >> $GITHUB_ENV 84 | - name: Add issue to team board 85 | if: ${{ env.BOARD != 'null' && env.BOARD != '' && env.BOARD != null }} 86 | uses: actions/add-to-project@main 87 | with: 88 | project-url: ${{ env.BOARD }} 89 | github-token: ${{ secrets.ISSUE_AUTOMATION }} 90 | -------------------------------------------------------------------------------- /.github/workflows/zz_generated.create_release_pr.yaml: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT. Generated with: 2 | # 3 | # devctl@6.17.1 4 | # 5 | name: Create Release PR 6 | on: 7 | push: 8 | branches: 9 | - 'legacy#release#v*.*.*' 10 | - 'main#release#v*.*.*' 11 | - 'main#release#major' 12 | - 'main#release#minor' 13 | - 'main#release#patch' 14 | - 'master#release#v*.*.*' 15 | - 'master#release#major' 16 | - 'master#release#minor' 17 | - 'master#release#patch' 18 | - 'release#v*.*.*' 19 | - 'release#major' 20 | - 'release#minor' 21 | - 'release#patch' 22 | - 'release-v*.*.x#release#v*.*.*' 23 | # "!" negates previous positive patterns so it has to be at the end. 24 | - '!release-v*.x.x#release#v*.*.*' 25 | workflow_call: 26 | inputs: 27 | branch: 28 | required: true 29 | type: string 30 | jobs: 31 | debug_info: 32 | name: Debug info 33 | runs-on: ubuntu-22.04 34 | steps: 35 | - name: Print github context JSON 36 | run: | 37 | cat <> $GITHUB_OUTPUT 56 | 57 | head="${head#refs/heads/}" # Strip "refs/heads/" prefix. 58 | if [[ $(echo "$head" | grep -o '#' | wc -l) -gt 1 ]]; then 59 | base="$(echo $head | cut -d '#' -f 1)" 60 | else 61 | base="${{ github.event.base_ref }}" 62 | fi 63 | 64 | base="${base#refs/heads/}" # Strip "refs/heads/" prefix. 65 | 66 | version="$(echo $head | awk -F# '{print $NF}')" 67 | if [[ $version =~ ^major|minor|patch$ ]]; then 68 | gh auth login --with-token <<<$(echo -n ${{ secrets.TAYLORBOT_GITHUB_ACTION }}) 69 | gh_api_get_latest_release_version() 70 | { 71 | if ! version="$(gh api "repos/$1/releases/latest" --jq '.tag_name[1:] | split(".") | .[0], .[1], .[2]')" 72 | then 73 | case "$version" in 74 | *Not\ Found*) echo Assuming v0.0.0, hooray first release! >&2 ; version="0 0 0" ;; 75 | *) version="" ; return 1 ;; 76 | esac 77 | fi 78 | echo "$version" 79 | } 80 | 81 | version_parts=($(gh_api_get_latest_release_version "${{ github.repository }}")) 82 | version_major=${version_parts[0]} 83 | version_minor=${version_parts[1]} 84 | version_patch=${version_parts[2]} 85 | case ${version} in 86 | patch) 87 | version_patch=$((version_patch+1)) 88 | ;; 89 | minor) 90 | version_minor=$((version_minor+1)) 91 | version_patch=0 92 | ;; 93 | major) 94 | version_major=$((version_major+1)) 95 | version_minor=0 96 | version_patch=0 97 | if [[ "${version_major}" != "1" ]]; then 98 | echo "needs_major_bump=true" >> $GITHUB_OUTPUT 99 | fi 100 | ;; 101 | *) 102 | echo "Unknown Semver level provided" 103 | exit 1 104 | ;; 105 | esac 106 | version="${version_major}.${version_minor}.${version_patch}" 107 | else 108 | version="${version#v}" # Strip "v" prefix. 109 | version_major=$(echo "${version}" | cut -d "." -f 1) 110 | version_minor=$(echo "${version}" | cut -d "." -f 2) 111 | version_patch=$(echo "${version}" | cut -d "." -f 3) 112 | # This will help us detect versions with suffixes as majors, i.e 3.0.0-alpha1. 113 | # Even though it's a pre-release, it's still a major. 
114 | if [[ $version_minor = 0 && $version_patch =~ ^0.* && $version_major != 1 ]]; then 115 | echo "needs_major_bump=true" >> $GITHUB_OUTPUT 116 | fi 117 | fi 118 | repo_name="$(echo '${{ github.repository }}' | awk -F '/' '{print $2}')" 119 | echo "repo_name=\"$repo_name\" base=\"$base\" head=\"$head\" version=\"$version\"" 120 | echo "repo_name=${repo_name}" >> $GITHUB_OUTPUT 121 | echo "base=${base}" >> $GITHUB_OUTPUT 122 | echo "head=${head}" >> $GITHUB_OUTPUT 123 | echo "version=${version}" >> $GITHUB_OUTPUT 124 | - name: Check if PR exists 125 | id: pr_exists 126 | env: 127 | GITHUB_TOKEN: "${{ secrets.TAYLORBOT_GITHUB_ACTION }}" 128 | run: | 129 | head="${{ steps.gather_facts.outputs.branch }}" 130 | branch="${head#refs/heads/}" # Strip "refs/heads/" prefix. 131 | if gh pr view --repo "${{ github.repository }}" "${branch}" --json state --jq .state | grep -i 'open' > /dev/null; then 132 | gh pr view --repo "${{ github.repository }}" "${branch}" 133 | echo "skip=true" >> $GITHUB_OUTPUT 134 | else 135 | echo "skip=false" >> $GITHUB_OUTPUT 136 | fi 137 | create_release_pr: 138 | name: Create release PR 139 | runs-on: ubuntu-22.04 140 | needs: 141 | - gather_facts 142 | if: ${{ needs.gather_facts.outputs.skip != 'true' }} 143 | env: 144 | architect_flags: "--organisation ${{ github.repository_owner }} --project ${{ needs.gather_facts.outputs.repo_name }}" 145 | steps: 146 | - uses: actions/setup-go@v3 147 | with: 148 | go-version: '=1.18.1' 149 | - name: Install architect 150 | uses: giantswarm/install-binary-action@v1.1.0 151 | with: 152 | binary: "architect" 153 | version: "6.11.0" 154 | - name: Checkout code 155 | uses: actions/checkout@v4 156 | with: 157 | ref: ${{ needs.gather_facts.outputs.branch }} 158 | - name: Prepare release changes 159 | run: | 160 | architect prepare-release ${{ env.architect_flags }} --version "${{ needs.gather_facts.outputs.version }}" 161 | - name: Update version field in Chart.yaml 162 | run: | 163 | # Define chart_dir 164 | repository="${{ needs.gather_facts.outputs.repo_name }}" 165 | chart="helm/${repository}" 166 | 167 | # Check chart directory. 168 | if [ ! -d "${chart}" ] 169 | then 170 | echo "Could not find chart directory '${chart}', adding app suffix." 171 | 172 | # Add app suffix. 173 | chart="helm/${repository}-app" 174 | 175 | # Check chart directory with app suffix. 176 | if [ ! -d "${chart}" ] 177 | then 178 | echo "Could not find chart directory '${chart}', removing app suffix." 179 | 180 | # Remove app suffix. 181 | chart="helm/${repository%-app}" 182 | 183 | if [ ! -d "${chart}" ] 184 | then 185 | # Print error. 186 | echo "Could not find chart directory '${chart}', doing nothing." 187 | fi 188 | fi 189 | fi 190 | 191 | # Define chart YAML. 192 | chart_yaml="${chart}/Chart.yaml" 193 | 194 | # Check chart YAML. 
195 | if [ -f "${chart_yaml}" ] 196 | then 197 | # check if version in Chart.yaml is templated using architect 198 | if [ $(grep -c "^version:.*\.Version.*$" "${chart_yaml}") = "0" ]; then 199 | yq -i '.version = "${{ needs.gather_facts.outputs.version }}"' "${chart_yaml}" 200 | fi 201 | fi 202 | 203 | - name: Bump go module defined in go.mod if needed 204 | run: | 205 | if [ "${{ needs.gather_facts.outputs.needs_major_bump }}" = true ] && test -f "go.mod"; then 206 | go install github.com/marwan-at-work/mod/cmd/mod@v0.5.0 207 | mod upgrade 208 | fi 209 | - name: Set up git identity 210 | run: | 211 | git config --local user.email "dev@giantswarm.io" 212 | git config --local user.name "taylorbot" 213 | - name: Create release commit 214 | env: 215 | version: "${{ needs.gather_facts.outputs.version }}" 216 | run: | 217 | git add -A 218 | git commit -m "Release v${{ env.version }}" 219 | - name: Push changes 220 | env: 221 | remote_repo: "https://${{ github.actor }}:${{ secrets.TAYLORBOT_GITHUB_ACTION }}@github.com/${{ github.repository }}.git" 222 | run: | 223 | git push "${remote_repo}" HEAD:${{ needs.gather_facts.outputs.branch }} 224 | - name: Create PR 225 | env: 226 | GITHUB_TOKEN: "${{ secrets.TAYLORBOT_GITHUB_ACTION }}" 227 | base: "${{ needs.gather_facts.outputs.base }}" 228 | version: "${{ needs.gather_facts.outputs.version }}" 229 | run: | 230 | gh pr create --assignee ${{ github.actor }} --title "Release v${{ env.version }}" --body "" --base ${{ env.base }} --head "${{ needs.gather_facts.outputs.branch }}" 231 | -------------------------------------------------------------------------------- /.github/workflows/zz_generated.gitleaks.yaml: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT. Generated with: 2 | # 3 | # devctl@6.17.1 4 | # 5 | name: gitleaks 6 | 7 | on: [pull_request] 8 | 9 | jobs: 10 | gitleaks: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | with: 15 | fetch-depth: '0' 16 | - name: gitleaks-action 17 | uses: giantswarm/gitleaks-action@main 18 | -------------------------------------------------------------------------------- /.github/zz_generated.windows-code-signing.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # DO NOT EDIT. Generated with: 4 | # 5 | # devctl@6.17.1 6 | # 7 | 8 | APPLICATION=$1 9 | VERSION=$2 10 | 11 | SIGNCODE_UTIL=quay.io/giantswarm/signcode-util:1.1.1 12 | 13 | echo "APPLICATION=${APPLICATION}" 14 | echo "VERSION=${VERSION}" 15 | echo "PWD=${PWD}" 16 | 17 | NO_CODE_SIGNING="Skipping Windows binary signing. In order to create a signed Windows binary, set the environment variables CODE_SIGNING_CERT_BUNDLE_PASSWORD and CODE_SIGNING_CERT_BUNDLE_BASE64." 18 | 19 | if [ "${CODE_SIGNING_CERT_BUNDLE_PASSWORD}" = "" ]; then 20 | echo "Variable CODE_SIGNING_CERT_BUNDLE_PASSWORD not set. ${NO_CODE_SIGNING}" 21 | exit 0 22 | fi; 23 | 24 | if [ "${CODE_SIGNING_CERT_BUNDLE_BASE64}" = "" ]; then 25 | echo "Variable CODE_SIGNING_CERT_BUNDLE_BASE64 not set. 
${NO_CODE_SIGNING}" 26 | exit 0 27 | fi; 28 | 29 | echo "Signing the Windows binary" 30 | 31 | mkdir -p certs 32 | 33 | echo "${CODE_SIGNING_CERT_BUNDLE_BASE64}" | base64 -d > certs/code-signing.p12 34 | 35 | mv "${APPLICATION}-v${VERSION}-windows-amd64.exe" "${APPLICATION}-v${VERSION}-windows-amd64-unsigned.exe" 36 | 37 | docker pull --quiet ${SIGNCODE_UTIL} 38 | 39 | docker run --rm \ 40 | -v "${PWD}/certs:/mnt/certs" \ 41 | -v "${PWD}:/mnt/binaries" \ 42 | ${SIGNCODE_UTIL} \ 43 | sign \ 44 | -pkcs12 /mnt/certs/code-signing.p12 \ 45 | -n "Giant Swarm CLI tool ${APPLICATION}" \ 46 | -i "https://github.com/giantswarm/${APPLICATION}" \ 47 | -t http://timestamp.digicert.com -verbose \ 48 | -in "/mnt/binaries/${APPLICATION}-v${VERSION}-windows-amd64-unsigned.exe" \ 49 | -out "/mnt/binaries/${APPLICATION}-v${VERSION}-windows-amd64.exe" \ 50 | -pass "${CODE_SIGNING_CERT_BUNDLE_PASSWORD}" 51 | 52 | echo "Verifying the signed binary" 53 | 54 | docker run --rm \ 55 | -v "${PWD}:/mnt/binaries" \ 56 | ${SIGNCODE_UTIL} \ 57 | verify \ 58 | "/mnt/binaries/${APPLICATION}-v${VERSION}-windows-amd64.exe" 59 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.swp 2 | config.yaml 3 | bin 4 | bin-dist 5 | cache 6 | */bindata.go 7 | infopusher/infopusher 8 | helpers/infopusher 9 | mayu 10 | cluster/ 11 | .gobuild 12 | -------------------------------------------------------------------------------- /.golangci.yml: -------------------------------------------------------------------------------- 1 | linters-settings: 2 | staticcheck: 3 | checks: 4 | - all 5 | - '-SA4005' 6 | gosec: 7 | excludes: 8 | - 'G114' 9 | -------------------------------------------------------------------------------- /.nancy-ignore: -------------------------------------------------------------------------------- 1 | CVE-2022-21698 2 | CVE-2023-45142 3 | -------------------------------------------------------------------------------- /.nancy-ignore.generated: -------------------------------------------------------------------------------- 1 | # This file is generated by https://github.com/giantswarm/github 2 | # Repository specific ignores should be added to .nancy-ignore 3 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | minimum_pre_commit_version: '2.17' 2 | repos: 3 | # shell scripts 4 | - repo: https://github.com/detailyang/pre-commit-shell 5 | rev: 1.0.5 6 | hooks: 7 | - id: shell-lint 8 | args: [ --format=json ] 9 | 10 | - repo: https://github.com/pre-commit/pre-commit-hooks 11 | rev: v4.5.0 12 | hooks: 13 | - id: check-added-large-files 14 | # check for unresolved merge conflicts 15 | - id: check-merge-conflict 16 | - id: check-shebang-scripts-are-executable 17 | - id: detect-private-key 18 | - id: end-of-file-fixer 19 | - id: mixed-line-ending 20 | - id: trailing-whitespace 21 | 22 | - repo: https://github.com/dnephin/pre-commit-golang 23 | rev: v0.5.1 24 | hooks: 25 | - id: go-fmt 26 | - id: go-mod-tidy 27 | - id: golangci-lint 28 | # timeout is needed for CI 29 | args: [ -E, gosec, -E, goconst, -E, govet, --timeout, 300s ] 30 | - id: go-imports 31 | args: [ -local, github.com/giantswarm/mayu ] 32 | -------------------------------------------------------------------------------- /CHANGELOG.md: 
-------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [Unreleased] 9 | 10 | ## [1.3.0] - 2021-07-01 11 | 12 | ### Changed 13 | 14 | - Switch all legacy `coreos` flags to `flatcar` (drops CoreOS support). 15 | 16 | ## [1.2.0] - 2021-06-09 17 | 18 | ### Added 19 | 20 | - Add join function for use in ignition templates. 21 | 22 | ## [1.1.1] - 2020-07-09 23 | 24 | ### Changed 25 | 26 | - Fix killing `dnsmasq` process if it is running. 27 | 28 | ## [1.1.0] - 2020-06-30 29 | 30 | ### Added 31 | 32 | - Add github workflows. 33 | 34 | ### Changed 35 | 36 | - Switch from `dep` to go modules. 37 | - Use `architect-orb` `0.9.0`. 38 | 39 | [Unreleased]: https://github.com/giantswarm/mayu/compare/v1.3.0...HEAD 40 | [1.3.0]: https://github.com/giantswarm/mayu/compare/v1.2.0...v1.3.0 41 | [1.2.0]: https://github.com/giantswarm/mayu/compare/v1.1.1...v1.2.0 42 | [1.1.1]: https://github.com/giantswarm/mayu/compare/v1.1.0...v1.1.1 43 | [1.1.0]: https://github.com/giantswarm/mayu/releases/tag/v1.1.0 44 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # generated by giantswarm/github actions - changes will be overwritten 2 | * @giantswarm/team-rocket 3 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to contribute 2 | 3 | Mayu is Apache 2.0 licensed and accepts contributions via GitHub pull requests. This document outlines some of the conventions on commit message formatting, contact points for developers and other resources to make getting your contribution into Mayu easier. 4 | 5 | # Email and chat 6 | 7 | - Email: [giantswarm](https://groups.google.com/forum/#!forum/giantswarm) 8 | - IRC: #[giantswarm](irc://irc.freenode.org:6667/#giantswarm) IRC channel on freenode.org 9 | 10 | ## Getting started 11 | 12 | - Fork the repository on GitHub 13 | - Read the [README.md](https://github.com/giantswarm/example-opensource-repo/blob/master/README.md) for build instructions 14 | 15 | ## Reporting Bugs and Creating Issues 16 | 17 | Reporting bugs is one of the best ways to contribute. If you find bugs or documentation mistakes in the Mayu project, please let us know by [opening an issue](https://github.com/giantswarm/mayu/issues/new). We treat bugs and mistakes very seriously and believe no issue is too small. Before creating a bug report, please check there that one does not already exist. 18 | 19 | To make your bug report accurate and easy to understand, please try to create bug reports that are: 20 | 21 | - Specific. Include as much details as possible: which version, what environment, what configuration, etc. You can also attach logs. 22 | 23 | - Reproducible. Include the steps to reproduce the problem. We understand some issues might be hard to reproduce, please includes the steps that might lead to the problem. If applicable, you can also attach affected data dir(s) and a stack trace to the bug report. 24 | 25 | - Isolated. Please try to isolate and reproduce the bug with minimum dependencies. 
It would significantly slow down the speed to fix a bug if too many dependencies are involved in a bug report. Debugging external systems that rely on PROJECT is out of scope, but we are happy to point you in the right direction or help you interact with PROJECT in the correct manner. 26 | 27 | - Unique. Do not duplicate existing bug reports. 28 | 29 | - Scoped. One bug per report. Do not follow up with another bug inside one report. 30 | 31 | You might also want to read [Elika Etemad’s article on filing good bug reports](http://fantasai.inkedblade.net/style/talks/filing-good-bugs/) before creating a bug report. 32 | 33 | We might ask you for further information to locate a bug. A duplicated bug report will be closed. 34 | 35 | ## Contribution flow 36 | 37 | This is a rough outline of what a contributor's workflow looks like: 38 | 39 | - Create a feature branch from where you want to base your work. This is usually master. 40 | - Make commits of logical units. 41 | - Make sure your commit messages are in the proper format (see below). 42 | - Push your changes to a topic branch in your fork of the repository. 43 | - Submit a pull request to giantswarm/PROJECT. 44 | - Adding unit tests will greatly improve the chance for getting a quick review and your PR accepted. 45 | - Your PR must receive a LGTM from one maintainer found in the MAINTAINERS file. 46 | - Before merging your PR be sure to squash all commits into one. 47 | 48 | Thanks for your contributions! 49 | 50 | ### Code style 51 | 52 | The coding style suggested by the Golang community is used. See the [style doc](https://github.com/golang/go/wiki/CodeReviewComments) for details. 53 | 54 | Please follow this style to make the code easy to review, maintain, and develop. 55 | 56 | ### Format of the Commit Message 57 | 58 | We follow a rough convention for commit messages that is designed to answer two 59 | questions: what changed and why. The subject line should feature the what and 60 | the body of the commit should describe the why. 61 | -------------------------------------------------------------------------------- /DCO: -------------------------------------------------------------------------------- 1 | Developer Certificate of Origin 2 | Version 1.1 3 | 4 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 5 | 660 York Street, Suite 102, 6 | San Francisco, CA 94110 USA 7 | 8 | Everyone is permitted to copy and distribute verbatim copies of this 9 | license document, but changing it is not allowed. 10 | 11 | 12 | Developer's Certificate of Origin 1.1 13 | 14 | By making a contribution to this project, I certify that: 15 | 16 | (a) The contribution was created in whole or in part by me and I 17 | have the right to submit it under the open source license 18 | indicated in the file; or 19 | 20 | (b) The contribution is based upon previous work that, to the best 21 | of my knowledge, is covered under an appropriate open source 22 | license and I have the right under that license to submit that 23 | work with modifications, whether created in whole or in part 24 | by me, under the same open source license (unless I am 25 | permitted to submit under a different license), as indicated 26 | in the file; or 27 | 28 | (c) The contribution was provided directly to me by some other 29 | person who certified (a), (b) or (c) and I have not modified 30 | it. 
31 | 32 | (d) I understand and agree that this project and the contribution 33 | are public and that a record of the contribution (including all 34 | personal information I submit with it, including my sign-off) is 35 | maintained indefinitely and may be redistributed consistent with 36 | this project or the open source license(s) involved. 37 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.15.0 2 | 3 | RUN apk add --no-cache git ca-certificates dnsmasq 4 | 5 | RUN mkdir -p /etc/mayu /var/lib/mayu /usr/lib/mayu 6 | COPY mayu /mayu 7 | COPY tftproot /usr/lib/mayu/tftproot 8 | COPY files /usr/lib/mayu/files 9 | COPY templates /usr/lib/mayu/templates 10 | COPY config.yaml* /etc/mayu/ 11 | 12 | WORKDIR /usr/lib/mayu 13 | 14 | RUN if [ ! -f /etc/mayu/config.yaml ]; then cp /etc/mayu/config.yaml.dist /etc/mayu/config.yaml; fi 15 | 16 | ENTRYPOINT ["/mayu"] 17 | CMD ["--cluster-directory=/var/lib/mayu","-v=12"] 18 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2016 - 2023 Giant Swarm GmbH 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT. Generated with: 2 | # 3 | # devctl@6.17.1 4 | # 5 | 6 | include Makefile.*.mk 7 | 8 | ##@ General 9 | 10 | # The help target prints out all targets with their descriptions organized 11 | # beneath their categories. The categories are represented by '##@' and the 12 | # target descriptions by '##'. 
The awk commands is responsible for reading the 13 | # entire set of makefiles included in this invocation, looking for lines of the 14 | # file as xyz: ## something, and then pretty-format the target and help. Then, 15 | # if there's a line with ##@ something, that gets pretty-printed as a category. 16 | # More info on the usage of ANSI control characters for terminal formatting: 17 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters 18 | # More info on the awk command: 19 | # http://linuxcommand.org/lc3_adv_awk.php 20 | 21 | .PHONY: help 22 | help: ## Display this help. 23 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z%\\\/_0-9-]+:.*?##/ { printf " \033[36m%-20s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 24 | -------------------------------------------------------------------------------- /Makefile.gen.go.mk: -------------------------------------------------------------------------------- 1 | # DO NOT EDIT. Generated with: 2 | # 3 | # devctl@6.17.1 4 | # 5 | 6 | PACKAGE_DIR := ./bin-dist 7 | 8 | APPLICATION := $(shell go list -m | cut -d '/' -f 3) 9 | BUILDTIMESTAMP := $(shell date -u '+%FT%TZ') 10 | GITSHA1 := $(shell git rev-parse --verify HEAD) 11 | MODULE := $(shell go list -m) 12 | OS := $(shell go env GOOS) 13 | SOURCES := $(shell find . -name '*.go') 14 | VERSION := $(shell architect project version) 15 | ifeq ($(OS), linux) 16 | EXTLDFLAGS := -static 17 | endif 18 | LDFLAGS ?= -w -linkmode 'auto' -extldflags '$(EXTLDFLAGS)' \ 19 | -X '$(shell go list -m)/pkg/project.buildTimestamp=${BUILDTIMESTAMP}' \ 20 | -X '$(shell go list -m)/pkg/project.gitSHA=${GITSHA1}' 21 | 22 | .DEFAULT_GOAL := build 23 | 24 | ##@ Go 25 | 26 | .PHONY: build build-darwin build-darwin-64 build-linux build-linux-arm64 build-windows-amd64 27 | build: $(APPLICATION) ## Builds a local binary. 28 | @echo "====> $@" 29 | build-darwin: $(APPLICATION)-darwin ## Builds a local binary for darwin/amd64. 30 | @echo "====> $@" 31 | build-darwin-arm64: $(APPLICATION)-darwin-arm64 ## Builds a local binary for darwin/arm64. 32 | @echo "====> $@" 33 | build-linux: $(APPLICATION)-linux ## Builds a local binary for linux/amd64. 34 | @echo "====> $@" 35 | build-linux-arm64: $(APPLICATION)-linux-arm64 ## Builds a local binary for linux/arm64. 36 | @echo "====> $@" 37 | build-windows-amd64: $(APPLICATION)-windows-amd64.exe ## Builds a local binary for windows/amd64. 38 | @echo "====> $@" 39 | 40 | $(APPLICATION): $(APPLICATION)-v$(VERSION)-$(OS)-amd64 41 | @echo "====> $@" 42 | cp -a $< $@ 43 | 44 | $(APPLICATION)-darwin: $(APPLICATION)-v$(VERSION)-darwin-amd64 45 | @echo "====> $@" 46 | cp -a $< $@ 47 | 48 | $(APPLICATION)-darwin-arm64: $(APPLICATION)-v$(VERSION)-darwin-arm64 49 | @echo "====> $@" 50 | cp -a $< $@ 51 | 52 | $(APPLICATION)-linux: $(APPLICATION)-v$(VERSION)-linux-amd64 53 | @echo "====> $@" 54 | cp -a $< $@ 55 | 56 | $(APPLICATION)-linux-arm64: $(APPLICATION)-v$(VERSION)-linux-arm64 57 | @echo "====> $@" 58 | cp -a $< $@ 59 | 60 | $(APPLICATION)-windows-amd64.exe: $(APPLICATION)-v$(VERSION)-windows-amd64.exe 61 | @echo "====> $@" 62 | cp -a $< $@ 63 | 64 | $(APPLICATION)-v$(VERSION)-%-amd64: $(SOURCES) 65 | @echo "====> $@" 66 | CGO_ENABLED=0 GOOS=$* GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o $@ . 67 | 68 | $(APPLICATION)-v$(VERSION)-%-arm64: $(SOURCES) 69 | @echo "====> $@" 70 | CGO_ENABLED=0 GOOS=$* GOARCH=arm64 go build -ldflags "$(LDFLAGS)" -o $@ . 
71 | 72 | $(APPLICATION)-v$(VERSION)-windows-amd64.exe: $(SOURCES) 73 | @echo "====> $@" 74 | CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o $@ . 75 | 76 | .PHONY: package-darwin-amd64 package-darwin-arm64 package-linux-amd64 package-linux-arm64 package-windows-amd64 77 | package-darwin-amd64: $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-darwin-amd64.tar.gz ## Prepares a packaged darwin/amd64 version. 78 | @echo "====> $@" 79 | package-darwin-arm64: $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-darwin-arm64.tar.gz ## Prepares a packaged darwin/arm64 version. 80 | @echo "====> $@" 81 | package-linux-amd64: $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-linux-amd64.tar.gz ## Prepares a packaged linux/amd64 version. 82 | @echo "====> $@" 83 | package-linux-arm64: $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-linux-arm64.tar.gz ## Prepares a packaged linux/arm64 version. 84 | @echo "====> $@" 85 | package-windows-amd64: $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-windows-amd64.zip ## Prepares a packaged windows/amd64 version. 86 | @echo "====> $@" 87 | 88 | $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-windows-amd64.zip: DIR=$(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-windows-amd64 89 | $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-windows-amd64.zip: $(APPLICATION)-v$(VERSION)-windows-amd64.exe 90 | @echo "====> $@" 91 | /bin/sh .github/zz_generated.windows-code-signing.sh $(APPLICATION) $(VERSION) 92 | @echo "Creating directory $(DIR)" 93 | mkdir -p $(DIR) 94 | cp $< $(DIR)/$(APPLICATION).exe 95 | cp README.md LICENSE $(DIR) 96 | cd ./bin-dist && zip $(APPLICATION)-v$(VERSION)-windows-amd64.zip $(APPLICATION)-v$(VERSION)-windows-amd64/* 97 | rm -rf $(DIR) 98 | rm -rf $< 99 | 100 | $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-%-amd64.tar.gz: DIR=$(PACKAGE_DIR)/$< 101 | $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-%-amd64.tar.gz: $(APPLICATION)-v$(VERSION)-%-amd64 102 | @echo "====> $@" 103 | mkdir -p $(DIR) 104 | cp $< $(DIR)/$(APPLICATION) 105 | cp README.md LICENSE $(DIR) 106 | tar -C $(PACKAGE_DIR) -cvzf $(PACKAGE_DIR)/$<.tar.gz $< 107 | rm -rf $(DIR) 108 | rm -rf $< 109 | 110 | $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-%-arm64.tar.gz: DIR=$(PACKAGE_DIR)/$< 111 | $(PACKAGE_DIR)/$(APPLICATION)-v$(VERSION)-%-arm64.tar.gz: $(APPLICATION)-v$(VERSION)-%-arm64 112 | @echo "====> $@" 113 | mkdir -p $(DIR) 114 | cp $< $(DIR)/$(APPLICATION) 115 | cp README.md LICENSE $(DIR) 116 | tar -C $(PACKAGE_DIR) -cvzf $(PACKAGE_DIR)/$<.tar.gz $< 117 | rm -rf $(DIR) 118 | rm -rf $< 119 | 120 | .PHONY: install 121 | install: ## Install the application. 122 | @echo "====> $@" 123 | go install -ldflags "$(LDFLAGS)" . 124 | 125 | .PHONY: run 126 | run: ## Runs go run main.go. 127 | @echo "====> $@" 128 | go run -ldflags "$(LDFLAGS)" -race . 129 | 130 | .PHONY: clean 131 | clean: ## Cleans the binary. 132 | @echo "====> $@" 133 | rm -f $(APPLICATION)* 134 | go clean 135 | 136 | .PHONY: imports 137 | imports: ## Runs goimports. 138 | @echo "====> $@" 139 | goimports -local $(MODULE) -w . 140 | 141 | .PHONY: lint 142 | lint: ## Runs golangci-lint. 143 | @echo "====> $@" 144 | golangci-lint run -E gosec -E goconst --timeout=15m ./... 145 | 146 | .PHONY: nancy 147 | nancy: ## Runs nancy (requires v1.0.37 or newer). 148 | @echo "====> $@" 149 | CGO_ENABLED=0 go list -json -deps ./... | nancy sleuth --skip-update-check --quiet --exclude-vulnerability-file ./.nancy-ignore --additional-exclude-vulnerability-files ./.nancy-ignore.generated 150 | 151 | .PHONY: test 152 | test: ## Runs go test with default values. 
153 | @echo "====> $@" 154 | go test -ldflags "$(LDFLAGS)" -race ./... 155 | 156 | .PHONY: build-docker 157 | build-docker: build-linux ## Builds docker image to registry. 158 | @echo "====> $@" 159 | cp -a $(APPLICATION)-linux $(APPLICATION) 160 | docker build -t ${APPLICATION}:${VERSION} . 161 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![CircleCI](https://dl.circleci.com/status-badge/img/gh/giantswarm/mayu/tree/master.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/giantswarm/mayu/tree/master) 2 | [![](https://pkg.go.dev/badge/github.com/giantswarm/mayu)](https://pkg.go.dev/github.com/giantswarm/mayu) 3 | [![](https://img.shields.io/docker/pulls/giantswarm/mayu.svg)](http://hub.docker.com/giantswarm/mayu) 4 | [![Go Report Card](https://goreportcard.com/badge/github.com/giantswarm/mayu)](https://goreportcard.com/report/github.com/giantswarm/mayu) 5 | 6 | # Mayu 7 | Mayu provides a set of mechanisms to bootstrap PXE-enabled bare metal nodes 8 | that must follow a specific configuration with Container Linux. 9 | 10 | ## Prerequisites 11 | 12 | Mayu requires some basic configuration and layer 2 connectivity to the rest 13 | of the nodes. Usually the cluster’s management node is used for this. The 14 | management node acts as a PXE server and should support three kinds of requests 15 | from the rest of the nodes: PXE, DHCP, and bootp. The rest of the nodes should 16 | be configured to boot via ethernet by default and share a network segment with 17 | the management node, so they get the PXE boot data from the management node on 18 | DHCP request. 19 | 20 | Developing Mayu requires the following tools to be installed. 21 | 22 | * `wget` 23 | * `go-bindata` 24 | * `cpio` 25 | 26 | ## Getting Mayu 27 | 28 | Get the latest Docker image here: https://quay.io/repository/giantswarm/mayu 29 | 30 | Clone the latest git repository version from here: https://github.com/giantswarm/mayu.git 31 | 32 | ## Running Mayu 33 | 34 | Configuring Mayu is explained in [docs/configuration.md](docs/configuration.md). After configuration have 35 | a look at [docs/running.md](docs/running.md) on how to start Mayu. 36 | 37 | ## Further Steps 38 | 39 | Check more detailed documentation: [docs](docs) 40 | 41 | Check code documentation: [godoc](https://godoc.org/github.com/giantswarm/mayu) 42 | 43 | ## Future Development 44 | 45 | - Future directions/vision 46 | 47 | ## Contact 48 | 49 | - Mailing list: [giantswarm](https://groups.google.com/forum/#!forum/giantswarm) 50 | - IRC: #[giantswarm](irc://irc.freenode.org:6667/#giantswarm) on freenode.org 51 | - Bugs: [issues](https://github.com/giantswarm/mayu/issues) 52 | 53 | ## Contributing & Reporting Bugs 54 | 55 | See [CONTRIBUTING](CONTRIBUTING.md) for details on submitting patches, the 56 | contribution workflow as well as reporting bugs. 57 | 58 | ## License 59 | 60 | Mayu is under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details. 61 | 62 | ## Origin of the Name 63 | 64 | `mayu` (まゆ[繭] pronounced "mah-yoo") is Japanese for cocoon. 65 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | ## Reporting a Vulnerability 4 | 5 | Please visit https://www.giantswarm.io/responsible-disclosure for information on reporting security issues. 
6 | -------------------------------------------------------------------------------- /client/assert_test.go: -------------------------------------------------------------------------------- 1 | package client_test 2 | 3 | import ( 4 | "net/http" 5 | "reflect" 6 | "testing" 7 | ) 8 | 9 | func assertHeader(t *testing.T, response testResponse, key string, val []string) { 10 | contentTypeHeader := response.Header[http.CanonicalHeaderKey(key)] 11 | if !reflect.DeepEqual(contentTypeHeader, val) { 12 | t.Fatalf("expected response header to be '%#v', got '%#v'", val, contentTypeHeader) 13 | } 14 | } 15 | 16 | func assertMethod(t *testing.T, response testResponse, method string) { 17 | if response.Method != method { 18 | t.Fatalf("expected response method to be '%s', got '%s'", method, response.Method) 19 | } 20 | } 21 | 22 | func assertPath(t *testing.T, response testResponse, path string) { 23 | if response.Path != path { 24 | t.Fatalf("expected response path to be '%s', got '%s'", path, response.Path) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /client/client.go: -------------------------------------------------------------------------------- 1 | // The client package is a client implementation of the mayu network API. 2 | package client 3 | 4 | import ( 5 | "bytes" 6 | "encoding/json" 7 | "fmt" 8 | "io/ioutil" 9 | "net" 10 | "net/http" 11 | 12 | "github.com/giantswarm/microerror" 13 | 14 | "github.com/giantswarm/mayu/hostmgr" 15 | "github.com/giantswarm/mayu/httputil" 16 | ) 17 | 18 | const contentType = "application/json" 19 | 20 | // Client implements the network API. Check the corresponding methods. 21 | type Client struct { 22 | // Scheme defines the protocol scheme. This is either http or https. 23 | Scheme string 24 | 25 | // Host is used to connect to mayu over network. 26 | Host string 27 | 28 | // Port is used to connect to mayu over network. 29 | Port uint16 30 | } 31 | 32 | // New creates a new configured client to interact with mayu over its network 33 | // API. 34 | func New(scheme, host string, port uint16) (*Client, error) { 35 | client := &Client{ 36 | Scheme: scheme, 37 | Host: host, 38 | Port: port, 39 | } 40 | 41 | return client, nil 42 | } 43 | 44 | func (c *Client) BootComplete(serial string, host hostmgr.Host) error { 45 | data, err := json.Marshal(host) 46 | 47 | if err != nil { 48 | return microerror.Mask(err) 49 | } 50 | 51 | resp, err := httputil.Put(fmt.Sprintf("%s://%s:%d/admin/host/%s/boot_complete", c.Scheme, c.Host, c.Port, serial), "application/json", bytes.NewBuffer(data)) 52 | if err != nil { 53 | return microerror.Mask(err) 54 | } 55 | defer resp.Body.Close() 56 | return nil 57 | } 58 | 59 | // SetProviderId sets the provider ID given by value for a node given by serial. 60 | func (c *Client) SetProviderId(serial, value string) error { 61 | data, err := json.Marshal(hostmgr.Host{ 62 | ProviderId: value, 63 | }) 64 | if err != nil { 65 | return microerror.Mask(err) 66 | } 67 | 68 | resp, err := httputil.Put(fmt.Sprintf("%s://%s:%d/admin/host/%s/set_provider_id", c.Scheme, c.Host, c.Port, serial), contentType, bytes.NewBuffer(data)) 69 | if err != nil { 70 | return microerror.Mask(err) 71 | } 72 | defer resp.Body.Close() 73 | 74 | if resp.StatusCode > 399 { 75 | return microerror.Mask(fmt.Errorf("invalid status code '%d'", resp.StatusCode)) 76 | } 77 | 78 | return nil 79 | } 80 | 81 | // SetIPMIAddr sets the IPMI address given by value for a node given by serial. 
82 | func (c *Client) SetIPMIAddr(serial, value string) error { 83 | data, err := json.Marshal(hostmgr.Host{ 84 | IPMIAddr: net.ParseIP(value), 85 | }) 86 | if err != nil { 87 | return microerror.Mask(err) 88 | } 89 | 90 | resp, err := httputil.Put(fmt.Sprintf("%s://%s:%d/admin/host/%s/set_ipmi_addr", c.Scheme, c.Host, c.Port, serial), contentType, bytes.NewBuffer(data)) 91 | if err != nil { 92 | return microerror.Mask(err) 93 | } 94 | defer resp.Body.Close() 95 | 96 | if resp.StatusCode > 399 { 97 | return microerror.Mask(fmt.Errorf("invalid status code '%d'", resp.StatusCode)) 98 | } 99 | 100 | return nil 101 | } 102 | 103 | // SetEtcdClusterToken sets the etcd cluster token given by value for a node given by serial. 104 | func (c *Client) SetEtcdClusterToken(serial, value string) error { 105 | data, err := json.Marshal(hostmgr.Host{ 106 | EtcdClusterToken: value, 107 | }) 108 | if err != nil { 109 | return microerror.Mask(err) 110 | } 111 | 112 | resp, err := httputil.Put(fmt.Sprintf("%s://%s:%d/admin/host/%s/set_etcd_cluster_token", c.Scheme, c.Host, c.Port, serial), contentType, bytes.NewBuffer(data)) 113 | if err != nil { 114 | return microerror.Mask(err) 115 | } 116 | defer resp.Body.Close() 117 | 118 | if resp.StatusCode > 399 { 119 | return microerror.Mask(fmt.Errorf("invalid status code '%d'", resp.StatusCode)) 120 | } 121 | 122 | return nil 123 | } 124 | 125 | // SetState sets the machine state for a node given by serial. 126 | func (c *Client) SetState(serial, value string) error { 127 | state, err := hostmgr.HostState(value) 128 | if err != nil { 129 | return microerror.Mask(err) 130 | } 131 | 132 | data, err := json.Marshal(hostmgr.Host{ 133 | State: state, 134 | }) 135 | if err != nil { 136 | return microerror.Mask(err) 137 | } 138 | 139 | resp, err := httputil.Put(fmt.Sprintf("%s://%s:%d/admin/host/%s/set_state", c.Scheme, c.Host, c.Port, serial), contentType, bytes.NewBuffer(data)) 140 | if err != nil { 141 | return microerror.Mask(err) 142 | } 143 | defer resp.Body.Close() 144 | 145 | if resp.StatusCode > 399 { 146 | return microerror.Mask(fmt.Errorf("invalid status code '%d'", resp.StatusCode)) 147 | } 148 | 149 | return nil 150 | } 151 | 152 | // Override overrides a template properties such as docker_version, yochu_version, etc 153 | func (c *Client) Override(serial, property, value string) error { 154 | data, err := json.Marshal(hostmgr.Host{ 155 | Overrides: map[string]interface{}{property: value}, 156 | }) 157 | if err != nil { 158 | return microerror.Mask(err) 159 | } 160 | 161 | resp, err := httputil.Put(fmt.Sprintf("%s://%s:%d/admin/host/%s/override", c.Scheme, c.Host, c.Port, serial), contentType, bytes.NewBuffer(data)) 162 | if err != nil { 163 | return microerror.Mask(err) 164 | } 165 | defer resp.Body.Close() 166 | 167 | if resp.StatusCode > 399 { 168 | return microerror.Mask(fmt.Errorf("invalid status code '%d'", resp.StatusCode)) 169 | } 170 | 171 | return nil 172 | } 173 | 174 | // List fetches a list of node information within the current cluster. 
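// A minimal usage sketch (illustrative only, not part of the original source; the
// scheme, host, and port values below are assumptions):
//
//	c, err := client.New("http", "mayu.example.internal", 4080)
//	if err != nil {
//		// handle error
//	}
//	hosts, err := c.List()
//	if err != nil {
//		// handle error
//	}
//	for _, h := range hosts {
//		fmt.Println(h.Serial, h.State)
//	}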
175 | func (c *Client) List() ([]hostmgr.Host, error) { 176 | list := []hostmgr.Host{} 177 | 178 | resp, err := http.Get(fmt.Sprintf("%s://%s:%d/admin/hosts", c.Scheme, c.Host, c.Port)) 179 | if err != nil { 180 | return list, microerror.Mask(err) 181 | } 182 | defer resp.Body.Close() 183 | 184 | if resp.StatusCode > 399 { 185 | return nil, microerror.Mask(fmt.Errorf("invalid status code '%d'", resp.StatusCode)) 186 | } 187 | body, err := ioutil.ReadAll(resp.Body) 188 | if err != nil { 189 | return list, microerror.Mask(err) 190 | } 191 | 192 | err = json.Unmarshal(body, &list) 193 | if err != nil { 194 | return list, microerror.Mask(err) 195 | } 196 | 197 | return list, nil 198 | } 199 | 200 | // Status fetches status information for a node given by serial. 201 | func (c *Client) Status(serial string) (hostmgr.Host, error) { 202 | var host hostmgr.Host 203 | 204 | resp, err := http.Get(fmt.Sprintf("%s://%s:%d/admin/hosts", c.Scheme, c.Host, c.Port)) 205 | if err != nil { 206 | return host, microerror.Mask(err) 207 | } 208 | defer resp.Body.Close() 209 | if resp.StatusCode > 399 { 210 | return host, microerror.Mask(fmt.Errorf("invalid status code '%d'", resp.StatusCode)) 211 | } 212 | 213 | body, err := ioutil.ReadAll(resp.Body) 214 | if err != nil { 215 | return host, microerror.Mask(err) 216 | } 217 | 218 | list := []hostmgr.Host{} 219 | err = json.Unmarshal(body, &list) 220 | if err != nil { 221 | return host, microerror.Mask(err) 222 | } 223 | 224 | for _, host = range list { 225 | if host.Serial == serial { 226 | return host, nil 227 | } 228 | } 229 | 230 | return host, microerror.Mask(fmt.Errorf("host %s not found", serial)) 231 | } 232 | -------------------------------------------------------------------------------- /client/client_test.go: -------------------------------------------------------------------------------- 1 | package client_test 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "net" 8 | "net/http" 9 | "net/http/httptest" 10 | "net/url" 11 | "reflect" 12 | "strconv" 13 | "testing" 14 | 15 | "github.com/giantswarm/mayu/client" 16 | "github.com/giantswarm/mayu/hostmgr" 17 | ) 18 | 19 | type testResponse struct { 20 | Body []byte 21 | Header http.Header 22 | Method string 23 | Path string 24 | } 25 | 26 | func urlToHostPort(t *testing.T, URL string) (string, string) { 27 | u, err := url.Parse(URL) 28 | if err != nil { 29 | t.Fatalf("url.Parse returned error: %#v", err) 30 | } 31 | host, port, err := net.SplitHostPort(u.Host) 32 | if err != nil { 33 | t.Fatalf("net.SplitHostPort returned error: %#v", err) 34 | } 35 | 36 | return host, port 37 | } 38 | 39 | func newClientAndServer(t *testing.T, handler http.Handler) (*client.Client, *httptest.Server) { 40 | ts := httptest.NewServer(handler) 41 | 42 | host, port := urlToHostPort(t, ts.URL) 43 | ui, err := strconv.ParseUint(port, 10, 16) 44 | if err != nil { 45 | t.Fatalf("strconv.ParseUint returned error: %#v", err) 46 | } 47 | 48 | client, err := client.New("http", host, uint16(ui)) 49 | if err != nil { 50 | t.Fatalf("client.New returned error: %#v", err) 51 | } 52 | 53 | return client, ts 54 | } 55 | 56 | // 57 | // Client.SetProviderId 58 | // 59 | 60 | // Test_Client_004 checks for Client.SetProviderId to provide proper information 61 | // to the server as expected. 
62 | func Test_Client_004(t *testing.T) { 63 | var response testResponse 64 | 65 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 66 | body, err := ioutil.ReadAll(r.Body) 67 | r.Body.Close() 68 | if err != nil { 69 | t.Fatal(err) 70 | } 71 | 72 | response = testResponse{ 73 | Body: body, 74 | Header: r.Header, 75 | Method: r.Method, 76 | Path: r.URL.Path, 77 | } 78 | 79 | w.WriteHeader(http.StatusOK) 80 | _, _ = w.Write([]byte("ok")) 81 | })) 82 | defer ts.Close() 83 | 84 | err := newClient.SetProviderId("serial", "provider-id") 85 | if err != nil { 86 | t.Fatalf("Client.SetProviderId returned error: %#v", err) 87 | } 88 | 89 | data, err := json.Marshal(hostmgr.Host{ 90 | ProviderId: "provider-id", 91 | }) 92 | if err != nil { 93 | t.Fatalf("json.Marshal returned error: %#v", err) 94 | } 95 | if string(response.Body) != string(data) { 96 | t.Fatalf("expected response body to be '%s', got '%s'", string(response.Body), string(data)) 97 | } 98 | 99 | assertHeader(t, response, "content-type", []string{"application/json"}) 100 | assertMethod(t, response, "PUT") 101 | assertPath(t, response, fmt.Sprintf("/admin/host/%s/set_provider_id", "serial")) 102 | } 103 | 104 | // Test_Client_005 checks for Client.SetProviderId to provide proper error 105 | // information to the client as expected, when there are errors returned from 106 | // the server. 107 | func Test_Client_005(t *testing.T) { 108 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 109 | w.WriteHeader(http.StatusInternalServerError) 110 | _, _ = w.Write([]byte("internal server error")) 111 | })) 112 | defer ts.Close() 113 | 114 | err := newClient.SetProviderId("serial", "provider-id") 115 | if err == nil { 116 | t.Fatalf("Client.SetProviderId NOT returned error") 117 | } 118 | } 119 | 120 | // Test_Client_006 checks for Client.SetProviderId to provide proper error 121 | // information to the client as expected, when there is no server running. 122 | func Test_Client_006(t *testing.T) { 123 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 124 | w.WriteHeader(http.StatusInternalServerError) 125 | _, _ = w.Write([]byte("internal server error")) 126 | })) 127 | // Immediatelly close the server. 128 | ts.Close() 129 | 130 | err := newClient.SetProviderId("serial", "provider-id") 131 | if err == nil { 132 | t.Fatalf("Client.SetProviderId NOT returned error") 133 | } 134 | } 135 | 136 | // 137 | // Client.SetIPMIAddr 138 | // 139 | 140 | // Test_Client_007 checks for Client.SetIPMIAddr to provide proper information 141 | // to the server as expected. 
142 | func Test_Client_007(t *testing.T) { 143 | var response testResponse 144 | 145 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 146 | body, err := ioutil.ReadAll(r.Body) 147 | r.Body.Close() 148 | if err != nil { 149 | t.Fatal(err) 150 | } 151 | 152 | response = testResponse{ 153 | Body: body, 154 | Header: r.Header, 155 | Method: r.Method, 156 | Path: r.URL.Path, 157 | } 158 | 159 | w.WriteHeader(http.StatusOK) 160 | _, _ = w.Write([]byte("ok")) 161 | })) 162 | defer ts.Close() 163 | 164 | err := newClient.SetIPMIAddr("serial", "127.0.0.1") 165 | if err != nil { 166 | t.Fatalf("Client.SetIPMIAddr returned error: %#v", err) 167 | } 168 | 169 | data, err := json.Marshal(hostmgr.Host{ 170 | IPMIAddr: net.ParseIP("127.0.0.1"), 171 | }) 172 | if err != nil { 173 | t.Fatalf("json.Marshal returned error: %#v", err) 174 | } 175 | if string(response.Body) != string(data) { 176 | t.Fatalf("expected response body to be '%s', got '%s'", string(response.Body), string(data)) 177 | } 178 | 179 | assertHeader(t, response, "content-type", []string{"application/json"}) 180 | assertMethod(t, response, "PUT") 181 | assertPath(t, response, fmt.Sprintf("/admin/host/%s/set_ipmi_addr", "serial")) 182 | } 183 | 184 | // Test_Client_008 checks for Client.SetIPMIAddr to provide proper error 185 | // information to the client as expected, when there are errors returned from 186 | // the server. 187 | func Test_Client_008(t *testing.T) { 188 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 189 | w.WriteHeader(http.StatusInternalServerError) 190 | _, _ = w.Write([]byte("internal server error")) 191 | })) 192 | defer ts.Close() 193 | 194 | err := newClient.SetIPMIAddr("serial", "127.0.0.1") 195 | if err == nil { 196 | t.Fatalf("Client.SetIPMIAddr NOT returned error") 197 | } 198 | } 199 | 200 | // Test_Client_009 checks for Client.SetIPMIAddr to provide proper error 201 | // information to the client as expected, when there is no server running. 202 | func Test_Client_009(t *testing.T) { 203 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 204 | w.WriteHeader(http.StatusInternalServerError) 205 | _, _ = w.Write([]byte("internal server error")) 206 | })) 207 | // Immediatelly close the server. 208 | ts.Close() 209 | 210 | err := newClient.SetIPMIAddr("serial", "127.0.0.1") 211 | if err == nil { 212 | t.Fatalf("Client.SetIPMIAddr NOT returned error") 213 | } 214 | } 215 | 216 | // 217 | // Client.List 218 | // 219 | 220 | // Test_Client_013 checks for Client.List to provide proper information 221 | // to the server as expected. 
222 | func Test_Client_013(t *testing.T) { 223 | var response testResponse 224 | expectedList := []hostmgr.Host{ 225 | hostmgr.Host{ 226 | Id: 101, 227 | Name: "test-host-101", 228 | }, 229 | hostmgr.Host{ 230 | Id: 102, 231 | Name: "test-host-102", 232 | }, 233 | hostmgr.Host{ 234 | Id: 103, 235 | Name: "test-host-103", 236 | }, 237 | } 238 | 239 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 240 | response = testResponse{ 241 | Method: r.Method, 242 | Path: r.URL.Path, 243 | } 244 | 245 | if err := json.NewEncoder(w).Encode(expectedList); err != nil { 246 | t.Fatalf("json.NewEncoder(w).Encode returned error: %#v", err) 247 | } 248 | })) 249 | defer ts.Close() 250 | 251 | list, err := newClient.List() 252 | if err != nil { 253 | t.Fatalf("Client.List returned error: %#v", err) 254 | } 255 | 256 | if !reflect.DeepEqual(list, expectedList) { 257 | t.Fatalf("expected %#v got %#v", expectedList, list) 258 | } 259 | 260 | assertMethod(t, response, "GET") 261 | assertPath(t, response, "/admin/hosts") 262 | } 263 | 264 | // Test_Client_014 checks for Client.List to provide proper error 265 | // information to the client as expected, when there are errors returned from 266 | // the server. 267 | func Test_Client_014(t *testing.T) { 268 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 269 | w.WriteHeader(http.StatusInternalServerError) 270 | _, _ = w.Write([]byte("internal server error")) 271 | })) 272 | defer ts.Close() 273 | 274 | _, err := newClient.List() 275 | if err == nil { 276 | t.Fatalf("Client.List NOT returned error") 277 | } 278 | } 279 | 280 | // Test_Client_015 checks for Client.List to provide proper error 281 | // information to the client as expected, when there is no server running. 282 | func Test_Client_015(t *testing.T) { 283 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 284 | w.WriteHeader(http.StatusInternalServerError) 285 | _, _ = w.Write([]byte("internal server error")) 286 | })) 287 | // Immediatelly close the server. 288 | ts.Close() 289 | 290 | _, err := newClient.List() 291 | if err == nil { 292 | t.Fatalf("Client.List NOT returned error") 293 | } 294 | } 295 | 296 | // 297 | // Client.Status 298 | // 299 | 300 | // Test_Client_016 checks for Client.Status to provide proper information 301 | // to the server as expected. 
302 | func Test_Client_016(t *testing.T) { 303 | var response testResponse 304 | returnedList := []hostmgr.Host{ 305 | hostmgr.Host{ 306 | Id: 101, 307 | Serial: "serial-101", 308 | Name: "test-host-101", 309 | }, 310 | hostmgr.Host{ 311 | Id: 102, 312 | Serial: "serial-102", 313 | Name: "test-host-102", 314 | }, 315 | hostmgr.Host{ 316 | Id: 103, 317 | Serial: "serial-103", 318 | Name: "test-host-103", 319 | }, 320 | } 321 | 322 | expectedHost := hostmgr.Host{ 323 | Id: 102, 324 | Serial: "serial-102", 325 | Name: "test-host-102", 326 | } 327 | 328 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 329 | response = testResponse{ 330 | Method: r.Method, 331 | Path: r.URL.Path, 332 | } 333 | 334 | if err := json.NewEncoder(w).Encode(returnedList); err != nil { 335 | t.Fatalf("json.NewEncoder(w).Encode returned error: %#v", err) 336 | } 337 | })) 338 | defer ts.Close() 339 | 340 | host, err := newClient.Status("serial-102") 341 | if err != nil { 342 | t.Fatalf("Client.Status returned error: %#v", err) 343 | } 344 | 345 | if !reflect.DeepEqual(host, expectedHost) { 346 | t.Fatalf("expected %#v got %#v", expectedHost, host) 347 | } 348 | 349 | assertMethod(t, response, "GET") 350 | assertPath(t, response, "/admin/hosts") 351 | } 352 | 353 | // Test_Client_017 checks for Client.Status to provide proper error 354 | // information to the client as expected, when there are errors returned from 355 | // the server. 356 | func Test_Client_017(t *testing.T) { 357 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 358 | w.WriteHeader(http.StatusInternalServerError) 359 | _, _ = w.Write([]byte("internal server error")) 360 | })) 361 | defer ts.Close() 362 | 363 | _, err := newClient.Status("serial") 364 | if err == nil { 365 | t.Fatalf("Client.Status NOT returned error") 366 | } 367 | } 368 | 369 | // Test_Client_018 checks for Client.Status to provide proper error 370 | // information to the client as expected, when there is no server running. 371 | func Test_Client_018(t *testing.T) { 372 | newClient, ts := newClientAndServer(t, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 373 | w.WriteHeader(http.StatusInternalServerError) 374 | _, _ = w.Write([]byte("internal server error")) 375 | })) 376 | // Immediatelly close the server. 
377 | ts.Close() 378 | 379 | _, err := newClient.Status("serial") 380 | if err == nil { 381 | t.Fatalf("Client.Status NOT returned error") 382 | } 383 | } 384 | -------------------------------------------------------------------------------- /config.yaml.dist: -------------------------------------------------------------------------------- 1 | # used to install machines and if no coreos 2 | # version is given in the profile 3 | default_flatcar_version: 1409.7.0 4 | 5 | network: 6 | bind_addr: 10.0.0.254 7 | pxe: 8 | enabled: true 9 | pxe_interface: 10 | interface_name: eth0 11 | ip_range: 12 | start: 10.0.0.10 13 | end: 10.0.0.30 14 | subnet_gateway: 10.0.0.1 15 | subnet_size: 24 16 | primary_nic: 17 | interface_name: eth0 18 | ip_range: 19 | start: 10.0.0.101 20 | end: 10.0.0.200 21 | subnet_size: 24 22 | subnet_gateway: 10.0.0.1 23 | network_model: 24 | type: singlenic 25 | routes: 26 | - destination_cidr: 87.12.13.0/28 27 | route_hop: 10.0.4.199 28 | dns: 29 | - 8.8.8.8 30 | - 1.1.1.1 31 | 32 | extra_nics: 33 | - interface_name: eth1 34 | ip_range: 35 | start: 10.0.5.210 36 | end: 10.0.5.245 37 | subnet_size: 24 38 | network_model: 39 | type: singlenic 40 | routes: 41 | - destination_cidr: 1.1.4.0/28 42 | route_hop: 10.0.5.200 43 | uefi: false 44 | ntp: [0.pool.ntp.org, 1.pool.ntp.org] 45 | 46 | profiles: 47 | - name: core 48 | quantity: 3 49 | - name: default 50 | 51 | templates_env: 52 | users: 53 | - Key: ssh-rsa xxxxxxxxxxxxxxx 54 | Name: my_user 55 | - Key: ssh-rsa yyyyyyyyyyyyyyy 56 | Name: second_user 57 | mayu_https_endpoint: https://10.0.1.254:4080 58 | mayu_http_endpoint: http://10.0.1.254:4081 59 | mayu_api_ip: 10.0.1.254 60 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Mayu Documentation 2 | 3 | Here we provide more detailed documentation about Mayu. 4 | 5 | ## Table Of Contents 6 | 7 | - [Mayu Configuration](configuration.md) 8 | - [Flags of the Mayu binary](flags.md) 9 | - [Running Mayu](running.md) 10 | - [Mayu Cluster Insides](inside.md) 11 | - [Machine State Transitions](machine_state_transition.md) 12 | - [Mayuctl](mayuctl.md) 13 | - [Release A New Mayu Version](release.md) 14 | - [Templates Env](templates.md) 15 | - [iPXE Setup](ipxe.md) 16 | - [Compiling Mayu](compiling.md) 17 | - [Security Overview](security.md) 18 | - [Qemu KVM inside Container Linux](qemu.md) 19 | -------------------------------------------------------------------------------- /docs/compiling.md: -------------------------------------------------------------------------------- 1 | # Compiling Mayu 2 | 3 | In order to compile Mayu you need to have `golang` installed. 4 | 5 | Use following command to compile `mayu` binary: 6 | ``` 7 | CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' . 8 | ``` 9 | This will compile statically linked `mayu` binary. 10 | 11 | ## Building docker image 12 | 13 | After `mayu` binary successfully compiled docker image can be built: 14 | ``` 15 | docker build -t mayu . 16 | ``` 17 | 18 | ## Updating vendors 19 | 20 | To update the vendored libraries used by Mayu's binaries you need to have [glide](https://github.com/Masterminds/glide) installed. 
21 | 22 | Updating the vendored libraries is done by running the following `glide` targets: 23 | 24 | ```nohighlight 25 | $ glide up -v 26 | ``` 27 | -------------------------------------------------------------------------------- /docs/configuration.md: -------------------------------------------------------------------------------- 1 | # Mayu Configuration 2 | 3 | Here we provide more detailed documentation about configuring Mayu. By 4 | default TLS is enabled when communicating with `mayu` over network. If your 5 | setup does not provide or rely on TLS for whatever reasons, you can set 6 | `--no-tls`. The corresponding flag for `mayuctl` is `--no-tls`. 7 | Check [mayuctl](mayuctl.md) for more information about the client. 8 | 9 | ## File Tree 10 | 11 | ```nohighlight 12 | . 13 | |-- mayu - the mayu executable 14 | |-- config.yaml.dist - mayu configuration file template 15 | |-- templates 16 | | |-- dnsmasq_template.conf - template file used to generate the dnsmasq configuration 17 | | |-- ignition.yaml - template used to generate the ignition 18 | | |-- snippets - directory containing some template snippets used in the ignition template 19 | | | |-- net_bond.yaml 20 | | | |-- net_singlenic.yaml 21 | | | |-- extra.yaml 22 | `-- tftproot 23 | `-- undionly.kpxe - ipxe pxe image 24 | `-- ipxe.efi - ipxe pxe image for UEFI enabled hosts 25 | ``` 26 | 27 | For a new environment to be configured, there are three main files that might 28 | have to be adapted: `config.yaml`, `ignition.yaml` and one of the 29 | snippets `extra.yaml`, `net_bond.yaml` or `net_singlenic.yaml`. 30 | 31 | 32 | ## `/etc/mayu/config.yaml` 33 | 34 | The very first thing to do is to copy `config.yaml.dist` to 35 | `/etc/mayu/config.yaml` and modify it regarding your needs. The initial 36 | section configures the network, profiles for the machines and the versions 37 | of the software that should be installed via Yochu. 38 | 39 | ### Default Container Linux Version 40 | 41 | To successfully run Mayu you need to specify a default Container Linux version. This version is used to bootstrap 42 | machine. So whenever a new machine starts this Container Linux version is used to install Container Linux on the disk of 43 | the machine. You can also specify other Container Linux versions within profiles or single machines that overwrite 44 | this default value. 45 | 46 | Most importantly you also need to fetch the Container Linux image version. This is explained in the [Running Mayu](running.md) section. 47 | 48 | ```yaml 49 | default_flatcar_version: 2543.3.0 50 | ``` 51 | 52 | ### Network 53 | 54 | ```yaml 55 | network: 56 | pxe: true 57 | uefi: false 58 | pxe_interface: eth0 59 | machine_interface: eth0 60 | bind_addr: 10.0.3.251 61 | bootstrap_range: 62 | start: 10.0.3.10 63 | end: 10.0.3.30 64 | ip_range: 65 | start: 10.0.4.31 66 | end: 10.0.4.70 67 | dns: [8.8.8.8] 68 | ntp: [0.pool.ntp.org, 1.pool.ntp.org] 69 | router: 10.0.3.251 70 | subnet_size: 24 71 | subnet_gateway: 10.0.4.251 72 | network_model: singlenic 73 | ``` 74 | 75 | Here we have three less obvious settings: the `bootstrap_range` is used by the 76 | DHCP server during the bootstrap procedure and the nodes only use it during the 77 | installation. The `ip_range` is a range of addresses that will be statically 78 | assigned to the cluster nodes. The `network_model` specifies which network 79 | template snippet will be used. 
80 | 81 | 82 | `pxe_interface` defines on which network interface Mayu listens for PXE and DHCP requests. 83 | 84 | 85 | `machine_interface` defines the name of the interface that is used for configuring the network when `network_model: singlenic` is used. 86 | 87 | ### Profiles 88 | 89 | ```yaml 90 | profiles: 91 | - name: core 92 | quantity: 3 93 | - name: default 94 | ``` 95 | 96 | Each profile has a `name` and a `quantity` 97 | (the number of cluster nodes that should have this profile). The name can be used for distinguishing machines in the ignition templates. Once all the profiles' quantities are matched 98 | (in this example that means we have 3 nodes with the profile core), mayu will assign 99 | the profile "default" to the remaining nodes. Thus, profiles with a `quantity` 100 | set are of higher priority than the default profile. 101 | 102 | ### Template Variables For Cloudconfig 103 | 104 | ```yaml 105 | templates_env: 106 | users: 107 | - Key: ssh-rsa xxxxxxxxxxxxxxx 108 | Name: my_user 109 | - Key: ssh-rsa yyyyyyyyyyyyyyy 110 | Name: second_user 111 | mayu_https_endpoint: https://10.0.1.254:4080 112 | mayu_http_endpoint: http://10.0.1.254:4081 113 | mayu_api_ip: 10.0.1.254 114 | ``` 115 | 116 | These variables are used by the templates (most of them are directly injected 117 | into the ignition file). 118 | 119 | ## Commandline flags 120 | 121 | ``` 122 | --v=12 123 | --cluster-directory=/var/lib/mayu 124 | --alsologtostderr 125 | --etcd-quorum-size=3 126 | --etcd-endpoint=https://127.0.0.1:2379 127 | --images-cache-dir=/var/lib/mayu/images 128 | --file-server-path=/var/lib/mayu/fileserver 129 | --log_dir=/tmp 130 | ``` 131 | 132 | ### Certificates 133 | 134 | Communication between `mayu` and `mayuctl` is TLS encrypted by default. For 135 | that you need to provide certificates as follows. To disable TLS 136 | you can set `--no-tls` to `true`. Then no certificate needs to be 137 | provided. 138 | 139 | ``` 140 | --no-tls=false 141 | --tls-cert-file="./cert.pem" 142 | --tls-key-file="./key.pem" 143 | ``` 144 | 145 | ## `ignition.yaml` 146 | 147 | This template is a vanilla 148 | [ignition](https://coreos.com/ignition/docs/latest/) file with a 149 | few additions to automatically deploy a few units, define the etcd3 cluster with a discovery URL, configure SSH keys for users and configure the network. 150 | 151 | ## `templates/snippets/net_singlenic.yaml` 152 | 153 | In the near future, the existence of multiple network template snippets will be 154 | changed, so we'll focus on the singlenic template (used by the default 155 | configuration) for now. 156 | 157 | ```yaml 158 | {{define "net_singlenic"}} 159 | networkd: 160 | units: 161 | - name: 10-nodhcp.network 162 | contents: | 163 | [Match] 164 | Name=* 165 | 166 | [Network] 167 | DHCP=no 168 | - name: 00-{{.ClusterNetwork.MachineInterface}}.network 169 | contents: | 170 | [Match] 171 | Name={{.ClusterNetwork.MachineInterface}} 172 | 173 | [Network] 174 | Address={{.Host.InternalAddr}}/{{.ClusterNetwork.SubnetSize}} 175 | Gateway={{.ClusterNetwork.SubnetGateway}} 176 | {{ range $server := .ClusterNetwork.DNS }}DNS={{ $server }} 177 | {{ end }} 178 | {{ range $server := .ClusterNetwork.NTP }}NTP={{ $server }} 179 | {{ end }} 180 | {{end}} 181 | ``` 182 | 183 | This snippet will be merged into the ignition file, so the right 184 | indentation must be taken into account.
The Container Linux [network 185 | configuration](https://coreos.com/os/docs/latest/network-config-with-networkd.html) 186 | defines the 187 | [systemd-networkd](http://www.freedesktop.org/software/systemd/man/systemd.network.html) 188 | .network (and optionally .device) files used by each node. 189 | 190 | In this example it just disables DHCP and configures the `machine_interface` with a 191 | static IP address. The `machine_interface` is configured in mayu config. 192 | 193 | -------------------------------------------------------------------------------- /docs/etcd_clusters.md: -------------------------------------------------------------------------------- 1 | # etcd Discovery 2 | 3 | Mayu contains an internal etcd discovery to setup and manage your etcd clusters. During startup you can configure if you like to use an external etcd discovery or Mayu itself. 4 | 5 | Use external etcd discovery: 6 | 7 | ``` 8 | mayu --etcd-discovery=https://discovery.etcd.io --use-internal-etcd-discovery=false --etcd-quorum-size=3 9 | ``` 10 | 11 | Use Mayus discovery: 12 | 13 | ``` 14 | mayu --etcd-endpoint=http://localhost:2379 --etcd-quorum-size=3 15 | ``` 16 | 17 | Note: Mayu defaults to the internal discovery. The parameter `--etcd-discovery` must be empty and `--use-internal-etcd-discovery` defaults to true. 18 | 19 | As you see you also need an etcd endpoint for the internal etcd discovery in Mayu. Mayu only implements the creation of discovery tokens. All other requests are proxied to etcd. The data of the etcd clusters is only stored in etcd. Mayu automatically creates a first token and uses this token as default for all machines. 20 | 21 | ## Run etcd for the discovery 22 | 23 | Start a single etcd instance in a container. 24 | 25 | ``` 26 | docker run --rm -v /usr/share/ca-certificates/:/etc/ssl/certs -p 4001:4001 -p 2380:2380 -p 2379:2379 \ 27 | --name etcd quay.io/coreos/etcd \ 28 | -name etcd0 \ 29 | -advertise-client-urls http://127.0.0.1:2379,http://127.0.0.1:4001 \ 30 | -listen-client-urls http://0.0.0.0:2379,http://0.0.0.0:4001 \ 31 | -initial-advertise-peer-urls http://127.0.0.1:2380 \ 32 | -listen-peer-urls http://0.0.0.0:2380 \ 33 | -initial-cluster-token etcd-cluster-1 \ 34 | -initial-cluster etcd0=http://127.0.0.1:2380 \ 35 | -initial-cluster-state new 36 | ``` 37 | 38 | ## Show tokens in etcd 39 | 40 | Mayu currently only allows to access a specific token. It is not implemented to list all tokens. To see all tokens you need to access etcd itself. 41 | 42 | ``` 43 | etcdctl ls --recursive /_etcd/registry 44 | ``` 45 | 46 | ## Create a new etcd cluster token 47 | 48 | To create a new token you need to send a `PUT` request to `/etcd/new`. The response will be a full URL to access and manage the etcd cluster. This is similar to how the official etcd discovery works. 49 | 50 | ``` 51 | curl -X PUT http://localhost:4080/etcd/new 52 | ``` 53 | 54 | ## Change etcd cluster of a host 55 | 56 | To change the etcd cluster of a host you need to overwrite the default etcd token and then reinstall the machine. 57 | 58 | ``` 59 | mayuctl set etcdtoken 60 | ``` 61 | 62 | Note: The token is only the last part of the full discovery url. eg http://localhost:4080/etcd/ 63 | 64 | *Important*: If you change the etcd token of a host you need to reinstall the machine to make it happen. To reinstall just set the host state back to `configured` and reboot the machine. But if etcd on that host was part of the members of the old cluster you need to remove it as a member. 
Otherwise the new node will be part of the old and the new cluster as the members on the old clusters will connect to the new node again. 65 | 66 | Prepare a host to be reinstalled: 67 | 68 | ``` 69 | mayuctl set state configured 70 | ``` 71 | -------------------------------------------------------------------------------- /docs/example/mayu.service.dist: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=mayu 3 | Requires=etcd.service 4 | 5 | [Service] 6 | Restart=on-failure 7 | RestartSec=1 8 | StartLimitInterval=300s 9 | StartLimitBurst=3 10 | TimeoutStartSec=0 11 | Environment="IMAGE=quay.io/giantswarm/mayu:c832dd12fa8f5cc77101ae05820106ace18504f7" 12 | Environment="NAME=mayu" 13 | 14 | ExecStartPre=/usr/bin/docker pull $IMAGE 15 | ExecStartPre=-/usr/bin/docker stop -t 10 $NAME 16 | ExecStartPre=-/usr/bin/docker rm -f $NAME 17 | ExecStart=/usr/bin/docker run --rm --net=host --cap-add=NET_ADMIN \ 18 | -v /var/lib/mayu:/var/lib/mayu \ 19 | -v /etc/mayu/ssl:/etc/mayu/ssl \ 20 | -v /etc/mayu/config.yaml:/etc/mayu/config.yaml \ 21 | -v /etc/mayu/templates:/usr/lib/mayu/templates/ \ 22 | -v /etc/mayu/template_snippets:/usr/lib/mayu/template_snippets \ 23 | --name $NAME $IMAGE \ 24 | --v=12 \ 25 | --cluster-directory=/var/lib/mayu \ 26 | --tls-cert-file /etc/mayu/ssl/mayu-crtca.pem \ 27 | --tls-key-file /etc/mayu/ssl/mayu-key.pem \ 28 | --alsologtostderr \ 29 | --etcd-quorum-size=3 \ 30 | --etcd-cafile=/etc/mayu/ssl/etcd-ca.pem \ 31 | --etcd-endpoint=https://127.0.0.1:2379 \ 32 | --images-cache-dir=/var/lib/mayu/images \ 33 | --yochu-path=/var/lib/mayu/yochu \ 34 | --log_dir=/tmp 35 | 36 | [Install] 37 | WantedBy=multi-user.target 38 | -------------------------------------------------------------------------------- /docs/flags.md: -------------------------------------------------------------------------------- 1 | # Flags of the 'mayu' binary 2 | 3 | Mayu provides the following command line flags. When doing `mayu -h` you 4 | should see this. 5 | 6 | ```nohighlight 7 | Manage your bare metal machines 8 | 9 | Usage: 10 | mayu [flags] 11 | 12 | Flags: 13 | --alsologtostderr log to standard error as well as files 14 | --api-port int API HTTP port Mayu listens on (default 4080) 15 | --cluster-directory string Path to the cluster directory (default "cluster") 16 | --config string Path to the configuration file (default "/etc/mayu/config.yaml") 17 | -d, --debug Print debug output 18 | --dnsmasq string Path to dnsmasq binary (default "/usr/sbin/dnsmasq") 19 | --dnsmasq-template string Dnsmasq config template (default "./templates/dnsmasq_template.conf") 20 | --etcd-cafile string The etcd CA file, if etcd is using non-trustred root CA certificate 21 | --etcd-discovery string External etcd discovery base url (eg https://discovery.etcd.io). Note: This should be the base URL of the discovery without a specific token. Mayu itself creates a token for the etcd clusters. 22 | --etcd-endpoint string The etcd endpoint for the internal discovery feature (you must also specify protocol). 
(default "http://127.0.0.1:2379") 23 | --etcd-quorum-size int Default quorum of the etcd clusters (default 3) 24 | --files-dir string Directory for file templates (default "./files") 25 | --help Show mayu usage 26 | --http-bind-address string HTTP address Mayu listens on (default "0.0.0.0") 27 | --ignition-config string Final ignition config file that is used to boot the machine (default "./templates/ignition.yaml") 28 | --images-cache-dir string Directory for Container Linux images (default "./images") 29 | --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0) 30 | --log_dir string If non-empty, write log files in this directory 31 | --logtostderr log to standard error instead of files (default true) 32 | --no-git Disable git operations 33 | --no-tls Disable tls 34 | --pxe-port int PXE HTTP port Mayu listens on (default 4081) 35 | --show-templates Show the templates and quit 36 | --static-html-path string Path to Mayus binaries (eg. mayuctl, infopusher) (default "./static_html") 37 | --stderrthreshold severity logs at or above this threshold go to stderr (default 2) 38 | --template-snippets string Cloudconfig or Ignition template snippets (eg storage or network configuration) (default "./templates/snippets/") 39 | --tftproot string Path to the tftproot (default "./tftproot") 40 | --tls-cert-file string Path to tls certificate file 41 | --tls-key-file string Path to tls key file 42 | --use-internal-etcd-discovery Use the internal etcd discovery (default true) 43 | -v, --v Level log level for V logs 44 | --version Show the version of Mayu 45 | --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging 46 | --yochu-path string Path to Yochus assets (eg docker, etcd, rkt binaries) (default "./yochu") 47 | ``` 48 | -------------------------------------------------------------------------------- /docs/image/bootstrap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/giantswarm/mayu/6356e9ab27dde9c4832476101e22b857856af464/docs/image/bootstrap.png -------------------------------------------------------------------------------- /docs/image/statesMayu.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/giantswarm/mayu/6356e9ab27dde9c4832476101e22b857856af464/docs/image/statesMayu.jpg -------------------------------------------------------------------------------- /docs/inside.md: -------------------------------------------------------------------------------- 1 | # Mayu Cluster Insides 2 | 3 | Here we looking inside a deployed cluster. New nodes can be added to the 4 | cluster at any time and the same bootstrap procedure will be applied. 
In this 5 | example, we created a 4 node cluster: 6 | 7 | ```nohighlight 8 | $ cd cluster 9 | $ egrep -r 'Host|InternalAddr' */conf.json 10 | 004b27ed-692e-b32e-1f68-d89aff66c71b/conf.json: "InternalAddr": "10.0.3.31", 11 | 004b27ed-692e-b32e-1f68-d89aff66c71b/conf.json: "Hostname": "00006811af601fe8", 12 | 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a/conf.json: "InternalAddr": "10.0.3.33", 13 | 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a/conf.json: "Hostname": "0000906eb12096e3", 14 | 7100c054-d2c9-e299-b669-e8bdb85f6904/conf.json: "InternalAddr": "10.0.3.32", 15 | 7100c054-d2c9-e299-b669-e8bdb85f6904/conf.json: "Hostname": "0000d71391dc5317", 16 | aa1f18e1-f14f-2dd9-4fa0-dae7317c712c/conf.json: "InternalAddr": "10.0.3.34", 17 | aa1f18e1-f14f-2dd9-4fa0-dae7317c712c/conf.json: "Hostname": "0000b1895b74c624", 18 | ``` 19 | 20 | ```nohighlight 21 | $ ssh core@10.0.3.31 fleetctl list-machines -l 22 | Warning: Permanently added '10.0.3.31' (ED25519) to the list of known hosts. 23 | MACHINE IP METADATA 24 | 00006811af601fe8e1d3f37902021ae0 10.0.3.31 rule-core=true 25 | 0000906eb12096e3d94b002c663943f9 10.0.3.33 rule-core=true 26 | 0000b1895b74c624a51bd3b94d3adf3c 10.0.3.34 rule-worker=true,stack-compute=true 27 | 0000d71391dc5317a0a1798d6bd5448f 10.0.3.32 rule-core=true 28 | ``` 29 | 30 | We can observe that the profile `core` was assigned to the first 3 nodes and 31 | the 4th node got the `default` profile. We should also note that each node's 32 | hostname is a substring of the node's `machine-id`. 33 | 34 | How It Works? Let's start by analyzing the bootstrap process of a fresh node: 35 | 36 | ![mayu bootstrap sequence](image/bootstrap.png) 37 | 38 | Adding a fresh node to the cluster consists of three steps: 39 | 40 | * initial boot of ipxe 41 | * fetching ignition from mayu 42 | * booting Container Linux from PXE with the fetched ignition 43 | 44 | ## Initial boot 45 | The fresh node is by definition empty and boots over ethernet by default. It 46 | sends a DHCP request for a `pxeclient`, which gets answered by the management 47 | node (which acts a DHCP/PXE server) with PXE details to boot iPXE. The node 48 | then pulls iPXE boot data from the PXE server via tftp. 49 | 50 | ## Fetching ignition from mayu 51 | iPXE script will fetch Container Linux `kernel`, `initrd` and ignition, which are based on the information that iPXE provided via GET request. 52 | 53 | ## Booting Container Linux from PXE 54 | When everything is fetched, iPXE will start booting Container Linux PXE image and use ignition for bootstraping the OS. 55 | -------------------------------------------------------------------------------- /docs/ipxe.md: -------------------------------------------------------------------------------- 1 | # iPXE Setup 2 | 3 | In order to install machines `mayu` uses iPXE to ship operating system 4 | binaries. Hosts that are going to be managed with `mayu` will retrieve 5 | iPXE scripts as response to their DHCP request. For more information about this 6 | process take a look at [Mayu Cluster Insides](inside.md). 7 | 8 | __Note: If you don't know how to use iPXE you can follow the [official 9 | instructions](http://ipxe.org/start#quick_start).__ 10 | 11 | ## TLS Support 12 | 13 | When using `mayu` in TLS mode make sure that you use an iPXE version which 14 | was compiled with [`DOWNLOAD_PROTO_HTTPS`](http://ipxe.org/buildcfg/download_proto_https) 15 | support. 
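A minimal sketch of how such an image is typically produced from the upstream iPXE sources (this assumes the stock `config/general.h` layout; adjust the `make` targets to the boot images you actually serve from `tftproot`):

```nohighlight
# Sketch: build iPXE boot images with HTTPS download support enabled.
git clone https://github.com/ipxe/ipxe.git
cd ipxe/src
# Turn the default "#undef DOWNLOAD_PROTO_HTTPS" into "#define DOWNLOAD_PROTO_HTTPS".
sed -i 's/#undef\([ \t]*DOWNLOAD_PROTO_HTTPS\)/#define\1/' config/general.h
# Build the BIOS and UEFI images that match the files shipped in tftproot.
make bin/undionly.kpxe bin-x86_64-efi/ipxe.efi
```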
16 | 17 | Also make sure when providing a custom SSL certificate that you need to follow 18 | the [`cryptography`](http://ipxe.org/crypto) instuctions of iPXE. 19 | -------------------------------------------------------------------------------- /docs/machine_state_transition.md: -------------------------------------------------------------------------------- 1 | # Machine State Transitions 2 | 3 | In Mayu, the machines have a state, which can vary depending on the operations that 4 | are performed on them. The list of available states in Mayu is: 5 | 6 | - `unknown` 7 | - `installing` 8 | - `configured` 9 | - `running` 10 | 11 | In the following, we show an image that illustrates the allowed transitions from 12 | a origin state to a destination state. 13 | 14 | ![](./image/statesMayu.jpg) 15 | -------------------------------------------------------------------------------- /docs/ports.md: -------------------------------------------------------------------------------- 1 | # Mayu Network Requirements 2 | 3 | Machines need to be able to connect to Mayu on a few ports. Here is a short list on what is used. 4 | 5 | ``` 6 | PORT PROTOCOL DESCRIPTION 7 | 4080 TCP default TLS/HTTP port to communicate the state of the machine and fetch binaries and scripts to provision machines 8 | 4081 TCP default HTTP port for ipxe endpoints (kernel, initrd and ignition endpoints) 9 | 67 UDP DHCP/BOOTP to let machines boot via PXE/iPXE 10 | 69 UDP/TCP TFTP to ship images via PXE 11 | ``` 12 | -------------------------------------------------------------------------------- /docs/qemu.md: -------------------------------------------------------------------------------- 1 | # Start KVMs inside Container Linux 2 | 3 | If you would like to start Container Linux via KVM inside your physical machines then Mayu can also serve the necessary assets for you. 4 | 5 | Btw if you are looking for a way to test Mayu with Qemu. We have created [Onsho](https://github.com/giantswarm/onsho) to reproduce our datacenter setup on a laptop. 6 | 7 | ## Prepare Mayu 8 | 9 | Within a release (or after running `make bin-dist`) you will find a script called `./fetch-flatcar-qemu-image.sh`. This image will download the PXE image and kernel but extract the `/usr` filesystem and put it in a folder that can be served by Mayu. 10 | 11 | Note: You need to install the Container Linux image signing key to be able to verify the downloads. See https://docs.flatcar-linux.org/os/verify-images/ 12 | 13 | To fetch Container Linux `1122.2.0` you can run: 14 | ``` 15 | ./fetch-flatcar-qemu-image 1122.2.0 16 | ``` 17 | 18 | Or if you prefer the alpha channel use: 19 | ``` 20 | ./fetch-flatcar-qemu-image 1068.0.0 alpha 21 | ``` 22 | 23 | This will download the image into a folder called `./images/qemu/` 24 | 25 | ## Create a container 26 | 27 | This is an example how to create a VM inside of Container Linux. To have some tooling available it is easier to start KVM from within a container. So you need a Dockerfile. 28 | 29 | ``` 30 | FROM fedora:latest 31 | 32 | RUN dnf -y update && \ 33 | dnf install -y net-tools libattr libattr-devel xfsprogs bridge-utils qemu-kvm qemu-system-x86 qemu-img && \ 34 | dnf clean all 35 | 36 | ADD run.sh /run.sh 37 | ADD cloudconfig.yaml /usr/code/cloudconfig/openstack/latest/user_data 38 | 39 | RUN mkdir -p /usr/code/{rootfs,images} 40 | 41 | ENTRYPOINT ["/run.sh"] 42 | ``` 43 | 44 | The entrypoint creates a rootfs and starts the actual qemu process to start the virtual machine. 
This assumes that you have a bridge called `br0` on the host. 45 | 46 | ``` 47 | #!/bin/bash 48 | 49 | set -eu 50 | 51 | echo "allow br0" > /etc/qemu/bridge.conf 52 | 53 | ROOTFS=/usr/code/rootfs/rootfs.img 54 | KERNEL=/usr/code/images/flatcar_production_qemu.vmlinuz 55 | USRFS=/usr/code/images/flatcar_production_qemu_usr_image.squashfs 56 | MAC_ADDRESS=$(printf '%02X:%02X:%02X:%02X:%02X:%02X\n' $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256)) $((RANDOM%256))) 57 | 58 | if [ ! -f $ROOTFS ]; then 59 | truncate -s 4G $ROOTFS 60 | mkfs.xfs $ROOTFS 61 | fi 62 | 63 | exec /usr/bin/qemu-system-x86_64 \ 64 | -nographic \ 65 | -machine accel=kvm -cpu host -smp 4 \ 66 | -m 1024 \ 67 | -enable-kvm \ 68 | \ 69 | -net bridge,br=$BRIDGE_NETWORK,vlan=0,helper=/usr/libexec/qemu-bridge-helper \ 70 | -net nic,vlan=0,model=virtio,macaddr=$MAC_ADDRESS \ 71 | \ 72 | -fsdev local,id=conf,security_model=none,readonly,path=/usr/code/cloudconfig \ 73 | -device virtio-9p-pci,fsdev=conf,mount_tag=config-2 \ 74 | \ 75 | -drive if=virtio,file=$USRFS,format=raw,serial=usr.readonly \ 76 | -drive if=virtio,file=$ROOTFS,format=raw,discard=on,serial=rootfs \ 77 | \ 78 | -device sga \ 79 | -serial mon:stdio \ 80 | \ 81 | -kernel $KERNEL \ 82 | -append "console=ttyS0 root=/dev/disk/by-id/virtio-rootfs rootflags=rw mount.usr=/dev/disk/by-id/virtio-usr.readonly mount.usrflags=ro" 83 | ``` 84 | 85 | Don't forget to add your own cloudconfig.yaml for your VM. Then build the container image: `docker build -t giantswarm/flatcar-qemu .`. 86 | 87 | ## Fetch the image 88 | 89 | Now you just have to fetch the assets from Mayu and start the VM on the host itself. Fetching can be done via a cloudconfig unit on the host. So you need to include this snippet in your `./templates/last_stage_cloudconfig.yaml`. 
90 | 91 | ``` 92 | flatcar: 93 | units: 94 | - name: fetch-qemu-images.service 95 | command: start 96 | enable: true 97 | content: | 98 | [Unit] 99 | Description=Fetch qemu images from Mayu 100 | Wants=network-online.target 101 | After=network-online.target 102 | 103 | [Service] 104 | Type=oneshot 105 | Environment="IMAGE_DIR=/home/core/images" 106 | Environment="KERNEL=flatcar_production_qemu.vmlinuz" 107 | Environment="USRFS=flatcar_production_qemu_usr_image.squashfs" 108 | ExecStartPre=/bin/mkdir -p ${IMAGE_DIR} 109 | ExecStartPre=/usr/bin/wget {{index .TemplatesEnv "mayu_http_endpoint"}}/images/{{.Host.Serial}}/qemu/${KERNEL} -O ${IMAGE_DIR}/${KERNEL} 110 | ExecStartPre=/usr/bin/wget {{index .TemplatesEnv "mayu_http_endpoint"}}/images/{{.Host.Serial}}/qemu/${KERNEL}.sha256 -O ${IMAGE_DIR}/${KERNEL}.sha256 111 | ExecStartPre=/usr/bin/wget {{index .TemplatesEnv "mayu_http_endpoint"}}/images/{{.Host.Serial}}/qemu/${USRFS} -O ${IMAGE_DIR}/${USRFS} 112 | ExecStartPre=/usr/bin/wget {{index .TemplatesEnv "mayu_http_endpoint"}}/images/{{.Host.Serial}}/qemu/${USRFS}.sha256 -O ${IMAGE_DIR}/${USRFS}.sha256 113 | ExecStart=/bin/bash -c "cd ${IMAGE_DIR} && sha256sum -c ${USRFS}.sha256 && sha256sum -c ${KERNEL}.sha256" 114 | 115 | [Install] 116 | WantedBy=multi-user.target 117 | ``` 118 | 119 | ## Start the VM 120 | 121 | Finally you can start the virtual machine by running the container: 122 | 123 | ``` 124 | mkdir -p /home/core/vms/foo 125 | docker run -ti 126 | --privileged \ 127 | --net=host \ 128 | -v $(pwd)/images:/usr/code/images \ 129 | -v /home/core/vms/foo/:/usr/code/rootfs/ \ 130 | giantswarm/flatcar-qemu 131 | ``` 132 | -------------------------------------------------------------------------------- /docs/release.md: -------------------------------------------------------------------------------- 1 | # Release A New Mayu Version 2 | 3 | Releases can be found here: https://github.com/giantswarm/mayu/releases 4 | 5 | We are no longer supporting binary releases, instead we are releasing docker images with all the tooling. See https://quay.io/giantswarm/mayu 6 | -------------------------------------------------------------------------------- /docs/running.md: -------------------------------------------------------------------------------- 1 | # Running Mayu 2 | 3 | ## Download Container Linux Images 4 | 5 | Before you start up Mayu you need to download a Container Linux image. In fact you need to download 6 | all the Container Linux versions that you specified in your `config.yaml`. You definitely need your 7 | default Container Linux version. But you might also define different Container Linux versions within your profiles. 8 | 9 | *Note:* the script is part of a release tarball or if you've build Mayu yourself it is in `bin-dist/`. Please do not run `scripts/fetch-flatcar-image` - you need to fetch or build a distribution first. 10 | 11 | ``` 12 | ./fetch-flatcar-image 1122.2.0 13 | ``` 14 | 15 | If you like to distribute your own binaries for docker, etcd or fleet have a look at [Yochu](https://github.com/giantswarm/yochu). 16 | There is also a script to fetch Giant Swarms binaries as an example. 
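Since every Container Linux version referenced in the configuration needs to be present in the image cache, a small sketch like the following can help keep them in sync (it assumes `yq` is available and that the configuration lives in `/etc/mayu/config.yaml`):

```nohighlight
# Sketch: fetch the default Container Linux version referenced in the config.
DEFAULT_VERSION=$(yq '.default_flatcar_version' /etc/mayu/config.yaml)
./fetch-flatcar-image "${DEFAULT_VERSION}"
```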
17 | 18 | ## Start Mayu 19 | 20 | 21 | ### Run Mayu within a Docker container 22 | 23 | ``` 24 | docker run --rm -it \ 25 | --net=host \ 26 | --cap-add=NET_ADMIN \ 27 | --name=mayu \ 28 | -v /var/lib/mayu:/var/lib/mayu \ 29 | -v /etc/mayu/config.yaml:/etc/mayu/config.yaml \ 30 | -v /etc/mayu/templates:/usr/lib/mayu/templates/ \ 31 | -v /etc/mayu/files:/usr/lib/mayu/files \ 32 | quay.io/giantswarm/mayu --v=12 --no-git --no-tls 33 | ``` 34 | 35 | Or use the [`mayu.service`](https://github.com/giantswarm/mayu/blob/master/mayu.service) unit file included in this repository. 36 | 37 | ## Cluster information 38 | 39 | Mayu is now ready to bootstrap a new cluster. You can use [mayuctl](mayuctl.md) to list information about your cluster and machines. 40 | 41 | Mayu uses the `cluster-directory` to save the cluster state: 42 | 43 | ```nohighlight 44 | $ tree cluster 45 | cluster 46 | |-- 004b27ed-692e-b32e-1f68-d89aff66c71b 47 | | `-- conf.json 48 | |-- 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a 49 | | `-- conf.json 50 | |-- 7100c054-d2c9-e299-b669-e8bdb85f6904 51 | | `-- conf.json 52 | |-- aa1f18e1-f14f-2dd9-4fa0-dae7317c712c 53 | | `-- conf.json 54 | `-- cluster.json 55 | ``` 56 | 57 | Each cluster node has its own directory (identified by the serial number) 58 | containing a JSON file with data about the node: 59 | 60 | ```json 61 | { 62 | "Enabled": true, 63 | "Serial": "004b27ed-692e-b32e-1f68-d89aff66c71b", 64 | "MacAddresses": [ 65 | "00:16:3e:a0:b7:df" 66 | ], 67 | "InternalAddr": "10.0.3.31", 68 | "Hostname": "00006811af601fe8", 69 | "MachineID": "00006811af601fe8e1d3f37902021ae0", 70 | "ConnectedNIC": "ens3", 71 | "LastBoot": "2015-10-08T19:14:36.227056826+02:00", 72 | "Profile": "core", 73 | "State": "running" 74 | } 75 | ``` 76 | 77 | The cluster directory itself contains a `cluster.json` file with persistent 78 | data about the cluster. If this file doesn't exist, it is initialized by 79 | mayu.
80 | 81 | ```json 82 | { 83 | "GitStore": true, 84 | "Config": { 85 | "EtcdDiscoveryURL": "https://discovery.etcd.io/e94768ef0f948b0c2e53536d9c5eeb8f" 86 | } 87 | } 88 | ``` 89 | 90 | By default, mayu treats the cluster directory as a git repository, commiting 91 | every change: 92 | 93 | ```nohighlight 94 | $ git log --format="%ai => %s" 95 | 2015-10-08 19:14:37 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: updated state to running 96 | 2015-10-08 19:14:36 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: updated state to running 97 | 2015-10-08 19:14:35 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: updated state to running 98 | 2015-10-08 19:14:31 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: updated state to running 99 | 2015-10-08 19:13:28 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: updated state to installed 100 | 2015-10-08 19:13:28 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: updated state to installed 101 | 2015-10-08 19:13:28 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: updated state to installed 102 | 2015-10-08 19:13:28 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: updated state to installed 103 | 2015-10-08 19:10:54 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: updated host state to installing 104 | 2015-10-08 19:10:54 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: updated host connected nic 105 | 2015-10-08 19:10:54 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: updated host macAddress 106 | 2015-10-08 19:10:54 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: updated host profile and metadata 107 | 2015-10-08 19:10:54 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: updated host InternalAddr 108 | 2015-10-08 19:10:54 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: updated with predefined settings 109 | 2015-10-08 19:10:54 +0200 => aa1f18e1-f14f-2dd9-4fa0-dae7317c712c: host created 110 | 2015-10-08 19:10:54 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: updated host state to installing 111 | 2015-10-08 19:10:54 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: updated host connected nic 112 | 2015-10-08 19:10:54 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: updated host macAddress 113 | 2015-10-08 19:10:54 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: updated host state to installing 114 | 2015-10-08 19:10:54 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: updated host connected nic 115 | 2015-10-08 19:10:54 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: updated host macAddress 116 | 2015-10-08 19:10:54 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: updated host state to installing 117 | 2015-10-08 19:10:54 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: updated host connected nic 118 | 2015-10-08 19:10:54 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: updated host macAddress 119 | 2015-10-08 19:10:53 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: updated host profile and metadata 120 | 2015-10-08 19:10:53 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: updated host InternalAddr 121 | 2015-10-08 19:10:53 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: updated with predefined settings 122 | 2015-10-08 19:10:53 +0200 => 2843c49e-d1ba-6dd3-1320-d7cc82d8ea3a: host created 123 | 2015-10-08 19:10:53 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: updated host profile and metadata 124 | 2015-10-08 19:10:53 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: updated host InternalAddr 125 | 2015-10-08 19:10:53 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: updated with predefined settings 126 | 2015-10-08 19:10:53 +0200 => 7100c054-d2c9-e299-b669-e8bdb85f6904: host created 127 | 2015-10-08 
19:10:53 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: updated host profile and metadata 128 | 2015-10-08 19:10:53 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: updated host InternalAddr 129 | 2015-10-08 19:10:53 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: updated with predefined settings 130 | 2015-10-08 19:10:53 +0200 => 004b27ed-692e-b32e-1f68-d89aff66c71b: host created 131 | 2015-10-08 19:09:19 +0200 => generated etcd discovery url 132 | 2015-10-08 19:09:19 +0200 => initial commit 133 | ``` 134 | -------------------------------------------------------------------------------- /docs/security.md: -------------------------------------------------------------------------------- 1 | # Security Overview 2 | 3 | Mayu uses `Transport Layer Security` (TLS) 4 | to encrypt connections between `mayu` and `mayuctl` and other clients like curl. 5 | 6 | When using `mayu` in TLS mode make sure that you use an iPXE image, which 7 | was compiled with [`DOWNLOAD_PROTO_HTTPS`](http://ipxe.org/buildcfg/download_proto_https) 8 | support. 9 | 10 | Further, when providing a custom SSL certificate, you should follow 11 | the [`cryptography`](http://ipxe.org/crypto) instructions of iPXE. 12 | 13 | ## Risks 14 | 15 | Note that the above-mentioned TLS only provides encryption, not authentication. 16 | 17 | Mayu communicates over the following protocols: DHCP, TFTP, iPXE, and HTTP/HTTPS. 18 | Currently, there are two general security notices about these protocols, 19 | which you can find at the end of this section. 20 | 21 | We recommend running `mayu` within a separate network 22 | with limited access for non-authorized users. This way the lack of 23 | authentication as well as the general protocol issues are less critical. 24 | 25 | ### iPXE 26 | 27 | See http://security.stackexchange.com/questions/64915/what-are-the-biggest-security-concerns-on-pxe 28 | 29 | > The basic PXE process: 30 | > 31 | > - Computer makes a DHCP request 32 | > - DHCP server responds with address and PXE parameters 33 | > - Computer downloads boot image using TFTP over UDP 34 | > 35 | > The obvious attacks are a rogue DHCP server responding with bad data (and thus 36 | > hijacking the boot process) and a rogue TFTP server blindly injecting forged 37 | > packets (hijacking or corrupting the boot image). 38 | > 39 | > UEFI secure boot can be used to prevent hijacking, but a rogue DHCP or TFTP 40 | > server can still prevent booting by ensuring the computer receives a corrupted 41 | > boot image. 42 | 43 | ### TFTP 44 | 45 | See https://technet.microsoft.com/en-us/library/cc754605.aspx 46 | 47 | > The TFTP protocol does not support any authentication or encryption mechanism, 48 | > and as such can introduce a security risk when present. Installing the TFTP 49 | > client is not recommended for systems that access the Internet. 50 | -------------------------------------------------------------------------------- /docs/templates.md: -------------------------------------------------------------------------------- 1 | # Templates 2 | 3 | ## Templates Env 4 | 5 | In your `/etc/mayu/config.yaml` you can define template environment variables 6 | that are dynamically usable in `templates/ignition.yaml` as shown in 7 | the following. 8 | 9 | ```yaml 10 | templates_env: 11 | my_var: '12345' 12 | ``` 13 | 14 | This sets the variable with key `my_var` to the corresponding 15 | value within the template file. See https://golang.org/pkg/text/template/ for 16 | more details about the usage of template variables in golang templates.
Using 17 | the injected configuration within your `templates/ignition.yaml` 18 | looks like the following. This way you can configure ignition dynamically to your own needs. 19 | 20 | ```nohighlight 21 | systemd: 22 | units: 23 | - name: extra_unit.service 24 | enable: true 25 | contents: | 26 | [Unit] 27 | Description=extra_unit 28 | [Service] 29 | Type=oneshot 30 | RemainAfterExit=yes 31 | ExecStart=/usr/bin/bash -c 'echo "{{index .TemplatesEnv "my_var"}}" >> /extra-file' 32 | [Install] 33 | WantedBy=multi-user.target 34 | 35 | ``` 36 | 37 | ## Files 38 | Ignition requires files to be specified in [data URL format](https://tools.ietf.org/html/rfc2397), which means no plaintext files in the ignition config (unlike cloud-config). 39 | 40 | Mayu encodes the files automatically, but they have to be separate files in the `./files` folder in the mayu directory. 41 | 42 | Let's say that we want to have a config file for `my-service` located at the path `/etc/my-service/config.ini`. First you need to put a file with the required content into `./files/my-service/config.ini`: 43 | ``` 44 | [section1] 45 | value1=test 46 | value2=12345 47 | value3=&^&HG 48 | ``` 49 | 50 | Then in ignition add this configuration: 51 | ``` 52 | storage: 53 | files: 54 | - filesystem: root 55 | path: /etc/my-service/config.ini 56 | mode: 0600 57 | user: 58 | id: 0 59 | group: 0 60 | contents: 61 | source: 62 | scheme: data 63 | opaque: "text/plain;charset=utf-8;base64,{{ index .Files "my-service/config.ini" }}" 64 | 65 | ``` 66 | 67 | Here `{{ index .Files "my-service/config.ini" }}` defines which file should be put there. `my-service/config.ini` is the relative path to the file in the `./files` directory. 68 | -------------------------------------------------------------------------------- /files/conf/lacp-bonding.conf: -------------------------------------------------------------------------------- 1 | options bonding miimon=100 mode=4 lacp_rate=1 2 | -------------------------------------------------------------------------------- /files/conf/module-bonding.conf: -------------------------------------------------------------------------------- 1 | bonding 2 | -------------------------------------------------------------------------------- /files/my-service/my-service.conf: -------------------------------------------------------------------------------- 1 | [section1] 2 | value1=test 3 | -------------------------------------------------------------------------------- /flag.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/giantswarm/mayu/fs" 7 | ) 8 | 9 | const ( 10 | DefaultConfigFile string = "/etc/mayu/config.yaml" 11 | DefaultClusterDirectory string = "cluster" 12 | DefaultShowTemplates bool = false 13 | DefaultNoGit bool = false 14 | DefaultNoTLS bool = false 15 | DefaultTFTPRoot string = "./tftproot" 16 | DefaultFileServerPath string = "./fileserver" 17 | DefaultIgnitionConfig string = "./templates/ignition.yaml" 18 | DefaultDnsmasqTemplate string = "./templates/dnsmasq_template.conf" 19 | DefaultTemplateSnippets string = "./templates/snippets/" 20 | DefaultDNSMasq string = "/usr/sbin/dnsmasq" 21 | DefaultImagesCacheDir string = "./images" 22 | DefaultFilesDir string = "./files" 23 | DefaultAPIPort int = 4080 24 | DefaultPXEPort int = 4081 25 | DefaultHTTPBindAddress string = "0.0.0.0" 26 | DefaultTLSCertFile string = "" 27 | DefaultTLSKeyFile string = "" 28 | DefaultUseInternalEtcdDiscovery
bool = true 29 | DefaultEtcdQuorumSize int = 3 30 | DefaultEtcdDiscoveryUrl string = "" 31 | DefaultEtcdEndpoint string = "http://127.0.0.1:2379" 32 | DefaultEtcdCA string = "" 33 | DefaultFlatcarAutologin bool = false 34 | DefaultConsoleTTY bool = false 35 | DefaultSystemdShell bool = false 36 | ) 37 | 38 | type MayuFlags struct { 39 | debug bool 40 | version bool 41 | help bool 42 | 43 | configFile string 44 | clusterDir string 45 | showTemplates bool 46 | noGit bool 47 | noTLS bool 48 | tFTPRoot string 49 | fileServerPath string 50 | staticHTMLPath string 51 | ignitionConfig string 52 | templateSnippets string 53 | dnsmasq string 54 | dnsmasqTemplate string 55 | imagesCacheDir string 56 | filesDir string 57 | apiPort int 58 | pxePort int 59 | bindAddress string 60 | tlsCertFile string 61 | tlsKeyFile string 62 | useInternalEtcdDiscovery bool 63 | etcdQuorumSize int 64 | etcdDiscoveryUrl string 65 | etcdEndpoint string 66 | etcdCAfile string 67 | flatcarAutologin bool 68 | consoleTTY bool 69 | systemdShell bool 70 | 71 | filesystem fs.FileSystem // internal filesystem abstraction to enable testing of file operations. 72 | } 73 | 74 | var ( 75 | ErrNotAllCertFilesProvided = errors.New("Please configure a key and cert files for TLS connections.") 76 | ErrHTTPSCertFileNotRedable = errors.New("Cannot open configured certificate file for TLS connections.") 77 | ErrHTTPSKeyFileNotReadable = errors.New("Cannot open configured key file for TLS connections.") 78 | ) 79 | 80 | // Validate checks the configuration based on all Validate* functions 81 | // attached to the configuration struct. 82 | func (g MayuFlags) Validate() (bool, error) { 83 | if ok, err := g.ValidateHTTPCertificateUsage(); !ok { 84 | return ok, err 85 | } 86 | 87 | if ok, err := g.ValidateHTTPCertificateFileExistance(); !ok { 88 | return ok, err 89 | } 90 | 91 | return true, nil 92 | } 93 | 94 | // ValidateHTTPCertificateUsage checks if the fields HTTPSCertFile and HTTPSKeyFile 95 | // of the configuration struct are set whenever the NoTLS is set to false. 96 | // This makes sure that users are configuring the needed certificate files when 97 | // using TLS encrypted connections. 98 | func (g MayuFlags) ValidateHTTPCertificateUsage() (bool, error) { 99 | if g.noTLS { 100 | return true, nil 101 | } 102 | 103 | if !g.noTLS && g.tlsCertFile != "" && g.tlsKeyFile != "" { 104 | return true, nil 105 | } 106 | 107 | return false, ErrNotAllCertFilesProvided 108 | } 109 | 110 | // ValidateHTTPCertificateFileExistance checks if the filenames configured 111 | // in the fields HTTPSCertFile and HTTPSKeyFile can be stat'ed to make sure 112 | // they actually exist. 
113 | func (g MayuFlags) ValidateHTTPCertificateFileExistance() (bool, error) { 114 | if g.noTLS { 115 | return true, nil 116 | } 117 | 118 | if _, err := g.filesystem.Stat(g.tlsCertFile); err != nil { 119 | return false, ErrHTTPSCertFileNotRedable 120 | } 121 | 122 | if _, err := g.filesystem.Stat(g.tlsKeyFile); err != nil { 123 | return false, ErrHTTPSKeyFileNotReadable 124 | } 125 | 126 | return true, nil 127 | } 128 | -------------------------------------------------------------------------------- /fs/fake.go: -------------------------------------------------------------------------------- 1 | package fs 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "os" 7 | "time" 8 | ) 9 | 10 | var ( 11 | ErrFileOperationNotPermitted = errors.New("fake file does not exist and therefore this operation is not permitted.") 12 | ErrFileDoesNotExist = errors.New("fake file does not exist") 13 | ) 14 | 15 | // FakeFilesystem in memory implementations for the functions 16 | // of the FileSystem interface to offer an implementation 17 | // that can be used during in tests. 18 | type FakeFilesystem struct { 19 | files map[string]FakeFile 20 | } 21 | 22 | // NewFakeFilesystemWithFiles creates a new FakeFilesystem 23 | // instance bundled with a list of FakeFile instances that 24 | // can be passed. 25 | func NewFakeFilesystemWithFiles(fs []FakeFile) FakeFilesystem { 26 | fileMap := make(map[string]FakeFile) 27 | for _, f := range fs { 28 | fileMap[f.Name] = f 29 | } 30 | 31 | return FakeFilesystem{files: fileMap} 32 | } 33 | 34 | // Open searches in the internal map of FakeFile instances and 35 | // returns it for reading. If successful, methods on the returned 36 | // file can be used for reading. If the FakeFile instance cannot 37 | // be found in the internal map an error of type *PathError will 38 | // be returned. 39 | func (ff FakeFilesystem) Open(name string) (File, error) { 40 | file, ok := ff.files[name] 41 | if !ok { 42 | return FakeFile{}, &os.PathError{Op: "stat", Path: name, Err: ErrFileDoesNotExist} 43 | } 44 | 45 | return file, nil 46 | } 47 | 48 | // Stat returns the FakeFileInfo structure describing a FakeFile instance 49 | // Found in the internal FakeFile map. If the FakeFile instance cannot 50 | // be found in the internal map an error of type *PathError will 51 | // be returned. 52 | func (ff FakeFilesystem) Stat(name string) (os.FileInfo, error) { 53 | file, ok := ff.files[name] 54 | if !ok { 55 | return FakeFileInfo{}, &os.PathError{Op: "stat", Path: name, Err: ErrFileDoesNotExist} 56 | } 57 | 58 | return file.Stat() 59 | } 60 | 61 | // Fake represents are readable in memory version of os.File which 62 | // can be used for testing. 63 | type FakeFile struct { 64 | Name string 65 | Mode os.FileMode 66 | ModTime time.Time 67 | Buffer *bytes.Reader 68 | } 69 | 70 | // NewFakeFile creates a new FakeFile instances based on a file name 71 | // and it's contents from strings. The content of the new instance will 72 | // be stored in an internal bytes.Reader instance. The default file mode 73 | // will be 0777 and the last modification time the moment when the 74 | // function is called. 75 | func NewFakeFile(name, content string) FakeFile { 76 | return FakeFile{ 77 | Name: name, 78 | Mode: os.FileMode(0777), 79 | ModTime: time.Now(), 80 | Buffer: bytes.NewReader([]byte(content)), 81 | } 82 | } 83 | 84 | // Close wraps io.Closer's functionality and does no operation. 
85 | func (f FakeFile) Close() error { 86 | return nil 87 | } 88 | 89 | // Read wraps io.Reader's functionality around the internal 90 | // bytes.Reader instance. 91 | func (f FakeFile) Read(p []byte) (n int, err error) { 92 | return f.Buffer.Read(p) 93 | } 94 | 95 | // ReadAt wraps io.ReaderAt's functionality around the internalt 96 | // bytes.Reader instance. 97 | func (f FakeFile) ReadAt(p []byte, off int64) (n int, err error) { 98 | return f.Buffer.ReadAt(p, off) 99 | } 100 | 101 | // Seek wraps io.Seeker's functionality around the internalt 102 | // bytes.Reader instance. 103 | func (f FakeFile) Seek(offset int64, whence int) (int64, error) { 104 | return f.Buffer.Seek(offset, whence) 105 | } 106 | 107 | // Stat returns the FakeFileInfo structure describing the FakeFile 108 | // instance. 109 | func (f FakeFile) Stat() (os.FileInfo, error) { 110 | return FakeFileInfo{File: f}, nil 111 | } 112 | 113 | // FakeFileInfo describes a wrapped FakeFile instance and is returned 114 | // by FakeFile.Stat 115 | type FakeFileInfo struct { 116 | File FakeFile 117 | } 118 | 119 | // Name returns the base name of the FakeFile instance. 120 | func (fi FakeFileInfo) Name() string { 121 | return fi.File.Name 122 | } 123 | 124 | // Size returns the length in bytes of the file's 125 | // internal bytes.Reader instance. 126 | func (fi FakeFileInfo) Size() int64 { 127 | return int64(fi.File.Buffer.Len()) 128 | } 129 | 130 | // Mode returns file mode bits of the FakeFile instance. 131 | func (fi FakeFileInfo) Mode() os.FileMode { 132 | return fi.File.Mode 133 | } 134 | 135 | // ModTime returns the modification time of the FakeFile instance. 136 | func (fi FakeFileInfo) ModTime() time.Time { 137 | return fi.File.ModTime 138 | } 139 | 140 | // IsDir always return false since it only uses FakeFile instances. 141 | func (fi FakeFileInfo) IsDir() bool { 142 | return false 143 | } 144 | 145 | // Sys always returns nil to stay conformant to the os.FileInfo interface. 146 | func (fi FakeFileInfo) Sys() interface{} { 147 | return nil 148 | } 149 | -------------------------------------------------------------------------------- /fs/fs.go: -------------------------------------------------------------------------------- 1 | package fs 2 | 3 | import ( 4 | "io" 5 | "os" 6 | ) 7 | 8 | // FileSystem is an interface that groups the common functions 9 | // Open and Stat of the os package. 10 | type FileSystem interface { 11 | Open(name string) (File, error) 12 | Stat(name string) (os.FileInfo, error) 13 | } 14 | 15 | // File is an interface that groups several read interfaces of 16 | // the io package together with the Stat function of the os package 17 | // to make reading astractions of the filesystem possible. 18 | type File interface { 19 | io.Closer 20 | io.Reader 21 | io.ReaderAt 22 | io.Seeker 23 | Stat() (os.FileInfo, error) 24 | } 25 | -------------------------------------------------------------------------------- /fs/os.go: -------------------------------------------------------------------------------- 1 | package fs 2 | 3 | import "os" 4 | 5 | // DefaultFilesystem is the default implementation of 6 | // the FileSystem interface. It uses OSFileSystem to make 7 | // functions of the os package the default for users. 8 | var DefaultFilesystem = OSFileSystem{} 9 | 10 | // OSFileSystem wraps the functions of the FileSystem 11 | // interface around functions of the os package to offer 12 | // an abstraction for the golang library. 13 | type OSFileSystem struct{} 14 | 15 | // Open opens the named file for reading. 
If successful, methods on 16 | // the returned file can be used for reading; the associated file descriptor 17 | // has mode O_RDONLY. If there is an error, it will be of type *os.PathError. 18 | func (OSFileSystem) Open(name string) (File, error) { 19 | return os.Open(name) 20 | } 21 | 22 | // Stat returns the os.FileInfo structure describing file. 23 | // If there is an error, it will be of type *os.PathError. 24 | func (OSFileSystem) Stat(name string) (os.FileInfo, error) { 25 | return os.Stat(name) 26 | } 27 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/giantswarm/mayu 2 | 3 | go 1.14 4 | 5 | require ( 6 | github.com/coreos/etcd v3.3.15+incompatible 7 | github.com/coreos/go-semver v0.3.0 // indirect 8 | github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect 9 | github.com/dustin/go-humanize v1.0.0 // indirect 10 | github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect 11 | github.com/giantswarm/mayu-infopusher v1.0.1 12 | github.com/giantswarm/microerror v0.0.0-20191011121515-e0ebc4ecf5a5 13 | github.com/giantswarm/micrologger v0.0.0-20191014091141-d866337f7393 14 | github.com/google/uuid v1.0.0 // indirect 15 | github.com/gorilla/handlers v1.5.1 16 | github.com/gorilla/mux v1.8.0 17 | github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect 18 | github.com/grpc-ecosystem/grpc-gateway v1.9.5 // indirect 19 | github.com/juju/errgo v0.0.0-20140925100237-08cceb5d0b53 // indirect 20 | github.com/prometheus/client_golang v1.11.0 21 | github.com/spf13/cobra v0.0.7 22 | github.com/spf13/pflag v1.0.5 23 | go.etcd.io/bbolt v1.3.5 // indirect 24 | go.uber.org/zap v1.14.1 // indirect 25 | golang.org/x/net v0.0.0-20210505024714-0287a6fb4125 26 | google.golang.org/grpc v1.26.0 // indirect 27 | gopkg.in/yaml.v2 v2.4.0 28 | sigs.k8s.io/yaml v1.1.0 // indirect 29 | ) 30 | 31 | replace ( 32 | github.com/coreos/bbolt => go.etcd.io/bbolt v1.3.3 33 | github.com/coreos/etcd v3.3.15+incompatible => github.com/coreos/etcd v3.3.25+incompatible 34 | github.com/dgrijalva/jwt-go => github.com/form3tech-oss/jwt-go v3.2.1+incompatible 35 | github.com/gogo/protobuf v1.2.1 => github.com/gogo/protobuf v1.3.2 36 | github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c => github.com/gorilla/websocket v1.4.2 37 | github.com/gorilla/websocket v1.4.0 => github.com/gorilla/websocket v1.4.2 38 | ) 39 | -------------------------------------------------------------------------------- /hostmgr/cluster.go: -------------------------------------------------------------------------------- 1 | package hostmgr 2 | 3 | import ( 4 | "crypto/rand" 5 | "crypto/tls" 6 | "crypto/x509" 7 | "encoding/hex" 8 | "fmt" 9 | "io/ioutil" 10 | "net" 11 | "net/http" 12 | "os" 13 | "path" 14 | "strconv" 15 | "strings" 16 | "sync" 17 | "time" 18 | 19 | "github.com/coreos/etcd/client" 20 | "github.com/giantswarm/microerror" 21 | "github.com/giantswarm/micrologger" 22 | "golang.org/x/net/context" 23 | ) 24 | 25 | const clusterConfFile = "cluster.json" 26 | 27 | type Cluster struct { 28 | Config ClusterConfig 29 | 30 | baseDir string 31 | // an cached host is identified by its serial number 32 | hostsCache map[string]*cachedHost 33 | cachedModTime time.Time 34 | mu *sync.Mutex 35 | 36 | logger micrologger.Logger 37 | } 38 | 39 | type ClusterConfig struct { 40 | DefaultEtcdClusterToken string 41 | 42 | // Deprecated 43 | 
EtcdDiscoveryURL string `json:"EtcdDiscoveryURL,omitempty"` 44 | } 45 | 46 | type cachedHost struct { 47 | lastModTime time.Time 48 | host *Host 49 | } 50 | 51 | func OpenCluster(baseDir string, logger micrologger.Logger) (*Cluster, error) { 52 | cluster := &Cluster{logger: logger} 53 | 54 | err := loadJson(cluster, path.Join(baseDir, clusterConfFile)) 55 | if err != nil { 56 | return nil, microerror.Mask(err) 57 | } 58 | 59 | cluster.baseDir = baseDir 60 | cluster.mu = new(sync.Mutex) 61 | cluster.hostsCache = map[string]*cachedHost{} 62 | return cluster, nil 63 | } 64 | 65 | // NewCluster creates a new cluster based on the cluster directory. 66 | func NewCluster(baseDir string, logger micrologger.Logger) (*Cluster, error) { 67 | if !fileExists(baseDir) { 68 | err := os.Mkdir(baseDir, 0755) 69 | if err != nil { 70 | return nil, microerror.Mask(err) 71 | } 72 | } 73 | 74 | c := &Cluster{ 75 | baseDir: baseDir, 76 | mu: new(sync.Mutex), 77 | Config: ClusterConfig{}, 78 | hostsCache: map[string]*cachedHost{}, 79 | logger: logger, 80 | } 81 | 82 | err := c.Commit("initial commit") 83 | if err != nil { 84 | return nil, microerror.Mask(err) 85 | } 86 | return c, nil 87 | } 88 | 89 | // CreateNewHost creates a new host with the given serial. 90 | func (c *Cluster) CreateNewHost(serial string) (*Host, error) { 91 | serial = strings.ToLower(serial) 92 | hostDir := path.Join(c.baseDir, serial) 93 | newHost, err := createHost(serial, hostDir) 94 | if err != nil { 95 | return nil, microerror.Mask(err) 96 | } 97 | 98 | machineID := genMachineID() 99 | newHost.MachineID = machineID 100 | if newHost.InternalAddr != nil { 101 | newHost.Hostname = strings.Replace(newHost.InternalAddr.String(), ".", "-", 4) 102 | } 103 | _ = c.logger.Log("level", "info", "message", fmt.Sprintf("hostname for '%s' is %s", newHost.InternalAddr.String(), newHost.Hostname)) 104 | _ = newHost.Save() 105 | 106 | return newHost, nil 107 | } 108 | 109 | func (c *Cluster) Commit(msg string) error { 110 | err := c.save() 111 | if err != nil { 112 | return microerror.Mask(err) 113 | } 114 | return nil 115 | } 116 | 117 | // Update refreshs the internal host cache based on information within the 118 | // cluster directory. 119 | func (c *Cluster) Update() error { 120 | c.mu.Lock() 121 | defer c.mu.Unlock() 122 | 123 | err := c.cacheHosts() 124 | if err != nil { 125 | return microerror.Mask(err) 126 | } 127 | 128 | return nil 129 | } 130 | 131 | // HostWithSerial returns the host object given by serial based on the internal 132 | // cache. In case the host could not be found, host is nil and false is 133 | // returned as second return value. 134 | func (c *Cluster) HostWithSerial(serial string) (*Host, bool) { 135 | if err := c.Update(); err != nil { 136 | _ = c.logger.Log("level", "error", "message", "error getting the serial number using the internal cache", "stack", err) 137 | return nil, false 138 | } 139 | c.mu.Lock() 140 | defer c.mu.Unlock() 141 | 142 | if cached, exists := c.hostsCache[strings.ToLower(serial)]; exists { 143 | return cached.get(), true 144 | } else { 145 | return nil, false 146 | } 147 | } 148 | 149 | // GetProfileCount returns a matching of profiles and how many of them are 150 | // known to the cluster. Imagine there is a provile name core. If there are 2 151 | // core nodes known to the cluster, the map would look like this. 
152 | // 153 | // map[string]int{ 154 | // "core": 2, 155 | // } 156 | func (c *Cluster) GetProfileCount() map[string]int { 157 | count := map[string]int{} 158 | allHosts := c.GetAllHosts() 159 | for _, host := range allHosts { 160 | if host.Profile == "" { 161 | continue 162 | } 163 | if cnt, exists := count[host.Profile]; exists { 164 | count[host.Profile] = cnt + 1 165 | } else { 166 | count[host.Profile] = 1 167 | } 168 | } 169 | return count 170 | } 171 | 172 | // GetAllHosts returns a list of all hosts based on the internal cache. 173 | func (c *Cluster) GetAllHosts() []*Host { 174 | hosts := make([]*Host, 0, len(c.hostsCache)) 175 | 176 | if err := c.Update(); err != nil { 177 | _ = c.logger.Log("level", "error", "message", "error getting the list of hosts based on the internal cache: %#v", "stack", err) 178 | return hosts 179 | } 180 | 181 | for _, cachedHost := range c.hostsCache { 182 | hosts = append(hosts, cachedHost.get()) 183 | } 184 | return hosts 185 | } 186 | 187 | func (c *Cluster) GenerateEtcdDiscoveryToken() (string, error) { 188 | b := make([]byte, 16) 189 | _, err := rand.Read(b) 190 | if err != nil { 191 | return "", microerror.Mask(err) 192 | } 193 | token := hex.EncodeToString(b) 194 | 195 | return token, nil 196 | } 197 | 198 | func (c *Cluster) StoreEtcdDiscoveryToken(etcdEndpoint, etcdCAFile, token string, size int) error { 199 | //http transport for etcd connection 200 | transport := client.DefaultTransport 201 | // read custom root CA file if https and CAfile is configured 202 | if strings.HasPrefix(etcdEndpoint, "https") && etcdCAFile != "" { 203 | customCA := x509.NewCertPool() 204 | 205 | pemData, err := ioutil.ReadFile(etcdCAFile) 206 | if err != nil { 207 | return microerror.Maskf(err, "Unable to read custom CA file: ") 208 | } 209 | customCA.AppendCertsFromPEM(pemData) 210 | transport = &http.Transport{ 211 | TLSClientConfig: &tls.Config{ 212 | RootCAs: customCA, 213 | MinVersion: tls.VersionTLS12, 214 | }, 215 | Proxy: http.ProxyFromEnvironment, 216 | Dial: (&net.Dialer{ 217 | Timeout: 30 * time.Second, 218 | KeepAlive: 30 * time.Second, 219 | }).Dial, 220 | TLSHandshakeTimeout: 10 * time.Second, 221 | } 222 | } 223 | 224 | // store in etcd 225 | cfg := client.Config{ 226 | Endpoints: []string{etcdEndpoint}, 227 | Transport: transport, 228 | // set timeout per request to fail fast when the target endpoint is unavailable 229 | HeaderTimeoutPerRequest: time.Second, 230 | } 231 | etcdClient, err := client.New(cfg) 232 | if err != nil { 233 | return microerror.Mask(err) 234 | } 235 | kapi := client.NewKeysAPI(etcdClient) 236 | 237 | _, err = kapi.Set(context.Background(), path.Join("_etcd", "registry", token), "", &client.SetOptions{ 238 | PrevExist: client.PrevNoExist, 239 | Dir: true, 240 | }) 241 | if err != nil { 242 | return microerror.Mask(err) 243 | } 244 | 245 | _, err = kapi.Set(context.Background(), path.Join("_etcd", "registry", token, "_config", "size"), strconv.Itoa(size), &client.SetOptions{ 246 | PrevExist: client.PrevNoExist, 247 | }) 248 | if err != nil { 249 | return microerror.Mask(err) 250 | } 251 | 252 | return nil 253 | } 254 | 255 | func (c *Cluster) FetchEtcdDiscoveryToken(etcdDiscoveryUrl string, size int) (string, error) { 256 | req, err := http.NewRequest("PUT", fmt.Sprintf("%s/new", etcdDiscoveryUrl), strings.NewReader(fmt.Sprintf("size=%d", size))) 257 | if err != nil { 258 | return "", microerror.Mask(err) 259 | } 260 | 261 | res, err := http.DefaultClient.Do(req) 262 | if err != nil { 263 | return "", microerror.Mask(err) 264 
| } 265 | 266 | body, err := ioutil.ReadAll(res.Body) 267 | if err != nil { 268 | return "", microerror.Mask(err) 269 | } 270 | 271 | token := strings.TrimPrefix(string(body), etcdDiscoveryUrl+"/") 272 | return token, nil 273 | } 274 | 275 | func Has(host *Host, exists bool) bool { 276 | return exists 277 | } 278 | 279 | func (cached cachedHost) get() *Host { 280 | fi, err := os.Stat(cached.host.confPath()) 281 | if err != nil { 282 | panic(err) 283 | } 284 | 285 | if fi.ModTime().After(cached.lastModTime) { 286 | hostDir := cached.host.hostDir.Name() 287 | cached.host, err = HostFromDir(hostDir) 288 | if err != nil { 289 | panic(err) 290 | } 291 | cached.lastModTime = cached.host.lastModTime 292 | } 293 | 294 | return cached.host 295 | } 296 | 297 | func (c *Cluster) save() error { 298 | return saveJson(c, c.confPath()) 299 | } 300 | 301 | func (c *Cluster) confPath() string { 302 | return path.Join(c.baseDir, clusterConfFile) 303 | } 304 | 305 | func (c *Cluster) cacheHosts() error { 306 | baseDirFileInfo, err := os.Stat(c.baseDir) 307 | if err != nil { 308 | return microerror.Mask(err) 309 | } 310 | 311 | modTime := baseDirFileInfo.ModTime() 312 | 313 | fis, err := ioutil.ReadDir(c.baseDir) 314 | if err != nil { 315 | return microerror.Mask(err) 316 | } 317 | 318 | newCache := map[string]*cachedHost{} 319 | 320 | for _, fi := range fis { 321 | if fi.IsDir() && !strings.HasPrefix(fi.Name(), ".") { 322 | hostConfPath := path.Join(c.baseDir, fi.Name(), hostConfFile) 323 | if fileExists(hostConfPath) { 324 | host, err := HostFromDir(path.Join(c.baseDir, fi.Name())) 325 | if err != nil { 326 | _ = c.logger.Log("level", "warning", "message", fmt.Sprintf("unable to process '%s'", hostConfPath), "stack", err) 327 | } 328 | newCache[strings.ToLower(fi.Name())] = &cachedHost{ 329 | host: host, 330 | lastModTime: host.lastModTime, 331 | } 332 | } 333 | } 334 | } 335 | 336 | c.hostsCache = newCache 337 | c.cachedModTime = modTime 338 | return nil 339 | } 340 | 341 | func fileExists(path string) bool { 342 | if _, err := os.Stat(path); err == nil { 343 | return true 344 | } 345 | return false 346 | } 347 | -------------------------------------------------------------------------------- /hostmgr/host.go: -------------------------------------------------------------------------------- 1 | package hostmgr 2 | 3 | import ( 4 | "crypto/rand" 5 | "encoding/hex" 6 | "net" 7 | "os" 8 | "path" 9 | "time" 10 | 11 | "github.com/giantswarm/microerror" 12 | ) 13 | 14 | const hostConfFile = "conf.json" 15 | 16 | // Host represents a node within the mayu cluster. 
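// A Host is persisted as JSON in its own directory below the cluster directory (see hostConfFile and Save below); the omitempty tags keep unset fields out of that conf.json file.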
17 | type Host struct { 18 | Id int `json:",omitempty"` 19 | ProviderId string `json:",omitempty"` 20 | Enabled bool `json:",omitempty"` 21 | Name string `json:",omitempty"` 22 | Serial string `json:",omitempty"` 23 | MacAddresses []string `json:",omitempty"` 24 | InternalAddr net.IP `json:",omitempty"` 25 | AdditionalAddrs map[string]net.IP `json:",omitempty"` 26 | IPMIAddr net.IP `json:",omitempty"` 27 | Hostname string `json:",omitempty"` 28 | MachineID string `json:",omitempty"` 29 | LastBoot time.Time `json:",omitempty"` 30 | Profile string `json:",omitempty"` 31 | EtcdClusterToken string `json:",omitempty"` 32 | 33 | Overrides map[string]interface{} `json:",omitempty"` 34 | 35 | State hostState 36 | 37 | FlatcarVersion string `json:",omitempty"` 38 | 39 | hostDir *os.File 40 | lastModTime time.Time 41 | } 42 | 43 | type IPMac struct { 44 | IP net.IP 45 | MacAddr string 46 | } 47 | 48 | func genMachineID() string { 49 | b := make([]byte, 16) 50 | _, err := rand.Read(b) 51 | if err != nil { 52 | panic(err) 53 | } 54 | return hex.EncodeToString(b) 55 | } 56 | 57 | // HostFromDir takes a path to a host directory within the cluster directory 58 | // and loads the found configuration. Then the corresponding Host is returned. 59 | func HostFromDir(hostdir string) (*Host, error) { 60 | confPath := path.Join(hostdir, hostConfFile) 61 | 62 | h := &Host{} 63 | err := loadJson(h, confPath) 64 | if err != nil { 65 | return nil, microerror.Mask(err) 66 | } 67 | 68 | h.hostDir, err = os.Open(hostdir) 69 | if err != nil { 70 | return nil, microerror.Mask(err) 71 | } 72 | 73 | fi, err := os.Stat(confPath) 74 | if err != nil { 75 | return nil, microerror.Mask(err) 76 | } 77 | h.lastModTime = fi.ModTime() 78 | 79 | return h, nil 80 | } 81 | 82 | func createHost(serial string, hostDir string) (*Host, error) { 83 | var err error 84 | if !fileExists(hostDir) { 85 | err = os.Mkdir(hostDir, 0755) 86 | if err != nil { 87 | return nil, microerror.Mask(err) 88 | } 89 | } 90 | 91 | hostDirFile, err := os.Open(hostDir) 92 | if err != nil { 93 | return nil, microerror.Mask(err) 94 | } 95 | 96 | h := &Host{ 97 | hostDir: hostDirFile, 98 | Serial: serial, 99 | Enabled: true, 100 | } 101 | err = h.Save() 102 | if err != nil { 103 | return nil, microerror.Mask(err) 104 | } 105 | 106 | return h, nil 107 | } 108 | 109 | func (h *Host) Save() error { 110 | err := saveJson(h, h.confPath()) 111 | if err != nil { 112 | return microerror.Mask(err) 113 | } 114 | 115 | fi, err := os.Stat(h.confPath()) 116 | if err != nil { 117 | return microerror.Mask(err) 118 | } 119 | 120 | h.lastModTime = fi.ModTime() 121 | return nil 122 | } 123 | 124 | func (h *Host) confPath() string { 125 | return path.Join(h.hostDir.Name(), hostConfFile) 126 | } 127 | -------------------------------------------------------------------------------- /hostmgr/hoststate.go: -------------------------------------------------------------------------------- 1 | package hostmgr 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/giantswarm/microerror" 7 | ) 8 | 9 | type hostState int 10 | 11 | const ( 12 | Unknown hostState = iota 13 | Configured 14 | Installing 15 | Running 16 | ) 17 | 18 | func HostStateMap() map[hostState]string { 19 | return map[hostState]string{ 20 | Unknown: `"unknown"`, 21 | Configured: `"configured"`, 22 | Installing: `"installing"`, 23 | Running: `"running"`, 24 | } 25 | } 26 | 27 | func (s hostState) MarshalJSON() ([]byte, error) { 28 | m := HostStateMap() 29 | if stringVal, ok := m[s]; ok { 30 | return []byte(stringVal), nil 31 | } 32 | 
33 | return []byte{}, microerror.Mask(fmt.Errorf("don't know how to marshal '%d'", s)) 34 | } 35 | 36 | func HostState(state string) (hostState, error) { 37 | switch state { 38 | case "unknown": 39 | return Unknown, nil 40 | case "configured": 41 | return Configured, nil 42 | case "installing": 43 | return Installing, nil 44 | case "running": 45 | return Running, nil 46 | default: 47 | return -1, microerror.Mask(fmt.Errorf("wrong host state '%s'", state)) 48 | } 49 | } 50 | 51 | func (s *hostState) UnmarshalJSON(b []byte) error { 52 | str := string(b) 53 | switch str { 54 | case `"unknown"`: 55 | *s = Unknown 56 | case `"configured"`: 57 | *s = Configured 58 | case `"installing"`: 59 | *s = Installing 60 | case `"running"`: 61 | *s = Running 62 | default: 63 | return microerror.Mask(fmt.Errorf("don't know how to unmarshal '%+v'", b)) 64 | } 65 | return nil 66 | } 67 | -------------------------------------------------------------------------------- /hostmgr/utils.go: -------------------------------------------------------------------------------- 1 | package hostmgr 2 | 3 | import ( 4 | "encoding/json" 5 | "os" 6 | 7 | "github.com/giantswarm/microerror" 8 | ) 9 | 10 | func saveJson(data interface{}, filepath string) error { 11 | marshalled, err := json.MarshalIndent(data, "", " ") 12 | if err != nil { 13 | return microerror.Mask(err) 14 | } 15 | file, err := os.Create(filepath) 16 | if err != nil { 17 | return microerror.Mask(err) 18 | } 19 | defer file.Close() 20 | 21 | _, _ = file.Write(marshalled) 22 | return nil 23 | } 24 | 25 | func loadJson(target interface{}, filepath string) error { 26 | 27 | jsonFile, err := os.Open(filepath) 28 | if err != nil { 29 | return microerror.Mask(err) 30 | } 31 | 32 | defer jsonFile.Close() 33 | 34 | jsonDec := json.NewDecoder(jsonFile) 35 | err = jsonDec.Decode(target) 36 | if err != nil { 37 | return microerror.Mask(err) 38 | } 39 | return nil 40 | } 41 | -------------------------------------------------------------------------------- /httputil/client.go: -------------------------------------------------------------------------------- 1 | package httputil 2 | 3 | import ( 4 | "io" 5 | "net/http" 6 | ) 7 | 8 | func Put(url string, bodyType string, body io.Reader) (resp *http.Response, err error) { 9 | req, err := http.NewRequest("PUT", url, body) 10 | if err != nil { 11 | return nil, err 12 | } 13 | 14 | req.Header.Set("Content-Type", bodyType) 15 | return http.DefaultClient.Do(req) 16 | } 17 | -------------------------------------------------------------------------------- /logging/log.go: -------------------------------------------------------------------------------- 1 | package logging 2 | 3 | import ( 4 | "github.com/giantswarm/microerror" 5 | "github.com/giantswarm/micrologger" 6 | ) 7 | 8 | type MicrologgerWrapper struct { 9 | logger micrologger.Logger 10 | } 11 | 12 | func NewMicrologgerWrapper(logger micrologger.Logger) MicrologgerWrapper { 13 | return MicrologgerWrapper{ 14 | logger: logger, 15 | } 16 | } 17 | 18 | func (l MicrologgerWrapper) Write(p []byte) (int, error) { 19 | err := l.logger.Log("level", "info", "type", "http log", "message", string(p)) 20 | if err != nil { 21 | return 0, microerror.Mask(err) 22 | } 23 | return len(p), nil 24 | } 25 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "bytes" 5 | "flag" 6 | "fmt" 7 | "log" 8 | "os" 9 | "path/filepath" 10 | 11 | 
"github.com/giantswarm/micrologger" 12 | "github.com/spf13/cobra" 13 | "github.com/spf13/pflag" 14 | 15 | "github.com/giantswarm/mayu/fs" 16 | "github.com/giantswarm/mayu/hostmgr" 17 | "github.com/giantswarm/mayu/pxemgr" 18 | ) 19 | 20 | var ( 21 | globalFlags = MayuFlags{} 22 | 23 | mainCmd = &cobra.Command{ 24 | Use: "mayu", 25 | Short: "Manage your bare metal machines", 26 | Long: "", 27 | Run: mainRun, 28 | } 29 | 30 | projectVersion string = "1.1.0" 31 | projectBuild string = "git" 32 | ) 33 | 34 | func init() { 35 | // Map any flags registered in the standard "flag" package into the 36 | // top-level mayu command (eg. log flags) 37 | pf := mainCmd.PersistentFlags() 38 | flag.VisitAll(func(f *flag.Flag) { 39 | pf.AddFlag(pflag.PFlagFromGoFlag(f)) 40 | }) 41 | 42 | pf.BoolVarP(&globalFlags.debug, "debug", "d", false, "Print debug output") 43 | pf.BoolVar(&globalFlags.version, "version", false, "Show the version of Mayu") 44 | pf.BoolVar(&globalFlags.help, "help", false, "Show mayu usage") 45 | pf.StringVar(&globalFlags.configFile, "config", DefaultConfigFile, "Path to the configuration file") 46 | pf.StringVar(&globalFlags.clusterDir, "cluster-directory", DefaultClusterDirectory, "Path to the cluster directory") 47 | pf.BoolVar(&globalFlags.showTemplates, "show-templates", DefaultShowTemplates, "Show the templates and quit") 48 | pf.BoolVar(&globalFlags.noGit, "no-git", DefaultNoGit, "Disable git operations") 49 | pf.BoolVar(&globalFlags.noTLS, "no-tls", DefaultNoTLS, "Disable tls") 50 | pf.StringVar(&globalFlags.tFTPRoot, "tftproot", DefaultTFTPRoot, "Path to the tftproot") 51 | pf.StringVar(&globalFlags.fileServerPath, "file-server-path", DefaultFileServerPath, "Path to fileserver dir.") 52 | pf.StringVar(&globalFlags.ignitionConfig, "ignition-config", DefaultIgnitionConfig, "Final ignition config file that is used to boot the machine") 53 | pf.StringVar(&globalFlags.dnsmasqTemplate, "dnsmasq-template", DefaultDnsmasqTemplate, "Dnsmasq config template") 54 | pf.StringVar(&globalFlags.templateSnippets, "template-snippets", DefaultTemplateSnippets, "Cloudconfig or Ignition template snippets (eg storage or network configuration)") 55 | pf.StringVar(&globalFlags.dnsmasq, "dnsmasq", DefaultDNSMasq, "Path to dnsmasq binary") 56 | pf.StringVar(&globalFlags.imagesCacheDir, "images-cache-dir", DefaultImagesCacheDir, "Directory for Container Linux images") 57 | pf.StringVar(&globalFlags.filesDir, "files-dir", DefaultFilesDir, "Directory for file templates") 58 | pf.IntVar(&globalFlags.apiPort, "api-port", DefaultAPIPort, "API HTTP port Mayu listens on") 59 | pf.IntVar(&globalFlags.pxePort, "pxe-port", DefaultPXEPort, "PXE HTTP port Mayu listens on") 60 | pf.StringVar(&globalFlags.bindAddress, "http-bind-address", DefaultHTTPBindAddress, "HTTP address Mayu listens on") 61 | pf.StringVar(&globalFlags.tlsCertFile, "tls-cert-file", DefaultTLSCertFile, "Path to tls certificate file") 62 | pf.StringVar(&globalFlags.tlsKeyFile, "tls-key-file", DefaultTLSKeyFile, "Path to tls key file") 63 | pf.BoolVar(&globalFlags.useInternalEtcdDiscovery, "use-internal-etcd-discovery", DefaultUseInternalEtcdDiscovery, "Use the internal etcd discovery") 64 | pf.IntVar(&globalFlags.etcdQuorumSize, "etcd-quorum-size", DefaultEtcdQuorumSize, "Default quorum of the etcd clusters") 65 | pf.StringVar(&globalFlags.etcdDiscoveryUrl, "etcd-discovery", DefaultEtcdDiscoveryUrl, "External etcd discovery base url (eg https://discovery.etcd.io). Note: This should be the base URL of the discovery without a specific token. 
Mayu itself creates a token for the etcd clusters.") 66 | pf.StringVar(&globalFlags.etcdEndpoint, "etcd-endpoint", DefaultEtcdEndpoint, "The etcd endpoint for the internal discovery feature (you must also specify protocol).") 67 | pf.StringVar(&globalFlags.etcdCAfile, "etcd-cafile", DefaultEtcdCA, "The etcd CA file, if etcd is using non-trustred root CA certificate") 68 | pf.BoolVar(&globalFlags.flatcarAutologin, "flatcar-autologin", DefaultFlatcarAutologin, "Sets kernel boot param 'flatcar.autologin'. This is handy for debugging. Do NOT use for production!") 69 | pf.BoolVar(&globalFlags.consoleTTY, "console-tty", DefaultConsoleTTY, "Sets kernel boot param 'console=ttyS0'. This is handy for debugging.") 70 | pf.BoolVar(&globalFlags.systemdShell, "systemd-shell", DefaultSystemdShell, "Sets kernel boot param 'rd.shell'. This will be activated if the initramfs fails to boot successfully.") 71 | globalFlags.filesystem = fs.DefaultFilesystem 72 | } 73 | 74 | func main() { 75 | log.SetFlags(0) 76 | log.SetPrefix("mayu: ") 77 | 78 | if globalFlags.version { 79 | printVersion() 80 | os.Exit(0) 81 | } 82 | 83 | _ = mainCmd.Execute() 84 | } 85 | 86 | func mainRun(cmd *cobra.Command, args []string) { 87 | if os.Args[1] == "version" { 88 | println("Mayu build: ", projectBuild) 89 | println("Mayu version: ", projectVersion) 90 | return 91 | } 92 | if globalFlags.help { 93 | cmd.PersistentFlags().Usage() 94 | return 95 | } 96 | 97 | var err error 98 | var logger micrologger.Logger 99 | { 100 | logger, err = micrologger.New(micrologger.Config{}) 101 | if err != nil { 102 | println("ERROR: failed to init logger") 103 | os.Exit(1) 104 | } 105 | } 106 | 107 | _ = logger.Log("level", "info", "message", fmt.Sprintf("Starting mayu version %s", projectVersion)) 108 | 109 | // hack to make some dnsmasq versions happy 110 | globalFlags.tFTPRoot, err = filepath.Abs(globalFlags.tFTPRoot) 111 | if err != nil { 112 | log.Fatal(err) 113 | } 114 | 115 | if ok, err := globalFlags.Validate(); !ok { 116 | log.Fatal(err) 117 | } 118 | 119 | var cluster *hostmgr.Cluster 120 | 121 | if fileExists(fmt.Sprintf("%s/cluster.json", globalFlags.clusterDir)) { 122 | cluster, err = hostmgr.OpenCluster(globalFlags.clusterDir, logger) 123 | } else { 124 | cluster, err = hostmgr.NewCluster(globalFlags.clusterDir, logger) 125 | } 126 | 127 | if err != nil { 128 | _ = logger.Log("level", "error", "message", "unable to get a cluster", "stack", err) 129 | os.Exit(1) 130 | } 131 | 132 | globalFlags.templateSnippets = DefaultTemplateSnippets 133 | 134 | pxeManager, err := pxemgr.PXEManager(pxemgr.PXEManagerConfiguration{ 135 | ConfigFile: globalFlags.configFile, 136 | UseInternalEtcdDiscovery: globalFlags.useInternalEtcdDiscovery, 137 | EtcdQuorumSize: globalFlags.etcdQuorumSize, 138 | EtcdDiscoveryUrl: globalFlags.etcdDiscoveryUrl, 139 | EtcdEndpoint: globalFlags.etcdEndpoint, 140 | EtcdCAFile: globalFlags.etcdCAfile, 141 | DNSmasqExecutable: globalFlags.dnsmasq, 142 | DNSmasqTemplate: globalFlags.dnsmasqTemplate, 143 | TFTPRoot: globalFlags.tFTPRoot, 144 | NoTLS: globalFlags.noTLS, 145 | APIPort: globalFlags.apiPort, 146 | PXEPort: globalFlags.pxePort, 147 | BindAddress: globalFlags.bindAddress, 148 | TLSCertFile: globalFlags.tlsCertFile, 149 | TLSKeyFile: globalFlags.tlsKeyFile, 150 | FileServerPath: globalFlags.fileServerPath, 151 | StaticHTMLPath: globalFlags.staticHTMLPath, 152 | TemplateSnippets: globalFlags.templateSnippets, 153 | IgnitionConfig: globalFlags.ignitionConfig, 154 | ImagesCacheDir: globalFlags.imagesCacheDir, 155 | 
FilesDir: globalFlags.filesDir, 156 | FlatcarAutologin: globalFlags.flatcarAutologin, 157 | ConsoleTTY: globalFlags.consoleTTY, 158 | SystemdShell: globalFlags.systemdShell, 159 | Version: projectVersion, 160 | 161 | Logger: logger, 162 | }, cluster) 163 | if err != nil { 164 | _ = logger.Log("level", "error", "message", "unable to create a pxe manager", "stack", err) 165 | os.Exit(1) 166 | } 167 | 168 | if globalFlags.showTemplates { 169 | placeholderHost := hostmgr.Host{} 170 | 171 | b := bytes.NewBuffer(nil) 172 | if err := pxeManager.WriteIgnitionConfig(placeholderHost, b); err != nil { 173 | _ = logger.Log("level", "error", "message", "error found while checking generated ignition config ", "stack", err) 174 | os.Exit(1) 175 | } 176 | os.Stdout.WriteString("ignition config:\n") 177 | os.Stdout.WriteString(b.String()) 178 | 179 | os.Exit(0) 180 | } 181 | 182 | err = pxeManager.Start() 183 | if err != nil { 184 | _ = logger.Log("level", "error", "message", err) 185 | os.Exit(1) 186 | } 187 | } 188 | 189 | func fileExists(path string) bool { 190 | if _, err := os.Stat(path); err == nil { 191 | return true 192 | } 193 | return false 194 | } 195 | -------------------------------------------------------------------------------- /main_test.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/giantswarm/mayu/fs" 7 | ) 8 | 9 | func TestHTTPSCertConfigValidation(t *testing.T) { 10 | cases := []struct { 11 | globalFlags MayuFlags 12 | expectedResult bool 13 | expectedError error 14 | }{ 15 | {MayuFlags{noTLS: true, tlsCertFile: "", tlsKeyFile: ""}, true, nil}, 16 | {MayuFlags{noTLS: true, tlsCertFile: "certfile", tlsKeyFile: ""}, true, nil}, 17 | {MayuFlags{noTLS: true, tlsCertFile: "", tlsKeyFile: "keyfile"}, true, nil}, 18 | {MayuFlags{noTLS: true, tlsCertFile: "certfile", tlsKeyFile: "keyfile"}, true, nil}, 19 | {MayuFlags{noTLS: false, tlsCertFile: "", tlsKeyFile: ""}, false, ErrNotAllCertFilesProvided}, 20 | {MayuFlags{noTLS: false, tlsCertFile: "certfile", tlsKeyFile: ""}, false, ErrNotAllCertFilesProvided}, 21 | {MayuFlags{noTLS: false, tlsCertFile: "", tlsKeyFile: "keyfile"}, false, ErrNotAllCertFilesProvided}, 22 | {MayuFlags{noTLS: false, tlsCertFile: "certfile", tlsKeyFile: "keyfile"}, true, nil}, 23 | } 24 | 25 | for _, c := range cases { 26 | result, err := c.globalFlags.ValidateHTTPCertificateUsage() 27 | 28 | if result != c.expectedResult { 29 | t.Errorf("expected function ValidateHTTPCertificateUsage() to return %v for configuration :%#v", c.expectedResult, c.globalFlags) 30 | } 31 | 32 | if err != c.expectedError { 33 | t.Errorf("expected function ValidateHTTPCertificateUsage() to return error '%s' but got '%s' for configuration :%#v", c.expectedError, err, c.globalFlags) 34 | } 35 | 36 | } 37 | } 38 | 39 | func TestHTTPCertConfigFileStatValidation(t *testing.T) { 40 | cases := []struct { 41 | globalFlags MayuFlags 42 | expectedResult bool 43 | expectedError error 44 | }{ 45 | { // TLS connections are turned off, so no cert files should be needed. 46 | MayuFlags{ 47 | filesystem: fs.FakeFilesystem{}, 48 | noTLS: true, 49 | }, 50 | true, 51 | nil, 52 | }, 53 | { // Both files are provided but TLS connections are turned off, which should be ok too. 
54 | MayuFlags{ 55 | filesystem: fs.NewFakeFilesystemWithFiles([]fs.FakeFile{ 56 | fs.NewFakeFile("cert.pem", "foobar"), 57 | fs.NewFakeFile("key.pem", "barbaz"), 58 | }), 59 | tlsCertFile: "cert.pem", 60 | tlsKeyFile: "key.pem", 61 | noTLS: true, 62 | }, 63 | true, 64 | nil, 65 | }, 66 | { 67 | MayuFlags{ 68 | filesystem: fs.FakeFilesystem{}, 69 | tlsCertFile: "cert.pem", 70 | tlsKeyFile: "key.pem", 71 | }, 72 | false, 73 | ErrHTTPSCertFileNotRedable, 74 | }, 75 | { // Only a key file is provided which should result in a missing cert file error. 76 | MayuFlags{ 77 | filesystem: fs.NewFakeFilesystemWithFiles([]fs.FakeFile{ 78 | fs.NewFakeFile("key.pem", "foobar"), 79 | }), 80 | tlsCertFile: "cert.pem", 81 | tlsKeyFile: "key.pem", 82 | }, 83 | false, 84 | ErrHTTPSCertFileNotRedable, 85 | }, 86 | { // Only a cert file is provided which should result in a missing key file error. 87 | MayuFlags{ 88 | filesystem: fs.NewFakeFilesystemWithFiles([]fs.FakeFile{ 89 | fs.NewFakeFile("cert.pem", "foobar"), 90 | }), 91 | tlsCertFile: "cert.pem", 92 | tlsKeyFile: "key.pem", 93 | }, 94 | false, 95 | ErrHTTPSKeyFileNotReadable, 96 | }, 97 | { // Both files are provided which should be ok. 98 | MayuFlags{ 99 | filesystem: fs.NewFakeFilesystemWithFiles([]fs.FakeFile{ 100 | fs.NewFakeFile("cert.pem", "foobar"), 101 | fs.NewFakeFile("key.pem", "barbaz"), 102 | }), 103 | tlsCertFile: "cert.pem", 104 | tlsKeyFile: "key.pem", 105 | }, 106 | true, 107 | nil, 108 | }, 109 | } 110 | 111 | for _, c := range cases { 112 | result, err := c.globalFlags.ValidateHTTPCertificateFileExistance() 113 | 114 | if result != c.expectedResult { 115 | t.Errorf("Expected result to be %v but got %v for configuration %#v", c.expectedResult, result, c.globalFlags) 116 | } 117 | 118 | if err != c.expectedError { 119 | t.Errorf("Expected error to be '%s' but got '%s' for configuration %#v", c.expectedError, err, c.globalFlags) 120 | } 121 | } 122 | 123 | } 124 | -------------------------------------------------------------------------------- /pxemgr/config.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "os" 7 | 8 | "github.com/giantswarm/microerror" 9 | "gopkg.in/yaml.v2" 10 | 11 | "github.com/giantswarm/mayu/hostmgr" 12 | ) 13 | 14 | func LoadConfig(filePath string) (Configuration, error) { 15 | conf := Configuration{} 16 | 17 | f, err := os.Open(filePath) 18 | if err != nil { 19 | return conf, microerror.Mask(err) 20 | } 21 | defer f.Close() 22 | 23 | confBytes, err := ioutil.ReadAll(f) 24 | if err != nil { 25 | return conf, microerror.Mask(err) 26 | } 27 | 28 | err = yaml.Unmarshal(confBytes, &conf) 29 | 30 | fmt.Printf("loaded config: %#v\n", conf) 31 | 32 | return conf, microerror.Mask(err) 33 | } 34 | 35 | type Configuration struct { 36 | DefaultFlatcarVersion string `yaml:"default_flatcar_version"` 37 | Network Network 38 | Profiles []Profile 39 | TemplatesEnv map[string]interface{} `yaml:"templates_env"` 40 | } 41 | 42 | type Profile struct { 43 | Quantity int 44 | Name string 45 | Tags []string 46 | DisableEngine bool `yaml:"disable_engine"` 47 | FlatcarVersion string `yaml:"flatcar_version"` 48 | EtcdClusterToken string `yaml:"etcd_cluster_token"` 49 | } 50 | 51 | type NetworkRange struct { 52 | Start string 53 | End string 54 | } 55 | 56 | type NetworkRoute struct { 57 | DestinationCIDR string `yaml:"destination_cidr"` 58 | RouteHop string `yaml:"route_hop"` 59 | } 60 | 61 | type NetworkModel struct { 62 | Type string 
`yaml:"type"` 63 | VlanId string `yaml:"vlan_id"` 64 | BondMode string `yaml:"bond_mode"` 65 | BondInterfaceMatch string `yaml:"bond_interface_match"` 66 | } 67 | 68 | type NetworkInterface struct { 69 | Routes []NetworkRoute `yaml:"routes"` 70 | InterfaceName string `yaml:"interface_name"` 71 | IPRange NetworkRange `yaml:"ip_range"` 72 | SubnetSize string `yaml:"subnet_size"` 73 | SubnetGateway string `yaml:"subnet_gateway"` 74 | Model NetworkModel `yaml:"network_model"` 75 | 76 | DNS []string `yaml:"dns"` 77 | } 78 | 79 | type Network struct { 80 | BindAddr string `yaml:"bind_addr"` 81 | PXE struct { 82 | Enabled bool 83 | PxeInterface NetworkInterface `yaml:"pxe_interface"` 84 | } `yaml:"pxe"` 85 | 86 | PrimaryNIC NetworkInterface `yaml:"primary_nic"` 87 | ExtraNICs []NetworkInterface `yaml:"extra_nics"` 88 | 89 | // if set true use UEFI boot, otherwise use legacy BIOS 90 | UEFI bool 91 | 92 | // NTP list for installed machines 93 | NTP []string 94 | 95 | IgnoredHosts []string 96 | StaticHosts []hostmgr.IPMac 97 | } 98 | -------------------------------------------------------------------------------- /pxemgr/dnsmasq.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "bufio" 5 | "io" 6 | "os" 7 | "os/exec" 8 | "text/template" 9 | 10 | "github.com/giantswarm/microerror" 11 | "github.com/giantswarm/micrologger" 12 | ) 13 | 14 | type DNSmasqConfiguration struct { 15 | Executable string 16 | Template string 17 | TFTPRoot string 18 | PXEPort int 19 | 20 | Logger micrologger.Logger 21 | } 22 | 23 | type DNSmasqInstance struct { 24 | confpath string 25 | args []string 26 | 27 | conf DNSmasqConfiguration 28 | cmd *exec.Cmd 29 | } 30 | 31 | func NewDNSmasq(baseFile string, conf DNSmasqConfiguration) *DNSmasqInstance { 32 | confFile := baseFile + ".conf" 33 | leaseFile := baseFile + ".lease" 34 | 35 | return &DNSmasqInstance{ 36 | args: []string{"-k", "-d", "--conf-file=" + confFile, "--dhcp-leasefile=" + leaseFile}, 37 | confpath: confFile, 38 | conf: conf, 39 | } 40 | } 41 | 42 | func (dnsmasq *DNSmasqInstance) Start() error { 43 | _ = dnsmasq.conf.Logger.Log("level", "info", "component", "dnsmasq", "message", "starting Dnsmasq server") 44 | 45 | cmd := exec.Command(dnsmasq.conf.Executable, dnsmasq.args...) 
//nolint 46 | 47 | stdout, err := cmd.StdoutPipe() 48 | if err != nil { 49 | return microerror.Mask(err) 50 | } 51 | stderr, err := cmd.StderrPipe() 52 | if err != nil { 53 | return microerror.Mask(err) 54 | } 55 | 56 | pipeLogger := func(rdr io.Reader) { 57 | scanner := bufio.NewScanner(rdr) 58 | for scanner.Scan() { 59 | _ = dnsmasq.conf.Logger.Log("level", "info", "component", "dnsmasq", "message", scanner.Text()) 60 | } 61 | } 62 | go pipeLogger(stdout) 63 | go pipeLogger(stderr) 64 | 65 | cmd.SysProcAttr = genPlatformSysProcAttr() 66 | dnsmasq.cmd = cmd 67 | err = cmd.Start() 68 | if err != nil { 69 | _ = dnsmasq.conf.Logger.Log("level", "error", "component", "dnsmasq", "message", "failed to start dns command", "stack", err) 70 | return microerror.Mask(err) 71 | } 72 | go func(cmd *exec.Cmd) { 73 | err := cmd.Wait() 74 | if err != nil { 75 | _ = dnsmasq.conf.Logger.Log("level", "error", "component", "dnsmasq", "message", "failed to start dns command", "stack", err) 76 | } 77 | }(cmd) 78 | 79 | return nil 80 | } 81 | 82 | func (dnsmasq *DNSmasqInstance) Restart() error { 83 | _ = dnsmasq.conf.Logger.Log("level", "info", "component", "dnsmasq", "message", "restarting Dnsmasq server") 84 | 85 | if dnsmasq.cmd != nil { 86 | _ = dnsmasq.cmd.Process.Kill() 87 | } 88 | err := dnsmasq.Start() 89 | if err != nil { 90 | return microerror.Mask(err) 91 | } 92 | return nil 93 | } 94 | 95 | func (dnsmasq *DNSmasqInstance) updateConf(net Network) error { 96 | _ = dnsmasq.conf.Logger.Log("level", "info", "component", "dnsmasq", "message", "updating Dnsmasq configuration") 97 | 98 | tmpl, err := template.ParseFiles(dnsmasq.conf.Template) 99 | if err != nil { 100 | return microerror.Mask(err) 101 | } 102 | 103 | tmplArgs := struct { 104 | Network Network 105 | Global DNSmasqConfiguration 106 | }{ 107 | Network: net, 108 | Global: dnsmasq.conf, 109 | } 110 | 111 | file, err := os.Create(dnsmasq.confpath) 112 | if err != nil { 113 | return microerror.Mask(err) 114 | } 115 | defer file.Close() 116 | 117 | err = tmpl.Execute(file, tmplArgs) 118 | if err != nil { 119 | return microerror.Mask(err) 120 | } 121 | return nil 122 | } 123 | -------------------------------------------------------------------------------- /pxemgr/error.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import "github.com/giantswarm/microerror" 4 | 5 | var executionFailedError = µerror.Error{ 6 | Kind: "executionFailedError", 7 | } 8 | 9 | // IsExecutionFailed asserts executionFailedError. 10 | func IsExecutionFailed(err error) bool { 11 | return microerror.Cause(err) == executionFailedError 12 | } 13 | 14 | var invalidConfigError = µerror.Error{ 15 | Kind: "invalidConfigError", 16 | } 17 | 18 | // IsInvalidConfig asserts invalidConfigError. 
19 | func IsInvalidConfig(err error) bool { 20 | return microerror.Cause(err) == invalidConfigError 21 | } 22 | -------------------------------------------------------------------------------- /pxemgr/etcd_discovery_handlers.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "bytes" 5 | "crypto/tls" 6 | "crypto/x509" 7 | "errors" 8 | "fmt" 9 | "io" 10 | "io/ioutil" 11 | "net/http" 12 | "net/url" 13 | "path" 14 | "strconv" 15 | "strings" 16 | 17 | "github.com/giantswarm/microerror" 18 | "github.com/gorilla/mux" 19 | ) 20 | 21 | type EtcdNode struct { 22 | Key string `json:"key"` 23 | Value string `json:"value,omitempty"` 24 | Nodes []*EtcdNode `json:"nodes,omitempty"` 25 | Dir bool `json:"dir,omitempty"` 26 | } 27 | 28 | type EtcdResponse struct { 29 | Action string `json:"action"` 30 | Node *EtcdNode `json:"node,omitempty"` 31 | } 32 | 33 | type EtcdResponseError struct { 34 | ErrorCode int `json:"errorCode"` 35 | Message string `json:"message"` 36 | Cause string `json:"cause"` 37 | } 38 | 39 | func (mgr *pxeManagerT) defineEtcdDiscoveryRoutes(etcdRouter *mux.Router) { 40 | etcdRouter.PathPrefix("/new").Methods("PUT").HandlerFunc(mgr.etcdDiscoveryNewCluster) 41 | 42 | tokenRouter := etcdRouter.PathPrefix("/{token:[a-f0-9]{32}}").Subrouter() 43 | tokenRouter.PathPrefix("/_config/size").Methods("GET").HandlerFunc(mgr.etcdDiscoveryProxyHandler) 44 | tokenRouter.PathPrefix("/_config/size").Methods("PUT").HandlerFunc(mgr.etcdDiscoveryProxyHandler) 45 | tokenRouter.PathPrefix("/{machine}").Methods("PUT").HandlerFunc(mgr.etcdDiscoveryProxyHandler) 46 | tokenRouter.PathPrefix("/{machine}").Methods("GET").HandlerFunc(mgr.etcdDiscoveryProxyHandler) 47 | tokenRouter.PathPrefix("/{machine}").Methods("DELETE").HandlerFunc(mgr.etcdDiscoveryProxyHandler) 48 | tokenRouter.Methods("GET").HandlerFunc(mgr.etcdDiscoveryProxyHandler) 49 | 50 | etcdRouter.Methods("GET").HandlerFunc(mgr.etcdDiscoveryHandler) 51 | } 52 | 53 | func (mgr *pxeManagerT) etcdDiscoveryHandler(w http.ResponseWriter, r *http.Request) { 54 | http.Redirect(w, r, 55 | "https://github.com/giantswarm/mayu/blob/master/docs/etcd-discovery.md", 56 | http.StatusMovedPermanently, 57 | ) 58 | } 59 | 60 | func (mgr *pxeManagerT) etcdDiscoveryNewCluster(w http.ResponseWriter, r *http.Request) { 61 | var err error 62 | size := mgr.defaultEtcdQuorumSize 63 | s := r.FormValue("size") 64 | if s != "" { 65 | size, err = strconv.Atoi(s) 66 | if err != nil { 67 | mgr.httpError(w, err.Error(), http.StatusBadRequest) 68 | return 69 | } 70 | } 71 | 72 | token, err := mgr.cluster.GenerateEtcdDiscoveryToken() 73 | if err != nil { 74 | mgr.httpError(w, fmt.Sprintf("Unable to generate token '%v'", err), 400) 75 | return 76 | } 77 | 78 | err = mgr.cluster.StoreEtcdDiscoveryToken(mgr.etcdEndpoint, mgr.etcdCAFile, token, size) 79 | if err != nil { 80 | mgr.httpError(w, fmt.Sprintf("Unable to store token in etcd '%v'", err), 400) 81 | return 82 | } 83 | 84 | _ = mgr.logger.Log("level", "info", "message", fmt.Sprintf("New cluster created '%s'", token)) 85 | 86 | fmt.Fprintf(w, "%s/%s", mgr.etcdDiscoveryBaseURL(), token) 87 | } 88 | 89 | func (mgr *pxeManagerT) etcdDiscoveryBaseURL() string { 90 | return fmt.Sprintf("%s/etcd", mgr.apiURL()) 91 | } 92 | 93 | func (mgr *pxeManagerT) etcdDiscoveryProxyHandler(w http.ResponseWriter, r *http.Request) { 94 | resp, err := mgr.etcdDiscoveryProxyRequest(r) 95 | if err != nil { 96 | mgr.httpError(w, fmt.Sprintf("Error proxying request to etcd '%v'", err), 
500) 97 | } 98 | 99 | copyHeader(w.Header(), resp.Header) 100 | w.WriteHeader(resp.StatusCode) 101 | _, _ = io.Copy(w, resp.Body) 102 | } 103 | 104 | func (mgr *pxeManagerT) etcdDiscoveryProxyRequest(r *http.Request) (*http.Response, error) { 105 | body, err := ioutil.ReadAll(r.Body) 106 | if err != nil { 107 | return nil, microerror.Mask(err) 108 | } 109 | u, err := url.Parse(mgr.etcdEndpoint) 110 | if err != nil { 111 | return nil, microerror.Mask(errors.New("invalid etcd-endpoint: " + err.Error())) 112 | } 113 | 114 | u.Path = path.Join("v2", "keys", "_etcd", "registry", strings.TrimPrefix(r.URL.Path, "/etcd")) 115 | u.RawQuery = r.URL.RawQuery 116 | var transport = http.DefaultTransport 117 | 118 | if u.Scheme == "https" && mgr.etcdCAFile != "" { 119 | customCA := x509.NewCertPool() 120 | 121 | pemData, err := ioutil.ReadFile(mgr.etcdCAFile) 122 | if err != nil { 123 | return nil, microerror.Mask(errors.New("unable to read custom CA file: " + err.Error())) 124 | } 125 | customCA.AppendCertsFromPEM(pemData) 126 | transport = &http.Transport{ 127 | TLSClientConfig: &tls.Config{ 128 | RootCAs: customCA, 129 | MinVersion: tls.VersionTLS12, 130 | }, 131 | } 132 | } 133 | 134 | for i := 0; i <= 10; i++ { // nolint 135 | buf := bytes.NewBuffer(body) 136 | _ = mgr.logger.Log("level", "info", "message", fmt.Sprintf("Body '%s'", body)) 137 | 138 | outreq, err := http.NewRequest(r.Method, u.String(), buf) 139 | if err != nil { 140 | return nil, microerror.Mask(err) 141 | } 142 | 143 | copyHeader(outreq.Header, r.Header) 144 | 145 | client := http.Client{Transport: transport} 146 | resp, err := client.Do(outreq) 147 | if err != nil { 148 | return nil, microerror.Mask(err) 149 | } 150 | 151 | return resp, nil // nolint 152 | } 153 | 154 | return nil, microerror.Mask(errors.New("all attempts at proxying to etcd failed")) 155 | } 156 | 157 | // copyHeader copies all of the headers from dst to src. 158 | func copyHeader(dst, src http.Header) { 159 | for k, v := range src { 160 | for _, q := range v { 161 | dst.Add(k, q) 162 | } 163 | } 164 | } 165 | -------------------------------------------------------------------------------- /pxemgr/filemanager.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "bytes" 5 | "encoding/base64" 6 | "fmt" 7 | "io/ioutil" 8 | "path" 9 | "text/template" 10 | 11 | "github.com/giantswarm/microerror" 12 | ) 13 | 14 | // Files is map[string]string for files that we fetched from disk and then filled with data. 
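// Keys are "<dir>/<file>" paths relative to the configured files directory; values hold the template-rendered file contents, base64-encoded so they can be dropped into Ignition data URLs (see RenderFiles below).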
15 | type Files map[string]string 16 | 17 | func (mgr *pxeManagerT) RenderFiles(ctx interface{}) (*Files, error) { 18 | files := Files{} 19 | dirList, err := ioutil.ReadDir(mgr.filesDir) 20 | if err != nil { 21 | _ = mgr.logger.Log("level", "error", "message", fmt.Sprintf("Failed to read files dir: %s", mgr.filesDir), "stack", err) 22 | return nil, microerror.Mask(err) 23 | } 24 | 25 | for _, dir := range dirList { 26 | fileList, err := ioutil.ReadDir(path.Join(mgr.filesDir, dir.Name())) 27 | if err != nil { 28 | _ = mgr.logger.Log("level", "error", "message", fmt.Sprintf("Failed to read dir: %s", path.Join(mgr.filesDir, dir.Name())), "stack", err) 29 | return nil, microerror.Mask(err) 30 | } 31 | 32 | for _, file := range fileList { 33 | tmpl, err := template.ParseFiles(path.Join(mgr.filesDir, dir.Name(), file.Name())) 34 | if err != nil { 35 | _ = mgr.logger.Log("level", "error", "message", fmt.Sprintf("Failed to file: %s", path.Join(mgr.filesDir, dir.Name(), file.Name())), "stack", err) 36 | return nil, microerror.Mask(err) 37 | } 38 | 39 | var data bytes.Buffer 40 | err = tmpl.Execute(&data, ctx) 41 | if err != nil { 42 | _ = mgr.logger.Log("level", "error", "message", fmt.Sprintf("Failed to execute tmpl for %s", path.Join(mgr.filesDir, dir.Name(), file.Name())), "stack", err) 43 | return nil, microerror.Mask(err) 44 | } 45 | 46 | files[dir.Name()+"/"+file.Name()] = base64.StdEncoding.EncodeToString(data.Bytes()) 47 | } 48 | } 49 | return &files, nil 50 | } 51 | -------------------------------------------------------------------------------- /pxemgr/ignition.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io" 8 | "io/ioutil" 9 | "os" 10 | "path" 11 | "path/filepath" 12 | "strings" 13 | "text/template" 14 | 15 | "github.com/giantswarm/microerror" 16 | "gopkg.in/yaml.v2" 17 | 18 | "github.com/giantswarm/mayu/hostmgr" 19 | ) 20 | 21 | func (mgr *pxeManagerT) WriteIgnitionConfig(host hostmgr.Host, wr io.Writer) error { 22 | etcdClusterToken := mgr.cluster.Config.DefaultEtcdClusterToken 23 | 24 | if host.EtcdClusterToken != "" { 25 | etcdClusterToken = host.EtcdClusterToken 26 | } 27 | 28 | mergedTemplatesEnv := mgr.config.TemplatesEnv 29 | for k, v := range host.Overrides { 30 | mergedTemplatesEnv[k] = v 31 | } 32 | 33 | ctx := struct { 34 | Host hostmgr.Host 35 | EtcdDiscoveryUrl string 36 | ClusterNetwork Network 37 | MayuHost string 38 | MayuPort int 39 | MayuURL string 40 | PostBootURL string 41 | NoTLS bool 42 | TemplatesEnv map[string]interface{} 43 | Files Files 44 | }{ 45 | Host: host, 46 | ClusterNetwork: mgr.config.Network, 47 | EtcdDiscoveryUrl: fmt.Sprintf("%s/%s", mgr.etcdDiscoveryUrl, etcdClusterToken), 48 | MayuHost: mgr.config.Network.BindAddr, 49 | MayuPort: mgr.apiPort, 50 | MayuURL: mgr.apiURL(), 51 | PostBootURL: mgr.apiURL() + "/admin/host/" + host.Serial + "/boot_complete", 52 | NoTLS: mgr.noTLS, 53 | TemplatesEnv: mergedTemplatesEnv, 54 | } 55 | 56 | files, err := mgr.RenderFiles(ctx) 57 | if err != nil { 58 | return microerror.Mask(err) 59 | } 60 | 61 | ctx.Files = *files 62 | tmpl, err := getTemplate(mgr.ignitionConfig, mgr.templateSnippets) 63 | if err != nil { 64 | return microerror.Mask(err) 65 | } 66 | 67 | var data bytes.Buffer 68 | if err = tmpl.Execute(&data, ctx); err != nil { 69 | return microerror.Mask(err) 70 | } 71 | ignitionJSON, err := convertTemplateToJSON(data.Bytes(), false) 72 | if err != nil { 73 | return microerror.Mask(err) 74 
| } 75 | host.State = hostmgr.Installing 76 | fmt.Fprintln(wr, string(ignitionJSON[:])) 77 | return nil 78 | } 79 | 80 | var snippetsFiles []string 81 | 82 | func maybeInitSnippets(snippets string) { 83 | if snippetsFiles != nil { 84 | return 85 | } 86 | snippetsFiles = []string{} 87 | 88 | if len(snippets) > 0 { 89 | if _, err := os.Stat(snippets); err == nil { 90 | if fis, err := ioutil.ReadDir(snippets); err == nil { 91 | for _, fi := range fis { 92 | snippetsFiles = append(snippetsFiles, path.Join(snippets, fi.Name())) 93 | } 94 | } 95 | } 96 | } 97 | } 98 | 99 | func join(sep string, i []interface{}) string { 100 | var s []string 101 | for _, si := range i { 102 | s = append(s, si.(string)) 103 | } 104 | return strings.Join(s, sep) 105 | } 106 | 107 | func getTemplate(path, snippets string) (*template.Template, error) { 108 | maybeInitSnippets(snippets) 109 | templates := []string{path} 110 | templates = append(templates, snippetsFiles...) 111 | 112 | name := filepath.Base(path) 113 | tmpl := template.New(name) 114 | tmpl.Funcs(map[string]interface{}{ 115 | "join": join, 116 | }) 117 | 118 | var err error 119 | tmpl, err = tmpl.ParseFiles(templates...) 120 | if err != nil { 121 | return nil, microerror.Mask(err) 122 | } 123 | 124 | return tmpl, nil 125 | } 126 | 127 | func convertTemplateToJSON(dataIn []byte, pretty bool) ([]byte, error) { 128 | cfg := Config{} 129 | 130 | if err := yaml.Unmarshal(dataIn, &cfg); err != nil { 131 | return nil, microerror.Maskf(executionFailedError, "failed to unmarshal input: %v", err) 132 | } 133 | 134 | var ( 135 | dataOut []byte 136 | err error 137 | ) 138 | 139 | if pretty { 140 | dataOut, err = json.MarshalIndent(&cfg, "", " ") 141 | if err != nil { 142 | return nil, microerror.Maskf(executionFailedError, "failed to marshal output: %v", err) 143 | } 144 | dataOut = append(dataOut, '\n') 145 | } else { 146 | dataOut, err = json.Marshal(&cfg) 147 | if err != nil { 148 | return nil, microerror.Maskf(executionFailedError, "failed to marshal output: %v", err) 149 | } 150 | } 151 | 152 | return dataOut, nil 153 | } 154 | -------------------------------------------------------------------------------- /pxemgr/iputil.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "bytes" 5 | "net" 6 | ) 7 | 8 | func incIP(ip net.IP) net.IP { 9 | ip = ip.To4() 10 | numIP := uint32(ip[0])<<24 + uint32(ip[1])<<16 + uint32(ip[2])<<8 + uint32(ip[3]) 11 | numIP++ 12 | newIP := net.IPv4(byte(numIP>>24&0xff), byte(numIP>>16&0xff), byte(numIP>>8&0xff), byte(numIP&0xff)) 13 | 14 | if newIP.IsMulticast() { 15 | return incIP(newIP) 16 | } 17 | return newIP 18 | } 19 | 20 | // ip less or equal 21 | func ipLessThanOrEqual(ip net.IP, upperBound net.IP) bool { 22 | return bytes.Compare(ip, upperBound) <= 0 23 | } 24 | 25 | // ip less or equal 26 | func ipMoreThanOrEqual(ip net.IP, upperBound net.IP) bool { 27 | return bytes.Compare(ip, upperBound) >= 0 28 | } 29 | -------------------------------------------------------------------------------- /pxemgr/iputil_test.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "testing" 5 | 6 | "net" 7 | ) 8 | 9 | func TestIPUtil_IncIP(t *testing.T) { 10 | // Create test IPs. Each slice represents a test case. The first IP is the 11 | // input. The last IP is the expected output. 
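The two IP helpers above are presumably what lets Mayu step through the configured ip_range address by address: incIP wraps octet boundaries and skips multicast addresses, while the comparison helpers bound the walk. A minimal sketch of that pattern, written as an example test that would sit next to iputil_test.go in package pxemgr (the range values are illustrative only):

package pxemgr

import (
	"fmt"
	"net"
)

// Example_walkIPRange walks a small IPv4 range inclusively using incIP and
// ipLessThanOrEqual. net.ParseIP and net.IPv4 both yield 16-byte IPs, so the
// byte-wise comparison in ipLessThanOrEqual stays consistent.
func Example_walkIPRange() {
	start := net.ParseIP("10.0.0.254")
	end := net.ParseIP("10.0.1.1")

	for ip := start; ipLessThanOrEqual(ip, end); ip = incIP(ip) {
		fmt.Println(ip)
	}
	// Output:
	// 10.0.0.254
	// 10.0.0.255
	// 10.0.1.0
	// 10.0.1.1
}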
12 | IPs := [][]net.IP{ 13 | []net.IP{net.ParseIP("0.0.0.0"), net.ParseIP("0.0.0.1")}, 14 | []net.IP{net.ParseIP("0.0.0.1"), net.ParseIP("0.0.0.2")}, 15 | []net.IP{net.ParseIP("0.0.1.0"), net.ParseIP("0.0.1.1")}, 16 | []net.IP{net.ParseIP("3.0.0.0"), net.ParseIP("3.0.0.1")}, 17 | []net.IP{net.ParseIP("0.0.0.255"), net.ParseIP("0.0.1.0")}, 18 | []net.IP{net.ParseIP("0.0.255.255"), net.ParseIP("0.1.0.0")}, 19 | []net.IP{net.ParseIP("0.255.255.255"), net.ParseIP("1.0.0.0")}, 20 | []net.IP{net.ParseIP("0.0.255.0"), net.ParseIP("0.0.255.1")}, 21 | []net.IP{net.ParseIP("0.255.0.0"), net.ParseIP("0.255.0.1")}, 22 | []net.IP{net.ParseIP("255.0.0.0"), net.ParseIP("255.0.0.1")}, 23 | } 24 | 25 | for _, testCase := range IPs { 26 | input := testCase[0] 27 | expected := testCase[1] 28 | output := incIP(input) 29 | if output.String() != expected.String() { 30 | t.Fatalf("expected IP '%s' to be incremented to IP '%s', got IP '%s'", input, expected, output) 31 | } 32 | } 33 | } 34 | 35 | func TestIPUtil_IPLessThanOrEqual(t *testing.T) { 36 | // Create test IPs. Each slice represents a test case. The first IP is the 37 | // smaller one. The last IP is the bigger or equal one. 38 | IPs := [][]net.IP{ 39 | []net.IP{net.ParseIP("0.0.0.0"), net.ParseIP("0.0.0.1")}, 40 | []net.IP{net.ParseIP("0.0.0.0"), net.ParseIP("0.0.0.0")}, // equal 41 | []net.IP{net.ParseIP("0.0.0.1"), net.ParseIP("0.0.0.2")}, 42 | []net.IP{net.ParseIP("0.0.1.0"), net.ParseIP("0.0.1.1")}, 43 | []net.IP{net.ParseIP("0.0.1.0"), net.ParseIP("0.0.1.0")}, // equal 44 | []net.IP{net.ParseIP("3.0.0.0"), net.ParseIP("3.0.0.1")}, 45 | []net.IP{net.ParseIP("3.0.0.0"), net.ParseIP("3.0.0.0")}, // equal 46 | []net.IP{net.ParseIP("0.0.0.255"), net.ParseIP("0.0.1.0")}, 47 | []net.IP{net.ParseIP("0.0.255.255"), net.ParseIP("0.1.0.0")}, 48 | []net.IP{net.ParseIP("0.255.255.255"), net.ParseIP("1.0.0.0")}, 49 | []net.IP{net.ParseIP("0.255.255.255"), net.ParseIP("0.255.255.255")}, // equal 50 | []net.IP{net.ParseIP("0.0.255.0"), net.ParseIP("0.0.255.1")}, 51 | []net.IP{net.ParseIP("0.255.0.0"), net.ParseIP("0.255.0.1")}, 52 | []net.IP{net.ParseIP("255.0.0.0"), net.ParseIP("255.0.0.1")}, 53 | } 54 | 55 | for _, testCase := range IPs { 56 | smaller := testCase[0] 57 | biggerOrEqual := testCase[1] 58 | if !ipLessThanOrEqual(smaller, biggerOrEqual) { 59 | t.Fatalf("expected IP '%s' to be less then, or equal to IP '%s', but it was detected to be greater", smaller, biggerOrEqual) 60 | } 61 | } 62 | } 63 | 64 | func TestIPUtil_IPMoreThanOrEqual(t *testing.T) { 65 | // Create test IPs. Each slice represents a test case. The first IP is the 66 | // bigger one. The last IP is the smaller or equal one. 
67 | IPs := [][]net.IP{ 68 | []net.IP{net.ParseIP("0.0.0.1"), net.ParseIP("0.0.0.0")}, 69 | []net.IP{net.ParseIP("0.0.0.0"), net.ParseIP("0.0.0.0")}, // equal 70 | []net.IP{net.ParseIP("0.0.0.2"), net.ParseIP("0.0.0.1")}, 71 | []net.IP{net.ParseIP("0.0.1.1"), net.ParseIP("0.0.1.0")}, 72 | []net.IP{net.ParseIP("0.0.1.0"), net.ParseIP("0.0.1.0")}, // equal 73 | []net.IP{net.ParseIP("3.0.0.1"), net.ParseIP("3.0.0.0")}, 74 | []net.IP{net.ParseIP("3.0.0.0"), net.ParseIP("3.0.0.0")}, // equal 75 | []net.IP{net.ParseIP("0.0.1.0"), net.ParseIP("0.0.0.255")}, 76 | []net.IP{net.ParseIP("0.1.0.0"), net.ParseIP("0.0.255.255")}, 77 | []net.IP{net.ParseIP("1.0.0.0"), net.ParseIP("0.255.255.255")}, 78 | []net.IP{net.ParseIP("0.255.255.255"), net.ParseIP("0.255.255.255")}, // equal 79 | []net.IP{net.ParseIP("0.0.255.1"), net.ParseIP("0.0.255.0")}, 80 | []net.IP{net.ParseIP("0.255.0.1"), net.ParseIP("0.255.0.0")}, 81 | []net.IP{net.ParseIP("255.0.0.1"), net.ParseIP("255.0.0.0")}, 82 | } 83 | 84 | for _, testCase := range IPs { 85 | bigger := testCase[0] 86 | smallerOrEqual := testCase[1] 87 | 88 | if !ipMoreThanOrEqual(bigger, smallerOrEqual) { 89 | t.Fatalf("expected IP '%s' to be bigger then, or equal to IP '%s', but it was detected to be smaller", bigger, smallerOrEqual) 90 | } 91 | } 92 | } 93 | -------------------------------------------------------------------------------- /pxemgr/key.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "os" 5 | "path" 6 | ) 7 | 8 | const ( 9 | vmlinuzFile = "flatcar_production_pxe.vmlinuz" 10 | initrdFile = "flatcar_production_pxe_image.cpio.gz" 11 | ) 12 | 13 | func (mgr *pxeManagerT) pxeKernelImage(flatcarVersion string) (*os.File, error) { 14 | return os.Open(path.Join(mgr.imagesCacheDir+"/"+flatcarVersion, vmlinuzFile)) 15 | } 16 | 17 | func (mgr *pxeManagerT) pxeInitRD(flatcarVersion string) (*os.File, error) { 18 | return os.Open(path.Join(mgr.imagesCacheDir+"/"+flatcarVersion, initrdFile)) 19 | } 20 | -------------------------------------------------------------------------------- /pxemgr/proc_utils_linux.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import "syscall" 4 | 5 | func genPlatformSysProcAttr() *syscall.SysProcAttr { 6 | return &syscall.SysProcAttr{Pdeathsig: 9} 7 | } 8 | -------------------------------------------------------------------------------- /pxemgr/pxemanager_test.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 | "net/http" 9 | "net/http/httptest" 10 | "os" 11 | "path/filepath" 12 | "strings" 13 | "testing" 14 | 15 | "github.com/giantswarm/micrologger" 16 | 17 | "github.com/giantswarm/mayu-infopusher/machinedata" 18 | 19 | "github.com/giantswarm/mayu/hostmgr" 20 | ) 21 | 22 | const ( 23 | baseConfig = `default_flatcar_version: myversion 24 | network: 25 | primary_nic: 26 | ip_range: 27 | start: 1.1.1.1 28 | end: 1.1.1.2 29 | templates_env: 30 | mayu_https_endpoint: https://mayu 31 | http_proxy_enabled: true 32 | http_proxy: 33 | username: username 34 | password: password 35 | uri: uri 36 | port: 123 37 | no_proxy: 38 | - example.com 39 | - other.example.com 40 | ` 41 | configOK = baseConfig + ` update: "no_updates"` 42 | configErr = baseConfig + ` update: "update"` 43 | ignition = `ignition: 44 | version: 2.2.0 45 | systemd: 46 | {{if eq .TemplatesEnv.update "no_updates"}} 
47 | units: 48 | - name: update-engine.service 49 | enabled: false 50 | mask: true{{end}} 51 | - name: docker.service 52 | dropins: 53 | - name: 40-docker.conf 54 | contents: | 55 | [Service] 56 | {{- if .TemplatesEnv.http_proxy_enabled }} 57 | {{- with .TemplatesEnv.http_proxy }} 58 | Environment="HTTP_PROXY=http://{{ .username }}:{{ .password }}@{{ .uri }}:{{ .port }}" 59 | Environment="HTTPS_PROXY=http://{{ .username }}:{{ .password }}@{{ .uri }}:{{ .port }}" 60 | {{- if .no_proxy }} 61 | Environment="NO_PROXY={{ join "," .no_proxy }}" 62 | {{- end }} 63 | {{- end }} 64 | {{- end }} 65 | ` 66 | ) 67 | 68 | type helper struct { 69 | dir string 70 | fakeEtcd *httptest.Server 71 | pxeCfg PXEManagerConfiguration 72 | req *http.Request 73 | w *httptest.ResponseRecorder 74 | cluster *hostmgr.Cluster 75 | } 76 | 77 | func setUp(t *testing.T) *helper { 78 | h := &helper{} 79 | 80 | var err error 81 | h.dir, err = ioutil.TempDir("", "pxmgr_cloudconfig_") 82 | if err != nil { 83 | t.Fatal(err) 84 | } 85 | 86 | if err := ioutil.WriteFile(filepath.Join(h.dir, "config_ok.yaml"), []byte(configOK), 0644); err != nil { // nolint 87 | t.Fatal(err) 88 | } 89 | if err := ioutil.WriteFile(filepath.Join(h.dir, "config_err.yaml"), []byte(configErr), 0644); err != nil { // nolint 90 | t.Fatal(err) 91 | } 92 | if err := ioutil.WriteFile(filepath.Join(h.dir, "ignition.yaml"), []byte(ignition), 0644); err != nil { // nolint 93 | t.Fatal(err) 94 | } 95 | if err := os.Mkdir(filepath.Join(h.dir, "files"), 0644); err != nil { 96 | t.Fatal(err) 97 | } 98 | 99 | logger, err := micrologger.New(micrologger.Config{}) 100 | if err != nil { 101 | t.Fatalf("failed to create logger cluster: %s", err) 102 | } 103 | 104 | h.cluster, err = hostmgr.NewCluster(h.dir, logger) 105 | if err != nil { 106 | t.Fatalf("creating cluster: %s", err) 107 | } 108 | 109 | h.fakeEtcd = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 110 | fmt.Fprintln(w, "{}") 111 | })) 112 | 113 | h.pxeCfg = PXEManagerConfiguration{ 114 | UseInternalEtcdDiscovery: true, 115 | NoTLS: true, 116 | // This port is declared only to allow PXEMAnager instantiation (APIPort and 117 | // PXEPort must be different), the server is not going to be started and we 118 | // are going to test the handler method directly 119 | APIPort: 4080, 120 | FilesDir: filepath.Join(h.dir, "files"), 121 | IgnitionConfig: filepath.Join(h.dir, "ignition.yaml"), 122 | EtcdEndpoint: h.fakeEtcd.URL, 123 | } 124 | 125 | hostData := machinedata.HostData{ 126 | Serial: "myserial", 127 | } 128 | b := new(bytes.Buffer) 129 | _ = json.NewEncoder(b).Encode(hostData) 130 | h.req = httptest.NewRequest("GET", "http://127.0.0.1:4080/ignition?serial=test1234", b) 131 | h.w = httptest.NewRecorder() 132 | 133 | return h 134 | } 135 | 136 | func tearDown(h *helper) { 137 | os.RemoveAll(h.dir) 138 | h.fakeEtcd.Close() 139 | } 140 | 141 | func TestFinalCloudConfigChecksErrorOk(t *testing.T) { 142 | h := setUp(t) 143 | defer tearDown(h) 144 | 145 | h.pxeCfg.ConfigFile = filepath.Join(h.dir, "config_ok.yaml") 146 | 147 | logger, err := micrologger.New(micrologger.Config{}) 148 | if err != nil { 149 | t.Fatalf("failed to create logger cluster: %s", err) 150 | } 151 | h.pxeCfg.Logger = logger 152 | 153 | // instantiate PXEManager (no need to start it) 154 | mgr, err := PXEManager(h.pxeCfg, h.cluster) 155 | if err != nil { 156 | t.Fatalf("unable to create a pxe manager: %s\n", err) 157 | } 158 | 159 | // call handler func and make assertions on the response recorder 160 | 
mgr.ignitionGenerator(h.w, h.req) 161 | 162 | if status := h.w.Code; status != http.StatusOK { 163 | t.Errorf("handler returned wrong status code: got %v want %v", 164 | status, http.StatusOK) 165 | } 166 | 167 | actual := h.w.Body.String() 168 | expected := `{"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{},"passwd":{},"storage":{},"systemd":{"units":[{"enabled":false,"mask":true,"name":"update-engine.service"},{"dropins":[{"contents":"[Service]\nEnvironment=\"HTTP_PROXY=http://username:password@uri:123\"\nEnvironment=\"HTTPS_PROXY=http://username:password@uri:123\"\nEnvironment=\"NO_PROXY=example.com,other.example.com\"\n","name":"40-docker.conf"}],"name":"docker.service"}]}} 169 | ` 170 | if actual != expected { 171 | t.Errorf("handler returned unexpected body: got %v want %v", 172 | actual, expected) 173 | } 174 | 175 | // make sure the template is complete 176 | if !strings.Contains(actual, "update-engine.service") { 177 | t.Errorf("response body contains incomplete template: %s", actual) 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /pxemgr/schema.go: -------------------------------------------------------------------------------- 1 | package pxemgr 2 | 3 | // This schema structure is based on github.com/coreos/ignition/config/v2_2/types/schema.go 4 | // Due to issue with unmarshalling embedded anonymous nested structures, 5 | // this file removes such structures. 6 | // Changed types: Directory, File, Link. 7 | type CaReference struct { 8 | Source string `json:"source,omitempty" yaml:"source,omitempty"` 9 | Verification Verification `json:"verification,omitempty" yaml:"verification,omitempty"` 10 | } 11 | type Config struct { 12 | Ignition Ignition `json:"ignition" yaml:"ignition,omitempty"` 13 | Networkd Networkd `json:"networkd,omitempty" yaml:"networkd,omitempty"` 14 | Passwd Passwd `json:"passwd,omitempty" yaml:"passwd,omitempty"` 15 | Storage Storage `json:"storage,omitempty" yaml:"storage,omitempty"` 16 | Systemd Systemd `json:"systemd,omitempty" yaml:"systemd,omitempty"` 17 | } 18 | type ConfigReference struct { 19 | Source string `json:"source,omitempty" yaml:"source,omitempty"` 20 | Verification Verification `json:"verification,omitempty" yaml:"verification,omitempty"` 21 | } 22 | type Create struct { 23 | Force bool `json:"force,omitempty" yaml:"force,omitempty"` 24 | Options []CreateOption `json:"options,omitempty" yaml:"options,omitempty"` 25 | } 26 | type CreateOption string 27 | type Device string 28 | type Directory struct { 29 | Filesystem string `json:"filesystem,omitempty" yaml:"filesystem,omitempty"` 30 | Group *NodeGroup `json:"group,omitempty" yaml:"group,omitempty"` 31 | Mode *int `json:"mode,omitempty" yaml:"mode,omitempty"` 32 | Overwrite *bool `json:"overwrite,omitempty" yaml:"overwrite,omitempty"` 33 | Path string `json:"path,omitempty" yaml:"path,omitempty"` 34 | User *NodeUser `json:"user,omitempty" yaml:"user,omitempty"` 35 | } 36 | type Disk struct { 37 | Device string `json:"device,omitempty" yaml:"device,omitempty"` 38 | Partitions []Partition `json:"partitions,omitempty" yaml:"partitions,omitempty"` 39 | WipeTable bool `json:"wipeTable,omitempty" yaml:"wipeTable,omitempty"` 40 | } 41 | type File struct { 42 | Append bool `json:"append,omitempty" yaml:"append,omitempty"` 43 | Contents FileContents `json:"contents,omitempty" yaml:"contents,omitempty"` 44 | Filesystem string `json:"filesystem,omitempty" yaml:"filesystem,omitempty"` 45 | Mode int 
`json:"mode,omitempty" yaml:"mode,omitempty"` 46 | Group *NodeGroup `json:"group,omitempty" yaml:"group,omitempty"` 47 | Overwrite *bool `json:"overwrite,omitempty" yaml:"overwrite,omitempty"` 48 | Path string `json:"path,omitempty" yaml:"path,omitempty"` 49 | User *NodeUser `json:"user,omitempty" yaml:"user,omitempty"` 50 | } 51 | type FileContents struct { 52 | Compression string `json:"compression,omitempty" yaml:"compression,omitempty"` 53 | Source string `json:"source,omitempty" yaml:"source,omitempty"` 54 | Verification Verification `json:"verification,omitempty" yaml:"verification,omitempty"` 55 | } 56 | type Filesystem struct { 57 | Mount *Mount `json:"mount,omitempty" yaml:"mount,omitempty"` 58 | Name string `json:"name,omitempty" yaml:"name,omitempty"` 59 | Path *string `json:"path,omitempty" yaml:"path,omitempty"` 60 | } 61 | type Group string 62 | type Ignition struct { 63 | Config IgnitionConfig `json:"config,omitempty" yaml:"config,omitempty"` 64 | Security Security `json:"security,omitempty" yaml:"security,omitempty"` 65 | Timeouts Timeouts `json:"timeouts,omitempty" yaml:"timeouts,omitempty"` 66 | Version string `json:"version,omitempty" yaml:"version,omitempty"` 67 | } 68 | type IgnitionConfig struct { 69 | Append []ConfigReference `json:"append,omitempty" yaml:"append,omitempty"` 70 | Replace *ConfigReference `json:"replace,omitempty" yaml:"replace,omitempty"` 71 | } 72 | type Link struct { 73 | Filesystem string `json:"filesystem,omitempty" yaml:"filesystem,omitempty"` 74 | Group *NodeGroup `json:"group,omitempty" yaml:"group,omitempty"` 75 | Hard bool `json:"hard,omitempty" yaml:"hard,omitempty"` 76 | Overwrite *bool `json:"overwrite,omitempty" yaml:"overwrite,omitempty"` 77 | Path string `json:"path,omitempty" yaml:"path,omitempty"` 78 | Target string `json:"target,omitempty" yaml:"target,omitempty"` 79 | User *NodeUser `json:"user,omitempty" yaml:"user,omitempty"` 80 | } 81 | type Mount struct { 82 | Create *Create `json:"create,omitempty" yaml:"create,omitempty"` 83 | Device string `json:"device,omitempty" yaml:"device,omitempty"` 84 | Format string `json:"format,omitempty" yaml:"format,omitempty"` 85 | Label *string `json:"label,omitempty" yaml:"label,omitempty"` 86 | Options []MountOption `json:"options,omitempty" yaml:"options,omitempty"` 87 | UUID *string `json:"uuid,omitempty" yaml:"uuid,omitempty"` 88 | WipeFilesystem bool `json:"wipeFilesystem,omitempty" yaml:"wipeFilesystem,omitempty"` 89 | } 90 | type MountOption string 91 | type Networkd struct { 92 | Units []Networkdunit `json:"units,omitempty" yaml:"units,omitempty"` 93 | } 94 | type NetworkdDropin struct { 95 | Contents string `json:"contents,omitempty" yaml:"contents,omitempty"` 96 | Name string `json:"name,omitempty" yaml:"name,omitempty"` 97 | } 98 | type Networkdunit struct { 99 | Contents string `json:"contents,omitempty" yaml:"contents,omitempty"` 100 | Dropins []NetworkdDropin `json:"dropins,omitempty" yaml:"dropins,omitempty"` 101 | Name string `json:"name,omitempty" yaml:"name,omitempty"` 102 | } 103 | type Node struct { 104 | Filesystem string `json:"filesystem,omitempty" yaml:"filesystem,omitempty"` 105 | Group *NodeGroup `json:"group,omitempty" yaml:"group,omitempty"` 106 | Overwrite *bool `json:"overwrite,omitempty" yaml:"overwrite,omitempty"` 107 | Path string `json:"path,omitempty" yaml:"path,omitempty"` 108 | User *NodeUser `json:"user,omitempty" yaml:"user,omitempty"` 109 | } 110 | type NodeGroup struct { 111 | ID *int `json:"id,omitempty" yaml:"id,omitempty"` 112 | Name string 
`json:"name,omitempty" yaml:"name,omitempty"` 113 | } 114 | type NodeUser struct { 115 | ID *int `json:"id,omitempty" yaml:"id,omitempty"` 116 | Name string `json:"name,omitempty" yaml:"name,omitempty"` 117 | } 118 | type Partition struct { 119 | GUID string `json:"guid,omitempty" yaml:"guid,omitempty"` 120 | Label string `json:"label,omitempty" yaml:"label,omitempty"` 121 | Number int `json:"number,omitempty" yaml:"number,omitempty"` 122 | Size int `json:"size,omitempty" yaml:"size,omitempty"` 123 | Start int `json:"start,omitempty" yaml:"start,omitempty"` 124 | TypeGUID string `json:"typeGuid,omitempty" yaml:"typeGUID,omitempty"` 125 | } 126 | type Passwd struct { 127 | Groups []PasswdGroup `json:"groups,omitempty" yaml:"groups,omitempty"` 128 | Users []PasswdUser `json:"users,omitempty" yaml:"users,omitempty"` 129 | } 130 | type PasswdGroup struct { 131 | Gid *int `json:"gid,omitempty" yaml:"gid,omitempty"` 132 | Name string `json:"name,omitempty" yaml:"name,omitempty"` 133 | PasswordHash string `json:"passwordHash,omitempty" yaml:"passwordHash,omitempty"` 134 | System bool `json:"system,omitempty" yaml:"system,omitempty"` 135 | } 136 | type PasswdUser struct { 137 | Create *Usercreate `json:"create,omitempty" yaml:"create,omitempty"` 138 | Gecos string `json:"gecos,omitempty" yaml:"gecos,omitempty"` 139 | Groups []Group `json:"groups,omitempty" yaml:"groups,omitempty"` 140 | HomeDir string `json:"homeDir,omitempty" yaml:"homeDir,omitempty"` 141 | Name string `json:"name,omitempty" yaml:"name,omitempty"` 142 | NoCreateHome bool `json:"noCreateHome,omitempty" yaml:"noCreateHome,omitempty"` 143 | NoLogInit bool `json:"noLogInit,omitempty" yaml:"noLogInit,omitempty"` 144 | NoUserGroup bool `json:"noUserGroup,omitempty" yaml:"noUserGroup,omitempty"` 145 | PasswordHash *string `json:"passwordHash,omitempty" yaml:"passwordHash,omitempty"` 146 | PrimaryGroup string `json:"primaryGroup,omitempty" yaml:"primaryGroup,omitempty"` 147 | SSHAuthorizedKeys []SSHAuthorizedKey `json:"sshAuthorizedKeys,omitempty" yaml:"sshAuthorizedKeys,omitempty"` 148 | Shell string `json:"shell,omitempty" yaml:"shell,omitempty"` 149 | System bool `json:"system,omitempty" yaml:"system,omitempty"` 150 | UID *int `json:"uid,omitempty" yaml:"uid,omitempty"` 151 | } 152 | type Raid struct { 153 | Devices []Device `json:"devices,omitempty" yaml:"devices,omitempty"` 154 | Level string `json:"level,omitempty" yaml:"level,omitempty"` 155 | Name string `json:"name,omitempty" yaml:"name,omitempty"` 156 | Options []RaidOption `json:"options,omitempty" yaml:"options,omitempty"` 157 | Spares int `json:"spares,omitempty" yaml:"spares,omitempty"` 158 | } 159 | type RaidOption string 160 | type SSHAuthorizedKey string 161 | type Security struct { 162 | TLS TLS `json:"tls,omitempty" yaml:"tls,omitempty"` 163 | } 164 | type Storage struct { 165 | Directories []Directory `json:"directories,omitempty" yaml:"directories,omitempty"` 166 | Disks []Disk `json:"disks,omitempty" yaml:"disks,omitempty"` 167 | Files []File `json:"files,omitempty" yaml:"files,omitempty"` 168 | Filesystems []Filesystem `json:"filesystems,omitempty" yaml:"filesystems,omitempty"` 169 | Links []Link `json:"links,omitempty" yaml:"links,omitempty"` 170 | Raid []Raid `json:"raid,omitempty" yaml:"raid,omitempty"` 171 | } 172 | type Systemd struct { 173 | Units []Unit `json:"units,omitempty" yaml:"units,omitempty"` 174 | } 175 | type SystemdDropin struct { 176 | Contents string `json:"contents,omitempty" yaml:"contents,omitempty"` 177 | Name string `json:"name,omitempty" 
yaml:"name,omitempty"` 178 | } 179 | type TLS struct { 180 | CertificateAuthorities []CaReference `json:"certificateAuthorities,omitempty" yaml:"certificateAuthorities,omitempty"` 181 | } 182 | type Timeouts struct { 183 | HTTPResponseHeaders *int `json:"httpResponseHeaders,omitempty" yaml:"httpResponseHeaders,omitempty"` 184 | HTTPTotal *int `json:"httpTotal,omitempty" yaml:"httpTotal,omitempty"` 185 | } 186 | type Unit struct { 187 | Contents string `json:"contents,omitempty" yaml:"contents,omitempty"` 188 | Dropins []SystemdDropin `json:"dropins,omitempty" yaml:"dropins,omitempty"` 189 | Enable bool `json:"enable,omitempty" yaml:"enable,omitempty"` 190 | Enabled *bool `json:"enabled,omitempty" yaml:"enabled,omitempty"` 191 | Mask bool `json:"mask,omitempty" yaml:"mask,omitempty"` 192 | Name string `json:"name,omitempty" yaml:"name,omitempty"` 193 | } 194 | type Usercreate struct { 195 | Gecos string `json:"gecos,omitempty" yaml:"gecos,omitempty"` 196 | Groups []UsercreateGroup `json:"groups,omitempty" yaml:"groups,omitempty"` 197 | HomeDir string `json:"homeDir,omitempty" yaml:"homeDir,omitempty"` 198 | NoCreateHome bool `json:"noCreateHome,omitempty" yaml:"noCreateHome,omitempty"` 199 | NoLogInit bool `json:"noLogInit,omitempty" yaml:"noLogInit,omitempty"` 200 | NoUserGroup bool `json:"noUserGroup,omitempty" yaml:"noUserGroup,omitempty"` 201 | PrimaryGroup string `json:"primaryGroup,omitempty" yaml:"primaryGroup,omitempty"` 202 | Shell string `json:"shell,omitempty" yaml:"shell,omitempty"` 203 | System bool `json:"system,omitempty" yaml:"system,omitempty"` 204 | UID *int `json:"uid,omitempty" yaml:"uid,omitempty"` 205 | } 206 | type UsercreateGroup string 207 | type Verification struct { 208 | Hash *string `json:"hash,omitempty" yaml:"hash,omitempty"` 209 | } 210 | -------------------------------------------------------------------------------- /scripts/fetch-flatcar-image: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -eu 4 | 5 | : ${1?"Usage: $0 "} 6 | 7 | FLATCAR_VERSION=$1 8 | FLATCAR_CHANNEL=$2 9 | DEFAULT_IMAGES_DIR=$(pwd)/images 10 | IMAGES_DIR=${IMAGES_DIR:-$DEFAULT_IMAGES_DIR} 11 | KEEP_IMAGES=${KEEP_IMAGES:-"false"} 12 | 13 | if [ ${KEEP_IMAGES} = "false" ]; then 14 | # remove old images 15 | rm -rf ${IMAGES_DIR}/${FLATCAR_VERSION} 16 | fi 17 | 18 | mkdir -p ${IMAGES_DIR}/${FLATCAR_VERSION} 19 | 20 | if [ ! 
-f ${IMAGES_DIR}/${FLATCAR_VERSION}/keep ]; then 21 | wget -O ${IMAGES_DIR}/${FLATCAR_VERSION}/flatcar_production_image.bin.bz2 http://${FLATCAR_CHANNEL}.release.flatcar-linux.net/amd64-usr/${FLATCAR_VERSION}/flatcar_production_image.bin.bz2 && \ 22 | wget -O ${IMAGES_DIR}/${FLATCAR_VERSION}/flatcar_production_pxe.vmlinuz http://${FLATCAR_CHANNEL}.release.flatcar-linux.net/amd64-usr/${FLATCAR_VERSION}/flatcar_production_pxe.vmlinuz && \ 23 | wget -O ${IMAGES_DIR}/${FLATCAR_VERSION}/flatcar_pxe_image.cpio.gz http://${FLATCAR_CHANNEL}.release.flatcar-linux.net/amd64-usr/${FLATCAR_VERSION}/flatcar_production_pxe_image.cpio.gz 24 | echo "$FLATCAR_VERSION" > ${IMAGES_DIR}/${FLATCAR_VERSION}/flatcar-version 25 | 26 | mkdir -p ${IMAGES_DIR}/${FLATCAR_VERSION}/etc/systemd/system/ignition-disks.service.d/ 27 | cp inject_pxe/00-ignition-delay.conf ${IMAGES_DIR}/${FLATCAR_VERSION}/etc/systemd/system/ignition-disks.service.d/ 28 | 29 | docker run --net=host --rm -v ${IMAGES_DIR}/${FLATCAR_VERSION}:/usr/code/images \ 30 | alpine:3.8 /bin/sh -c "apk add cpio gzip && \ 31 | zcat /usr/code/images/flatcar_pxe_image.cpio.gz > /usr/code/images/flatcar_production_pxe_image.cpio && \ 32 | cd /usr/code/images && find etc | cpio -o -A -H newc -O flatcar_production_pxe_image.cpio && \ 33 | gzip -f flatcar_production_pxe_image.cpio && echo DONE" 34 | 35 | rm -rf ${IMAGES_DIR}/${FLATCAR_VERSION}/etc 36 | rm -f ${IMAGES_DIR}/${FLATCAR_VERSION}/flatcar_pxe_image.cpio.gz 37 | 38 | if [ ${KEEP_IMAGES} = "true" ]; then 39 | touch ${IMAGES_DIR}/${FLATCAR_VERSION}/keep 40 | fi 41 | 42 | else 43 | echo "Images for ${FLATCAR_VERSION} already exist on filesystem" 44 | fi 45 | -------------------------------------------------------------------------------- /scripts/fetch-flatcar-qemu-image: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -eu 4 | 5 | : ${1?"Usage: $0 "} 6 | 7 | FLATCAR_VERSION=$1 8 | FLATCAR_CHANNEL=${2:-stable} 9 | IMAGE_PATH=$(pwd)/images/qemu/${FLATCAR_VERSION} 10 | KERNEL=flatcar_production_qemu.vmlinuz 11 | USRFS=flatcar_production_qemu_usr_image.squashfs 12 | 13 | mkdir -p ${IMAGE_PATH} 14 | cd ${IMAGE_PATH} 15 | 16 | # remove old images 17 | rm -f coreos* 18 | 19 | wget http://${FLATCAR_CHANNEL}.release.flatcar-linux.net/amd64-usr/${FLATCAR_VERSION}/flatcar_production_pxe.vmlinuz 20 | wget http://${FLATCAR_CHANNEL}.release.flatcar-linux.net/amd64-usr/${FLATCAR_VERSION}/flatcar_production_pxe.vmlinuz.sig 21 | wget http://${FLATCAR_CHANNEL}.release.flatcar-linux.net/amd64-usr/${FLATCAR_VERSION}/flatcar_production_pxe_image.cpio.gz 22 | wget http://${FLATCAR_CHANNEL}.release.flatcar-linux.net/amd64-usr/${FLATCAR_VERSION}/flatcar_production_pxe_image.cpio.gz.sig 23 | echo "$FLATCAR_VERSION" > flatcar-version 24 | 25 | gpg --verify flatcar_production_pxe.vmlinuz.sig 26 | gpg --verify flatcar_production_pxe_image.cpio.gz.sig 27 | 28 | docker run --rm -v $IMAGE_PATH:/usr/code/images --net=host ubuntu:xenial /bin/bash -c "apt-get update -y && apt-get install cpio && \ 29 | zcat /usr/code/images/flatcar_production_pxe_image.cpio.gz | cpio -i --quiet --sparse usr.squashfs && mv usr.squashfs /usr/code/images/$USRFS" 30 | 31 | cp flatcar_production_pxe.vmlinuz $KERNEL 32 | 33 | sha256sum $KERNEL > $KERNEL.sha256 34 | sha256sum $USRFS > $USRFS.sha256 35 | 36 | cd - 37 | -------------------------------------------------------------------------------- /scripts/fetch-yochu-assets: -------------------------------------------------------------------------------- 
1 | #!/bin/sh 2 | 3 | ETCD_VERSION=v3.0.3 4 | FLEET_VERSION=v0.11.5-gs-grpc-2rc1 5 | DOCKER_VERSION=1.10.3 6 | YOCHU_VERSION=0.20.1 7 | RKT_VERSION=v1.3.0 8 | K8S_VERSION=v1.4.4 9 | CALICO_VERSION=v0.22.0 10 | CALICO_CNI_VERSION=v1.4.2 11 | 12 | mkdir -p ./yochu 13 | 14 | if [ ! -d "./yochu/yochu/${YOCHU_VERSION}" ]; then 15 | mkdir -p ./yochu/yochu/${YOCHU_VERSION} 16 | wget -O ./yochu/yochu/${YOCHU_VERSION}/yochu https://downloads.giantswarm.io/yochu/${YOCHU_VERSION}/yochu 17 | fi 18 | 19 | if [ ! -d "./yochu/etcd/${ETCD_VERSION}" ]; then 20 | mkdir -p ./yochu/etcd/${ETCD_VERSION} 21 | wget -O ./yochu/etcd/${ETCD_VERSION}/etcd https://downloads.giantswarm.io/etcd/${ETCD_VERSION}/etcd 22 | wget -O ./yochu/etcd/${ETCD_VERSION}/etcdctl https://downloads.giantswarm.io/etcd/${ETCD_VERSION}/etcdctl 23 | fi 24 | 25 | if [ ! -d "./yochu/fleet/${FLEET_VERSION}" ]; then 26 | mkdir -p ./yochu/fleet/${FLEET_VERSION} 27 | wget -O ./yochu/fleet/${FLEET_VERSION}/fleetd https://downloads.giantswarm.io/fleet/${FLEET_VERSION}/fleetd 28 | wget -O ./yochu/fleet/${FLEET_VERSION}/fleetctl https://downloads.giantswarm.io/fleet/${FLEET_VERSION}/fleetctl 29 | fi 30 | 31 | if [ ! -d "./yochu/docker/${DOCKER_VERSION}" ]; then 32 | mkdir -p ./yochu/docker/${DOCKER_VERSION} 33 | wget -O ./yochu/docker/${DOCKER_VERSION}/docker https://downloads.giantswarm.io/docker/${DOCKER_VERSION}/docker 34 | fi 35 | 36 | if [ ! -d "./yochu/rkt/${RKT_VERSION}" ]; then 37 | mkdir -p ./yochu/rkt/${RKT_VERSION} 38 | wget -O ./yochu/rkt/${RKT_VERSION}/rkt https://downloads.giantswarm.io/rkt/${RKT_VERSION}/rkt 39 | fi 40 | 41 | if [ ! -d "./yochu/k8s/${K8S_VERSION}" ]; then 42 | mkdir -p ./yochu/k8s/${K8S_VERSION} 43 | wget -O ./yochu/k8s/${K8S_VERSION}/kubectl https://downloads.giantswarm.io/k8s/${K8S_VERSION}/kubectl 44 | fi 45 | 46 | if [ ! -d "./yochu/calico-cni/${CALICO_CNI_VERSION}" ]; then 47 | mkdir -p ./yochu/calico-cni/${CALICO_CNI_VERSION} 48 | wget -O ./yochu/calico-cni/${CALICO_CNI_VERSION}/calico https://downloads.giantswarm.io/calico-cni/${CALICO_CNI_VERSION}/calico 49 | wget -O ./yochu/calico-cni/${CALICO_CNI_VERSION}/calico-ipam https://downloads.giantswarm.io/calico-cni/${CALICO_CNI_VERSION}/calico-ipam 50 | fi 51 | 52 | if [ ! -d "./yochu/calicoctl/${CALICO_VERSION}" ]; then 53 | mkdir -p ./yochu/calicoctl/${CALICO_VERSION} 54 | wget -O ./yochu/calicoctl/${CALICO_VERSION}/calicoctl https://downloads.giantswarm.io/calicoctl/${CALICO_VERSION}/calicoctl 55 | fi 56 | -------------------------------------------------------------------------------- /scripts/mayu.init: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Source function library. 4 | . /etc/init.d/functions 5 | 6 | wd="/opt/giantswarm/mayu" 7 | exec="$wd/mayu" 8 | prog="mayu" 9 | 10 | [ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog 11 | 12 | lockfile=/var/lock/subsys/$prog 13 | 14 | start() { 15 | [[ -x $exec ]] || exit 5 16 | echo -n "Starting $prog: " 17 | cd $wd 18 | nohup $exec -v 12 -no-git >> /var/log/mayu.log 2>&1 & 19 | retval=$? 20 | echo 21 | [ $retval -eq 0 ] && touch $lockfile 22 | return $retval 23 | } 24 | 25 | stop() { 26 | echo -n "Shutting down $prog: " 27 | killproc $prog 28 | retval=$? 
29 | echo 30 | [ $retval -eq 0 ] && rm -f $lockfile 31 | return $retval 32 | } 33 | 34 | case "$1" in 35 | start) 36 | start 37 | ;; 38 | stop) 39 | stop 40 | ;; 41 | status) 42 | status $prog 43 | ;; 44 | restart) 45 | stop 46 | start 47 | ;; 48 | reload) 49 | stop 50 | start 51 | ;; 52 | condrestart) 53 | [ -f $lockfile ] && restart 54 | ;; 55 | *) 56 | echo "Usage: mayu {start|stop|status|reload|restart}" 57 | exit 1 58 | ;; 59 | esac 60 | exit $? 61 | -------------------------------------------------------------------------------- /static_html/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | things 5 | 7 | 8 | 9 | 10 | 36 | 37 | 38 |
[static_html/index.html: the page's HTML markup was lost in extraction. The recoverable content is a host status table with the columns Serial, InternalAddr, State, LastPong and FleetMetadata, whose cells are bound to {{host.Serial}}, {{host.InternalAddr}}, {{host.State}} and {{host.FleetMetadata}}.]
66 | 67 | -------------------------------------------------------------------------------- /templates/dnsmasq_template.conf: -------------------------------------------------------------------------------- 1 | interface={{.Network.PXE.PxeInterface.InterfaceName}} 2 | listen-address={{.Network.BindAddr}} 3 | bind-interfaces 4 | except-interface=lo 5 | 6 | {{if .Network.PXE.PxeInterface.SubnetGateway}}dhcp-option=option:router,{{.Network.PXE.PxeInterface.SubnetGateway}} 7 | {{end}} 8 | dhcp-option=option:dns-server{{ range $dns := .Network.PrimaryNIC.DNS}},{{$dns}}{{end}} 9 | 10 | {{if .Network.PXE}}enable-tftp 11 | dhcp-range={{.Network.PXE.PxeInterface.IPRange.Start}},{{.Network.PXE.PxeInterface.IPRange.End}},1m 12 | tftp-root={{.Global.TFTPRoot}} 13 | dhcp-match=set:ipxe,175 14 | dhcp-vendorclass=set:pxe,PXEClient 15 | {{if .Network.UEFI}} 16 | dhcp-boot=tag:!ipxe,ipxe.efi 17 | {{else}} 18 | dhcp-boot=tag:!ipxe,undionly.kpxe 19 | {{end}} 20 | dhcp-boot=tag:ipxe,http://{{.Network.BindAddr}}:{{.Global.PXEPort}}/ipxebootscript 21 | {{end}} 22 | 23 | {{range $ignoredHost := .Network.IgnoredHosts}} 24 | dhcp-mac=installed,{{$ignoredHost}} 25 | {{end}} 26 | dhcp-ignore=tag:installed,tag:ipxe 27 | dhcp-ignore=tag:installed,tag:pxe 28 | dhcp-ignore=tag:installed 29 | 30 | {{range $staticHost := .Network.StaticHosts}} 31 | dhcp-host={{$staticHost.MacAddr}},{{$staticHost.IP}} 32 | {{end}} 33 | -------------------------------------------------------------------------------- /templates/ignition.yaml: -------------------------------------------------------------------------------- 1 | ignition: 2 | version: 2.2.0 3 | systemd: 4 | units: 5 | - name: etcd2.service 6 | enabled: false 7 | mask: true 8 | - name: fleet.service 9 | enabled: false 10 | mask: true 11 | - name: systemd-modules-load.service 12 | enabled: true 13 | - name: systemd-networkd-wait-online.service 14 | enabled: true 15 | - name: docker.service 16 | enabled: true 17 | - name: var-lib-docker.mount 18 | enabled: true 19 | contents: | 20 | [Unit] 21 | Description=Mount /dev/sda to /var/lib/docker 22 | Before=docker.service 23 | [Mount] 24 | What=/dev/sda 25 | Where=/var/lib/docker 26 | Type=xfs 27 | [Install] 28 | WantedBy=multi-user.target 29 | - name: docker.service 30 | dropins: 31 | - name: 40-docker.conf 32 | contents: | 33 | [Unit] 34 | Requires=docker.socket var-lib-docker.mount 35 | After=docker.socket var-lib-docker.mount 36 | [Service] 37 | Environment="DOCKER_OPTS=--iptables=true --storage-driver=overlay --log-opt max-size=50m --log-opt max-file=2 --live-restore --userland-proxy=false --icc=false --disable-legacy-registry=true {{index .TemplatesEnv "docker_args"}}" 38 | Environment="DOCKER_OPT_BIP=" 39 | Environment="DOCKER_OPT_IPMASQ=" 40 | Environment="DOCKER_OPT_MTU=" 41 | {{- if .TemplatesEnv.http_proxy_enabled }} 42 | {{- with .TemplatesEnv.http_proxy }} 43 | Environment="HTTP_PROXY=http://{{ .username }}:{{ .password }}@{{ .uri }}:{{ .port }}" 44 | Environment="HTTPS_PROXY=http://{{ .username }}:{{ .password }}@{{ .uri }}:{{ .port }}" 45 | {{- if .no_proxy }} 46 | Environment="NO_PROXY={{ join "," .no_proxy }}" 47 | {{- end }} 48 | {{- end }} 49 | {{- end }} 50 | - name: ensure-connectivity.service 51 | enabled: true 52 | contents: | 53 | [Unit] 54 | Description=Ensures theres connectivity before starting etcd 55 | After=systemd-networkd.service 56 | Wants=systemd-networkd.service 57 | Before=etcd3.service 58 | 59 | [Service] 60 | Type=oneshot 61 | ExecStart=/bin/sh -c 'while [[ -z $(curl -k -s {{index .TemplatesEnv 
"mayu_https_endpoint"}}) ]] ; do sleep 2 ; done' 62 | RemainAfterExit=yes 63 | 64 | [Install] 65 | WantedBy=multi-user.target 66 | - name: finish-install.service 67 | enabled: true 68 | contents: | 69 | [Unit] 70 | Description=finish mayu installation 71 | Before=etcd3.service 72 | 73 | [Service] 74 | Type=oneshot 75 | ExecStartPre=/usr/bin/mkdir -p /opt/bin 76 | ExecStartPre=/bin/sh -c 'while [[ -z $(curl -k -s {{index .TemplatesEnv "mayu_https_endpoint"}}) ]] ; do sleep 2 ; done' 77 | ExecStart=/usr/bin/docker run --privileged=true -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt -v /etc/os-release:/etc/os-release -v /etc/mayu-env:/etc/mayu-env -v /dev:/dev -v /sys:/sys --net=host quay.io/giantswarm/mayuctl:latest boot-complete --host={{index .TemplatesEnv "mayu_api_ip"}} --port={{.MayuPort}} {{if .NoTLS}}--no-tls{{end}} --update-versions 78 | RemainAfterExit=yes 79 | 80 | [Install] 81 | WantedBy=multi-user.target 82 | - name: etcd3.service 83 | command: start 84 | enabled: true 85 | contents: | 86 | [Unit] 87 | Description=etcd 88 | Requires=docker.service 89 | 90 | [Service] 91 | Restart=always 92 | RestartSec=10s 93 | EnvironmentFile=/etc/environment 94 | Environment=IMAGE=quay.io/coreos/etcd:v3.2.7 95 | Environment=NAME=%p.service 96 | ExecStartPre=-/usr/bin/docker stop $NAME 97 | ExecStartPre=-/usr/bin/docker rm $NAME 98 | ExecStartPre=-/usr/bin/docker pull $IMAGE 99 | ExecStart=/usr/bin/docker run \ 100 | -v /etc/ssl/certs/ca-certificates.crt:/etc/ssl/certs/ca-certificates.crt \ 101 | -v /var/lib/etcd:/var/lib/etcd \ 102 | --net=host \ 103 | --name $NAME \ 104 | $IMAGE \ 105 | etcd \ 106 | --name {{.Host.Serial}} 107 | --advertise-client-urls https//{{.Host.InternalAddr}}:2379 \ 108 | --listen-client-urls http://0.0.0.0:2379 \ 109 | --listen-peer-urls http://{{.Host.InternalAddr}}:2380 \ 110 | --initial-advertise-peer-urls http://{{.Host.InternalAddr}}:2380\ 111 | --discovery={{.EtcdDiscoveryUrl}} \ 112 | --data-dir=/var/lib/etcd \ 113 | --enable-v2 114 | 115 | [Install] 116 | WantedBy=multi-user.target 117 | {{if eq .Host.Profile "core"}} 118 | {{template "extra_units" .}} 119 | {{end}} 120 | 121 | {{if eq .ClusterNetwork.PrimaryNIC.Model.Type "bond"}}{{template "net_bond" .}}{{end}} 122 | {{if eq .ClusterNetwork.PrimaryNIC.Model.Type "singlenic"}}{{template "net_singlenic" .}}{{end}} 123 | {{if eq .ClusterNetwork.ExtraNICs}}{{template "extra_nics" .}}{{end}} 124 | 125 | storage: 126 | filesystems: 127 | - mount: 128 | device: "/dev/sda" 129 | format: "xfs" 130 | label: docker 131 | wipeFileSystem: true 132 | files: 133 | - filesystem: root 134 | path: /etc/hostname 135 | mode: 420 136 | user: 137 | id: 0 138 | group: 0 139 | contents: 140 | source: "data:text/plain;charset=utf-8,{{if eq .Host.Profile "core"}}master{{else}}worker{{end}}-{{.Host.Hostname}}" 141 | - filesystem: root 142 | path: /etc/mayu-env 143 | mode: 420 144 | user: 145 | id: 0 146 | group: 0 147 | contents: 148 | source: "data:text/plain;charset=utf-8,SERIAL={{.Host.Serial}}" 149 | 150 | {{if eq .ClusterNetwork.NetworkModel "bond"}} 151 | - filesystem: root 152 | path: /etc/modprobe.d/bonding.conf 153 | mode: 420 154 | user: 155 | id: 0 156 | group: 0 157 | contents: 158 | source: "data:text/plain;charset=utf-8;base64,{{ index .Files "conf/lacp-bonding.conf" }}" 159 | - filesystem: root 160 | path: /etc/modules-load.d/bonding.conf 161 | mode: 420 162 | user: 163 | id: 0 164 | group: 0 165 | contents: 166 | source: "data:text/plain;charset=utf-8;base64,{{ index .Files 
"conf/module-bonding.conf" }}" 167 | {{end}} 168 | 169 | {{if eq .Host.Profile "core"}} 170 | {{template "extra_files" .}} 171 | {{end}} 172 | 173 | passwd: 174 | users: 175 | {{ range $index, $user := (index .TemplatesEnv "users")}} - name: {{ $user.Name }} 176 | create: 177 | shell: "/bin/bash" 178 | groups: 179 | - "sudo" 180 | - "docker" 181 | sshauthorizedkeys: 182 | - "{{ $user.Key }}" 183 | {{end}} 184 | -------------------------------------------------------------------------------- /templates/snippets/extra.yaml: -------------------------------------------------------------------------------- 1 | {{define "extra_units"}} 2 | - name: extra_unit.service 3 | enabled: true 4 | contents: | 5 | [Unit] 6 | Description=extraUnit 7 | Requires=docker.service 8 | After=docker.service 9 | 10 | [Service] 11 | EnvironmentFile=/etc/environment 12 | Type=oneshot 13 | RemainAfterExit=yes 14 | ExecStart=/usr/bin/bash -c 'echo "extra work" >> /extra-file' 15 | [Install] 16 | WantedBy=multi-user.target 17 | {{end} 18 | {{define "extra_files"}} 19 | - filesystem: root 20 | path: /etc/my-service/config.conf 21 | mode: 420 22 | user: 23 | id: 0 24 | group: 0 25 | contents: "data:text/plain;charset=utf-8;base64,{{ index .Files "my-service/my-service.conf" }}" 26 | {{end}} 27 | -------------------------------------------------------------------------------- /templates/snippets/extra_nics.yaml: -------------------------------------------------------------------------------- 1 | {{define "extra_nics"}} 2 | {{ range $i, $nic := .ClusterNetwork.ExtraNICs }} 3 | - name: 80-{{$nic.InterfaceName}}.network 4 | contents: | 5 | [Match] 6 | Name={{$nic.InterfaceName}} 7 | 8 | [Network] 9 | Address={{index .Host.AdditionalAddrs "$nic.InterfaceName"}}/{{$nic.SubnetSize}} 10 | {{ range $server := $nic.DNS }}DNS={{ $server }} 11 | {{ end }} 12 | 13 | {{ range $r := $nic.Routes }} 14 | [Route] 15 | Destination={{ $r.DestinationCIDR }} 16 | Gateway={{ $r.RouteHop }} 17 | GatewayOnlink=yes 18 | {{ end }} 19 | 20 | # This is ugly workaround to prevent address removal from interface even for static IP. 
21 | # https://github.com/systemd/systemd/blob/v238/src/network/networkd-link.c#L2599 22 | [DHCP] 23 | CriticalConnection=yes 24 | {{end}} 25 | {{end}} 26 | 27 | -------------------------------------------------------------------------------- /templates/snippets/net_bond.yaml: -------------------------------------------------------------------------------- 1 | {{define "net_bond"}} 2 | networkd: 3 | units: 4 | - name: 10-bond0-slave.network 5 | contents: | 6 | [Match] 7 | Name={{ or .ClusterNetwork.PrimaryNIC.Model.BondInterfaceMatch "en*" }} 8 | 9 | [Network] 10 | Bond=bond0 11 | 12 | - name: 20-bond0.network 13 | contents: | 14 | [Match] 15 | Name=bond0 16 | 17 | [Network] 18 | DHCP=none 19 | VLAN=bond0.3 20 | VLAN=bond0.4 21 | 22 | - name: 15-bond0.netdev 23 | contents: | 24 | [NetDev] 25 | Name=bond0 26 | Kind=bond 27 | 28 | [Bond] 29 | Mode=802.3ad 30 | LACPTransmitRate=fast 31 | MIIMonitorSec=1s 32 | UpDelaySec=3s 33 | DownDelaySec=9s 34 | 35 | - name: 35-bond0.3.netdev 36 | contents: | 37 | [NetDev] 38 | Name=bond0.3 39 | Kind=vlan 40 | 41 | [VLAN] 42 | Id=3 43 | 44 | - name: 35-bond0.4.netdev 45 | contents: | 46 | [NetDev] 47 | Name=bond0.4 48 | Kind=vlan 49 | 50 | [VLAN] 51 | Id=4 52 | 53 | - name: 40-vlan.network 54 | contents: | 55 | [Match] 56 | Name=bond0.3 57 | 58 | [Network] 59 | Address={{.Host.InternalAddr}}/{{.ClusterNetwork.PrimaryNIC.SubnetSize}} 60 | Gateway={{.ClusterNetwork.PrimaryNIC.SubnetGateway}} 61 | {{ range $server := .ClusterNetwork.PrimaryNIC.DNS }}DNS={{ $server }} 62 | {{ end }} 63 | 64 | {{end}} 65 | -------------------------------------------------------------------------------- /templates/snippets/net_singlenic.yaml: -------------------------------------------------------------------------------- 1 | {{define "net_singlenic"}} 2 | networkd: 3 | units: 4 | - name: 10-nodhcp.network 5 | contents: | 6 | [Match] 7 | Name=* 8 | 9 | [Network] 10 | DHCP=no 11 | - name: 00-{{.ClusterNetwork.PrimaryNIC.InterfaceName}}.network 12 | contents: | 13 | [Match] 14 | Name={{.ClusterNetwork.PrimaryNIC.InterfaceName}} 15 | 16 | [Network] 17 | Address={{.Host.InternalAddr}}/{{.ClusterNetwork.PrimaryNIC.SubnetSize}} 18 | Gateway={{.ClusterNetwork.PrimaryNIC.SubnetGateway}} 19 | {{ range $server := .ClusterNetwork.PrimaryNIC.DNS }}DNS={{ $server }} 20 | {{ end }} 21 | {{ range $server := .ClusterNetwork.NTP }}NTP={{ $server }} 22 | {{ end }} 23 | 24 | {{ range $r := .ClusterNetwork.PrimaryNIC.Routes }} 25 | [Route] 26 | Destination={{ $r.DestinationCIDR }} 27 | Gateway={{ $r.RouteHop }} 28 | GatewayOnlink=yes 29 | {{ end }} 30 | {{end}} 31 | 32 | -------------------------------------------------------------------------------- /tftproot/ipxe.efi: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/giantswarm/mayu/6356e9ab27dde9c4832476101e22b857856af464/tftproot/ipxe.efi -------------------------------------------------------------------------------- /tftproot/undionly.kpxe: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/giantswarm/mayu/6356e9ab27dde9c4832476101e22b857856af464/tftproot/undionly.kpxe -------------------------------------------------------------------------------- /version.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import "fmt" 4 | 5 | func printVersion() { 6 | fmt.Println("mayu version", projectVersion) 7 | } 8 |
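The conversion from the templated YAML above to the JSON served to hosts goes through the Config schema in pxemgr/schema.go and convertTemplateToJSON in pxemgr/ignition.go. A minimal sketch of that round trip, written as an example test that could sit next to pxemanager_test.go; the YAML fragment is illustrative, and the expected JSON mirrors the shape asserted in that test:

package pxemgr

import "fmt"

// Example_convertTemplateToJSON feeds a small, already-rendered Ignition YAML
// fragment through convertTemplateToJSON and prints the compact JSON that the
// PXE manager would serve to a booting host.
func Example_convertTemplateToJSON() {
	yamlIn := []byte(`ignition:
  version: 2.2.0
systemd:
  units:
    - name: update-engine.service
      enabled: false
      mask: true
`)

	jsonOut, err := convertTemplateToJSON(yamlIn, false)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(jsonOut))
	// Output:
	// {"ignition":{"config":{},"security":{"tls":{}},"timeouts":{},"version":"2.2.0"},"networkd":{},"passwd":{},"storage":{},"systemd":{"units":[{"enabled":false,"mask":true,"name":"update-engine.service"}]}}
}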
--------------------------------------------------------------------------------
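Finally, the request path exercised by pxemanager_test.go is the same one a booting machine (via mayu-infopusher) uses to obtain its rendered Ignition config over HTTP. A minimal client sketch of that exchange; the address, port and serial are assumptions for illustration, and it presumes a Mayu API running with TLS disabled:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/giantswarm/mayu-infopusher/machinedata"
)

func main() {
	// Host data normally collected on the booting machine; only the serial is set here.
	hostData := machinedata.HostData{Serial: "myserial"}

	body := new(bytes.Buffer)
	if err := json.NewEncoder(body).Encode(hostData); err != nil {
		panic(err)
	}

	// Mirrors the request shape used in pxemanager_test.go: GET /ignition with the
	// serial as a query parameter and the host data JSON as the request body.
	req, err := http.NewRequest("GET", "http://127.0.0.1:4080/ignition?serial=myserial", body)
	if err != nil {
		panic(err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	ignitionJSON, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(ignitionJSON))
}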