├── .codespell-whitelist ├── .github ├── CODEOWNERS ├── dependabot.yml ├── mergify.yml ├── settings.yml └── workflows │ ├── ci.yaml │ └── tags.yaml ├── .gitignore ├── .golangci.yml ├── .yamlfmt ├── .yamllint ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── RELEASING.md ├── Tools.mk ├── api ├── v1alpha1 │ ├── doc.go │ ├── groupversion_info.go │ ├── hardware_methods.go │ ├── hardware_test.go │ ├── hardware_types.go │ ├── template_methods.go │ ├── template_test.go │ ├── template_types.go │ ├── workflow_methods.go │ ├── workflow_test.go │ ├── workflow_types.go │ └── zz_generated.deepcopy.go └── v1alpha2 │ ├── conditions.go │ ├── groupversion_info.go │ ├── hardware.go │ ├── osie.go │ ├── template.go │ ├── workflow.go │ └── zz_generated.deepcopy.go ├── buf.gen.yaml ├── buf.lock ├── buf.yaml ├── ci-checks.sh ├── cmd ├── tink-agent │ ├── Dockerfile │ └── main.go ├── tink-controller-v1alpha2 │ ├── Dockerfile │ └── main.go ├── tink-controller │ ├── Dockerfile │ └── main.go ├── tink-server │ ├── Dockerfile │ └── main.go ├── tink-worker │ ├── Dockerfile │ ├── cmd │ │ └── root.go │ ├── main.go │ └── worker │ │ ├── container_manager.go │ │ ├── container_manager_test.go │ │ ├── log_capturer.go │ │ ├── log_capturer_test.go │ │ ├── registry.go │ │ ├── registry_test.go │ │ └── worker.go └── virtual-worker │ ├── Dockerfile │ ├── cmd │ └── root.go │ ├── main.go │ └── worker │ ├── container_manager.go │ └── log_capturer.go ├── codecov.yml ├── config ├── crd │ ├── bases │ │ ├── tinkerbell.org_hardware.yaml │ │ ├── tinkerbell.org_osies.yaml │ │ ├── tinkerbell.org_templates.yaml │ │ └── tinkerbell.org_workflows.yaml │ ├── examples │ │ ├── hardware.yaml │ │ ├── template.yaml │ │ └── workflow.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── manager_config_patch.yaml │ └── namespace.yaml ├── manager-rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── server-rbac │ ├── kustomization.yaml │ ├── role.yaml │ ├── role_binding.yaml │ └── service_account.yaml ├── server │ ├── kustomization.yaml │ └── server.yaml └── tink-controller-v1alpha2 │ ├── crd_patch.json │ └── kustomization.yaml ├── contrib └── tag-release.sh ├── docs ├── DCO.md ├── Template.md └── Workflow.md ├── go.mod ├── go.sum ├── hack └── boilerplate │ └── boilerplate.generatego.txt ├── internal ├── agent │ ├── agent.go │ ├── agent_test.go │ ├── event │ │ ├── action.go │ │ ├── error.go │ │ ├── event.go │ │ ├── fake.go │ │ ├── mock.go │ │ ├── workflow.go │ │ └── zz_from_package.go │ ├── failure │ │ └── reason.go │ ├── mock.go │ ├── run.go │ ├── runtime.go │ ├── runtime │ │ ├── MACOS_TESTING.md │ │ ├── action_failure.go │ │ ├── docker.go │ │ ├── docker_test.go │ │ ├── fake.go │ │ ├── internal │ │ │ ├── failure_files.go │ │ │ └── failure_files_test.go │ │ └── runtime.go │ ├── transport.go │ ├── transport │ │ ├── fake.go │ │ ├── file.go │ │ ├── file_test.go │ │ ├── grpc.go │ │ ├── grpc_test.go │ │ ├── handler.go │ │ ├── mock.go │ │ ├── testdata │ │ │ └── workflow.yml │ │ └── transport.go │ └── workflow │ │ └── workflow.go ├── cli │ └── agent.go ├── client │ └── client.go ├── deprecated │ ├── controller │ │ └── manager.go │ └── workflow │ │ ├── convert.go │ │ 
├── convert_test.go │ │ ├── hardware.go │ │ ├── job.go │ │ ├── job_test.go │ │ ├── journal │ │ ├── journal.go │ │ └── journal_test.go │ │ ├── post.go │ │ ├── pre.go │ │ ├── pre_test.go │ │ ├── reconciler.go │ │ ├── reconciler_test.go │ │ ├── template_funcs.go │ │ ├── template_validator.go │ │ ├── template_validator_test.go │ │ └── types.go ├── e2e │ ├── e2e_test.go │ ├── testdata │ │ ├── 01 │ │ │ ├── hardware.yaml │ │ │ ├── template.yaml │ │ │ └── workflow.yaml │ │ └── 02 │ │ │ ├── hardware1.yaml │ │ │ ├── template1.yaml │ │ │ ├── template2.yaml │ │ │ ├── template3.yaml │ │ │ ├── workflow1.yaml │ │ │ ├── workflow2.yaml │ │ │ └── workflow3.yaml │ └── tink_suite_test.go ├── grpcserver │ └── grpc_server.go ├── hardware │ ├── admission.go │ ├── admission_conditional.go │ ├── admission_ip.go │ ├── admission_mac.go │ ├── admission_test.go │ ├── duplicate.go │ └── internal │ │ └── index.go ├── httpserver │ └── http_server.go ├── proto │ ├── doc.go │ ├── workflow.pb.go │ ├── workflow.proto │ ├── workflow │ │ └── v2 │ │ │ ├── mock.go │ │ │ ├── workflow.pb.go │ │ │ ├── workflow.proto │ │ │ └── workflow_grpc.pb.go │ └── workflow_grpc.pb.go ├── ptr │ └── ptr.go ├── server │ ├── index.go │ ├── index_test.go │ ├── kubernetes_api.go │ ├── kubernetes_api_test.go │ └── kubernetes_api_workflow.go ├── testtime │ ├── frozen_time.go │ └── frozen_time_test.go └── workflow │ ├── internal │ ├── reconcile.go │ ├── reconcile_test.go │ └── template.go │ ├── reconciler.go │ └── reconciler_test.go └── shell.nix /.codespell-whitelist: -------------------------------------------------------------------------------- 1 | alls 2 | ba 3 | cas 4 | eventtypes 5 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | /.github/settings.yml @chrisdoherty4 @jacobweinstock 2 | /.github/CODEOWNERS @chrisdoherty4 @jacobweinstock 3 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | day: "monday" 8 | time: "04:39" 9 | timezone: "America/New_York" 10 | reviewers: 11 | - chrisdoherty4 12 | - jacobweinstock 13 | open-pull-requests-limit: 10 14 | 15 | - package-ecosystem: "gomod" 16 | directory: "/" 17 | schedule: 18 | interval: "weekly" 19 | day: "friday" 20 | time: "03:52" 21 | timezone: "America/New_York" 22 | reviewers: 23 | - chrisdoherty4 24 | - jacobweinstock 25 | open-pull-requests-limit: 20 26 | 27 | - package-ecosystem: "docker" 28 | directory: "/" 29 | schedule: 30 | interval: "weekly" 31 | day: "monday" 32 | time: "04:22" 33 | timezone: "America/New_York" 34 | reviewers: 35 | - chrisdoherty4 36 | - jacobweinstock 37 | open-pull-requests-limit: 10 38 | -------------------------------------------------------------------------------- /.github/mergify.yml: -------------------------------------------------------------------------------- 1 | # The `check-success` should be specifying the final job in the CI workflow assuming it depends 2 | # directly or indirectly on all other jobs passing. 3 | 4 | shared: 5 | # Rules applicable to both queueing and merge requests. 6 | compulsory: &compulsory 7 | 8 | # Ensure the minimal CI checks have passed. 
9 | - check-success=DCO 10 | - check-success=Package (quay.io/tinkerbell/tink, tink-server) 11 | - check-success=Package (quay.io/tinkerbell/tink-controller, tink-controller) 12 | - check-success=Package (quay.io/tinkerbell/tink-worker, tink-worker) 13 | 14 | # Ensure we're targeting the default branch. 15 | - base=main 16 | 17 | # Ensure we have adequate reviews. 18 | - "#approved-reviews-by>=1" 19 | - "#changes-requested-reviews-by=0" 20 | 21 | # Ensure we aren't being explicitly blocked with a label. 22 | - label!=do-not-merge 23 | 24 | queue_rules: 25 | - name: default 26 | queue_conditions: 27 | - and: *compulsory 28 | - label=ready-to-merge 29 | merge_conditions: 30 | - and: *compulsory 31 | 32 | merge_method: merge 33 | commit_message_template: | 34 | {{ title }} (#{{ number }}) 35 | 36 | {{ body }} 37 | 38 | pull_request_rules: 39 | - name: refactored queue action rule 40 | conditions: [] 41 | actions: 42 | queue: 43 | -------------------------------------------------------------------------------- /.github/settings.yml: -------------------------------------------------------------------------------- 1 | # Collaborators: give specific users access to this repository. 2 | # See https://docs.github.com/en/rest/reference/repos#add-a-repository-collaborator for available options 3 | collaborators: 4 | # Maintainers, should also be added to the .github/CODEOWNERS file as owners of this settings.yml file. 5 | - username: chrisdoherty4 6 | permission: maintain 7 | - username: jacobweinstock 8 | permission: maintain 9 | # Approvers 10 | - username: displague 11 | permission: push 12 | # Reviewers 13 | 14 | # Note: `permission` is only valid on organization-owned repositories. 15 | # The permission to grant the collaborator. Can be one of: 16 | # * `pull` - can pull, but not push to or administer this repository. 17 | # * `push` - can pull and push, but not administer this repository. 18 | # * `admin` - can pull, push and administer this repository. 19 | # * `maintain` - Recommended for project managers who need to manage the repository without access to sensitive or destructive actions. 20 | # * `triage` - Recommended for contributors who need to proactively manage issues and pull requests without write access.
21 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | name: Tink 2 | on: 3 | push: 4 | branches: 5 | - "*" 6 | tags-ignore: 7 | - "v*" 8 | pull_request: {} 9 | workflow_dispatch: {} 10 | env: 11 | CGO_ENABLED: "0" 12 | GO_VERSION: "1.22" 13 | jobs: 14 | verify: 15 | name: Verify 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v4 19 | - uses: actions/setup-go@v5 20 | with: 21 | go-version: "${{ env.GO_VERSION }}" 22 | - run: make verify 23 | test: 24 | name: Test 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: actions/checkout@v4 28 | with: 29 | fetch-depth: 5 30 | 31 | - uses: actions/setup-go@v5 32 | with: 33 | go-version: "${{ env.GO_VERSION }}" 34 | - name: Run unit tests 35 | run: make test 36 | 37 | - name: Run e2e tests 38 | run: make e2e-test 39 | 40 | - name: Upload codecov 41 | uses: codecov/codecov-action@v5 42 | env: 43 | CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} 44 | 45 | checks: 46 | name: CI Checks 47 | runs-on: ubuntu-latest 48 | steps: 49 | - uses: actions/checkout@v4 50 | 51 | - name: Install nix 52 | uses: cachix/install-nix-action@v30 53 | with: 54 | nix_path: nixpkgs=channel:nixos-unstable 55 | 56 | - name: Run ci-checks.sh 57 | run: nix-shell --run 'make ci-checks' 58 | 59 | # We preemptively build the binaries for efficiency instead of waiting on unit tests to pass 60 | # hence this doesn't depend on anything. 61 | build: 62 | name: Build 63 | runs-on: ubuntu-latest 64 | strategy: 65 | matrix: 66 | platform: [amd64, arm64] 67 | steps: 68 | - uses: actions/checkout@v4 69 | 70 | - uses: actions/setup-go@v5 71 | with: 72 | go-version: "${{ env.GO_VERSION }}" 73 | - name: Build linux/${{ matrix.platform }} 74 | run: make build -j$(nproc) GOOS=linux GOARCH=${{ matrix.platform }} 75 | 76 | - uses: actions/upload-artifact@v4 77 | with: 78 | name: ${{ matrix.platform }}-binaries 79 | path: bin/* 80 | 81 | package: 82 | name: Package 83 | runs-on: ubuntu-latest 84 | needs: 85 | - verify 86 | - test 87 | - checks 88 | - build 89 | strategy: 90 | matrix: 91 | include: 92 | - repository: quay.io/tinkerbell/tink 93 | binary: tink-server 94 | - repository: quay.io/tinkerbell/tink-worker 95 | binary: tink-worker 96 | - repository: quay.io/tinkerbell/tink-controller 97 | binary: tink-controller 98 | steps: 99 | - name: Create docker image tags 100 | id: docker-image-tag 101 | run: echo ::set-output name=tags::${{ matrix.repository }}:latest,${{ matrix.repository }}:sha-${GITHUB_SHA::8} 102 | 103 | - uses: actions/checkout@v4 104 | 105 | - name: Login to quay.io 106 | uses: docker/login-action@v3 107 | if: ${{ startsWith(github.ref, 'refs/heads/main') }} 108 | with: 109 | registry: quay.io 110 | username: ${{ secrets.QUAY_USERNAME }} 111 | password: ${{ secrets.QUAY_PASSWORD }} 112 | 113 | - name: Set up Docker Buildx 114 | uses: docker/setup-buildx-action@v3 115 | 116 | - name: Download all binaries 117 | uses: actions/download-artifact@v4 118 | with: 119 | path: ./bin 120 | merge-multiple: true 121 | 122 | # Artifact upload doesn't preserve permissions so we need to fix them before use in 123 | # the Dockerfiles. 124 | - name: Fix permissions 125 | run: chmod +x bin/* 126 | - name: Build ${{ matrix.repository }} and push 127 | 128 | uses: docker/build-push-action@v6 129 | with: 130 | context: . 
131 | file: cmd/${{ matrix.binary }}/Dockerfile 132 | cache-from: type=registry,ref=${{ matrix.repository }}:latest 133 | push: ${{ startsWith(github.ref, 'refs/heads/main') }} 134 | tags: ${{ steps.docker-image-tag.outputs.tags }} 135 | platforms: linux/amd64,linux/arm64 136 | -------------------------------------------------------------------------------- /.github/workflows/tags.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | tags: 4 | - "v*" 5 | name: Release 6 | env: 7 | REGISTRY: quay.io 8 | IMAGE_NAME: ${{ github.repository }} 9 | jobs: 10 | release: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Generate release notes 15 | run: | 16 | release_notes=$(gh api repos/{owner}/{repo}/releases/generate-notes -F tag_name=${{ github.ref }} --jq .body) 17 | echo 'RELEASE_NOTES<<EOF' >> $GITHUB_ENV 18 | echo "${release_notes}" >> $GITHUB_ENV 19 | echo 'EOF' >> $GITHUB_ENV 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | OWNER: ${{ github.repository_owner }} 23 | REPO: ${{ github.event.repository.name }} 24 | - name: Generate Docker image metadata 25 | id: meta 26 | uses: docker/metadata-action@v5 27 | with: 28 | images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }} 29 | flavor: latest=false 30 | tags: type=ref,event=tag 31 | github-token: ${{ secrets.GITHUB_TOKEN }} 32 | - name: Set the FROM_TAG variable 33 | run: echo "FROM_TAG=sha-${GITHUB_SHA::8}" >> $GITHUB_ENV 34 | - name: Create tink-server image 35 | run: skopeo copy --all --dest-creds="${DST_REG_USER}":"${DST_REG_PASS}" docker://"${SRC_IMAGE}" docker://"${DST_IMAGE}" 36 | env: 37 | SRC_IMAGE: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ env.FROM_TAG }} 38 | DST_IMAGE: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }} 39 | DST_REG_USER: ${{ secrets.QUAY_USERNAME }} 40 | DST_REG_PASS: ${{ secrets.QUAY_PASSWORD }} 41 | - name: Create tink-controller image 42 | run: skopeo copy --all --dest-creds="${DST_REG_USER}":"${DST_REG_PASS}" docker://"${SRC_IMAGE}" docker://"${DST_IMAGE}" 43 | env: 44 | SRC_IMAGE: ${{ env.REGISTRY }}/tinkerbell/tink-controller:${{ env.FROM_TAG }} 45 | DST_IMAGE: ${{ env.REGISTRY }}/tinkerbell/tink-controller:${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }} 46 | DST_REG_USER: ${{ secrets.QUAY_USERNAME }} 47 | DST_REG_PASS: ${{ secrets.QUAY_PASSWORD }} 48 | - name: Create tink-worker image 49 | run: skopeo copy --all --dest-creds="${DST_REG_USER}":"${DST_REG_PASS}" docker://"${SRC_IMAGE}" docker://"${DST_IMAGE}" 50 | env: 51 | SRC_IMAGE: ${{ env.REGISTRY }}/tinkerbell/tink-worker:${{ env.FROM_TAG }} 52 | DST_IMAGE: ${{ env.REGISTRY }}/tinkerbell/tink-worker:${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }} 53 | DST_REG_USER: ${{ secrets.QUAY_USERNAME }} 54 | DST_REG_PASS: ${{ secrets.QUAY_PASSWORD }} 55 | - name: Create release 56 | uses: actions/create-release@v1 57 | env: 58 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 59 | with: 60 | tag_name: ${{ github.ref }} 61 | release_name: ${{ github.ref }} 62 | body: ${{ env.RELEASE_NOTES }} 63 | draft: false 64 | prerelease: true 65 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/ 2 | certs/ 3 | cmd/tink-controller/tink-controller 4 | cmd/tink-server/tink-server 5 | cmd/tink-worker/tink-worker 6
| doc/ 7 | .idea 8 | .vscode 9 | coverage.txt 10 | 11 | # Terraform 12 | .terraform 13 | terraform.tfstate 14 | terraform.tfstate.backup 15 | 16 | # Vagrant 17 | **/.vagrant 18 | envrc 19 | .env 20 | deploy/state 21 | out/ 22 | 23 | .*.swp 24 | hack/tools 25 | 26 | # test worker files 27 | tests/worker 28 | tmp/ 29 | _tmp/ -------------------------------------------------------------------------------- /.yamlfmt: -------------------------------------------------------------------------------- 1 | exclude: 2 | - .github/** 3 | -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | extends: default 2 | 3 | rules: 4 | braces: 5 | max-spaces-inside: 1 6 | brackets: 7 | max-spaces-inside: 1 8 | comments: disable 9 | comments-indentation: disable 10 | document-start: disable 11 | line-length: 12 | level: warning 13 | max: 160 14 | allow-non-breakable-inline-mappings: true 15 | truthy: disable 16 | indentation: 17 | indent-sequences: whatever 18 | 19 | ignore: | 20 | out 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Tinkerbell 2 | 3 | [![Build Status](https://github.com/tinkerbell/tink/actions/workflows/ci.yaml/badge.svg)](https://github.com/tinkerbell/tink/actions/workflows/ci.yaml) 4 | [![codecov](https://codecov.io/gh/tinkerbell/tink/branch/main/graph/badge.svg)](https://codecov.io/gh/tinkerbell/tink) 5 | [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4512/badge)](https://bestpractices.coreinfrastructure.org/projects/4512) 6 | 7 | ## License 8 | 9 | Tinkerbell is licensed under the Apache License, Version 2.0. See [LICENSE](./LICENSE) for the full license text. Some of the projects used by the Tinkerbell project may be governed by a different license, please refer to its specific license. 10 | 11 | Tinkerbell is part of the CNCF Projects. 12 | 13 | [![CNCF Landscape](https://img.shields.io/badge/CNCF%20Landscape-5699C6)](https://landscape.cncf.io/?item=provisioning--automation-configuration--tinkerbell) 14 | 15 | 16 | ## Community 17 | 18 | The Tinkerbell community meets bi-weekly on Tuesday. The meeting details can be found [here][7]. 19 | 20 | Community Resources: 21 | 22 | - [CNCF #tinkerbell](https://app.slack.com/client/T08PSQ7BQ/C01SRB41GMT) 23 | - [YouTube Channel (demos, meeting recordings, virtual meetups)](https://www.youtube.com/channel/UCTzWInTQPvzH21KHS8jrq7A/featured) 24 | 25 | ## What's Powering Tinkerbell? 26 | 27 | The Tinkerbell stack consists of several microservices, and a gRPC API: 28 | 29 | ### Tink 30 | 31 | [Tink][1] is the short-hand name for the tink-server and tink-worker. 32 | `tink-worker` and `tink-server` communicate over gRPC, and are responsible for processing workflows. 33 | The CLI is the user-interactive piece for creating workflows and their building blocks, templates and hardware data. 34 | 35 | ### Smee 36 | 37 | [Smee][2] is Tinkerbell's DHCP server. 38 | It handles DHCP requests, hands out IPs, and serves up iPXE. 39 | It uses the Tinkerbell client to pull and push hardware data. 40 | It only responds to a predefined set of MAC addresses so it can be deployed in an existing network without interfering with existing DHCP infrastructure. 41 | 42 | ### Hegel 43 | 44 | [Hegel][3] is the metadata service used by Tinkerbell and OSIE. 
45 | It collects data from both and transforms it into a JSON format to be consumed as metadata. 46 | 47 | ### OSIE 48 | 49 | [OSIE][4] is Tinkerbell's default in-memory installation environment for bare metal. 50 | It installs operating systems and handles deprovisioning. 51 | 52 | ### Hook 53 | 54 | [Hook][5] is the newly introduced alternative to OSIE. 55 | It's the next iteration of the in-memory installation environment to handle operating system installation and deprovisioning. 56 | 57 | ### PBnJ 58 | 59 | [PBnJ][6] is an optional microservice that can communicate with baseboard management controllers (BMCs) to control power and boot settings. 60 | 61 | ## Building 62 | 63 | Use `make help`. 64 | The most interesting targets are `make all` (or just `make`) and `make images`. 65 | `make all` builds all the binaries for your host OS and CPU to enable running directly. 66 | `make images` will build all the binaries for Linux/x86_64 and build docker images with them. 67 | 68 | ## Configuring OpenTelemetry 69 | 70 | Rather than adding a bunch of command line options or a config file, OpenTelemetry 71 | is configured via environment variables. The most relevant ones are below; for others 72 | see https://github.com/equinix-labs/otel-init-go 73 | 74 | Currently this is just for tracing; metrics needs to be discussed with the community. 75 | 76 | | Env Variable | Required | Default | 77 | | ----------------------------- | -------- | --------- | 78 | | `OTEL_EXPORTER_OTLP_ENDPOINT` | n | localhost | 79 | | `OTEL_EXPORTER_OTLP_INSECURE` | n | false | 80 | | `OTEL_LOG_LEVEL` | n | info | 81 | 82 | To work with a local [opentelemetry-collector](https://github.com/open-telemetry/opentelemetry-collector), 83 | try the following. For examples of how to set up the collector to relay to various services 84 | take a look at [otel-cli](https://github.com/packethost/otel-cli) 85 | 86 | ``` 87 | export OTEL_EXPORTER_OTLP_ENDPOINT=localhost:4317 88 | export OTEL_EXPORTER_OTLP_INSECURE=true 89 | ./cmd/tink-server/tink-server 90 | ``` 91 | 92 | ## Website 93 | 94 | For complete documentation, please visit the Tinkerbell project hosted at [tinkerbell.org](https://tinkerbell.org). 95 | 96 | [1]: https://github.com/tinkerbell/tink 97 | [2]: https://github.com/tinkerbell/smee 98 | [3]: https://github.com/tinkerbell/hegel 99 | [4]: https://github.com/tinkerbell/osie 100 | [5]: https://github.com/tinkerbell/hook 101 | [6]: https://github.com/tinkerbell/pbnj 102 | [7]: https://docs.google.com/document/d/1Hmqrhj2rPjZ5W0DvRynFNY2cJq6jFCbNOc4p26U5Dgg/edit?usp=sharing 103 | -------------------------------------------------------------------------------- /RELEASING.md: -------------------------------------------------------------------------------- 1 | # Releasing 2 | 3 | ## Process 4 | 5 | For version v0.x.y: 6 | 7 | 1. Create the annotated tag 8 | > NOTE: To use your GPG signature when pushing the tag, use `SIGN_TAG=1 ./contrib/tag-release.sh v0.x.y` instead 9 | - `./contrib/tag-release.sh v0.x.y` 10 | 1. Push the tag to the GitHub repository. This will automatically trigger a [Github Action](https://github.com/tinkerbell/tink/actions) to create a release. 11 | > NOTE: `origin` should be the name of the remote pointing to `github.com/tinkerbell/tink` 12 | - `git push origin v0.x.y` 13 | 1. Review the release on GitHub. 14 | 15 | ### Permissions 16 | 17 | Releasing requires a particular set of permissions.
18 | 19 | - Tag push access to the GitHub repository 20 | -------------------------------------------------------------------------------- /api/v1alpha1/doc.go: -------------------------------------------------------------------------------- 1 | // Package v1alpha1 contains API Schema definitions for the Tinkerbell v1alpha1 API group 2 | package v1alpha1 3 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | // +kubebuilder:object:generate=true 2 | // +groupName=tinkerbell.org 3 | 4 | package v1alpha1 5 | 6 | import ( 7 | "k8s.io/apimachinery/pkg/runtime/schema" 8 | "sigs.k8s.io/controller-runtime/pkg/scheme" 9 | ) 10 | 11 | var ( 12 | // GroupVersion is group version used to register these objects. 13 | GroupVersion = schema.GroupVersion{Group: "tinkerbell.org", Version: "v1alpha1"} 14 | 15 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme. 16 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 17 | 18 | // AddToScheme adds the types in this group-version to the given scheme. 19 | AddToScheme = SchemeBuilder.AddToScheme 20 | ) 21 | -------------------------------------------------------------------------------- /api/v1alpha1/hardware_methods.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | const ( 4 | // HardwareIDAnnotation is used by the controller to store the 5 | // ID assigned to the hardware by Tinkerbell for migrated hardware. 6 | HardwareIDAnnotation = "hardware.tinkerbell.org/id" 7 | ) 8 | 9 | // TinkID returns the Tinkerbell ID associated with this Hardware. 10 | func (h *Hardware) TinkID() string { 11 | return h.Annotations[HardwareIDAnnotation] 12 | } 13 | 14 | // SetTinkID sets the Tinkerbell ID associated with this Hardware. 
15 | func (h *Hardware) SetTinkID(id string) { 16 | if h.Annotations == nil { 17 | h.Annotations = make(map[string]string) 18 | } 19 | h.Annotations[HardwareIDAnnotation] = id 20 | } 21 | -------------------------------------------------------------------------------- /api/v1alpha1/hardware_test.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | "testing" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | func TestHardwareTinkID(t *testing.T) { 10 | id := "d2c26e20-97e0-449c-b665-61efa7373f47" 11 | cases := []struct { 12 | name string 13 | input *Hardware 14 | want string 15 | overwrite string 16 | }{ 17 | { 18 | "Already set", 19 | &Hardware{ 20 | ObjectMeta: metav1.ObjectMeta{ 21 | Name: "debian", 22 | Namespace: "default", 23 | Annotations: map[string]string{ 24 | HardwareIDAnnotation: id, 25 | }, 26 | }, 27 | }, 28 | id, 29 | "", 30 | }, 31 | { 32 | "nil annotations", 33 | &Hardware{ 34 | ObjectMeta: metav1.ObjectMeta{ 35 | Name: "debian", 36 | Namespace: "default", 37 | Annotations: nil, 38 | }, 39 | }, 40 | "", 41 | "abc", 42 | }, 43 | } 44 | for _, tc := range cases { 45 | t.Run(tc.name, func(t *testing.T) { 46 | if tc.input.TinkID() != tc.want { 47 | t.Errorf("Got unexpected ID: got %v, wanted %v", tc.input.TinkID(), tc.want) 48 | } 49 | 50 | tc.input.SetTinkID(tc.overwrite) 51 | 52 | if tc.input.TinkID() != tc.overwrite { 53 | t.Errorf("Got unexpected ID: got %v, wanted %v", tc.input.TinkID(), tc.overwrite) 54 | } 55 | }) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /api/v1alpha1/template_methods.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | const ( 4 | // TemplateIDAnnotation is used by the controller to store the 5 | // ID assigned to the template by Tinkerbell for migrated templates. 6 | TemplateIDAnnotation = "template.tinkerbell.org/id" 7 | ) 8 | 9 | // TinkID returns the Tinkerbell ID associated with this Template. 10 | func (t *Template) TinkID() string { 11 | return t.Annotations[TemplateIDAnnotation] 12 | } 13 | 14 | // SetTinkID sets the Tinkerbell ID associated with this Template. 
15 | func (t *Template) SetTinkID(id string) { 16 | if t.Annotations == nil { 17 | t.Annotations = make(map[string]string) 18 | } 19 | t.Annotations[TemplateIDAnnotation] = id 20 | } 21 | -------------------------------------------------------------------------------- /api/v1alpha1/template_test.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | "testing" 5 | 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | ) 8 | 9 | func TestTemplateTinkID(t *testing.T) { 10 | id := "d2c26e20-97e0-449c-b665-61efa7373f47" 11 | cases := []struct { 12 | name string 13 | input *Template 14 | want string 15 | overwrite string 16 | }{ 17 | { 18 | "Already set", 19 | &Template{ 20 | ObjectMeta: metav1.ObjectMeta{ 21 | Name: "debian", 22 | Namespace: "default", 23 | Annotations: map[string]string{ 24 | TemplateIDAnnotation: id, 25 | }, 26 | }, 27 | }, 28 | id, 29 | "", 30 | }, 31 | { 32 | "nil annotations", 33 | &Template{ 34 | ObjectMeta: metav1.ObjectMeta{ 35 | Name: "debian", 36 | Namespace: "default", 37 | Annotations: nil, 38 | }, 39 | }, 40 | "", 41 | "abc", 42 | }, 43 | } 44 | for _, tc := range cases { 45 | t.Run(tc.name, func(t *testing.T) { 46 | if tc.input.TinkID() != tc.want { 47 | t.Errorf("Got unexpected ID: got %v, wanted %v", tc.input.TinkID(), tc.want) 48 | } 49 | 50 | tc.input.SetTinkID(tc.overwrite) 51 | 52 | if tc.input.TinkID() != tc.overwrite { 53 | t.Errorf("Got unexpected ID: got %v, wanted %v", tc.input.TinkID(), tc.overwrite) 54 | } 55 | }) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /api/v1alpha1/template_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | // TemplateState represents the template state. 8 | type TemplateState string 9 | 10 | const ( 11 | // TemplateError represents a template that is in an error state. 12 | TemplateError = TemplateState("Error") 13 | 14 | // TemplateReady represents a template that is in a ready state. 15 | TemplateReady = TemplateState("Ready") 16 | ) 17 | 18 | // TemplateSpec defines the desired state of Template. 19 | type TemplateSpec struct { 20 | // +optional 21 | Data *string `json:"data,omitempty"` 22 | } 23 | 24 | // TemplateStatus defines the observed state of Template. 25 | type TemplateStatus struct { 26 | State TemplateState `json:"state,omitempty"` 27 | } 28 | 29 | // +kubebuilder:subresource:status 30 | // +kubebuilder:object:root=true 31 | // +kubebuilder:resource:path=templates,scope=Namespaced,categories=tinkerbell,shortName=tpl,singular=template 32 | // +kubebuilder:storageversion 33 | // +kubebuilder:printcolumn:JSONPath=".status.state",name=State,type=string 34 | 35 | // Template is the Schema for the Templates API. 36 | type Template struct { 37 | metav1.TypeMeta `json:",inline"` 38 | metav1.ObjectMeta `json:"metadata,omitempty"` 39 | 40 | Spec TemplateSpec `json:"spec,omitempty"` 41 | Status TemplateStatus `json:"status,omitempty"` 42 | } 43 | 44 | // +kubebuilder:object:root=true 45 | 46 | // TemplateList contains a list of Templates. 
47 | type TemplateList struct { 48 | metav1.TypeMeta `json:",inline"` 49 | metav1.ListMeta `json:"metadata,omitempty"` 50 | Items []Template `json:"items"` 51 | } 52 | 53 | func init() { 54 | SchemeBuilder.Register(&Template{}, &TemplateList{}) 55 | } 56 | -------------------------------------------------------------------------------- /api/v1alpha1/workflow_methods.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | const ( 8 | // WorkflowIDAnnotation is used by the controller to store the 9 | // ID assigned to the workflow by Tinkerbell for migrated workflows. 10 | WorkflowIDAnnotation = "workflow.tinkerbell.org/id" 11 | ) 12 | 13 | // TinkID returns the Tinkerbell ID associated with this Workflow. 14 | func (w *Workflow) TinkID() string { 15 | return w.Annotations[WorkflowIDAnnotation] 16 | } 17 | 18 | // SetTinkID sets the Tinkerbell ID associated with this Workflow. 19 | func (w *Workflow) SetTinkID(id string) { 20 | if w.Annotations == nil { 21 | w.Annotations = make(map[string]string) 22 | } 23 | w.Annotations[WorkflowIDAnnotation] = id 24 | } 25 | 26 | // GetStartTime returns the start time, for the first action of the first task. 27 | func (w *Workflow) GetStartTime() *metav1.Time { 28 | if len(w.Status.Tasks) > 0 { 29 | if len(w.Status.Tasks[0].Actions) > 0 { 30 | return w.Status.Tasks[0].Actions[0].StartedAt 31 | } 32 | } 33 | return nil 34 | } 35 | 36 | type taskInfo struct { 37 | CurrentWorker string 38 | CurrentTask string 39 | CurrentTaskIndex int 40 | CurrentAction string 41 | CurrentActionIndex int 42 | CurrentActionState WorkflowState 43 | TotalNumberOfActions int 44 | } 45 | 46 | // helper function for task info. 47 | func (w *Workflow) getTaskActionInfo() taskInfo { 48 | var ( 49 | found bool 50 | taskIndex = -1 51 | actionIndex int 52 | actionTaskIndex int 53 | actionCount int 54 | ) 55 | for ti, task := range w.Status.Tasks { 56 | actionCount += len(task.Actions) 57 | if found { 58 | continue 59 | } 60 | INNER: 61 | for ai, action := range task.Actions { 62 | // Find the first non-successful action 63 | switch action.Status { //nolint:exhaustive // WorkflowStateWaiting is only used in Workflows not Actions. 
64 | case WorkflowStateSuccess: 65 | actionIndex++ 66 | continue 67 | case WorkflowStatePending, WorkflowStateRunning, WorkflowStateFailed, WorkflowStateTimeout: 68 | taskIndex = ti 69 | actionTaskIndex = ai 70 | found = true 71 | break INNER 72 | } 73 | } 74 | } 75 | 76 | ti := taskInfo{ 77 | TotalNumberOfActions: actionCount, 78 | CurrentActionIndex: actionIndex, 79 | } 80 | if taskIndex >= 0 { 81 | ti.CurrentWorker = w.Status.Tasks[taskIndex].WorkerAddr 82 | ti.CurrentTask = w.Status.Tasks[taskIndex].Name 83 | ti.CurrentTaskIndex = taskIndex 84 | } 85 | if taskIndex >= 0 && actionIndex >= 0 { 86 | ti.CurrentAction = w.Status.Tasks[taskIndex].Actions[actionTaskIndex].Name 87 | ti.CurrentActionState = w.Status.Tasks[taskIndex].Actions[actionTaskIndex].Status 88 | } 89 | 90 | return ti 91 | } 92 | 93 | func (w *Workflow) GetCurrentWorker() string { 94 | return w.getTaskActionInfo().CurrentWorker 95 | } 96 | 97 | func (w *Workflow) GetCurrentTask() string { 98 | return w.getTaskActionInfo().CurrentTask 99 | } 100 | 101 | func (w *Workflow) GetCurrentTaskIndex() int { 102 | return w.getTaskActionInfo().CurrentTaskIndex 103 | } 104 | 105 | func (w *Workflow) GetCurrentAction() string { 106 | return w.getTaskActionInfo().CurrentAction 107 | } 108 | 109 | func (w *Workflow) GetCurrentActionIndex() int { 110 | return w.getTaskActionInfo().CurrentActionIndex 111 | } 112 | 113 | func (w *Workflow) GetCurrentActionState() WorkflowState { 114 | return w.getTaskActionInfo().CurrentActionState 115 | } 116 | 117 | func (w *Workflow) GetTotalNumberOfActions() int { 118 | return w.getTaskActionInfo().TotalNumberOfActions 119 | } 120 | -------------------------------------------------------------------------------- /api/v1alpha2/conditions.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 4 | 5 | // ConditionType identifies the type of condition. 6 | type ConditionType string 7 | 8 | // ConditionStatus expresses the current state of the condition. 9 | type ConditionStatus string 10 | 11 | const ( 12 | // ConditionStatusUnknown is the default status and indicates the condition cannot be 13 | // evaluated as True or False. 14 | ConditionStatusUnknown ConditionStatus = "Unknown" 15 | 16 | // ConditionStatusTrue indicates the condition has been evaluated as true. 17 | ConditionStatusTrue ConditionStatus = "True" 18 | 19 | // ConditionStatusFalse indicates the condition has been evaluated as false. 20 | ConditionStatusFalse ConditionStatus = "False" 21 | ) 22 | 23 | // Condition defines an observation on a resource that is generally attainable by inspecting 24 | // other status fields. 25 | type Condition struct { 26 | // Type of condition. 27 | Type ConditionType `json:"type"` 28 | 29 | // Status of the condition. 30 | Status ConditionStatus `json:"status"` 31 | 32 | // LastTransition is the last time the condition transitioned from one status to another. 33 | LastTransition metav1.Time `json:"lastTransitionTime"` 34 | 35 | // Reason is a short CamelCase description for the conditions last transition. 36 | // +optional 37 | Reason *string `json:"reason,omitempty"` 38 | 39 | // Message is a human readable message indicating details about the last transition. 40 | // +optional 41 | Message *string `json:"message,omitempty"` 42 | } 43 | 44 | // Conditions define a list of observations of a particular resource. 
45 | type Conditions []Condition 46 | -------------------------------------------------------------------------------- /api/v1alpha2/groupversion_info.go: -------------------------------------------------------------------------------- 1 | // +groupName=tinkerbell.org 2 | // +kubebuilder:object:generate=true 3 | 4 | package v1alpha2 5 | 6 | import ( 7 | "k8s.io/apimachinery/pkg/runtime/schema" 8 | "sigs.k8s.io/controller-runtime/pkg/scheme" 9 | ) 10 | 11 | var ( 12 | // GroupVersion is group version used to register these objects. 13 | GroupVersion = schema.GroupVersion{Group: "tinkerbell.org", Version: "v1alpha2"} 14 | 15 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme. 16 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 17 | 18 | // AddToScheme adds the types in this group-version to the given scheme. 19 | AddToScheme = SchemeBuilder.AddToScheme 20 | ) 21 | -------------------------------------------------------------------------------- /api/v1alpha2/osie.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 4 | 5 | type OSIESpec struct { 6 | // KernelURL is a URL to a kernel image. 7 | KernelURL string `json:"kernelUrl,omitempty"` 8 | 9 | // InitrdURL is a URL to an initrd image. 10 | InitrdURL string `json:"initrdUrl,omitempty"` 11 | } 12 | 13 | // +kubebuilder:object:root=true 14 | // +kubebuilder:storageversion 15 | // +kubebuilder:resource:categories=tinkerbell 16 | 17 | // OSIE describes an Operating System Installation Environment. It is used by Tinkerbell 18 | // to provision machines and should launch the Tink Worker component. 19 | type OSIE struct { 20 | metav1.TypeMeta `json:",inline"` 21 | metav1.ObjectMeta `json:"metadata,omitempty"` 22 | 23 | Spec OSIESpec `json:"spec,omitempty"` 24 | } 25 | 26 | // +kubebuilder:object:root=true 27 | 28 | type OSIEList struct { 29 | metav1.TypeMeta `json:",inline"` 30 | metav1.ListMeta `json:"metadata,omitempty"` 31 | Items []OSIE `json:"items"` 32 | } 33 | 34 | func init() { 35 | SchemeBuilder.Register(&OSIE{}, &OSIEList{}) 36 | } 37 | -------------------------------------------------------------------------------- /api/v1alpha2/template.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | type TemplateSpec struct { 8 | // Actions defines the set of actions to be run on a target machine. Actions are run sequentially 9 | // in the order they are specified. At least 1 action must be specified. Names of actions 10 | // must be unique within a Template. 11 | // +kubebuilder:validation:MinItems=1 12 | Actions []Action `json:"actions,omitempty"` 13 | 14 | // Volumes to be mounted on all actions. If an action specifies the same volume it will take 15 | // precedence. 16 | // +optional 17 | Volumes []Volume `json:"volumes,omitempty"` 18 | 19 | // Env defines environment variables to be available in all actions. If an action specifies 20 | // the same environment variable it will take precedence. 21 | // +optional 22 | Env map[string]string `json:"env,omitempty"` 23 | } 24 | 25 | // Action defines an individual action to be run on a target machine. 26 | type Action struct { 27 | // Name is a name for the action. 28 | Name string `json:"name"` 29 | 30 | // Image is an OCI image. 
31 | Image string `json:"image"` 32 | 33 | // Cmd defines the command to use when launching the image. It overrides the default command 34 | // of the action. It must be a unix path to an executable program. 35 | // +kubebuilder:validation:Pattern=`^(/[^/ ]*)+/?$` 36 | // +optional 37 | Cmd *string `json:"cmd,omitempty"` 38 | 39 | // Args are a set of arguments to be passed to the command executed by the container on 40 | // launch. 41 | // +optional 42 | Args []string `json:"args,omitempty"` 43 | 44 | // Env defines environment variables used when launching the container. 45 | //+optional 46 | Env map[string]string `json:"env,omitempty"` 47 | 48 | // Volumes defines the volumes to mount into the container. 49 | // +optional 50 | Volumes []Volume `json:"volumes,omitempty"` 51 | 52 | // Namespace defines the Linux namespaces this container should execute in. 53 | // +optional 54 | Namespace *Namespace `json:"namespaces,omitempty"` 55 | } 56 | 57 | // Volume is a specification for mounting a volume in an action. Volumes take the form 58 | // {SRC-VOLUME-NAME | SRC-HOST-DIR}:TGT-CONTAINER-DIR:OPTIONS. When specifying a VOLUME-NAME that 59 | // does not exist it will be created for you. Examples: 60 | // 61 | // Read-only bind mount bound to /data 62 | // 63 | // /etc/data:/data:ro 64 | // 65 | // Writable volume name bound to /data 66 | // 67 | // shared_volume:/data 68 | // 69 | // See https://docs.docker.com/storage/volumes/ for additional details. 70 | type Volume string 71 | 72 | // Namespace defines the Linux namespaces to use for the container. 73 | // See https://man7.org/linux/man-pages/man7/namespaces.7.html. 74 | type Namespace struct { 75 | // Network defines the network namespace. 76 | // +optional 77 | Network *string `json:"network,omitempty"` 78 | 79 | // PID defines the PID namespace 80 | // +optional 81 | PID *int `json:"pid,omitempty"` 82 | } 83 | 84 | // +kubebuilder:object:root=true 85 | // +kubebuilder:resource:categories=tinkerbell,shortName=tpl 86 | // +kubebuilder:unservedversion 87 | 88 | // Template defines a set of actions to be run on a target machine. The template is rendered 89 | // prior to execution where it is exposed to Hardware and user defined data. Most fields within the 90 | // TemplateSpec may contain templates values excluding .TemplateSpec.Actions[].Name. 91 | // See https://pkg.go.dev/text/template for more details. 92 | type Template struct { 93 | metav1.TypeMeta `json:",inline"` 94 | metav1.ObjectMeta `json:"metadata,omitempty"` 95 | 96 | Spec TemplateSpec `json:"spec,omitempty"` 97 | } 98 | 99 | // +kubebuilder:object:root=true 100 | 101 | type TemplateList struct { 102 | metav1.TypeMeta `json:",inline"` 103 | metav1.ListMeta `json:"metadata,omitempty"` 104 | Items []Template `json:"items"` 105 | } 106 | 107 | func init() { 108 | SchemeBuilder.Register(&Template{}, &TemplateList{}) 109 | } 110 | -------------------------------------------------------------------------------- /buf.gen.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | plugins: 3 | - name: go 4 | out: . 5 | opt: paths=source_relative 6 | - name: go-grpc 7 | out: . 8 | opt: 9 | - paths=source_relative 10 | - require_unimplemented_servers=false 11 | -------------------------------------------------------------------------------- /buf.lock: -------------------------------------------------------------------------------- 1 | # Generated by buf. DO NOT EDIT. 
2 | version: v1 3 | deps: 4 | - remote: buf.build 5 | owner: googleapis 6 | repository: googleapis 7 | commit: e7f8d366f5264595bcc4cd4139af9973 8 | -------------------------------------------------------------------------------- /buf.yaml: -------------------------------------------------------------------------------- 1 | version: v1 2 | lint: 3 | use: 4 | - DEFAULT 5 | deps: 6 | - buf.build/googleapis/googleapis 7 | breaking: 8 | use: 9 | - FILE 10 | -------------------------------------------------------------------------------- /ci-checks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env nix-shell 2 | #!nix-shell -i bash 3 | # shellcheck shell=bash 4 | 5 | set -eux 6 | 7 | failed=0 8 | 9 | # spell-checks only language files to avoid spell-checking checksums 10 | if ! git ls-files '*.sh' '*.go' | xargs codespell -q 3 -I .codespell-whitelist; then 11 | failed=1 12 | fi 13 | 14 | # --check doesn't show what line number fails, so write the result to disk for the diff to catch 15 | if ! git ls-files '*.json' | xargs prettier --list-different --write; then 16 | failed=1 17 | fi 18 | 19 | if ! git ls-files '*.sh' | xargs shfmt -l -d; then 20 | failed=1 21 | fi 22 | 23 | if ! git ls-files '*.sh' | xargs shellcheck; then 24 | failed=1 25 | fi 26 | 27 | if ! nixfmt shell.nix; then 28 | failed=1 29 | fi 30 | 31 | if ! git diff | (! grep .); then 32 | failed=1 33 | fi 34 | 35 | exit "$failed" 36 | -------------------------------------------------------------------------------- /cmd/tink-agent/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20.3 2 | 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | RUN apk add --no-cache --update --upgrade ca-certificates=20241121-r1 7 | 8 | COPY bin/tink-agent-${TARGETOS}-${TARGETARCH} /usr/bin/tink-agent 9 | 10 | ENTRYPOINT ["/usr/bin/tink-agent"] 11 | -------------------------------------------------------------------------------- /cmd/tink-agent/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/tinkerbell/tink/internal/cli" 7 | ) 8 | 9 | func main() { 10 | if err := cli.NewAgent().Execute(); err != nil { 11 | os.Exit(-1) 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /cmd/tink-controller-v1alpha2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20.3 2 | 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | RUN apk add --no-cache --update --upgrade ca-certificates=20241121-r1 7 | 8 | COPY bin/tink-controller-v1alpha2-${TARGETOS}-${TARGETARCH} /usr/bin/tink-controller 9 | 10 | ENTRYPOINT ["/usr/bin/tink-controller"] 11 | -------------------------------------------------------------------------------- /cmd/tink-controller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20.3 2 | 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | RUN apk add --no-cache --update --upgrade ca-certificates=20241121-r1 7 | 8 | COPY bin/tink-controller-${TARGETOS}-${TARGETARCH} /usr/bin/tink-controller 9 | 10 | ENTRYPOINT ["/usr/bin/tink-controller"] 11 | -------------------------------------------------------------------------------- /cmd/tink-server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20.3 2 | 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | EXPOSE 
42113 42114 7 | 8 | RUN apk add --no-cache --update --upgrade ca-certificates=20241121-r1 9 | 10 | COPY bin/tink-server-${TARGETOS}-${TARGETARCH} /usr/bin/tink-server 11 | 12 | ENTRYPOINT ["/usr/bin/tink-server"] 13 | -------------------------------------------------------------------------------- /cmd/tink-worker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20.3 2 | 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | RUN apk add --no-cache --update --upgrade ca-certificates=20241121-r1 7 | 8 | COPY bin/tink-worker-${TARGETOS}-${TARGETARCH} /usr/bin/tink-worker 9 | 10 | ENTRYPOINT [ "/usr/bin/tink-worker" ] 11 | -------------------------------------------------------------------------------- /cmd/tink-worker/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/tinkerbell/tink/cmd/tink-worker/cmd" 7 | ) 8 | 9 | // version is set at build time. 10 | var version = "devel" 11 | 12 | func main() { 13 | rootCmd := cmd.NewRootCommand(version) 14 | if err := rootCmd.Execute(); err != nil { 15 | os.Exit(1) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /cmd/tink-worker/worker/log_capturer.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "bufio" 5 | "context" 6 | "fmt" 7 | "io" 8 | 9 | "github.com/docker/docker/api/types/container" 10 | "github.com/docker/docker/client" 11 | "github.com/go-logr/logr" 12 | ) 13 | 14 | // DockerLogCapturer is a LogCapturer that can stream docker container logs to an io.Writer. 15 | type DockerLogCapturer struct { 16 | dockerClient client.ContainerAPIClient 17 | logger logr.Logger 18 | writer io.Writer 19 | } 20 | 21 | // getLogger is a helper function to get logging out of a context, or use the default logger. 22 | func (l *DockerLogCapturer) getLogger(ctx context.Context) logr.Logger { 23 | loggerIface := ctx.Value(loggingContextKey) 24 | if loggerIface == nil { 25 | return l.logger 26 | } 27 | lg, _ := loggerIface.(logr.Logger) 28 | return lg 29 | } 30 | 31 | // NewDockerLogCapturer returns a LogCapturer that can stream container logs to a given writer. 32 | func NewDockerLogCapturer(cli client.ContainerAPIClient, logger logr.Logger, writer io.Writer) *DockerLogCapturer { 33 | return &DockerLogCapturer{ 34 | dockerClient: cli, 35 | logger: logger, 36 | writer: writer, 37 | } 38 | } 39 | 40 | // CaptureLogs streams container logs to the capturer's writer. 
41 | func (l *DockerLogCapturer) CaptureLogs(ctx context.Context, id string) { 42 | reader, err := l.dockerClient.ContainerLogs(ctx, id, container.LogsOptions{ 43 | ShowStdout: true, 44 | ShowStderr: true, 45 | Follow: true, 46 | Timestamps: false, 47 | }) 48 | if err != nil { 49 | l.getLogger(ctx).Error(err, "failed to capture logs for container ", "containerID", id) 50 | return 51 | } 52 | defer reader.Close() 53 | 54 | scanner := bufio.NewScanner(reader) 55 | for scanner.Scan() { 56 | fmt.Fprintln(l.writer, scanner.Text()) 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /cmd/tink-worker/worker/log_capturer_test.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "io" 7 | "os" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/docker/docker/api/types/container" 12 | "github.com/docker/docker/client" 13 | "github.com/go-logr/logr" 14 | "github.com/go-logr/zapr" 15 | "github.com/pkg/errors" 16 | "go.uber.org/zap" 17 | ) 18 | 19 | type fakeDockerLoggerClient struct { 20 | client.ContainerAPIClient 21 | content string 22 | err error 23 | } 24 | 25 | func (c *fakeDockerLoggerClient) ContainerLogs(context.Context, string, container.LogsOptions) (io.ReadCloser, error) { 26 | if c.err != nil { 27 | return nil, c.err 28 | } 29 | return io.NopCloser(strings.NewReader(c.content)), nil 30 | } 31 | 32 | func newFakeDockerLoggerClient(content string, err error) *fakeDockerLoggerClient { 33 | return &fakeDockerLoggerClient{ 34 | content: content, 35 | err: err, 36 | } 37 | } 38 | 39 | func TestLogCapturer(t *testing.T) { 40 | cases := []struct { 41 | name string 42 | writer bytes.Buffer 43 | wanterr error 44 | content string 45 | }{ 46 | { 47 | name: "Content written to buffer", 48 | writer: *bytes.NewBufferString(""), 49 | wanterr: nil, 50 | content: "Line1\nline2\n", 51 | }, 52 | { 53 | name: "empty buffer from error", 54 | writer: *bytes.NewBufferString(""), 55 | wanterr: errors.New("Docker failure"), 56 | content: "", 57 | }, 58 | } 59 | 60 | for _, tc := range cases { 61 | t.Run(tc.name, func(t *testing.T) { 62 | logger := zapr.NewLogger(zap.Must(zap.NewDevelopment())) 63 | ctx := context.Background() 64 | clogger := NewDockerLogCapturer( 65 | newFakeDockerLoggerClient(tc.content, tc.wanterr), 66 | logger, 67 | &tc.writer) 68 | clogger.CaptureLogs(ctx, tc.name) 69 | got := tc.writer.String() 70 | if got != tc.content { 71 | t.Errorf("Wrong content written to buffer. 
Expected '%s', got '%s'", tc.content, got) 72 | } 73 | }) 74 | } 75 | } 76 | 77 | func TestLogCapturerContextLogger(t *testing.T) { 78 | cases := []struct { 79 | name string 80 | logger func() logr.Logger 81 | writer bytes.Buffer 82 | }{ 83 | { 84 | name: "no context logger", 85 | logger: nil, 86 | }, 87 | { 88 | name: "with context logger", 89 | logger: func() logr.Logger { 90 | return zapr.NewLogger(zap.Must(zap.NewDevelopment())) 91 | }, 92 | writer: *bytes.NewBufferString(""), 93 | }, 94 | } 95 | 96 | for _, tc := range cases { 97 | t.Run(tc.name, func(_ *testing.T) { 98 | logger := zapr.NewLogger(zap.Must(zap.NewDevelopment())) 99 | ctx := context.Background() 100 | if tc.logger != nil { 101 | ctx = context.WithValue(ctx, loggingContextKey, tc.logger()) 102 | } 103 | clogger := &DockerLogCapturer{ 104 | newFakeDockerLoggerClient("", nil), 105 | logger, 106 | os.Stdout, 107 | } 108 | clogger.getLogger(ctx) 109 | }) 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /cmd/tink-worker/worker/registry.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "encoding/json" 7 | "io" 8 | "path" 9 | 10 | "github.com/docker/docker/api/types/image" 11 | "github.com/docker/docker/api/types/registry" 12 | "github.com/pkg/errors" 13 | ) 14 | 15 | // RegistryConnDetails are the connection details for accessing a Docker registry. 16 | type RegistryConnDetails struct { 17 | Registry string 18 | Username string 19 | Password string 20 | } 21 | 22 | // ImagePullStatus is the status of the downloaded Image chunk. 23 | type ImagePullStatus struct { 24 | Status string `json:"status"` 25 | Error string `json:"error"` 26 | Progress string `json:"progress"` 27 | ProgressDetail struct { 28 | Current int `json:"current"` 29 | Total int `json:"total"` 30 | } `json:"progressDetail"` 31 | } 32 | 33 | // PullImage outputs to stdout the contents of the requested image (relative to the registry). 34 | // If a pull fails but the image already exists then we will return a nil error. 
35 | func (m *containerManager) PullImage(ctx context.Context, img string) error { 36 | l := m.getLogger(ctx) 37 | authConfig := registry.AuthConfig{ 38 | Username: m.registryDetails.Username, 39 | Password: m.registryDetails.Password, 40 | ServerAddress: m.registryDetails.Registry, 41 | } 42 | encodedJSON, err := json.Marshal(authConfig) 43 | if err != nil { 44 | return errors.Wrap(err, "DOCKER AUTH") 45 | } 46 | authStr := base64.URLEncoding.EncodeToString(encodedJSON) 47 | 48 | out, err := m.cli.ImagePull(ctx, path.Join(m.registryDetails.Registry, img), image.PullOptions{RegistryAuth: authStr}) 49 | if err != nil { 50 | if _, _, err := m.cli.ImageInspectWithRaw(ctx, path.Join(m.registryDetails.Registry, img)); err == nil { 51 | return nil 52 | } 53 | return errors.Wrap(err, "DOCKER PULL") 54 | } 55 | defer func() { 56 | if err := out.Close(); err != nil { 57 | l.Error(err, "") 58 | } 59 | }() 60 | fd := json.NewDecoder(out) 61 | var status *ImagePullStatus 62 | for { 63 | if err := fd.Decode(&status); err != nil { 64 | if errors.Is(err, io.EOF) { 65 | break 66 | } 67 | return errors.Wrap(err, "DOCKER PULL") 68 | } 69 | if status.Error != "" { 70 | return errors.Wrap(errors.New(status.Error), "DOCKER PULL") 71 | } 72 | } 73 | return nil 74 | } 75 | -------------------------------------------------------------------------------- /cmd/tink-worker/worker/registry_test.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "io" 7 | "strings" 8 | "testing" 9 | 10 | "github.com/docker/docker/api/types" 11 | "github.com/docker/docker/api/types/image" 12 | "github.com/go-logr/zapr" 13 | "go.uber.org/zap" 14 | ) 15 | 16 | func (c *fakeDockerClient) ImagePull(context.Context, string, image.PullOptions) (io.ReadCloser, error) { 17 | if c.err != nil { 18 | return nil, c.err 19 | } 20 | return io.NopCloser(strings.NewReader(c.imagePullContent)), nil 21 | } 22 | 23 | func (c *fakeDockerClient) ImageInspectWithRaw(context.Context, string) (types.ImageInspect, []byte, error) { 24 | return types.ImageInspect{}, nil, c.imageInspectErr 25 | } 26 | 27 | func TestContainerManagerPullImage(t *testing.T) { 28 | cases := []struct { 29 | name string 30 | image string 31 | responseContent string 32 | registry RegistryConnDetails 33 | clientErr error 34 | wantErr error 35 | imageInspectErr error 36 | }{ 37 | { 38 | name: "Happy Path", 39 | image: "yav.in/4/deathstar:nomedalforchewie", 40 | responseContent: "{}\n{}", 41 | }, 42 | { 43 | name: "malformed JSON", 44 | image: "yav.in/4/deathstar:nomedalforchewie", 45 | responseContent: "{", 46 | clientErr: errors.New("You missed the shot"), 47 | wantErr: errors.New("DOCKER PULL: You missed the shot"), 48 | imageInspectErr: errors.New("Image not in local cache"), 49 | }, 50 | { 51 | name: "pull error", 52 | image: "yav.in/4/deathstar:nomedalforchewie", 53 | responseContent: `{"error": "You missed the shot"}`, 54 | wantErr: errors.New("DOCKER PULL: You missed the shot"), 55 | imageInspectErr: errors.New("Image not in local cache"), 56 | }, 57 | { 58 | name: "image already exists, no error", 59 | image: "yav.in/4/deathstar:nomedalforchewie", 60 | clientErr: errors.New("You missed the shot"), 61 | wantErr: nil, 62 | }, 63 | } 64 | 65 | for _, tc := range cases { 66 | t.Run(tc.name, func(t *testing.T) { 67 | logger := zapr.NewLogger(zap.Must(zap.NewDevelopment())) 68 | mgr := NewContainerManager(logger, newFakeDockerClient("", tc.responseContent, 0, 0, tc.clientErr, nil, 
withImageInspectErr(tc.imageInspectErr)), tc.registry) 69 | 70 | ctx := context.Background() 71 | gotErr := mgr.PullImage(ctx, tc.image) 72 | if gotErr != nil { 73 | if tc.wantErr == nil { 74 | t.Errorf(`Got unexpected error: %v"`, gotErr) 75 | } else if gotErr.Error() != tc.wantErr.Error() { 76 | t.Errorf(`Got unexpected error: got "%v" wanted "%v"`, gotErr, tc.wantErr) 77 | } 78 | return 79 | } 80 | if gotErr == nil && tc.wantErr != nil { 81 | t.Errorf("Missing expected error: %v", tc.wantErr) 82 | return 83 | } 84 | }) 85 | } 86 | } 87 | -------------------------------------------------------------------------------- /cmd/virtual-worker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20.3 2 | 3 | ARG TARGETOS 4 | ARG TARGETARCH 5 | 6 | RUN apk add --no-cache --update --upgrade ca-certificates=20241121-r1 7 | 8 | COPY bin/virtual-worker-${TARGETOS}-${TARGETARCH} /usr/bin/virtual-worker 9 | 10 | ENTRYPOINT [ "/usr/bin/virtual-worker" ] 11 | -------------------------------------------------------------------------------- /cmd/virtual-worker/cmd/root.go: -------------------------------------------------------------------------------- 1 | package cmd 2 | 3 | import ( 4 | "strings" 5 | "time" 6 | 7 | "github.com/go-logr/logr" 8 | "github.com/go-logr/zapr" 9 | "github.com/pkg/errors" 10 | "github.com/spf13/cobra" 11 | "github.com/spf13/pflag" 12 | "github.com/spf13/viper" 13 | tinkWorker "github.com/tinkerbell/tink/cmd/tink-worker/worker" 14 | "github.com/tinkerbell/tink/cmd/virtual-worker/worker" 15 | "github.com/tinkerbell/tink/internal/client" 16 | "github.com/tinkerbell/tink/internal/proto" 17 | "go.uber.org/zap" 18 | ) 19 | 20 | const ( 21 | defaultRetryIntervalSeconds = 3 22 | defaultRetryCount = 3 23 | defaultMaxFileSize = 10 * 1024 * 1024 // 10MB 24 | ) 25 | 26 | // NewRootCommand creates a new Virtual Worker Cobra root command. 
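// Typical usage mirrors cmd/virtual-worker/main.go (reproduced later in this listing):
//
//	rootCmd := cmd.NewRootCommand(version)
//	if err := rootCmd.Execute(); err != nil {
//		os.Exit(1)
//	}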
27 | func NewRootCommand(version string) *cobra.Command { 28 | zlog, err := zap.NewProduction() 29 | if err != nil { 30 | panic(err) 31 | } 32 | logger := zapr.NewLogger(zlog).WithName("github.com/tinkerbell/tink") 33 | 34 | rootCmd := &cobra.Command{ 35 | Use: "virtual-worker", 36 | Short: "Virtual Tink Worker", 37 | PreRunE: func(cmd *cobra.Command, _ []string) error { 38 | return createViper(logger, cmd) 39 | }, 40 | RunE: func(cmd *cobra.Command, _ []string) error { 41 | retryInterval := viper.GetDuration("retry-interval") 42 | retries := viper.GetInt("max-retry") 43 | workerID := viper.GetString("id") 44 | maxFileSize := viper.GetInt64("max-file-size") 45 | captureActionLogs := viper.GetBool("capture-action-logs") 46 | sleepMin := viper.GetDuration("sleep-min") 47 | sleepJitter := viper.GetDuration("sleep-jitter") 48 | 49 | logger.Info("starting", "version", version) 50 | 51 | conn, err := client.NewClientConn( 52 | viper.GetString("tinkerbell-grpc-authority"), 53 | viper.GetBool("tinkerbell-tls"), 54 | viper.GetBool("tinkerbell-insecure-tls"), 55 | ) 56 | if err != nil { 57 | return err 58 | } 59 | workflowClient := proto.NewWorkflowServiceClient(conn) 60 | 61 | containerManager := worker.NewFakeContainerManager(logger, sleepMin, sleepJitter) 62 | logCapturer := worker.NewEmptyLogCapturer() 63 | 64 | w := tinkWorker.NewWorker( 65 | workerID, 66 | workflowClient, 67 | containerManager, 68 | logCapturer, 69 | logger, 70 | tinkWorker.WithMaxFileSize(maxFileSize), 71 | tinkWorker.WithRetries(retryInterval, retries), 72 | tinkWorker.WithDataDir("./worker"), 73 | tinkWorker.WithLogCapture(captureActionLogs)) 74 | 75 | err = w.ProcessWorkflowActions(cmd.Context()) 76 | if err != nil { 77 | return errors.Wrap(err, "worker Finished with error") 78 | } 79 | return nil 80 | }, 81 | } 82 | 83 | rootCmd.Flags().Duration("retry-interval", defaultRetryIntervalSeconds*time.Second, "Retry interval in seconds (RETRY_INTERVAL)") 84 | rootCmd.Flags().Int("max-retry", defaultRetryCount, "Maximum number of retries to attempt (MAX_RETRY)") 85 | rootCmd.Flags().Int64("max-file-size", defaultMaxFileSize, "Maximum file size in bytes (MAX_FILE_SIZE)") 86 | rootCmd.Flags().Bool("capture-action-logs", true, "Capture action container output as part of worker logs") 87 | rootCmd.Flags().Duration("sleep-min", time.Second*4, "The minimum amount of time to sleep during faked docker operations") 88 | rootCmd.Flags().Duration("sleep-jitter", time.Second*2, "The amount of jitter to add during faked docker operations") 89 | 90 | must := func(err error) { 91 | if err != nil { 92 | logger.Error(err, "") 93 | } 94 | } 95 | 96 | rootCmd.Flags().StringP("id", "i", "", "Sets the worker id (ID)") 97 | must(rootCmd.MarkFlagRequired("id")) 98 | 99 | _ = viper.BindPFlags(rootCmd.Flags()) 100 | 101 | return rootCmd 102 | } 103 | 104 | // createViper creates a Viper object configured to read in configuration files 105 | // (from various paths with content type specific filename extensions) and loads 106 | // environment variables. 107 | func createViper(logger logr.Logger, cmd *cobra.Command) error { 108 | viper.AutomaticEnv() 109 | viper.SetConfigName("virtual-worker") 110 | viper.AddConfigPath("/etc/tinkerbell") 111 | viper.AddConfigPath(".") 112 | viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) 113 | 114 | // If a config file is found, read it in. 
115 | if err := viper.ReadInConfig(); err != nil { 116 | if _, ok := err.(viper.ConfigFileNotFoundError); !ok { 117 | logger.Error(err, "could not load config file", "configFile", viper.ConfigFileUsed()) 118 | return err 119 | } 120 | logger.Info("no config file found") 121 | } else { 122 | logger.Info("loaded config file", "configFile", viper.ConfigFileUsed()) 123 | } 124 | 125 | cmd.Flags().VisitAll(func(f *pflag.Flag) { 126 | if viper.IsSet(f.Name) { 127 | _ = cmd.Flags().SetAnnotation(f.Name, cobra.BashCompOneRequiredFlag, []string{"false"}) 128 | } 129 | }) 130 | 131 | return nil 132 | } 133 | -------------------------------------------------------------------------------- /cmd/virtual-worker/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "os" 5 | 6 | "github.com/tinkerbell/tink/cmd/virtual-worker/cmd" 7 | ) 8 | 9 | // version is set at build time. 10 | var version = "devel" 11 | 12 | func main() { 13 | rootCmd := cmd.NewRootCommand(version) 14 | if err := rootCmd.Execute(); err != nil { 15 | os.Exit(1) 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /cmd/virtual-worker/worker/container_manager.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "context" 5 | "math/rand" 6 | "time" 7 | 8 | "github.com/go-logr/logr" 9 | "github.com/tinkerbell/tink/cmd/tink-worker/worker" 10 | "github.com/tinkerbell/tink/internal/proto" 11 | ) 12 | 13 | func getRandHexStr(r *rand.Rand, length int) string { 14 | alphabet := []byte("1234567890abcdef") 15 | resp := []byte{} 16 | for i := 0; i < length; i++ { 17 | resp = append(resp, alphabet[r.Intn(len(alphabet))]) 18 | } 19 | return string(resp) 20 | } 21 | 22 | type fakeManager struct { 23 | // minimum milliseconds to sleep for faked Docker API calls 24 | sleepMinimum time.Duration 25 | // additional jitter milliseconds to sleep for faked Docker API calls 26 | sleepJitter time.Duration 27 | 28 | r *rand.Rand 29 | logger logr.Logger 30 | } 31 | 32 | func (m *fakeManager) sleep() { 33 | jitter := time.Duration(m.r.Int63n(m.sleepJitter.Milliseconds())) * time.Millisecond 34 | time.Sleep(jitter + m.sleepMinimum) 35 | } 36 | 37 | // NewFakeContainerManager returns a fake worker.ContainerManager that will sleep for Docker API calls. 38 | func NewFakeContainerManager(l logr.Logger, sleepMinimum, sleepJitter time.Duration) worker.ContainerManager { 39 | if sleepMinimum <= 0 { 40 | sleepMinimum = 1 41 | } 42 | if sleepJitter <= 0 { 43 | sleepJitter = 1 44 | } 45 | return &fakeManager{ 46 | sleepMinimum: sleepMinimum, 47 | sleepJitter: sleepJitter, 48 | logger: l, 49 | // intentionally weak RNG. 
This is only for fake output 50 | r: rand.New(rand.NewSource(time.Now().UnixNano())), 51 | } 52 | } 53 | 54 | func (m *fakeManager) CreateContainer(_ context.Context, cmd []string, _ string, _ *proto.WorkflowAction, _, _ bool) (string, error) { 55 | m.logger.Info("creating container", "command", cmd) 56 | return getRandHexStr(m.r, 64), nil 57 | } 58 | 59 | func (m *fakeManager) StartContainer(_ context.Context, id string) error { 60 | m.logger.Info("starting container", "containerID", id) 61 | return nil 62 | } 63 | 64 | func (m *fakeManager) WaitForContainer(_ context.Context, id string) (proto.State, error) { 65 | m.logger.Info("waiting for container", "containerID", id) 66 | m.sleep() 67 | 68 | return proto.State_STATE_SUCCESS, nil 69 | } 70 | 71 | func (m *fakeManager) WaitForFailedContainer(_ context.Context, id string, failedActionStatus chan proto.State) { 72 | m.logger.Info("waiting for container", "containerID", id) 73 | m.sleep() 74 | failedActionStatus <- proto.State_STATE_SUCCESS 75 | } 76 | 77 | func (m *fakeManager) RemoveContainer(_ context.Context, id string) error { 78 | m.logger.Info("removing container", "containerID", id) 79 | return nil 80 | } 81 | 82 | func (m *fakeManager) PullImage(_ context.Context, image string) error { 83 | m.logger.Info("pulling image", "image", image) 84 | m.sleep() 85 | 86 | return nil 87 | } 88 | -------------------------------------------------------------------------------- /cmd/virtual-worker/worker/log_capturer.go: -------------------------------------------------------------------------------- 1 | package worker 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/tinkerbell/tink/cmd/tink-worker/worker" 7 | ) 8 | 9 | type emptyLogger struct{} 10 | 11 | func (l *emptyLogger) CaptureLogs(context.Context, string) {} 12 | 13 | // NewEmptyLogCapturer returns an no-op log capturer. 14 | func NewEmptyLogCapturer() worker.LogCapturer { 15 | return &emptyLogger{} 16 | } 17 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | ignore: 2 | - "**/zz_generated.deepcopy.go" 3 | -------------------------------------------------------------------------------- /config/crd/bases/tinkerbell.org_osies.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.16.3 6 | name: osies.tinkerbell.org 7 | spec: 8 | group: tinkerbell.org 9 | names: 10 | categories: 11 | - tinkerbell 12 | kind: OSIE 13 | listKind: OSIEList 14 | plural: osies 15 | singular: osie 16 | scope: Namespaced 17 | versions: 18 | - name: v1alpha1 19 | schema: 20 | openAPIV3Schema: 21 | description: OSIE configuration. 22 | properties: 23 | baseURL: 24 | type: string 25 | initrd: 26 | type: string 27 | kernel: 28 | type: string 29 | type: object 30 | served: true 31 | storage: false 32 | - name: v1alpha2 33 | schema: 34 | openAPIV3Schema: 35 | description: |- 36 | OSIE describes an Operating System Installation Environment. It is used by Tinkerbell 37 | to provision machines and should launch the Tink Worker component. 38 | properties: 39 | apiVersion: 40 | description: |- 41 | APIVersion defines the versioned schema of this representation of an object. 42 | Servers should convert recognized schemas to the latest internal value, and 43 | may reject unrecognized values. 
44 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 45 | type: string 46 | kind: 47 | description: |- 48 | Kind is a string value representing the REST resource this object represents. 49 | Servers may infer this from the endpoint the client submits requests to. 50 | Cannot be updated. 51 | In CamelCase. 52 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 53 | type: string 54 | metadata: 55 | type: object 56 | spec: 57 | properties: 58 | initrdUrl: 59 | description: InitrdURL is a URL to an initrd image. 60 | type: string 61 | kernelUrl: 62 | description: KernelURL is a URL to a kernel image. 63 | type: string 64 | type: object 65 | type: object 66 | served: true 67 | storage: true 68 | -------------------------------------------------------------------------------- /config/crd/bases/tinkerbell.org_templates.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | annotations: 5 | controller-gen.kubebuilder.io/version: v0.16.3 6 | name: templates.tinkerbell.org 7 | spec: 8 | group: tinkerbell.org 9 | names: 10 | categories: 11 | - tinkerbell 12 | kind: Template 13 | listKind: TemplateList 14 | plural: templates 15 | shortNames: 16 | - tpl 17 | singular: template 18 | scope: Namespaced 19 | versions: 20 | - additionalPrinterColumns: 21 | - jsonPath: .status.state 22 | name: State 23 | type: string 24 | name: v1alpha1 25 | schema: 26 | openAPIV3Schema: 27 | description: Template is the Schema for the Templates API. 28 | properties: 29 | apiVersion: 30 | description: |- 31 | APIVersion defines the versioned schema of this representation of an object. 32 | Servers should convert recognized schemas to the latest internal value, and 33 | may reject unrecognized values. 34 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 35 | type: string 36 | kind: 37 | description: |- 38 | Kind is a string value representing the REST resource this object represents. 39 | Servers may infer this from the endpoint the client submits requests to. 40 | Cannot be updated. 41 | In CamelCase. 42 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 43 | type: string 44 | metadata: 45 | type: object 46 | spec: 47 | description: TemplateSpec defines the desired state of Template. 48 | properties: 49 | data: 50 | type: string 51 | type: object 52 | status: 53 | description: TemplateStatus defines the observed state of Template. 54 | properties: 55 | state: 56 | description: TemplateState represents the template state. 
57 | type: string 58 | type: object 59 | type: object 60 | served: true 61 | storage: true 62 | subresources: 63 | status: {} 64 | -------------------------------------------------------------------------------- /config/crd/examples/hardware.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Hardware 3 | metadata: 4 | name: sm01 5 | namespace: default 6 | spec: 7 | disks: 8 | - device: /dev/nvme0n1 9 | metadata: 10 | facility: 11 | facility_code: onprem 12 | manufacturer: 13 | slug: supermicro 14 | instance: 15 | userdata: "" 16 | hostname: "sm01" 17 | id: "3c:ec:ef:4c:4f:54" 18 | operating_system: 19 | distro: "ubuntu" 20 | os_slug: "ubuntu_20_04" 21 | version: "20.04" 22 | interfaces: 23 | - dhcp: 24 | arch: x86_64 25 | hostname: sm01 26 | ip: 27 | address: 172.16.10.100 28 | gateway: 172.16.10.1 29 | netmask: 255.255.255.0 30 | lease_time: 86400 31 | mac: 3c:ec:ef:4c:4f:54 32 | name_servers: 33 | - 172.16.10.1 34 | - 10.1.1.11 35 | uefi: true 36 | netboot: 37 | allowPXE: true 38 | allowWorkflow: true 39 | -------------------------------------------------------------------------------- /config/crd/examples/template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Template 3 | metadata: 4 | name: debian 5 | namespace: default 6 | spec: 7 | data: | 8 | version: "0.1" 9 | name: debian 10 | global_timeout: 1800 11 | tasks: 12 | - name: "os-installation" 13 | worker: "{{.device_1}}" 14 | volumes: 15 | - /dev:/dev 16 | - /dev/console:/dev/console 17 | - /lib/firmware:/lib/firmware:ro 18 | actions: 19 | - name: "stream-debian-image" 20 | image: quay.io/tinkerbell-actions/image2disk:v1.0.0 21 | timeout: 600 22 | environment: 23 | DEST_DISK: /dev/nvme0n1 24 | # Hegel IP 25 | IMG_URL: "http://10.1.1.11:8080/debian-10-openstack-amd64.raw.gz" 26 | COMPRESSED: true 27 | - name: "add-tink-cloud-init-config" 28 | image: quay.io/tinkerbell-actions/writefile:v1.0.0 29 | timeout: 90 30 | environment: 31 | DEST_DISK: /dev/nvme0n1p1 32 | FS_TYPE: ext4 33 | DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg 34 | UID: 0 35 | GID: 0 36 | MODE: 0600 37 | DIRMODE: 0700 38 | CONTENTS: | 39 | datasource: 40 | Ec2: 41 | # Hegel IP 42 | #metadata_urls: ["http://10.1.1.11:50061"] 43 | strict_id: false 44 | system_info: 45 | default_user: 46 | name: tink 47 | groups: [wheel, adm, sudo] 48 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 49 | shell: /bin/bash 50 | users: 51 | - name: tink 52 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 53 | warnings: 54 | dsid_missing_source: off 55 | - name: "add-tink-cloud-init-ds-config" 56 | image: quay.io/tinkerbell-actions/writefile:v1.0.0 57 | timeout: 90 58 | environment: 59 | DEST_DISK: /dev/nvme0n1p1 60 | FS_TYPE: ext4 61 | DEST_PATH: /etc/cloud/ds-identify.cfg 62 | UID: 0 63 | GID: 0 64 | MODE: 0600 65 | DIRMODE: 0700 66 | CONTENTS: | 67 | datasource: Ec2 68 | - name: "kexec-debian" 69 | image: quay.io/tinkerbell-actions/kexec:v1.0.0 70 | timeout: 90 71 | pid: host 72 | environment: 73 | BLOCK_DEVICE: /dev/nvme0n1p1 74 | FS_TYPE: ext4 75 | -------------------------------------------------------------------------------- /config/crd/examples/workflow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Workflow 3 | metadata: 4 | name: wf1 5 | namespace: default 6 | spec: 7 | templateRef: debian 8 | hardwareRef: sm01 9 | hardwareMap: 10 | 
device_1: 3c:ec:ef:4c:4f:54 11 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/tinkerbell.org_hardware.yaml 6 | - bases/tinkerbell.org_templates.yaml 7 | - bases/tinkerbell.org_workflows.yaml 8 | #+kubebuilder:scaffold:crdkustomizeresource 9 | 10 | # the following config is for teaching kustomize how to do kustomization for CRDs. 11 | configurations: 12 | - kustomizeconfig.yaml 13 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | namespace: 11 | - kind: CustomResourceDefinition 12 | version: v1 13 | group: apiextensions.k8s.io 14 | path: spec/conversion/webhook/clientConfig/service/namespace 15 | create: false 16 | varReference: 17 | - path: metadata/annotations 18 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: tink-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: tink- 10 | 11 | resources: 12 | - namespace.yaml 13 | - ../crd 14 | - ../manager-rbac 15 | - ../manager 16 | - ../server-rbac 17 | - ../server 18 | 19 | 20 | apiVersion: kustomize.config.k8s.io/v1beta1 21 | kind: Kustomization 22 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | securityContext: 14 | allowPrivilegeEscalation: false 15 | capabilities: 16 | drop: 17 | - "ALL" 18 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.15.0 19 | args: 20 | - "--secure-listen-address=0.0.0.0:8443" 21 | - "--upstream=http://127.0.0.1:8080/" 22 | - "--logtostderr=true" 23 | - "--v=0" 24 | ports: 25 | - containerPort: 8443 26 | protocol: TCP 27 | name: https 28 | resources: 29 | limits: 30 | cpu: 500m 31 | memory: 128Mi 32 | requests: 33 | cpu: 5m 34 | memory: 64Mi 35 | - name: manager 36 | args: 37 | - "--health-probe-bind-address=:8081" 38 | - "--metrics-bind-address=127.0.0.1:8080" 39 | - "--leader-elect" 40 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | -------------------------------------------------------------------------------- /config/default/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | -------------------------------------------------------------------------------- /config/manager-rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: 7 | - "/metrics" 8 | verbs: 9 | - get 10 | -------------------------------------------------------------------------------- /config/manager-rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: 7 | - authentication.k8s.io 8 | resources: 9 | - tokenreviews 10 | verbs: 11 | - create 12 | - apiGroups: 13 | - authorization.k8s.io 14 | resources: 15 | - subjectaccessreviews 16 | verbs: 17 | - create 18 | -------------------------------------------------------------------------------- /config/manager-rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/manager-rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | name: controller-manager-metrics-service 6 | namespace: system 7 | spec: 8 | ports: 9 | - name: https 10 | port: 8443 11 | protocol: TCP 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | 
-------------------------------------------------------------------------------- /config/manager-rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | -------------------------------------------------------------------------------- /config/manager-rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - coordination.k8s.io 21 | resources: 22 | - leases 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - events 35 | verbs: 36 | - create 37 | - patch 38 | -------------------------------------------------------------------------------- /config/manager-rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/manager-rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - bmc.tinkerbell.org 9 | resources: 10 | - job 11 | - job/status 12 | verbs: 13 | - create 14 | - delete 15 | - get 16 | - list 17 | - watch 18 | - apiGroups: 19 | - tinkerbell.org 20 | resources: 21 | - hardware 22 | - hardware/status 23 | - templates 24 | - templates/status 25 | verbs: 26 | - get 27 | - list 28 | - patch 29 | - update 30 | - watch 31 | - apiGroups: 32 | - tinkerbell.org 33 | resources: 34 | - workflows 35 | - workflows/status 36 | verbs: 37 | - delete 38 | - get 39 | - list 40 | - patch 41 | - update 42 | - watch 43 | -------------------------------------------------------------------------------- /config/manager-rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/manager-rbac/service_account.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | labels: 7 | control-plane: controller-manager 8 | spec: 9 | selector: 10 | matchLabels: 11 | control-plane: controller-manager 12 | replicas: 1 13 | template: 14 | metadata: 15 | annotations: 16 | kubectl.kubernetes.io/default-container: manager 17 | labels: 18 | control-plane: controller-manager 19 | spec: 20 | containers: 21 | - image: tink-controller:latest 22 | imagePullPolicy: IfNotPresent 23 | name: manager 24 | resources: 25 | limits: 26 | cpu: 500m 27 | memory: 128Mi 28 | requests: 29 | cpu: 10m 30 | memory: 64Mi 31 | serviceAccountName: controller-manager 32 | terminationGracePeriodSeconds: 10 33 | -------------------------------------------------------------------------------- /config/server-rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 
7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | -------------------------------------------------------------------------------- /config/server-rbac/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: server-role 5 | rules: 6 | - apiGroups: 7 | - tinkerbell.org 8 | resources: 9 | - hardware 10 | - hardware/status 11 | - templates 12 | - templates/status 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - tinkerbell.org 19 | resources: 20 | - workflows 21 | - workflows/status 22 | verbs: 23 | - get 24 | - list 25 | - patch 26 | - update 27 | - watch 28 | -------------------------------------------------------------------------------- /config/server-rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: server-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: server-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: server 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/server-rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: server 5 | namespace: system 6 | -------------------------------------------------------------------------------- /config/server/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - server.yaml 3 | -------------------------------------------------------------------------------- /config/server/server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: server 5 | namespace: system 6 | labels: 7 | control-plane: server 8 | spec: 9 | selector: 10 | matchLabels: 11 | control-plane: server 12 | replicas: 1 13 | template: 14 | metadata: 15 | annotations: 16 | kubectl.kubernetes.io/default-container: server 17 | labels: 18 | control-plane: server 19 | spec: 20 | containers: 21 | - args: 22 | - "--backend=kubernetes" 23 | image: server:latest 24 | imagePullPolicy: IfNotPresent 25 | name: tink-server 26 | ports: 27 | - containerPort: 42113 28 | hostPort: 42113 29 | name: grpc 30 | resources: 31 | limits: 32 | cpu: 500m 33 | memory: 128Mi 34 | requests: 35 | cpu: 10m 36 | memory: 64Mi 37 | serviceAccountName: server 38 | terminationGracePeriodSeconds: 10 39 | -------------------------------------------------------------------------------- /config/tink-controller-v1alpha2/crd_patch.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "op": "replace", 4 | "path": "/spec/versions/0/served", 5 | "value": false 6 | }, 7 | { 8 | "op": "replace", 9 | "path": "/spec/versions/0/storage", 10 | "value": false 11 | }, 12 | { 13 | "op": "replace", 14 | "path": "/spec/versions/1/served", 15 | "value": true 16 | }, 17 | { 18 | "op": "replace", 19 | "path": "/spec/versions/1/storage", 20 | "value": true 21 | } 22 | ] 23 | -------------------------------------------------------------------------------- /config/tink-controller-v1alpha2/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | resources: 2 | - ../default 3 | 4 | patches: 5 | - target: 6 | group: apiextensions.k8s.io 7 | version: v1 8 | kind: CustomResourceDefinition 9 | name: workflows.tinkerbell.org 10 | path: crd_patch.json 11 | - target: 12 | group: apiextensions.k8s.io 13 | version: v1 14 | kind: CustomResourceDefinition 15 | name: templates.tinkerbell.org 16 | path: crd_patch.json 17 | - target: 18 | group: apiextensions.k8s.io 19 | version: v1 20 | kind: CustomResourceDefinition 21 | name: hardware.tinkerbell.org 22 | path: crd_patch.json 23 | 24 | apiVersion: kustomize.config.k8s.io/v1beta1 25 | kind: Kustomization 26 | -------------------------------------------------------------------------------- /contrib/tag-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit -o nounset -o pipefail 4 | 5 | if [ -z "${1-}" ]; then 6 | echo "Must specify new tag" 7 | exit 1 8 | fi 9 | 10 | new_tag=${1-} 11 | [[ $new_tag =~ ^v[0-9]*\.[0-9]*\.[0-9]?(-rc[1-9])*$ ]] || ( 12 | echo "Tag must be in the form of vX.Y.Z or vX.Y.Z-rc1" 13 | exit 1 14 | ) 15 | 16 | if [[ $(git symbolic-ref HEAD) != refs/heads/main ]] && [[ -z ${ALLOW_NON_MAIN:-} ]]; then 17 | echo "Must be on main branch" >&2 18 | exit 1 19 | fi 20 | if [[ $(git describe --dirty) != $(git describe) ]]; then 21 | echo "Repo must be in a clean state" >&2 22 | exit 1 23 | fi 24 | 25 | git fetch --all 26 | 27 | last_tag=$(git describe --abbrev=0) 28 | last_tag_commit=$(git rev-list -n1 "$last_tag") 29 | last_specific_tag=$(git tag --contains="$last_tag_commit" | grep -E "^v[0-9]*\.[0-9]*\.[0-9]*$" | tail -n 1) 30 | last_specific_tag_commit=$(git rev-list -n1 "$last_specific_tag") 31 | if [[ $last_specific_tag_commit == $(git rev-list -n1 HEAD) ]]; then 32 | echo "No commits since last tag" >&2 33 | exit 1 34 | fi 35 | 36 | if [[ -n ${SIGN_TAG-} ]]; then 37 | git tag -s -m "${new_tag}" "${new_tag}" &>/dev/null && echo "created signed tag ${new_tag}" >&2 && exit 38 | else 39 | git tag -a -m "${new_tag}" "${new_tag}" &>/dev/null && echo "created annotated tag ${new_tag}" >&2 && exit 40 | fi 41 | -------------------------------------------------------------------------------- /docs/DCO.md: -------------------------------------------------------------------------------- 1 | # DCO Sign Off 2 | 3 | All authors to the project retain copyright to their work. However, to ensure 4 | that they are only submitting work that they have rights to, we are requiring 5 | everyone to acknowledge this by signing their work. 6 | 7 | Since this signature indicates your rights to the contribution and 8 | certifies the statements below, it must contain your real name and 9 | email address. Various forms of noreply email address must not be used. 10 | 11 | Any copyright notices in this repository should specify the authors as "The 12 | project authors". 13 | 14 | To sign your work, just add a line like this at the end of your commit message: 15 | 16 | ```text 17 | Signed-off-by: Jess Owens 18 | ``` 19 | 20 | This can easily be done with the `--signoff` option to `git commit`. 21 | 22 | By doing this you state that you can certify the following (from [https://developercertificate.org/][1]): 23 | 24 | ```text 25 | Developer Certificate of Origin 26 | Version 1.1 27 | 28 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
29 | 1 Letterman Drive 30 | Suite D4700 31 | San Francisco, CA, 94129 32 | 33 | Everyone is permitted to copy and distribute verbatim copies of this 34 | license document, but changing it is not allowed. 35 | 36 | 37 | Developer's Certificate of Origin 1.1 38 | 39 | By making a contribution to this project, I certify that: 40 | 41 | (a) The contribution was created in whole or in part by me and I 42 | have the right to submit it under the open source license 43 | indicated in the file; or 44 | 45 | (b) The contribution is based upon previous work that, to the best 46 | of my knowledge, is covered under an appropriate open source 47 | license and I have the right under that license to submit that 48 | work with modifications, whether created in whole or in part 49 | by me, under the same open source license (unless I am 50 | permitted to submit under a different license), as indicated 51 | in the file; or 52 | 53 | (c) The contribution was provided directly to me by some other 54 | person who certified (a), (b) or (c) and I have not modified 55 | it. 56 | 57 | (d) I understand and agree that this project and the contribution 58 | are public and that a record of the contribution (including all 59 | personal information I submit with it, including my sign-off) is 60 | maintained indefinitely and may be redistributed consistent with 61 | this project or the open source license(s) involved. 62 | ``` 63 | -------------------------------------------------------------------------------- /docs/Workflow.md: -------------------------------------------------------------------------------- 1 | # The Workflow custom resource 2 | 3 | This doc provides details for different parts of the Workflow custom resource. 4 | 5 | ## Spec 6 | 7 | ### BootOptions 8 | 9 | The `spec.bootOptions` object contains optional functionality that will run before a Workflow and triggers handling of different Hardware booting capabilities. 10 | 11 | ## Status 12 | 13 | ### State 14 | 15 | There are several states that a Workflow can be in: 16 | 17 | `STATE_WAITING` - 18 | `STATE_PENDING` - 19 | `STATE_RUNNING` - 20 | `STATE_SUCCESS` - 21 | `STATE_FAILED` - 22 | `STATE_TIMEOUT` - 23 | 24 | ### OneTimeNetboot 25 | 26 | ### TemplateRendering 27 | 28 | ### Conditions 29 | -------------------------------------------------------------------------------- /hack/boilerplate/boilerplate.generatego.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright The Tinkerbell Authors. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | -------------------------------------------------------------------------------- /internal/agent/agent.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "sync" 7 | 8 | "github.com/go-logr/logr" 9 | "github.com/tinkerbell/tink/internal/agent/event" 10 | "github.com/tinkerbell/tink/internal/agent/workflow" 11 | ) 12 | 13 | // Agent is the core data structure for handling workflow execution on target nodes. It leverages 14 | // a Transport and a ContainerRuntime to retrieve workflows and execute actions. 15 | // 16 | // The agent runs a single workflow at a time. Concurrent requests to run workflows will have the 17 | // second workflow rejected with an event.WorkflowRejected event. 18 | type Agent struct { 19 | Log logr.Logger 20 | 21 | // ID is the unique identifier for the agent. It is used by the transport to identify workflows 22 | // scheduled for this agent. 23 | ID string 24 | 25 | // Transport is the transport used by the agent for communicating workflows and events. 26 | Transport Transport 27 | 28 | // Runtime is the container runtime used to execute workflow actions. 29 | Runtime ContainerRuntime 30 | 31 | // sem ensure we handle a single workflow at a time. 32 | sem chan struct{} 33 | 34 | // executionContext tracks the currently executing workflow. 35 | executionContext *executionContext 36 | mtx sync.RWMutex 37 | } 38 | 39 | // Start finalizes the Agent configuration and starts the configured Transport so it is ready 40 | // to receive workflows. On receiving a workflow, it will leverage the configured Runtime to 41 | // execute workflow actions. 42 | func (agent *Agent) Start(ctx context.Context) error { 43 | if agent.ID == "" { 44 | return errors.New("ID field must be set before calling Start()") 45 | } 46 | 47 | if agent.Transport == nil { 48 | return errors.New("Transport field must be set before calling Start()") 49 | } 50 | 51 | if agent.Runtime == nil { 52 | //nolint:stylecheck // Runtime is a field of agent. 53 | return errors.New("Runtime field must be set before calling Start()") 54 | } 55 | 56 | if agent.Log.GetSink() == nil { 57 | agent.Log = logr.Discard() 58 | } 59 | 60 | agent.Log = agent.Log.WithValues("agent_id", agent.ID) 61 | 62 | // Initialize the semaphore and add a resource to it ensuring we can run 1 workflow at a time. 63 | agent.sem = make(chan struct{}, 1) 64 | agent.sem <- struct{}{} 65 | 66 | return agent.Transport.Start(ctx, agent.ID, agent) 67 | } 68 | 69 | // HandleWorkflow satisfies transport. 70 | func (agent *Agent) HandleWorkflow(ctx context.Context, wflw workflow.Workflow, events event.Recorder) { 71 | if agent.sem == nil { 72 | agent.Log.Info("Agent must have Start() called before calling HandleWorkflow()") 73 | } 74 | 75 | select { 76 | case <-agent.sem: 77 | // Ensure we configure the current workflow and cancellation func before we launch the 78 | // goroutine to avoid a race with CancelWorkflow. 79 | agent.mtx.Lock() 80 | defer agent.mtx.Unlock() 81 | 82 | ctx, cancel := context.WithCancel(ctx) 83 | agent.executionContext = &executionContext{ 84 | Workflow: wflw, 85 | Cancel: cancel, 86 | } 87 | 88 | go func() { 89 | // Replenish the semaphore on exit so we can pick up another workflow. 90 | defer func() { agent.sem <- struct{}{} }() 91 | 92 | agent.run(ctx, wflw, events) 93 | 94 | // Nilify the execution context after running so cancellation requests are ignored. 
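// Taking the write lock for this teardown also means CancelWorkflow, which reads the
// execution context under the read lock, never observes it mid-teardown.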
95 | agent.mtx.Lock() 96 | defer agent.mtx.Unlock() 97 | agent.executionContext = nil 98 | }() 99 | 100 | default: 101 | log := agent.Log.WithValues("workflow_id", wflw.ID) 102 | 103 | reject := event.WorkflowRejected{ 104 | ID: wflw.ID, 105 | Message: "workflow already in progress", 106 | } 107 | 108 | if err := events.RecordEvent(ctx, reject); err != nil { 109 | log.Error(err, "Failed to record workflow rejection event") 110 | return 111 | } 112 | 113 | log.Info("Workflow already executing; dropping request") 114 | } 115 | } 116 | 117 | func (agent *Agent) CancelWorkflow(workflowID string) { 118 | agent.mtx.RLock() 119 | defer agent.mtx.RUnlock() 120 | 121 | if agent.executionContext == nil { 122 | agent.Log.Info("No workflow running; ignoring cancellation request", "workflow_id", workflowID) 123 | return 124 | } 125 | 126 | if agent.executionContext.Workflow.ID != workflowID { 127 | agent.Log.Info( 128 | "Incorrect workflow ID in cancellation request; ignoring cancellation request", 129 | "workflow_id", workflowID, 130 | "running_workflow_id", agent.executionContext.Workflow.ID, 131 | ) 132 | return 133 | } 134 | 135 | agent.Log.Info("Cancel workflow", "workflow_id", workflowID) 136 | agent.executionContext.Cancel() 137 | } 138 | 139 | type executionContext struct { 140 | Workflow workflow.Workflow 141 | Cancel context.CancelFunc 142 | } 143 | -------------------------------------------------------------------------------- /internal/agent/event/action.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | import "fmt" 4 | 5 | const ( 6 | ActionStartedName Name = "ActionStarted" 7 | ActionSucceededName Name = "ActionSucceeded" 8 | ActionFailedName Name = "ActionFailed" 9 | ) 10 | 11 | // ActionStarted occurs when an action begins running. 12 | type ActionStarted struct { 13 | ActionID string 14 | WorkflowID string 15 | } 16 | 17 | func (ActionStarted) GetName() Name { 18 | return ActionStartedName 19 | } 20 | 21 | func (e ActionStarted) String() string { 22 | return fmt.Sprintf("workflow=%v action=%v", e.WorkflowID, e.ActionID) 23 | } 24 | 25 | // ActionSucceeded occurs when an action successfully completes. 26 | type ActionSucceeded struct { 27 | ActionID string 28 | WorkflowID string 29 | } 30 | 31 | func (ActionSucceeded) GetName() Name { 32 | return ActionSucceededName 33 | } 34 | 35 | func (e ActionSucceeded) String() string { 36 | return fmt.Sprintf("workflow=%v action=%v", e.WorkflowID, e.ActionID) 37 | } 38 | 39 | // ActionFailed occurs when an action fails to complete. 40 | type ActionFailed struct { 41 | ActionID string 42 | WorkflowID string 43 | Reason string 44 | Message string 45 | } 46 | 47 | func (ActionFailed) GetName() Name { 48 | return ActionFailedName 49 | } 50 | 51 | func (e ActionFailed) String() string { 52 | return fmt.Sprintf("workflow='%v' action='%v' reason='%v'", e.WorkflowID, e.ActionID, e.Reason) 53 | } 54 | -------------------------------------------------------------------------------- /internal/agent/event/error.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // IncompatibleError indicates an event was received that. 
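// A minimal sketch of how a Recorder implementation might return it (illustrative only;
// exampleRecorder and the set of handled events are assumptions, not upstream code):
//
//	func (r exampleRecorder) RecordEvent(_ context.Context, e event.Event) error {
//		switch e.(type) {
//		case event.ActionStarted, event.ActionSucceeded, event.ActionFailed:
//			return nil // supported events are handled here
//		default:
//			return event.IncompatibleError{Event: e}
//		}
//	}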
8 | type IncompatibleError struct { 9 | Event Event 10 | } 11 | 12 | func (e IncompatibleError) Error() string { 13 | return fmt.Sprintf("incompatible event: %v", e.Event.GetName()) 14 | } 15 | -------------------------------------------------------------------------------- /internal/agent/event/event.go: -------------------------------------------------------------------------------- 1 | // Package event describes the event set and an interface for recording events. Events are 2 | // generated as workflows execute. 3 | package event 4 | 5 | import "context" 6 | 7 | // Name is a unique name identifying an event. 8 | type Name string 9 | 10 | // Event is an event generated during execution of a Workflow. Each event in the event package 11 | // implements this interface. Consumers may type switch the Event to the appropriate type for 12 | // event handling. 13 | // 14 | // E.g. 15 | // 16 | // switch ev.(type) { 17 | // case event.ActionStarted: 18 | // // Handle ActionStarted event. 19 | // default: 20 | // // Unsupported event. 21 | // } 22 | type Event interface { 23 | // GetName retrieves the event name. 24 | GetName() Name 25 | 26 | // Force events to reside in this package - see zz_known.go. 27 | isEventFromThisPackage() 28 | } 29 | 30 | // Recorder provides event recording methods. 31 | type Recorder interface { 32 | RecordEvent(context.Context, Event) error 33 | } 34 | -------------------------------------------------------------------------------- /internal/agent/event/fake.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | import "context" 4 | 5 | // NoopRecorder retrieves a nooping fake recorder. 6 | func NoopRecorder() *RecorderMock { 7 | return &RecorderMock{ 8 | RecordEventFunc: func(context.Context, Event) error { return nil }, 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /internal/agent/event/mock.go: -------------------------------------------------------------------------------- 1 | // Code generated by moq; DO NOT EDIT. 2 | // github.com/matryer/moq 3 | 4 | package event 5 | 6 | import ( 7 | "context" 8 | "sync" 9 | ) 10 | 11 | // Ensure, that RecorderMock does implement Recorder. 12 | // If this is not the case, regenerate this file with moq. 13 | var _ Recorder = &RecorderMock{} 14 | 15 | // RecorderMock is a mock implementation of Recorder. 16 | // 17 | // func TestSomethingThatUsesRecorder(t *testing.T) { 18 | // 19 | // // make and configure a mocked Recorder 20 | // mockedRecorder := &RecorderMock{ 21 | // RecordEventFunc: func(contextMoqParam context.Context, event Event) error { 22 | // panic("mock out the RecordEvent method") 23 | // }, 24 | // } 25 | // 26 | // // use mockedRecorder in code that requires Recorder 27 | // // and then make assertions. 28 | // 29 | // } 30 | type RecorderMock struct { 31 | // RecordEventFunc mocks the RecordEvent method. 32 | RecordEventFunc func(contextMoqParam context.Context, event Event) error 33 | 34 | // calls tracks calls to the methods. 35 | calls struct { 36 | // RecordEvent holds details about calls to the RecordEvent method. 37 | RecordEvent []struct { 38 | // ContextMoqParam is the contextMoqParam argument value. 39 | ContextMoqParam context.Context 40 | // Event is the event argument value. 41 | Event Event 42 | } 43 | } 44 | lockRecordEvent sync.RWMutex 45 | } 46 | 47 | // RecordEvent calls RecordEventFunc. 
48 | func (mock *RecorderMock) RecordEvent(contextMoqParam context.Context, event Event) error { 49 | if mock.RecordEventFunc == nil { 50 | panic("RecorderMock.RecordEventFunc: method is nil but Recorder.RecordEvent was just called") 51 | } 52 | callInfo := struct { 53 | ContextMoqParam context.Context 54 | Event Event 55 | }{ 56 | ContextMoqParam: contextMoqParam, 57 | Event: event, 58 | } 59 | mock.lockRecordEvent.Lock() 60 | mock.calls.RecordEvent = append(mock.calls.RecordEvent, callInfo) 61 | mock.lockRecordEvent.Unlock() 62 | return mock.RecordEventFunc(contextMoqParam, event) 63 | } 64 | 65 | // RecordEventCalls gets all the calls that were made to RecordEvent. 66 | // Check the length with: 67 | // 68 | // len(mockedRecorder.RecordEventCalls()) 69 | func (mock *RecorderMock) RecordEventCalls() []struct { 70 | ContextMoqParam context.Context 71 | Event Event 72 | } { 73 | var calls []struct { 74 | ContextMoqParam context.Context 75 | Event Event 76 | } 77 | mock.lockRecordEvent.RLock() 78 | calls = mock.calls.RecordEvent 79 | mock.lockRecordEvent.RUnlock() 80 | return calls 81 | } 82 | -------------------------------------------------------------------------------- /internal/agent/event/workflow.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | const WorkflowRejectedName Name = "WorkflowRejected" 4 | 5 | // WorkflowRejected is generated when a workflow is being rejected by the agent. 6 | type WorkflowRejected struct { 7 | ID string 8 | Message string 9 | } 10 | 11 | func (WorkflowRejected) GetName() Name { 12 | return WorkflowRejectedName 13 | } 14 | 15 | func (e WorkflowRejected) String() string { 16 | return e.Message 17 | } 18 | -------------------------------------------------------------------------------- /internal/agent/event/zz_from_package.go: -------------------------------------------------------------------------------- 1 | package event 2 | 3 | // We want to force events to reside in this package so its clear what events are usable 4 | // with by agent code. We achieve this using a compile time check that ensures all events 5 | // implement an unexported method on the Event interface which is the interface passed around 6 | // by event handling code. 7 | // 8 | // This source file should not contain methods other than the isEventFromThisPackage(). 9 | // 10 | // This code is hand written. 11 | 12 | func (ActionStarted) isEventFromThisPackage() {} 13 | func (ActionSucceeded) isEventFromThisPackage() {} 14 | func (ActionFailed) isEventFromThisPackage() {} 15 | 16 | func (WorkflowRejected) isEventFromThisPackage() {} 17 | -------------------------------------------------------------------------------- /internal/agent/failure/reason.go: -------------------------------------------------------------------------------- 1 | package failure 2 | 3 | import "errors" 4 | 5 | // Reason extracts a failure reason from err. err has a reason if it satisfies the failure reason 6 | // interface: 7 | // 8 | // interface { 9 | // FailureReason() string 10 | // } 11 | // 12 | // If err does not have a reason or FailureReason() returns an empty string, ReasonUnknown is 13 | // returned. 14 | func Reason(err error) (string, bool) { 15 | fr, ok := err.(interface { 16 | FailureReason() string 17 | }) 18 | 19 | if !ok || fr.FailureReason() == "" { 20 | return "", false 21 | } 22 | 23 | return fr.FailureReason(), true 24 | } 25 | 26 | // WithReason decorates err with reason. The reason can be extracted using Reason(). 
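// For example (a minimal sketch; the message and reason strings are illustrative):
//
//	err := failure.WithReason(errors.New("image pull failed"), "PullFailure")
//	if reason, ok := failure.Reason(err); ok {
//		// reason == "PullFailure", err.Error() == "image pull failed"
//	}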
27 | func WithReason(err error, reason string) error { 28 | return withReason{err, reason} 29 | } 30 | 31 | // NewReason creates a new error using message and wraps it with reason. The reason can be 32 | // extracted using Reason(). 33 | func NewReason(message, reason string) error { 34 | return WithReason(errors.New(message), reason) 35 | } 36 | 37 | type withReason struct { 38 | error 39 | reason string 40 | } 41 | 42 | func (e withReason) FailureReason() string { 43 | return e.reason 44 | } 45 | -------------------------------------------------------------------------------- /internal/agent/run.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "context" 5 | "regexp" 6 | "strings" 7 | "time" 8 | 9 | "github.com/go-logr/logr" 10 | "github.com/tinkerbell/tink/internal/agent/event" 11 | "github.com/tinkerbell/tink/internal/agent/failure" 12 | "github.com/tinkerbell/tink/internal/agent/workflow" 13 | ) 14 | 15 | // ReasonRuntimeError is the default reason used when no reason is provided by the runtime. 16 | const ReasonRuntimeError = "RuntimeError" 17 | 18 | // ReasonInvalid indicates a reason provided by the runtime was invalid. 19 | const ReasonInvalid = "InvalidReason" 20 | 21 | // validReasonRegex defines the regex for a valid action failure reason. 22 | var validReasonRegex = regexp.MustCompile(`^[a-zA-Z]+$`) 23 | 24 | // run executes the workflow using the runtime configured on agent. 25 | func (agent *Agent) run(ctx context.Context, wflw workflow.Workflow, events event.Recorder) { 26 | log := agent.Log.WithValues("workflow_id", wflw.ID) 27 | 28 | workflowStart := time.Now() 29 | log.Info("Starting workflow") 30 | 31 | for _, action := range wflw.Actions { 32 | log := log.WithValues("action_id", action.ID, "action_name", action.Name) 33 | 34 | actionStart := time.Now() 35 | log.Info("Starting action") 36 | 37 | started := event.ActionStarted{ 38 | ActionID: action.ID, 39 | WorkflowID: wflw.ID, 40 | } 41 | if err := events.RecordEvent(ctx, started); err != nil { 42 | log.Error(err, "Record action start event") 43 | return 44 | } 45 | 46 | if err := agent.Runtime.Run(ctx, action); err != nil { 47 | reason := extractReason(log, err) 48 | 49 | // We consider newlines in the failure message invalid because it upsets formatting. 50 | // The failure message is vital to easy debugability so we force the string into 51 | // something we're happy with and communicate that. 
52 | message := strings.ReplaceAll(err.Error(), "\n", `\n`) 53 | 54 | log.Info("Action failed; terminating workflow", 55 | "error", err, 56 | "reason", reason, 57 | "duration", time.Since(actionStart).String(), 58 | ) 59 | 60 | failed := event.ActionFailed{ 61 | ActionID: action.ID, 62 | WorkflowID: wflw.ID, 63 | Reason: reason, 64 | Message: message, 65 | } 66 | if err := events.RecordEvent(ctx, failed); err != nil { 67 | log.Error(err, "Record failed action event", "event", failed) 68 | } 69 | 70 | return 71 | } 72 | 73 | succeed := event.ActionSucceeded{ 74 | ActionID: action.ID, 75 | WorkflowID: wflw.ID, 76 | } 77 | if err := events.RecordEvent(ctx, succeed); err != nil { 78 | log.Error(err, "Record succeeded action event") 79 | return 80 | } 81 | 82 | log.Info("Finished action", "duration", time.Since(actionStart).String()) 83 | } 84 | 85 | log.Info("Finished workflow", "duration", time.Since(workflowStart).String()) 86 | } 87 | 88 | func extractReason(log logr.Logger, err error) string { 89 | reason := ReasonRuntimeError 90 | if r, ok := failure.Reason(err); ok { 91 | reason = r 92 | if !validReasonRegex.MatchString(reason) { 93 | log.Info( 94 | "Received invalid reason for action failure; using InvalidReason", 95 | "invalid_reason", reason, 96 | ) 97 | reason = ReasonInvalid 98 | } 99 | } 100 | return reason 101 | } 102 | -------------------------------------------------------------------------------- /internal/agent/runtime.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/tinkerbell/tink/internal/agent/workflow" 7 | ) 8 | 9 | // ContainerRuntime is a runtime capable of executing workflow actions. 10 | type ContainerRuntime interface { 11 | // Run executes the action. The runtime should mount the following files for the action 12 | // implementation to communicate a reason and message in the event of failure: 13 | // 14 | // /tinkerbell/failure-reason 15 | // /tinkerbell/failure-message 16 | // 17 | // The reason and message should be communicataed via the returned error. The message should 18 | // be the error message and the reason should be provided as defined in failure.Reason(). 19 | Run(context.Context, workflow.Action) error 20 | } 21 | -------------------------------------------------------------------------------- /internal/agent/runtime/MACOS_TESTING.md: -------------------------------------------------------------------------------- 1 | # MacOS Testing 2 | 3 | When developing on MacOS it may be necessary to create a symlink to `/var/run/docker.sock`. First, 4 | validate `/var/run/docker.sock` does not exist. If it does not exist, verify the socket exists at 5 | `$HOME/.docker/run/docker.sock` and create a symlink. 6 | 7 | ``` 8 | sudo ln -s $HOME/.docker/run/docker.sock /var/run/docker.sock 9 | ``` -------------------------------------------------------------------------------- /internal/agent/runtime/action_failure.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | const ( 4 | // ReasonMountPath is the path used by Actions to write their failure reasons. 5 | ReasonMountPath = "/tinkerbell/failure-reason" 6 | 7 | // MessageMountPath is the path used by Actions to write their failure message. 
8 | MessageMountPath = "/tinkerbell/failure-message" 9 | ) 10 | -------------------------------------------------------------------------------- /internal/agent/runtime/fake.go: -------------------------------------------------------------------------------- 1 | package runtime 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-logr/logr" 7 | "github.com/tinkerbell/tink/internal/agent" 8 | "github.com/tinkerbell/tink/internal/agent/workflow" 9 | ) 10 | 11 | var _ agent.ContainerRuntime = Fake{} 12 | 13 | func Noop() Fake { 14 | return Fake{ 15 | Log: logr.Discard(), 16 | } 17 | } 18 | 19 | // Fake is a runtime that always succeeds. It does not literally execute any actions. 20 | type Fake struct { 21 | Log logr.Logger 22 | } 23 | 24 | // Run satisfies agent.ContainerRuntime. 25 | func (f Fake) Run(_ context.Context, a workflow.Action) error { 26 | f.Log.Info("Starting fake container", "action", a) 27 | return nil 28 | } 29 | -------------------------------------------------------------------------------- /internal/agent/runtime/internal/failure_files.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "os" 7 | "strings" 8 | 9 | "github.com/tinkerbell/tink/internal/agent/failure" 10 | ) 11 | 12 | // NewFailureFiles creates a new FailureFiles instance with isolated underlying files. Consumers 13 | // are responsible for calling FailureFiles.Close(). 14 | func NewFailureFiles() (*FailureFiles, error) { 15 | reason, err := os.CreateTemp("", "failure-reason-*") 16 | if err != nil { 17 | return nil, err 18 | } 19 | 20 | message, err := os.CreateTemp("", "failure-message-*") 21 | if err != nil { 22 | return nil, err 23 | } 24 | 25 | return &FailureFiles{ 26 | reason: reason, 27 | message: message, 28 | }, nil 29 | } 30 | 31 | // FailureFiles provides mountable files for runtimes that can be used to extract 32 | // a reason and message from actions. 33 | type FailureFiles struct { 34 | reason *os.File 35 | message *os.File 36 | } 37 | 38 | // Close closes all files tracked by f. 39 | func (f *FailureFiles) Close() error { 40 | os.Remove(f.reason.Name()) 41 | os.Remove(f.message.Name()) 42 | return nil 43 | } 44 | 45 | // ReasonPath returns the path for the reason file. 46 | func (f *FailureFiles) ReasonPath() string { 47 | return f.reason.Name() 48 | } 49 | 50 | // Reason retrieves the reason from the reason file. 51 | func (f *FailureFiles) Reason() (string, error) { 52 | // Always seek back to the original point. If this fails, assume the file is missing and so 53 | // any further interactions will also receive errors. 54 | defer func() { 55 | _, _ = f.reason.Seek(0, 0) 56 | }() 57 | 58 | var reason bytes.Buffer 59 | if _, err := reason.ReadFrom(f.reason); err != nil { 60 | return "", err 61 | } 62 | return strings.TrimRight(reason.String(), "\n"), nil 63 | } 64 | 65 | // MessagePath returns the path for the message file. 66 | func (f *FailureFiles) MessagePath() string { 67 | return f.message.Name() 68 | } 69 | 70 | // Message retrieves the message from the message file. 71 | func (f *FailureFiles) Message() (string, error) { 72 | // Always seek back to the original point. If this fails, assume the file is missing and so 73 | // any further interactions will also receive errors. 
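// Rewinding to offset 0 lets the file be read again on later calls, e.g. when Message is called
// directly and then again indirectly via ToError.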
74 | defer func() { 75 | _, _ = f.message.Seek(0, 0) 76 | }() 77 | 78 | var message bytes.Buffer 79 | if _, err := message.ReadFrom(f.message); err != nil { 80 | return "", err 81 | } 82 | return strings.TrimRight(message.String(), "\n"), nil 83 | } 84 | 85 | func (f *FailureFiles) ToError() error { 86 | // Always seek back to the original point. If this fails, assume the file is missing and so 87 | // any further interactions will also receive errors. 88 | defer func() { 89 | _, _ = f.reason.Seek(0, 0) 90 | _, _ = f.message.Seek(0, 0) 91 | }() 92 | 93 | message, err := f.Message() 94 | if err != nil { 95 | return fmt.Errorf("read failure message: %w", err) 96 | } 97 | 98 | reason, err := f.Reason() 99 | if err != nil { 100 | return fmt.Errorf("read failure reason: %w", err) 101 | } 102 | 103 | return failure.NewReason(message, reason) 104 | } 105 | -------------------------------------------------------------------------------- /internal/agent/runtime/internal/failure_files_test.go: -------------------------------------------------------------------------------- 1 | package internal_test 2 | 3 | import ( 4 | "fmt" 5 | "io" 6 | "os" 7 | "testing" 8 | 9 | "github.com/tinkerbell/tink/internal/agent/failure" 10 | "github.com/tinkerbell/tink/internal/agent/runtime/internal" 11 | ) 12 | 13 | func TestFailureFiles(t *testing.T) { 14 | ff, err := internal.NewFailureFiles() 15 | if err != nil { 16 | t.Fatalf("Could not create failure files: %v", err) 17 | } 18 | 19 | expectMessage := "my special message" 20 | expectReason := "MyReason" 21 | 22 | fh, err := os.OpenFile(ff.MessagePath(), os.O_RDWR, 0) 23 | if err != nil { 24 | t.Fatalf("Could not open message file: %v", err) 25 | } 26 | defer fh.Close() 27 | if _, err := io.WriteString(fh, expectMessage); err != nil { 28 | t.Fatalf("Couldn't write to message file: %v", err) 29 | } 30 | 31 | fh, err = os.OpenFile(ff.ReasonPath(), os.O_RDWR, 0) 32 | if err != nil { 33 | t.Fatalf("Could not open reason file: %v", err) 34 | } 35 | defer fh.Close() 36 | if _, err := io.WriteString(fh, expectReason); err != nil { 37 | t.Fatalf("Couldn't write to reason file: %v", err) 38 | } 39 | 40 | // Read the individual messages and ensure they match. 41 | receivedMessage, err := ff.Message() 42 | if err != nil { 43 | t.Fatalf("Could not retrieve message: %v", err) 44 | } 45 | if receivedMessage != expectMessage { 46 | t.Fatalf("Expected: %v; Received: %v", expectMessage, receivedMessage) 47 | } 48 | 49 | receivedReason, err := ff.Reason() 50 | if err != nil { 51 | t.Fatalf("Could not retrieve message: %v", err) 52 | } 53 | if receivedReason != expectReason { 54 | t.Fatalf("Expected: %v; Received: %v", expectReason, receivedReason) 55 | } 56 | 57 | // Convert to an error and ensure we can extract using the failure package. 58 | toErr := ff.ToError() 59 | 60 | fmt.Printf("%T %v\n", toErr, toErr.Error()) 61 | 62 | receivedReason, ok := failure.Reason(toErr) 63 | if !ok { 64 | t.Fatalf("Expected a reason that could be extracted with failure package, received none") 65 | } 66 | if receivedReason != expectReason { 67 | t.Fatalf("Expected: %v; Received: %v", receivedReason, expectReason) 68 | } 69 | 70 | if toErr.Error() != expectMessage { 71 | t.Fatalf("Expected: %v; Received: %v", expectMessage, toErr.Error()) 72 | } 73 | 74 | // Close the files and ensure they've been deleted. 
75 | ff.Close() 76 | 77 | _, err = os.Stat(ff.MessagePath()) 78 | switch { 79 | case err == nil: 80 | t.Fatal("Expected os.Stat error but received none") 81 | case !os.IsNotExist(err): 82 | t.Fatalf("Expected not exists path error, received '%v'", err) 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /internal/agent/runtime/runtime.go: -------------------------------------------------------------------------------- 1 | // Package runtime contains runtime implementations that can execute workflow actions. They are 2 | // responsible for extracting workflow failure reasons and messages from the action 3 | // file system at the following locations: 4 | // 5 | // /tinkerbell/failure-reason 6 | // /tinkerbell/failure-message 7 | package runtime 8 | -------------------------------------------------------------------------------- /internal/agent/transport.go: -------------------------------------------------------------------------------- 1 | package agent 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/tinkerbell/tink/internal/agent/transport" 7 | ) 8 | 9 | // Transport is a transport mechanism for communicating workflows to the agent. 10 | type Transport interface { 11 | // Start is a blocking call that starts the transport and begins retrieving workflows for the 12 | // given agentID. The transport should pass workflows to the Handler. The transport 13 | // should block until it's told to cancel via the context. 14 | Start(_ context.Context, agentID string, _ transport.WorkflowHandler) error 15 | } 16 | -------------------------------------------------------------------------------- /internal/agent/transport/fake.go: -------------------------------------------------------------------------------- 1 | package transport 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-logr/logr" 7 | "github.com/tinkerbell/tink/internal/agent/event" 8 | "github.com/tinkerbell/tink/internal/agent/workflow" 9 | ) 10 | 11 | func Noop() Fake { 12 | return Fake{ 13 | Log: logr.Discard(), 14 | } 15 | } 16 | 17 | type Fake struct { 18 | Log logr.Logger 19 | Workflows []workflow.Workflow 20 | } 21 | 22 | func (f Fake) Start(ctx context.Context, _ string, handler WorkflowHandler) error { 23 | f.Log.Info("Starting fake transport") 24 | for _, w := range f.Workflows { 25 | handler.HandleWorkflow(ctx, w, f) 26 | } 27 | return nil 28 | } 29 | 30 | func (f Fake) RecordEvent(_ context.Context, e event.Event) error { 31 | f.Log.Info("Recording event", "event", e.GetName()) 32 | return nil 33 | } 34 | -------------------------------------------------------------------------------- /internal/agent/transport/file.go: -------------------------------------------------------------------------------- 1 | package transport 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "path/filepath" 7 | 8 | "github.com/go-logr/logr" 9 | "github.com/tinkerbell/tink/internal/agent/event" 10 | "github.com/tinkerbell/tink/internal/agent/workflow" 11 | "gopkg.in/yaml.v3" 12 | ) 13 | 14 | // File is a transport implementation that executes a single workflow stored as a file. 15 | type File struct { 16 | // Log is a logger for debugging. 17 | Log logr.Logger 18 | 19 | // Path to the workflow to run. 20 | Path string 21 | } 22 | 23 | // Start reads the single workflow stored at f.Path, parses it, and hands it off to the handler. 24 | // The workflow is handed to the handler exactly once per call to Start, after which Start 25 | // returns.
26 | func (f *File) Start(ctx context.Context, _ string, handler WorkflowHandler) error { 27 | path, err := filepath.Abs(f.Path) 28 | if err != nil { 29 | return err 30 | } 31 | 32 | fh, err := os.Open(path) 33 | if err != nil { 34 | return err 35 | } 36 | 37 | var wrkflow workflow.Workflow 38 | if err := yaml.NewDecoder(fh).Decode(&wrkflow); err != nil { 39 | return err 40 | } 41 | 42 | handler.HandleWorkflow(ctx, wrkflow, f) 43 | 44 | return nil 45 | } 46 | 47 | func (f *File) RecordEvent(_ context.Context, e event.Event) error { 48 | // Noop because we don't particularly care about events for File based transports. Maybe 49 | // we'll record this in a dedicated file one day. 50 | f.Log.Info("Recording event", "event", e.GetName()) 51 | return nil 52 | } 53 | -------------------------------------------------------------------------------- /internal/agent/transport/file_test.go: -------------------------------------------------------------------------------- 1 | package transport_test 2 | 3 | import ( 4 | "context" 5 | "testing" 6 | "time" 7 | 8 | "github.com/go-logr/zerologr" 9 | "github.com/google/go-cmp/cmp" 10 | "github.com/rs/zerolog" 11 | "github.com/tinkerbell/tink/internal/agent/event" 12 | "github.com/tinkerbell/tink/internal/agent/transport" 13 | "github.com/tinkerbell/tink/internal/agent/workflow" 14 | ) 15 | 16 | func TestFile(t *testing.T) { 17 | logger := zerolog.New(zerolog.NewConsoleWriter()) 18 | 19 | expect := workflow.Workflow{ 20 | ID: "test-workflow-id", 21 | Actions: []workflow.Action{ 22 | { 23 | ID: "test-action-1", 24 | Name: "my test action", 25 | Image: "docker.io/hub/alpine", 26 | Cmd: "sh -c", 27 | Args: []string{"echo", "action 1"}, 28 | Env: map[string]string{"foo": "bar"}, 29 | Volumes: []string{"mount:/foo/bar:ro"}, 30 | NetworkNamespace: "custom-namespace", 31 | }, 32 | { 33 | ID: "test-action-2", 34 | Name: "my test action", 35 | Image: "docker.io/hub/alpine", 36 | Cmd: "sh -c", 37 | Args: []string{"echo", "action 2"}, 38 | Env: map[string]string{"foo": "bar"}, 39 | Volumes: []string{"mount:/foo/bar:ro"}, 40 | NetworkNamespace: "custom-namespace", 41 | }, 42 | }, 43 | } 44 | 45 | ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) 46 | defer cancel() 47 | 48 | handler := &transport.WorkflowHandlerMock{ 49 | HandleWorkflowFunc: func(_ context.Context, workflow workflow.Workflow, _ event.Recorder) { 50 | if !cmp.Equal(expect, workflow) { 51 | t.Fatalf("Workflow diff:\n%v", cmp.Diff(expect, workflow)) 52 | } 53 | }, 54 | } 55 | 56 | f := transport.File{ 57 | Log: zerologr.New(&logger), 58 | Path: "./testdata/workflow.yml", 59 | } 60 | 61 | err := f.Start(ctx, "agent_id", handler) 62 | if err != nil { 63 | t.Fatal(err) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /internal/agent/transport/grpc_test.go: -------------------------------------------------------------------------------- 1 | package transport_test 2 | 3 | import ( 4 | "context" 5 | "io" 6 | "sync" 7 | "testing" 8 | 9 | "github.com/go-logr/zerologr" 10 | "github.com/rs/zerolog" 11 | "github.com/tinkerbell/tink/internal/agent/event" 12 | "github.com/tinkerbell/tink/internal/agent/transport" 13 | "github.com/tinkerbell/tink/internal/agent/workflow" 14 | workflowproto "github.com/tinkerbell/tink/internal/proto/workflow/v2" 15 | "google.golang.org/grpc" 16 | ) 17 | 18 | func TestGRPC(t *testing.T) { 19 | logger := zerolog.New(zerolog.NewConsoleWriter()) 20 | type streamResponse struct { 21 | Workflow 
*workflowproto.GetWorkflowsResponse 22 | Error error 23 | } 24 | responses := make(chan streamResponse, 2) 25 | responses <- streamResponse{ 26 | Workflow: &workflowproto.GetWorkflowsResponse{ 27 | Cmd: &workflowproto.GetWorkflowsResponse_StartWorkflow_{ 28 | StartWorkflow: &workflowproto.GetWorkflowsResponse_StartWorkflow{ 29 | Workflow: &workflowproto.Workflow{}, 30 | }, 31 | }, 32 | }, 33 | } 34 | responses <- streamResponse{ 35 | Error: io.EOF, 36 | } 37 | 38 | stream := &workflowproto.WorkflowService_GetWorkflowsClientMock{ 39 | RecvFunc: func() (*workflowproto.GetWorkflowsResponse, error) { 40 | r, ok := <-responses 41 | if !ok { 42 | return nil, io.EOF 43 | } 44 | return r.Workflow, r.Error 45 | }, 46 | ContextFunc: context.Background, 47 | } 48 | client := &workflowproto.WorkflowServiceClientMock{ 49 | GetWorkflowsFunc: func(_ context.Context, _ *workflowproto.GetWorkflowsRequest, _ ...grpc.CallOption) (workflowproto.WorkflowService_GetWorkflowsClient, error) { 50 | return stream, nil 51 | }, 52 | } 53 | 54 | var wg sync.WaitGroup 55 | wg.Add(1) 56 | handler := &transport.WorkflowHandlerMock{ 57 | HandleWorkflowFunc: func(_ context.Context, _ workflow.Workflow, _ event.Recorder) { 58 | defer wg.Done() 59 | close(responses) 60 | }, 61 | } 62 | 63 | g := transport.NewGRPC(zerologr.New(&logger), client) 64 | 65 | err := g.Start(context.Background(), "id", handler) 66 | if err != nil { 67 | t.Fatal(err) 68 | } 69 | 70 | wg.Wait() 71 | } 72 | -------------------------------------------------------------------------------- /internal/agent/transport/handler.go: -------------------------------------------------------------------------------- 1 | package transport 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/tinkerbell/tink/internal/agent/event" 7 | "github.com/tinkerbell/tink/internal/agent/workflow" 8 | ) 9 | 10 | // WorkflowHandler is responsible for workflow execution. 11 | type WorkflowHandler interface { 12 | // HandleWorkflow executes the given workflow. The event.Recorder can be used to publish events 13 | // as the workflow transits its lifecycle. HandleWorkflow should not block and should be efficient 14 | // in handing off workflow processing. 15 | HandleWorkflow(context.Context, workflow.Workflow, event.Recorder) 16 | 17 | // CancelWorkflow cancels a workflow identified by workflowID. It should not block and should 18 | // be efficient in handing off the cancellation request. 19 | CancelWorkflow(workflowID string) 20 | } 21 | -------------------------------------------------------------------------------- /internal/agent/transport/mock.go: -------------------------------------------------------------------------------- 1 | // Code generated by moq; DO NOT EDIT. 2 | // github.com/matryer/moq 3 | 4 | package transport 5 | 6 | import ( 7 | "context" 8 | "sync" 9 | 10 | "github.com/tinkerbell/tink/internal/agent/event" 11 | "github.com/tinkerbell/tink/internal/agent/workflow" 12 | ) 13 | 14 | // Ensure, that WorkflowHandlerMock does implement WorkflowHandler. 15 | // If this is not the case, regenerate this file with moq. 16 | var _ WorkflowHandler = &WorkflowHandlerMock{} 17 | 18 | // WorkflowHandlerMock is a mock implementation of WorkflowHandler. 
19 | // 20 | // func TestSomethingThatUsesWorkflowHandler(t *testing.T) { 21 | // 22 | // // make and configure a mocked WorkflowHandler 23 | // mockedWorkflowHandler := &WorkflowHandlerMock{ 24 | // CancelWorkflowFunc: func(workflowID string) { 25 | // panic("mock out the CancelWorkflow method") 26 | // }, 27 | // HandleWorkflowFunc: func(contextMoqParam context.Context, workflowMoqParam workflow.Workflow, recorder event.Recorder) { 28 | // panic("mock out the HandleWorkflow method") 29 | // }, 30 | // } 31 | // 32 | // // use mockedWorkflowHandler in code that requires WorkflowHandler 33 | // // and then make assertions. 34 | // 35 | // } 36 | type WorkflowHandlerMock struct { 37 | // CancelWorkflowFunc mocks the CancelWorkflow method. 38 | CancelWorkflowFunc func(workflowID string) 39 | 40 | // HandleWorkflowFunc mocks the HandleWorkflow method. 41 | HandleWorkflowFunc func(contextMoqParam context.Context, workflowMoqParam workflow.Workflow, recorder event.Recorder) 42 | 43 | // calls tracks calls to the methods. 44 | calls struct { 45 | // CancelWorkflow holds details about calls to the CancelWorkflow method. 46 | CancelWorkflow []struct { 47 | // WorkflowID is the workflowID argument value. 48 | WorkflowID string 49 | } 50 | // HandleWorkflow holds details about calls to the HandleWorkflow method. 51 | HandleWorkflow []struct { 52 | // ContextMoqParam is the contextMoqParam argument value. 53 | ContextMoqParam context.Context 54 | // WorkflowMoqParam is the workflowMoqParam argument value. 55 | WorkflowMoqParam workflow.Workflow 56 | // Recorder is the recorder argument value. 57 | Recorder event.Recorder 58 | } 59 | } 60 | lockCancelWorkflow sync.RWMutex 61 | lockHandleWorkflow sync.RWMutex 62 | } 63 | 64 | // CancelWorkflow calls CancelWorkflowFunc. 65 | func (mock *WorkflowHandlerMock) CancelWorkflow(workflowID string) { 66 | if mock.CancelWorkflowFunc == nil { 67 | panic("WorkflowHandlerMock.CancelWorkflowFunc: method is nil but WorkflowHandler.CancelWorkflow was just called") 68 | } 69 | callInfo := struct { 70 | WorkflowID string 71 | }{ 72 | WorkflowID: workflowID, 73 | } 74 | mock.lockCancelWorkflow.Lock() 75 | mock.calls.CancelWorkflow = append(mock.calls.CancelWorkflow, callInfo) 76 | mock.lockCancelWorkflow.Unlock() 77 | mock.CancelWorkflowFunc(workflowID) 78 | } 79 | 80 | // CancelWorkflowCalls gets all the calls that were made to CancelWorkflow. 81 | // Check the length with: 82 | // 83 | // len(mockedWorkflowHandler.CancelWorkflowCalls()) 84 | func (mock *WorkflowHandlerMock) CancelWorkflowCalls() []struct { 85 | WorkflowID string 86 | } { 87 | var calls []struct { 88 | WorkflowID string 89 | } 90 | mock.lockCancelWorkflow.RLock() 91 | calls = mock.calls.CancelWorkflow 92 | mock.lockCancelWorkflow.RUnlock() 93 | return calls 94 | } 95 | 96 | // HandleWorkflow calls HandleWorkflowFunc. 
97 | func (mock *WorkflowHandlerMock) HandleWorkflow(contextMoqParam context.Context, workflowMoqParam workflow.Workflow, recorder event.Recorder) { 98 | if mock.HandleWorkflowFunc == nil { 99 | panic("WorkflowHandlerMock.HandleWorkflowFunc: method is nil but WorkflowHandler.HandleWorkflow was just called") 100 | } 101 | callInfo := struct { 102 | ContextMoqParam context.Context 103 | WorkflowMoqParam workflow.Workflow 104 | Recorder event.Recorder 105 | }{ 106 | ContextMoqParam: contextMoqParam, 107 | WorkflowMoqParam: workflowMoqParam, 108 | Recorder: recorder, 109 | } 110 | mock.lockHandleWorkflow.Lock() 111 | mock.calls.HandleWorkflow = append(mock.calls.HandleWorkflow, callInfo) 112 | mock.lockHandleWorkflow.Unlock() 113 | mock.HandleWorkflowFunc(contextMoqParam, workflowMoqParam, recorder) 114 | } 115 | 116 | // HandleWorkflowCalls gets all the calls that were made to HandleWorkflow. 117 | // Check the length with: 118 | // 119 | // len(mockedWorkflowHandler.HandleWorkflowCalls()) 120 | func (mock *WorkflowHandlerMock) HandleWorkflowCalls() []struct { 121 | ContextMoqParam context.Context 122 | WorkflowMoqParam workflow.Workflow 123 | Recorder event.Recorder 124 | } { 125 | var calls []struct { 126 | ContextMoqParam context.Context 127 | WorkflowMoqParam workflow.Workflow 128 | Recorder event.Recorder 129 | } 130 | mock.lockHandleWorkflow.RLock() 131 | calls = mock.calls.HandleWorkflow 132 | mock.lockHandleWorkflow.RUnlock() 133 | return calls 134 | } 135 | -------------------------------------------------------------------------------- /internal/agent/transport/testdata/workflow.yml: -------------------------------------------------------------------------------- 1 | id: "test-workflow-id" 2 | actions: 3 | - id: "test-action-1" 4 | name: "my test action" 5 | image: "docker.io/hub/alpine" 6 | cmd: "sh -c" 7 | args: ["echo", "action 1"] 8 | env: 9 | foo: bar 10 | volumes: 11 | - mount:/foo/bar:ro 12 | networkNamespace: "custom-namespace" 13 | - id: "test-action-2" 14 | name: "my test action" 15 | image: "docker.io/hub/alpine" 16 | cmd: "sh -c" 17 | args: ["echo", "action 2"] 18 | env: 19 | foo: bar 20 | volumes: 21 | - mount:/foo/bar:ro 22 | networkNamespace: "custom-namespace" 23 | -------------------------------------------------------------------------------- /internal/agent/transport/transport.go: -------------------------------------------------------------------------------- 1 | // Package transport contains data structures that implement agent transport capabilities. 2 | // transport implementations are responsible for connecting to the Tink server and retrieving 3 | // workflows for the agent to run. 4 | package transport 5 | -------------------------------------------------------------------------------- /internal/agent/workflow/workflow.go: -------------------------------------------------------------------------------- 1 | // Package workflow contains workflow domain objects. The domain objects will be moved to 2 | // /internal/workflow at a later date when they are required/we transition to the new codebase. 3 | package workflow 4 | 5 | // Workflow represents a runnable workflow for the Handler. 6 | type Workflow struct { 7 | // Do we need a workflow name? Does that even come down in the proto definition? 8 | ID string `yaml:"id"` 9 | Actions []Action `yaml:"actions"` 10 | } 11 | 12 | func (w Workflow) String() string { 13 | return w.ID 14 | } 15 | 16 | // Action represents an individually runnable action. 
17 | type Action struct { 18 | ID string `yaml:"id"` 19 | Name string `yaml:"name"` 20 | Image string `yaml:"image"` 21 | Cmd string `yaml:"cmd"` 22 | Args []string `yaml:"args"` 23 | Env map[string]string `yaml:"env"` 24 | Volumes []string `yaml:"volumes"` 25 | NetworkNamespace string `yaml:"networkNamespace"` 26 | } 27 | 28 | func (a Action) String() string { 29 | // We should consider normalizing the action name and combining it with the ID. It would 30 | // make human identification easier. Alternatively, we could have a dedicated method for 31 | // retrieving names. 32 | return a.ID 33 | } 34 | -------------------------------------------------------------------------------- /internal/cli/agent.go: -------------------------------------------------------------------------------- 1 | package cli 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/go-logr/zapr" 7 | "github.com/spf13/cobra" 8 | "github.com/tinkerbell/tink/internal/agent" 9 | "github.com/tinkerbell/tink/internal/agent/runtime" 10 | "github.com/tinkerbell/tink/internal/agent/transport" 11 | "github.com/tinkerbell/tink/internal/proto/workflow/v2" 12 | "go.uber.org/zap" 13 | "google.golang.org/grpc" 14 | ) 15 | 16 | // NewAgent builds a command that launches the agent component. 17 | func NewAgent() *cobra.Command { 18 | var opts struct { 19 | AgentID string 20 | TinkServerAddr string 21 | } 22 | 23 | // TODO(chrisdoherty4) Handle signals 24 | cmd := cobra.Command{ 25 | Use: "tink-agent", 26 | RunE: func(cmd *cobra.Command, _ []string) error { 27 | zl, err := zap.NewProduction() 28 | if err != nil { 29 | return fmt.Errorf("init logger: %w", err) 30 | } 31 | logger := zapr.NewLogger(zl) 32 | 33 | rntime, err := runtime.NewDocker() 34 | if err != nil { 35 | return fmt.Errorf("create runtime: %w", err) 36 | } 37 | 38 | conn, err := grpc.NewClient(opts.TinkServerAddr) 39 | if err != nil { 40 | return fmt.Errorf("dial tink server: %w", err) 41 | } 42 | defer conn.Close() 43 | trnport := transport.NewGRPC(logger, workflow.NewWorkflowServiceClient(conn)) 44 | 45 | return (&agent.Agent{ 46 | Log: logger, 47 | ID: opts.AgentID, 48 | Transport: trnport, 49 | Runtime: rntime, 50 | }).Start(cmd.Context()) 51 | }, 52 | } 53 | 54 | flgs := cmd.Flags() 55 | flgs.StringVar(&opts.AgentID, "agent-id", "", "An ID that uniquely identifies the agent instance") 56 | flgs.StringVar(&opts.TinkServerAddr, "tink-server-addr", "127.0.0.1:42113", "Tink server address") 57 | 58 | return &cmd 59 | } 60 | -------------------------------------------------------------------------------- /internal/client/client.go: -------------------------------------------------------------------------------- 1 | package client 2 | 3 | import ( 4 | "crypto/tls" 5 | 6 | "github.com/pkg/errors" 7 | "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" 8 | "google.golang.org/grpc" 9 | "google.golang.org/grpc/credentials" 10 | "google.golang.org/grpc/credentials/insecure" 11 | ) 12 | 13 | func NewClientConn(authority string, tlsEnabled bool, tlsInsecure bool) (*grpc.ClientConn, error) { 14 | var creds grpc.DialOption 15 | if tlsEnabled { // #nosec G402 16 | creds = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{InsecureSkipVerify: tlsInsecure})) 17 | } else { 18 | creds = grpc.WithTransportCredentials(insecure.NewCredentials()) 19 | } 20 | 21 | conn, err := grpc.NewClient(authority, creds, grpc.WithStatsHandler(otelgrpc.NewClientHandler())) 22 | if err != nil { 23 | return nil, errors.Wrap(err, "dial tinkerbell server") 24 | } 25 | 26 | return 
conn, nil 27 | } 28 | -------------------------------------------------------------------------------- /internal/deprecated/controller/manager.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "fmt" 5 | 6 | rufio "github.com/tinkerbell/rufio/api/v1alpha1" 7 | "github.com/tinkerbell/tink/api/v1alpha1" 8 | "github.com/tinkerbell/tink/internal/deprecated/workflow" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 11 | "k8s.io/client-go/rest" 12 | ctrl "sigs.k8s.io/controller-runtime" 13 | "sigs.k8s.io/controller-runtime/pkg/healthz" 14 | ) 15 | 16 | var schemeBuilder = runtime.NewSchemeBuilder( 17 | clientgoscheme.AddToScheme, 18 | v1alpha1.AddToScheme, 19 | rufio.AddToScheme, 20 | ) 21 | 22 | // DefaultScheme returns a scheme with all the types necessary for the tink controller. 23 | func DefaultScheme() *runtime.Scheme { 24 | s := runtime.NewScheme() 25 | _ = schemeBuilder.AddToScheme(s) 26 | return s 27 | } 28 | 29 | // NewManager creates a new controller manager with tink controller controllers pre-registered. 30 | // If opts.Scheme is nil, DefaultScheme() is used. 31 | func NewManager(cfg *rest.Config, opts ctrl.Options) (ctrl.Manager, error) { 32 | if opts.Scheme == nil { 33 | opts.Scheme = DefaultScheme() 34 | } 35 | 36 | mgr, err := ctrl.NewManager(cfg, opts) 37 | if err != nil { 38 | return nil, fmt.Errorf("controller manager: %w", err) 39 | } 40 | 41 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 42 | return nil, fmt.Errorf("set up health check: %w", err) 43 | } 44 | 45 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 46 | return nil, fmt.Errorf("set up ready check: %w", err) 47 | } 48 | 49 | err = workflow.NewReconciler(mgr.GetClient()).SetupWithManager(mgr) 50 | if err != nil { 51 | return nil, fmt.Errorf("setup workflow reconciler: %w", err) 52 | } 53 | 54 | return mgr, nil 55 | } 56 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/convert.go: -------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | import ( 4 | "fmt" 5 | "sort" 6 | 7 | "github.com/tinkerbell/tink/api/v1alpha1" 8 | "github.com/tinkerbell/tink/internal/proto" 9 | ) 10 | 11 | func ToWorkflowContext(wf *v1alpha1.Workflow) *proto.WorkflowContext { 12 | if wf == nil { 13 | return nil 14 | } 15 | return &proto.WorkflowContext{ 16 | WorkflowId: wf.GetName(), 17 | CurrentWorker: wf.GetCurrentWorker(), 18 | CurrentTask: wf.GetCurrentTask(), 19 | CurrentAction: wf.GetCurrentAction(), 20 | CurrentActionIndex: int64(wf.GetCurrentActionIndex()), 21 | CurrentActionState: proto.State(proto.State_value[string(wf.GetCurrentActionState())]), 22 | TotalNumberOfActions: int64(wf.GetTotalNumberOfActions()), 23 | } 24 | } 25 | 26 | func YAMLToStatus(wf *Workflow) *v1alpha1.WorkflowStatus { 27 | if wf == nil { 28 | return nil 29 | } 30 | tasks := []v1alpha1.Task{} 31 | for _, task := range wf.Tasks { 32 | actions := []v1alpha1.Action{} 33 | for _, action := range task.Actions { 34 | actions = append(actions, v1alpha1.Action{ 35 | Name: action.Name, 36 | Image: action.Image, 37 | Timeout: action.Timeout, 38 | Command: action.Command, 39 | Volumes: action.Volumes, 40 | Status: v1alpha1.WorkflowState(proto.State_name[int32(proto.State_STATE_PENDING)]), 41 | Environment: action.Environment, 42 | Pid: action.Pid, 43 | }) 44 | } 45 | tasks = append(tasks, 
v1alpha1.Task{ 46 | Name: task.Name, 47 | WorkerAddr: task.WorkerAddr, 48 | Volumes: task.Volumes, 49 | Environment: task.Environment, 50 | Actions: actions, 51 | }) 52 | } 53 | return &v1alpha1.WorkflowStatus{ 54 | GlobalTimeout: int64(wf.GlobalTimeout), 55 | Tasks: tasks, 56 | } 57 | } 58 | 59 | func ActionListCRDToProto(wf *v1alpha1.Workflow) *proto.WorkflowActionList { 60 | if wf == nil { 61 | return nil 62 | } 63 | resp := &proto.WorkflowActionList{ 64 | ActionList: []*proto.WorkflowAction{}, 65 | } 66 | for _, task := range wf.Status.Tasks { 67 | for _, action := range task.Actions { 68 | resp.ActionList = append(resp.ActionList, &proto.WorkflowAction{ 69 | TaskName: task.Name, 70 | Name: action.Name, 71 | Image: action.Image, 72 | Timeout: action.Timeout, 73 | Command: action.Command, 74 | WorkerId: task.WorkerAddr, 75 | Volumes: append(task.Volumes, action.Volumes...), 76 | // TODO: (micahhausler) Dedupe task volume targets overridden in the action volumes? 77 | // Also not sure how Docker handles nested mounts (ex: "/foo:/foo" and "/bar:/foo/bar") 78 | Environment: func(env map[string]string) []string { 79 | resp := []string{} 80 | merged := map[string]string{} 81 | for k, v := range env { 82 | merged[k] = v 83 | } 84 | for k, v := range action.Environment { 85 | merged[k] = v 86 | } 87 | for k, v := range merged { 88 | resp = append(resp, fmt.Sprintf("%s=%s", k, v)) 89 | } 90 | sort.Strings(resp) 91 | return resp 92 | }(task.Environment), 93 | Pid: action.Pid, 94 | }) 95 | } 96 | } 97 | return resp 98 | } 99 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/hardware.go: -------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/tinkerbell/tink/api/v1alpha1" 8 | "github.com/tinkerbell/tink/internal/ptr" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | ) 12 | 13 | // setAllowPXE sets the allowPXE field on the hardware network interfaces. 14 | // If hardware is nil then it will be retrieved using the client. 15 | // The hardware object will be updated in the cluster. 16 | func setAllowPXE(ctx context.Context, cc client.Client, w *v1alpha1.Workflow, h *v1alpha1.Hardware, allowPXE bool) error { 17 | if h == nil && w == nil { 18 | return fmt.Errorf("both workflow and hardware cannot be nil") 19 | } 20 | if h == nil { 21 | h = &v1alpha1.Hardware{} 22 | if err := cc.Get(ctx, client.ObjectKey{Name: w.Spec.HardwareRef, Namespace: w.Namespace}, h); err != nil { 23 | return fmt.Errorf("hardware not found: name=%v; namespace=%v, error: %w", w.Spec.HardwareRef, w.Namespace, err) 24 | } 25 | } 26 | 27 | for _, iface := range h.Spec.Interfaces { 28 | iface.Netboot.AllowPXE = ptr.Bool(allowPXE) 29 | } 30 | 31 | if err := cc.Update(ctx, h); err != nil { 32 | return fmt.Errorf("error updating allow pxe: %w", err) 33 | } 34 | 35 | return nil 36 | } 37 | 38 | // hardwareFrom retrieves the in cluster hardware object defined in the given workflow. 
39 | func hardwareFrom(ctx context.Context, cc client.Client, w *v1alpha1.Workflow) (*v1alpha1.Hardware, error) { 40 | if w == nil { 41 | return nil, fmt.Errorf("workflow is nil") 42 | } 43 | if w.Spec.HardwareRef == "" { 44 | return nil, fmt.Errorf("hardware ref is empty") 45 | } 46 | h := &v1alpha1.Hardware{} 47 | if err := cc.Get(ctx, client.ObjectKey{Name: w.Spec.HardwareRef, Namespace: w.Namespace}, h); err != nil { 48 | return nil, fmt.Errorf("hardware not found: name=%v; namespace=%v, error: %w", w.Spec.HardwareRef, w.Namespace, err) 49 | } 50 | 51 | return h, nil 52 | } 53 | 54 | // toggleHardware toggles the allowPXE field on the hardware network interfaces. 55 | // It is idempotent and uses the Workflow.Status.BootOptionsStatus.AllowNetboot fields for idempotent checks. 56 | // This function will update the Workflow status. 57 | func (s *state) toggleHardware(ctx context.Context, allowPXE bool) error { 58 | // 1. check if we've already set the allowPXE field to the desired value 59 | // 2. if not, set the allowPXE field to the desired value 60 | // 3. return a WorkflowCondition with the result of the operation 61 | 62 | hw, err := hardwareFrom(ctx, s.client, s.workflow) 63 | if err != nil { 64 | s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ 65 | Type: v1alpha1.ToggleAllowNetbootTrue, 66 | Status: metav1.ConditionFalse, 67 | Reason: "Error", 68 | Message: fmt.Sprintf("error getting hardware: %v", err), 69 | Time: &metav1.Time{Time: metav1.Now().UTC()}, 70 | }) 71 | 72 | return err 73 | } 74 | 75 | if allowPXE { 76 | if s.workflow.Status.BootOptions.AllowNetboot.ToggledTrue { 77 | return nil 78 | } 79 | if err := setAllowPXE(ctx, s.client, s.workflow, hw, allowPXE); err != nil { 80 | s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ 81 | Type: v1alpha1.ToggleAllowNetbootTrue, 82 | Status: metav1.ConditionFalse, 83 | Reason: "Error", 84 | Message: fmt.Sprintf("error setting allowPXE to %v: %v", allowPXE, err), 85 | Time: &metav1.Time{Time: metav1.Now().UTC()}, 86 | }) 87 | return err 88 | } 89 | s.workflow.Status.BootOptions.AllowNetboot.ToggledTrue = true 90 | s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ 91 | Type: v1alpha1.ToggleAllowNetbootTrue, 92 | Status: metav1.ConditionTrue, 93 | Reason: "Complete", 94 | Message: fmt.Sprintf("set allowPXE to %v", allowPXE), 95 | Time: &metav1.Time{Time: metav1.Now().UTC()}, 96 | }) 97 | return nil 98 | } 99 | 100 | if s.workflow.Status.BootOptions.AllowNetboot.ToggledFalse { 101 | return nil 102 | } 103 | if err := setAllowPXE(ctx, s.client, s.workflow, hw, allowPXE); err != nil { 104 | s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ 105 | Type: v1alpha1.ToggleAllowNetbootFalse, 106 | Status: metav1.ConditionFalse, 107 | Reason: "Error", 108 | Message: fmt.Sprintf("error setting allowPXE to %v: %v", allowPXE, err), 109 | Time: &metav1.Time{Time: metav1.Now().UTC()}, 110 | }) 111 | return err 112 | } 113 | s.workflow.Status.BootOptions.AllowNetboot.ToggledFalse = true 114 | s.workflow.Status.SetCondition(v1alpha1.WorkflowCondition{ 115 | Type: v1alpha1.ToggleAllowNetbootFalse, 116 | Status: metav1.ConditionTrue, 117 | Reason: "Complete", 118 | Message: fmt.Sprintf("set allowPXE to %v", allowPXE), 119 | Time: &metav1.Time{Time: metav1.Now().UTC()}, 120 | }) 121 | return nil 122 | } 123 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/journal/journal.go: -------------------------------------------------------------------------------- 1 
| package journal 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "log/slog" 7 | "path/filepath" 8 | "runtime" 9 | "strings" 10 | "time" 11 | ) 12 | 13 | type CtxKey string 14 | 15 | const Name CtxKey = "journal" 16 | 17 | type Entry struct { 18 | Msg string `json:"msg"` 19 | Args map[string]any `json:"args,omitempty"` 20 | Source slog.Source `json:"source"` 21 | Time string `json:"time"` 22 | } 23 | 24 | // New creates a slice of Entries in the provided context. 25 | func New(ctx context.Context) context.Context { 26 | e := &[]Entry{} 27 | return context.WithValue(ctx, Name, e) 28 | } 29 | 30 | // Log adds a new Entry to the journal in the provided context. 31 | // Log is not thread-safe. 32 | func Log(ctx context.Context, msg string, args ...any) { 33 | t := time.Now().UTC().Format(time.RFC3339Nano) 34 | m := make(map[string]any) 35 | for i := 0; i < len(args); i += 2 { 36 | k, ok := args[i].(string) 37 | if !ok { 38 | k = fmt.Sprintf("%v", args[i]) 39 | } 40 | m[k] = args[i+1] 41 | } 42 | e, ok := ctx.Value(Name).(*[]Entry) 43 | if !ok { 44 | return 45 | } 46 | *e = append(*e, Entry{Msg: msg, Args: m, Source: fileAndLine(), Time: t}) 47 | } 48 | 49 | // Journal returns the journal from the provided context. 50 | func Journal(ctx context.Context) []Entry { 51 | e, ok := ctx.Value(Name).(*[]Entry) 52 | if !ok { 53 | return nil 54 | } 55 | return *e 56 | } 57 | 58 | func fileAndLine() slog.Source { 59 | pc, file, line, _ := runtime.Caller(2) 60 | fn := runtime.FuncForPC(pc) 61 | var fnName string 62 | if fn == nil { 63 | fnName = "?()" 64 | } else { 65 | fnName = strings.TrimLeft(filepath.Ext(fn.Name()), ".") + "()" 66 | } 67 | 68 | return slog.Source{ 69 | Function: fnName, 70 | File: filepath.Base(file), 71 | Line: line, 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/journal/journal_test.go: -------------------------------------------------------------------------------- 1 | package journal 2 | 3 | import ( 4 | "context" 5 | "log/slog" 6 | "testing" 7 | 8 | "github.com/google/go-cmp/cmp" 9 | "github.com/google/go-cmp/cmp/cmpopts" 10 | ) 11 | 12 | func TestJournal(t *testing.T) { 13 | type input struct { 14 | msg string 15 | args []any 16 | } 17 | tests := map[string]struct { 18 | want []Entry 19 | inputs []input 20 | }{ 21 | "empty": { 22 | want: []Entry{}, 23 | }, 24 | "single": { 25 | want: []Entry{ 26 | { 27 | Msg: "one", 28 | Args: map[string]any{"key": "value"}, 29 | Source: slog.Source{ 30 | File: "journal_test.go", 31 | Function: "func1()", 32 | }, 33 | }, 34 | }, 35 | inputs: []input{ 36 | {msg: "one", args: []any{"key", "value"}}, 37 | }, 38 | }, 39 | "non normal key": { 40 | want: []Entry{ 41 | { 42 | Msg: "msg", 43 | Args: map[string]any{"1.1": "value"}, 44 | Source: slog.Source{ 45 | File: "journal_test.go", 46 | Function: "func1()", 47 | }, 48 | }, 49 | }, 50 | inputs: []input{ 51 | {msg: "msg", args: []any{1.1, "value"}}, 52 | }, 53 | }, 54 | } 55 | 56 | for name, tc := range tests { 57 | t.Run(name, func(t *testing.T) { 58 | ctx := New(context.Background()) 59 | for _, input := range tc.inputs { 60 | Log(ctx, input.msg, input.args...) 
61 | } 62 | got := Journal(ctx) 63 | if diff := cmp.Diff(tc.want, got, cmpopts.IgnoreFields(Entry{}, "Time"), cmpopts.IgnoreFields(slog.Source{}, "Line")); diff != "" { 64 | t.Errorf("unexpected journal (-want +got):\n%s", diff) 65 | } 66 | }) 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/post.go: -------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/pkg/errors" 8 | rufio "github.com/tinkerbell/rufio/api/v1alpha1" 9 | "github.com/tinkerbell/tink/api/v1alpha1" 10 | "github.com/tinkerbell/tink/internal/deprecated/workflow/journal" 11 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 12 | ) 13 | 14 | func (s *state) postActions(ctx context.Context) (reconcile.Result, error) { 15 | // 1. Handle toggling allowPXE in a hardware object if toggleAllowNetboot is true. 16 | if s.workflow.Spec.BootOptions.ToggleAllowNetboot && !s.workflow.Status.BootOptions.AllowNetboot.ToggledFalse { 17 | journal.Log(ctx, "toggling allowPXE false") 18 | if err := s.toggleHardware(ctx, false); err != nil { 19 | return reconcile.Result{}, err 20 | } 21 | } 22 | 23 | // 2. Handle ISO eject scenario. 24 | if s.workflow.Spec.BootOptions.BootMode == v1alpha1.BootModeISO { 25 | name := jobName(fmt.Sprintf("%s-%s", jobNameISOEject, s.workflow.GetName())) 26 | if j := s.workflow.Status.BootOptions.Jobs[name.String()]; !j.ExistingJobDeleted || j.UID == "" || !j.Complete { 27 | journal.Log(ctx, "boot mode iso") 28 | if s.workflow.Spec.BootOptions.ISOURL == "" { 29 | return reconcile.Result{}, errors.New("iso url must be a valid url") 30 | } 31 | actions := []rufio.Action{ 32 | { 33 | VirtualMediaAction: &rufio.VirtualMediaAction{ 34 | MediaURL: "", // empty to unmount/eject the media 35 | Kind: rufio.VirtualMediaCD, 36 | }, 37 | }, 38 | } 39 | 40 | r, err := s.handleJob(ctx, actions, name) 41 | if s.workflow.Status.BootOptions.Jobs[name.String()].Complete { 42 | s.workflow.Status.State = v1alpha1.WorkflowStateSuccess 43 | } 44 | return r, err 45 | } 46 | } 47 | 48 | s.workflow.Status.State = v1alpha1.WorkflowStateSuccess 49 | return reconcile.Result{}, nil 50 | } 51 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/pre.go: -------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "github.com/pkg/errors" 8 | rufio "github.com/tinkerbell/rufio/api/v1alpha1" 9 | "github.com/tinkerbell/tink/api/v1alpha1" 10 | "github.com/tinkerbell/tink/internal/deprecated/workflow/journal" 11 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 12 | ) 13 | 14 | // prepareWorkflow prepares the workflow for execution. 15 | // The workflow (s.workflow) can be updated even if an error occurs. 16 | // Any patching of the workflow object in a cluster is left up to the caller. 17 | // At the moment prepareWorkflow requires the workflow have a hardwareRef and the object exists. 18 | func (s *state) prepareWorkflow(ctx context.Context) (reconcile.Result, error) { 19 | // handle bootoptions 20 | // 1. Handle toggling allowPXE in a hardware object if toggleAllowNetboot is true. 
21 | if s.workflow.Spec.BootOptions.ToggleAllowNetboot && !s.workflow.Status.BootOptions.AllowNetboot.ToggledTrue { 22 | journal.Log(ctx, "toggling allowPXE true") 23 | if err := s.toggleHardware(ctx, true); err != nil { 24 | return reconcile.Result{}, err 25 | } 26 | } 27 | 28 | // 2. Handle booting scenarios. 29 | switch s.workflow.Spec.BootOptions.BootMode { 30 | case v1alpha1.BootModeNetboot: 31 | name := jobName(fmt.Sprintf("%s-%s", jobNameNetboot, s.workflow.GetName())) 32 | if j := s.workflow.Status.BootOptions.Jobs[name.String()]; !j.ExistingJobDeleted || j.UID == "" || !j.Complete { 33 | journal.Log(ctx, "boot mode netboot") 34 | hw, err := hardwareFrom(ctx, s.client, s.workflow) 35 | if err != nil { 36 | return reconcile.Result{}, errors.Wrap(err, "failed to get hardware") 37 | } 38 | efiBoot := func() bool { 39 | for _, iface := range hw.Spec.Interfaces { 40 | if iface.DHCP != nil && iface.DHCP.UEFI { 41 | return true 42 | } 43 | } 44 | return false 45 | }() 46 | actions := []rufio.Action{ 47 | { 48 | PowerAction: rufio.PowerHardOff.Ptr(), 49 | }, 50 | { 51 | OneTimeBootDeviceAction: &rufio.OneTimeBootDeviceAction{ 52 | Devices: []rufio.BootDevice{ 53 | rufio.PXE, 54 | }, 55 | EFIBoot: efiBoot, 56 | }, 57 | }, 58 | { 59 | PowerAction: rufio.PowerOn.Ptr(), 60 | }, 61 | } 62 | 63 | r, err := s.handleJob(ctx, actions, name) 64 | if s.workflow.Status.BootOptions.Jobs[name.String()].Complete && s.workflow.Status.State == v1alpha1.WorkflowStatePreparing { 65 | s.workflow.Status.State = v1alpha1.WorkflowStatePending 66 | } 67 | return r, err 68 | } 69 | case v1alpha1.BootModeISO: 70 | name := jobName(fmt.Sprintf("%s-%s", jobNameISOMount, s.workflow.GetName())) 71 | if j := s.workflow.Status.BootOptions.Jobs[name.String()]; !j.ExistingJobDeleted || j.UID == "" || !j.Complete { 72 | journal.Log(ctx, "boot mode iso") 73 | if s.workflow.Spec.BootOptions.ISOURL == "" { 74 | return reconcile.Result{}, errors.New("iso url must be a valid url") 75 | } 76 | hw, err := hardwareFrom(ctx, s.client, s.workflow) 77 | if err != nil { 78 | return reconcile.Result{}, errors.Wrap(err, "failed to get hardware") 79 | } 80 | efiBoot := func() bool { 81 | for _, iface := range hw.Spec.Interfaces { 82 | if iface.DHCP != nil && iface.DHCP.UEFI { 83 | return true 84 | } 85 | } 86 | return false 87 | }() 88 | actions := []rufio.Action{ 89 | { 90 | PowerAction: rufio.PowerHardOff.Ptr(), 91 | }, 92 | { 93 | VirtualMediaAction: &rufio.VirtualMediaAction{ 94 | MediaURL: "", // empty to unmount/eject the media 95 | Kind: rufio.VirtualMediaCD, 96 | }, 97 | }, 98 | { 99 | VirtualMediaAction: &rufio.VirtualMediaAction{ 100 | MediaURL: s.workflow.Spec.BootOptions.ISOURL, 101 | Kind: rufio.VirtualMediaCD, 102 | }, 103 | }, 104 | { 105 | OneTimeBootDeviceAction: &rufio.OneTimeBootDeviceAction{ 106 | Devices: []rufio.BootDevice{ 107 | rufio.CDROM, 108 | }, 109 | EFIBoot: efiBoot, 110 | }, 111 | }, 112 | { 113 | PowerAction: rufio.PowerOn.Ptr(), 114 | }, 115 | } 116 | 117 | r, err := s.handleJob(ctx, actions, name) 118 | if s.workflow.Status.BootOptions.Jobs[name.String()].Complete && s.workflow.Status.State == v1alpha1.WorkflowStatePreparing { 119 | s.workflow.Status.State = v1alpha1.WorkflowStatePending 120 | } 121 | return r, err 122 | } 123 | } 124 | s.workflow.Status.State = v1alpha1.WorkflowStatePending 125 | 126 | return reconcile.Result{}, nil 127 | } 128 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/template_funcs.go: 
-------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | // templateFuncs defines the custom functions available to workflow templates. 9 | var templateFuncs = map[string]interface{}{ 10 | "formatPartition": formatPartition, 11 | } 12 | 13 | // formatPartition formats a device path with its partition number for the device type. If it 14 | // receives an unrecognized device path it returns dev unchanged. 15 | // 16 | // Examples 17 | // 18 | // formatPartition("/dev/nvme0n1", 1) -> /dev/nvme0n1p1 19 | // formatPartition("/dev/sda", 1) -> /dev/sda1 20 | // formatPartition("/dev/vda", 2) -> /dev/vda2 21 | func formatPartition(dev string, partition int) string { 22 | switch { 23 | case strings.HasPrefix(dev, "/dev/nvme"): 24 | return fmt.Sprintf("%vp%v", dev, partition) 25 | case strings.HasPrefix(dev, "/dev/sd"), 26 | strings.HasPrefix(dev, "/dev/vd"), 27 | strings.HasPrefix(dev, "/dev/xvd"), 28 | strings.HasPrefix(dev, "/dev/hd"): 29 | return fmt.Sprintf("%v%v", dev, partition) 30 | } 31 | return dev 32 | } 33 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/template_validator.go: -------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "text/template" 7 | 8 | "github.com/Masterminds/sprig/v3" 9 | "github.com/distribution/reference" 10 | "github.com/pkg/errors" 11 | "gopkg.in/yaml.v3" 12 | ) 13 | 14 | const ( 15 | errInvalidLength = "name cannot be empty or have more than 200 characters: %s" 16 | errTemplateParsing = "failed to parse template with ID %s" 17 | ) 18 | 19 | // parse parses the template yaml content into a Workflow. 20 | func parse(yamlContent []byte) (*Workflow, error) { 21 | var workflow Workflow 22 | if err := yaml.Unmarshal(yamlContent, &workflow); err != nil { 23 | // The yamlContent is normally quite large but is invaluable in debugging. 24 | return &Workflow{}, errors.Wrapf(err, "parsing yaml data, content: %s", yamlContent) 25 | } 26 | 27 | if err := validate(&workflow); err != nil { 28 | return &Workflow{}, errors.Wrap(err, "validating workflow template") 29 | } 30 | 31 | return &workflow, nil 32 | } 33 | 34 | // renderTemplateHardware renders the workflow template against the hardware data and returns the parsed Workflow. 35 | func renderTemplateHardware(templateID, templateData string, hardware map[string]interface{}) (*Workflow, error) { 36 | t := template.New("workflow-template"). 37 | Option("missingkey=error"). 38 | Funcs(sprig.FuncMap()). 39 | Funcs(templateFuncs) 40 | 41 | _, err := t.Parse(templateData) 42 | if err != nil { 43 | err = errors.Wrapf(err, errTemplateParsing, templateID) 44 | return nil, err 45 | } 46 | 47 | var buf bytes.Buffer 48 | if err := t.Execute(&buf, hardware); err != nil { 49 | err = errors.Wrapf(err, errTemplateParsing, templateID) 50 | return nil, err 51 | } 52 | 53 | wf, err := parse(buf.Bytes()) 54 | if err != nil { 55 | return nil, err 56 | } 57 | 58 | for _, task := range wf.Tasks { 59 | if task.WorkerAddr == "" { 60 | return nil, fmt.Errorf("failed to render template, empty hardware address (%v)", hardware) 61 | } 62 | } 63 | 64 | return wf, nil 65 | } 66 | 67 | // validate validates a workflow template against certain requirements.
68 | func validate(wf *Workflow) error { 69 | if !hasValidLength(wf.Name) { 70 | return errors.Errorf(errInvalidLength, wf.Name) 71 | } 72 | 73 | if len(wf.Tasks) == 0 { 74 | return errors.New("template must have at least one task defined") 75 | } 76 | 77 | taskNameMap := make(map[string]struct{}) 78 | for _, task := range wf.Tasks { 79 | if !hasValidLength(task.Name) { 80 | return errors.Errorf(errInvalidLength, task.Name) 81 | } 82 | 83 | if _, ok := taskNameMap[task.Name]; ok { 84 | return errors.Errorf("two tasks in a template cannot have same name (%s)", task.Name) 85 | } 86 | 87 | taskNameMap[task.Name] = struct{}{} 88 | actionNameMap := make(map[string]struct{}) 89 | for _, action := range task.Actions { 90 | if !hasValidLength(action.Name) { 91 | return errors.Errorf(errInvalidLength, action.Name) 92 | } 93 | 94 | if err := validateImageName(action.Image); err != nil { 95 | return errors.Errorf("invalid action image (%s): %v", action.Image, err) 96 | } 97 | 98 | _, ok := actionNameMap[action.Name] 99 | if ok { 100 | return errors.Errorf("two actions in a task cannot have same name: %s", action.Name) 101 | } 102 | actionNameMap[action.Name] = struct{}{} 103 | } 104 | } 105 | return nil 106 | } 107 | 108 | func hasValidLength(name string) bool { 109 | return len(name) > 0 && len(name) < 200 110 | } 111 | 112 | func validateImageName(name string) error { 113 | _, err := reference.ParseNormalizedNamed(name) 114 | return err 115 | } 116 | -------------------------------------------------------------------------------- /internal/deprecated/workflow/types.go: -------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | // Workflow represents a workflow to be executed. 4 | type Workflow struct { 5 | Version string `yaml:"version"` 6 | Name string `yaml:"name"` 7 | ID string `yaml:"id"` 8 | GlobalTimeout int `yaml:"global_timeout"` 9 | Tasks []Task `yaml:"tasks"` 10 | } 11 | 12 | // Task represents a task to be executed as part of a workflow. 13 | type Task struct { 14 | Name string `yaml:"name"` 15 | WorkerAddr string `yaml:"worker"` 16 | Actions []Action `yaml:"actions"` 17 | Volumes []string `yaml:"volumes,omitempty"` 18 | Environment map[string]string `yaml:"environment,omitempty"` 19 | } 20 | 21 | // Action is the basic executional unit for a workflow. 
22 | type Action struct { 23 | Name string `yaml:"name"` 24 | Image string `yaml:"image"` 25 | Timeout int64 `yaml:"timeout"` 26 | Command []string `yaml:"command,omitempty"` 27 | OnTimeout []string `yaml:"on-timeout,omitempty"` 28 | OnFailure []string `yaml:"on-failure,omitempty"` 29 | Volumes []string `yaml:"volumes,omitempty"` 30 | Environment map[string]string `yaml:"environment,omitempty"` 31 | Pid string `yaml:"pid,omitempty"` 32 | } 33 | -------------------------------------------------------------------------------- /internal/e2e/testdata/01/hardware.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Hardware 3 | metadata: 4 | name: "sm01" 5 | namespace: default 6 | spec: 7 | metadata: 8 | facility: 9 | facility_code: onprem 10 | manufacturer: 11 | slug: supermicro 12 | instance: 13 | userdata: "" 14 | hostname: "sm01" 15 | id: "3c:ec:ef:4c:4f:54" 16 | operating_system: 17 | distro: "ubuntu" 18 | os_slug: "ubuntu_20_04" 19 | version: "20.04" 20 | storage: 21 | disks: 22 | - device: /dev/nvme0n1 23 | partitions: 24 | - label: ROOT 25 | number: 1 26 | size: 0 27 | wipe_table: true 28 | interfaces: 29 | - dhcp: 30 | arch: x86_64 31 | hostname: sm01 32 | ip: 33 | address: 172.16.10.100 34 | gateway: 172.16.10.1 35 | netmask: 255.255.255.0 36 | lease_time: 86400 37 | mac: 3c:ec:ef:4c:4f:54 38 | name_servers: 39 | - 172.16.10.1 40 | - 10.1.1.11 41 | uefi: true 42 | netboot: 43 | allowPXE: true 44 | allowWorkflow: true 45 | -------------------------------------------------------------------------------- /internal/e2e/testdata/01/template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Template 3 | metadata: 4 | name: debian 5 | namespace: default 6 | spec: 7 | data: | 8 | version: "0.1" 9 | name: debian 10 | global_timeout: 1800 11 | tasks: 12 | - name: "os-installation" 13 | worker: "{{.device_1}}" 14 | volumes: 15 | - /dev:/dev 16 | - /dev/console:/dev/console 17 | - /lib/firmware:/lib/firmware:ro 18 | actions: 19 | - name: "stream-debian-image" 20 | image: quay.io/tinkerbell-actions/image2disk:v1.0.0 21 | timeout: 600 22 | environment: 23 | DEST_DISK: /dev/nvme0n1 24 | # Hegel IP 25 | IMG_URL: "http://10.1.1.11:8080/debian-10-openstack-amd64.raw.gz" 26 | COMPRESSED: true 27 | - name: "add-tink-cloud-init-config" 28 | image: writefile:v1.0.0 29 | timeout: 90 30 | environment: 31 | DEST_DISK: /dev/nvme0n1p1 32 | FS_TYPE: ext4 33 | DEST_PATH: /etc/cloud/cloud.cfg.d/10_tinkerbell.cfg 34 | UID: 0 35 | GID: 0 36 | MODE: 0600 37 | DIRMODE: 0700 38 | CONTENTS: | 39 | datasource: 40 | Ec2: 41 | # Hegel IP 42 | #metadata_urls: ["http://10.1.1.11:50061"] 43 | strict_id: false 44 | system_info: 45 | default_user: 46 | name: tink 47 | groups: [wheel, adm, sudo] 48 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 49 | shell: /bin/bash 50 | users: 51 | - name: tink 52 | sudo: ["ALL=(ALL) NOPASSWD:ALL"] 53 | warnings: 54 | dsid_missing_source: off 55 | - name: "add-tink-cloud-init-ds-config" 56 | image: writefile:v1.0.0 57 | timeout: 90 58 | environment: 59 | DEST_DISK: /dev/nvme0n1p1 60 | FS_TYPE: ext4 61 | DEST_PATH: /etc/cloud/ds-identify.cfg 62 | UID: 0 63 | GID: 0 64 | MODE: 0600 65 | DIRMODE: 0700 66 | CONTENTS: | 67 | datasource: Ec2 68 | - name: "kexec-debian" 69 | image: quay.io/tinkerbell-actions/kexec:v1.0.1 70 | timeout: 90 71 | pid: host 72 | environment: 73 | BLOCK_DEVICE: /dev/nvme0n1p1 74 | FS_TYPE: ext4 75 | 
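The template above is only half of the picture: placeholders such as `{{.device_1}}` are filled in from the workflow's `hardwareMap` (see `workflow.yaml` below) using the text/template rendering shown earlier in `template_validator.go`. A minimal, self-contained sketch of that substitution, with an illustrative map value, might look like this:

```go
package main

import (
	"bytes"
	"fmt"
	"text/template"

	"github.com/Masterminds/sprig/v3"
)

func main() {
	// Illustrative fragment of a template's data; a real template is the full YAML above.
	data := `worker: "{{.device_1}}"`

	// Mirror the options used by renderTemplateHardware: fail on missing keys, include sprig funcs.
	t := template.Must(template.New("workflow-template").
		Option("missingkey=error").
		Funcs(sprig.FuncMap()).
		Parse(data))

	var buf bytes.Buffer
	// The map plays the role of the workflow's hardwareMap.
	if err := t.Execute(&buf, map[string]interface{}{"device_1": "3c:ec:ef:4c:4f:54"}); err != nil {
		panic(err)
	}

	fmt.Println(buf.String()) // worker: "3c:ec:ef:4c:4f:54"
}
```

Because the template is parsed with `missingkey=error`, a placeholder with no matching `hardwareMap` entry fails the render rather than silently producing an empty worker address.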
-------------------------------------------------------------------------------- /internal/e2e/testdata/01/workflow.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Workflow 3 | metadata: 4 | name: wf1 5 | namespace: default 6 | spec: 7 | templateRef: debian 8 | hardwareMap: 9 | device_1: 3c:ec:ef:4c:4f:54 10 | -------------------------------------------------------------------------------- /internal/e2e/testdata/02/hardware1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Hardware 3 | metadata: 4 | name: "hardware1" 5 | namespace: default 6 | spec: 7 | metadata: 8 | facility: 9 | facility_code: onprem 10 | manufacturer: 11 | slug: supermicro 12 | instance: 13 | userdata: "" 14 | hostname: "sm01" 15 | id: "3c:ec:ef:4c:4f:54" 16 | operating_system: 17 | distro: "ubuntu" 18 | os_slug: "ubuntu_20_04" 19 | version: "20.04" 20 | storage: 21 | disks: 22 | - device: /dev/nvme0n1 23 | partitions: 24 | - label: ROOT 25 | number: 1 26 | size: 0 27 | wipe_table: true 28 | interfaces: 29 | - dhcp: 30 | arch: x86_64 31 | hostname: sm01 32 | ip: 33 | address: 172.16.10.100 34 | gateway: 172.16.10.1 35 | netmask: 255.255.255.0 36 | lease_time: 86400 37 | mac: 3c:ec:ef:4c:4f:54 38 | name_servers: 39 | - 172.16.10.1 40 | - 10.1.1.11 41 | uefi: true 42 | netboot: 43 | allowPXE: true 44 | allowWorkflow: true 45 | -------------------------------------------------------------------------------- /internal/e2e/testdata/02/template1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Template 3 | metadata: 4 | name: template1 5 | namespace: default 6 | spec: 7 | data: | 8 | version: "0.1" 9 | name: debian 10 | global_timeout: 1800 11 | tasks: 12 | - name: "os-installation" 13 | worker: "{{.device_1}}" 14 | volumes: 15 | - /dev:/dev 16 | - /dev/console:/dev/console 17 | - /lib/firmware:/lib/firmware:ro 18 | actions: 19 | - name: "stream-image" 20 | image: quay.io/tinkerbell-actions/image2disk:v1.0.0 21 | timeout: 600 22 | environment: 23 | DEST_DISK: /dev/nvme0n1 24 | # Hegel IP 25 | IMG_URL: "http://10.1.1.11:8080/debian-10-openstack-amd64.raw.gz" 26 | COMPRESSED: true 27 | - name: "add-tink-cloud-init-ds-config" 28 | image: writefile:v1.0.0 29 | timeout: 90 30 | environment: 31 | DEST_DISK: /dev/nvme0n1p1 32 | FS_TYPE: ext4 33 | DEST_PATH: /etc/cloud/ds-identify.cfg 34 | UID: 0 35 | GID: 0 36 | MODE: 0600 37 | DIRMODE: 0700 38 | CONTENTS: | 39 | datasource: Ec2 40 | - name: "kexec-debian" 41 | image: quay.io/tinkerbell-actions/kexec:v1.0.1 42 | timeout: 90 43 | pid: host 44 | environment: 45 | BLOCK_DEVICE: /dev/nvme0n1p1 46 | FS_TYPE: ext4 47 | -------------------------------------------------------------------------------- /internal/e2e/testdata/02/template2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Template 3 | metadata: 4 | name: template2 5 | namespace: default 6 | spec: 7 | data: | 8 | version: "0.1" 9 | name: template1 10 | global_timeout: 1800 11 | tasks: 12 | - name: "bmc-setup" 13 | worker: "{{.device_2}}" 14 | volumes: 15 | - /dev:/dev 16 | - /dev/console:/dev/console 17 | - /lib/firmware:/lib/firmware:ro 18 | actions: 19 | - name: "setup-netboot" 20 | image: quay.io/tinkerbell-actions/pbnj:v1.0.0 21 | timeout: 60 22 | environment: 23 | 
NET_BOOT: IPXE 24 | MACHINE: "{{.device_1}}" 25 | - name: "power-on" 26 | image: quay.io/tinkerbell-actions/pbnj:v1.0.0 27 | timeout: 60 28 | environment: 29 | POWER: ON 30 | MACHINE: "{{.device_1}}" 31 | - name: "os-installation" 32 | worker: "{{.device_1}}" 33 | volumes: 34 | - /dev:/dev 35 | - /dev/console:/dev/console 36 | - /lib/firmware:/lib/firmware:ro 37 | actions: 38 | - name: "stream-debian-image" 39 | image: quay.io/tinkerbell-actions/image2disk:v1.0.0 40 | timeout: 600 41 | environment: 42 | DEST_DISK: /dev/nvme0n1 43 | # Hegel IP 44 | IMG_URL: "http://10.1.1.11:8080/debian-10-openstack-amd64.raw.gz" 45 | COMPRESSED: true 46 | - name: "add-tink-cloud-init-ds-config" 47 | image: writefile:v1.0.0 48 | timeout: 90 49 | environment: 50 | DEST_DISK: /dev/nvme0n1p1 51 | FS_TYPE: ext4 52 | DEST_PATH: /etc/cloud/ds-identify.cfg 53 | UID: 0 54 | GID: 0 55 | MODE: 0600 56 | DIRMODE: 0700 57 | CONTENTS: | 58 | datasource: Ec2 59 | - name: "kexec-debian" 60 | image: quay.io/tinkerbell-actions/kexec:v1.0.1 61 | timeout: 90 62 | pid: host 63 | environment: 64 | BLOCK_DEVICE: /dev/nvme0n1p1 65 | FS_TYPE: ext4 66 | -------------------------------------------------------------------------------- /internal/e2e/testdata/02/template3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Template 3 | metadata: 4 | name: template3 5 | namespace: default 6 | spec: 7 | data: | 8 | version: "0.1" 9 | name: ubuntu 10 | global_timeout: 1800 11 | tasks: 12 | - name: "task-1" 13 | worker: "{{.device_1}}" 14 | volumes: 15 | - /dev:/dev 16 | - /dev/console:/dev/console 17 | - /lib/firmware:/lib/firmware:ro 18 | actions: 19 | - name: "task-1-action-1" 20 | image: quay.io/tinkerbell-actions/image2disk:v1.0.0 21 | timeout: 600 22 | environment: 23 | DEST_DISK: /dev/nvme0n1 24 | # Hegel IP 25 | IMG_URL: "http://10.1.1.11:8080/ubuntu-amd64.raw.gz" 26 | COMPRESSED: true 27 | - name: "task-1-action-2" 28 | image: quay.io/tinkerbell-actions/kexec:v1.0.1 29 | timeout: 90 30 | pid: host 31 | environment: 32 | BLOCK_DEVICE: /dev/nvme0n1p1 33 | FS_TYPE: ext4 34 | -------------------------------------------------------------------------------- /internal/e2e/testdata/02/workflow1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Workflow 3 | metadata: 4 | name: wf1 5 | namespace: default 6 | spec: 7 | templateRef: template1 8 | hardwareMap: 9 | device_1: 3c:ec:ef:4c:4f:54 10 | -------------------------------------------------------------------------------- /internal/e2e/testdata/02/workflow2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Workflow 3 | metadata: 4 | name: wf2 5 | namespace: default 6 | spec: 7 | templateRef: template2 8 | hardwareMap: 9 | device_1: 3c:ec:ef:4c:4f:54 10 | device_2: pnj 11 | -------------------------------------------------------------------------------- /internal/e2e/testdata/02/workflow3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "tinkerbell.org/v1alpha1" 2 | kind: Workflow 3 | metadata: 4 | name: wf3 5 | namespace: default 6 | spec: 7 | templateRef: template3 8 | hardwareMap: 9 | device_1: 3c:ec:ef:4c:4f:54 10 | -------------------------------------------------------------------------------- /internal/e2e/tink_suite_test.go: 
-------------------------------------------------------------------------------- 1 | //go:build e2e 2 | 3 | package e2e_test 4 | 5 | import ( 6 | "context" 7 | "fmt" 8 | "path/filepath" 9 | "testing" 10 | "time" 11 | 12 | "github.com/go-logr/logr" 13 | "github.com/go-logr/zapr" 14 | . "github.com/onsi/ginkgo/v2" 15 | . "github.com/onsi/gomega" 16 | "github.com/tinkerbell/tink/api/v1alpha1" 17 | "github.com/tinkerbell/tink/internal/deprecated/controller" 18 | "github.com/tinkerbell/tink/internal/grpcserver" 19 | "github.com/tinkerbell/tink/internal/server" 20 | "go.uber.org/zap" 21 | "k8s.io/client-go/kubernetes/scheme" 22 | ctrl "sigs.k8s.io/controller-runtime" 23 | "sigs.k8s.io/controller-runtime/pkg/client" 24 | "sigs.k8s.io/controller-runtime/pkg/envtest" 25 | ) 26 | 27 | var ( 28 | k8sClient client.Client // You'll be using this client in your tests. 29 | testEnv *envtest.Environment 30 | ctx context.Context 31 | cancel context.CancelFunc 32 | serverAddr string 33 | logger logr.Logger 34 | ) 35 | 36 | func TestTests(t *testing.T) { 37 | RegisterFailHandler(Fail) 38 | RunSpecs(t, "Tests Suite") 39 | } 40 | 41 | var _ = BeforeSuite(func() { 42 | ctx, cancel = context.WithCancel(context.TODO()) 43 | 44 | var err error 45 | logger = zapr.NewLogger(zap.Must(zap.NewDevelopment())) 46 | 47 | // Installs CRDs into cluster 48 | By("bootstrapping test environment") 49 | testEnv = &envtest.Environment{ 50 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 51 | ErrorIfCRDPathMissing: true, 52 | } 53 | 54 | // Start the test cluster 55 | cfg, err := testEnv.Start() 56 | Expect(err).NotTo(HaveOccurred()) 57 | Expect(cfg).NotTo(BeNil()) 58 | cfg.Timeout = time.Second * 5 // Graceful shutdown of testenv for only 5s 59 | logger.Info("started test environment", "host", cfg.Host) 60 | 61 | // Add tink API to the client scheme 62 | err = v1alpha1.AddToScheme(scheme.Scheme) 63 | Expect(err).NotTo(HaveOccurred()) 64 | 65 | // Create the K8s client 66 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 67 | Expect(err).NotTo(HaveOccurred()) 68 | Expect(k8sClient).NotTo(BeNil()) 69 | 70 | errCh := make(chan error, 2) 71 | 72 | tinkServer, err := server.NewKubeBackedServerFromREST(logger, cfg, "default") 73 | Expect(err).To(Succeed()) 74 | 75 | serverAddr, err = grpcserver.SetupGRPC( 76 | ctx, 77 | tinkServer, 78 | "127.0.0.1:0", // Randomly selected port 79 | errCh, 80 | ) 81 | Expect(err).NotTo(HaveOccurred()) 82 | logger.Info(fmt.Sprintf("HTTP server: %v", serverAddr)) 83 | 84 | // Start the controller 85 | options := ctrl.Options{ 86 | Logger: logger, 87 | } 88 | 89 | manager, err := controller.NewManager(cfg, options) 90 | Expect(err).NotTo(HaveOccurred()) 91 | 92 | go func() { 93 | err := manager.Start(ctx) 94 | Expect(err).To(BeNil()) 95 | }() 96 | }) 97 | 98 | var _ = AfterSuite(func() { 99 | By("Cancelling the context") 100 | 101 | By("stopping the test environment") 102 | err := testEnv.Stop() 103 | Expect(err).NotTo(HaveOccurred()) 104 | }) 105 | -------------------------------------------------------------------------------- /internal/grpcserver/grpc_server.go: -------------------------------------------------------------------------------- 1 | package grpcserver 2 | 3 | import ( 4 | "context" 5 | "net" 6 | 7 | grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" 8 | "github.com/pkg/errors" 9 | "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" 10 | "google.golang.org/grpc" 11 | 
"google.golang.org/grpc/reflection" 12 | ) 13 | 14 | // Registrar is an interface for registering APIs on a gRPC server. 15 | type Registrar interface { 16 | Register(*grpc.Server) 17 | } 18 | 19 | // SetupGRPC opens a listener and serves a given Registrar's APIs on a gRPC server and returns the listener's address or an error. 20 | func SetupGRPC(ctx context.Context, r Registrar, listenAddr string, errCh chan<- error) (string, error) { 21 | params := []grpc.ServerOption{ 22 | grpc.StatsHandler(otelgrpc.NewServerHandler()), 23 | grpc.UnaryInterceptor(grpcprometheus.UnaryServerInterceptor), 24 | grpc.StreamInterceptor(grpcprometheus.StreamServerInterceptor), 25 | } 26 | 27 | // register servers 28 | s := grpc.NewServer(params...) 29 | r.Register(s) 30 | reflection.Register(s) 31 | grpcprometheus.Register(s) 32 | 33 | lis, err := net.Listen("tcp", listenAddr) 34 | if err != nil { 35 | return "", errors.Wrap(err, "failed to listen") 36 | } 37 | 38 | go func(errChan chan<- error) { 39 | errChan <- s.Serve(lis) 40 | }(errCh) 41 | 42 | go func(ctx context.Context, s *grpc.Server) { 43 | <-ctx.Done() 44 | s.GracefulStop() 45 | }(ctx, s) 46 | 47 | return lis.Addr().String(), nil 48 | } 49 | -------------------------------------------------------------------------------- /internal/hardware/admission.go: -------------------------------------------------------------------------------- 1 | package hardware 2 | 3 | import ( 4 | "context" 5 | "errors" 6 | "fmt" 7 | "net/http" 8 | 9 | "github.com/tinkerbell/tink/api/v1alpha2" 10 | "github.com/tinkerbell/tink/internal/hardware/internal" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" 13 | "sigs.k8s.io/controller-runtime/pkg/webhook" 14 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 15 | ) 16 | 17 | // admissionWebhookEndpoint is the endpoint serving the Admission handler. 18 | const admissionWebhookEndpoint = "/validate-tinkerbell-org-v1alpha2-hardware" 19 | 20 | // +kubebuilder:webhook:path=/validate-tinkerbell-org-v1alpha2-hardware,mutating=false,failurePolicy=fail,groups="",resources=hardware,verbs=create;update,versions=v1alpha2,name=hardware.tinkerbell.org 21 | 22 | // Admission handles complex validation for admitting a Hardware object to the cluster. 23 | type Admission struct { 24 | client ctrlclient.Client 25 | decoder admission.Decoder 26 | } 27 | 28 | // Handle satisfies controller-runtime/pkg/webhook/admission#Handler. It is responsible for deciding 29 | // if the given req is valid and should be admitted to the cluster. 30 | func (a *Admission) Handle(ctx context.Context, req admission.Request) admission.Response { 31 | if a.client == nil { 32 | return admission.Errored(http.StatusInternalServerError, errors.New("misconfigured client")) 33 | } 34 | 35 | var hw v1alpha2.Hardware 36 | if err := a.decoder.Decode(req, &hw); err != nil { 37 | return admission.Errored(http.StatusBadRequest, err) 38 | } 39 | 40 | // Ensure conditionally optional fields are valid 41 | if resp := a.validateConditionalFields(&hw); !resp.Allowed { 42 | return resp 43 | } 44 | 45 | // Ensure MACs on the hardware are valid. 46 | if resp := a.validateMACs(&hw); !resp.Allowed { 47 | return resp 48 | } 49 | 50 | // Ensure there's no hardware in the cluster with the same MAC addresses. 51 | if resp := a.validateUniqueMACs(ctx, &hw); !resp.Allowed { 52 | return resp 53 | } 54 | 55 | // Ensure there's no hardware in the cluster with the same IP addresses. 
56 | if resp := a.validateUniqueIPs(ctx, &hw); !resp.Allowed { 57 | return resp 58 | } 59 | 60 | return admission.Allowed("") 61 | } 62 | 63 | // InjectDecoder satisfies controller-runtime/pkg/webhook/admission#DecoderInjector. It is used 64 | // when registering the webhook to inject the decoder used by the controller manager. 65 | func (a *Admission) InjectDecoder(d admission.Decoder) error { 66 | a.decoder = d 67 | return nil 68 | } 69 | 70 | // SetClient sets a's internal Kubernetes client. 71 | func (a *Admission) SetClient(c ctrlclient.Client) { 72 | a.client = c 73 | } 74 | 75 | // SetupWithManager registers a with mgr as a webhook served from AdmissionWebhookEndpoint. 76 | func (a *Admission) SetupWithManager(ctx context.Context, mgr ctrl.Manager) error { 77 | idx := mgr.GetFieldIndexer() 78 | 79 | err := idx.IndexField( 80 | ctx, 81 | &v1alpha2.Hardware{}, 82 | internal.HardwareByMACAddr, 83 | internal.HardwareByMACAddrFunc, 84 | ) 85 | if err != nil { 86 | return fmt.Errorf("register index %s: %w", internal.HardwareByMACAddr, err) 87 | } 88 | 89 | err = idx.IndexField( 90 | ctx, 91 | &v1alpha2.Hardware{}, 92 | internal.HardwareByIPAddr, 93 | internal.HardwareByIPAddrFunc, 94 | ) 95 | if err != nil { 96 | return fmt.Errorf("register index %s: %w", internal.HardwareByIPAddr, err) 97 | } 98 | 99 | mgr.GetWebhookServer().Register( 100 | admissionWebhookEndpoint, 101 | &webhook.Admission{Handler: a}, 102 | ) 103 | 104 | return nil 105 | } 106 | -------------------------------------------------------------------------------- /internal/hardware/admission_conditional.go: -------------------------------------------------------------------------------- 1 | package hardware 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | 7 | "github.com/tinkerbell/tink/api/v1alpha2" 8 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 9 | ) 10 | 11 | func (a *Admission) validateConditionalFields(hw *v1alpha2.Hardware) admission.Response { 12 | for mac, ni := range hw.Spec.NetworkInterfaces { 13 | if ni.IsDHCPEnabled() && ni.DHCP == nil { 14 | return admission.Errored(http.StatusBadRequest, fmt.Errorf( 15 | "network interface for %v has DHCP enabled but no DHCP config", 16 | mac, 17 | )) 18 | } 19 | } 20 | 21 | return admission.Allowed("") 22 | } 23 | -------------------------------------------------------------------------------- /internal/hardware/admission_ip.go: -------------------------------------------------------------------------------- 1 | package hardware 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "strings" 8 | 9 | "github.com/tinkerbell/tink/api/v1alpha2" 10 | "github.com/tinkerbell/tink/internal/hardware/internal" 11 | ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 13 | ) 14 | 15 | func (a *Admission) validateUniqueIPs(ctx context.Context, hw *v1alpha2.Hardware) admission.Response { 16 | // Determine if there are IP duplicates within the hw object. 17 | seen := map[string]struct{}{} 18 | var dupOnHw []string 19 | for _, ip := range hw.GetIPs() { 20 | if _, ok := seen[ip]; ok { 21 | dupOnHw = append(dupOnHw, ip) 22 | } 23 | seen[ip] = struct{}{} 24 | } 25 | 26 | if len(dupOnHw) > 0 { 27 | return admission.Errored(http.StatusBadRequest, fmt.Errorf( 28 | "duplicate IPs on Hardware: %v", 29 | strings.Join(dupOnHw, ", "), 30 | )) 31 | } 32 | 33 | // Determine if there are IP duplicates with other Hardware objects. 
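// Editor's note (illustrative sketch, not part of this file): the List call below only finds
// duplicates because SetupWithManager, earlier in this dump, registers the HardwareByIPAddr and
// HardwareByMACAddr field indexes before the webhook is served. The same behaviour can be
// reproduced in a test with the controller-runtime fake client, assuming its WithIndex builder
// option and a scheme/objects set up by the test:
//
//    clnt := fake.NewClientBuilder().
//        WithScheme(scheme). // scheme must include the v1alpha2 types
//        WithObjects(existingHardware).
//        WithIndex(&v1alpha2.Hardware{}, internal.HardwareByIPAddr, internal.HardwareByIPAddrFunc).
//        Build()
//
//    var hits v1alpha2.HardwareList
//    _ = clnt.List(ctx, &hits, ctrlclient.MatchingFields{internal.HardwareByIPAddr: "172.16.10.100"})
//    // len(hits.Items) > 0 means another Hardware object already claims that IP.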
34 | dups := duplicates{} 35 | for _, ip := range hw.GetIPs() { 36 | var hwWithIP v1alpha2.HardwareList 37 | err := a.client.List(ctx, &hwWithIP, ctrlclient.MatchingFields{ 38 | internal.HardwareByIPAddr: ip, 39 | }) 40 | if err != nil { 41 | return admission.Errored(http.StatusInternalServerError, err) 42 | } 43 | if len(hwWithIP.Items) > 0 { 44 | dups.AppendTo(ip, hwWithIP.Items...) 45 | } 46 | } 47 | 48 | if len(dups) > 0 { 49 | return admission.Errored(http.StatusBadRequest, fmt.Errorf( 50 | "IP associated with existing Hardware: %v", 51 | dups.String(), 52 | )) 53 | } 54 | 55 | return admission.Allowed("") 56 | } 57 | -------------------------------------------------------------------------------- /internal/hardware/admission_mac.go: -------------------------------------------------------------------------------- 1 | package hardware 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net/http" 7 | "regexp" 8 | "strings" 9 | 10 | "github.com/tinkerbell/tink/api/v1alpha2" 11 | "github.com/tinkerbell/tink/internal/hardware/internal" 12 | ctrlclient "sigs.k8s.io/controller-runtime/pkg/client" 13 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 14 | ) 15 | 16 | func (a *Admission) validateMACs(hw *v1alpha2.Hardware) admission.Response { 17 | // Validate all MACs on hw are valid before we compare them with Hardware in the cluster. 18 | if invalidMACs := getInvalidMACs(hw); len(invalidMACs) > 0 { 19 | return admission.Errored(http.StatusBadRequest, fmt.Errorf( 20 | "invalid MAC address (%v): %v", 21 | macRegex.String(), 22 | strings.Join(invalidMACs, ", "), 23 | )) 24 | } 25 | 26 | return admission.Allowed("") 27 | } 28 | 29 | // macRegex is taken from the API package documentation. It checks for valid MAC addresses. 30 | // It expects MACs to be lowercase which is necessary for index lookups on API objects. 31 | var macRegex = regexp.MustCompile("^([0-9a-f]{2}:){5}([0-9a-f]{2})$") 32 | 33 | func getInvalidMACs(hw *v1alpha2.Hardware) []string { 34 | var invalidMACs []string 35 | for _, mac := range hw.GetMACs() { 36 | if mac == "" { 37 | mac = "" 38 | } 39 | if !macRegex.MatchString(mac) { 40 | invalidMACs = append(invalidMACs, mac) 41 | } 42 | } 43 | return invalidMACs 44 | } 45 | 46 | func (a *Admission) validateUniqueMACs(ctx context.Context, hw *v1alpha2.Hardware) admission.Response { 47 | dups := duplicates{} 48 | for _, mac := range hw.GetMACs() { 49 | var hwWithMAC v1alpha2.HardwareList 50 | err := a.client.List(ctx, &hwWithMAC, ctrlclient.MatchingFields{ 51 | internal.HardwareByMACAddr: mac, 52 | }) 53 | if err != nil { 54 | return admission.Errored(http.StatusInternalServerError, err) 55 | } 56 | 57 | if len(hwWithMAC.Items) > 0 { 58 | dups.AppendTo(mac, hwWithMAC.Items...) 59 | } 60 | } 61 | 62 | if len(dups) > 0 { 63 | return admission.Errored(http.StatusBadRequest, fmt.Errorf( 64 | "MAC associated with existing Hardware: %s", 65 | dups.String(), 66 | )) 67 | } 68 | 69 | return admission.Allowed("") 70 | } 71 | -------------------------------------------------------------------------------- /internal/hardware/duplicate.go: -------------------------------------------------------------------------------- 1 | package hardware 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/tinkerbell/tink/api/v1alpha2" 8 | ) 9 | 10 | type duplicates map[string]*hardwareList 11 | 12 | func (d *duplicates) AppendTo(k string, hw ...v1alpha2.Hardware) { 13 | if _, ok := (*d)[k]; !ok { 14 | (*d)[k] = &hardwareList{} 15 | } 16 | (*d)[k].Append(hw...) 
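// Editor's note (worked example, not part of this file): with two Hardware objects named "sm01"
// and "sm02" in the default namespace sharing one MAC, the String methods below render the
// rejection detail as:
//
//    d := duplicates{}
//    d.AppendTo("3c:ec:ef:4c:4f:54", hw1, hw2)
//    d.String() // "{3c:ec:ef:4c:4f:54: [Name: sm01; Namespace: default] [Name: sm02; Namespace: default]}"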
17 | } 18 | 19 | func (d duplicates) String() string { 20 | var buf []string 21 | for mac, dupes := range d { 22 | buf = append(buf, fmt.Sprintf("{%v: %v}", mac, dupes.String())) 23 | } 24 | return strings.Join(buf, "; ") 25 | } 26 | 27 | type hardwareList []v1alpha2.Hardware 28 | 29 | func (d *hardwareList) Append(hw ...v1alpha2.Hardware) { 30 | *d = append(*d, hw...) 31 | } 32 | 33 | func (d hardwareList) String() string { 34 | var names []string 35 | for _, hw := range d { 36 | names = append(names, fmt.Sprintf("[Name: %v; Namespace: %v]", hw.Name, hw.Namespace)) 37 | } 38 | return strings.Join(names, " ") 39 | } 40 | -------------------------------------------------------------------------------- /internal/hardware/internal/index.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "github.com/tinkerbell/tink/api/v1alpha2" 5 | "sigs.k8s.io/controller-runtime/pkg/client" 6 | ) 7 | 8 | // HardwareByMACAddr is an index used with a controller-runtime client to lookup hardware by MAC. 9 | const HardwareByMACAddr = ".Spec.NetworkInterfaces.MAC" 10 | 11 | // HardwareByMACAddrFunc returns a list of MAC addresses for a Hardware object. 12 | func HardwareByMACAddrFunc(obj client.Object) []string { 13 | hw, ok := obj.(*v1alpha2.Hardware) 14 | if !ok { 15 | return nil 16 | } 17 | return hw.GetMACs() 18 | } 19 | 20 | // HardwareByIPAddr is an index used with a controller-runtime client to lookup hardware by IP. 21 | const HardwareByIPAddr = ".Spec.NetworkInterfaces.DHCP.IP" 22 | 23 | // HardwareByIPAddrFunc returns a list of IP addresses for a Hardware object. 24 | func HardwareByIPAddrFunc(obj client.Object) []string { 25 | hw, ok := obj.(*v1alpha2.Hardware) 26 | if !ok { 27 | return nil 28 | } 29 | return hw.GetIPs() 30 | } 31 | -------------------------------------------------------------------------------- /internal/httpserver/http_server.go: -------------------------------------------------------------------------------- 1 | package httpserver 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "net/http" 7 | "runtime" 8 | "time" 9 | 10 | "github.com/go-logr/logr" 11 | "github.com/pkg/errors" 12 | "github.com/prometheus/client_golang/prometheus/promhttp" 13 | ) 14 | 15 | var ( 16 | gitRev = "unknown" 17 | startTime = time.Now() 18 | logger logr.Logger 19 | ) 20 | 21 | // SetupHTTP setup and return an HTTP server. 
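// Editor's note (illustrative sketch, not part of this file): callers pass a listen address and an
// error channel, mirroring SetupGRPC; the address ":8080" below is only an example value:
//
//    errCh := make(chan error, 2)
//    httpserver.SetupHTTP(ctx, logger, ":8080", errCh)
//    // GET /healthz then returns JSON shaped like
//    // {"git_rev":"unknown","uptime":12.3,"goroutines":8}
//    // while /metrics and /version are served alongside it.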
22 | func SetupHTTP(ctx context.Context, logger logr.Logger, authority string, errCh chan<- error) { 23 | http.Handle("/metrics", promhttp.Handler()) 24 | http.HandleFunc("/version", getGitRevJSONHandler()) 25 | http.HandleFunc("/healthz", healthCheckHandler) 26 | 27 | srv := &http.Server{ //nolint:gosec // TODO: fix Potential Slowloris Attack because ReadHeaderTimeout is not configured 28 | Addr: authority, 29 | } 30 | go func() { 31 | logger.Info("serving http") 32 | err := srv.ListenAndServe() 33 | if errors.Is(err, http.ErrServerClosed) { 34 | err = nil 35 | } 36 | errCh <- err 37 | }() 38 | go func() { 39 | <-ctx.Done() 40 | if err := srv.Shutdown(context.Background()); err != nil { 41 | logger.Error(err, "shutting down http server") 42 | } 43 | }() 44 | } 45 | 46 | func healthCheckHandler(w http.ResponseWriter, _ *http.Request) { 47 | res := struct { 48 | GitRev string `json:"git_rev"` 49 | Uptime float64 `json:"uptime"` 50 | Goroutines int `json:"goroutines"` 51 | }{ 52 | GitRev: gitRev, 53 | Uptime: time.Since(startTime).Seconds(), 54 | Goroutines: runtime.NumGoroutine(), 55 | } 56 | 57 | b, err := json.Marshal(&res) 58 | if err != nil { 59 | w.WriteHeader(http.StatusInternalServerError) 60 | } 61 | 62 | w.Header().Set("Content-Type", "application/json") 63 | _, _ = w.Write(b) 64 | } 65 | 66 | func getGitRevJSONHandler() http.HandlerFunc { 67 | res := struct { 68 | GitRev string `json:"git_rev"` 69 | Service string `json:"service_name"` 70 | }{ 71 | GitRev: gitRev, 72 | Service: "tinkerbell", 73 | } 74 | b, err := json.Marshal(&res) 75 | if err != nil { 76 | err = errors.Wrap(err, "could not marshal version json") 77 | logger.Error(err, "") 78 | panic(err) 79 | } 80 | 81 | return func(w http.ResponseWriter, _ *http.Request) { 82 | w.Header().Set("Content-Type", "application/json") 83 | _, _ = w.Write(b) 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /internal/proto/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package proto contains generated gRPC and Protobuf types for use in Tink Worker - Tink 3 | Server communication. Maintaining the generated code separately from core packages helps maintain 4 | a smaller surface area for those packages. 5 | */ 6 | package proto 7 | -------------------------------------------------------------------------------- /internal/proto/workflow/v2/workflow.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | package internal.proto.workflow.v2; 4 | 5 | option go_package = "github.com/tinkerbell/tink/internal/proto/workflow/v2;workflow"; 6 | 7 | // WorkflowService is responsible for retrieving workflows to be executed by the agent and 8 | // publishing events as a workflow executes. 9 | service WorkflowService { 10 | // GetWorkflows creates a stream that will receive workflows intended for the agent identified 11 | // by the GetWorkflowsRequest.agent_id. 12 | rpc GetWorkflows(GetWorkflowsRequest) returns (stream GetWorkflowsResponse) {} 13 | 14 | // PublishEvent publishes a workflow event. 
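  // Editor's note (illustrative sketch, not part of this file): with the go_package option above,
  // the generated client lives at github.com/tinkerbell/tink/internal/proto/workflow/v2 (package
  // "workflow"). An agent-side consumer looks roughly like this, assuming standard
  // protoc-gen-go-grpc output; serverAddr, agentID and run are placeholders:
  //
  //    conn, _ := grpc.Dial(serverAddr, grpc.WithTransportCredentials(insecure.NewCredentials()))
  //    client := workflow.NewWorkflowServiceClient(conn)
  //    stream, _ := client.GetWorkflows(ctx, &workflow.GetWorkflowsRequest{AgentId: agentID})
  //    for {
  //        resp, err := stream.Recv()
  //        if err != nil {
  //            break // stream closed or ctx cancelled
  //        }
  //        if sw := resp.GetStartWorkflow(); sw != nil {
  //            run(sw.GetWorkflow()) // execute actions, reporting progress via client.PublishEvent
  //        }
  //    }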
15 | rpc PublishEvent(PublishEventRequest) returns (PublishEventResponse) {} 16 | } 17 | 18 | message GetWorkflowsRequest { 19 | string agent_id = 1; 20 | } 21 | 22 | message GetWorkflowsResponse { 23 | oneof cmd { 24 | StartWorkflow start_workflow = 1; 25 | StopWorkflow stop_workflow = 2; 26 | } 27 | 28 | message StartWorkflow { 29 | Workflow workflow = 1; 30 | } 31 | 32 | message StopWorkflow { 33 | string workflow_id = 1; 34 | } 35 | } 36 | 37 | message PublishEventRequest { 38 | Event event = 1; 39 | } 40 | 41 | message PublishEventResponse {} 42 | 43 | message Workflow { 44 | // A unique identifier for a workflow. 45 | string workflow_id = 1; 46 | 47 | // The actions that make up the workflow. 48 | repeated Action actions = 2; 49 | 50 | message Action { 51 | // A unique identifier for an action in the context of a workflow. 52 | string id = 1; 53 | 54 | // The name of the action. This can be used to identify actions in logging. 55 | string name = 2; 56 | 57 | // The image to run. 58 | string image = 3; 59 | 60 | // The command to execute when launching the image. When using Docker as the action runtime 61 | // it is used as the entrypoint. 62 | optional string cmd = 4; 63 | 64 | // Arguments to pass to the container. 65 | repeated string args = 5; 66 | 67 | // Environment variables to configure when launching the container. 68 | map env = 6; 69 | 70 | // Volumes to mount when launching the container. 71 | repeated string volumes = 7; 72 | 73 | // The network namespace to launch the container in. 74 | optional string network_namespace = 8; 75 | } 76 | } 77 | 78 | message Event { 79 | // A unique identifier for a workflow. 80 | string workflow_id = 1; 81 | 82 | oneof event { 83 | ActionStarted action_started = 2; 84 | ActionSucceeded action_succeeded = 3; 85 | ActionFailed action_failed = 4; 86 | WorkflowRejected workflow_rejected = 5; 87 | } 88 | 89 | message ActionStarted { 90 | // A unique identifier for an action in the context of a workflow. 91 | string action_id = 1; 92 | } 93 | 94 | message ActionSucceeded { 95 | // A unique identifier for an action in the context of a workflow. 96 | string action_id = 1; 97 | } 98 | 99 | message ActionFailed { 100 | // A unique identifier for an action in the context of a workflow. 101 | string action_id = 1; 102 | 103 | // A UpperCamelCase word or phrase concisly describing why an action failed. It is typically 104 | // provided by the action itself. 105 | optional string failure_reason = 2; 106 | 107 | // A free-form human readable string elaborating on the reason for failure. It is typically 108 | // provided by the action itself. 109 | optional string failure_message = 3; 110 | 111 | } 112 | 113 | message WorkflowRejected { 114 | // A message describing why the workflow was rejected. 115 | string message = 2; 116 | } 117 | } -------------------------------------------------------------------------------- /internal/ptr/ptr.go: -------------------------------------------------------------------------------- 1 | /* 2 | Package ptr provides utility functions for converting non-addressable primitive types to pointers. 3 | Its useful in contexts where a variable gives nil primitive type pointers semantics 4 | (often meaning "not set") which can make it annoying to set the value. 
5 | 6 | Example 7 | 8 | type Foo struct { 9 | A *int 10 | } 11 | 12 | func main() { 13 | foo := Foo{ 14 | A: ptr.Int(1) 15 | } 16 | } 17 | */ 18 | package ptr 19 | 20 | func Int(v int) *int { 21 | return &v 22 | } 23 | 24 | func Int8(v int8) *int8 { 25 | return &v 26 | } 27 | 28 | func Int16(v int16) *int16 { 29 | return &v 30 | } 31 | 32 | func Int32(v int32) *int32 { 33 | return &v 34 | } 35 | 36 | func Int64(v int64) *int64 { 37 | return &v 38 | } 39 | 40 | func Uint(v uint) *uint { 41 | return &v 42 | } 43 | 44 | func Uint8(v uint8) *uint8 { 45 | return &v 46 | } 47 | 48 | func Uint16(v uint16) *uint16 { 49 | return &v 50 | } 51 | 52 | func Uint32(v uint32) *uint32 { 53 | return &v 54 | } 55 | 56 | func Uint64(v uint64) *uint64 { 57 | return &v 58 | } 59 | 60 | func Float32(v float32) *float32 { 61 | return &v 62 | } 63 | 64 | func Float64(v float64) *float64 { 65 | return &v 66 | } 67 | 68 | func String(v string) *string { 69 | return &v 70 | } 71 | 72 | func Bool(v bool) *bool { 73 | return &v 74 | } 75 | 76 | func Byte(v byte) *byte { 77 | return &v 78 | } 79 | 80 | func Rune(v rune) *rune { 81 | return &v 82 | } 83 | 84 | func Complex64(v complex64) *complex64 { 85 | return &v 86 | } 87 | 88 | func Complex128(v complex128) *complex128 { 89 | return &v 90 | } 91 | -------------------------------------------------------------------------------- /internal/server/index.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "github.com/tinkerbell/tink/api/v1alpha1" 5 | "sigs.k8s.io/controller-runtime/pkg/client" 6 | ) 7 | 8 | // workflowByNonTerminalState is the index name for retrieving workflows in a non-terminal state. 9 | const workflowByNonTerminalState = ".status.state.nonTerminalWorker" 10 | 11 | // workflowByNonTerminalStateFunc inspects obj - which must be a Workflow - for a Pending or 12 | // Running state. If in either Pending or Running it returns a list of worker addresses. 
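// Editor's note (illustrative sketch, not part of this file): once this index is registered (see
// NewKubeBackedServerFromREST later in this dump), the server can fetch every in-flight workflow
// assigned to a worker with a single field-selector list, roughly:
//
//    var wfs v1alpha1.WorkflowList
//    err := c.List(ctx, &wfs, client.MatchingFields{
//        workflowByNonTerminalState: workerAddr, // e.g. the worker's MAC address
//    })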
13 | func workflowByNonTerminalStateFunc(obj client.Object) []string { 14 | wf, ok := obj.(*v1alpha1.Workflow) 15 | if !ok { 16 | return nil 17 | } 18 | 19 | resp := []string{} 20 | if !(wf.Status.State == v1alpha1.WorkflowStateRunning || wf.Status.State == v1alpha1.WorkflowStatePending) { 21 | return resp 22 | } 23 | for _, task := range wf.Status.Tasks { 24 | if task.WorkerAddr != "" { 25 | resp = append(resp, task.WorkerAddr) 26 | } 27 | } 28 | 29 | return resp 30 | } 31 | -------------------------------------------------------------------------------- /internal/server/index_test.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "reflect" 5 | "testing" 6 | 7 | "github.com/tinkerbell/tink/api/v1alpha1" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/client" 10 | ) 11 | 12 | func TestWorkflowIndexFuncs(t *testing.T) { 13 | cases := []struct { 14 | name string 15 | input client.Object 16 | wantStateAddrs []string 17 | }{ 18 | { 19 | "non workflow", 20 | &v1alpha1.Hardware{}, 21 | nil, 22 | }, 23 | { 24 | "empty workflow", 25 | &v1alpha1.Workflow{ 26 | Status: v1alpha1.WorkflowStatus{ 27 | State: "", 28 | Tasks: []v1alpha1.Task{}, 29 | }, 30 | }, 31 | []string{}, 32 | }, 33 | { 34 | "pending workflow", 35 | &v1alpha1.Workflow{ 36 | Status: v1alpha1.WorkflowStatus{ 37 | State: v1alpha1.WorkflowStatePending, 38 | Tasks: []v1alpha1.Task{ 39 | { 40 | WorkerAddr: "worker1", 41 | }, 42 | }, 43 | }, 44 | }, 45 | []string{"worker1"}, 46 | }, 47 | { 48 | "running workflow", 49 | &v1alpha1.Workflow{ 50 | Status: v1alpha1.WorkflowStatus{ 51 | State: v1alpha1.WorkflowStateRunning, 52 | Tasks: []v1alpha1.Task{ 53 | { 54 | WorkerAddr: "worker1", 55 | }, 56 | { 57 | WorkerAddr: "worker2", 58 | }, 59 | }, 60 | }, 61 | }, 62 | []string{"worker1", "worker2"}, 63 | }, 64 | { 65 | "complete workflow", 66 | &v1alpha1.Workflow{ 67 | Status: v1alpha1.WorkflowStatus{ 68 | State: v1alpha1.WorkflowStateSuccess, 69 | Tasks: []v1alpha1.Task{ 70 | { 71 | WorkerAddr: "worker1", 72 | }, 73 | }, 74 | }, 75 | }, 76 | []string{}, 77 | }, 78 | } 79 | 80 | for _, tc := range cases { 81 | t.Run(tc.name, func(t *testing.T) { 82 | gotStateAddrs := workflowByNonTerminalStateFunc(tc.input) 83 | if !reflect.DeepEqual(tc.wantStateAddrs, gotStateAddrs) { 84 | t.Errorf("Unexpected non-terminating workflow response: wanted %#v, got %#v", tc.wantStateAddrs, gotStateAddrs) 85 | } 86 | }) 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /internal/server/kubernetes_api.go: -------------------------------------------------------------------------------- 1 | package server 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "github.com/go-logr/logr" 9 | "github.com/go-logr/zapr" 10 | "github.com/tinkerbell/tink/api/v1alpha1" 11 | "github.com/tinkerbell/tink/internal/deprecated/controller" 12 | "github.com/tinkerbell/tink/internal/proto" 13 | "go.uber.org/zap" 14 | "google.golang.org/grpc" 15 | "k8s.io/client-go/rest" 16 | "k8s.io/client-go/tools/clientcmd" 17 | clientcmdapi "k8s.io/client-go/tools/clientcmd/api" 18 | "sigs.k8s.io/controller-runtime/pkg/cache" 19 | "sigs.k8s.io/controller-runtime/pkg/client" 20 | "sigs.k8s.io/controller-runtime/pkg/cluster" 21 | ) 22 | 23 | // +kubebuilder:rbac:groups=tinkerbell.org,resources=hardware;hardware/status,verbs=get;list;watch 24 | // +kubebuilder:rbac:groups=tinkerbell.org,resources=templates;templates/status,verbs=get;list;watch 25 | // 
+kubebuilder:rbac:groups=tinkerbell.org,resources=workflows;workflows/status,verbs=get;list;watch;update;patch 26 | 27 | // NewKubeBackedServer returns a server that implements the Workflow server interface for a given kubeconfig. 28 | func NewKubeBackedServer(logger logr.Logger, kubeconfig, apiserver, namespace string) (*KubernetesBackedServer, error) { 29 | ccfg := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( 30 | &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig}, 31 | &clientcmd.ConfigOverrides{ 32 | ClusterInfo: clientcmdapi.Cluster{ 33 | Server: apiserver, 34 | }, 35 | Context: clientcmdapi.Context{ 36 | Namespace: namespace, 37 | }, 38 | }, 39 | ) 40 | 41 | cfg, err := ccfg.ClientConfig() 42 | if err != nil { 43 | return nil, err 44 | } 45 | 46 | return NewKubeBackedServerFromREST(logger, cfg, namespace) 47 | } 48 | 49 | // NewKubeBackedServerFromREST returns a server that implements the Workflow 50 | // server interface with the given Kubernetes rest client and namespace. 51 | func NewKubeBackedServerFromREST(logger logr.Logger, config *rest.Config, namespace string) (*KubernetesBackedServer, error) { 52 | clstr, err := cluster.New(config, func(opts *cluster.Options) { 53 | opts.Scheme = controller.DefaultScheme() 54 | opts.Logger = zapr.NewLogger(zap.NewNop()) 55 | if namespace != "" { 56 | opts.Cache.DefaultNamespaces = map[string]cache.Config{ 57 | namespace: {}, 58 | } 59 | } 60 | }) 61 | if err != nil { 62 | return nil, fmt.Errorf("init client: %w", err) 63 | } 64 | 65 | err = clstr.GetFieldIndexer().IndexField( 66 | context.Background(), 67 | &v1alpha1.Workflow{}, 68 | workflowByNonTerminalState, 69 | workflowByNonTerminalStateFunc, 70 | ) 71 | if err != nil { 72 | return nil, fmt.Errorf("setup %s index: %w", workflowByNonTerminalState, err) 73 | } 74 | 75 | go func() { 76 | err := clstr.Start(context.Background()) 77 | if err != nil { 78 | logger.Error(err, "Error starting cluster") 79 | } 80 | }() 81 | 82 | return &KubernetesBackedServer{ 83 | logger: logger, 84 | ClientFunc: clstr.GetClient, 85 | nowFunc: time.Now, 86 | }, nil 87 | } 88 | 89 | // KubernetesBackedServer is a server that implements a workflow API. 90 | type KubernetesBackedServer struct { 91 | logger logr.Logger 92 | ClientFunc func() client.Client 93 | 94 | nowFunc func() time.Time 95 | } 96 | 97 | // Register registers the service on the gRPC server. 98 | func (s *KubernetesBackedServer) Register(server *grpc.Server) { 99 | proto.RegisterWorkflowServiceServer(server, s) 100 | } 101 | -------------------------------------------------------------------------------- /internal/testtime/frozen_time.go: -------------------------------------------------------------------------------- 1 | package testtime 2 | 3 | import ( 4 | "time" 5 | 6 | "google.golang.org/protobuf/types/known/timestamppb" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | type TimeFunc func() time.Time 11 | 12 | type MetaV1TimeFunc func() *metav1.Time 13 | 14 | type ProtobufTimeFunc func() *timestamppb.Timestamp 15 | 16 | // NewFrozenTime returns a FrozenTime for a given unix second. 17 | func NewFrozenTimeUnix(unix int64) *FrozenTime { 18 | return &FrozenTime{t: time.Unix(unix, 0).UTC()} 19 | } 20 | 21 | // NewFrozenTime returns a FrozenTime for a given time.Time. 22 | func NewFrozenTime(t time.Time) *FrozenTime { 23 | return &FrozenTime{t.UTC()} 24 | } 25 | 26 | // FrozenTime is a type for testing out fake times. 27 | type FrozenTime struct { 28 | t time.Time 29 | } 30 | 31 | // Now never changes. 
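// Editor's note (illustrative, not part of this file): tests can freeze the clock once and derive
// every timestamp from it, e.g.
//
//    ft := NewFrozenTimeUnix(1700000000)
//    started := ft.MetaV1Before(time.Minute)  // *metav1.Time, one minute before ft.Now()
//    deadline := ft.PbAfter(30 * time.Second) // *timestamppb.Timestamp, 30s after ft.Now()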
32 | func (f *FrozenTime) Now() time.Time { return f.t } 33 | 34 | // Before returns a time before FrozenTime.Now() by a given duration. 35 | func (f *FrozenTime) Before(d time.Duration) time.Time { return f.Now().Add(-d) } 36 | 37 | // After returns a time after FrozenTime.Now() by a given duration. 38 | func (f *FrozenTime) After(d time.Duration) time.Time { return f.Now().Add(d) } 39 | 40 | // Before Now() by int64 seconds. 41 | func (f *FrozenTime) BeforeSec(s int64) time.Time { 42 | return f.Now().Add(time.Duration(-s) * time.Second) 43 | } 44 | 45 | // After Now() by int64 seconds. 46 | func (f *FrozenTime) AfterSec(s int64) time.Time { return f.Now().Add(time.Duration(s) * time.Second) } 47 | 48 | // BeforeFunc returns a TimeFunc where the return value is a time before FrozenTime.Now() by a given duration. 49 | func (f *FrozenTime) BeforeFunc(d time.Duration) TimeFunc { 50 | return func() time.Time { return f.Before(d) } 51 | } 52 | 53 | // AfterFunc returns a TimeFunc where the return value is a time after FrozenTime.Now() by a given duration. 54 | func (f *FrozenTime) AfterFunc(d time.Duration) TimeFunc { 55 | return func() time.Time { return f.After(d) } 56 | } 57 | 58 | func (f *FrozenTime) MetaV1Now() *metav1.Time { t := metav1.NewTime(f.Now()); return &t } 59 | func (f *FrozenTime) MetaV1Before(d time.Duration) *metav1.Time { 60 | t := metav1.NewTime(f.Before(d)) 61 | return &t 62 | } 63 | 64 | func (f *FrozenTime) MetaV1After(d time.Duration) *metav1.Time { 65 | t := metav1.NewTime(f.After(d)) 66 | return &t 67 | } 68 | 69 | func (f *FrozenTime) MetaV1BeforeSec(s int64) *metav1.Time { 70 | t := metav1.NewTime(f.BeforeSec(s)) 71 | return &t 72 | } 73 | 74 | func (f *FrozenTime) MetaV1AfterSec(s int64) *metav1.Time { 75 | t := metav1.NewTime(f.AfterSec(s)) 76 | return &t 77 | } 78 | 79 | func (f *FrozenTime) MetaV1BeforeFunc(d time.Duration) MetaV1TimeFunc { 80 | return func() *metav1.Time { return f.MetaV1Before(d) } 81 | } 82 | 83 | func (f *FrozenTime) MetaV1AfterFunc(d time.Duration) MetaV1TimeFunc { 84 | return func() *metav1.Time { return f.MetaV1After(d) } 85 | } 86 | 87 | func (f *FrozenTime) PbNow() *timestamppb.Timestamp { return timestamppb.New(f.Now()) } 88 | func (f *FrozenTime) PbBefore(d time.Duration) *timestamppb.Timestamp { 89 | return timestamppb.New(f.Before(d)) 90 | } 91 | 92 | func (f *FrozenTime) PbAfter(d time.Duration) *timestamppb.Timestamp { 93 | return timestamppb.New(f.After(d)) 94 | } 95 | 96 | func (f *FrozenTime) PbBeforeSec(s int64) *timestamppb.Timestamp { 97 | return timestamppb.New(f.BeforeSec(s)) 98 | } 99 | 100 | func (f *FrozenTime) PbAfterSec(s int64) *timestamppb.Timestamp { 101 | return timestamppb.New(f.AfterSec(s)) 102 | } 103 | 104 | func (f *FrozenTime) PbBeforeFunc(d time.Duration) ProtobufTimeFunc { 105 | return func() *timestamppb.Timestamp { return f.PbBefore(d) } 106 | } 107 | 108 | func (f *FrozenTime) PbAfterFunc(d time.Duration) ProtobufTimeFunc { 109 | return func() *timestamppb.Timestamp { return f.PbAfter(d) } 110 | } 111 | -------------------------------------------------------------------------------- /internal/testtime/frozen_time_test.go: -------------------------------------------------------------------------------- 1 | package testtime 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | ) 7 | 8 | func TestFrozenTime(t *testing.T) { 9 | cases := []struct { 10 | name string 11 | beginTime int64 12 | timeOffsetSec int64 13 | now time.Time 14 | before time.Time 15 | after time.Time 16 | }{ 17 | { 18 | name: "a new hope 
premier", 19 | beginTime: 233391600, 20 | timeOffsetSec: 7260, // 121 minuets 21 | now: time.Unix(233391600, 0), 22 | before: time.Unix(233384340, 0), 23 | after: time.Unix(233398860, 0), 24 | }, 25 | } 26 | 27 | for _, tc := range cases { 28 | t.Run(tc.name, func(t *testing.T) { 29 | ft := NewFrozenTimeUnix(tc.beginTime) 30 | if !tc.now.Equal(ft.Now()) { 31 | t.Fatalf("Unexpected now: wanted %#v, got %#v", tc.now, ft.Now()) 32 | } 33 | if !tc.before.Equal(ft.Before(time.Duration(tc.timeOffsetSec) * time.Second)) { 34 | t.Fatalf("Unexpected before: wanted %#v, got %#v", tc.before, ft.Before(time.Duration(tc.timeOffsetSec)*time.Second)) 35 | } 36 | if !tc.after.Equal(ft.After(time.Duration(tc.timeOffsetSec) * time.Second)) { 37 | t.Fatalf("Unexpected after: wanted %#v, got %#v", tc.after, ft.After(time.Duration(tc.timeOffsetSec)*time.Second)) 38 | } 39 | if !tc.before.Equal(ft.BeforeSec(tc.timeOffsetSec)) { 40 | t.Fatalf("Unexpected beforeSec: wanted %#v, got %#v", tc.before, ft.BeforeSec(tc.timeOffsetSec)) 41 | } 42 | if !tc.after.Equal(ft.AfterSec(tc.timeOffsetSec)) { 43 | t.Fatalf("Unexpected afterSec: wanted %#v, got %#v", tc.after, ft.AfterSec(tc.timeOffsetSec)) 44 | } 45 | if !tc.before.Equal(ft.BeforeFunc(time.Duration(tc.timeOffsetSec) * time.Second)()) { 46 | t.Fatalf("Unexpected beforeSec: wanted %#v, got %#v", tc.before, ft.BeforeFunc(time.Duration(tc.timeOffsetSec)*time.Second)()) 47 | } 48 | if !tc.after.Equal(ft.AfterFunc(time.Duration(tc.timeOffsetSec) * time.Second)()) { 49 | t.Fatalf("Unexpected afterSec: wanted %#v, got %#v", tc.after, ft.AfterFunc(time.Duration(tc.timeOffsetSec)*time.Second)()) 50 | } 51 | }) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /internal/workflow/internal/reconcile.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "text/template" 7 | "time" 8 | 9 | "github.com/go-logr/logr" 10 | "github.com/google/uuid" 11 | tinkv1 "github.com/tinkerbell/tink/api/v1alpha2" 12 | "gopkg.in/yaml.v3" 13 | "k8s.io/apimachinery/pkg/api/errors" 14 | "sigs.k8s.io/controller-runtime/pkg/client" 15 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 16 | ) 17 | 18 | // ReconciliationContext reconciles Workflow resources when created or updated. 19 | type ReconciliationContext struct { 20 | // Workflow is the Workflow instance we're reconciling. 21 | Workflow *tinkv1.Workflow 22 | 23 | // NewActionID generated unique IDs for actions. Defaults to generating UUIDv4s. 24 | NewActionID func() string 25 | 26 | Log logr.Logger 27 | Client client.Client 28 | } 29 | 30 | // Reconcile reconciles the Workflow. 31 | func (rc ReconciliationContext) Reconcile(ctx context.Context) (reconcile.Result, error) { 32 | tmplRef := client.ObjectKey{ 33 | Name: rc.Workflow.Spec.TemplateRef.Name, 34 | Namespace: rc.Workflow.Namespace, 35 | } 36 | var tmpl tinkv1.Template 37 | if err := rc.Client.Get(ctx, tmplRef, &tmpl); err != nil { 38 | if errors.IsNotFound(err) { 39 | // The Template may yet to be submitted to the cluster so just requeue. 
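// Editor's note (not part of this file): once both references resolve, renderTemplate below
// executes the template text with missingkey=error over a data map exposing ".Hardware" (the
// referenced HardwareSpec) and ".Param" (Workflow.Spec.TemplateParams), so template authors can
// write e.g. `{{ .Param.Foo }}` or reference exported HardwareSpec fields via `{{ .Hardware }}`.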
40 | rc.Log.Info("Template not found; requeue in 5 seconds", "ref", tmplRef) 41 | return reconcile.Result{RequeueAfter: 5 * time.Second}, nil 42 | } 43 | return reconcile.Result{}, err 44 | } 45 | 46 | hwRef := client.ObjectKey{ 47 | Name: rc.Workflow.Spec.HardwareRef.Name, 48 | Namespace: rc.Workflow.Namespace, 49 | } 50 | var hw tinkv1.Hardware 51 | if err := rc.Client.Get(ctx, hwRef, &hw); err != nil { 52 | if errors.IsNotFound(err) { 53 | // The Hardware may yet to be submitted to the cluster so just requeue. 54 | rc.Log.Info("Hardware not found; requeue in 5 seconds", "ref", tmplRef) 55 | return reconcile.Result{RequeueAfter: 5 * time.Second}, nil 56 | } 57 | return reconcile.Result{}, err 58 | } 59 | 60 | // Only render the template and configure action status if its not been done before. 61 | if len(rc.Workflow.Status.Actions) == 0 { 62 | tmpl, err := rc.renderTemplate(tmpl, &hw) 63 | if err != nil { 64 | return reconcile.Result{}, err 65 | } 66 | 67 | rc.Workflow.Status.Actions = rc.toActionStatus(tmpl.Spec.Actions) 68 | } 69 | 70 | return reconcile.Result{}, nil 71 | } 72 | 73 | func (rc ReconciliationContext) renderTemplate(tpl tinkv1.Template, hw *tinkv1.Hardware) (tinkv1.Template, error) { 74 | tplYAML, err := yaml.Marshal(tpl) 75 | if err != nil { 76 | return tinkv1.Template{}, err 77 | } 78 | 79 | renderer, err := template.New(""). 80 | Option("missingkey=error"). 81 | Funcs(workflowTemplateFuncs). 82 | Parse(string(tplYAML)) 83 | if err != nil { 84 | return tinkv1.Template{}, err 85 | } 86 | 87 | tplData := map[string]any{ 88 | "Hardware": hw.Spec, 89 | "Param": rc.Workflow.Spec.TemplateParams, 90 | } 91 | 92 | var renderedTplYAML bytes.Buffer 93 | if err := renderer.Execute(&renderedTplYAML, tplData); err != nil { 94 | return tinkv1.Template{}, err 95 | } 96 | 97 | if err := yaml.Unmarshal(renderedTplYAML.Bytes(), &tpl); err != nil { 98 | return tinkv1.Template{}, err 99 | } 100 | 101 | return tpl, nil 102 | } 103 | 104 | func (rc ReconciliationContext) toActionStatus(actions []tinkv1.Action) []tinkv1.ActionStatus { 105 | var status []tinkv1.ActionStatus 106 | for _, action := range actions { 107 | status = append(status, tinkv1.ActionStatus{ 108 | Rendered: action, 109 | ID: rc.newActionID(), 110 | State: tinkv1.ActionStatePending, 111 | }) 112 | } 113 | return status 114 | } 115 | 116 | func (rc ReconciliationContext) newActionID() string { 117 | if rc.NewActionID != nil { 118 | return rc.NewActionID() 119 | } 120 | return uuid.New().String() 121 | } 122 | -------------------------------------------------------------------------------- /internal/workflow/internal/reconcile_test.go: -------------------------------------------------------------------------------- 1 | package internal_test 2 | 3 | import ( 4 | "context" 5 | "os" 6 | "testing" 7 | 8 | "github.com/go-logr/zerologr" 9 | "github.com/google/go-cmp/cmp" 10 | "github.com/rs/zerolog" 11 | tinkv1 "github.com/tinkerbell/tink/api/v1alpha2" 12 | "github.com/tinkerbell/tink/internal/ptr" 13 | . "github.com/tinkerbell/tink/internal/workflow/internal" //nolint:revive // Dot imports should not be used. Problem for another time though. 
14 | corev1 "k8s.io/api/core/v1" 15 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/apimachinery/pkg/runtime" 17 | machineryruntimeutil "k8s.io/apimachinery/pkg/util/runtime" 18 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 19 | ) 20 | 21 | func TestReconcileContext(t *testing.T) { 22 | ctx := context.Background() 23 | 24 | hw := newHardware(func(*tinkv1.Hardware) {}) 25 | tmpl := newTemplate(func(t *tinkv1.Template) { 26 | t.Spec.Actions = []tinkv1.Action{ 27 | { 28 | Name: "action", 29 | Image: "image", 30 | Cmd: ptr.String("{{ .Param.Foo }}"), 31 | }, 32 | } 33 | }) 34 | wrkflw := newWorkflow(func(w *tinkv1.Workflow) { 35 | w.Spec.HardwareRef = corev1.LocalObjectReference{Name: hw.Name} 36 | w.Spec.TemplateRef = corev1.LocalObjectReference{Name: tmpl.Name} 37 | w.Spec.TemplateParams = map[string]string{"Foo": "Bar"} 38 | }) 39 | 40 | expectWrkflw := wrkflw.DeepCopy() 41 | expectWrkflw.Status.Actions = []tinkv1.ActionStatus{ 42 | { 43 | Rendered: newAction(func(a *tinkv1.Action) { 44 | a.Name = "action" 45 | a.Image = "image" 46 | a.Cmd = ptr.String("Bar") 47 | }), 48 | State: "Pending", 49 | ID: newActionID(), 50 | }, 51 | } 52 | 53 | zl := zerolog.New(os.Stdout) 54 | logger := zerologr.New(&zl) 55 | 56 | scheme := runtime.NewScheme() 57 | machineryruntimeutil.Must(tinkv1.AddToScheme(scheme)) 58 | 59 | clnt := fake.NewClientBuilder(). 60 | WithScheme(scheme). 61 | WithObjects(hw, tmpl). 62 | Build() 63 | 64 | reconcileCtx := ReconciliationContext{ 65 | Client: clnt, 66 | Log: logger, 67 | Workflow: wrkflw, 68 | NewActionID: newActionID, 69 | } 70 | _, err := reconcileCtx.Reconcile(ctx) 71 | if err != nil { 72 | t.Fatal(err) 73 | } 74 | 75 | if !cmp.Equal(expectWrkflw, wrkflw) { 76 | t.Fatal(cmp.Diff(expectWrkflw, wrkflw)) 77 | } 78 | } 79 | 80 | func newWorkflow(fn func(*tinkv1.Workflow)) *tinkv1.Workflow { 81 | w := &tinkv1.Workflow{ 82 | TypeMeta: v1.TypeMeta{ 83 | Kind: "Workflow", 84 | APIVersion: tinkv1.GroupVersion.String(), 85 | }, 86 | ObjectMeta: v1.ObjectMeta{ 87 | Name: "workflow", 88 | }, 89 | Spec: tinkv1.WorkflowSpec{}, 90 | } 91 | fn(w) 92 | return w 93 | } 94 | 95 | func newTemplate(fn func(*tinkv1.Template)) *tinkv1.Template { 96 | t := &tinkv1.Template{ 97 | TypeMeta: v1.TypeMeta{ 98 | Kind: "Template", 99 | APIVersion: tinkv1.GroupVersion.String(), 100 | }, 101 | ObjectMeta: v1.ObjectMeta{ 102 | Name: "template", 103 | }, 104 | } 105 | fn(t) 106 | return t 107 | } 108 | 109 | func newHardware(fn func(*tinkv1.Hardware)) *tinkv1.Hardware { 110 | hw := &tinkv1.Hardware{ 111 | TypeMeta: v1.TypeMeta{ 112 | Kind: "Hardware", 113 | APIVersion: tinkv1.GroupVersion.String(), 114 | }, 115 | ObjectMeta: v1.ObjectMeta{ 116 | Name: "hardware", 117 | }, 118 | } 119 | fn(hw) 120 | return hw 121 | } 122 | 123 | func newAction(fn func(*tinkv1.Action)) tinkv1.Action { 124 | a := tinkv1.Action{ 125 | Args: []string{}, 126 | Env: map[string]string{}, 127 | Volumes: []tinkv1.Volume{}, 128 | } 129 | fn(&a) 130 | return a 131 | } 132 | 133 | func newActionID() string { 134 | return "8659e46f-00ff-40e4-a19b-c8661ca81167" 135 | } 136 | -------------------------------------------------------------------------------- /internal/workflow/internal/template.go: -------------------------------------------------------------------------------- 1 | package internal 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | // workflowTemplateFuncs defines the custom functions available to workflow templates. 
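// Editor's note (illustrative sketch, not part of this file): reconcile.go attaches these funcs to
// its renderer via Funcs(workflowTemplateFuncs), so a template can compute partition paths instead
// of hard-coding them:
//
//    tpl := template.Must(template.New("").Funcs(workflowTemplateFuncs).
//        Parse(`DEST_DISK: {{ formatPartition .Disk 1 }}`))
//    _ = tpl.Execute(os.Stdout, map[string]string{"Disk": "/dev/nvme0n1"})
//    // Output: DEST_DISK: /dev/nvme0n1p1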
9 | var workflowTemplateFuncs = map[string]interface{}{ 10 | "contains": strings.Contains, 11 | "hasPrefix": strings.HasPrefix, 12 | "hasSuffix": strings.HasSuffix, 13 | "formatPartition": formatPartition, 14 | } 15 | 16 | // formatPartition formats a device path with partition for the device type. If it receives an 17 | // unidentifiable device path it returns the dev. 18 | // 19 | // Examples 20 | // 21 | // formatPartition("/dev/nvme0n1", 0) -> /dev/nvme0n1p1 22 | // formatPartition("/dev/sda", 1) -> /dev/sda1 23 | // formatPartition("/dev/vda", 2) -> /dev/vda2 24 | func formatPartition(dev string, partition int) string { 25 | switch { 26 | case strings.HasPrefix(dev, "/dev/nvme"): 27 | return fmt.Sprintf("%vp%v", dev, partition) 28 | case strings.HasPrefix(dev, "/dev/sd"), 29 | strings.HasPrefix(dev, "/dev/vd"), 30 | strings.HasPrefix(dev, "/dev/xvd"), 31 | strings.HasPrefix(dev, "/dev/hd"): 32 | return fmt.Sprintf("%v%v", dev, partition) 33 | } 34 | return dev 35 | } 36 | -------------------------------------------------------------------------------- /internal/workflow/reconciler.go: -------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | import ( 4 | "context" 5 | "time" 6 | 7 | tinkv1 "github.com/tinkerbell/tink/api/v1alpha2" 8 | "github.com/tinkerbell/tink/internal/workflow/internal" 9 | "k8s.io/apimachinery/pkg/api/errors" 10 | kerrors "k8s.io/apimachinery/pkg/util/errors" 11 | ctrl "sigs.k8s.io/controller-runtime" 12 | "sigs.k8s.io/controller-runtime/pkg/client" 13 | "sigs.k8s.io/controller-runtime/pkg/manager" 14 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 15 | ) 16 | 17 | // Reconciler reconciles Workflow instances. 18 | type Reconciler struct { 19 | client client.Client 20 | nowFunc func() time.Time 21 | } 22 | 23 | // NewReconciler creates a Reconciler instance. 24 | func NewReconciler(clnt client.Client) *Reconciler { 25 | return &Reconciler{ 26 | client: clnt, 27 | nowFunc: time.Now, 28 | } 29 | } 30 | 31 | // +kubebuilder:rbac:groups=tinkerbell.org,resources=hardware;hardware/status,verbs=get;list;watch;update;patch 32 | // +kubebuilder:rbac:groups=tinkerbell.org,resources=templates;templates/status,verbs=get;list;watch;update;patch 33 | // +kubebuilder:rbac:groups=tinkerbell.org,resources=workflows;workflows/status,verbs=get;list;watch;update;patch 34 | // +kubebuilder:rbac:groups=tinkerbell.org,resources=workflows;workflows/finalizers,verbs=update 35 | 36 | func (r *Reconciler) Reconcile(ctx context.Context, req reconcile.Request) (result reconcile.Result, rerr error) { 37 | logger := ctrl.LoggerFrom(ctx) 38 | logger.Info("Reconciling") 39 | 40 | wrkflw := &tinkv1.Workflow{} 41 | if err := r.client.Get(ctx, req.NamespacedName, wrkflw); err != nil { 42 | if errors.IsNotFound(err) { 43 | logger.Info("Workflow not found; discontinuing reconciliation") 44 | } 45 | return reconcile.Result{}, client.IgnoreNotFound(err) 46 | } 47 | 48 | // TODO(chrisdoherty) 49 | if !wrkflw.DeletionTimestamp.IsZero() { 50 | return reconcile.Result{}, nil 51 | } 52 | 53 | rc := internal.ReconciliationContext{ 54 | Client: r.client, 55 | Log: logger, 56 | Workflow: wrkflw.DeepCopy(), 57 | } 58 | 59 | // Always attempt to patch. 
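// Editor's note (not part of this file): rc.Workflow above is a DeepCopy of wrkflw, so the deferred
// Status().Patch with client.MergeFrom(wrkflw) sends only the status fields that Reconcile actually
// changed, and it is attempted even when Reconcile returns an error (patch failures are folded into
// rerr via the aggregate).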
60 | defer func() { 61 | if err := r.client.Status().Patch(ctx, rc.Workflow, client.MergeFrom(wrkflw)); err != nil { 62 | rerr = kerrors.NewAggregate([]error{rerr, err}) 63 | } 64 | }() 65 | 66 | return rc.Reconcile(ctx) 67 | } 68 | 69 | func (r *Reconciler) SetupWithManager(mgr manager.Manager) error { 70 | return ctrl.NewControllerManagedBy(mgr). 71 | For(&tinkv1.Workflow{}). 72 | Complete(r) 73 | } 74 | -------------------------------------------------------------------------------- /internal/workflow/reconciler_test.go: -------------------------------------------------------------------------------- 1 | package workflow 2 | 3 | import ( 4 | "github.com/tinkerbell/tink/api/v1alpha2" 5 | "k8s.io/apimachinery/pkg/runtime" 6 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 7 | ) 8 | 9 | var scheme = runtime.NewScheme() 10 | 11 | func init() { 12 | _ = clientgoscheme.AddToScheme(scheme) 13 | _ = v1alpha2.AddToScheme(scheme) 14 | } 15 | -------------------------------------------------------------------------------- /shell.nix: -------------------------------------------------------------------------------- 1 | let _pkgs = import { }; 2 | in { pkgs ? import (_pkgs.fetchFromGitHub { 3 | owner = "NixOS"; 4 | repo = "nixpkgs"; 5 | #branch@date: 21.11@2021-12-02 6 | rev = "21.11"; 7 | sha256 = "sha256-AjhmbT4UBlJWqxY0ea8a6GU2C2HdKUREkG43oRr3TZg="; 8 | }) { } }: 9 | 10 | with pkgs; 11 | 12 | mkShell { 13 | buildInputs = [ 14 | git 15 | gnumake 16 | jq 17 | nixfmt 18 | nodePackages.prettier 19 | python3Packages.codespell 20 | python3Packages.pip 21 | python3Packages.setuptools 22 | shellcheck 23 | shfmt 24 | ]; 25 | } 26 | --------------------------------------------------------------------------------