├── .dockerignore
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── config.yml
│ └── feature_request.md
├── PULL_REQUEST_TEMPLATE.md
├── boilerplate.go.txt
├── changelog-charts.json
├── changelog-charts.sh
├── changelog-configuration.json
├── helm-docs-footer.gotmpl.md
├── helm-docs-header.gotmpl.md
├── render-charts.sh
├── signature.asc
└── workflows
│ ├── build.yml
│ ├── chart-lint.yml
│ ├── chart-release.yml
│ ├── docs.yml
│ ├── e2e.yaml
│ ├── lint.yml
│ ├── release.yml
│ └── test.yml
├── .gitignore
├── .goreleaser.yml
├── Dockerfile
├── LICENSE
├── Makefile
├── Makefile.vars.mk
├── README.adoc
├── charts
├── charts.mk
└── clustercode
│ ├── .helmignore
│ ├── Chart.yaml
│ ├── Makefile
│ ├── README.gotmpl.md
│ ├── README.md
│ ├── templates
│ ├── NOTES.txt
│ ├── _helpers.tpl
│ ├── operator
│ │ ├── clusterrole.yaml
│ │ ├── clusterrolebinding.yaml
│ │ ├── deployment.yaml
│ │ └── user-clusterrole.yaml
│ ├── serviceaccount.yaml
│ ├── webhook
│ │ ├── deployment.yaml
│ │ ├── secret.yaml
│ │ ├── service.yaml
│ │ └── webhook-config.yaml
│ └── webui
│ │ ├── api-service.yaml
│ │ ├── deployment.yaml
│ │ ├── ingress.yaml
│ │ ├── service.yaml
│ │ ├── user-sa.yaml
│ │ └── user-secret.yaml
│ └── values.yaml
├── cleanup_command.go
├── count_command.go
├── docs
├── .gitignore
├── README.adoc
├── antora-playbook.yml
├── antora.yml
├── docs.mk
├── modules
│ └── ROOT
│ │ ├── assets
│ │ └── images
│ │ │ ├── clustercode-overview.drawio.svg
│ │ │ └── clustercode-process.drawio.svg
│ │ ├── examples
│ │ └── blueprint.yaml
│ │ ├── nav.adoc
│ │ └── pages
│ │ ├── explanations
│ │ ├── history.adoc
│ │ └── how-it-works.adoc
│ │ ├── how-tos
│ │ └── create-blueprint.adoc
│ │ ├── index.adoc
│ │ └── tutorials
│ │ └── tutorial.adoc
├── package-lock.json
├── package.json
└── supplemental-ui
│ └── partials
│ ├── footer-content.hbs
│ └── header-content.hbs
├── flags.go
├── go.mod
├── go.sum
├── kind
├── config.yaml
└── kind.mk
├── logger.go
├── main.go
├── operator_command.go
├── package
├── crds
│ ├── clustercode.github.io_blueprints.yaml
│ └── clustercode.github.io_tasks.yaml
├── rbac
│ └── role.yaml
└── webhook
│ └── manifests.yaml
├── pkg
├── api
│ ├── conditions
│ │ └── types.go
│ ├── generate.go
│ ├── init.go
│ └── v1alpha1
│ │ ├── blueprint_types.go
│ │ ├── common.go
│ │ ├── groupversion_info.go
│ │ ├── podtemplate_types.go
│ │ ├── task_types.go
│ │ └── zz_generated.deepcopy.go
├── cleanupcmd
│ └── run.go
├── countcmd
│ └── run.go
├── internal
│ ├── pipe
│ │ ├── debuglogger.go
│ │ ├── failedcondition.go
│ │ └── kubeclient.go
│ ├── types
│ │ └── types.go
│ └── utils
│ │ ├── utils.go
│ │ └── utils_test.go
├── operator
│ ├── blueprintcontroller
│ │ ├── controller.go
│ │ └── setup.go
│ ├── command.go
│ ├── generate.go
│ ├── jobcontroller
│ │ ├── controller.go
│ │ ├── handler.go
│ │ └── setup.go
│ ├── reconciler
│ │ └── reconciler.go
│ ├── setup.go
│ └── taskcontroller
│ │ ├── cleanup_job.go
│ │ ├── controller.go
│ │ ├── count_job.go
│ │ ├── merge_job.go
│ │ ├── setup.go
│ │ ├── slice_job.go
│ │ ├── split_job.go
│ │ ├── utils.go
│ │ └── utils_test.go
├── scancmd
│ └── run.go
├── webhook
│ ├── blueprintwebhook
│ │ ├── defaulter.go
│ │ ├── defaulter_test.go
│ │ ├── setup.go
│ │ ├── validator.go
│ │ └── validator_test.go
│ ├── command.go
│ ├── generate.go
│ └── setup.go
└── webui
│ ├── server.go
│ └── settings.go
├── renovate.json
├── scan_command.go
├── test
├── e2e.mk
├── e2e
│ ├── .gitignore
│ ├── kuttl-test.yaml
│ └── operator
│ │ ├── 00-delete.yaml
│ │ ├── 01-assert.yaml
│ │ ├── 01-install.yaml
│ │ ├── 02-assert.yaml
│ │ ├── 02-install.yaml
│ │ ├── 03-assert.yaml
│ │ ├── 03-install.yaml.template
│ │ └── 04-delete.yaml
├── integration.mk
├── media.mk
└── values.yaml
├── tools.go
├── ui
├── .eslintrc.cjs
├── .gitignore
├── .prettierrc.cjs
├── README.md
├── cypress.config.ts
├── cypress.d.ts
├── cypress
│ └── support
│ │ ├── commands.ts
│ │ ├── component-index.html
│ │ └── component.ts
├── embed.go
├── embed.init.go
├── index.html
├── package-lock.json
├── package.json
├── public
│ ├── robots.txt
│ └── vite.svg
├── src
│ ├── App.svelte
│ ├── assets
│ │ └── svelte.svg
│ ├── kube
│ │ ├── client.ts
│ │ ├── object.ts
│ │ └── types
│ │ │ ├── list.ts
│ │ │ ├── selfSubjectAccessReview.ts
│ │ │ └── task.ts
│ ├── lib
│ │ ├── LoginForm.cy.ts
│ │ └── LoginForm.svelte
│ ├── main.ts
│ ├── stores
│ │ ├── ClientStore.ts
│ │ └── SettingsStore.ts
│ └── vite-env.d.ts
├── svelte.config.js
├── tsconfig.json
├── tsconfig.node.json
├── ui.mk
└── vite.config.ts
├── webhook_command.go
└── webui_command.go
/.dockerignore:
--------------------------------------------------------------------------------
1 | .*
2 | *
3 | !clustercode
4 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: 🐛 Bug report
3 | about: Create a report to help improve 🎉
4 | title: '[Bug] '
5 | labels: 'bug'
6 |
7 | ---
8 |
9 | ## Describe the bug
10 |
11 | A clear and concise description of what the bug is.
12 |
13 | ## Additional context
14 |
15 | Add any other context about the problem here.
16 |
17 | ## Logs
18 |
19 | If applicable, add logs to help explain your problem.
20 | ```console
21 |
22 | ```
23 |
24 | ## Expected behavior
25 |
26 | A clear and concise description of what you expected to happen.
27 |
28 | ## To Reproduce
29 |
30 | Steps to reproduce the behavior:
31 | 1. Specs
32 | ```yaml
33 |
34 | ```
35 | 2. Commands
36 | ```bash
37 |
38 | ```
39 |
40 | ## Environment (please complete the following information):
41 |
42 | - Image Version: e.g. v1.0
43 | - K8s Version: e.g. v1.18
44 | - K8s Distribution: e.g. OpenShift, Rancher, etc.
45 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: false
2 | contact_links:
3 | - name: ❓ Question
4 | url: https://github.com/ccremer/clustercode/discussions
5 | about: Ask or discuss with me, I'm happy to help 🙋
6 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: 🚀 Feature request
3 | about: Suggest an idea for this project 💡
4 | title: '[Feature] '
5 | labels: 'enhancement'
6 |
7 | ---
8 |
13 |
14 | ## Summary
15 |
16 | **As** "role name"\
17 | **I want** "a feature or functionality"\
18 | **So that** "business value(s)"
19 |
20 | ## Context
21 |
22 | Add more information here. You are completely free regarding form and length
23 |
24 | ## Out of Scope
25 |
26 | * List aspects that are explicitly not part of this feature
27 |
28 | ## Further links
29 |
30 | * URLs of relevant Git repositories, PRs, Issues, etc.
31 |
32 | ## Acceptance criteria
33 |
34 |
40 |
41 | *Given* "a precondition"\
42 | *When* "an action happens"\
43 | *Then* "a result is expected"
44 |
45 | ## Implementation Ideas
46 |
47 | * If applicable, shortly list possible implementation ideas
48 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Summary
2 |
3 | * Short summary of what's included in the PR
4 | * Give special note to breaking changes: List the exact changes or provide links to documentation.
5 |
6 | ## Checklist
7 |
8 |
12 |
13 | ### For Code changes
14 |
15 | - [ ] Categorize the PR by setting a good title and adding one of the labels:
16 | `kind:bug`, `kind:enhancement`, `kind:documentation`, `kind:change`, `kind:breaking`, `kind:dependency`
17 | as they show up in the changelog
18 | - [ ] PR contains the label `area:operator`
19 | - [ ] Link this PR to related issues
20 | - [ ] I have not made _any_ changes in the `charts/` directory.
21 |
22 | ### For Helm Chart changes
23 |
24 | - [ ] Categorize the PR by setting a good title and adding one of the labels:
25 | `kind:bug`, `kind:enhancement`, `kind:documentation`, `kind:change`, `kind:breaking`, `kind:dependency`
26 | as they show up in the changelog
27 | - [ ] PR contains the label `area:chart`
28 | - [ ] PR contains the chart label, e.g. `chart:clustercode`
29 | - [ ] Variables are documented in the values.yaml using the format required by [Helm-Docs](https://github.com/norwoodj/helm-docs#valuesyaml-metadata).
30 | - [ ] Chart Version bumped if immediate release after merging is planned
31 | - [ ] I have run `make chart-docs`
32 | - [ ] Link this PR to related code release or other issues.
33 |
34 |
41 |
--------------------------------------------------------------------------------
/.github/boilerplate.go.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ccremer/clustercode/f651028f35ff9346504f732b43f73e2b4be80dfe/.github/boilerplate.go.txt
--------------------------------------------------------------------------------
/.github/changelog-charts.json:
--------------------------------------------------------------------------------
1 | {
2 | "pr_template": "- ${{TITLE}} (#${{NUMBER}})",
3 | "categories": [
4 | {
5 | "title": "## 🚀 Features",
6 | "labels": [
7 | "kind:enhancement",
8 | "area:chart"
9 | ],
10 | "exhaustive": true
11 | },
12 | {
13 | "title": "## 🛠️ Minor Changes",
14 | "labels": [
15 | "kind:change",
16 | "area:chart"
17 | ],
18 | "exhaustive": true
19 | },
20 | {
21 | "title": "## 🔎 Breaking Changes",
22 | "labels": [
23 | "kind:breaking",
24 | "area:chart"
25 | ],
26 | "exhaustive": true
27 | },
28 | {
29 | "title": "## 🐛 Fixes",
30 | "labels": [
31 | "kind:bug",
32 | "area:chart"
33 | ],
34 | "exhaustive": true
35 | },
36 | {
37 | "title": "## 📄 Documentation",
38 | "labels": [
39 | "kind:documentation",
40 | "area:chart"
41 | ],
42 | "exhaustive": true
43 | },
44 | {
45 | "title": "## 🔗 Dependency Updates",
46 | "labels": [
47 | "kind:dependency",
48 | "area:chart"
49 | ],
50 | "exhaustive": true
51 | }
52 | ],
53 | "template": "This release contains only Helm chart changes\n\n${{CATEGORIZED_COUNT}} changes since ${{FROM_TAG}}\n\n${{CHANGELOG}}"
54 | }
55 |
--------------------------------------------------------------------------------
/.github/changelog-charts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eo pipefail
4 |
5 | chart="${1}"
6 |
7 | tagPattern="${chart}-(.+)"
8 | chartLabel="chart:${chart}"
9 |
10 | echo ::group::Configuring changelog generator
11 | jq '.tag_resolver.filter.pattern="'$tagPattern'" | .tag_resolver.transformer.pattern="'$tagPattern'" | .categories[].labels += ["'$chartLabel'"]' \
12 | .github/changelog-charts.json | tee .github/configuration.json
13 | echo ::endgroup::
14 |
--------------------------------------------------------------------------------
/.github/changelog-configuration.json:
--------------------------------------------------------------------------------
1 | {
2 | "pr_template": "- ${{TITLE}} by @${{AUTHOR}} (#${{NUMBER}})",
3 | "ignore_labels": [
4 | "area:chart"
5 | ],
6 | "categories": [
7 | {
8 | "title": "## 🚀 Features",
9 | "labels": [
10 | "kind:enhancement"
11 | ]
12 | },
13 | {
14 | "title": "## 🛠️ Minor Changes",
15 | "labels": [
16 | "kind:change"
17 | ]
18 | },
19 | {
20 | "title": "## 🔎 Breaking Changes",
21 | "labels": [
22 | "kind:breaking"
23 | ]
24 | },
25 | {
26 | "title": "## 🐛 Fixes",
27 | "labels": [
28 | "kind:bug"
29 | ]
30 | },
31 | {
32 | "title": "## 📄 Documentation",
33 | "labels": [
34 | "kind:documentation"
35 | ]
36 | },
37 | {
38 | "title": "## 🔗 Dependency Updates",
39 | "labels": [
40 | "kind:dependency"
41 | ]
42 | }
43 | ],
44 | "template": "${{CATEGORIZED_COUNT}} changes since ${{FROM_TAG}}\n\n${{CHANGELOG}}"
45 | }
46 |
--------------------------------------------------------------------------------
/.github/helm-docs-footer.gotmpl.md:
--------------------------------------------------------------------------------
1 |
2 | {{ template "chart.sourcesSection" . }}
3 |
4 | {{ template "chart.requirementsSection" . }}
5 |
--------------------------------------------------------------------------------
/.github/helm-docs-header.gotmpl.md:
--------------------------------------------------------------------------------
1 | {{ template "chart.header" . }}
2 | {{ template "chart.deprecationWarning" . }}
3 |
4 | {{ template "chart.versionBadge" . }}{{ template "chart.typeBadge" . }}{{ template "chart.appVersionBadge" . }}
5 |
6 | {{ template "chart.description" . }}
7 |
8 | {{ template "chart.homepageLine" . }}
9 |
--------------------------------------------------------------------------------
/.github/render-charts.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -eo pipefail
4 |
5 | chartYaml="${1}"
6 | chartName=$(dirname "${chartYaml}")
7 |
8 | echo "::group::Render chart ${chartName}"
9 | helm template "${chartName}"
10 | echo "::endgroup::"
11 |
--------------------------------------------------------------------------------
/.github/signature.asc:
--------------------------------------------------------------------------------
1 | -----BEGIN PGP PUBLIC KEY BLOCK-----
2 |
3 | mQGNBF/mlCYBDAC65s/GSbIjW8aFkc/+cedim6RA01fnvwi1PmxZwYq0QnT3w5sm
4 | XUHDyhgFH70UPNM+T9rJ9XB7rB5a710oeA4xAOPiDZ1GVV7/SX87MqUPVZ5OImjI
5 | sP9huWO9cS37ln04lW3k0/01jCzCQZbHnkYAvyyNx6a87bFs6vVd3tiXkThecJIX
6 | 9YKkXBkEmVtHHcwUZWVQu67/muxd+GY2y8fmf50qqrAouxIB8pnDhZr27QB4b6c/
7 | KfqTNyeyEOAvCPzHuop2Yt+fSc9xYGmNnpIcJ1mHZeTolDobaOADoyLnsUtjQG79
8 | luKFOAzJhpX+y1LRmknVewzeKkVz6I1FcUgY2nqpPv5iJj5cOqN5vCth3deYkonu
9 | WSyJ+Tv2a9Fg+0M0btULEuyGiFMkOYuRq22WzHgIB+208KhMjxzgPVF+fM13jo9S
10 | Lrda1TLlW0dLxmCHeA2/hyujqxFnS/xk47fU55Lv/bYTcZszWenQw4TOO3lPI+fp
11 | nBWPJm7NdVYZ1L8AEQEAAbQTY2NyZW1lci1jbHVzdGVyY29kZYkBzgQTAQoAOBYh
12 | BD0ltAE93hlcst8sZWnjfDx4IuaYBQJf5pQmAhsDBQsJCAcCBhUKCQgLAgQWAgMB
13 | Ah4BAheAAAoJEGnjfDx4IuaYHFIMAJD31P8Afis38dKMtgVyeoKlTbS8lxfi+BS6
14 | Ch6k/O0JlBENC1bNmLQXU5sNRLeWBK9cgJCynvP4qFy+r1hOFGV/axKBM1KJYWTL
15 | gJjukgSpYUpGIcXqpifEB8c+jddhm1/LwqkduB3hrpcWVih+jVSkzspcl1vlRQXq
16 | iUayxuqAOmmqwcNdEcMbI9bcoEVdndpvaefjIEkZuRdqGRCCKUcWTmr0iWx/xxW6
17 | +nI4skLPCRuivIWbU7mXUM/+gR5Egxg+pbr9E4JIwGbQam9rlcHdJkPLN/scPS3a
18 | cPfuuNtoi1dzr8TDYXz4pF52r8VB1YjPxWn0k82mxghgOmUbxOc7V3Mmt8weCug5
19 | H3Aq4BFcPWoVozPzH0QDGGR4X06G232F+EM87HfyXu7SnmflUot5TzStZuCYvWwR
20 | PZ8wFIwJ3Z/TPizdX8TtvV1Ojk58hf3vpZv1y/y/nv2hNdvCYm3P8DSvaw9PFfKx
21 | uL/r0GIC4hFYkDzrhllhCi7n5vEcjrkBjQRf5pQmAQwAshiisqVss0CYTcJ0GdwA
22 | RURVbyzqsInMjtfBhQIE5HEqIVnbG41eo7T/NP78p9QdKsbJ/EglYKyiyF+wJA8Q
23 | MBJUd6W9NFEaMvNcS0gT/nvMmDjSwWx8ahxIu+Mbr39ydxYpmjJEGmi9QC4M+huA
24 | M55/eEeasLnCo4ucsXEuEblcTncDY2gwJFd07FbRirNQoMCp4hr1fYSTNn2OMOJg
25 | TT5XCaFDwARTqZnymQ2uK5OK/BoClWve5nb9BJ7Q1cWIfLdyVQdx5krSUF9TUSlN
26 | 03TmBx7oMN3ngeS4mg+hxVwbxPOoGKSxJ50frF2tGwDRp4k3BG3RcG80ujhnwHsB
27 | b/eqS+57TJZ7hEz47chJt5TzDQ4cD+yxRLBVby9Tnm2xZI4wy1afePYogrLdKqgn
28 | WVIlbEdn2mBj9wplL8Wu9MI8lfqbQCiGACSlVislJHxke5XO3p+1op3CnVzk09OP
29 | p2Z/DCsPC52UMMHhqnw0z2BY4HbDX0G2RCRuxLWYX/j/ABEBAAGJAbYEGAEKACAW
30 | IQQ9JbQBPd4ZXLLfLGVp43w8eCLmmAUCX+aUJgIbDAAKCRBp43w8eCLmmNbnDACZ
31 | MYHtLqiezR8H2XfZ8NTjrZS5ZEaO6lD/R3TOwxJ6DVHx5+zu8wxOyQNSLoJnpU0K
32 | NVNzv6GrpY5Zi2V4HtQZkdvF3mRW4RrYOLUb892Mtv0JE0kA3yqaZanJzjgk57hb
33 | XFjehTteT1CQWhnOmSaQMH2QrrAJUH3IUi902UJp7nG59abtdyTQL7FQxTC1owmw
34 | xjDGxiq3xJsi0rEYdg6QjBBbPI/FC/SUzX4CQpWk3i+sUqN+DALWcpDQsFIhZLHi
35 | Z5MZ/5Hby2SqaS6yZp6rR10jOaL1AKhyTCXbspZ4aw5cKc0PO0AQlJiL+ARxNapo
36 | uGlXPhFZxNAEZ0qNt0a1Sl/fy/fx+WvVv8ibhagRc6wgcpa52k0lwmCzvfXnb4Hg
37 | uAyl0cIIaLeutBwcx1tT2DCk5ycFrElD9AZ+lzcdQHTg8ZqjoUjStpeV8ACtw40F
38 | G9Kv2DgEousnebXESHw/4J9IoprHGoZTg21DngO/FjF6tteLVBJCuc2kM0gz9AU=
39 | =K2ZZ
40 | -----END PGP PUBLIC KEY BLOCK-----
41 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: Build
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - master
7 | paths-ignore:
8 | - charts/**
9 | - docs/**
10 |
11 | jobs:
12 | go:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 |
17 | - uses: actions/setup-go@v4
18 | with:
19 | go-version-file: 'go.mod'
20 |
21 | - name: Run build
22 | run: make build-docker
23 |
24 | vite:
25 | runs-on: ubuntu-latest
26 | steps:
27 | - uses: actions/checkout@v3
28 |
29 | - uses: actions/setup-node@v3
30 | with:
31 | node-version: 16
32 | cache: npm
33 | cache-dependency-path: ui/package-lock.json
34 |
35 | - name: Run build
36 | run: make build-ui
37 |
--------------------------------------------------------------------------------
/.github/workflows/chart-lint.yml:
--------------------------------------------------------------------------------
1 | name: ChartLint
2 |
3 | on:
4 | pull_request:
5 | # only run when there are chart changes
6 | paths:
7 | - 'charts/**'
8 | - '!charts/charts.mk'
9 |
10 | jobs:
11 | lint:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v3
15 |
16 | - uses: actions/setup-go@v4
17 | with:
18 | go-version-file: 'go.mod'
19 |
 20 |       - name: Verify charts are up-to-date
21 | run: make chart-lint
22 |
23 | template:
24 | runs-on: ubuntu-latest
25 | steps:
26 | - uses: actions/checkout@v3
27 |
28 | - name: Render Helm charts
29 | run: find charts -type f -name Chart.yaml -exec .github/render-charts.sh {} \;
30 |
--------------------------------------------------------------------------------
/.github/workflows/chart-release.yml:
--------------------------------------------------------------------------------
1 | name: ChartRelease
2 |
3 | on:
4 | push:
5 | tags:
 6 |       - "chart/[a-z0-9]+-*" # match tags following the 'chart/$chartname-x.y.z' pattern
7 |
8 | jobs:
9 | gh-pages:
10 | runs-on: ubuntu-latest
11 | steps:
12 | # We can't use 'go install' due to some go.mod conflicts.
13 | - name: Download cr
14 | uses: giantswarm/install-binary-action@v1.1.0
15 | with:
16 | binary: cr
17 | version: "1.4.1"
18 | download_url: https://github.com/helm/chart-releaser/releases/download/v${version}/chart-releaser_${version}_linux_amd64.tar.gz
19 | tarball_binary_path: "${binary}"
20 | smoke_test: "${binary} version"
21 |
22 | - name: Checkout
23 | uses: actions/checkout@v3
24 | with:
25 | fetch-depth: '0'
26 |
27 | - uses: actions/setup-go@v4
28 | with:
29 | go-version-file: 'go.mod'
30 |
31 | - name: Configure Git
32 | run: |
33 | git config user.name "$GITHUB_ACTOR"
34 | git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
35 | - name: Get chart name
36 | run: echo "CHART_NAME=$(echo ${GITHUB_REF##*/} | grep --perl-regexp --only-matching '^([a-zA-Z0-9-]+)(?![0-9.]+)')" >> $GITHUB_ENV
37 |
38 | - name: Get chart versions
39 | run: |
40 | echo "CHART_VERSION=$(yq e '.version' charts/${CHART_NAME}/Chart.yaml)" >> $GITHUB_ENV
41 | echo "PREVIOUS_CHART_VERSION=$(git tag --sort=taggerdate --list "chart/${CHART_NAME}-*" | tail -n 2 | head -n 1 | rev | cut -d - -f 1 | rev)" >> $GITHUB_ENV
42 | - name: Prepare changelog config
43 | run: .github/changelog-charts.sh ${CHART_NAME}
44 |
45 | - name: Generate additional artifacts
46 | run: make chart-generate
47 |
48 | - name: Build changelog from PRs with labels
49 | id: build_changelog
50 | uses: mikepenz/release-changelog-builder-action@v3
51 | with:
52 | configuration: ".github/configuration.json"
53 | ignorePreReleases: true
54 | outputFile: charts/${{ env.CHART_NAME}}/CHANGELOG.md
55 | fromTag: chart/${{ env.CHART_NAME }}-${{ env.PREVIOUS_CHART_VERSION }}
56 | env:
57 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
58 |
59 | - name: Package Helm chart
60 | run: |
61 | make chart-release
62 | env:
63 | CR_TOKEN: ${{ secrets.GITHUB_TOKEN }}
64 | CR_OWNER: ${{ github.repository_owner }}
65 | CR_GIT_REPO: ${{ github.event.repository.name }}
66 |
 67 |       # there don't seem to be any maintained GitHub Actions that allow uploading assets after a release has been made.
68 | - name: Update release
69 | run: |
70 | gh release upload chart/${CHART_NAME}-${CHART_VERSION} .github/crds.yaml
71 | env:
72 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
73 |
--------------------------------------------------------------------------------
/.github/workflows/docs.yml:
--------------------------------------------------------------------------------
1 | name: Docs
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | tags:
8 | - "v*"
9 |
10 | jobs:
11 | antora:
12 | runs-on: ubuntu-latest
13 | if: ${{ contains(github.ref, 'tags') }}
14 | steps:
15 | - uses: actions/checkout@v3
16 | with:
17 | fetch-depth: 0
18 |
19 | - name: Configure Git
20 | run: |
21 | git config user.name "Antora via GitHub Actions"
22 | git config user.email "actions@github.com"
23 |
24 | - name: Parse semver string
25 | id: semver
26 | uses: booxmedialtd/ws-action-parse-semver@v1
27 | with:
28 | input_string: ${{ github.ref }}
29 | version_extractor_regex: '\/v(.*)$'
30 |
31 | - name: Set variables
32 | run: |
33 | echo "MINOR_VERSION=${{ steps.semver.outputs.major }}.${{ steps.semver.outputs.minor }}" >> $GITHUB_ENV
34 | echo "BRANCH_NAME=docs/v${{ steps.semver.outputs.major }}.${{ steps.semver.outputs.minor }}" >> $GITHUB_ENV
35 | - name: Set branch name for Prerelease
36 | if: ${{ steps.semver.outputs.prerelease != '' }}
37 | run: echo "BRANCH_NAME=${{ env.BRANCH_NAME }}-rc" >> $GITHUB_ENV
38 |
39 | - name: Checkout remote branch if exists
40 | run: git checkout ${{ env.BRANCH_NAME }}
41 | continue-on-error: true
42 | - name: Rebase if possible
43 | run: git rebase ${GITHUB_REF##*/} ${{ env.BRANCH_NAME }}
44 | continue-on-error: true
45 | - name: Create new branch if not existing
46 | run: git switch --create ${{ env.BRANCH_NAME }}
47 | continue-on-error: true
48 |
49 | - name: Patch Antora file for Release
50 | run: yq eval 'del(.prerelease) | del (.display_version) | .version = "${{ env.MINOR_VERSION }}"' -i docs/antora.yml
51 | if: ${{ steps.semver.outputs.prerelease == '' }}
52 | - name: Patch Antora file for Prerelease
53 | run: yq eval 'del (.display_version) | .version = "${{ env.MINOR_VERSION }}", .prerelease = "-${{ steps.semver.outputs.prerelease }}"' -i docs/antora.yml
54 | if: ${{ steps.semver.outputs.prerelease != '' }}
55 |
56 | - name: Commit
57 | run: git commit --all --message "Update version for Antora"
58 | continue-on-error: true
59 | - name: Push
60 | run: git push --atomic --force --set-upstream origin ${{ env.BRANCH_NAME }}
61 |
62 | - name: Cleanup prerelease branch if existing
63 | if: ${{ steps.semver.outputs.prerelease == '' }}
64 | run: git push origin --delete ${{ env.BRANCH_NAME }}-rc
65 | continue-on-error: true
66 |
67 | gh-pages:
68 | runs-on: ubuntu-latest
69 | # These will cause this job to wait until Antora versioning is done for tags, but still run on master branch
70 | needs: antora
71 | if: always()
72 | steps:
73 | - uses: actions/checkout@v3
74 | with:
75 | fetch-depth: 0
76 |
77 | - name: Configure Git
78 | run: |
79 | git remote set-url origin "https://x-access-token:${{ secrets.GITHUB_TOKEN }}@github.com/${{ github.repository }}"
80 | git config user.name "Antora via GitHub Actions"
81 | git config user.email "actions@github.com"
82 |
83 | - name: Publish documentation
84 | run: make docs-publish
85 |
--------------------------------------------------------------------------------
/.github/workflows/e2e.yaml:
--------------------------------------------------------------------------------
1 | name: E2E
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - master
7 | paths-ignore:
8 | - docs/**
9 |
10 | jobs:
11 | kuttl:
12 | runs-on: ubuntu-latest
13 | steps:
14 | - uses: actions/checkout@v3
15 |
16 | - uses: actions/setup-go@v4
17 | with:
18 | go-version-file: 'go.mod'
19 |
20 | - name: Run tests
21 | run: make test-e2e
22 |
23 | - name: Cleanup
24 | run: make clean
25 | if: always()
26 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 |
3 | on:
4 | pull_request: {}
5 |
6 | jobs:
7 | go:
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/checkout@v3
11 |
12 | - uses: actions/setup-go@v4
13 | with:
14 | go-version-file: 'go.mod'
15 |
16 | - name: Run linters
17 | run: make lint-go git-diff
18 |
19 | - name: golangci-lint
20 | uses: golangci/golangci-lint-action@v3
21 | with:
22 | version: latest
23 | skip-pkg-cache: true
24 | args: --timeout 5m --out-${NO_FUTURE}format colored-line-number
25 |
26 | ui:
27 | runs-on: ubuntu-latest
28 | steps:
29 | - uses: actions/checkout@v3
30 |
31 | - uses: actions/setup-node@v3
32 | with:
33 | node-version: 16
34 | cache: npm
35 | cache-dependency-path: ui/package-lock.json
36 |
37 | - name: Run linters
38 | run: make lint-ui git-diff
39 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release
2 |
3 | on:
4 | push:
5 | tags:
6 | - "v*"
7 |
8 | env:
9 | CONTAINER_REGISTRY: ghcr.io
10 |
11 | jobs:
12 | dist:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 | with:
17 | fetch-depth: 0
18 |
19 | - name: Set up QEMU
20 | uses: docker/setup-qemu-action@v2
21 |
22 | - name: Set up Docker Buildx
23 | uses: docker/setup-buildx-action@v2
24 |
25 | - uses: actions/setup-go@v4
26 | with:
27 | go-version-file: 'go.mod'
28 |
29 | - uses: actions/setup-node@v3
30 | with:
31 | node-version: 16
32 | cache: npm
33 | cache-dependency-path: ui/package-lock.json
34 |
35 | - name: Import GPG signing key
36 | uses: crazy-max/ghaction-import-gpg@v5
37 | with:
38 | gpg_private_key: ${{ secrets.SIGNING_KEY }}
39 |
40 | - name: Login to ${{ env.CONTAINER_REGISTRY }}
41 | uses: docker/login-action@v2
42 | with:
43 | registry: ${{ env.CONTAINER_REGISTRY }}
44 | username: ${{ github.repository_owner }}
45 | password: ${{ secrets.GITHUB_TOKEN }}
46 |
47 | - name: Generate artifacts
48 | run: make release-prepare
49 |
50 | - name: Build changelog from PRs with labels
51 | id: build_changelog
52 | uses: mikepenz/release-changelog-builder-action@v3
53 | with:
54 | configuration: ".github/changelog-configuration.json"
55 | outputFile: .github/release-notes.md
56 | # PreReleases still get a changelog, but the next full release gets a diff since the last full release,
57 | # combining possible changelogs of all previous PreReleases in between.
58 | # PreReleases show a partial changelog since last PreRelease.
59 | ignorePreReleases: "${{ !contains(github.ref, '-rc') }}"
60 | env:
61 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
62 |
63 | - name: Publish releases
64 | uses: goreleaser/goreleaser-action@v4
65 | with:
66 | args: release --release-notes .github/release-notes.md
67 | env:
68 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
69 | IMAGE_NAME: ${{ github.repository }}
70 |
--------------------------------------------------------------------------------
/.github/workflows/test.yml:
--------------------------------------------------------------------------------
1 | name: Test
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - master
7 | paths-ignore:
8 | - charts/**
9 | - docs/**
10 |
11 | jobs:
12 | go:
13 | runs-on: ubuntu-latest
14 | steps:
15 | - uses: actions/checkout@v3
16 |
17 | - uses: actions/setup-go@v4
18 | with:
19 | go-version-file: 'go.mod'
20 |
21 | - name: Run tests
22 | run: make test-integration
23 |
24 | cypress:
25 | runs-on: ubuntu-latest
26 | steps:
27 | - uses: actions/checkout@v3
28 |
29 | - uses: actions/setup-node@v3
30 | with:
31 | node-version: 16
32 | cache: npm
33 | cache-dependency-path: ui/package-lock.json
34 |
35 | - name: Run tests
36 | uses: cypress-io/github-action@v4
37 | with:
38 | component: true
39 | working-directory: ui
40 |
41 | - uses: actions/upload-artifact@v3
42 | if: failure()
43 | with:
44 | name: e2e-videos
45 | path: ui/cypress/videos
46 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Goreleaser
2 | /dist/
3 | /.github/release-notes.md
4 | /.github/crds.yaml
5 | /.github/ui.tar.gz
6 |
7 | # Build
8 | /clustercode
9 | *.out
10 |
11 | # Docs
12 | /.cache/
13 | /.public/
14 | /node_modules/
15 |
16 | # work
17 | /.work/
18 |
19 | # Media
20 | /data/intermediate/**
21 | /data/source/**
22 | /data/target/**
23 |
24 | # Chart
25 | /charts/**/CHANGELOG.md
26 | /.cr-index/
27 | /.cr-release-packages/
28 |
--------------------------------------------------------------------------------
/.goreleaser.yml:
--------------------------------------------------------------------------------
 1 | # Make sure to check the documentation at https://goreleaser.com
2 | builds:
3 | - env:
4 | - CGO_ENABLED=0 # this is needed otherwise the Docker image build is faulty
5 | goarch:
6 | - amd64
7 | - arm64
8 | goos:
9 | - linux
10 | goarm:
11 | - 8
12 | flags:
13 | - -tags=ui
14 |
15 | archives:
16 | - format: binary
17 | name_template: "{{ .Binary }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
18 |
19 | checksum:
20 | name_template: checksums.txt
21 | extra_files:
22 | - glob: ./.github/crds.yaml
23 | - glob: ./.github/ui.tar.gz
24 |
25 | snapshot:
26 | name_template: "{{ .Tag }}-snapshot"
27 |
28 | signs:
29 | - artifacts: checksum
30 | args: ["-u", "3D25B4013DDE195CB2DF2C6569E37C3C7822E698", "--output", "${signature}", "--detach-sign", "${artifact}"]
31 |
32 | nfpms:
33 | - vendor: ccremer
34 | homepage: https://github.com/ccremer/clustercode
35 | maintainer: ccremer
36 | description: Clustercode converts media files in Kubernetes with ffmpeg
37 | license: MIT
38 | file_name_template: "{{ .Binary }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}"
39 | formats:
40 | - deb
41 | - rpm
42 |
43 | dockers:
44 | - goarch: amd64
45 | use: buildx
46 | build_flag_templates:
47 | - "--platform=linux/amd64"
48 | image_templates:
49 | - "{{ .Env.CONTAINER_REGISTRY }}/{{ .Env.IMAGE_NAME }}:v{{ .Version }}-amd64"
50 |
51 | - goarch: arm64
52 | use: buildx
53 | build_flag_templates:
54 | - "--platform=linux/arm64/v8"
55 | image_templates:
56 | - "{{ .Env.CONTAINER_REGISTRY }}/{{ .Env.IMAGE_NAME }}:v{{ .Version }}-arm64"
57 |
58 | docker_manifests:
59 | ## ghcr.io
60 | # For prereleases, updating `latest` does not make sense.
61 | # Only the image for the exact version should be pushed.
62 | - name_template: "{{ if not .Prerelease }}{{ .Env.CONTAINER_REGISTRY }}/{{ .Env.IMAGE_NAME }}:latest{{ end }}"
63 | image_templates:
64 | - "{{ .Env.CONTAINER_REGISTRY }}/{{ .Env.IMAGE_NAME }}:v{{ .Version }}-amd64"
65 | - "{{ .Env.CONTAINER_REGISTRY }}/{{ .Env.IMAGE_NAME }}:v{{ .Version }}-arm64"
66 |
67 | - name_template: "{{ .Env.CONTAINER_REGISTRY }}/{{ .Env.IMAGE_NAME }}:v{{ .Version }}"
68 | image_templates:
69 | - "{{ .Env.CONTAINER_REGISTRY }}/{{ .Env.IMAGE_NAME }}:v{{ .Version }}-amd64"
70 | - "{{ .Env.CONTAINER_REGISTRY }}/{{ .Env.IMAGE_NAME }}:v{{ .Version }}-arm64"
71 |
72 | release:
73 | prerelease: auto
74 | extra_files:
75 | - glob: ./.github/crds.yaml
76 | - glob: ./.github/ui.tar.gz
77 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/alpine:3.17 as runtime
2 |
3 | ENTRYPOINT ["clustercode"]
4 |
5 | RUN \
6 | apk add --update --no-cache \
7 | bash \
8 | curl \
9 | ca-certificates \
10 | tzdata
11 |
12 | COPY clustercode /usr/bin/
13 | USER 65536:0
14 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Chris
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Set Shell to bash, otherwise some targets fail with dash/zsh etc.
2 | SHELL := /bin/bash
3 | .SHELLFLAGS := -eu -o pipefail -c
4 |
5 | # Disable built-in rules
6 | MAKEFLAGS += --no-builtin-rules
7 | MAKEFLAGS += --no-builtin-variables
8 | .SUFFIXES:
9 | .SECONDARY:
10 | .DEFAULT_GOAL := help
11 |
12 | # extensible array of targets. Modules can add target to this variable for the all-in-one target.
13 | clean_targets := build-clean release-clean
14 | test_targets := test-unit
15 |
16 | # General variables
17 | include Makefile.vars.mk
18 |
19 | # Following includes do not print warnings or error if files aren't found
20 | # Optional Documentation module.
21 | -include docs/docs.mk
22 | # Optional kind module
23 | -include kind/kind.mk
24 | # Optional Helm chart module
25 | -include charts/charts.mk
26 | # Local Env & testing
27 | include test/integration.mk test/e2e.mk test/media.mk
28 | # UI
29 | include ui/ui.mk
30 |
31 | .PHONY: help
32 | help: ## Show this help
33 | @grep -E -h '\s##\s' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}'
34 |
35 | .PHONY: build
36 | build: build-ui build-docker ## All-in-one build
37 |
38 | .PHONY: build-bin
39 | build-bin: export CGO_ENABLED = 0
40 | build-bin: fmt vet ## Build binary
41 | @go build $(go_build_args) -o $(BIN_FILENAME) .
42 |
43 | .PHONY: build-docker
44 | build-docker: build-bin ## Build docker image
45 | $(DOCKER_CMD) build -t $(CONTAINER_IMG) .
46 |
47 | build-clean: ## Deletes binary and docker image
48 | rm -rf $(BIN_FILENAME) dist/ cover.out
49 | $(DOCKER_CMD) rmi $(CONTAINER_IMG) || true
50 |
51 | .PHONY: test
52 | test: $(test_targets) ## All-in-one test
53 |
54 | .PHONY: test-unit
55 | test-unit: ## Run unit tests against code
56 | go test -race -coverprofile cover.out -covermode atomic ./...
57 |
58 | .PHONY: fmt
59 | fmt: ## Run 'go fmt' against code
60 | go fmt ./...
61 |
62 | .PHONY: vet
63 | vet: ## Run 'go vet' against code
64 | go vet ./...
65 |
66 | .PHONY: lint
67 | lint: lint-go lint-ui git-diff ## All-in-one linting
68 |
69 | .PHONY: lint-go
70 | lint-go: fmt vet generate ## Run linting for Go code
71 |
72 | .PHONY: git-diff
73 | git-diff:
74 | @echo 'Check for uncommitted changes ...'
75 | git diff --exit-code
76 |
77 | .PHONY: generate
78 | generate: generate-go generate-docs ## All-in-one code generation
79 |
80 | .PHONY: generate-go
81 | generate-go: ## Generate Go artifacts
82 | @go generate ./...
83 |
84 | .PHONY: generate-docs
85 | generate-docs: generate-go ## Generate example code snippets for documentation
86 |
87 | .PHONY: install-crd
88 | install-crd: export KUBECONFIG = $(KIND_KUBECONFIG)
89 | install-crd: generate kind-setup ## Install CRDs into cluster
90 | kubectl apply -f package/crds
91 |
92 | .PHONY: install-samples
93 | install-samples: export KUBECONFIG = $(KIND_KUBECONFIG)
94 | install-samples: kind-setup ## Install samples into cluster
95 | yq ./samples/*.yaml | kubectl apply -f -
96 |
97 | .PHONY: delete-samples
98 | delete-samples: export KUBECONFIG = $(KIND_KUBECONFIG)
99 | delete-samples: kind-setup
100 | yq ./samples/*.yaml | kubectl delete --ignore-not-found -f -
101 |
102 | .PHONY: run-operator
103 | run-operator: ## Run in Operator mode against your current kube context
104 | go run . -v 1 operator
105 |
106 | .PHONY: run-webui
107 | run-webui: ## Run in webui mode on localhost:8080
108 | go run . -v 1 webui
109 |
110 | .PHONY: run
111 | run: ## Run webui and vite in local mode
112 | $(MAKE) -j 2 run-webui run-ui
113 |
114 | .PHONY: release-prepare
115 | release-prepare: .github/crds.yaml .github/ui.tar.gz ## Prepares artifacts for releases
116 |
117 | .PHONY: release-clean
118 | release-clean:
119 | rm -rf .github/ui.tar.gz .github/crds.yaml
120 |
121 | .github/crds.yaml: generate-go
122 | @cat package/crds/*.yaml | yq > .github/crds.yaml
123 |
124 | .github/ui.tar.gz: build-ui
125 | @tar -czf .github/ui.tar.gz ui/dist
126 |
127 | .PHONY: clean
128 | clean: $(clean_targets) ## All-in-one target to cleanup local artifacts
129 |
--------------------------------------------------------------------------------
/Makefile.vars.mk:
--------------------------------------------------------------------------------
1 | ## These are some common variables for Make
2 |
3 | PROJECT_ROOT_DIR = .
4 | PROJECT_NAME ?= clustercode
5 | PROJECT_OWNER ?= ccremer
6 |
7 | WORK_DIR = $(PWD)/.work
8 |
9 | ## BUILD:go
10 | BIN_FILENAME ?= $(PROJECT_NAME)
11 | go_bin ?= $(WORK_DIR)/bin
12 | $(go_bin):
13 | @mkdir -p $@
14 |
15 | ## BUILD:docker
16 | DOCKER_CMD ?= docker
17 |
18 | IMG_TAG ?= latest
19 | CONTAINER_REGISTRY ?= ghcr.io
20 | # Image URL to use all building/pushing image targets
21 | CONTAINER_IMG ?= $(CONTAINER_REGISTRY)/$(PROJECT_OWNER)/$(PROJECT_NAME):$(IMG_TAG)
22 |
23 | ## KIND:setup
24 |
25 | # https://hub.docker.com/r/kindest/node/tags
26 | KIND_NODE_VERSION ?= v1.24.0
27 | KIND_IMAGE ?= docker.io/kindest/node:$(KIND_NODE_VERSION)
28 | KIND_KUBECONFIG ?= $(kind_dir)/kind-kubeconfig
29 | KIND_CLUSTER ?= $(PROJECT_NAME)
30 |
31 | # TEST:integration
32 | ENVTEST_ADDITIONAL_FLAGS ?= --bin-dir "$(go_bin)"
33 | # See https://storage.googleapis.com/kubebuilder-tools/ for list of supported K8s versions
34 | ENVTEST_K8S_VERSION = 1.24.x
35 | INTEGRATION_TEST_DEBUG_OUTPUT ?= false
36 |
37 | ## MEDIA:
38 | FFMPEG_IMG ?= ghcr.io/jrottenberg/ffmpeg:5.0-alpine
39 |
--------------------------------------------------------------------------------
/README.adoc:
--------------------------------------------------------------------------------
1 | ifndef::env-github[:icons: font]
2 | ifdef::env-github[]
3 | :tip-caption: :bulb:
4 | :note-caption: :information_source:
5 | :important-caption: :heavy_exclamation_mark:
6 | :caution-caption: :fire:
7 | :warning-caption: :warning:
8 | :ext-relative: {outfilesuffix}
9 | endif::[]
10 |
11 | = clustercode
12 |
13 | image:https://img.shields.io/github/workflow/status/ccremer/clustercode/Test[Test,link=https://github.com/ccremer/clustercode/actions?query=workflow%3ATest]
14 | image:https://img.shields.io/github/go-mod/go-version/ccremer/clustercode[Go version]
15 | image:https://img.shields.io/github/v/release/ccremer/clustercode?include_prereleases[Version,link=https://github.com/ccremer/clustercode/releases]
16 |
17 | Automatically convert your movies and TV shows from one file format to another using ffmpeg in a cluster.
18 | It's like an Ffmpeg operator!
19 |
20 | image::docs/modules/ROOT/assets/images/clustercode-overview.drawio.svg[]
21 |
22 | == Features
23 |
24 | * Scans and encodes video files from a directory and encodes them using customizable blueprints.
25 | * Encoded files are stored in an output directory.
26 | * Schedule Scans for new files with Cron.
27 | * Take advantage of having multiple computers: Each Pod encodes a segment, enabling parallelization.
28 | * Works on single nodes too, but you might not get any speed benefits (in fact it's generating overhead).
29 |
30 | == Documentation
31 |
32 | See https://ccremer.github.io/clustercode-docs[ccremer.github.io/clustercode-docs]
33 |
34 | == Installation
35 |
36 | NOTE: Helm is coming.
37 |
38 | === Supported storage types
39 |
40 | All file-writable ReadWriteMany volumes available in Kubernetes PersistentVolumeClaims.
41 |
42 | == Project status
43 |
44 | Clustercode 2.0 is released **as a Proof-of-concept** and no new changes will be made to old https://github.com/ccremer/clustercode/tree/1.3.1[1.3 release].
45 |
46 | The code is ugly, the documentation is nonexistent, and only the happy path works.
47 | But feel free to try "early access" and report stuff.
48 |
49 | == Image Tags
50 |
51 | * `latest`: Floating image tag that points to the latest stable release.
52 | * `vx.y.z`: tags following the x.y.z pattern are specific releases following the SemVer scheme.
53 |
--------------------------------------------------------------------------------
/charts/charts.mk:
--------------------------------------------------------------------------------
1 | helm_docs_bin := $(go_bin)/helm-docs
2 |
3 | clean_targets += chart-clean
4 |
5 | # Prepare binary
6 | $(helm_docs_bin): export GOBIN = $(go_bin)
7 | $(helm_docs_bin): | $(go_bin)
8 | go install github.com/norwoodj/helm-docs/cmd/helm-docs@latest
9 |
10 | .PHONY: chart-generate
11 | chart-generate: .github/crds.yaml ## Prepare the Helm charts
12 | @find charts -type f -name Makefile | sed 's|/[^/]*$$||' | xargs -I '%' $(MAKE) -C '%' prepare
13 |
14 | .PHONY: chart-docs
15 | chart-docs: $(helm_docs_bin) ## Creates the Chart READMEs from template and values.yaml files
16 | @$(helm_docs_bin) \
17 | --template-files ./.github/helm-docs-header.gotmpl.md \
18 | --template-files README.gotmpl.md \
19 | --template-files ./.github/helm-docs-footer.gotmpl.md
20 |
21 | .PHONY: chart-lint
22 | chart-lint: chart-generate chart-docs ## Lint charts
23 | @echo 'Check for uncommitted changes ...'
24 | git diff --exit-code
25 |
26 | .PHONY: chart-clean
27 | chart-clean: ## Clean the Helm chart artifacts
28 | rm -rf $(helm_docs_bin) .cr-index .cr-release-packages charts/*/CHANGELOG.md
29 |
30 | .PHONY: chart-release
31 | chart-release: | .cr-index ## Release the Helm chart to GitHub
32 | # CHART_NAME is given by GH action
33 | # Download 'cr' manually from https://github.com/helm/chart-releaser/releases, 'go install' doesn't work...
34 | cr package charts/$(CHART_NAME)
35 | cr upload "--release-name-template=chart/{{ .Name }}-{{ .Version }}" --release-notes-file=CHANGELOG.md
36 | cr index "--release-name-template=chart/{{ .Name }}-{{ .Version }}" --push
37 |
38 | .cr-index:
39 | mkdir -p $@
40 |
--------------------------------------------------------------------------------
/charts/clustercode/.helmignore:
--------------------------------------------------------------------------------
1 | # Patterns to ignore when building packages.
2 | # This supports shell glob matching, relative path matching, and
3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store
5 | # Common VCS dirs
6 | .git/
7 | .gitignore
8 | .bzr/
9 | .bzrignore
10 | .hg/
11 | .hgignore
12 | .svn/
13 | # Common backup files
14 | *.swp
15 | *.bak
16 | *.tmp
17 | *.orig
18 | *~
19 | # Various IDEs
20 | .project
21 | .idea/
22 | *.tmproj
23 | .vscode/
24 |
25 | Makefile
26 | *gotmpl*
27 | .helmignore
28 |
29 | # Unit tests
30 | /test
31 |
--------------------------------------------------------------------------------
/charts/clustercode/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: clustercode
3 | description: Movie and Series conversion Operator with Ffmpeg
4 |
5 | # A chart can be either an 'application' or a 'library' chart.
6 | #
7 | # Application charts are a collection of templates that can be packaged into versioned archives
8 | # to be deployed.
9 | #
10 | # Library charts provide useful utilities or functions for the chart developer. They're included as
11 | # a dependency of application charts to inject those utilities and functions into the rendering
12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed.
13 | type: application
14 |
15 | # This is the chart version. This version number should be incremented each time you make changes
16 | # to the chart and its templates, including the app version.
17 | # Versions are expected to follow Semantic Versioning (https://semver.org/)
18 | version: 0.3.1
19 |
20 | # This is the version number of the application being deployed. This version number should be
21 | # incremented each time you make changes to the application. Versions are not expected to
22 | # follow Semantic Versioning. They should reflect the version the application is using.
23 | #appVersion: latest
24 |
25 | sources:
26 | - https://github.com/ccremer/clustercode
27 |
28 | home: https://ccremer.github.io/clustercode
29 |
--------------------------------------------------------------------------------
/charts/clustercode/Makefile:
--------------------------------------------------------------------------------
1 | MAKEFLAGS += --warn-undefined-variables
2 | SHELL := bash
3 | .SHELLFLAGS := -eu -o pipefail -c
4 | .DEFAULT_GOAL := help
5 | .DELETE_ON_ERROR:
6 | .SUFFIXES:
7 |
8 | webhook_gen_src = ../../package/webhook/manifests.yaml
9 | webhook_gen_tgt = templates/webhook/webhook-config.yaml
10 |
11 | rbac_gen_src = ../../package/rbac/role.yaml
12 | rbac_gen_tgt = templates/operator/clusterrole.yaml
13 |
14 | ifeq ($(shell uname -s),Darwin)
15 | sed := gsed -i
16 | else
17 | sed := sed -i
18 | endif
19 |
20 | .PHONY: $(webhook_gen_tgt)
21 | $(webhook_gen_tgt):
22 | @cp $(webhook_gen_src) $@
23 | @yq -i e '.metadata.name="{{ include \"clustercode.fullname\" . }}", del(.metadata.creationTimestamp)' $@
24 | @yq -i e '.metadata.labels.replace="LABELS"' $@
25 | @yq -i e '.metadata.annotations.replace="ANNOTATIONS"' $@
26 | @yq -i e '.webhooks[0].clientConfig.caBundle="{{ . }}"' $@
27 | @yq -i e '.webhooks[0].clientConfig.service.name="{{ include \"clustercode.fullname\" . }}-webhook"' $@
28 | @yq -i e '.webhooks[0].clientConfig.service.namespace="{{ .Release.Namespace }}"' $@
29 | @$(sed) -e '1s/^/{{- if .Values.webhook.enabled -}}\n/' $@
30 | @$(sed) -e '/^ caBundle:.*/i {{- with .Values.webhook.caBundle }}' $@
31 | @$(sed) -e '/^ caBundle:.*/a {{- end }}' $@
32 | @$(sed) -e 's/replace: LABELS/{{- include "clustercode.labels" . | nindent 4 }}/g' $@
33 | @$(sed) -e 's/replace: ANNOTATIONS/{{- toYaml .Values.webhook.annotations | nindent 4 }}/g' $@
34 | @echo "{{- end -}}" >> $@
35 |
36 | .PHONY: $(rbac_gen_tgt)
37 | $(rbac_gen_tgt):
38 | @cp $(rbac_gen_src) $@
39 | @yq -i e '.metadata.name="{{ include \"clustercode.fullname\" . }}-operator", del(.metadata.creationTimestamp)' $@
40 | @yq -i e '.metadata.labels.replace="LABELS"' $@
41 | @$(sed) -e '1s/^/{{- if .Values.operator.enabled -}}\n/' $@
42 | @$(sed) -e 's/replace: LABELS/{{- include "clustercode.labels" . | nindent 4 }}/g' $@
43 | @echo "{{- end -}}" >> $@
44 |
45 | #
46 | # "Interface" for parent Makefile
47 | #
48 | .PHONY: prepare
49 | prepare: $(rbac_gen_tgt) $(webhook_gen_tgt) ## Helmify generated artifacts
50 |
--------------------------------------------------------------------------------
/charts/clustercode/README.gotmpl.md:
--------------------------------------------------------------------------------
1 |
6 |
7 | ## Installation
8 |
9 | Install the CRDs:
10 | ```bash
11 | kubectl apply -f https://github.com/ccremer/clustercode/releases/download/{{ template "chart.name" . }}-{{ template "chart.version" . }}/crds.yaml
12 | ```
13 |
14 | To prepare the webhook server, you need `yq`, `openssl`, `base64` tools and run this:
15 | ```bash
16 | webhook_service_name=clustercode-webhook.clustercode-system.svc # Change this!
17 |
18 | openssl req -x509 -newkey rsa:4096 -nodes -keyout tls.key --noout -days 3650 -subj "/CN=${webhook_service_name}" -addext "subjectAltName = DNS:${webhook_service_name}"
19 | openssl req -x509 -key tls.key -nodes -out tls.crt -days 3650 -subj "/CN=${webhook_service_name}" -addext "subjectAltName = DNS:${webhook_service_name}"
20 |
21 | yq -n '.webhook.caBundle="$(base64 -w0 tls.crt)" | .webhook.certificate="$(base64 -w0 tls.crt)" | .webhook.privateKey="$(base64 -w0 tls.key)"' > webhook-values.yaml
22 | ```
23 |
24 | Install the chart:
25 | ```bash
26 | helm repo add clustercode https://ccremer.github.io/clustercode
27 | helm install {{ template "chart.name" . }} clustercode/{{ template "chart.name" . }} \
28 | --create-namespace \
29 | --namespace clustercode-system \
30 | --values webhook-values.yaml
31 | ```
32 | (Note that the name and namespace must match the certificate you created in the step before.)
33 |
34 | ### WebUI
35 |
36 | By default, the WebUI is also installed.
37 | To log into the frontend, you must provide Kubernetes tokens in the login form as the frontend talks directly to the Kubernetes API.
38 |
39 | To get a token, you can create Service Accounts with the `webui.users` parameter.
40 | Once deployed, get the token by the following command:
41 |
42 | ```bash
43 | kubectl -n clustercode-system get secret clustercode-webadmin -o jsonpath='{.data.token}' | base64 -d
44 | ```
45 |
46 | Alternatively, set `.skipSecret` in `webui.users[*]` to skip creating a Secret for the Service Account.
47 | To get a time-limited token without permanent Secret, you can generate one with kubectl:
48 |
49 | ```bash
50 | kubectl -n clustercode-system create token clustercode-webadmin
51 | ```
52 |
53 | ## Handling CRDs
54 |
55 | * Always upgrade the CRDs before upgrading the Helm release.
56 | * Watch out for breaking changes in the {{ title .Name }} release notes.
57 |
58 | {{ template "chart.sourcesSection" . }}
59 |
60 | {{ template "chart.requirementsSection" . }}
61 |
66 | {{ template "chart.valuesSection" . }}
67 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/NOTES.txt:
--------------------------------------------------------------------------------
1 | #####################
2 | ! Attention !
3 | #####################
4 |
5 | This Helm chart does not include CRDs.
6 | Please make sure you have installed or upgraded the necessary CRDs as instructed in the Chart README.
7 |
8 | #####################
9 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/_helpers.tpl:
--------------------------------------------------------------------------------
1 | {{/*
2 | Expand the name of the chart.
3 | */}}
4 | {{- define "clustercode.name" -}}
5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
6 | {{- end }}
7 |
8 | {{/*
9 | Create a default fully qualified app name.
10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
11 | If release name contains chart name it will be used as a full name.
12 | */}}
13 | {{- define "clustercode.fullname" -}}
14 | {{- if .Values.fullnameOverride }}
15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
16 | {{- else }}
17 | {{- $name := default .Chart.Name .Values.nameOverride }}
18 | {{- if contains $name .Release.Name }}
19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }}
20 | {{- else }}
21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 |
26 | {{/*
27 | Create chart name and version as used by the chart label.
28 | */}}
29 | {{- define "clustercode.chart" -}}
30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
31 | {{- end }}
32 |
33 | {{/*
34 | Common labels
35 | */}}
36 | {{- define "clustercode.labels" -}}
37 | helm.sh/chart: {{ include "clustercode.chart" . }}
38 | {{ include "clustercode.selectorLabels" . }}
39 | {{- if .Chart.AppVersion }}
40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
41 | {{- end }}
42 | app.kubernetes.io/managed-by: {{ .Release.Service }}
43 | {{- end }}
44 |
45 | {{/*
46 | Selector labels
47 | */}}
48 | {{- define "clustercode.selectorLabels" -}}
49 | app.kubernetes.io/name: {{ include "clustercode.name" . }}
50 | app.kubernetes.io/instance: {{ .Release.Name }}
51 | {{- end }}
52 |
53 | {{/*
54 | Static labels
55 | */}}
56 | {{- define "clustercode.staticLabels" -}}
57 | {{ include "clustercode.selectorLabels" . }}
58 | app.kubernetes.io/managed-by: {{ .Release.Service }}
59 | {{- end }}
60 |
61 | {{/*
62 | Create the name of the service account to use
63 | */}}
64 | {{- define "clustercode.serviceAccountName" -}}
65 | {{- if .Values.serviceAccount.create }}
66 | {{- default (include "clustercode.fullname" .) .Values.serviceAccount.name }}
67 | {{- else }}
68 | {{- default "default" .Values.serviceAccount.name }}
69 | {{- end }}
70 | {{- end }}
71 |
72 | {{/*
73 | Container images
74 | */}}
75 | {{- define "clustercode.containerImage" -}}
76 | {{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag }}
77 | {{- end }}
78 | {{- define "clustercode.ffmpegImage" -}}
79 | {{ .Values.clustercode.ffmpegImage.registry }}/{{ .Values.clustercode.ffmpegImage.repository }}:{{ .Values.clustercode.ffmpegImage.tag }}
80 | {{- end }}
81 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/operator/clusterrole.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.operator.enabled -}}
2 | ---
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: ClusterRole
5 | metadata:
6 | name: '{{ include "clustercode.fullname" . }}-operator'
7 | labels:
8 | {{- include "clustercode.labels" . | nindent 4 }}
9 | rules:
10 | - apiGroups:
11 | - batch
12 | resources:
13 | - cronjobs
14 | verbs:
15 | - create
16 | - delete
17 | - get
18 | - list
19 | - patch
20 | - update
21 | - watch
22 | - apiGroups:
23 | - batch
24 | resources:
25 | - cronjobs/status
26 | verbs:
27 | - get
28 | - patch
29 | - update
30 | - apiGroups:
31 | - batch
32 | resources:
33 | - jobs
34 | verbs:
35 | - create
36 | - delete
37 | - deletecollection
38 | - get
39 | - list
40 | - patch
41 | - update
42 | - watch
43 | - apiGroups:
44 | - clustercode.github.io
45 | resources:
46 | - blueprints
47 | verbs:
48 | - create
49 | - delete
50 | - get
51 | - list
52 | - patch
53 | - update
54 | - watch
55 | - apiGroups:
56 | - clustercode.github.io
57 | resources:
58 | - blueprints/finalizers
59 | - blueprints/status
60 | verbs:
61 | - get
62 | - patch
63 | - update
64 | - apiGroups:
65 | - clustercode.github.io
66 | resources:
67 | - tasks
68 | verbs:
69 | - create
70 | - delete
71 | - get
72 | - list
73 | - patch
74 | - update
75 | - watch
76 | - apiGroups:
77 | - clustercode.github.io
78 | resources:
79 | - tasks/finalizers
80 | - tasks/status
81 | verbs:
82 | - get
83 | - patch
84 | - update
85 | - apiGroups:
86 | - coordination.k8s.io
87 | resources:
88 | - leases
89 | verbs:
90 | - create
91 | - get
92 | - list
93 | - update
94 | - apiGroups:
95 | - ""
96 | resources:
97 | - configmaps
98 | verbs:
99 | - create
100 | - delete
101 | - get
102 | - list
103 | - patch
104 | - update
105 | - watch
106 | - apiGroups:
107 | - ""
108 | resources:
109 | - serviceaccounts
110 | verbs:
111 | - create
112 | - delete
113 | - get
114 | - list
115 | - watch
116 | - apiGroups:
117 | - rbac.authorization.k8s.io
118 | resources:
119 | - rolebindings
120 | - roles
121 | verbs:
122 | - create
123 | - delete
124 | - get
125 | - list
126 | - watch
127 | {{- end -}}
128 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/operator/clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.operator.enabled .Values.operator.rbac.create -}}
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRoleBinding
4 | metadata:
5 | name: {{ include "clustercode.fullname" . }}
6 | labels:
7 | {{- include "clustercode.labels" . | nindent 4 }}
8 | roleRef:
9 | apiGroup: rbac.authorization.k8s.io
10 | kind: ClusterRole
11 | name: {{ include "clustercode.fullname" . }}-operator
12 | subjects:
13 | - kind: ServiceAccount
14 | name: {{ include "clustercode.serviceAccountName" . }}
15 | namespace: {{ .Release.Namespace }}
16 | {{- end }}
17 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/operator/deployment.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.operator.enabled }}
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: {{ include "clustercode.fullname" . }}-operator
6 | labels:
7 | {{- include "clustercode.labels" . | nindent 4 }}
8 | spec:
9 | replicas: {{ .Values.operator.replicaCount }}
10 | selector:
11 | matchLabels:
12 | {{- include "clustercode.selectorLabels" . | nindent 6 }}
13 | app.kubernetes.io/component: operator
14 | template:
15 | metadata:
16 | {{- with .Values.podAnnotations }}
17 | annotations:
18 | {{- toYaml . | nindent 8 }}
19 | {{- end }}
20 | labels:
21 | {{- include "clustercode.selectorLabels" . | nindent 8 }}
22 | app.kubernetes.io/component: operator
23 | spec:
24 | {{- with .Values.imagePullSecrets }}
25 | imagePullSecrets:
26 | {{- toYaml . | nindent 8 }}
27 | {{- end }}
28 | serviceAccountName: {{ include "clustercode.serviceAccountName" . }}
29 | securityContext:
30 | {{- toYaml .Values.podSecurityContext | nindent 8 }}
31 | containers:
32 | - name: operator
33 | securityContext:
34 | {{- toYaml .Values.securityContext | nindent 12 }}
35 | image: {{ include "clustercode.containerImage" . | quote }}
36 | imagePullPolicy: {{ .Values.image.pullPolicy }}
37 | ports:
38 | - name: http
39 | containerPort: 8080
40 | protocol: TCP
41 | {{- with .Values.clustercode.env }}
42 | env:
43 | {{- toYaml . | nindent 12 }}
44 | {{- end }}
45 | args:
46 | - operator
47 | - --clustercode-image={{ include "clustercode.containerImage" . }}
48 | - --ffmpeg-image={{ include "clustercode.ffmpegImage" . }}
49 | livenessProbe:
50 | httpGet:
51 | path: /metrics
52 | port: http
53 | readinessProbe:
54 | httpGet:
55 | path: /metrics
56 | port: http
57 | resources:
58 | {{- toYaml .Values.operator.resources | nindent 12 }}
59 | {{- with .Values.nodeSelector }}
60 | nodeSelector:
61 | {{- toYaml . | nindent 8 }}
62 | {{- end }}
63 | {{- with .Values.affinity }}
64 | affinity:
65 | {{- toYaml . | nindent 8 }}
66 | {{- end }}
67 | {{- with .Values.tolerations }}
68 | tolerations:
69 | {{- toYaml . | nindent 8 }}
70 | {{- end }}
71 | {{- end }}
72 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/operator/user-clusterrole.yaml:
--------------------------------------------------------------------------------
1 | {{- if and .Values.operator.enabled .Values.operator.rbac.create -}}
2 | ---
3 | apiVersion: rbac.authorization.k8s.io/v1
4 | kind: ClusterRole
5 | metadata:
6 | name: '{{ include "clustercode.fullname" . }}-view'
7 | labels:
8 | {{- include "clustercode.labels" . | nindent 4 }}
9 | rbac.authorization.k8s.io/aggregate-to-view: "true"
10 | rules:
11 | - apiGroups:
12 | - clustercode.github.io
13 | resources:
14 | - blueprints
15 | - tasks
16 | verbs:
17 | - get
18 | - list
19 | - watch
20 | - apiGroups:
21 | - clustercode.github.io
22 | resources:
23 | - blueprints/finalizers
24 | - blueprints/status
25 | - tasks/finalizers
26 | - tasks/status
27 | verbs:
28 | - get
29 | ---
30 | apiVersion: rbac.authorization.k8s.io/v1
31 | kind: ClusterRole
32 | metadata:
33 | name: '{{ include "clustercode.fullname" . }}-edit'
34 | labels:
35 | {{- include "clustercode.labels" . | nindent 4 }}
36 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
37 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
38 | rules:
39 | - apiGroups:
40 | - clustercode.github.io
41 | resources:
42 | - blueprints
43 | - tasks
44 | verbs:
45 | - create
46 | - delete
47 | - get
48 | - list
49 | - patch
50 | - update
51 | - watch
52 | - apiGroups:
53 | - clustercode.github.io
54 | resources:
55 | - blueprints/finalizers
56 | - blueprints/status
57 | - tasks/finalizers
58 | - tasks/status
59 | verbs:
60 | - get
61 | - patch
62 | - update
63 | - apiGroups:
64 | - ""
65 | resources:
66 | - configmaps
67 | verbs:
68 | - get
69 | - list
70 | - watch
71 | - create
72 | - update
73 | {{- end }}
74 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/serviceaccount.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.serviceAccount.create -}}
2 | apiVersion: v1
3 | kind: ServiceAccount
4 | metadata:
5 | name: {{ include "clustercode.serviceAccountName" . }}
6 | labels:
7 | {{- include "clustercode.labels" . | nindent 4 }}
8 | {{- with .Values.serviceAccount.annotations }}
9 | annotations:
10 | {{- toYaml . | nindent 4 }}
11 | {{- end }}
12 | {{- end }}
13 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webhook/deployment.yaml:
--------------------------------------------------------------------------------
1 | {{- if .Values.webhook.enabled }}
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: {{ include "clustercode.fullname" . }}-webhook
6 | labels:
7 | {{- include "clustercode.labels" . | nindent 4 }}
8 | spec:
9 | replicas: {{ .Values.webhook.replicaCount }}
10 | selector:
11 | matchLabels:
12 | {{- include "clustercode.selectorLabels" . | nindent 6 }}
13 | app.kubernetes.io/component: webhook
14 | template:
15 | metadata:
16 | {{- with .Values.podAnnotations }}
17 | annotations:
18 | {{- toYaml . | nindent 8 }}
19 | {{- end }}
20 | labels:
21 | {{- include "clustercode.selectorLabels" . | nindent 8 }}
22 | app.kubernetes.io/component: webhook
23 | spec:
24 | {{- with .Values.imagePullSecrets }}
25 | imagePullSecrets:
26 | {{- toYaml . | nindent 8 }}
27 | {{- end }}
28 | serviceAccountName: {{ include "clustercode.serviceAccountName" . }}
29 | securityContext:
30 | {{- toYaml .Values.podSecurityContext | nindent 8 }}
31 | containers:
32 | - name: webhook
33 | securityContext:
34 | {{- toYaml .Values.securityContext | nindent 12 }}
35 | image: {{ include "clustercode.containerImage" . | quote }}
36 | imagePullPolicy: {{ .Values.image.pullPolicy }}
37 | ports:
38 | - name: webhook
39 | containerPort: 9443
40 | protocol: TCP
41 | {{- with .Values.clustercode.env }}
42 | env:
43 | {{- toYaml . | nindent 12 }}
44 | {{- end }}
45 | args:
46 | - webhook
47 | - --webhook-tls-cert-dir=/webhook/tls
48 | livenessProbe:
49 | httpGet:
50 | path: /healthz
51 | port: webhook
52 | volumeMounts:
53 | - name: webhook-tls
54 | readOnly: true
55 | mountPath: /webhook/tls
56 | volumes:
57 | - name: webhook-tls
58 | secret:
59 | {{- if .Values.webhook.externalSecretName }}
60 | secretName: {{ .Values.webhook.externalSecretName }}
61 | {{- else }}
62 | secretName: {{ include "clustercode.fullname" . }}-webhook-tls
63 | {{- end }}
64 | {{- with .Values.nodeSelector }}
65 | nodeSelector:
66 | {{- toYaml . | nindent 8 }}
67 | {{- end }}
68 | {{- with .Values.affinity }}
69 | affinity:
70 | {{- toYaml . | nindent 8 }}
71 | {{- end }}
72 | {{- with .Values.tolerations }}
73 | tolerations:
74 | {{- toYaml . | nindent 8 }}
75 | {{- end }}
76 | {{- end }}
77 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webhook/secret.yaml:
--------------------------------------------------------------------------------
{{- if and .Values.webhook.enabled (not .Values.webhook.externalSecretName) -}}
# TLS secret for the admission webhook server. Only rendered when the chart
# manages the certificate itself, i.e. no externalSecretName is configured.
apiVersion: v1
kind: Secret
metadata:
  name: {{ include "clustercode.fullname" . }}-webhook-tls
  labels:
    {{- include "clustercode.labels" . | nindent 4 }}
data:
  # NOTE(review): `data` requires base64-encoded content — this assumes
  # .Values.webhook.certificate and .Values.webhook.privateKey are supplied
  # already base64-encoded; confirm against values.yaml documentation.
  tls.crt: {{ .Values.webhook.certificate }}
  tls.key: {{ .Values.webhook.privateKey }}
{{- end -}}
12 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webhook/service.yaml:
--------------------------------------------------------------------------------
{{- if .Values.webhook.enabled }}
# ClusterIP Service exposing the webhook container port (named "webhook",
# 9443 in the deployment) on port 443 for the admission webhook configs.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "clustercode.fullname" . }}-webhook
  labels:
    {{- include "clustercode.labels" . | nindent 4 }}
  {{- with .Values.webhook.service.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  type: ClusterIP
  ports:
    - port: 443
      targetPort: webhook
      protocol: TCP
      name: webhook
  selector:
    {{- include "clustercode.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: webhook
{{- end }}
23 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webhook/webhook-config.yaml:
--------------------------------------------------------------------------------
{{- if .Values.webhook.enabled -}}
# Mutating and validating admission webhook registrations for Blueprint
# resources, both pointing at the chart's webhook Service. The annotations
# blocks are wrapped in `with` so that an empty/unset value does not render
# a stray `annotations: {}` (consistent with the other templates).
---
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  name: '{{ include "clustercode.fullname" . }}'
  labels:
    {{- include "clustercode.labels" . | nindent 4 }}
  {{- with .Values.webhook.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
webhooks:
  - admissionReviewVersions:
      - v1
    clientConfig:
      service:
        name: '{{ include "clustercode.fullname" . }}-webhook'
        namespace: '{{ .Release.Namespace }}'
        path: /mutate-clustercode-github-io-v1alpha1-blueprint
      {{- with .Values.webhook.caBundle }}
      caBundle: '{{ . }}'
      {{- end }}
    failurePolicy: Fail
    name: blueprints.clustercode.github.io
    rules:
      - apiGroups:
          - clustercode.github.io
        apiVersions:
          - v1alpha1
        operations:
          - CREATE
          - UPDATE
        resources:
          - blueprints
    sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  name: '{{ include "clustercode.fullname" . }}'
  labels:
    {{- include "clustercode.labels" . | nindent 4 }}
  {{- with .Values.webhook.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
webhooks:
  - admissionReviewVersions:
      - v1
    clientConfig:
      service:
        name: '{{ include "clustercode.fullname" . }}-webhook'
        namespace: '{{ .Release.Namespace }}'
        path: /validate-clustercode-github-io-v1alpha1-blueprint
      {{- with .Values.webhook.caBundle }}
      caBundle: '{{ . }}'
      {{- end }}
    failurePolicy: Fail
    name: blueprints.clustercode.github.io
    rules:
      - apiGroups:
          - clustercode.github.io
        apiVersions:
          - v1alpha1
        operations:
          - CREATE
          - UPDATE
        resources:
          - blueprints
    sideEffects: None
{{- end -}}
69 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webui/api-service.yaml:
--------------------------------------------------------------------------------
{{- if eq .Values.webui.api.mode "externalName" }}
# In "externalName" API mode the ingress routes /api traffic to this Service,
# which resolves to the configured external DNS name instead of in-cluster pods.
apiVersion: v1
kind: Service
metadata:
  name: {{ include "clustercode.fullname" . }}-api
  labels:
    {{- include "clustercode.labels" . | nindent 4 }}
spec:
  type: ExternalName
  externalName: {{ .Values.webui.api.externalName }}
  ports:
    - port: 443
      protocol: TCP
{{- end }}
15 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webui/deployment.yaml:
--------------------------------------------------------------------------------
{{- if .Values.webui.enabled }}
# Deployment running the Clustercode web UI container.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "clustercode.fullname" . }}-webui
  labels:
    {{- include "clustercode.labels" . | nindent 4 }}
spec:
  replicas: {{ .Values.webui.replicaCount }}
  selector:
    matchLabels:
      {{- include "clustercode.selectorLabels" . | nindent 6 }}
      app.kubernetes.io/component: webui
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "clustercode.selectorLabels" . | nindent 8 }}
        app.kubernetes.io/component: webui
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "clustercode.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: webui
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          image: {{ include "clustercode.containerImage" . | quote }}
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
          {{- with .Values.clustercode.env }}
          env:
            {{- toYaml . | nindent 12 }}
          {{- end }}
          args:
            - webui
          # In "proxy" API mode the webui process itself forwards API calls
          # to the configured URL; extra CLI args carry that configuration.
          {{- if eq .Values.webui.api.mode "proxy" }}
            - --api-url={{ .Values.webui.api.proxy.url }}
            - --api-tls-skip-verify={{ .Values.webui.api.proxy.skipTlsVerify }}
          {{- end }}
          # Both probes hit the same /healthz endpoint on the http port.
          livenessProbe:
            httpGet:
              path: /healthz
              port: http
          readinessProbe:
            httpGet:
              path: /healthz
              port: http
          resources:
            {{- toYaml .Values.webui.resources | nindent 12 }}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
{{- end }}
74 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webui/ingress.yaml:
--------------------------------------------------------------------------------
{{- if and .Values.webui.enabled .Values.webui.ingress.enabled -}}
{{- $fullName := include "clustercode.fullname" . -}}
{{- $svcPort := .Values.webui.service.port -}}
{{- if and .Values.webui.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
  {{- if not (hasKey .Values.webui.ingress.annotations "kubernetes.io/ingress.class") }}
  {{- $_ := set .Values.webui.ingress.annotations "kubernetes.io/ingress.class" .Values.webui.ingress.className}}
  {{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
# Ingress for the webui. The apiVersion, ingressClassName vs. annotation, and
# backend schema are all selected based on the target cluster version.
kind: Ingress
metadata:
  name: {{ $fullName }}
  labels:
    {{- include "clustercode.labels" . | nindent 4 }}
  {{- with .Values.webui.ingress.annotations }}
  annotations:
    {{- toYaml . | nindent 4 }}
  {{- end }}
spec:
  {{- if and .Values.webui.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
  ingressClassName: {{ .Values.webui.ingress.className }}
  {{- end }}
  {{- if .Values.webui.ingress.tls }}
  tls:
    {{- range .Values.webui.ingress.tls }}
    - hosts:
        {{- range .hosts }}
        - {{ . | quote }}
        {{- end }}
      secretName: {{ .secretName }}
    {{- end }}
  {{- end }}
  rules:
    {{- range .Values.webui.ingress.hosts }}
    - host: {{ .host | quote }}
      http:
        paths:
          # Main webui path, served by the webui Service.
          {{- with $.Values.webui.ingress }}
          - path: {{ .rootPath }}
            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
            pathType: {{ .pathType }}
            {{- end }}
            backend:
              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
              service:
                name: {{ $fullName }}-webui
                port:
                  number: {{ $svcPort }}
              {{- else }}
              serviceName: {{ $fullName }}-webui
              servicePort: {{ $svcPort }}
              {{- end }}
          {{- end }}
          # In "externalName" API mode, an extra <rootPath>/api path routes
          # API traffic to the ExternalName Service (api-service.yaml).
          {{- if eq $.Values.webui.api.mode "externalName" }}
          {{- with $.Values.webui.ingress }}
          - path: {{ trimSuffix "/" .rootPath }}/api
            {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
            pathType: {{ .pathType }}
            {{- end }}
            backend:
              {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
              service:
                name: {{ $fullName }}-api
                port:
                  number: 443
              {{- else }}
              serviceName: {{ $fullName }}-api
              servicePort: 443
              {{- end }}
          {{- end }}
          {{- end }}
    {{- end }}
{{- end }}
80 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webui/service.yaml:
--------------------------------------------------------------------------------
{{- if .Values.webui.enabled }}
# ClusterIP Service for the webui; port 80 maps to the container's "http"
# port (8080 in the deployment).
apiVersion: v1
kind: Service
metadata:
  name: {{ include "clustercode.fullname" . }}-webui
  labels:
    {{- include "clustercode.labels" . | nindent 4 }}
spec:
  type: ClusterIP
  ports:
    - port: 80
      targetPort: http
      protocol: TCP
      name: webui
  selector:
    {{- include "clustercode.selectorLabels" . | nindent 4 }}
    app.kubernetes.io/component: webui
{{- end }}
19 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webui/user-sa.yaml:
--------------------------------------------------------------------------------
{{- range .Values.webui.users -}}
---
# One ServiceAccount per configured webui user. Unless skipSecret is set, it
# references a token Secret of the same name (see user-secret.yaml).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ .name }}
  namespace: {{ .namespace | default $.Release.Namespace }}
  labels:
    {{- include "clustercode.labels" $ | nindent 4 }}
{{- if not .skipSecret }}
secrets:
  - name: {{ .name }}
{{- end }}
{{- end }}
15 |
--------------------------------------------------------------------------------
/charts/clustercode/templates/webui/user-secret.yaml:
--------------------------------------------------------------------------------
{{- range .Values.webui.users -}}
{{- if not .skipSecret }}
---
# Token Secret for each webui user's ServiceAccount. Kubernetes fills in the
# token because of the service-account-token type combined with the
# kubernetes.io/service-account.name annotation.
apiVersion: v1
kind: Secret
type: kubernetes.io/service-account-token
metadata:
  name: {{ .name }}
  namespace: {{ .namespace | default $.Release.Namespace }}
  labels:
    {{- include "clustercode.labels" $ | nindent 4 }}
  annotations:
    kubernetes.io/service-account.name: {{ .name }}
{{- end }}
{{- end }}
16 |
--------------------------------------------------------------------------------
/cleanup_command.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/cleanupcmd"
5 | "github.com/urfave/cli/v2"
6 | controllerruntime "sigs.k8s.io/controller-runtime"
7 | )
8 |
9 | func newCleanupCommand() *cli.Command {
10 | command := cleanupcmd.Command{}
11 | return &cli.Command{
12 | Name: "cleanup",
13 | Usage: "Remove intermediary files and finish the task",
14 | Before: LogMetadata,
15 | Action: func(ctx *cli.Context) error {
16 | command.Log = AppLogger(ctx).WithName(ctx.Command.Name)
17 | controllerruntime.SetLogger(command.Log)
18 | return command.Execute(controllerruntime.LoggerInto(ctx.Context, command.Log))
19 | },
20 | Flags: []cli.Flag{
21 | newTaskNameFlag(&command.TaskName),
22 | newNamespaceFlag(&command.Namespace),
23 | newSourceRootDirFlag(&command.SourceRootDir),
24 | },
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/count_command.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/countcmd"
5 | "github.com/urfave/cli/v2"
6 | controllerruntime "sigs.k8s.io/controller-runtime"
7 | )
8 |
9 | func newCountCommand() *cli.Command {
10 | command := &countcmd.Command{}
11 | return &cli.Command{
12 | Name: "count",
13 | Usage: "Counts the number of generated intermediary media files",
14 | Before: LogMetadata,
15 | Action: func(ctx *cli.Context) error {
16 | command.Log = AppLogger(ctx).WithName(ctx.Command.Name)
17 | controllerruntime.SetLogger(command.Log)
18 | return command.Execute(controllerruntime.LoggerInto(ctx.Context, command.Log))
19 | },
20 | Flags: []cli.Flag{
21 | newTaskNameFlag(&command.TaskName),
22 | newNamespaceFlag(&command.Namespace),
23 | newSourceRootDirFlag(&command.SourceRootDir),
24 | },
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | # VSCode configs
2 | .vscode/
3 |
4 | # IntelliJ project files
5 | .idea
6 |
7 | # Antora project files
8 | public/
9 | node_modules/
10 |
--------------------------------------------------------------------------------
/docs/README.adoc:
--------------------------------------------------------------------------------
1 | = Documentation with Antora
2 |
3 | This documentation is built with https://docs.antora.org/[Antora].
4 |
5 | == Build Documentation
6 |
7 | `make docs-build`
8 |
9 | == Live Reload and Preview
10 |
11 | This will run a web server on port 8080 and open your browser.
12 | Meanwhile you can make changes to the local `modules` dir and view your change 2-3s later without browser plugins.
13 |
14 | `make docs-preview`
15 |
16 | == Requirements
17 |
18 | * node v12
19 | * npm v6
20 |
--------------------------------------------------------------------------------
/docs/antora-playbook.yml:
--------------------------------------------------------------------------------
1 | site:
2 | title: Clustercode Docs
3 | start_page: clustercode::index.adoc
4 | url: http://github.com/ccremer/clustercode
5 |
6 | content:
7 | sources:
8 | - url: ../
9 | branches:
10 | - HEAD
11 | - docs/v*
12 | start_path: docs
13 |
14 | ui:
15 | bundle:
16 | url: https://gitlab.com/antora/antora-ui-default/-/jobs/artifacts/master/raw/build/ui-bundle.zip?job=bundle-stable
17 | snapshot: true
18 | supplemental_files: ./supplemental-ui
19 |
20 | output:
21 | dir: ../.work/docs/
22 | clean: true
23 |
--------------------------------------------------------------------------------
/docs/antora.yml:
--------------------------------------------------------------------------------
1 | name: clustercode
2 | title: Clustercode
3 | nav:
4 | - modules/ROOT/nav.adoc
5 | version: master
6 |
7 | asciidoc:
8 | attributes:
9 | releaseVersion: latest
10 |
--------------------------------------------------------------------------------
/docs/docs.mk:
--------------------------------------------------------------------------------
docs_output_dir := $(WORK_DIR)/docs

clean_targets += .docs-clean

.PHONY: docs-build
docs-build: export ANTORA_OUTPUT_DIR = $(docs_output_dir)
docs-build: docs/node_modules ## Build Antora documentation
	npm --prefix ./docs run build

.PHONY: docs-preview
docs-preview: export ANTORA_OUTPUT_DIR = $(docs_output_dir)
docs-preview: docs-build ## Preview Antora build in local web server and browser
	npm --prefix ./docs run preview

.PHONY: docs-publish
docs-publish: export ANTORA_OUTPUT_DIR = $(docs_output_dir)
docs-publish: docs-build | $(docs_output_dir) ## Publishes the documentation in gh-pages
	touch $(docs_output_dir)/.nojekyll
	wget -O $(docs_output_dir)/index.yaml https://raw.githubusercontent.com/$(PROJECT_OWNER)/$(PROJECT_NAME)/gh-pages/index.yaml
	npm --prefix ./docs run deploy

.PHONY: .docs-clean
# ANTORA_OUTPUT_DIR is a target-specific variable exported only for the
# build/preview/publish targets; referencing it here expanded to nothing,
# so the output directory was never removed. Use docs_output_dir directly.
.docs-clean: ## Clean documentation artifacts
	rm -rf $(docs_output_dir) docs/node_modules

# Download node packages
docs/node_modules:
	npm --prefix ./docs install

$(docs_output_dir):
	mkdir -p $@
32 |
--------------------------------------------------------------------------------
/docs/modules/ROOT/examples/blueprint.yaml:
--------------------------------------------------------------------------------
# Example Blueprint used by the documentation (included from
# how-tos/create-blueprint.adoc). Demonstrates the scan schedule, the three
# storage PVCs and custom ffmpeg argument lists for split/transcode/merge.
# ${INPUT}, ${OUTPUT} and ${SLICE_SIZE} appear to be placeholders substituted
# by the operator — confirm against the CRD documentation.
apiVersion: clustercode.github.io/v1alpha1
kind: Blueprint
metadata:
  name: test-blueprint
spec:
  scanSchedule: "*/30 * * * *"
  storage:
    sourcePvc:
      claimName: my-nfs-source
      #subPath: source
    intermediatePvc:
      claimName: some-other-storage-claim
      #subPath: intermediate
    targetPvc:
      claimName: my-nfs-target
      #subPath: target
  scanSpec:
    mediaFileExtensions:
      - mp4
  taskConcurrencyStrategy:
    concurrentCountStrategy:
      maxCount: 1
  encodeSpec:
    sliceSize: 120 # after how many seconds to split
    splitCommandArgs:
      - -y
      - -hide_banner
      - -nostats
      - -i
      - ${INPUT}
      - -c
      - copy
      - -map
      - "0"
      - -segment_time
      - ${SLICE_SIZE}
      - -f
      - segment
      - ${OUTPUT}
    transcodeCommandArgs:
      - -y
      - -hide_banner
      - -nostats
      - -i
      - ${INPUT}
      - -c:v
      - copy
      - -c:a
      - copy
      - ${OUTPUT}
    mergeCommandArgs:
      - -y
      - -hide_banner
      - -nostats
      - -f
      - concat
      - -safe
      - "0"
      - -i
      - ${INPUT}
      - -c
      - copy
      - ${OUTPUT}
--------------------------------------------------------------------------------
/docs/modules/ROOT/nav.adoc:
--------------------------------------------------------------------------------
1 | * xref:index.adoc[Introduction]
2 | * https://github.com/ccremer/clustercode/releases[Changelog,window=_blank] 🔗
3 |
4 | .Tutorials
5 | * xref:clustercode:ROOT:tutorials/tutorial.adoc[Tutorial]
6 |
7 | .How To
8 | * xref:clustercode:ROOT:how-tos/create-blueprint.adoc[Create Blueprint]
9 |
10 | .Technical reference
11 |
12 |
13 | .Explanation
14 | * xref:clustercode:ROOT:explanations/how-it-works.adoc[How Clustercode Works]
15 | * xref:clustercode:ROOT:explanations/history.adoc[Clustercode Architecture History]
16 |
--------------------------------------------------------------------------------
/docs/modules/ROOT/pages/explanations/history.adoc:
--------------------------------------------------------------------------------
1 | = Clustercode Architecture History
2 |
3 | In Clustercode 1.x, the stack consists of a Java-based Master node and several Slave nodes.
4 | However, parallelization was not achieved by splitting the media file.
5 | Instead, each Slave would get assigned a media file on its own, enabling parallelization only when having multiple media files to convert.
6 | The file system itself served as database when it comes to remember which files were already converted.
7 |
8 | This became soon difficult to maintain.
9 |
10 | Some time later, another attempt was made by using a message based architecture.
11 | There would be a Java-based Master for scheduling, a CouchDB database and Go-based Slaves.
12 | At least the same split-encode-merge parallelization concept was planned.
13 | However, the code grew much into solving infrastructure problems: How to connect to each other, what database scheme, error handling, logging, synchronization etc.
14 | The actual, more interesting business code was never completed or released.
15 |
16 | Another year or two passed without activity.
17 |
18 | Meanwhile the original maintainer gained a lot of knowledge on Kubernetes and its concept of Operators.
Soon, the decision was taken to try yet another attempt, this time using Kubernetes both as database and scheduler.
The big advantage of this architecture is that it reduces maintenance to the actual business logic.
21 | Clustercode itself would become stateless, everything is stored in Kubernetes.
22 | However, requiring Kubernetes definitely increases installation complexity, and some users might actually not install it due to this.
23 | A Proof of Concept showed promise.
24 |
Another 2 years or so passed without getting past the PoC state.
26 |
27 | The original maintainer meanwhile got some experience on writing Operators and automation.
28 | Yet another attempt at a revival is restructuring a lot of code and boilerplate.
29 |
--------------------------------------------------------------------------------
/docs/modules/ROOT/pages/explanations/how-it-works.adoc:
--------------------------------------------------------------------------------
1 | = How Clustercode Works
2 |
3 | == High level workflow
4 |
5 | Clustercode reads an input file from a directory and splits it into multiple smaller chunks.
6 | Those chunks are encoded individually, but in parallel when enabling concurrency.
7 | That means, more nodes equals faster encoding.
After all chunks are converted, they are merged together again and put into the target directory.
9 |
10 | Ffmpeg is used in the splitting, encoding and merging jobs.
11 | It basically boils down to
12 |
13 | . Splitting: `ffmpeg -i movie.mp4 -c copy -map 0 -segment_time 120 -f segment job-id_%d.mp4`
14 | . Encoding: `ffmpeg -i job-id_1.mp4 -c:v copy -c:a copy job-id_1_done.mkv`
15 | . Merging: `ffmpeg -f concat -i file-list.txt -c copy movie_out.mkv`
16 |
17 | You can customize the arguments passed to ffmpeg (with a few rules and constraints).
18 |
19 | == Enter Kubernetes
20 |
21 | Under the hood, 2 Kubernetes CRDs are used to describe the config.
22 | All steps with Ffmpeg are executed with Kubernetes Jobs, while the encoding step can be executed in parallel by scheduling multiple Jobs at the same time.
23 |
Clustercode operates in either "operator" or "client" mode.
As an operator, Clustercode creates Tasks from Blueprints, which in turn control the spawning of CronJobs and Jobs.
26 | There is only one operator on the cluster across all namespaces.
27 | In Jobs and CronJobs, Clustercode is launched as a client, interacting with Kubernetes API.
28 | CronJobs and Jobs are launched in the same namespace as the Blueprint is in.
29 |
30 | == Process Diagram
31 |
If you understand BPMN 2.0, the following graphic should give more insight into how Clustercode works on Kubernetes.
Even if you don't fully understand it, it should give you the gist.
34 |
35 | NOTE: The BPMN experts will notice that this is not 100% valid BPMN, because it's made for humans.
36 |
37 | TIP: The image is an SVG vector graphic, so you should be able to zoom in to read it.
38 |
39 | image::clustercode-process.drawio.svg[]
40 |
--------------------------------------------------------------------------------
/docs/modules/ROOT/pages/how-tos/create-blueprint.adoc:
--------------------------------------------------------------------------------
1 | = How To Create A Blueprint
2 |
This is an example of a Blueprint:
4 |
5 | [source,yaml]
6 | ----
7 | include::example$blueprint.yaml[]
8 | ----
9 |
--------------------------------------------------------------------------------
/docs/modules/ROOT/pages/index.adoc:
--------------------------------------------------------------------------------
1 | = Clustercode - Kubernetes Movie and Series Operator
2 |
3 | [discrete]
4 | == Introduction
5 |
6 | Automatically convert your movies and TV shows from one file format to another using ffmpeg in a cluster.
7 | It's like an ffmpeg operator.
8 |
9 | image::clustercode-overview.drawio.svg[]
10 |
11 | Clustercode is written in https://golang.org/[Go] and is an {page-origin-url}[Open Source project].
12 |
13 | TIP: The xref:tutorials/tutorial.adoc[tutorial] gives you a glimpse into Clustercode. Give it a try!
14 |
15 | [discrete]
16 | == Documentation
17 |
18 | The documentation is inspired by the https://documentation.divio.com/[Divio's documentation structure]:
19 |
20 | Tutorials:: _Learning-oriented_: Simple lessons to learn about Clustercode.
21 |
22 | How-to guides:: _Problem-oriented_: step-by-step guides to achieve a goal.
23 |
Technical reference:: _Information-oriented_: explaining the inner workings of Clustercode.
25 |
26 | Explanation:: _Understanding-oriented_: puts Clustercode in context.
27 |
--------------------------------------------------------------------------------
/docs/modules/ROOT/pages/tutorials/tutorial.adoc:
--------------------------------------------------------------------------------
1 | = Tutorial
2 |
--------------------------------------------------------------------------------
/docs/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "dependencies": {
3 | "@antora/cli": "3.1.2",
4 | "@antora/site-generator": "3.1.2",
5 | "gh-pages": "5.0.0"
6 | },
7 | "devDependencies": {
8 | "npm-run-all": "4.1.5",
9 | "reload": "3.2.1",
10 | "watch": "1.0.2"
11 | },
12 | "scripts": {
13 | "build": "antora ${ANTORA_PLAYBOOK_PATH:-antora-playbook.yml} ${ANTORA_ARGS}",
14 | "watch": "watch 'npm run build' modules",
15 | "serve": "reload -d ${ANTORA_OUTPUT_DIR} -b",
16 | "preview": "run-p watch serve",
17 | "deploy": "gh-pages -d ${ANTORA_OUTPUT_DIR} --dotfiles -m \"Update documentation $(date --utc '+%Y-%m-%d %H:%M')\""
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/docs/supplemental-ui/partials/footer-content.hbs:
--------------------------------------------------------------------------------
1 |
5 |
--------------------------------------------------------------------------------
/docs/supplemental-ui/partials/header-content.hbs:
--------------------------------------------------------------------------------
1 |
10 |
--------------------------------------------------------------------------------
/flags.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "strings"
6 |
7 | "github.com/urfave/cli/v2"
8 | )
9 |
const (
	// Accepted values for the scan-role-kind flag.
	ClusterRole = "ClusterRole"
	Role        = "Role"
)

// EnumValue implements cli.Generic and restricts a flag to a fixed set of
// allowed string values.
type EnumValue struct {
	Enum     []string // the permitted values
	Default  string   // reported by String until a selection is made
	selected string   // value chosen through Set; empty initially
}

// Set records value if it is one of the allowed Enum entries, otherwise it
// returns an error naming the permitted values.
func (e *EnumValue) Set(value string) error {
	for i := range e.Enum {
		if e.Enum[i] != value {
			continue
		}
		e.selected = value
		return nil
	}
	return fmt.Errorf("allowed values are [%s]", strings.Join(e.Enum, ", "))
}

// String returns the currently selected value, falling back to Default when
// nothing has been selected yet.
func (e *EnumValue) String() string {
	if e.selected != "" {
		return e.selected
	}
	return e.Default
}
38 |
39 | func newTaskNameFlag(dest *string) *cli.StringFlag {
40 | return &cli.StringFlag{Name: "task-name", EnvVars: envVars("TASK_NAME"), Required: true,
41 | Usage: "Task Name",
42 | Destination: dest,
43 | }
44 | }
45 |
46 | func newNamespaceFlag(dest *string) *cli.StringFlag {
47 | return &cli.StringFlag{Name: "namespace", Aliases: []string{"n"}, EnvVars: envVars("NAMESPACE"), Required: true,
48 | Usage: "Namespace in which to find the resource.",
49 | Destination: dest,
50 | }
51 | }
52 |
53 | func newBlueprintNameFlag(dest *string) *cli.StringFlag {
54 | return &cli.StringFlag{Name: "blueprint-name", EnvVars: envVars("BLUEPRINT_NAME"), Required: true,
55 | Usage: "Blueprint Name",
56 | Destination: dest,
57 | }
58 | }
59 |
// newScanRoleKindFlag declares the --scan-role-kind flag, restricted via
// EnumValue to "ClusterRole" (default) or "Role".
// NOTE(review): Usage is still the "TODO" placeholder and Category "Encoding"
// looks unrelated to RBAC role kinds — confirm and fix the help texts.
func newScanRoleKindFlag() *cli.GenericFlag {
	enum := &EnumValue{Enum: []string{ClusterRole, Role}, Default: ClusterRole}
	return &cli.GenericFlag{Name: "scan-role-kind", EnvVars: envVars("SCAN_ROLE"),
		Usage:       "TODO",
		Category:    "Encoding",
		DefaultText: fmt.Sprintf("%q [%s]", enum.Default, strings.Join(enum.Enum, ", ")),
		Value:       enum,
	}
}
69 |
70 | func newLogFormatFlag() *cli.GenericFlag {
71 | enum := &EnumValue{Enum: []string{"console", "json"}, Default: "console"}
72 | return &cli.GenericFlag{Name: "log-format", EnvVars: envVars("LOG_FORMAT"),
73 | Usage: "sets the log format",
74 | Category: "Encoding",
75 | DefaultText: fmt.Sprintf("%q [%s]", enum.Default, strings.Join(enum.Enum, ", ")),
76 | Value: enum,
77 | }
78 | }
79 | func newSourceRootDirFlag(dest *string) *cli.StringFlag {
80 | return &cli.StringFlag{Name: "source-root-dir", EnvVars: envVars("SOURCE_ROOT_DIR"),
81 | Usage: "Directory path where to find the source files",
82 | Destination: dest, Value: "/clustercode",
83 | }
84 | }
85 |
--------------------------------------------------------------------------------
/kind/config.yaml:
--------------------------------------------------------------------------------
# Local development cluster definition for kind (see kind/kind.mk).
kind: Cluster
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
  - role: control-plane
    kubeadmConfigPatches:
      - |
        kind: InitConfiguration
        nodeRegistration:
          kubeletExtraArgs:
            # Label consumed by the ingress-nginx kind deployment's nodeSelector.
            node-labels: "ingress-ready=true"
    # Expose the ingress controller's ports on the host:
    # http://localhost:8081 and https://localhost:8443.
    extraPortMappings:
      - containerPort: 80
        hostPort: 8081
        protocol: TCP
      - containerPort: 443
        hostPort: 8443
        protocol: TCP
    # Host directory backing PersistentVolumes inside the node container.
    extraMounts:
      - hostPath: .work/data
        containerPath: /pv/data
21 |
--------------------------------------------------------------------------------
/kind/kind.mk:
--------------------------------------------------------------------------------
1 | kind_dir ?= $(WORK_DIR)/kind
2 | kind_bin = $(go_bin)/kind
3 |
4 | clean_targets += kind-clean
5 |
6 | # Prepare kind binary
7 | $(kind_bin): export GOOS = $(shell go env GOOS)
8 | $(kind_bin): export GOARCH = $(shell go env GOARCH)
9 | $(kind_bin): export GOBIN = $(go_bin)
10 | $(kind_bin): | $(go_bin)
11 | go install sigs.k8s.io/kind@latest
12 |
13 | $(kind_dir):
14 | @mkdir -p $@
15 |
16 | .PHONY: kind
17 | kind: export KUBECONFIG = $(KIND_KUBECONFIG)
18 | kind: kind-setup-ingress kind-load-image ## All-in-one kind target
19 |
20 | .PHONY: kind-setup
21 | kind-setup: export KUBECONFIG = $(KIND_KUBECONFIG)
22 | kind-setup: $(KIND_KUBECONFIG) ## Creates the kind cluster
23 |
24 | .PHONY: kind-setup-ingress
25 | kind-setup-ingress: export KUBECONFIG = $(KIND_KUBECONFIG)
26 | kind-setup-ingress: kind-setup ## Install NGINX as ingress controller onto kind cluster (localhost:8081)
27 | kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/main/deploy/static/provider/kind/deploy.yaml
28 | kubectl wait -n ingress-nginx --for condition=Available deploy/ingress-nginx-controller
29 | kubectl wait -n ingress-nginx --for condition=ContainersReady $$(kubectl -n ingress-nginx get pods -o name -l app.kubernetes.io/component=controller)
30 |
31 | .PHONY: kind-load-image
32 | # We fix the arch to linux/amd64 since kind runs in amd64 even on Mac/arm.
33 | kind-load-image: export GOOS = linux
34 | kind-load-image: export GOARCH = amd64
35 | kind-load-image: kind-setup build-docker ## Load the container image onto kind cluster
36 | @$(kind_bin) load docker-image --name $(KIND_CLUSTER) $(CONTAINER_IMG)
37 |
38 | .PHONY: kind-clean
39 | kind-clean: export KUBECONFIG = $(KIND_KUBECONFIG)
40 | kind-clean: ## Removes the kind Cluster
41 | @$(kind_bin) delete cluster --name $(KIND_CLUSTER) || true
42 | rm -rf $(kind_dir) $(kind_bin)
43 |
44 | $(KIND_KUBECONFIG): export KUBECONFIG = $(KIND_KUBECONFIG)
45 | $(KIND_KUBECONFIG): $(kind_bin) | $(kind_dir)
46 | $(kind_bin) create cluster \
47 | --name $(KIND_CLUSTER) \
48 | --image $(KIND_IMAGE) \
49 | --config kind/config.yaml
50 | @kubectl version
51 | @kubectl cluster-info
52 | @kubectl config use-context kind-$(KIND_CLUSTER)
53 | @echo =======
54 | @echo "Setup finished. To interact with the local dev cluster, set the KUBECONFIG environment variable as follows:"
55 | @echo "export KUBECONFIG=$$(realpath "$(KIND_KUBECONFIG)")"
56 | @echo =======
57 |
--------------------------------------------------------------------------------
/logger.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "runtime"
8 | "strings"
9 | "sync/atomic"
10 |
11 | "github.com/go-logr/logr"
12 | "github.com/go-logr/zapr"
13 | "github.com/urfave/cli/v2"
14 | "go.uber.org/zap"
15 | "go.uber.org/zap/zapcore"
16 | )
17 |
18 | type loggerContextKey struct{}
19 |
20 | // AppLogger retrieves the application-wide logger instance from the cli.Context.
21 | func AppLogger(c *cli.Context) logr.Logger {
22 | return c.Context.Value(loggerContextKey{}).(*atomic.Value).Load().(logr.Logger)
23 | }
24 |
25 | // SetLogger copies the application-wide logger instance from cli.Context to new context using logr.NewContext.
26 | func SetLogger(ctx *cli.Context) context.Context {
27 | return logr.NewContext(ctx.Context, AppLogger(ctx))
28 | }
29 |
30 | // LogMetadata prints various metadata to the root logger.
31 | // It prints version, architecture and current user ID and returns nil.
32 | func LogMetadata(c *cli.Context) error {
33 | logger := AppLogger(c)
34 | if !usesProductionLoggingConfig(c) {
35 | logger = logger.WithValues("version", version)
36 | }
37 | logger.WithValues(
38 | "date", date,
39 | "commit", commit,
40 | "go_os", runtime.GOOS,
41 | "go_arch", runtime.GOARCH,
42 | "go_version", runtime.Version(),
43 | "uid", os.Getuid(),
44 | "gid", os.Getgid(),
45 | ).Info("Starting up " + appName)
46 | return nil
47 | }
48 |
49 | func setupLogging(c *cli.Context) error {
50 | logger, err := newZapLogger(appName, c.Int("log-level"), usesProductionLoggingConfig(c))
51 | c.Context.Value(loggerContextKey{}).(*atomic.Value).Store(logger)
52 | return err
53 | }
54 |
55 | func usesProductionLoggingConfig(c *cli.Context) bool {
56 | return strings.EqualFold("JSON", c.String(newLogFormatFlag().Name))
57 | }
58 |
// newZapLogger returns a new logr.Logger backed by zap.
// verbosityLevel translates to zap's negative levels; useProductionConfig
// switches from the console development encoder to JSON output.
// It also replaces zap's global loggers via zap.ReplaceGlobals.
func newZapLogger(name string, verbosityLevel int, useProductionConfig bool) (logr.Logger, error) {
	cfg := zap.NewDevelopmentConfig()
	cfg.EncoderConfig.ConsoleSeparator = " | "
	if useProductionConfig {
		cfg = zap.NewProductionConfig()
	}
	if verbosityLevel > 0 {
		// Zap's levels get more verbose as the number gets smaller,
		// but logr's level increases with greater numbers.
		cfg.Level = zap.NewAtomicLevelAt(zapcore.Level(verbosityLevel * -1))
	} else {
		cfg.Level = zap.NewAtomicLevelAt(zapcore.InfoLevel)
	}
	z, err := cfg.Build()
	if err != nil {
		return logr.Discard(), fmt.Errorf("error configuring the logging stack: %w", err)
	}
	zap.ReplaceGlobals(z)
	logger := zapr.NewLogger(z).WithName(name)
	if useProductionConfig {
		// Append the version to each log so that logging stacks like EFK/Loki can correlate errors with specific versions.
		return logger.WithValues("version", version), nil
	}
	return logger, nil
}
84 |
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "os"
7 | "sync/atomic"
8 | "time"
9 |
10 | "github.com/go-logr/logr"
11 | "github.com/urfave/cli/v2"
12 | )
13 |
var (
	// These will be populated by Goreleaser at build time via ldflags.
	version = "unknown"
	commit  = "-dirty-"
	// date defaults to the build/run day when not overridden by Goreleaser.
	date = time.Now().Format("2006-01-02")

	appName     = "clustercode"
	appLongName = "Distribute your video encoding tasks across a cluster of nodes!"

	// envPrefix is the global prefix to use for the keys in environment variables
	envPrefix = "CC"
)
26 |
func init() {
	// Remove `-v` short option from --version flag
	// so that `-v` stays available as the alias of --log-level.
	cli.VersionFlag.(*cli.BoolFlag).Aliases = nil
}
31 |
32 | func main() {
33 | ctx, app := newApp()
34 | err := app.RunContext(ctx, os.Args)
35 | // If required flags aren't set, it will return with error before we could set up logging
36 | if err != nil {
37 | _, _ = fmt.Fprintf(os.Stderr, "%v\n", err)
38 | os.Exit(1)
39 | }
40 |
41 | }
42 |
// newApp assembles the cli.App with global flags, subcommands and error
// handling, and returns the parent context carrying the mutable logger holder.
func newApp() (context.Context, *cli.App) {
	// Seed the holder with a no-op logger until setupLogging replaces it.
	logInstance := &atomic.Value{}
	logInstance.Store(logr.Discard())
	app := &cli.App{
		Name:                 appName,
		Usage:                appLongName,
		Version:              fmt.Sprintf("%s, revision=%s, date=%s", version, commit, date),
		EnableBashCompletion: true,

		Before: setupLogging,
		Flags: []cli.Flag{
			&cli.IntFlag{
				Name: "log-level", Aliases: []string{"v"}, EnvVars: envVars("LOG_LEVEL"),
				Usage: "number of the log level verbosity",
				Value: 0,
			},
			newLogFormatFlag(),
		},
		Commands: []*cli.Command{
			newOperatorCommand(),
			newWebhookCommand(),
			newWebuiCommand(),
			newScanCommand(),
			newCountCommand(),
			newCleanupCommand(),
		},
		ExitErrHandler: func(ctx *cli.Context, err error) {
			if err != nil {
				AppLogger(ctx).Error(err, "fatal error")
				cli.HandleExitCoder(cli.Exit("", 1))
			}
		},
	}
	hasSubcommands := len(app.Commands) > 0
	app.Action = rootAction(hasSubcommands)
	// There is logr.NewContext(...) which returns a context that carries the logger instance.
	// However, since we are configuring and replacing this logger after starting up and parsing the flags,
	// we'll store a thread-safe atomic reference.
	parentCtx := context.WithValue(context.Background(), loggerContextKey{}, logInstance)
	return parentCtx, app
}
84 |
85 | func rootAction(hasSubcommands bool) func(context *cli.Context) error {
86 | return func(ctx *cli.Context) error {
87 | if hasSubcommands {
88 | return cli.ShowAppHelp(ctx)
89 | }
90 | return LogMetadata(ctx)
91 | }
92 | }
93 |
94 | // env combines envPrefix with given suffix delimited by underscore.
95 | func env(suffix string) string {
96 | return envPrefix + "_" + suffix
97 | }
98 |
99 | // envVars combines envPrefix with each given suffix delimited by underscore.
100 | func envVars(suffixes ...string) []string {
101 | arr := make([]string, len(suffixes))
102 | for i := range suffixes {
103 | arr[i] = env(suffixes[i])
104 | }
105 | return arr
106 | }
107 |
--------------------------------------------------------------------------------
/operator_command.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/operator"
5 | "github.com/ccremer/clustercode/pkg/operator/blueprintcontroller"
6 | "github.com/ccremer/clustercode/pkg/operator/taskcontroller"
7 | "github.com/urfave/cli/v2"
8 | )
9 |
10 | func newOperatorCommand() *cli.Command {
11 | command := &operator.Command{}
12 | return &cli.Command{
13 | Name: "operator",
14 | Usage: "Start clustercode in operator mode",
15 | Before: LogMetadata,
16 | Action: func(ctx *cli.Context) error {
17 | command.Log = AppLogger(ctx).WithName(ctx.Command.Name)
18 | blueprintcontroller.ScanRoleKind = ctx.String(newScanRoleKindFlag().Name)
19 | return command.Execute(ctx.Context)
20 | },
21 | Flags: []cli.Flag{
22 | &cli.BoolFlag{Name: "leader-election-enabled", Value: false, EnvVars: envVars("LEADER_ELECTION_ENABLED"),
23 | Usage: "Use leader election for the controller manager.",
24 | Destination: &command.LeaderElectionEnabled,
25 | Category: "Operator",
26 | },
27 | &cli.StringFlag{Name: "clustercode-image", EnvVars: envVars("CLUSTERCODE_IMAGE"),
28 | Usage: "Container image to be used when launching Clustercode jobs.",
29 | Destination: &blueprintcontroller.DefaultClusterCodeContainerImage,
30 | Category: "Encoding", Required: true,
31 | },
32 | &cli.StringFlag{Name: "ffmpeg-image", EnvVars: envVars("FFMPEG_IMAGE"),
33 | Usage: "Container image to be used when launching Ffmpeg jobs.",
34 | Destination: &taskcontroller.DefaultFfmpegContainerImage,
35 | Category: "Encoding", Required: true,
36 | },
37 | newScanRoleKindFlag(),
38 | &cli.StringFlag{Name: "scan-role-name", EnvVars: envVars("SCAN_ROLE_NAME"),
39 | Usage: "TODO",
40 | Value: "clustercode-edit",
41 | Destination: &blueprintcontroller.ScanRoleName,
42 | Category: "Encoding",
43 | },
44 | },
45 | }
46 | }
47 |
--------------------------------------------------------------------------------
/package/rbac/role.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | creationTimestamp: null
6 | name: manager-role
7 | rules:
8 | - apiGroups:
9 | - batch
10 | resources:
11 | - cronjobs
12 | verbs:
13 | - create
14 | - delete
15 | - get
16 | - list
17 | - patch
18 | - update
19 | - watch
20 | - apiGroups:
21 | - batch
22 | resources:
23 | - cronjobs/status
24 | verbs:
25 | - get
26 | - patch
27 | - update
28 | - apiGroups:
29 | - batch
30 | resources:
31 | - jobs
32 | verbs:
33 | - create
34 | - delete
35 | - deletecollection
36 | - get
37 | - list
38 | - patch
39 | - update
40 | - watch
41 | - apiGroups:
42 | - clustercode.github.io
43 | resources:
44 | - blueprints
45 | verbs:
46 | - create
47 | - delete
48 | - get
49 | - list
50 | - patch
51 | - update
52 | - watch
53 | - apiGroups:
54 | - clustercode.github.io
55 | resources:
56 | - blueprints/finalizers
57 | - blueprints/status
58 | verbs:
59 | - get
60 | - patch
61 | - update
62 | - apiGroups:
63 | - clustercode.github.io
64 | resources:
65 | - tasks
66 | verbs:
67 | - create
68 | - delete
69 | - get
70 | - list
71 | - patch
72 | - update
73 | - watch
74 | - apiGroups:
75 | - clustercode.github.io
76 | resources:
77 | - tasks/finalizers
78 | - tasks/status
79 | verbs:
80 | - get
81 | - patch
82 | - update
83 | - apiGroups:
84 | - coordination.k8s.io
85 | resources:
86 | - leases
87 | verbs:
88 | - create
89 | - get
90 | - list
91 | - update
92 | - apiGroups:
93 | - ""
94 | resources:
95 | - configmaps
96 | verbs:
97 | - create
98 | - delete
99 | - get
100 | - list
101 | - patch
102 | - update
103 | - watch
104 | - apiGroups:
105 | - ""
106 | resources:
107 | - serviceaccounts
108 | verbs:
109 | - create
110 | - delete
111 | - get
112 | - list
113 | - watch
114 | - apiGroups:
115 | - rbac.authorization.k8s.io
116 | resources:
117 | - rolebindings
118 | - roles
119 | verbs:
120 | - create
121 | - delete
122 | - get
123 | - list
124 | - watch
125 |
--------------------------------------------------------------------------------
/package/webhook/manifests.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: admissionregistration.k8s.io/v1
3 | kind: MutatingWebhookConfiguration
4 | metadata:
5 | creationTimestamp: null
6 | name: mutating-webhook-configuration
7 | webhooks:
8 | - admissionReviewVersions:
9 | - v1
10 | clientConfig:
11 | service:
12 | name: webhook-service
13 | namespace: system
14 | path: /mutate-clustercode-github-io-v1alpha1-blueprint
15 | failurePolicy: Fail
16 | name: blueprints.clustercode.github.io
17 | rules:
18 | - apiGroups:
19 | - clustercode.github.io
20 | apiVersions:
21 | - v1alpha1
22 | operations:
23 | - CREATE
24 | - UPDATE
25 | resources:
26 | - blueprints
27 | sideEffects: None
28 | ---
29 | apiVersion: admissionregistration.k8s.io/v1
30 | kind: ValidatingWebhookConfiguration
31 | metadata:
32 | creationTimestamp: null
33 | name: validating-webhook-configuration
34 | webhooks:
35 | - admissionReviewVersions:
36 | - v1
37 | clientConfig:
38 | service:
39 | name: webhook-service
40 | namespace: system
41 | path: /validate-clustercode-github-io-v1alpha1-blueprint
42 | failurePolicy: Fail
43 | name: blueprints.clustercode.github.io
44 | rules:
45 | - apiGroups:
46 | - clustercode.github.io
47 | apiVersions:
48 | - v1alpha1
49 | operations:
50 | - CREATE
51 | - UPDATE
52 | resources:
53 | - blueprints
54 | sideEffects: None
55 |
--------------------------------------------------------------------------------
/pkg/api/conditions/types.go:
--------------------------------------------------------------------------------
1 | package conditions
2 |
3 | import (
4 | "fmt"
5 |
6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
7 | )
8 |
9 | func SplitComplete() metav1.Condition {
10 | return metav1.Condition{
11 | Type: "SplitComplete",
12 | Status: metav1.ConditionTrue,
13 | Reason: "SplitSuccessful",
14 | Message: "Source file successfully split into multiple slices",
15 | }
16 | }
17 |
18 | func CountComplete(amount int) metav1.Condition {
19 | return metav1.Condition{
20 | Type: "CountComplete",
21 | Status: metav1.ConditionTrue,
22 | Reason: "CountedIntermediateFiles",
23 | Message: fmt.Sprintf("Counted slices: %d", amount),
24 | }
25 | }
26 |
27 | func Progressing() metav1.Condition {
28 | return metav1.Condition{
29 | Type: "Progressing",
30 | Status: metav1.ConditionTrue,
31 | Reason: "SlicesScheduled",
32 | Message: "Slices are being processed",
33 | }
34 | }
35 |
36 | func ProgressingSuccessful() metav1.Condition {
37 | return metav1.Condition{
38 | Type: "Progressing",
39 | Status: metav1.ConditionFalse,
40 | Reason: "AllSlicesCompleted",
41 | Message: "All planned slices successfully processed",
42 | }
43 | }
44 | func MergeComplete() metav1.Condition {
45 | return metav1.Condition{
46 | Type: "MergeComplete",
47 | Status: metav1.ConditionTrue,
48 | Reason: "MergedIntermediateFiles",
49 | Message: "Merged slices back together",
50 | }
51 | }
52 |
53 | func Ready() metav1.Condition {
54 | return metav1.Condition{
55 | Type: "Ready",
56 | Status: metav1.ConditionTrue,
57 | Reason: "TaskProcessedSuccessfully",
58 | Message: "Task has been successfully processed",
59 | }
60 | }
61 |
62 | const (
63 | TypeFailed = "Failed"
64 | )
65 |
66 | func Failed(err error) metav1.Condition {
67 |
68 | return metav1.Condition{
69 | Type: TypeFailed,
70 | Status: metav1.ConditionTrue,
71 | Reason: "FailedReconciliation",
72 | Message: err.Error(),
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
/pkg/api/generate.go:
--------------------------------------------------------------------------------
1 | //go:build generate
2 |
3 | // Remove existing manifests
4 | //go:generate rm -rf ../../package/crds ../../package/rbac
5 |
6 | // Generate deepcopy methodsets and CRD manifests
7 | //go:generate go run -tags generate sigs.k8s.io/controller-tools/cmd/controller-gen object:headerFile=../../.github/boilerplate.go.txt paths=./... crd:crdVersions=v1 output:artifacts:config=../../package/crds
8 |
9 | package api
10 |
--------------------------------------------------------------------------------
/pkg/api/init.go:
--------------------------------------------------------------------------------
1 | // Package api contains Kubernetes API for the Template provider.
2 | package api
3 |
4 | import (
5 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
6 | "k8s.io/apimachinery/pkg/runtime"
7 | )
8 |
// AddToSchemes may be used to add all resources defined in the project to a Scheme
var AddToSchemes runtime.SchemeBuilder

func init() {
	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
	AddToSchemes = append(AddToSchemes,
		v1alpha1.AddToScheme,
	)
}
18 |
19 | // AddToScheme adds all Resources to the Scheme
20 | func AddToScheme(s *runtime.Scheme) error {
21 | return AddToSchemes.AddToScheme(s)
22 | }
23 |
--------------------------------------------------------------------------------
/pkg/api/v1alpha1/blueprint_types.go:
--------------------------------------------------------------------------------
1 | package v1alpha1
2 |
3 | import (
4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
5 | )
6 |
func init() {
	// Register Blueprint kinds with the scheme builder declared in groupversion_info.go.
	SchemeBuilder.Register(&Blueprint{}, &BlueprintList{})
}
10 |
// NOTE(review): the "Schedule" printcolumn JSONPath ".spec.scanSchedule" does not
// match the json tags below (ScanSpec.Schedule serializes as .spec.scan.schedule),
// and "Current Tasks" (.status.currentTasks) is a list, not an integer — verify
// these columns render as intended.
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Schedule",type="string",JSONPath=".spec.scanSchedule",description="Cron schedule of media scans"
// +kubebuilder:printcolumn:name="Suspended",type="boolean",JSONPath=".spec.suspend",description="Whether media scanning is suspended"
// +kubebuilder:printcolumn:name="Current Tasks",type="integer",JSONPath=".status.currentTasks",description="Currently active tasks"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

// Blueprint is the Schema for the Blueprint API
type Blueprint struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   BlueprintSpec   `json:"spec,omitempty"`
	Status BlueprintStatus `json:"status,omitempty"`
}
26 |
// +kubebuilder:object:root=true

// BlueprintList contains a list of Blueprints.
type BlueprintList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Blueprint `json:"items"`
}
35 |
// BlueprintSpec specifies Clustercode settings
type BlueprintSpec struct {
	// Storage configures the source, intermediate and target volumes.
	Storage StorageSpec `json:"storage,omitempty"`
	// MaxParallelTasks limits how many Tasks may run concurrently.
	// NOTE(review): unset (0) makes IsMaxParallelTaskLimitReached always true — confirm intended.
	MaxParallelTasks int `json:"maxParallelTasks,omitempty"`

	// Suspend pauses media scanning when true.
	Suspend                 bool                `json:"suspend,omitempty"`
	TaskConcurrencyStrategy ClustercodeStrategy `json:"taskConcurrencyStrategy,omitempty"`

	Scan    ScanSpec    `json:"scan,omitempty"`
	Encode  EncodeSpec  `json:"encode,omitempty"`
	Cleanup CleanupSpec `json:"cleanup,omitempty"`
}
48 |
// ScanSpec configures the recurring scan for new media files.
type ScanSpec struct {
	// Schedule is a cron expression for the scan CronJob.
	Schedule string `json:"schedule"`
	// MediaFileExtensions lists file extensions considered media files during a scan.
	MediaFileExtensions []string `json:"mediaFileExtensions,omitempty"`
}
53 |
// CleanupSpec configures the pods that clean up intermediate files.
type CleanupSpec struct {
	// PodTemplate contains a selection of fields to customize the spawned ffmpeg-based pods.
	// Some fields will be overwritten:
	// * Volumes and volume mounts will be set based on StorageSpec.
	// * Container args of the `ffmpeg` container will be set based on SplitCommandArgs, TranscodeCommandArgs, MergeCommandArgs.
	PodTemplate PodTemplate `json:"podTemplate,omitempty"`
}
61 |
// BlueprintStatus reflects the observed state of a Blueprint.
type BlueprintStatus struct {
	// Conditions hold the latest observations of the Blueprint's state.
	Conditions []metav1.Condition `json:"conditions,omitempty"`
	// CurrentTasks references the Tasks currently spawned by this Blueprint.
	CurrentTasks []TaskRef `json:"currentTasks,omitempty"`
}
66 |
// TaskRef references a Task by name.
type TaskRef struct {
	TaskName string `json:"taskName,omitempty"`
}
70 |
// IsMaxParallelTaskLimitReached will return true if the count of current task has reached MaxParallelTasks.
// NOTE(review): with MaxParallelTasks unset (zero value), this is always true,
// which would block any new tasks — confirm that is the intended default.
func (in *Blueprint) IsMaxParallelTaskLimitReached() bool {
	return len(in.Status.CurrentTasks) >= in.Spec.MaxParallelTasks
}
75 |
76 | // GetServiceAccountName retrieves a ServiceAccount name that would go along with this Blueprint.
77 | func (in *Blueprint) GetServiceAccountName() string {
78 | return in.Name + "-clustercode"
79 | }
80 |
--------------------------------------------------------------------------------
/pkg/api/v1alpha1/common.go:
--------------------------------------------------------------------------------
1 | package v1alpha1
2 |
3 | import (
4 | "fmt"
5 | "net/url"
6 | "strings"
7 |
8 | "k8s.io/apimachinery/pkg/util/runtime"
9 | )
10 |
// StorageSpec groups the three volumes Clustercode works with.
type StorageSpec struct {
	// SourcePvc is a reference to the PVC which contains the source media files to encode.
	// If `sourcePvc.claimName` is empty, then you need to specify a pod template that configures a volume named "source".
	SourcePvc VolumeRef `json:"sourcePvc"`
	// IntermediatePvc is a reference to the PVC which contains the intermediate media files as part of the splitting and merging.
	// If `intermediatePvc.claimName` is empty, then you need to specify a pod template that configures a volume named "intermediate".
	IntermediatePvc VolumeRef `json:"intermediatePvc"`
	// TargetPvc is a reference to the PVC which contains the final result files.
	// If `targetPvc.claimName` is empty, then you need to specify a pod template that configures a volume named "target".
	TargetPvc VolumeRef `json:"targetPvc"`
}
22 |
// VolumeRef identifies a PVC and an optional path within it.
type VolumeRef struct {
	// ClaimName is the name of the PVC.
	ClaimName string `json:"claimName"`
	// SubPath is an optional path within the referenced PVC.
	// This is useful if the same PVC is shared.
	SubPath string `json:"subPath,omitempty"`
}
30 |
// EncodeSpec configures the ffmpeg invocations for split, transcode and merge.
type EncodeSpec struct {
	// SplitCommandArgs are the ffmpeg arguments for splitting the source into slices.
	SplitCommandArgs []string `json:"splitCommandArgs"`
	// TranscodeCommandArgs are the ffmpeg arguments for transcoding each slice.
	TranscodeCommandArgs []string `json:"transcodeCommandArgs"`
	// MergeCommandArgs are the ffmpeg arguments for merging slices back together.
	MergeCommandArgs []string `json:"mergeCommandArgs"`

	// PodTemplate contains a selection of fields to customize the spawned ffmpeg-based pods.
	// Some fields will be overwritten:
	// * Volumes and volume mounts will be set based on StorageSpec, if the claim names are given.
	// * Container args of the `ffmpeg` container will be set based on SplitCommandArgs, TranscodeCommandArgs, MergeCommandArgs.
	PodTemplate PodTemplate `json:"podTemplate,omitempty"`

	// SliceSize is the time in seconds of the slice lengths.
	// Higher values yield lower parallelization but less overhead.
	// Lower values yield high parallelization but more overhead.
	// If SliceSize is higher than the total length of the media, there may be just 1 slice with effectively no parallelization.
	SliceSize int `json:"sliceSize,omitempty"`
}
48 |
const (
	// MediaFileDoneSuffix marks a source file as already processed.
	MediaFileDoneSuffix = "_done"
	// ConfigMapFileName is the file name inside the ConfigMap holding the scanned file list.
	ConfigMapFileName = "file-list.txt"
)
53 |
54 | type ClusterCodeUrl string
55 |
56 | func ToUrl(root, path string) ClusterCodeUrl {
57 | newUrl, err := url.Parse(fmt.Sprintf("cc://%s/%s", root, strings.Replace(path, root, "", 1)))
58 | runtime.Must(err)
59 | return ClusterCodeUrl(newUrl.String())
60 | }
61 |
62 | func (u ClusterCodeUrl) GetRoot() string {
63 | parsed, err := url.Parse(string(u))
64 | if err != nil {
65 | return ""
66 | }
67 | return parsed.Host
68 | }
69 |
70 | func (u ClusterCodeUrl) GetPath() string {
71 | parsed, err := url.Parse(string(u))
72 | if err != nil {
73 | return ""
74 | }
75 | return parsed.Path
76 | }
77 |
78 | func (u ClusterCodeUrl) StripSubPath(subpath string) string {
79 | path := u.GetPath()
80 | return strings.Replace(path, subpath, "", 1)
81 | }
82 |
// String implements fmt.Stringer by returning the raw URL string.
func (u ClusterCodeUrl) String() string {
	return string(u)
}
86 |
--------------------------------------------------------------------------------
/pkg/api/v1alpha1/groupversion_info.go:
--------------------------------------------------------------------------------
1 | // Package v1alpha1 contains API Schema definitions for the sync v1alpha1 API group
2 | // +kubebuilder:object:generate=true
3 | // +groupName=clustercode.github.io
4 |
5 | // +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
6 |
7 | package v1alpha1
8 |
9 | import (
10 | "k8s.io/apimachinery/pkg/runtime/schema"
11 | "sigs.k8s.io/controller-runtime/pkg/scheme"
12 | )
13 |
var (
	// GroupVersion is group version used to register these objects.
	GroupVersion = schema.GroupVersion{Group: "clustercode.github.io", Version: "v1alpha1"}

	// SchemeBuilder is used to add go types to the GroupVersionKind scheme.
	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}

	// AddToScheme adds the types in this group-version to the given scheme.
	AddToScheme = SchemeBuilder.AddToScheme
)
24 |
--------------------------------------------------------------------------------
/pkg/api/v1alpha1/task_types.go:
--------------------------------------------------------------------------------
1 | package v1alpha1
2 |
3 | import (
4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
5 | "k8s.io/apimachinery/pkg/labels"
6 | )
7 |
func init() {
	// Register Task kinds with the scheme builder declared in groupversion_info.go.
	SchemeBuilder.Register(&Task{}, &TaskList{})
}
11 |
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:name="Source",type="string",JSONPath=".spec.sourceUrl",description="Source file name"
// +kubebuilder:printcolumn:name="Target",type="string",JSONPath=".spec.targetUrl",description="Target file name"
// +kubebuilder:printcolumn:name="Blueprint",type="string",JSONPath=`.metadata.ownerReferences[?(@.kind=='Blueprint')].name`,description="Blueprint reference"
// +kubebuilder:printcolumn:name="Slices",type="string",JSONPath=`.spec.slicesPlannedCount`,description="Clustercode Total Slices"
// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"

// Task is a projection out of a Blueprint
type Task struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   TaskSpec   `json:"spec,omitempty"`
	Status TaskStatus `json:"status,omitempty"`
}
28 |
// +kubebuilder:object:root=true

// TaskList contains a list of Task
type TaskList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	Items           []Task `json:"items"`
}
37 |
// TaskSpec defines the desired state of Task.
type TaskSpec struct {
	// TaskId uniquely identifies this task; also used as label value on spawned resources.
	TaskId  ClustercodeTaskId `json:"taskId,omitempty"`
	Storage StorageSpec       `json:"storage,omitempty"`
	// SourceUrl and TargetUrl are cc:// URLs pointing at the input and output files.
	SourceUrl ClusterCodeUrl `json:"sourceUrl,omitempty"`
	TargetUrl ClusterCodeUrl `json:"targetUrl,omitempty"`
	Suspend   bool           `json:"suspend,omitempty"`
	Encode    EncodeSpec     `json:"encode,omitempty"`
	Cleanup   CleanupSpec    `json:"cleanup,omitempty"`
	// ServiceAccountName is the ServiceAccount used by the spawned jobs.
	ServiceAccountName string `json:"serviceAccountName,omitempty"`
	// FileListConfigMapRef names the ConfigMap carrying the slice file list.
	FileListConfigMapRef string              `json:"fileListConfigMapRef,omitempty"`
	ConcurrencyStrategy  ClustercodeStrategy `json:"concurrencyStrategy,omitempty"`
	// SlicesPlannedCount is the total number of slices planned for this task.
	SlicesPlannedCount int `json:"slicesPlannedCount,omitempty"`
}
52 |
// TaskStatus reflects the observed progress of a Task.
type TaskStatus struct {
	Conditions []metav1.Condition `json:"conditions,omitempty"`
	// SlicesScheduledCount and SlicesFinishedCount track slice progress numerically.
	SlicesScheduledCount int `json:"slicesScheduledCount,omitempty"`
	SlicesFinishedCount  int `json:"slicesFinishedCount,omitempty"`
	// SlicesScheduled and SlicesFinished reference the per-slice jobs.
	SlicesScheduled []ClustercodeSliceRef `json:"slicesScheduled,omitempty"`
	SlicesFinished  []ClustercodeSliceRef `json:"slicesFinished,omitempty"`
}
60 |
// ClustercodeSliceRef links a slice index to the Job processing it.
type ClustercodeSliceRef struct {
	JobName    string `json:"jobName,omitempty"`
	SliceIndex int    `json:"sliceIndex"`
}
65 |
// ClustercodeStrategy selects how concurrent task processing is bounded.
type ClustercodeStrategy struct {
	ConcurrentCountStrategy *ClustercodeCountStrategy `json:"concurrentCountStrategy,omitempty"`
}

// ClustercodeCountStrategy bounds concurrency by a maximum count.
type ClustercodeCountStrategy struct {
	MaxCount int `json:"maxCount,omitempty"`
}

// ClustercodeTaskId is the string identifier of a Task.
type ClustercodeTaskId string
74 |
const (
	// ClustercodeTaskIdLabelKey is the label key carrying the task id on spawned resources.
	ClustercodeTaskIdLabelKey = "clustercode.github.io/task-id"
)
78 |
79 | func (id ClustercodeTaskId) AsLabels() labels.Set {
80 | return map[string]string{
81 | ClustercodeTaskIdLabelKey: id.String(),
82 | }
83 | }
84 |
// String implements fmt.Stringer by returning the raw task id.
func (id ClustercodeTaskId) String() string {
	return string(id)
}
88 |
--------------------------------------------------------------------------------
/pkg/cleanupcmd/run.go:
--------------------------------------------------------------------------------
1 | package cleanupcmd
2 |
3 | import (
4 | "context"
5 | "os"
6 | "path/filepath"
7 |
8 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
9 | "github.com/ccremer/clustercode/pkg/internal/pipe"
10 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
11 | pipeline "github.com/ccremer/go-command-pipeline"
12 | "github.com/go-logr/logr"
13 | "k8s.io/apimachinery/pkg/types"
14 | "sigs.k8s.io/controller-runtime/pkg/client"
15 | )
16 |
// Command holds the configuration for the cleanup subcommand.
type Command struct {
	Log logr.Logger

	// SourceRootDir is the mount root below which the source and intermediate sub-paths live.
	SourceRootDir string
	// Namespace and TaskName identify the Task whose files should be cleaned up.
	Namespace string
	TaskName  string
}
24 |
// commandContext carries state between the pipeline steps of Execute.
type commandContext struct {
	context.Context
	// dependencyResolver enforces that steps run after the steps they depend on.
	dependencyResolver pipeline.DependencyResolver[*commandContext]

	kube client.Client
	task *v1alpha1.Task
	// intermediaryFiles are the glob matches collected by listIntermediaryFiles.
	intermediaryFiles []string
}
33 |
// Execute runs the command and returns an error, if any.
// It fetches the Task, deletes the intermediate slice files matching its id,
// and finally deletes the source media file.
func (c *Command) Execute(ctx context.Context) error {

	pctx := &commandContext{
		dependencyResolver: pipeline.NewDependencyRecorder[*commandContext](),
		Context:            ctx,
	}

	p := pipeline.NewPipeline[*commandContext]().WithBeforeHooks(pipe.DebugLogger[*commandContext](c.Log), pctx.dependencyResolver.Record)
	p.WithSteps(
		p.NewStep("create client", c.createClient),
		p.NewStep("fetch task", c.fetchTask),
		p.NewStep("list intermediary files", c.listIntermediaryFiles),
		p.NewStep("delete intermediary files", c.deleteFiles),
		p.NewStep("delete source file", c.deleteSourceFile),
	)

	return p.RunWithContext(pctx)
}
53 |
54 | func (c *Command) createClient(ctx *commandContext) error {
55 | kube, err := pipe.NewKubeClient(ctx)
56 | ctx.kube = kube
57 | return err
58 | }
59 |
// fetchTask loads the Task identified by Namespace/TaskName into the context.
// Requires createClient to have run.
func (c *Command) fetchTask(ctx *commandContext) error {
	ctx.dependencyResolver.MustRequireDependencyByFuncName(c.createClient)
	log := c.getLogger()

	task := &v1alpha1.Task{}
	if err := ctx.kube.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: c.TaskName}, task); err != nil {
		return err
	}
	ctx.task = task
	log.Info("fetched task")
	return nil
}
72 |
73 | func (c *Command) listIntermediaryFiles(ctx *commandContext) error {
74 | ctx.dependencyResolver.MustRequireDependencyByFuncName(c.fetchTask)
75 |
76 | intermediaryFiles, err := filepath.Glob(filepath.Join(c.SourceRootDir, internaltypes.IntermediateSubMountPath, ctx.task.Spec.TaskId.String()+"*"))
77 | ctx.intermediaryFiles = intermediaryFiles
78 | return err
79 | }
80 |
81 | func (c *Command) deleteFiles(ctx *commandContext) error {
82 | ctx.dependencyResolver.MustRequireDependencyByFuncName(c.listIntermediaryFiles)
83 | log := c.getLogger()
84 |
85 | for _, file := range ctx.intermediaryFiles {
86 | log.Info("deleting file", "file", file)
87 | if err := os.Remove(file); err != nil {
88 | log.Info("could not delete file, ignoring", "file", file, "error", err.Error())
89 | }
90 | }
91 | return nil
92 | }
93 |
// deleteSourceFile removes the original source media file referenced by the
// Task's SourceUrl. Unlike deleteFiles, a failure here is returned as an error.
// Requires fetchTask to have run.
func (c *Command) deleteSourceFile(ctx *commandContext) error {
	ctx.dependencyResolver.MustRequireDependencyByFuncName(c.fetchTask)
	log := c.getLogger()

	sourceFile := filepath.Join(c.SourceRootDir, internaltypes.SourceSubMountPath, ctx.task.Spec.SourceUrl.GetPath())
	log.Info("deleting file", "file", sourceFile)
	return os.Remove(sourceFile)
}
102 |
// getLogger returns the command logger enriched with task name and namespace.
func (c *Command) getLogger() logr.Logger {
	return c.Log.WithValues("task_name", c.TaskName, "namespace", c.Namespace)
}
106 |
--------------------------------------------------------------------------------
/pkg/internal/pipe/debuglogger.go:
--------------------------------------------------------------------------------
1 | package pipe
2 |
3 | import (
4 | "context"
5 |
6 | pipeline "github.com/ccremer/go-command-pipeline"
7 | "github.com/go-logr/logr"
8 | )
9 |
// DebugLogger returns a pipeline hook that logs each step name at verbosity 2
// using the given logger.
func DebugLogger[T context.Context](logger logr.Logger) pipeline.Listener[T] {
	hook := func(step pipeline.Step[T]) {
		logger.V(2).Info(`Entering step "` + step.Name + `"`)
	}
	return hook
}
18 |
--------------------------------------------------------------------------------
/pkg/internal/pipe/failedcondition.go:
--------------------------------------------------------------------------------
1 | package pipe
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/ccremer/clustercode/pkg/api/conditions"
7 | "k8s.io/apimachinery/pkg/api/meta"
8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
9 | "sigs.k8s.io/controller-runtime/pkg/client"
10 | )
11 |
12 | func UpdateFailedCondition(ctx context.Context, kube client.Client, conds *[]metav1.Condition, obj client.Object, err error) error {
13 | if err == nil {
14 | meta.RemoveStatusCondition(conds, conditions.TypeFailed)
15 | } else {
16 | meta.SetStatusCondition(conds, conditions.Failed(err))
17 | }
18 | return kube.Status().Update(ctx, obj)
19 | }
20 |
--------------------------------------------------------------------------------
/pkg/internal/pipe/kubeclient.go:
--------------------------------------------------------------------------------
1 | package pipe
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
8 | pipeline "github.com/ccremer/go-command-pipeline"
9 | batchv1 "k8s.io/api/batch/v1"
10 | "k8s.io/apimachinery/pkg/runtime"
11 | kubernetesscheme "k8s.io/client-go/kubernetes/scheme"
12 | "k8s.io/client-go/rest"
13 | controllerruntime "sigs.k8s.io/controller-runtime"
14 | "sigs.k8s.io/controller-runtime/pkg/client"
15 | )
16 |
// kubeContext carries the intermediate state of the client-creation pipeline.
// It embeds the caller's context so the pipeline steps satisfy context.Context.
type kubeContext struct {
	context.Context

	kubeconfig *rest.Config    // REST config, set by loadKubeConfigFn
	kube       client.Client   // resulting client, set by createClientFn
	scheme     *runtime.Scheme // scheme with all required types, set by registerSchemesFn
}
24 |
25 | // NewKubeClient creates a new client.Client using in-cluster config.
26 | func NewKubeClient(ctx context.Context) (client.Client, error) {
27 | pctx := &kubeContext{Context: ctx}
28 | p := pipeline.NewPipeline[*kubeContext]().WithBeforeHooks(DebugLogger[*kubeContext](controllerruntime.LoggerFrom(ctx)))
29 | p.WithSteps(
30 | p.NewStep("register schemes", registerSchemesFn),
31 | p.NewStep("load kube config", loadKubeConfigFn),
32 | p.NewStep("create client", createClientFn),
33 | )
34 | err := p.RunWithContext(pctx)
35 | if err != nil {
36 | return nil, fmt.Errorf("cannot instantiate new kubernetes client: %w", err)
37 | }
38 | return pctx.kube, nil
39 | }
40 |
41 | var createClientFn = func(ctx *kubeContext) error {
42 | kube, err := client.New(ctx.kubeconfig, client.Options{Scheme: ctx.scheme})
43 | ctx.kube = kube
44 | return err
45 | }
46 |
47 | var registerSchemesFn = func(ctx *kubeContext) error {
48 | ctx.scheme = runtime.NewScheme()
49 | b := &runtime.SchemeBuilder{}
50 | b.Register(
51 | kubernetesscheme.AddToScheme,
52 | batchv1.AddToScheme,
53 | v1alpha1.AddToScheme,
54 | )
55 | return b.AddToScheme(ctx.scheme)
56 | }
57 |
// loadKubeConfigFn loads the REST config via controller-runtime's default
// lookup rules and stores it on the pipeline context.
var loadKubeConfigFn = func(ctx *kubeContext) error {
	clientConfig, err := controllerruntime.GetConfig()
	ctx.kubeconfig = clientConfig
	return err
}
63 |
--------------------------------------------------------------------------------
/pkg/internal/types/types.go:
--------------------------------------------------------------------------------
1 | package types
2 |
3 | import (
4 | "k8s.io/apimachinery/pkg/labels"
5 | )
6 |
var (
	// ClusterCodeLabels is the common label set applied to every resource
	// managed by clustercode; controllers select their objects with it.
	ClusterCodeLabels = labels.Set{
		"app.kubernetes.io/managed-by": "clustercode",
	}
)
12 |
type (
	// ClusterCodeJobType discriminates the kind of work a spawned Job performs.
	ClusterCodeJobType string
)
16 |
const (
	// Sub-paths below the pod mount root under which the volumes are mounted.
	SourceSubMountPath       = "source"
	TargetSubMountPath       = "target"
	IntermediateSubMountPath = "intermediate"
	ConfigSubMountPath       = "config"

	// ClustercodeTypeLabelKey labels a Job with its ClusterCodeJobType.
	ClustercodeTypeLabelKey = "clustercode.github.io/type"
	// ClustercodeSliceIndexLabelKey labels a slice Job with its slice index.
	ClustercodeSliceIndexLabelKey = "clustercode.github.io/slice-index"

	// The known job types spawned over a task's lifecycle.
	JobTypeScan    ClusterCodeJobType = "scan"
	JobTypeSplit   ClusterCodeJobType = "split"
	JobTypeSlice   ClusterCodeJobType = "slice"
	JobTypeCount   ClusterCodeJobType = "count"
	JobTypeMerge   ClusterCodeJobType = "merge"
	JobTypeCleanup ClusterCodeJobType = "cleanup"
)
33 |
var (
	// JobTypes lists all known job types, e.g. for iteration or validation.
	JobTypes = []ClusterCodeJobType{
		JobTypeScan, JobTypeSplit, JobTypeCount, JobTypeSlice,
		JobTypeMerge, JobTypeCleanup}
)
39 |
40 | func (t ClusterCodeJobType) AsLabels() labels.Set {
41 | return labels.Set{
42 | ClustercodeTypeLabelKey: string(t),
43 | }
44 | }
45 |
// String implements fmt.Stringer.
func (t ClusterCodeJobType) String() string {
	return string(t)
}
49 |
--------------------------------------------------------------------------------
/pkg/internal/utils/utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "strings"
5 |
6 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
7 | "k8s.io/api/batch/v1"
8 | corev1 "k8s.io/api/core/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "k8s.io/apimachinery/pkg/types"
11 | "k8s.io/utils/pointer"
12 | )
13 |
14 | func EnsureVolumeMountIf(enabled bool, container *corev1.Container, volumeName, podMountRoot, subPath string) {
15 | if !enabled || HasVolumeMount(*container, volumeName) {
16 | return
17 | }
18 | container.VolumeMounts = append(container.VolumeMounts,
19 | corev1.VolumeMount{Name: volumeName, MountPath: podMountRoot, SubPath: subPath})
20 | }
21 |
22 | func EnsurePVCVolume(job *v1.Job, name string, volume v1alpha1.VolumeRef) {
23 | for _, v := range job.Spec.Template.Spec.Volumes {
24 | if v.Name == name {
25 | return
26 | }
27 | }
28 | job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, corev1.Volume{
29 | Name: name,
30 | VolumeSource: corev1.VolumeSource{
31 | PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
32 | ClaimName: volume.ClaimName,
33 | },
34 | }})
35 | }
36 |
37 | func HasVolumeMount(container corev1.Container, name string) bool {
38 | for _, mount := range container.VolumeMounts {
39 | if mount.Name == name {
40 | return true
41 | }
42 | }
43 | return false
44 | }
45 |
46 | func GetOwner(obj metav1.Object) types.NamespacedName {
47 | for _, owner := range obj.GetOwnerReferences() {
48 | if pointer.BoolDeref(owner.Controller, false) {
49 | return types.NamespacedName{Namespace: obj.GetNamespace(), Name: owner.Name}
50 | }
51 | }
52 | return types.NamespacedName{}
53 | }
54 |
// MergeArgsAndReplaceVariables flattens the given argument lists into a single
// slice, replacing every occurrence of each variable key with its value in
// every argument. The relative order of arguments is preserved.
func MergeArgsAndReplaceVariables(variables map[string]string, argsList ...[]string) (merged []string) {
	substitute := func(arg string) string {
		for key, value := range variables {
			arg = strings.ReplaceAll(arg, key, value)
		}
		return arg
	}
	for _, args := range argsList {
		for _, arg := range args {
			merged = append(merged, substitute(arg))
		}
	}
	return merged
}
66 |
--------------------------------------------------------------------------------
/pkg/internal/utils/utils_test.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
--------------------------------------------------------------------------------
/pkg/operator/blueprintcontroller/setup.go:
--------------------------------------------------------------------------------
1 | package blueprintcontroller
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
5 | "github.com/ccremer/clustercode/pkg/operator/reconciler"
6 | ctrl "sigs.k8s.io/controller-runtime"
7 | "sigs.k8s.io/controller-runtime/pkg/predicate"
8 | )
9 |
10 | // +kubebuilder:rbac:groups=clustercode.github.io,resources=blueprints,verbs=get;list;watch;create;update;patch;delete
11 | // +kubebuilder:rbac:groups=clustercode.github.io,resources=blueprints/status;blueprints/finalizers,verbs=get;update;patch
12 | // +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete
13 | // +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete
14 | // +kubebuilder:rbac:groups=batch,resources=cronjobs/status,verbs=get;update;patch
15 | // +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;delete
16 | // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings,verbs=get;list;watch;create;delete
17 |
18 | // SetupBlueprintController adds a controller that reconciles managed resources.
19 | func SetupBlueprintController(mgr ctrl.Manager) error {
20 | name := "blueprint.clustercode.github.io"
21 |
22 | controller := reconciler.NewReconciler[*v1alpha1.Blueprint](mgr.GetClient(), &BlueprintProvisioner{
23 | Log: mgr.GetLogger().WithName("blueprint"),
24 | client: mgr.GetClient(),
25 | })
26 |
27 | return ctrl.NewControllerManagedBy(mgr).
28 | Named(name).
29 | For(&v1alpha1.Blueprint{}).
30 | WithEventFilter(predicate.GenerationChangedPredicate{}).
31 | Complete(controller)
32 | }
33 |
--------------------------------------------------------------------------------
/pkg/operator/command.go:
--------------------------------------------------------------------------------
1 | package operator
2 |
3 | import (
4 | "context"
5 | "time"
6 |
7 | "github.com/ccremer/clustercode/pkg/api"
8 | pipeline "github.com/ccremer/go-command-pipeline"
9 | "github.com/go-logr/logr"
10 | "k8s.io/client-go/rest"
11 | "k8s.io/client-go/tools/leaderelection/resourcelock"
12 | controllerruntime "sigs.k8s.io/controller-runtime"
13 | "sigs.k8s.io/controller-runtime/pkg/manager"
14 | )
15 |
// Command holds the configuration for running the operator.
type Command struct {
	// Log is the logger used by the operator and handed to controller-runtime.
	Log logr.Logger

	// LeaderElectionEnabled toggles leader election on the manager.
	LeaderElectionEnabled bool
}
21 |
// commandContext carries the intermediate state of the operator startup pipeline.
type commandContext struct {
	context.Context
	manager    manager.Manager // set by the "create manager" step
	kubeconfig *rest.Config    // set by the "get config" step
}
27 |
28 | func (c *Command) Execute(ctx context.Context) error {
29 | log := c.Log
30 | log.Info("Setting up controllers", "config", c)
31 | controllerruntime.SetLogger(log)
32 |
33 | pctx := &commandContext{Context: ctx}
34 | p := pipeline.NewPipeline[*commandContext]()
35 | p.WithSteps(
36 | p.NewStep("get config", func(ctx *commandContext) error {
37 | cfg, err := controllerruntime.GetConfig()
38 | ctx.kubeconfig = cfg
39 | return err
40 | }),
41 | p.NewStep("create manager", func(ctx *commandContext) error {
42 | // configure client-side throttling
43 | ctx.kubeconfig.QPS = 100
44 | ctx.kubeconfig.Burst = 150 // more Openshift friendly
45 |
46 | mgr, err := controllerruntime.NewManager(ctx.kubeconfig, controllerruntime.Options{
47 | // controller-runtime uses both ConfigMaps and Leases for leader election by default.
48 | // Leases expire after 15 seconds, with a 10-second renewal deadline.
49 | // We've observed leader loss due to renewal deadlines being exceeded when under high load - i.e.
50 | // hundreds of reconciles per second and ~200rps to the API server.
51 | // Switching to Leases only and longer leases appears to alleviate this.
52 | LeaderElection: c.LeaderElectionEnabled,
53 | LeaderElectionID: "leader-election-clustercode",
54 | LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
55 | LeaseDuration: func() *time.Duration { d := 60 * time.Second; return &d }(),
56 | RenewDeadline: func() *time.Duration { d := 50 * time.Second; return &d }(),
57 | })
58 | ctx.manager = mgr
59 | return err
60 | }),
61 | p.NewStep("register schemes", func(ctx *commandContext) error {
62 | return api.AddToScheme(ctx.manager.GetScheme())
63 | }),
64 | p.NewStep("setup controllers", func(ctx *commandContext) error {
65 | return SetupControllers(ctx.manager)
66 | }),
67 | p.NewStep("run manager", func(ctx *commandContext) error {
68 | log.Info("Starting manager")
69 | return ctx.manager.Start(ctx)
70 | }),
71 | )
72 |
73 | return p.RunWithContext(pctx)
74 | }
75 |
--------------------------------------------------------------------------------
/pkg/operator/generate.go:
--------------------------------------------------------------------------------
1 | //go:build generate
2 |
3 | // Generate manifests
4 | //go:generate go run sigs.k8s.io/controller-tools/cmd/controller-gen rbac:roleName=manager-role paths="./..." output:crd:artifacts:config=${CRD_ROOT_DIR}/v1alpha1 crd:crdVersions=v1 output:dir=../../package/rbac
5 |
6 | package operator
7 |
--------------------------------------------------------------------------------
/pkg/operator/jobcontroller/handler.go:
--------------------------------------------------------------------------------
1 | package jobcontroller
2 |
3 | import (
4 | "fmt"
5 | "strconv"
6 |
7 | "github.com/ccremer/clustercode/pkg/api/conditions"
8 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
9 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
10 | "k8s.io/apimachinery/pkg/api/meta"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | )
13 |
14 | func (r *JobProvisioner) determineSliceIndex(ctx *JobContext) error {
15 | indexStr, found := ctx.job.Labels[internaltypes.ClustercodeSliceIndexLabelKey]
16 | if !found {
17 | return fmt.Errorf("cannot determine slice index, missing label '%s'", internaltypes.ClustercodeSliceIndexLabelKey)
18 | }
19 | index, err := strconv.Atoi(indexStr)
20 | if err != nil {
21 | return fmt.Errorf("cannot determine slice index from label '%s': %w", internaltypes.ClustercodeSliceIndexLabelKey, err)
22 | }
23 | ctx.sliceIndex = index
24 | return err
25 | }
26 |
27 | func (r *JobProvisioner) updateStatusWithSlicesFinished(ctx *JobContext) error {
28 | ctx.resolver.MustRequireDependencyByFuncName(r.fetchTask, r.determineSliceIndex)
29 |
30 | finishedList := ctx.task.Status.SlicesFinished
31 | finishedList = append(finishedList, v1alpha1.ClustercodeSliceRef{
32 | SliceIndex: ctx.sliceIndex,
33 | JobName: ctx.job.Name,
34 | })
35 | ctx.task.Status.SlicesFinished = finishedList
36 | ctx.task.Status.SlicesFinishedCount = len(finishedList)
37 |
38 | scheduled := make([]v1alpha1.ClustercodeSliceRef, 0)
39 | for _, ref := range ctx.task.Status.SlicesScheduled {
40 | if ref.SliceIndex != ctx.sliceIndex {
41 | scheduled = append(scheduled, ref)
42 | }
43 | }
44 | ctx.task.Status.SlicesScheduled = scheduled
45 | if len(ctx.task.Status.SlicesFinished) >= ctx.task.Spec.SlicesPlannedCount {
46 | meta.SetStatusCondition(&ctx.task.Status.Conditions, conditions.ProgressingSuccessful())
47 | }
48 | return r.Client.Status().Update(ctx, ctx.task)
49 | }
50 |
51 | func (r *JobProvisioner) updateStatusWithCondition(condition metav1.Condition) func(ctx *JobContext) error {
52 | return func(ctx *JobContext) error {
53 | ctx.resolver.MustRequireDependencyByFuncName(r.fetchTask)
54 | meta.SetStatusCondition(&ctx.task.Status.Conditions, condition)
55 | return r.Client.Status().Update(ctx, ctx.task)
56 | }
57 | }
58 |
59 | func (r *JobProvisioner) updateStatusWithCountComplete(ctx *JobContext) error {
60 | ctx.resolver.MustRequireDependencyByFuncName(r.fetchTask)
61 | meta.SetStatusCondition(&ctx.task.Status.Conditions, conditions.CountComplete(ctx.task.Spec.SlicesPlannedCount))
62 | return r.Client.Status().Update(ctx, ctx.task)
63 | }
64 |
--------------------------------------------------------------------------------
/pkg/operator/jobcontroller/setup.go:
--------------------------------------------------------------------------------
1 | package jobcontroller
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/internal/types"
5 | "github.com/ccremer/clustercode/pkg/operator/reconciler"
6 | batchv1 "k8s.io/api/batch/v1"
7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 | ctrl "sigs.k8s.io/controller-runtime"
9 | "sigs.k8s.io/controller-runtime/pkg/builder"
10 | "sigs.k8s.io/controller-runtime/pkg/predicate"
11 | )
12 |
13 | // +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete;deletecollection
14 | // +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
15 |
16 | // SetupJobController adds a controller that reconciles managed resources.
17 | func SetupJobController(mgr ctrl.Manager) error {
18 | name := "job.clustercode.github.io"
19 | pred, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{MatchLabels: types.ClusterCodeLabels})
20 | if err != nil {
21 | return err
22 | }
23 | controller := reconciler.NewReconciler[*batchv1.Job](mgr.GetClient(), &JobProvisioner{Client: mgr.GetClient(), Log: mgr.GetLogger().WithName("job")})
24 | return ctrl.NewControllerManagedBy(mgr).
25 | Named(name).
26 | For(&batchv1.Job{}, builder.WithPredicates(pred)).
27 | Complete(controller)
28 | }
29 |
--------------------------------------------------------------------------------
/pkg/operator/reconciler/reconciler.go:
--------------------------------------------------------------------------------
1 | package reconciler
2 |
3 | import (
4 | "context"
5 |
6 | apierrors "k8s.io/apimachinery/pkg/api/errors"
7 | ctrl "sigs.k8s.io/controller-runtime"
8 | "sigs.k8s.io/controller-runtime/pkg/client"
9 | "sigs.k8s.io/controller-runtime/pkg/reconcile"
10 | )
11 |
// Reconciler is a generic controller for a single object type T.
// Implementations provide the type-specific provisioning logic while the
// generic controller handles fetching the object and deletion detection.
type Reconciler[T client.Object] interface {
	// NewObject returns a new instance of T.
	// Users should just return an empty object without any fields set.
	NewObject() T
	// Provision is called when reconciling objects.
	// This is only called when the object exists and was fetched successfully.
	Provision(ctx context.Context, obj T) (reconcile.Result, error)
	// Deprovision is called when the object has a deletion timestamp set.
	Deprovision(ctx context.Context, obj T) (reconcile.Result, error)
}
23 |
// controller adapts a Reconciler[T] to controller-runtime's
// reconcile.Reconciler interface.
type controller[T client.Object] struct {
	kube       client.Client // client used to fetch the reconciled object
	reconciler Reconciler[T] // type-specific reconcile logic
}
28 |
29 | // NewReconciler returns a new instance of Reconciler.
30 | func NewReconciler[T client.Object](kube client.Client, reconciler Reconciler[T]) reconcile.Reconciler {
31 | return &controller[T]{
32 | kube: kube,
33 | reconciler: reconciler,
34 | }
35 | }
36 |
37 | // Reconcile implements reconcile.Reconciler
38 | func (ctrl *controller[T]) Reconcile(ctx context.Context, request ctrl.Request) (ctrl.Result, error) {
39 | obj := ctrl.reconciler.NewObject()
40 | err := ctrl.kube.Get(ctx, request.NamespacedName, obj)
41 | if err != nil && apierrors.IsNotFound(err) {
42 | // doesn't exist anymore, ignore.
43 | return reconcile.Result{}, nil
44 | }
45 | if err != nil {
46 | // some other error
47 | return reconcile.Result{}, err
48 | }
49 | if !obj.GetDeletionTimestamp().IsZero() {
50 | return ctrl.reconciler.Deprovision(ctx, obj)
51 | }
52 | return ctrl.reconciler.Provision(ctx, obj)
53 | }
54 |
--------------------------------------------------------------------------------
/pkg/operator/setup.go:
--------------------------------------------------------------------------------
1 | package operator
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/operator/blueprintcontroller"
5 | "github.com/ccremer/clustercode/pkg/operator/jobcontroller"
6 | "github.com/ccremer/clustercode/pkg/operator/taskcontroller"
7 | ctrl "sigs.k8s.io/controller-runtime"
8 | )
9 |
10 | // +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update
11 |
12 | // SetupControllers creates all controllers and adds them to the supplied manager.
13 | func SetupControllers(mgr ctrl.Manager) error {
14 | for _, setup := range []func(ctrl.Manager) error{
15 | blueprintcontroller.SetupBlueprintController,
16 | jobcontroller.SetupJobController,
17 | taskcontroller.SetupTaskController,
18 | } {
19 | if err := setup(mgr); err != nil {
20 | return err
21 | }
22 | }
23 | return nil
24 | }
25 |
--------------------------------------------------------------------------------
/pkg/operator/taskcontroller/cleanup_job.go:
--------------------------------------------------------------------------------
1 | package taskcontroller
2 |
3 | import (
4 | "fmt"
5 |
6 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
7 | "github.com/ccremer/clustercode/pkg/operator/blueprintcontroller"
8 | batchv1 "k8s.io/api/batch/v1"
9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
10 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
11 | )
12 |
13 | func (r *TaskReconciler) ensureCleanupJob(ctx *TaskContext) error {
14 | taskId := ctx.task.Spec.TaskId
15 | job := &batchv1.Job{ObjectMeta: metav1.ObjectMeta{
16 | Name: fmt.Sprintf("%.*s-%s", 62-len(internaltypes.JobTypeCleanup), taskId, internaltypes.JobTypeCleanup),
17 | Namespace: ctx.task.Namespace,
18 | }}
19 |
20 | _, err := controllerutil.CreateOrUpdate(ctx, r.Client, job, func() error {
21 | createClustercodeJobDefinition(job, ctx.task, TaskOpts{
22 | template: ctx.task.Spec.Cleanup.PodTemplate,
23 | jobType: internaltypes.JobTypeCleanup,
24 | image: blueprintcontroller.DefaultClusterCodeContainerImage,
25 | args: []string{
26 | "--log-level=1",
27 | "cleanup",
28 | "--task-name=" + ctx.task.Name,
29 | "--namespace=" + ctx.task.Namespace,
30 | },
31 | mountSource: true,
32 | mountIntermediate: true,
33 | })
34 | return controllerutil.SetControllerReference(ctx.task, job, r.Client.Scheme())
35 | })
36 | return err
37 | }
38 |
--------------------------------------------------------------------------------
/pkg/operator/taskcontroller/count_job.go:
--------------------------------------------------------------------------------
1 | package taskcontroller
2 |
3 | import (
4 | "fmt"
5 | "path/filepath"
6 |
7 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
8 | "github.com/ccremer/clustercode/pkg/internal/utils"
9 | "github.com/ccremer/clustercode/pkg/operator/blueprintcontroller"
10 | batchv1 "k8s.io/api/batch/v1"
11 | corev1 "k8s.io/api/core/v1"
12 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
13 | "k8s.io/apimachinery/pkg/labels"
14 | "k8s.io/utils/pointer"
15 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
16 | )
17 |
// ensureCountJob creates or updates the Job that runs the clustercode "count"
// subcommand for the task. Unlike the other job builders in this package it
// constructs the pod spec inline instead of using createClustercodeJobDefinition.
func (r *TaskReconciler) ensureCountJob(ctx *TaskContext) error {
	taskId := ctx.task.Spec.TaskId
	job := &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			// truncate the task id so "<id>-count" stays within the 63-char name limit
			Name:      fmt.Sprintf("%.*s-%s", 62-len(internaltypes.JobTypeCount), taskId, internaltypes.JobTypeCount),
			Namespace: ctx.task.Namespace,
		},
	}
	_, err := controllerutil.CreateOrUpdate(ctx, r.Client, job, func() error {
		// label the job so the job controller recognizes it and maps it back to the task
		job.Labels = labels.Merge(job.Labels, labels.Merge(internaltypes.ClusterCodeLabels, labels.Merge(internaltypes.JobTypeCount.AsLabels(), taskId.AsLabels())))
		job.Spec.BackoffLimit = pointer.Int32(0) // no pod retries on failure
		job.Spec.Template.Spec.ServiceAccountName = ctx.task.Spec.ServiceAccountName
		job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever

		container := corev1.Container{
			Name:            "clustercode",
			Image:           blueprintcontroller.DefaultClusterCodeContainerImage,
			ImagePullPolicy: corev1.PullIfNotPresent,
			Args: []string{
				"--log-level=1",
				"count",
				"--task-name=" + ctx.task.Name,
				"--namespace=" + ctx.task.Namespace,
			},
		}
		// the count job only needs the intermediate volume (where slices live)
		utils.EnsureVolumeMountIf(true, &container, internaltypes.IntermediateSubMountPath,
			filepath.Join("/clustercode", internaltypes.IntermediateSubMountPath), ctx.task.Spec.Storage.IntermediatePvc.SubPath)
		utils.EnsurePVCVolume(job, internaltypes.IntermediateSubMountPath, ctx.task.Spec.Storage.IntermediatePvc)

		job.Spec.Template.Spec.Containers = []corev1.Container{container}
		return controllerutil.SetControllerReference(ctx.task, job, r.Client.Scheme())
	})
	return err
}
52 |
--------------------------------------------------------------------------------
/pkg/operator/taskcontroller/merge_job.go:
--------------------------------------------------------------------------------
1 | package taskcontroller
2 |
3 | import (
4 | "fmt"
5 | "path/filepath"
6 |
7 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
8 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
9 | "github.com/ccremer/clustercode/pkg/internal/utils"
10 | batchv1 "k8s.io/api/batch/v1"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
13 | )
14 |
15 | func (r *TaskReconciler) createMergeJob(ctx *TaskContext) error {
16 | configMountRoot := filepath.Join("/clustercode", internaltypes.ConfigSubMountPath)
17 | targetMountRoot := filepath.Join("/clustercode", internaltypes.TargetSubMountPath)
18 | variables := map[string]string{
19 | "${INPUT}": filepath.Join(configMountRoot, v1alpha1.ConfigMapFileName),
20 | "${OUTPUT}": filepath.Join(targetMountRoot, ctx.task.Spec.TargetUrl.GetPath()),
21 | }
22 | job := &batchv1.Job{ObjectMeta: metav1.ObjectMeta{
23 | Name: fmt.Sprintf("%s-%s", ctx.task.Spec.TaskId, internaltypes.JobTypeMerge),
24 | Namespace: ctx.task.Namespace,
25 | }}
26 |
27 | _, err := controllerutil.CreateOrUpdate(ctx, r.Client, job, func() error {
28 | createClustercodeJobDefinition(job, ctx.task, TaskOpts{
29 | template: ctx.task.Spec.Encode.PodTemplate,
30 | image: DefaultFfmpegContainerImage,
31 | args: utils.MergeArgsAndReplaceVariables(variables, ctx.task.Spec.Encode.MergeCommandArgs),
32 | jobType: internaltypes.JobTypeMerge,
33 | mountIntermediate: true,
34 | mountTarget: true,
35 | mountConfig: true,
36 | })
37 | return controllerutil.SetControllerReference(ctx.task, job, r.Client.Scheme())
38 | })
39 | return err
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/operator/taskcontroller/setup.go:
--------------------------------------------------------------------------------
1 | package taskcontroller
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
5 | "github.com/ccremer/clustercode/pkg/internal/types"
6 | "github.com/ccremer/clustercode/pkg/operator/reconciler"
7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
8 | ctrl "sigs.k8s.io/controller-runtime"
9 | "sigs.k8s.io/controller-runtime/pkg/builder"
10 | "sigs.k8s.io/controller-runtime/pkg/predicate"
11 | )
12 |
13 | // +kubebuilder:rbac:groups=clustercode.github.io,resources=tasks,verbs=get;list;watch;create;update;patch;delete
14 | // +kubebuilder:rbac:groups=clustercode.github.io,resources=tasks/status;tasks/finalizers,verbs=get;update;patch
15 |
16 | // SetupTaskController adds a controller that reconciles managed resources.
17 | func SetupTaskController(mgr ctrl.Manager) error {
18 | name := "task.clustercode.github.io"
19 |
20 | controller := reconciler.NewReconciler[*v1alpha1.Task](mgr.GetClient(), &TaskReconciler{
21 | Client: mgr.GetClient(),
22 | Log: mgr.GetLogger().WithName("task"),
23 | })
24 |
25 | pred, err := predicate.LabelSelectorPredicate(metav1.LabelSelector{MatchLabels: types.ClusterCodeLabels})
26 | if err != nil {
27 | return err
28 | }
29 | return ctrl.NewControllerManagedBy(mgr).
30 | Named(name).
31 | For(&v1alpha1.Task{}, builder.WithPredicates(pred)).
32 | Complete(controller)
33 | }
34 |
--------------------------------------------------------------------------------
/pkg/operator/taskcontroller/slice_job.go:
--------------------------------------------------------------------------------
1 | package taskcontroller
2 |
3 | import (
4 | "fmt"
5 | "path/filepath"
6 | "strconv"
7 |
8 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
9 | "github.com/ccremer/clustercode/pkg/internal/utils"
10 | batchv1 "k8s.io/api/batch/v1"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
13 | )
14 |
15 | func (r *TaskReconciler) createSliceJob(ctx *TaskContext) error {
16 | ctx.resolver.MustRequireDependencyByFuncName(r.determineNextSliceIndex)
17 |
18 | intermediateMountRoot := filepath.Join("/clustercode", internaltypes.IntermediateSubMountPath)
19 | index := ctx.nextSliceIndex
20 | variables := map[string]string{
21 | "${INPUT}": getSourceSegmentFileNameIndexPath(ctx, intermediateMountRoot, index),
22 | "${OUTPUT}": getTargetSegmentFileNameIndexPath(ctx, intermediateMountRoot, index),
23 | }
24 | job := &batchv1.Job{ObjectMeta: metav1.ObjectMeta{
25 | Name: fmt.Sprintf("%s-%s-%d", ctx.task.Spec.TaskId, internaltypes.JobTypeSlice, index),
26 | Namespace: ctx.task.Namespace,
27 | }}
28 | _, err := controllerutil.CreateOrUpdate(ctx, r.Client, job, func() error {
29 | createClustercodeJobDefinition(job, ctx.task, TaskOpts{
30 | template: ctx.task.Spec.Encode.PodTemplate,
31 | image: DefaultFfmpegContainerImage,
32 | args: utils.MergeArgsAndReplaceVariables(variables, ctx.task.Spec.Encode.TranscodeCommandArgs),
33 | jobType: internaltypes.JobTypeSlice,
34 | mountIntermediate: true,
35 | })
36 | job.Labels[internaltypes.ClustercodeSliceIndexLabelKey] = strconv.Itoa(index)
37 |
38 | return controllerutil.SetControllerReference(ctx.task, job, r.Client.Scheme())
39 | })
40 | ctx.job = job
41 | return err
42 |
43 | }
44 |
--------------------------------------------------------------------------------
/pkg/operator/taskcontroller/split_job.go:
--------------------------------------------------------------------------------
1 | package taskcontroller
2 |
3 | import (
4 | "fmt"
5 | "path/filepath"
6 | "strconv"
7 |
8 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
9 | "github.com/ccremer/clustercode/pkg/internal/utils"
10 | batchv1 "k8s.io/api/batch/v1"
11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
12 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
13 | )
14 |
15 | func (r *TaskReconciler) createSplitJob(ctx *TaskContext) error {
16 | sourceMountRoot := filepath.Join("/clustercode", internaltypes.SourceSubMountPath)
17 | intermediateMountRoot := filepath.Join("/clustercode", internaltypes.IntermediateSubMountPath)
18 | variables := map[string]string{
19 | "${INPUT}": filepath.Join(sourceMountRoot, ctx.task.Spec.SourceUrl.GetPath()),
20 | "${OUTPUT}": getSegmentFileNameTemplatePath(ctx, intermediateMountRoot),
21 | "${SLICE_SIZE}": strconv.Itoa(ctx.task.Spec.Encode.SliceSize),
22 | }
23 | job := &batchv1.Job{ObjectMeta: metav1.ObjectMeta{
24 | Name: fmt.Sprintf("%s-%s", ctx.task.Spec.TaskId, internaltypes.JobTypeSplit),
25 | Namespace: ctx.task.Namespace,
26 | }}
27 |
28 | _, err := controllerutil.CreateOrUpdate(ctx, r.Client, job, func() error {
29 | createClustercodeJobDefinition(job, ctx.task, TaskOpts{
30 | template: ctx.task.Spec.Encode.PodTemplate,
31 | image: DefaultFfmpegContainerImage,
32 | args: utils.MergeArgsAndReplaceVariables(variables, ctx.task.Spec.Encode.SplitCommandArgs),
33 | jobType: internaltypes.JobTypeSplit,
34 | mountSource: true,
35 | mountIntermediate: true,
36 | })
37 | return controllerutil.SetControllerReference(ctx.task, job, r.Client.Scheme())
38 | })
39 | return err
40 | }
41 |
--------------------------------------------------------------------------------
/pkg/operator/taskcontroller/utils.go:
--------------------------------------------------------------------------------
1 | package taskcontroller
2 |
3 | import (
4 | "path/filepath"
5 |
6 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
7 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
8 | "github.com/ccremer/clustercode/pkg/internal/utils"
9 | batchv1 "k8s.io/api/batch/v1"
10 | corev1 "k8s.io/api/core/v1"
11 | "k8s.io/apimachinery/pkg/labels"
12 | "k8s.io/utils/pointer"
13 | )
14 |
15 | var DefaultFfmpegContainerImage string
16 |
// createClustercodeJobDefinition mutates the given Job into a clustercode job
// for the task: it applies the management/type/task labels, merges the
// user-supplied pod template, overrides the fields clustercode controls, and
// wires up the requested volumes and mounts on the "clustercode" container.
// Intended to be called inside a controllerutil.CreateOrUpdate mutate func.
func createClustercodeJobDefinition(job *batchv1.Job, task *v1alpha1.Task, opts TaskOpts) {
	// label so controllers can select the job and map it back to the task
	job.Labels = labels.Merge(job.Labels, labels.Merge(internaltypes.ClusterCodeLabels, labels.Merge(opts.jobType.AsLabels(), task.Spec.TaskId.AsLabels())))
	job.Spec.BackoffLimit = pointer.Int32(0) // no pod retries on failure

	// work on a copy of the pod spec; written back below
	podSpec := job.Spec.Template.Spec
	templateSpec := opts.template

	// merged from template
	podSpec.SecurityContext = templateSpec.PodSecurityContext
	podSpec.Volumes = templateSpec.Volumes
	if templateSpec.Metadata != nil && templateSpec.Metadata.Labels != nil {
		job.Spec.Template.Labels = templateSpec.Metadata.Labels
	}
	if templateSpec.Metadata != nil && templateSpec.Metadata.Annotations != nil {
		job.Spec.Template.Annotations = templateSpec.Metadata.Annotations
	}

	podSpec.Containers = convertContainerSpec(templateSpec.Containers)
	podSpec.InitContainers = convertContainerSpec(templateSpec.InitContainers)

	// overrides
	podSpec.ServiceAccountName = task.Spec.ServiceAccountName
	podSpec.RestartPolicy = corev1.RestartPolicyNever

	// ensure the "clustercode" container exists and carries image, args and
	// the volume mounts selected via opts
	podSpec.Containers = createOrUpdateContainer("clustercode", podSpec.Containers, func(c *corev1.Container) {
		c.Image = opts.image
		c.Args = opts.args
		utils.EnsureVolumeMountIf(opts.mountSource, c, internaltypes.SourceSubMountPath,
			filepath.Join("/clustercode", internaltypes.SourceSubMountPath), task.Spec.Storage.SourcePvc.SubPath)
		utils.EnsureVolumeMountIf(opts.mountIntermediate, c, internaltypes.IntermediateSubMountPath,
			filepath.Join("/clustercode", internaltypes.IntermediateSubMountPath), task.Spec.Storage.IntermediatePvc.SubPath)
		utils.EnsureVolumeMountIf(opts.mountTarget, c, internaltypes.TargetSubMountPath,
			filepath.Join("/clustercode", internaltypes.TargetSubMountPath), task.Spec.Storage.TargetPvc.SubPath)
		utils.EnsureVolumeMountIf(opts.mountConfig, c, internaltypes.ConfigSubMountPath,
			filepath.Join("/clustercode", internaltypes.ConfigSubMountPath), "")
	})
	job.Spec.Template.Spec = podSpec

	// add the backing volumes for every requested mount
	if opts.mountSource {
		utils.EnsurePVCVolume(job, internaltypes.SourceSubMountPath, task.Spec.Storage.SourcePvc)
	}
	if opts.mountIntermediate {
		utils.EnsurePVCVolume(job, internaltypes.IntermediateSubMountPath, task.Spec.Storage.IntermediatePvc)
	}
	if opts.mountTarget {
		utils.EnsurePVCVolume(job, internaltypes.TargetSubMountPath, task.Spec.Storage.TargetPvc)
	}
	if opts.mountConfig {
		EnsureConfigMapVolume(job, internaltypes.ConfigSubMountPath, task.Spec.FileListConfigMapRef)
	}
}
68 |
69 | func convertContainerSpec(templates []v1alpha1.ContainerTemplate) []corev1.Container {
70 | containers := make([]corev1.Container, len(templates))
71 | for i, ct := range templates {
72 | c := ct.ToContainer()
73 | containers[i] = c
74 | }
75 | return containers
76 | }
77 |
78 | func createOrUpdateContainer(containerName string, podContainers []corev1.Container, mutateFn func(*corev1.Container)) []corev1.Container {
79 | for i, c := range podContainers {
80 | if c.Name == containerName {
81 | mutateFn(&c)
82 | podContainers[i] = c
83 | return podContainers
84 | }
85 | }
86 | container := &corev1.Container{Name: containerName}
87 | mutateFn(container)
88 | return append(podContainers, *container)
89 | }
90 |
91 | func EnsureConfigMapVolume(job *batchv1.Job, name, configMapName string) {
92 | for _, volume := range job.Spec.Template.Spec.Volumes {
93 | if volume.Name == name {
94 | return
95 | }
96 | }
97 | job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, corev1.Volume{
98 | Name: name,
99 | VolumeSource: corev1.VolumeSource{
100 | ConfigMap: &corev1.ConfigMapVolumeSource{
101 | LocalObjectReference: corev1.LocalObjectReference{Name: configMapName},
102 | },
103 | },
104 | })
105 | }
106 |
--------------------------------------------------------------------------------
/pkg/webhook/blueprintwebhook/defaulter.go:
--------------------------------------------------------------------------------
1 | package blueprintwebhook
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
7 | "github.com/go-logr/logr"
8 | "k8s.io/apimachinery/pkg/runtime"
9 | )
10 |
// Defaulter is the mutating admission handler that applies default values to
// Blueprint objects on create and update.
type Defaulter struct {
	// Log is the logger used for debug output during defaulting.
	Log logr.Logger
}
14 |
15 | func (d *Defaulter) Default(_ context.Context, obj runtime.Object) error {
16 | bp := obj.(*v1alpha1.Blueprint)
17 | d.Log.V(1).Info("Applying defaults", "name", bp.Name, "namespace", bp.Namespace)
18 |
19 | if bp.Spec.MaxParallelTasks == 0 {
20 | bp.Spec.MaxParallelTasks = 1
21 | }
22 |
23 | if bp.Spec.Encode.SliceSize == 0 {
24 | bp.Spec.Encode.SliceSize = 120
25 | }
26 |
27 | if len(bp.Spec.Scan.MediaFileExtensions) == 0 {
28 | bp.Spec.Scan.MediaFileExtensions = []string{"mkv", "mp4", "avi"}
29 | }
30 |
31 | if len(bp.Spec.Encode.SplitCommandArgs) == 0 {
32 | bp.Spec.Encode.SplitCommandArgs = []string{
33 | "-y", "-hide_banner", "-nostats", "-i", "${INPUT}", "-c", "copy", "-map", "0", "-segment_time", "${SLICE_SIZE}", "-f", "segment", "${OUTPUT}",
34 | }
35 | }
36 |
37 | if len(bp.Spec.Encode.TranscodeCommandArgs) == 0 {
38 | bp.Spec.Encode.TranscodeCommandArgs = []string{
39 | "-y", "-hide_banner", "-nostats", "-i", "${INPUT}", "-c:v", "copy", "-c:a", "copy", "${OUTPUT}",
40 | }
41 | }
42 |
43 | if len(bp.Spec.Encode.MergeCommandArgs) == 0 {
44 | bp.Spec.Encode.MergeCommandArgs = []string{
45 | "-y", "-hide_banner", "-nostats", "-f", "concat", "-i", "concat.txt", "-c", "copy", "media_out.mkv",
46 | }
47 | }
48 | return nil
49 | }
50 |
--------------------------------------------------------------------------------
/pkg/webhook/blueprintwebhook/defaulter_test.go:
--------------------------------------------------------------------------------
1 | package blueprintwebhook
2 |
3 | import (
4 | "context"
5 | "testing"
6 |
7 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
8 | "github.com/go-logr/logr"
9 | "github.com/stretchr/testify/assert"
10 | )
11 |
// TestDefaulter_Default verifies that Default populates every unset field of an
// empty BlueprintSpec with the expected default values.
func TestDefaulter_Default(t *testing.T) {
	tests := map[string]struct {
		givenSpec    v1alpha1.BlueprintSpec
		expectedSpec v1alpha1.BlueprintSpec
	}{
		"GivenEmptySpec_ThenExpectDefaultsSet": {
			givenSpec: v1alpha1.BlueprintSpec{},
			expectedSpec: v1alpha1.BlueprintSpec{
				Scan:             v1alpha1.ScanSpec{MediaFileExtensions: []string{"mkv", "mp4", "avi"}},
				MaxParallelTasks: 1,
				Encode: v1alpha1.EncodeSpec{
					SliceSize:            120,
					SplitCommandArgs:     []string{"-y", "-hide_banner", "-nostats", "-i", "${INPUT}", "-c", "copy", "-map", "0", "-segment_time", "${SLICE_SIZE}", "-f", "segment", "${OUTPUT}"},
					TranscodeCommandArgs: []string{"-y", "-hide_banner", "-nostats", "-i", "${INPUT}", "-c:v", "copy", "-c:a", "copy", "${OUTPUT}"},
					MergeCommandArgs:     []string{"-y", "-hide_banner", "-nostats", "-f", "concat", "-i", "concat.txt", "-c", "copy", "media_out.mkv"},
				},
			},
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			d := &Defaulter{Log: logr.Discard()}
			bp := &v1alpha1.Blueprint{Spec: tt.givenSpec}
			err := d.Default(context.TODO(), bp)
			assert.NoError(t, err)
			assert.Equal(t, tt.expectedSpec, bp.Spec)
		})
	}
}
41 |
--------------------------------------------------------------------------------
/pkg/webhook/blueprintwebhook/setup.go:
--------------------------------------------------------------------------------
1 | package blueprintwebhook
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
5 | ctrl "sigs.k8s.io/controller-runtime"
6 | )
7 |
8 | // +kubebuilder:webhook:verbs=create;update,path=/validate-clustercode-github-io-v1alpha1-blueprint,mutating=false,failurePolicy=fail,groups=clustercode.github.io,resources=blueprints,versions=v1alpha1,name=blueprints.clustercode.github.io,sideEffects=None,admissionReviewVersions=v1
9 | // +kubebuilder:webhook:verbs=create;update,path=/mutate-clustercode-github-io-v1alpha1-blueprint,mutating=true,failurePolicy=fail,groups=clustercode.github.io,resources=blueprints,versions=v1alpha1,name=blueprints.clustercode.github.io,sideEffects=None,admissionReviewVersions=v1
10 |
// SetupWebhook adds a webhook for v1alpha1.Blueprint managed resources.
// The builder registers both the validating webhook (Validator) and the
// mutating/defaulting webhook (Defaulter) under the URL paths declared in the
// kubebuilder markers above; those markers drive manifest generation and must
// stay in sync with the builder's derived paths.
func SetupWebhook(mgr ctrl.Manager) error {
	return ctrl.NewWebhookManagedBy(mgr).
		For(&v1alpha1.Blueprint{}).
		WithValidator(&Validator{
			Log: mgr.GetLogger().WithName("blueprint.validator"),
		}).
		WithDefaulter(&Defaulter{
			Log: mgr.GetLogger().WithName("blueprint.defaulter"),
		}).
		Complete()
}
23 |
--------------------------------------------------------------------------------
/pkg/webhook/blueprintwebhook/validator.go:
--------------------------------------------------------------------------------
1 | package blueprintwebhook
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
8 | internaltypes "github.com/ccremer/clustercode/pkg/internal/types"
9 | "github.com/go-logr/logr"
10 | "k8s.io/apimachinery/pkg/runtime"
11 | )
12 |
// Validator is the validating admission handler for Blueprint objects.
// It enforces that each storage location is provided either as a PVC reference
// or as a pod template volume, but never both.
type Validator struct {
	// Log is the logger used for debug output during validation.
	Log logr.Logger
}
16 |
17 | func (v *Validator) ValidateCreate(_ context.Context, obj runtime.Object) error {
18 | bp := obj.(*v1alpha1.Blueprint)
19 | v.Log.V(1).Info("Validate Create", "name", bp.Name, "namespace", bp.Namespace)
20 | return v.validateSpec(bp)
21 | }
22 |
23 | func (v *Validator) ValidateUpdate(_ context.Context, _, newObj runtime.Object) error {
24 | bp := newObj.(*v1alpha1.Blueprint)
25 | v.Log.V(1).Info("Validate Update", "name", bp.Name, "namespace", bp.Namespace)
26 | return v.validateSpec(bp)
27 | }
28 |
// ValidateDelete performs no checks; deleting a Blueprint is always allowed.
func (v *Validator) ValidateDelete(_ context.Context, _ runtime.Object) error {
	return nil
}
32 |
33 | func (v *Validator) validateSpec(bp *v1alpha1.Blueprint) error {
34 | if err := v.validateStorage(bp, internaltypes.SourceSubMountPath, bp.Spec.Storage.SourcePvc); err != nil {
35 | return err
36 | }
37 | if err := v.validateStorage(bp, internaltypes.IntermediateSubMountPath, bp.Spec.Storage.IntermediatePvc); err != nil {
38 | return err
39 | }
40 | if err := v.validateStorage(bp, internaltypes.TargetSubMountPath, bp.Spec.Storage.TargetPvc); err != nil {
41 | return err
42 | }
43 | return nil
44 | }
45 |
46 | func (v *Validator) validateStorage(bp *v1alpha1.Blueprint, volName string, volRef v1alpha1.VolumeRef) error {
47 | if volRef.ClaimName == "" {
48 | if !v.hasPodVolume(bp.Spec.Encode.PodTemplate, volName) || !v.hasPodVolume(bp.Spec.Cleanup.PodTemplate, volName) {
49 | return fmt.Errorf("missing required volume template for %s volume in the spec, since PVC references are empty", volName)
50 | }
51 | }
52 | if volRef.ClaimName != "" {
53 | if v.hasPodVolume(bp.Spec.Encode.PodTemplate, volName) || v.hasPodVolume(bp.Spec.Cleanup.PodTemplate, volName) {
54 | return fmt.Errorf("duplicate volume specification for %s volume in the template: spec already references a PVC named '%s'", volName, volRef.ClaimName)
55 | }
56 | }
57 | return nil
58 | }
59 |
60 | func (v *Validator) hasPodVolume(podTemplate v1alpha1.PodTemplate, volName string) bool {
61 | for _, volume := range podTemplate.Volumes {
62 | if volume.Name == volName {
63 | return true
64 | }
65 | }
66 | return false
67 | }
68 |
--------------------------------------------------------------------------------
/pkg/webhook/blueprintwebhook/validator_test.go:
--------------------------------------------------------------------------------
1 | package blueprintwebhook
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/ccremer/clustercode/pkg/api/v1alpha1"
7 | "github.com/go-logr/logr"
8 | "github.com/stretchr/testify/assert"
9 | corev1 "k8s.io/api/core/v1"
10 | )
11 |
// TestValidator_ValidateCreate exercises validateSpec with the various legal
// and illegal combinations of PVC references and pod template volumes.
// Note: despite the test name, it calls validateSpec directly (the shared
// implementation behind ValidateCreate and ValidateUpdate).
func TestValidator_ValidateCreate(t *testing.T) {
	tests := map[string]struct {
		givenSpec     v1alpha1.BlueprintSpec
		expectedError string
	}{
		"GivenEmptyClaim_WhenPodTemplateEmpty_ThenReturnError": {
			givenSpec:     v1alpha1.BlueprintSpec{},
			expectedError: "missing required volume template for source volume in the spec, since PVC references are empty",
		},
		"GivenEmptyClaim_WhenBothPodTemplateHaveVolume_ThenReturnNil": {
			givenSpec: v1alpha1.BlueprintSpec{
				Encode: v1alpha1.EncodeSpec{PodTemplate: v1alpha1.PodTemplate{
					Volumes: []corev1.Volume{
						{Name: "source"},
						{Name: "intermediate"},
						{Name: "target"},
					}},
				},
				Cleanup: v1alpha1.CleanupSpec{PodTemplate: v1alpha1.PodTemplate{
					Volumes: []corev1.Volume{
						{Name: "source"},
						{Name: "intermediate"},
						{Name: "target"},
					}},
				},
			},
			expectedError: "",
		},
		"GivenEmptyClaim_WhenPodTemplateMissingVolume_ThenReturnError": {
			givenSpec: v1alpha1.BlueprintSpec{
				Encode: v1alpha1.EncodeSpec{PodTemplate: v1alpha1.PodTemplate{
					Volumes: []corev1.Volume{
						{Name: "source"},
						{Name: "intermediate"},
						{Name: "target"},
					}},
				},
				// cleanup template lacks "intermediate" and "target" volumes.
				Cleanup: v1alpha1.CleanupSpec{PodTemplate: v1alpha1.PodTemplate{
					Volumes: []corev1.Volume{
						{Name: "source"},
					}},
				},
			},
			expectedError: "missing required volume template for intermediate volume in the spec, since PVC references are empty",
		},
		"GivenClaimRef_WhenPodTemplateMissingVolume_ThenReturnNil": {
			givenSpec: v1alpha1.BlueprintSpec{
				Storage: v1alpha1.StorageSpec{
					SourcePvc:       v1alpha1.VolumeRef{ClaimName: "pvc-source"},
					IntermediatePvc: v1alpha1.VolumeRef{ClaimName: "pvc-intermediate"},
					TargetPvc:       v1alpha1.VolumeRef{ClaimName: "pvc-target"}},
			},
			expectedError: "",
		},
		"GivenClaimRef_WhenPodTemplateVolumeDuplicates_ThenReturnError": {
			givenSpec: v1alpha1.BlueprintSpec{
				Storage: v1alpha1.StorageSpec{
					SourcePvc:       v1alpha1.VolumeRef{ClaimName: "pvc-source"},
					IntermediatePvc: v1alpha1.VolumeRef{ClaimName: "pvc-intermediate"},
				},
				// "source" is defined both as PVC reference and as template volume.
				Encode: v1alpha1.EncodeSpec{PodTemplate: v1alpha1.PodTemplate{
					Volumes: []corev1.Volume{
						{Name: "source"},
					}},
				},
			},
			expectedError: "duplicate volume specification for source volume in the template: spec already references a PVC named 'pvc-source'",
		},
		"GivenClaimRef_WhenMixedWithPodTemplateVolume_ThenReturnNil": {
			givenSpec: v1alpha1.BlueprintSpec{
				Storage: v1alpha1.StorageSpec{
					SourcePvc:       v1alpha1.VolumeRef{ClaimName: "pvc-source"},
					IntermediatePvc: v1alpha1.VolumeRef{ClaimName: "pvc-intermediate"},
				},
				Encode: v1alpha1.EncodeSpec{PodTemplate: v1alpha1.PodTemplate{
					Volumes: []corev1.Volume{
						{Name: "target"},
					}},
				},
				Cleanup: v1alpha1.CleanupSpec{PodTemplate: v1alpha1.PodTemplate{
					Volumes: []corev1.Volume{
						{Name: "target"},
					}},
				},
			},
			expectedError: "",
		},
	}
	for name, tt := range tests {
		t.Run(name, func(t *testing.T) {
			v := &Validator{Log: logr.Discard()}
			err := v.validateSpec(&v1alpha1.Blueprint{Spec: tt.givenSpec})
			if tt.expectedError != "" {
				assert.EqualError(t, err, tt.expectedError)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
112 |
--------------------------------------------------------------------------------
/pkg/webhook/command.go:
--------------------------------------------------------------------------------
1 | package webhook
2 |
3 | import (
4 | "context"
5 | "net/http"
6 |
7 | "github.com/ccremer/clustercode/pkg/api"
8 | pipeline "github.com/ccremer/go-command-pipeline"
9 | "github.com/go-logr/logr"
10 | "k8s.io/client-go/rest"
11 | controllerruntime "sigs.k8s.io/controller-runtime"
12 | "sigs.k8s.io/controller-runtime/pkg/manager"
13 | )
14 |
// Command holds the configuration for running the standalone webhook server.
type Command struct {
	// Log is the root logger for the webhook server.
	Log logr.Logger
	// WebhookCertDir is the directory containing the TLS certificate and key
	// served by the webhook endpoint.
	WebhookCertDir string
}

// commandContext carries the state shared between the pipeline steps of Execute.
type commandContext struct {
	context.Context
	manager    manager.Manager
	kubeconfig *rest.Config
}
25 |
// Execute starts the webhook server: it loads the kubeconfig, creates a
// controller-runtime manager, registers the API schemes and webhooks, and then
// blocks in the manager's run loop until ctx is cancelled or an error occurs.
// The first failing step aborts the pipeline and its error is returned.
func (c *Command) Execute(ctx context.Context) error {
	log := c.Log
	log.Info("Setting up webhook server", "config", c)
	controllerruntime.SetLogger(log)

	// NOTE: the step closures' ctx parameter shadows the outer ctx; both wrap
	// the same context.Context via commandContext's embedding.
	pctx := &commandContext{Context: ctx}
	p := pipeline.NewPipeline[*commandContext]()
	p.WithSteps(
		p.NewStep("get config", func(ctx *commandContext) error {
			cfg, err := controllerruntime.GetConfig()
			ctx.kubeconfig = cfg
			return err
		}),
		p.NewStep("create manager", func(ctx *commandContext) error {
			// configure client-side throttling
			ctx.kubeconfig.QPS = 100
			ctx.kubeconfig.Burst = 150 // more Openshift friendly

			mgr, err := controllerruntime.NewManager(ctx.kubeconfig, controllerruntime.Options{
				MetricsBindAddress: "0", // disable
			})
			ctx.manager = mgr
			return err
		}),
		p.NewStep("register schemes", func(ctx *commandContext) error {
			return api.AddToScheme(ctx.manager.GetScheme())
		}),
		p.NewStep("setup webhook server", func(ctx *commandContext) error {
			ws := ctx.manager.GetWebhookServer()
			ws.CertDir = c.WebhookCertDir
			ws.TLSMinVersion = "1.3"
			// Plain liveness endpoint next to the admission handlers.
			ws.Register("/healthz", &healthHandler{})
			return SetupWebhooks(ctx.manager)
		}),
		p.NewStep("run manager", func(ctx *commandContext) error {
			log.Info("Starting manager")
			// Blocks until the context is cancelled.
			return ctx.manager.Start(ctx)
		}),
	)

	return p.RunWithContext(pctx)
}
68 |
69 | type healthHandler struct{}
70 |
71 | func (h *healthHandler) ServeHTTP(w http.ResponseWriter, _ *http.Request) {
72 | w.WriteHeader(http.StatusNoContent)
73 | }
74 |
--------------------------------------------------------------------------------
/pkg/webhook/generate.go:
--------------------------------------------------------------------------------
1 | //go:build generate
2 |
3 | // Remove existing manifests
4 | //go:generate rm -rf ../../package/webhook
5 |
6 | // Generate webhook manifests
7 | //go:generate go run -tags generate sigs.k8s.io/controller-tools/cmd/controller-gen webhook paths=./... output:artifacts:config=../../package/webhook
8 |
9 | package webhook
10 |
--------------------------------------------------------------------------------
/pkg/webhook/setup.go:
--------------------------------------------------------------------------------
1 | package webhook
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/webhook/blueprintwebhook"
5 | ctrl "sigs.k8s.io/controller-runtime"
6 | )
7 |
8 | // SetupWebhooks creates all webhooks and adds them to the supplied manager.
9 | func SetupWebhooks(mgr ctrl.Manager) error {
10 | /*
11 | Totally undocumented and hard-to-find feature is that the builder automatically registers the URL path for the webhook.
12 | What's more, not even the tests in upstream controller-runtime reveal what this path is _actually_ going to look like.
13 | So here's how the path is built (dots replaced with dash, lower-cased, single-form):
14 | /validate---
15 | /mutate---
16 | Example:
17 | /validate-clustercode-github-io-v1alpha1-blueprint
18 | This path has to be given in the `//+kubebuilder:webhook:...` magic comment, see example:
19 | +kubebuilder:webhook:verbs=create;update;delete,path=/validate-clustercode-github-io-v1alpha1-blueprint,mutating=false,failurePolicy=fail,groups=clustercode.github.io,resources=blueprints,versions=v1alpha1,name=blueprints.clustercode.github.io,sideEffects=None,admissionReviewVersions=v1
20 | Pay special attention to the plural forms and correct versions!
21 | */
22 | for _, setup := range []func(ctrl.Manager) error{
23 | blueprintwebhook.SetupWebhook,
24 | } {
25 | if err := setup(mgr); err != nil {
26 | return err
27 | }
28 | }
29 | return nil
30 | }
31 |
--------------------------------------------------------------------------------
/pkg/webui/settings.go:
--------------------------------------------------------------------------------
1 | package webui
2 |
// Settings controls various aspects of the frontend.
// It is serialized to JSON and consumed by the web UI.
type Settings struct {
	// AuthCookieMaxAge sets the max-age when saving the cookie after a successful login.
	// This basically controls how long a user does not need to re-login.
	// Unit is presumably seconds (HTTP cookie Max-Age convention) — TODO confirm at the consumer.
	AuthCookieMaxAge int `json:"authCookieMaxAge"`
}
9 |
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": [
3 | "config:base",
4 | ":gitSignOff",
5 | ":disableDependencyDashboard"
6 | ],
7 | "labels": [
8 | "kind:dependency"
9 | ],
10 | "postUpdateOptions": [
11 | "gomodTidy"
12 | ],
13 | "packageRules": [
14 | {
15 | "matchPackagePatterns": [
16 | "k8s.io/utils",
17 | "golang.org/x/*"
18 | ],
19 | "groupName": "utils",
20 | "schedule": [
21 | "on the first day of the month"
22 | ],
23 | "automerge": true
24 | },
25 | {
26 | "matchPackagePatterns": [
27 | "github.com/urfave/cli/v2"
28 | ],
29 | "groupName": "urfave/cli/v2",
30 | "schedule": [
31 | "on the first day of the month"
32 | ]
33 | },
34 | {
35 | "matchPaths": [
36 | "docs/**"
37 | ],
38 | "groupName": "npm",
39 | "schedule": [
40 | "on the first day of the month"
41 | ],
42 | "automerge": true
43 | },
44 | {
45 | "matchPaths": [
46 | "ui/**"
47 | ],
48 | "groupName": "npm",
49 | "schedule": [
50 | "on the first day of the month"
51 | ]
52 | }
53 | ],
54 | "prBodyNotes": [
55 | "- [ ] PR contains the label that identifies the area, one of: `area:operator`, `area:chart`\n- [ ] If the PR is targeting a Helm chart, add the chart label, e.g. `chart:clustercode`"
56 | ]
57 | }
58 |
--------------------------------------------------------------------------------
/scan_command.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/scancmd"
5 | "github.com/urfave/cli/v2"
6 | controllerruntime "sigs.k8s.io/controller-runtime"
7 | )
8 |
// newScanCommand builds the CLI definition for the "scan" subcommand.
// The returned command delegates to scancmd.Command; the Flags below bind
// directly into that struct's fields, and the Action wires up logging before
// executing it.
func newScanCommand() *cli.Command {
	command := &scancmd.Command{}
	return &cli.Command{
		Name:   "scan",
		Usage:  "Scan source storage for new files and queue task",
		Before: LogMetadata,
		Action: func(ctx *cli.Context) error {
			// Derive a named logger and make it available both to
			// controller-runtime and via the command's context.
			command.Log = AppLogger(ctx).WithName(ctx.Command.Name)
			controllerruntime.SetLogger(command.Log)
			return command.Execute(controllerruntime.LoggerInto(ctx.Context, command.Log))
		},
		Flags: []cli.Flag{
			newBlueprintNameFlag(&command.BlueprintName),
			newNamespaceFlag(&command.Namespace),
			newSourceRootDirFlag(&command.SourceRootDir),
		},
	}
}
27 |
--------------------------------------------------------------------------------
/test/e2e.mk:
--------------------------------------------------------------------------------
# Extra arguments appended to the `helm upgrade` invocation; empty by default.
chart_deploy_args =

.PHONY: chart-deploy
chart-deploy: export KUBECONFIG = $(KIND_KUBECONFIG)
chart-deploy: go_build_args = -tags=ui
chart-deploy: blank-media build-ui kind-load-image kind-setup-ingress install-crd webhook-cert ## Install Operator in local cluster
	helm upgrade --install clustercode ./charts/clustercode \
		--create-namespace --namespace clustercode-system \
		--set podAnnotations.sha="$(shell docker inspect $(CONTAINER_IMG) | jq -r '.[].Id')" \
		--values test/values.yaml \
		--values $(webhook_values) \
		--wait $(chart_deploy_args)

###
### E2E Tests
### with KUTTL (https://kuttl.dev)
###

kuttl_bin = $(go_bin)/kubectl-kuttl
$(kuttl_bin): export GOBIN = $(go_bin)
$(kuttl_bin): | $(go_bin)
	go install github.com/kudobuilder/kuttl/cmd/kubectl-kuttl@latest

# The install templates are patched with the current user's UID so the e2e pods
# can write to the hostPath-backed volumes.
test-e2e: export KUBECONFIG = $(KIND_KUBECONFIG)
test-e2e: $(kuttl_bin) chart-deploy ## Run E2E tests in local cluster
	@cp ./test/e2e/operator/03-install.yaml.template ./test/e2e/operator/03-install.yaml
	@yq -i e '.spec.encode.podTemplate.containers[0].securityContext.runAsUser=$(shell id -u)' ./test/e2e/operator/03-install.yaml
	@yq -i e '.spec.cleanup.podTemplate.containers[0].securityContext.runAsUser=$(shell id -u)' ./test/e2e/operator/03-install.yaml
	$(kuttl_bin) test ./test/e2e --config ./test/e2e/kuttl-test.yaml
	# kuttl leaves kubeconfig garbage: https://github.com/kudobuilder/kuttl/issues/297
	@rm -f kubeconfig

clean_targets += .e2e-test-clean
.PHONY: .e2e-test-clean
.e2e-test-clean:
	rm -f $(kuttl_bin)

###
### Generate webhook certificates
###

tls_dir = $(WORK_DIR)/tls
webhook_key = $(tls_dir)/tls.key
webhook_cert = $(tls_dir)/tls.crt
webhook_service_name = clustercode-webhook.clustercode-system.svc
webhook_values = $(tls_dir)/webhook-values.yaml

$(tls_dir):
	mkdir -p $@

.PHONY: webhook-cert
webhook-cert: $(webhook_values)

# Generates only the private key (-noout suppresses the certificate output).
$(webhook_key): | $(tls_dir)
	openssl req -x509 -newkey rsa:4096 -nodes -keyout $@ --noout -days 3650 -subj "/CN=$(webhook_service_name)" -addext "subjectAltName = DNS:$(webhook_service_name)"

# Creates a self-signed certificate from the key above.
$(webhook_cert): $(webhook_key)
	openssl req -x509 -key $(webhook_key) -nodes -out $@ -days 3650 -subj "/CN=$(webhook_service_name)" -addext "subjectAltName = DNS:$(webhook_service_name)"

# Self-signed setup: the CA bundle and the serving certificate are the same cert.
$(webhook_values): $(webhook_cert)
	@yq -n '.webhook.caBundle="$(shell base64 -w0 $(webhook_cert))" | .webhook.certificate="$(shell base64 -w0 $(webhook_cert))" | .webhook.privateKey="$(shell base64 -w0 $(webhook_key))"' > $@

clean_targets += .webhook-clean
.PHONY: .webhook-clean
.webhook-clean:
	rm -rf $(tls_dir)
67 |
--------------------------------------------------------------------------------
/test/e2e/.gitignore:
--------------------------------------------------------------------------------
1 | operator/03-install.yaml
2 |
--------------------------------------------------------------------------------
/test/e2e/kuttl-test.yaml:
--------------------------------------------------------------------------------
# KUTTL test suite configuration: runs the test steps under test/e2e
# sequentially (parallel: 1) with a 180s per-step timeout.
apiVersion: kuttl.dev/v1beta1
kind: TestSuite
testDirs:
  - ./test/e2e
timeout: 180
parallel: 1
7 |
--------------------------------------------------------------------------------
/test/e2e/operator/00-delete.yaml:
--------------------------------------------------------------------------------
# Pre-test cleanup: remove any PVCs/PVs left over from a previous run before
# the suite creates fresh ones.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
  # This will wait until resources are really gone
  - apiVersion: v1
    kind: PersistentVolumeClaim
    name: source
  - apiVersion: v1
    kind: PersistentVolumeClaim
    name: intermediate
  - apiVersion: v1
    kind: PersistentVolumeClaim
    name: target
  - apiVersion: v1
    kind: PersistentVolume
    name: e2e-source
  - apiVersion: v1
    kind: PersistentVolume
    name: e2e-intermediate
  - apiVersion: v1
    kind: PersistentVolume
    name: e2e-target
23 |
--------------------------------------------------------------------------------
/test/e2e/operator/01-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | ---
5 | apiVersion: v1
6 | kind: PersistentVolume
7 | metadata:
8 | name: e2e-source
9 | status:
10 | phase: Available
11 | ---
12 | apiVersion: v1
13 | kind: PersistentVolume
14 | metadata:
15 | name: e2e-intermediate
16 | status:
17 | phase: Available
18 | ---
19 | apiVersion: v1
20 | kind: PersistentVolume
21 | metadata:
22 | name: e2e-target
23 | status:
24 | phase: Available
25 |
--------------------------------------------------------------------------------
/test/e2e/operator/01-install.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: PersistentVolume
4 | metadata:
5 | name: e2e-source
6 | labels:
7 | pv.kubernetes.io/type: source
8 | spec:
9 | capacity:
10 | storage: 1Gi
11 | accessModes:
12 | - ReadWriteMany
13 | hostPath:
14 | path: /pv/data
15 | type: Directory
16 | ---
17 | apiVersion: v1
18 | kind: PersistentVolume
19 | metadata:
20 | name: e2e-intermediate
21 | labels:
22 | pv.kubernetes.io/type: intermediate
23 | spec:
24 | capacity:
25 | storage: 1Gi
26 | accessModes:
27 | - ReadWriteMany
28 | hostPath:
29 | path: /pv/data
30 | type: Directory
31 | ---
32 | apiVersion: v1
33 | kind: PersistentVolume
34 | metadata:
35 | name: e2e-target
36 | labels:
37 | pv.kubernetes.io/type: target
38 | spec:
39 | capacity:
40 | storage: 1Gi
41 | accessModes:
42 | - ReadWriteMany
43 | hostPath:
44 | path: /pv/data
45 | type: Directory
46 |
--------------------------------------------------------------------------------
/test/e2e/operator/02-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: PersistentVolumeClaim
4 | metadata:
5 | name: source
6 | status:
7 | phase: Bound
8 | ---
9 | apiVersion: v1
10 | kind: PersistentVolumeClaim
11 | metadata:
12 | name: intermediate
13 | status:
14 | phase: Bound
15 | ---
16 | apiVersion: v1
17 | kind: PersistentVolumeClaim
18 | metadata:
19 | name: target
20 | status:
21 | phase: Bound
22 |
--------------------------------------------------------------------------------
/test/e2e/operator/02-install.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: PersistentVolumeClaim
4 | metadata:
5 | name: source
6 | spec:
7 | accessModes:
8 | - ReadWriteMany
9 | storageClassName: ""
10 | volumeMode: Filesystem
11 | resources:
12 | requests:
13 | storage: 1Gi
14 | selector:
15 | matchLabels:
16 | pv.kubernetes.io/type: source
17 | ---
18 | apiVersion: v1
19 | kind: PersistentVolumeClaim
20 | metadata:
21 | name: intermediate
22 | spec:
23 | accessModes:
24 | - ReadWriteMany
25 | storageClassName: ""
26 | volumeMode: Filesystem
27 | resources:
28 | requests:
29 | storage: 1Gi
30 | selector:
31 | matchLabels:
32 | pv.kubernetes.io/type: intermediate
33 | ---
34 | apiVersion: v1
35 | kind: PersistentVolumeClaim
36 | metadata:
37 | name: target
38 | spec:
39 | accessModes:
40 | - ReadWriteMany
41 | storageClassName: ""
42 | volumeMode: Filesystem
43 | resources:
44 | requests:
45 | storage: 1Gi
46 | selector:
47 | matchLabels:
48 | pv.kubernetes.io/type: target
49 |
--------------------------------------------------------------------------------
/test/e2e/operator/03-assert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kuttl.dev/v1beta1
3 | kind: TestAssert
4 | ---
5 | apiVersion: clustercode.github.io/v1alpha1
6 | kind: Task
7 | metadata:
8 | labels:
9 | app.kubernetes.io/managed-by: clustercode
10 | spec:
11 | slicesPlannedCount: 3
12 | status:
13 | conditions:
14 | - reason: SplitSuccessful
15 | status: 'True'
16 | type: SplitComplete
17 | - reason: AllSlicesCompleted
18 | status: 'False'
19 | type: Progressing
20 | - reason: CountedIntermediateFiles
21 | status: 'True'
22 | type: CountComplete
23 | - reason: MergedIntermediateFiles
24 | status: 'True'
25 | type: MergeComplete
26 | - reason: TaskProcessedSuccessfully
27 | status: 'True'
28 | type: Ready
29 |
--------------------------------------------------------------------------------
/test/e2e/operator/03-install.yaml.template:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: clustercode.github.io/v1alpha1
3 | kind: Blueprint
4 | metadata:
5 | name: test-blueprint
6 | spec:
7 | maxParallelTasks: 1
8 | storage:
9 | sourcePvc:
10 | claimName: source
11 | subPath: source
12 | intermediatePvc:
13 | claimName: intermediate
14 | subPath: intermediate
15 | targetPvc:
16 | claimName: target
17 | subPath: target
18 | scan:
19 | schedule: "*/1 * * * *"
20 | mediaFileExtensions:
21 | - mp4
22 | taskConcurrencyStrategy:
23 | concurrentCountStrategy:
24 | maxCount: 1
25 | cleanup:
26 | podTemplate:
27 | containers:
28 | - name: clustercode
29 | imagePullPolicy: IfNotPresent
30 | encode:
31 | podTemplate:
32 | containers:
33 | - name: clustercode
34 | imagePullPolicy: IfNotPresent
35 | sliceSize: 1
36 | splitCommandArgs:
37 | - -y
38 | - -hide_banner
39 | - -nostats
40 | - -i
41 | - ${INPUT}
42 | - -c
43 | - copy
44 | - -map
45 | - "0"
46 | - -segment_time
47 | - ${SLICE_SIZE}
48 | - -f
49 | - segment
50 | - ${OUTPUT}
51 | transcodeCommandArgs:
52 | - -y
53 | - -hide_banner
54 | - -nostats
55 | - -i
56 | - ${INPUT}
57 | - -c:v
58 | - copy
59 | - -c:a
60 | - copy
61 | - ${OUTPUT}
62 | mergeCommandArgs:
63 | - -y
64 | - -hide_banner
65 | - -nostats
66 | - -f
67 | - concat
68 | - -safe
69 | - "0"
70 | - -i
71 | - ${INPUT}
72 | - -c
73 | - copy
74 | - ${OUTPUT}
75 |
--------------------------------------------------------------------------------
/test/e2e/operator/04-delete.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kuttl.dev/v1beta1
2 | kind: TestStep
3 | delete:
4 | # This will wait until resources are really gone
5 | - apiVersion: clustercode.github.io/v1alpha1
6 | kind: Blueprint
7 | name: test-blueprint
8 | - apiVersion: v1
9 | kind: PersistentVolumeClaim
10 | name: source
11 | - apiVersion: v1
12 | kind: PersistentVolumeClaim
13 | name: intermediate
14 | - apiVersion: v1
15 | kind: PersistentVolumeClaim
16 | name: target
17 | - apiVersion: v1
18 | kind: PersistentVolume
19 | name: e2e-source
20 | - apiVersion: v1
21 | kind: PersistentVolume
22 | name: e2e-intermediate
23 | - apiVersion: v1
24 | kind: PersistentVolume
25 | name: e2e-target
26 |
--------------------------------------------------------------------------------
/test/integration.mk:
--------------------------------------------------------------------------------
1 | setup_envtest_bin = $(go_bin)/setup-envtest
2 | envtest_crd_dir ?= $(WORK_DIR)/crds
3 |
4 | test_targets += test-integration
5 | clean_targets += .envtest-clean
6 |
7 | # Prepare binary
8 | $(setup_envtest_bin): export GOBIN = $(go_bin)
9 | $(setup_envtest_bin): | $(go_bin)
10 | go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
11 |
12 | .PHONY: test-integration
13 | test-integration: export ENVTEST_CRD_DIR = $(envtest_crd_dir)
14 | test-integration: $(setup_envtest_bin) .envtest_crds ## Run integration tests against code
15 | $(setup_envtest_bin) $(ENVTEST_ADDITIONAL_FLAGS) use '$(ENVTEST_K8S_VERSION)!'
16 | @chmod -R +w $(go_bin)/k8s
17 | export KUBEBUILDER_ASSETS="$$($(setup_envtest_bin) $(ENVTEST_ADDITIONAL_FLAGS) use -i -p path '$(ENVTEST_K8S_VERSION)!')" && \
18 | go test -tags=integration -coverprofile cover.out -covermode atomic ./...
19 |
20 | $(envtest_crd_dir):
21 | @mkdir -p $@
22 |
# Copies the project's CRDs into the envtest CRD directory.
# NOTE(review): this target never creates a file named `.envtest_crds`,
# so make considers it out of date and re-runs the copy on every
# `make test-integration`; consider marking it .PHONY or touching a stamp file.
.envtest_crds: | $(envtest_crd_dir)
	@cp -r package/crds $(envtest_crd_dir)
25 |
26 | .PHONY: .envtest-clean
27 | .envtest-clean:
28 | rm -rf $(setup_envtest_bin) $(envtest_crd_dir) cover.out
29 |
--------------------------------------------------------------------------------
/test/media.mk:
--------------------------------------------------------------------------------
1 | media_source_dir = $(WORK_DIR)/data/source
2 | media_intermediate_dir = $(WORK_DIR)/data/intermediate
3 | media_target_dir = $(WORK_DIR)/data/target
4 |
5 | media_filename = $(media_source_dir)/blank_video.mp4
6 |
7 | clean_targets += media-clean
8 |
9 | .PHONY: blank-media
10 | blank-media: $(media_filename) | $(media_intermediate_dir) $(media_target_dir) ## Creates a blank video file
11 |
12 | .PHONY: media-clean
13 | media-clean: ## Cleans the intermediate and target dirs
14 | rm -rf $(media_source_dir) $(media_intermediate_dir) $(media_target_dir)
15 |
16 | ###
17 | ### Assets
18 | ###
19 |
20 | $(media_source_dir):
21 | @mkdir -p $@
22 |
23 | $(media_intermediate_dir):
24 | @mkdir -p $@
25 |
26 | $(media_target_dir):
27 | @mkdir -p $@
28 |
29 | $(media_filename): | $(media_source_dir)
30 | docker run --rm -u $(shell id -u):$(shell id -g) -v $(media_source_dir):/data $(FFMPEG_IMG) -y -hide_banner -t 30 -f lavfi -i color=c=black:s=320x240 -c:v libx264 -tune stillimage -pix_fmt yuv420p /data/blank_video.mp4
31 |
--------------------------------------------------------------------------------
/test/values.yaml:
--------------------------------------------------------------------------------
1 | clustercode:
2 | env:
3 | - name: CC_LOG_LEVEL
4 | value: "2"
5 |
--------------------------------------------------------------------------------
/tools.go:
--------------------------------------------------------------------------------
1 | //go:build tools
2 |
3 | // Package tools is a place to put any tooling dependencies as imports.
4 | // Go modules will be forced to download and install them.
5 | package tools
6 |
7 | import (
8 | // controller-gen
9 | _ "sigs.k8s.io/controller-tools/cmd/controller-gen"
10 | )
11 |
--------------------------------------------------------------------------------
/ui/.eslintrc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | parser: '@typescript-eslint/parser',
3 | extends: [
4 | 'eslint:recommended',
5 | 'plugin:@typescript-eslint/recommended',
6 | 'plugin:@typescript-eslint/recommended-requiring-type-checking'
7 | ],
8 | parserOptions: {
9 | ecmaVersion: 2020,
10 | sourceType: 'module',
11 | tsconfigRootDir: __dirname,
12 | project: ['./tsconfig.json'],
13 | extraFileExtensions: ['.svelte']
14 | },
15 | env: {
16 | es6: true,
17 | browser: true
18 | },
19 | overrides: [
20 | {
21 | files: ['*.svelte'],
22 | processor: 'svelte3/svelte3'
23 | }
24 | ],
25 | settings: {
26 | 'svelte3/typescript': require('typescript')
27 | },
28 | plugins: ['svelte3', '@typescript-eslint'],
29 | ignorePatterns: ['node_modules']
30 | }
31 |
--------------------------------------------------------------------------------
/ui/.gitignore:
--------------------------------------------------------------------------------
1 | # Logs
2 | logs
3 | *.log
4 | npm-debug.log*
5 | yarn-debug.log*
6 | yarn-error.log*
7 | pnpm-debug.log*
8 | lerna-debug.log*
9 |
10 | node_modules
11 | dist
12 | dist-ssr
13 | *.local
14 |
15 | # Editor directories and files
16 | .vscode/*
17 | !.vscode/extensions.json
18 | .idea
19 | .DS_Store
20 | *.suo
21 | *.ntvs*
22 | *.njsproj
23 | *.sln
24 | *.sw?
25 | .env
26 |
27 | # Cypress
28 | cypress/videos/
29 | cypress/screenshots/
30 |
--------------------------------------------------------------------------------
/ui/.prettierrc.cjs:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | arrowParens: 'avoid',
3 | singleQuote: false,
4 | printWidth: 90,
5 | plugins: ['prettier-plugin-svelte'],
6 | semi: false,
7 | svelteSortOrder: 'options-styles-scripts-markup',
8 | svelteStrictMode: false,
9 | svelteIndentScriptAndStyle: true,
10 | trailingComma: 'none'
11 | }
12 |
--------------------------------------------------------------------------------
/ui/README.md:
--------------------------------------------------------------------------------
1 | # Svelte + TS + Vite
2 |
3 | This template should help get you started developing with Svelte and TypeScript in Vite.
4 |
5 | ## Recommended IDE Setup
6 |
7 | [VS Code](https://code.visualstudio.com/) + [Svelte](https://marketplace.visualstudio.com/items?itemName=svelte.svelte-vscode).
8 |
9 | ## Need an official Svelte framework?
10 |
11 | Check out [SvelteKit](https://github.com/sveltejs/kit#readme), which is also powered by Vite. Deploy anywhere with its serverless-first approach and adapt to various platforms, with out of the box support for TypeScript, SCSS, and Less, and easily-added support for mdsvex, GraphQL, PostCSS, Tailwind CSS, and more.
12 |
13 | ## Technical considerations
14 |
15 | **Why use this over SvelteKit?**
16 |
- SvelteKit brings its own routing solution, which might not be preferable for some users.
18 | - It is first and foremost a framework that just happens to use Vite under the hood, not a Vite app.
19 | `vite dev` and `vite build` wouldn't work in a SvelteKit environment, for example.
20 |
21 | This template contains as little as possible to get started with Vite + TypeScript + Svelte, while taking into account the developer experience with regards to HMR and intellisense. It demonstrates capabilities on par with the other `create-vite` templates and is a good starting point for beginners dipping their toes into a Vite + Svelte project.
22 |
23 | Should you later need the extended capabilities and extensibility provided by SvelteKit, the template has been structured similarly to SvelteKit so that it is easy to migrate.
24 |
25 | **Why `global.d.ts` instead of `compilerOptions.types` inside `jsconfig.json` or `tsconfig.json`?**
26 |
27 | Setting `compilerOptions.types` shuts out all other types not explicitly listed in the configuration. Using triple-slash references keeps the default TypeScript setting of accepting type information from the entire workspace, while also adding `svelte` and `vite/client` type information.
28 |
29 | **Why include `.vscode/extensions.json`?**
30 |
31 | Other templates indirectly recommend extensions via the README, but this file allows VS Code to prompt the user to install the recommended extension upon opening the project.
32 |
33 | **Why enable `allowJs` in the TS template?**
34 |
35 | While `allowJs: false` would indeed prevent the use of `.js` files in the project, it does not prevent the use of JavaScript syntax in `.svelte` files. In addition, it would force `checkJs: false`, bringing the worst of both worlds: not being able to guarantee the entire codebase is TypeScript, and also having worse typechecking for the existing JavaScript. In addition, there are valid use cases in which a mixed codebase may be relevant.
36 |
37 | **Why is HMR not preserving my local component state?**
38 |
39 | HMR state preservation comes with a number of gotchas! It has been disabled by default in both `svelte-hmr` and `@sveltejs/vite-plugin-svelte` due to its often surprising behavior. You can read the details [here](https://github.com/rixo/svelte-hmr#svelte-hmr).
40 |
41 | If you have state that's important to retain within a component, consider creating an external store which would not be replaced by HMR.
42 |
43 | ```ts
44 | // store.ts
45 | // An extremely simple external store
46 | import { writable } from 'svelte/store'
47 | export default writable(0)
48 | ```
49 |
--------------------------------------------------------------------------------
/ui/cypress.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig } from "cypress";
2 |
3 | export default defineConfig({
4 | component: {
5 | devServer: {
6 | framework: "svelte",
7 | bundler: "vite",
8 | },
9 | },
10 | });
11 |
--------------------------------------------------------------------------------
/ui/cypress.d.ts:
--------------------------------------------------------------------------------
1 | import { mount } from 'cypress/svelte'
2 |
3 | declare global {
4 | namespace Cypress {
5 | interface Chainable {
6 | mount: typeof mount
7 | }
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/ui/cypress/support/commands.ts:
--------------------------------------------------------------------------------
1 | ///
2 | // ***********************************************
3 | // This example commands.ts shows you how to
4 | // create various custom commands and overwrite
5 | // existing commands.
6 | //
7 | // For more comprehensive examples of custom
8 | // commands please read more here:
9 | // https://on.cypress.io/custom-commands
10 | // ***********************************************
11 | //
12 | //
13 | // -- This is a parent command --
14 | // Cypress.Commands.add('login', (email, password) => { ... })
15 | //
16 | //
17 | // -- This is a child command --
18 | // Cypress.Commands.add('drag', { prevSubject: 'element'}, (subject, options) => { ... })
19 | //
20 | //
21 | // -- This is a dual command --
22 | // Cypress.Commands.add('dismiss', { prevSubject: 'optional'}, (subject, options) => { ... })
23 | //
24 | //
25 | // -- This will overwrite an existing command --
26 | // Cypress.Commands.overwrite('visit', (originalFn, url, options) => { ... })
27 | //
28 | // declare global {
29 | // namespace Cypress {
30 | // interface Chainable {
31 | // login(email: string, password: string): Chainable
32 | // drag(subject: string, options?: Partial): Chainable
33 | // dismiss(subject: string, options?: Partial): Chainable
34 | // visit(originalFn: CommandOriginalFn, url: string, options: Partial): Chainable
35 | // }
36 | // }
37 | // }
38 |
39 | export {}
40 |
--------------------------------------------------------------------------------
/ui/cypress/support/component-index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Components App
8 |
9 |
10 |
11 |
12 |
13 |
--------------------------------------------------------------------------------
/ui/cypress/support/component.ts:
--------------------------------------------------------------------------------
1 | // ***********************************************************
2 | // This example support/component.ts is processed and
3 | // loaded automatically before your test files.
4 | //
5 | // This is a great place to put global configuration and
6 | // behavior that modifies Cypress.
7 | //
8 | // You can change the location of this file or turn off
9 | // automatically serving support files with the
10 | // 'supportFile' configuration option.
11 | //
12 | // You can read more here:
13 | // https://on.cypress.io/configuration
14 | // ***********************************************************
15 |
16 | // Import commands.js using ES2015 syntax:
17 | import './commands'
18 |
19 | // Alternatively you can use CommonJS syntax:
20 | // require('./commands')
21 |
22 | import { mount } from 'cypress/svelte'
23 |
24 | import 'bootstrap/dist/css/bootstrap.min.css'
25 |
26 | // Augment the Cypress namespace to include type definitions for
27 | // your custom command.
28 | // Alternatively, can be defined in cypress/support/component.d.ts
29 | // with a at the top of your spec.
30 | declare global {
31 | namespace Cypress {
32 | interface Chainable {
33 | mount: typeof mount
34 | }
35 | }
36 | }
37 |
38 | Cypress.Commands.add('mount', mount)
39 |
40 | // Example use:
41 | // cy.mount(MyComponent)
42 |
--------------------------------------------------------------------------------
/ui/embed.go:
--------------------------------------------------------------------------------
1 | //go:build ui
2 |
3 | package ui
4 |
5 | import (
6 | "embed"
7 | )
8 |
// publicFs holds the compiled UI assets embedded from the dist/ directory.
// It is populated at build time only when the `ui` build tag is active
// (see the build constraint at the top of this file).
//go:embed dist/*
//go:embed dist/assets
var publicFs embed.FS

// init exposes the embedded assets through the exported PublicFs variable
// declared in embed.init.go, making them available to the web server.
func init() {
	PublicFs = publicFs
}
16 |
--------------------------------------------------------------------------------
/ui/embed.init.go:
--------------------------------------------------------------------------------
1 | package ui
2 |
3 | import (
4 | "embed"
5 | )
6 |
// PublicFs holds the embedded web UI assets.
// It stays empty unless the binary is compiled with the `ui` build tag,
// in which case embed.go copies the embedded filesystem into it at init time.
var PublicFs embed.FS

// IsEmbedded reports whether PublicFs actually contains the embedded UI
// assets. The assets are only present when built with the `ui` tag; the
// probe simply checks for the entry point of the single-page app.
func IsEmbedded() bool {
	if _, err := PublicFs.ReadFile("dist/index.html"); err != nil {
		return false
	}
	return true
}
15 |
--------------------------------------------------------------------------------
/ui/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Clustercode UI
8 |
9 |
10 |
11 |
12 |
13 |
14 |
--------------------------------------------------------------------------------
/ui/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "clustercode",
3 | "private": true,
4 | "version": "master",
5 | "type": "module",
6 | "scripts": {
7 | "dev": "vite",
8 | "build": "vite build",
9 | "preview": "vite preview",
10 | "check": "svelte-check --tsconfig ./tsconfig.json",
11 | "format": "prettier --write ./src/**/*.{js,svelte,html,ts}",
12 | "lint": "eslint './src/**/*.{js,ts,svelte}'",
13 | "lint:fix": "eslint --fix './src/**/*.{js,ts,svelte}'",
14 | "prelint": "npm run format",
15 | "cy:open": "cypress open --component",
16 | "cy:run": "cypress run --component"
17 | },
18 | "devDependencies": {
19 | "@cypress/svelte": "^2.0.0",
20 | "@cypress/vite-dev-server": "5.0.5",
21 | "@sveltejs/vite-plugin-svelte": "2.0.3",
22 | "@testing-library/cypress": "9.0.0",
23 | "@tsconfig/svelte": "3.0.0",
24 | "@typescript-eslint/eslint-plugin": "5.56.0",
25 | "@typescript-eslint/parser": "5.56.0",
26 | "bootstrap": "5.2.3",
27 | "cypress": "12.8.1",
28 | "dotenv": "16.0.3",
29 | "eslint": "8.36.0",
30 | "eslint-plugin-svelte3": "4.0.0",
31 | "jose": "4.13.1",
32 | "prettier": "2.8.7",
33 | "prettier-plugin-svelte": "2.10.0",
34 | "svelte": "3.57.0",
35 | "svelte-check": "3.1.4",
36 | "svelte-preprocess": "5.0.3",
37 | "sveltestrap": "5.10.0",
38 | "tslib": "2.5.0",
39 | "typescript": "4.9.5",
40 | "vite": "4.2.1"
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/ui/public/robots.txt:
--------------------------------------------------------------------------------
1 | User-agent: *
2 | Disallow: /
3 |
--------------------------------------------------------------------------------
/ui/public/vite.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ui/src/App.svelte:
--------------------------------------------------------------------------------
1 |
5 |
6 |
7 |
8 | Login
9 |
10 |
11 |
12 |
--------------------------------------------------------------------------------
/ui/src/assets/svelte.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ui/src/kube/client.ts:
--------------------------------------------------------------------------------
1 | import * as jose from "jose"
2 | import type { KubeObject } from "./object"
3 | import { SelfSubjectAccessReview } from "./types/selfSubjectAccessReview"
4 |
/**
 * Client is a minimal Kubernetes API client that authenticates every
 * request with a Service Account bearer token.
 */
export class Client {
  // Bearer token sent in the Authorization header; set by login().
  token = ""

  /** Creates the given object in the cluster via a POST request. */
  async create(obj: T): Promise {
    return this.makeRequest(obj, "POST")
  }

  /** Retrieves the given object kind from the cluster via a GET request. */
  async get(obj: T): Promise {
    return this.makeRequest(obj, "GET")
  }

  /**
   * Sends the object to the collection endpoint derived from its apiVersion
   * and kind (naive pluralization by appending "s") and unwraps the JSON
   * response. A response of kind "Status" is treated as an API error and
   * converted into a thrown {@link RequestError}.
   */
  private async makeRequest(obj: T, method: string): Promise {
    const endpoint = `/apis/${obj.apiVersion}/${obj.kind.toLowerCase()}s`
    // NOTE(review): fetch() rejects GET/HEAD requests that carry a body,
    // so get() likely throws at runtime — confirm and omit the body for GET.
    return await fetch(endpoint, {
      headers: {
        "Content-Type": "application/json",
        Authorization: `Bearer ${this.token}`
      },
      body: JSON.stringify(obj),
      method: method
    })
      .then(response => response.json())
      .then(json => {
        // Kubernetes reports errors as a "Status" object in the body.
        if (Object.prototype.hasOwnProperty.call(json, "kind")) {
          const err: kubeerror = json as kubeerror
          if (err.kind === "Status") {
            throw new RequestError(err.message, err.reason, err.status, err.code)
          }
        }
        return json as Promise
      })
  }

  /**
   * Parses the given Kubernetes token, performs a SelfSubjectAccessReview request and stores the token in the client.
   * @param token the JWT of a Kubernetes Service Account.
   * @throws JWTInvalid if token is not a JWT token, {@link RequestError} from Kubernetes or other Error if request fails.
   */
  async login(token: string): Promise {
    const saToken: ServiceAccountToken = decodeToken(token)
    const obj = new SelfSubjectAccessReview(
      "get",
      "blueprints",
      "clustercode.github.io",
      saToken.namespace
    )
    // Store the token first so the access review request itself is authenticated.
    this.token = token
    return this.create(obj)
  }
}
55 |
56 | /**
57 | * Decodes a Kubernetes JWT token and returns some metadata.
58 | * @param token to decode as JWT
59 | * @returns The parsed {@link ServiceAccountToken }
60 | * @throws any {@link jose.JWTInavlid} error
61 | */
62 | export function decodeToken(token: string): ServiceAccountToken {
63 | const payload = jose.decodeJwt(token)
64 | const parts = payload.sub.split(":")
65 | const namespace = parts[2]
66 | const name = parts[3]
67 | return {
68 | sub: payload.sub,
69 | namespace: namespace,
70 | name: name
71 | }
72 | }
73 |
74 | export interface ServiceAccountToken {
75 | readonly sub: string
76 | readonly namespace: string
77 | readonly name: string
78 | }
79 |
80 | interface kubeerror {
81 | message: string
82 | reason: string
83 | code: number
84 | status: string
85 | kind?: string
86 | }
87 |
88 | export class RequestError extends Error {
89 | readonly reason?: string
90 | readonly status?: string
91 | readonly code?: number
92 |
93 | constructor(msg: string, reason: string, status: string, code: number) {
94 | super(msg)
95 | this.reason = reason
96 | this.status = status
97 | this.code = code
98 | }
99 | }
100 |
--------------------------------------------------------------------------------
/ui/src/kube/object.ts:
--------------------------------------------------------------------------------
1 | export interface KubeObject {
2 | apiVersion: string
3 | kind: string
4 | metadata?: KubeMeta
5 | }
6 |
7 | export interface KubeMeta {
8 | name: string
9 | namespace: string
10 | }
11 |
--------------------------------------------------------------------------------
/ui/src/kube/types/list.ts:
--------------------------------------------------------------------------------
1 | export interface List {
2 | kind: "List"
3 | apiVersion: "v1"
4 | metadata: object
5 | items: T[]
6 | }
7 |
--------------------------------------------------------------------------------
/ui/src/kube/types/selfSubjectAccessReview.ts:
--------------------------------------------------------------------------------
1 | import type { KubeObject } from "../object"
2 |
3 | export class SelfSubjectAccessReview implements KubeObject {
4 | readonly kind = "SelfSubjectAccessReview"
5 | readonly apiVersion = "authorization.k8s.io/v1"
6 | readonly spec = {
7 | resourceAttributes: {
8 | namespace: "",
9 | verb: "",
10 | resource: "",
11 | group: ""
12 | }
13 | }
14 | status?: SelfSubjectAccessReviewStatus
15 |
16 | constructor(verb: string, resource: string, group: string, namespace: string) {
17 | this.spec.resourceAttributes.verb = verb
18 | this.spec.resourceAttributes.resource = resource
19 | this.spec.resourceAttributes.group = group
20 | this.spec.resourceAttributes.namespace = namespace
21 | }
22 | }
23 |
24 | export interface SelfSubjectAccessReviewStatus {
25 | allowed: boolean
26 | reason: string
27 | }
28 |
--------------------------------------------------------------------------------
/ui/src/kube/types/task.ts:
--------------------------------------------------------------------------------
// Task mirrors the clustercode Task custom resource.
// NOTE(review): apiVersion here is "clustercode.github.io/v1" while the e2e
// manifests in this repo use "clustercode.github.io/v1alpha1" — confirm which
// version the API actually serves.
export class Task {
  readonly kind = "Task"
  readonly apiVersion = "clustercode.github.io/v1"
  readonly spec = {}
  readonly status = {}
}
7 |
--------------------------------------------------------------------------------
/ui/src/lib/LoginForm.svelte:
--------------------------------------------------------------------------------
1 |
71 |
72 | {#if alertVisible}
73 |
80 | {displayError}
81 |
82 | {/if}
83 |
84 |
85 |
86 |
87 |
88 |
--------------------------------------------------------------------------------
/ui/src/main.ts:
--------------------------------------------------------------------------------
1 | import "bootstrap/dist/css/bootstrap.min.css"
2 | import App from "./App.svelte"
3 |
// Bootstrap the Svelte application into the #app element of index.html.
const app = new App({
  target: document.getElementById("app")
})

export default app
9 |
--------------------------------------------------------------------------------
/ui/src/stores/ClientStore.ts:
--------------------------------------------------------------------------------
1 | import { writable } from "svelte/store"
2 | import type { Client } from "../kube/client"
3 |
4 | /**
5 | * ClientStore contains the Kubernetes client
6 | */
7 | export const ClientStore = writable()
8 |
--------------------------------------------------------------------------------
/ui/src/stores/SettingsStore.ts:
--------------------------------------------------------------------------------
1 | import { readable } from "svelte/store"
2 |
3 | /**
4 | * ServerSettings contains properties transferred over from the server
5 | */
6 | export interface ServerSettings {
7 | authCookieMaxAge?: number
8 | }
9 |
10 | let cached: ServerSettings
11 |
12 | export const ServerSettingsStore = readable(
13 | newDefaultSettings(),
14 | function start(set) {
15 | if (cached !== undefined) {
16 | set(cached)
17 | return
18 | }
19 | fetch("/settings", {
20 | headers: {
21 | "Content-Type": "application/json"
22 | },
23 | credentials: "omit"
24 | })
25 | .then(resp => {
26 | if (resp.ok) {
27 | return resp.json()
28 | }
29 | throw new Error(resp.statusText)
30 | })
31 | .then(js => {
32 | set(js as ServerSettings)
33 | })
34 | .catch(e => {
35 | console.log("could not load server settings", e)
36 | })
37 | }
38 | )
39 |
40 | function newDefaultSettings(): ServerSettings {
41 | return {
42 | authCookieMaxAge: 86400
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/ui/src/vite-env.d.ts:
--------------------------------------------------------------------------------
1 | ///
2 | ///
3 |
--------------------------------------------------------------------------------
/ui/svelte.config.js:
--------------------------------------------------------------------------------
1 | import sveltePreprocess from 'svelte-preprocess'
2 |
3 | export default {
4 | // Consult https://github.com/sveltejs/svelte-preprocess
5 | // for more information about preprocessors
6 | preprocess: sveltePreprocess()
7 | }
8 |
--------------------------------------------------------------------------------
/ui/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "extends": "@tsconfig/svelte/tsconfig.json",
3 | "compilerOptions": {
4 | "target": "ESNext",
5 | "useDefineForClassFields": true,
6 | "module": "ESNext",
7 | "resolveJsonModule": true,
8 | "baseUrl": ".",
9 | /**
10 | * Typecheck JS in `.svelte` and `.js` files by default.
11 | * Disable checkJs if you'd like to use dynamic types in JS.
12 | * Note that setting allowJs false does not prevent the use
13 | * of JS in `.svelte` files.
14 | */
15 | "allowJs": true,
16 | "checkJs": true,
17 | "isolatedModules": true,
18 | "strict": false,
19 | "types": [
20 | "cypress",
21 | "@testing-library/cypress"
22 | ]
23 | },
24 | "include": ["src/**/*.d.ts", "src/**/*.ts", "src/**/*.js", "src/**/*.svelte", "cypress/support/component.ts"],
25 | "references": [{ "path": "./tsconfig.node.json" }]
26 | }
27 |
--------------------------------------------------------------------------------
/ui/tsconfig.node.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "composite": true,
4 | "module": "ESNext",
5 | "moduleResolution": "Node"
6 | },
7 | "include": ["vite.config.ts"]
8 | }
9 |
--------------------------------------------------------------------------------
/ui/ui.mk:
--------------------------------------------------------------------------------
1 | # These make target are generally only needed by CI/CD
2 | # Use npm directly for local development
3 |
4 | clean_targets += clean-ui
5 |
6 | npm = npm --prefix ./ui
7 | npm_run = $(npm) run
8 |
9 | ui/node_modules:
10 | $(npm) install
11 |
12 | ui/dist:
13 | mkdir -p $@/assets
14 |
15 | .PHONY: lint-ui
16 | lint-ui: ui/node_modules ## Runs linters for the UI code
17 | $(npm_run) lint
18 | $(npm_run) check
19 |
20 | .PHONY: test-ui
21 | test-ui: ui/node_modules ## Runs tests for the UI code
22 | $(npm_run) cy:run
23 |
24 | .PHONY: build-ui
25 | build-ui: ui/node_modules ## Builds the UI for packaging
26 | $(npm_run) build
27 |
28 | .PHONY: clean-ui
29 | clean-ui: ## Removes all UI-related artifacts (node_modules, dist)
30 | rm -rf ui/node_modules ui/dist ui/.env
31 |
32 | .PHONY: run-ui
33 | run-ui: ui/.env ## Prepares the local environment to run the UI in development mode including kind cluster
34 | $(npm_run) dev
35 |
36 | ui/.env: $(KIND_KUBECONFIG)
37 | @echo "VITE_KUBERNETES_API_URL=$$(yq e '.clusters[0].cluster.server' $(KIND_KUBECONFIG))" > $@
38 |
--------------------------------------------------------------------------------
/ui/vite.config.ts:
--------------------------------------------------------------------------------
1 | import { defineConfig, loadEnv } from 'vite'
2 | import { svelte } from '@sveltejs/vite-plugin-svelte'
3 | import 'dotenv/config'
4 |
// https://vitejs.dev/config/
export default defineConfig(({mode}) => {
  // Outside production builds the dev-server proxy needs the cluster's API
  // URL; the `run-ui` make target writes it to ui/.env as VITE_KUBERNETES_API_URL.
  if (mode !== "production" && !process.env.VITE_KUBERNETES_API_URL) {
    console.log('⚠️ WARNING ⚠️ :environment variable VITE_KUBERNETES_API_URL is not defined. API may not be working!')
  }
  return {
    plugins: [svelte()],
    server: {
      proxy: {
        // Forward Kubernetes API calls from the dev server to the cluster.
        '/apis': {
          target: process.env.VITE_KUBERNETES_API_URL,
          changeOrigin: true,
          secure: false,
        },
        "/settings": {
          // this is for running "go run . webui"
          target: "http://localhost:8080",
          changeOrigin: true,
          secure: false,
        }
      }
    }
  }
})
29 |
--------------------------------------------------------------------------------
/webhook_command.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "github.com/ccremer/clustercode/pkg/webhook"
5 | "github.com/urfave/cli/v2"
6 | )
7 |
// newWebhookCommand returns the CLI command that runs clustercode as a
// Kubernetes admission controller (webhook server).
func newWebhookCommand() *cli.Command {
	command := &webhook.Command{}
	return &cli.Command{
		Name:   "webhook",
		Usage:  "Start clustercode in admission controller mode",
		Before: LogMetadata,
		Action: func(ctx *cli.Context) error {
			// Give the webhook its own named logger before executing.
			command.Log = AppLogger(ctx).WithName(ctx.Command.Name)
			return command.Execute(ctx.Context)
		},
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "webhook-tls-cert-dir", EnvVars: envVars("WEBHOOK_TLS_CERT_DIR"), Required: true,
				Usage: "Directory containing the certificates for the webhook server. It's expected to contain the 'tls.crt' and 'tls.key' files.",
				Destination: &command.WebhookCertDir,
			},
		},
	}
}
26 |
--------------------------------------------------------------------------------
/webui_command.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "os"
5 | "time"
6 |
7 | "github.com/ccremer/clustercode/pkg/webui"
8 | "github.com/lestrrat-go/jwx/v2/jwt"
9 | "github.com/urfave/cli/v2"
10 | )
11 |
12 | const apiUrlFlag = "api-url"
13 |
// newWebuiCommand returns the CLI command that serves the clustercode web
// frontend, including an optional proxy towards the Kubernetes API.
func newWebuiCommand() *cli.Command {
	command := &webui.Command{}
	return &cli.Command{
		Name:   "webui",
		Usage:  "Start clustercode frontend web server",
		// discoverKubernetesAPI resolves the "auto" value of --api-url
		// before the action runs.
		Before: discoverKubernetesAPI,
		Action: func(ctx *cli.Context) error {
			command.Log = AppLogger(ctx).WithName(ctx.Command.Name)
			return command.Execute(ctx.Context)
		},
		Flags: []cli.Flag{
			&cli.StringFlag{Name: apiUrlFlag, EnvVars: envVars("API_URL"),
				Usage: "Full base URL of the Kubernetes API server that is being proxied. If empty, the proxy is disabled. If set to 'auto', it will try to discover it using the service account token.",
				Value: "auto",
				Destination: &command.ApiURL,
			},
			&cli.BoolFlag{Name: "api-tls-skip-verify", EnvVars: envVars("API_TLS_SKIP_VERIFY"),
				Usage: "Whether the certificate verification of the Kubernetes API server should be verified",
				Destination: &command.ApiTLSSkipVerify,
			},
			// No Destination here: discoverKubernetesAPI reads this flag
			// via ctx.String("sa-token-path").
			&cli.PathFlag{Name: "sa-token-path", EnvVars: envVars("API_SA_TOKEN_PATH"),
				Usage: "Path to the Kubernetes Service Account token secret for auto-discovery",
				Value: "/var/run/secrets/kubernetes.io/serviceaccount/token",
			},
			&cli.DurationFlag{Name: "auth-cookie-max-age", EnvVars: envVars("AUTH_COOKIE_MAX_AGE"),
				Usage: "Duration of authentication cookie(s) when logging in to web UI. Accepts units [h, m, s]. If 0 or negative, cookies are disabled",
				Value: 24 * time.Hour,
				Destination: &command.AuthCookieMaxAge,
			},
		},
	}
}
46 |
47 | func discoverKubernetesAPI(ctx *cli.Context) error {
48 | _ = LogMetadata(ctx)
49 | log := AppLogger(ctx).WithName(ctx.Command.Name)
50 |
51 | if ctx.String(apiUrlFlag) != "auto" {
52 | return nil
53 | }
54 |
55 | path := ctx.String("sa-token-path")
56 | raw, err := os.ReadFile(path)
57 | if err != nil {
58 | log.Info("Cannot read the token", "error", err.Error())
59 | return ctx.Set(apiUrlFlag, "")
60 | }
61 | token, err := jwt.Parse(raw, jwt.WithVerify(false))
62 | if err != nil {
63 | log.Info("Cannot parse the token", "error", err.Error())
64 | return ctx.Set(apiUrlFlag, "")
65 | }
66 | aud := token.Audience()
67 | if len(aud) > 0 {
68 | log.Info("Discovered Kubernetes API URL", "url", aud[0])
69 | return ctx.Set(apiUrlFlag, aud[0])
70 | }
71 | return nil
72 | }
73 |
--------------------------------------------------------------------------------