├── env
│   ├── .keep
│   └── prod
│       ├── driver
│       ├── .gitignore
│       ├── sops-age-recipient.txt
│       ├── config.libsonnet
│       └── secrets.yml
├── workloads
│   ├── cert-manager
│   │   ├── cert-manager.yml.sum
│   │   ├── secret.template.yml
│   │   ├── README.md
│   │   └── main.libsonnet
│   ├── backup
│   │   ├── backup.sh
│   │   ├── secret.template.yml
│   │   ├── README.md
│   │   └── main.libsonnet
│   ├── config.template.libsonnet
│   ├── seafile
│   │   ├── secret.template.yml
│   │   ├── README.md
│   │   └── main.libsonnet
│   ├── forgejo
│   │   ├── secret.template.yml
│   │   ├── README.md
│   │   └── main.libsonnet
│   ├── open-webui
│   │   ├── secret.template.yml
│   │   └── main.libsonnet
│   ├── romm
│   │   ├── secret.template.yml
│   │   ├── README.md
│   │   └── main.libsonnet
│   ├── chess2online
│   │   ├── secret.template.yml
│   │   └── main.libsonnet
│   ├── whoami
│   │   └── main.libsonnet
│   ├── core
│   │   ├── authelia.libsonnet
│   │   ├── README.md
│   │   └── main.libsonnet
│   ├── utils.test.jsonnet
│   ├── dashboard
│   │   ├── main.libsonnet
│   │   └── index.html
│   ├── main.jsonnet
│   └── utils.libsonnet
├── .gitignore
├── driver
│   ├── lima-template.yml
│   ├── hetzner-firewall.json
│   ├── lima
│   ├── bootstrap.sh
│   └── hetzner
├── docs
│   └── Storage.md
├── integration-test
├── Argcfile.sh
└── README.md
/env/.keep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/env/prod/driver:
--------------------------------------------------------------------------------
1 | ../../driver/hetzner
--------------------------------------------------------------------------------
/env/prod/.gitignore:
--------------------------------------------------------------------------------
1 | /ryan.key
2 | /kubeconfig.yml
3 |
--------------------------------------------------------------------------------
/env/prod/sops-age-recipient.txt:
--------------------------------------------------------------------------------
1 | age1ye7q4uvexwhruvm5p9svw9z5tu58v9uk6r9pv4aue8j7v0lmpqus5ug9az
2 |
--------------------------------------------------------------------------------
/workloads/cert-manager/cert-manager.yml.sum:
--------------------------------------------------------------------------------
1 | 940c85a3afa41f9bed7e5555002a537ebfa555ee workloads/cert-manager/cert-manager.yml
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform
2 | terraform.tfstate
3 | terraform.tfstate.*
4 | /vault.txt
5 | /env/local
6 | /workloads/cert-manager/cert-manager.yml
7 |
--------------------------------------------------------------------------------
/workloads/backup/backup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -uex
3 | apk update
4 | apk add restic
5 | restic version
6 | restic backup --verbose /var/lib/rancher --exclude-caches -o s3.storage-class=STANDARD_IA
7 |
--------------------------------------------------------------------------------
/workloads/config.template.libsonnet:
--------------------------------------------------------------------------------
1 | {
2 | domain: 'lvh.me',
3 | workloads: {
4 | core: {
5 | secrets: importstr 'secrets.yml',
6 | },
7 | whoami: {},
8 | },
9 | }
10 |
--------------------------------------------------------------------------------
/driver/lima-template.yml:
--------------------------------------------------------------------------------
1 | # Based off https://github.com/lima-vm/lima/blob/master/templates/k3s.yaml
2 | minimumLimaVersion: 1.1.0
3 |
4 | base: template://_images/ubuntu-lts
5 | mounts: []
6 | containerd:
7 | system: false
8 | user: false
9 | timezone: UTC
10 |
--------------------------------------------------------------------------------
/workloads/seafile/secret.template.yml:
--------------------------------------------------------------------------------
1 | apiVersion: isindir.github.com/v1alpha3
2 | kind: SopsSecret
3 | metadata:
4 | name: sops-secrets
5 | spec:
6 | secretTemplates:
7 | - name: seafile
8 | stringData:
9 | JWT_PRIVATE_KEY: your-secret-key-here
10 |
--------------------------------------------------------------------------------
/docs/Storage.md:
--------------------------------------------------------------------------------
1 | # Storage Management
2 |
3 | ## Expanding the storage drive
4 |
5 | The storage drive can be expanded. Once you increase the physical size of the disk, reboot to have cloud-init automatically resize the encrypted container to fit. Unseal the vault, and then run `resize2fs /dev/mapper/data` to expand the filesystem.
6 |
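7 | For concreteness, a sketch of the full sequence, assuming the `unseal` helper installed by the bootstrap script:
8 |
9 | ```bash
10 | # Grow the disk in the provider console first, then reboot so cloud-init
11 | # resizes the LUKS container to match the new disk size.
12 | reboot
13 | # After reconnecting, unlock the vault; cryptsetup prompts for the disk
14 | # password, then k3s starts.
15 | unseal
16 | # Grow the ext4 filesystem to fill the container.
17 | resize2fs /dev/mapper/data
18 | ```
19 |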
--------------------------------------------------------------------------------
/workloads/forgejo/secret.template.yml:
--------------------------------------------------------------------------------
1 | apiVersion: isindir.github.com/v1alpha3
2 | kind: SopsSecret
3 | metadata:
4 | name: sops-secrets
5 | spec:
6 | secretTemplates:
7 | - name: forgejo
8 | stringData:
9 | secret_key: your-secret-key-here
10 | mailer_passwd: smtp-password-here
11 |
--------------------------------------------------------------------------------
/workloads/cert-manager/secret.template.yml:
--------------------------------------------------------------------------------
1 | apiVersion: isindir.github.com/v1alpha3
2 | kind: SopsSecret
3 | metadata:
4 | name: sops-secrets
5 | namespace: cert-manager
6 | spec:
7 | secretTemplates:
8 | - name: aws-access-key
9 | stringData:
10 | AWS_ACCESS_KEY_ID: ""
11 | AWS_SECRET_ACCESS_KEY: ""
12 |
--------------------------------------------------------------------------------
/workloads/backup/secret.template.yml:
--------------------------------------------------------------------------------
1 | apiVersion: isindir.github.com/v1alpha3
2 | kind: SopsSecret
3 | metadata:
4 | namespace: admin
5 | name: sops-secrets
6 | spec:
7 | secretTemplates:
8 | - name: backup-secrets
9 | stringData:
10 | AWS_ACCESS_KEY_ID: your-aws-access-key-id
11 | AWS_SECRET_ACCESS_KEY: your-aws-secret-access-key
12 | RESTIC_REPOSITORY: s3:s3.amazonaws.com/your-s3-bucket
13 | RESTIC_PASSWORD: your-restic-password
14 |
--------------------------------------------------------------------------------
/workloads/open-webui/secret.template.yml:
--------------------------------------------------------------------------------
1 | apiVersion: isindir.github.com/v1alpha3
2 | kind: SopsSecret
3 | metadata:
4 | name: sops-secrets
5 | spec:
6 | secretTemplates:
7 | - name: open-webui-secrets
8 | stringData:
9 | WEBUI_SECRET_KEY: your-secret-key-here
10 | ANTHROPIC_API_KEY: your-anthropic-api-key
11 | OPENAI_API_KEY: your-openai-api-key
12 | GOOGLE_PSE_API_KEY: your-google-pse-api-key
13 | GOOGLE_PSE_ENGINE_ID: your-google-pse-engine-id
14 |
--------------------------------------------------------------------------------
/workloads/backup/README.md:
--------------------------------------------------------------------------------
1 | # Backup with Restic
2 |
3 | Uses [Restic](https://restic.readthedocs.io) to periodically back up the cluster. This workload is only appropriate for a single-node cluster that stores all volumes using the built-in k3s local-path-provisioner.
4 |
5 | ## Installation
6 |
7 | 1. Merge the [secret.template.yml](./secret.template.yml) file with your environment's secrets.
8 | 1. Add `backup: {}` to the `workloads` object in your environment's `config.libsonnet` (see the example below).
9 | 1. Run `argc apply core && argc apply backup`.
10 |
11 | ## Trigger a manual backup
12 |
13 | ```bash
14 | kubectl create job -n admin --from=cronjob/backup manual-backup
15 | kubectl logs -n admin job/manual-backup -f
16 | kubectl delete -n admin job/manual-backup
17 | ```
18 |
19 |
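20 | ## Example configuration
21 |
22 | For step 2 above, a minimal sketch of the environment config, following `config.template.libsonnet` (the domain is a placeholder):
23 |
24 | ```jsonnet
25 | // env/<name>/config.libsonnet
26 | {
27 |   domain: 'example.com',
28 |   workloads: {
29 |     core: { secrets: importstr 'secrets.yml' },
30 |     backup: {},  // enables the monthly backup CronJob
31 |   },
32 | }
33 | ```
34 |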
--------------------------------------------------------------------------------
/workloads/romm/secret.template.yml:
--------------------------------------------------------------------------------
1 | apiVersion: isindir.github.com/v1alpha3
2 | kind: SopsSecret
3 | metadata:
4 | name: sops-secrets
5 | spec:
6 | secretTemplates:
7 | - name: romm
8 | stringData:
9 | OIDC_CLIENT_ID: romm
10 | OIDC_CLIENT_SECRET: # See README
11 | ROMM_AUTH_SECRET_KEY: # Generate a key with `openssl rand -hex 32`
12 | SCREENSCRAPER_USER: # These are the recommended metadata providers
13 | SCREENSCRAPER_PASSWORD: # https://docs.romm.app/latest/Getting-Started/Metadata-Providers/#screenscraper
14 | RETROACHIEVEMENTS_API_KEY: # https://docs.romm.app/latest/Getting-Started/Metadata-Providers/#retroachievements
15 | STEAMGRIDDB_API_KEY: # https://docs.romm.app/latest/Getting-Started/Metadata-Providers/#steamgriddb
16 | HASHEOUS_API_ENABLED: 'true' # https://docs.romm.app/latest/Getting-Started/Metadata-Providers/#hasheous
17 |
--------------------------------------------------------------------------------
/workloads/chess2online/secret.template.yml:
--------------------------------------------------------------------------------
1 | apiVersion: isindir.github.com/v1alpha3
2 | kind: SopsSecret
3 | metadata:
4 | name: sops-secrets
5 | spec:
6 | secretTemplates:
7 | - name: chess2online-config
8 | stringData:
9 | production.json: |
10 | {
11 | "knex": {
12 | "client": "sqlite3",
13 | "connection": {
14 | "filename": "/app/db/production.sqlite3"
15 | },
16 | "useNullAsDefault": true
17 | },
18 | "auth": {
19 | "secret": "REPLACE_WITH_SECRET"
20 | }
21 | }
22 | - name: chess2online-registry
23 | type: kubernetes.io/dockerconfigjson
24 | stringData:
25 | .dockerconfigjson: |
26 | {
27 | "auths": {
28 | "registry.gitlab.com": {
29 | "auth": "BASE64_ENCODED_USERNAME_PASSWORD"
30 | }
31 | }
32 | }
33 |
--------------------------------------------------------------------------------
/workloads/whoami/main.libsonnet:
--------------------------------------------------------------------------------
1 | local utils = import 'utils.libsonnet';
2 |
3 | {
4 | priority: 100,
5 |
6 | manifests(_config): {
7 | local module = self,
8 | local config = {} + _config,
9 |
10 | deployment: {
11 | apiVersion: 'apps/v1',
12 | kind: 'Deployment',
13 | metadata: {
14 | name: 'whoami',
15 | },
16 | spec: {
17 | replicas: 1,
18 | selector: {
19 | matchLabels: {
20 | app: 'whoami',
21 | },
22 | },
23 | template: {
24 | metadata: {
25 | labels: {
26 | app: 'whoami',
27 | },
28 | },
29 | spec: {
30 | containers: [
31 | {
32 | name: 'whoami',
33 | image: 'traefik/whoami',
34 | resources: {
35 | limits: {
36 | memory: '50Mi',
37 | },
38 | },
39 | },
40 | ],
41 | },
42 | },
43 | },
44 | },
45 |
46 | serviceIngress: utils.simple_service(config, { app: 'whoami', port: 80 }),
47 |
48 | },
49 | }
50 |
--------------------------------------------------------------------------------
/driver/hetzner-firewall.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "description": "ping",
4 | "direction": "in",
5 | "protocol": "icmp",
6 | "source_ips": ["0.0.0.0/0", "::/0"]
7 | },
8 | {
9 | "description": "ssh",
10 | "direction": "in",
11 | "protocol": "tcp",
12 | "port": "22",
13 | "source_ips": ["0.0.0.0/0", "::/0"]
14 | },
15 | {
16 | "description": "http",
17 | "direction": "in",
18 | "protocol": "tcp",
19 | "port": "80",
20 | "source_ips": ["0.0.0.0/0", "::/0"]
21 | },
22 | {
23 | "description": null,
24 | "direction": "in",
25 | "protocol": "udp",
26 | "port": "80",
27 | "source_ips": ["0.0.0.0/0", "::/0"]
28 | },
29 | {
30 | "description": "https",
31 | "direction": "in",
32 | "protocol": "tcp",
33 | "port": "443",
34 | "source_ips": ["0.0.0.0/0", "::/0"]
35 | },
36 | {
37 | "description": "quic",
38 | "direction": "in",
39 | "protocol": "udp",
40 | "port": "443",
41 | "source_ips": ["0.0.0.0/0", "::/0"]
42 | },
43 | {
44 | "description": "k8s",
45 | "direction": "in",
46 | "protocol": "tcp",
47 | "port": "6443",
48 | "source_ips": ["0.0.0.0/0", "::/0"]
49 | }
50 | ]
51 |
--------------------------------------------------------------------------------
/workloads/core/authelia.libsonnet:
--------------------------------------------------------------------------------
1 | function(config) {
2 | theme: 'auto',
3 | server: { address: 'tcp://:9091' },
4 | log: { level: if config.verbose then 'debug' else 'info' },
5 | authentication_backend: {
6 | file: { path: '/var/lib/authelia/users.yml', watch: true },
7 | },
8 | access_control: {
9 | default_policy: 'one_factor',
10 | },
11 | session: {
12 | cookies: [
13 | {
14 | domain: config.domain,
15 | authelia_url: 'https://auth.' + config.domain,
16 | inactivity: '1 day',
17 | expiration: '1 day',
18 | },
19 | ],
20 | },
21 | storage: {
22 | 'local': { path: '/var/lib/authelia/db.sqlite3' },
23 | },
24 | notifier: {
25 | [if !config.mailer.enabled then 'filesystem']: {
26 | filename: '/var/lib/authelia/notification.txt',
27 | },
28 | [if config.mailer.enabled then 'smtp']: {
29 | address: config.mailer.address,
30 | username: config.mailer.username,
31 | sender: config.mailer.sender,
32 | identifier: config.mailer.identifier,
33 | },
34 | },
35 | identity_providers: {
36 | oidc: {
37 | clients: [
38 | {
39 | client_id: 'authelia_requires_at_least_one_client',
40 | public: true,
41 | redirect_uris: [],
42 | },
43 | ],
44 | },
45 | },
46 | }
47 |
--------------------------------------------------------------------------------
/workloads/seafile/README.md:
--------------------------------------------------------------------------------
1 | # Seafile
2 |
3 | [Seafile](https://www.seafile.com/en/home/) is a self-hosted Dropbox alternative. This configuration uses the Pro version of Seafile, which is free for up to 3 users without a license.
4 |
5 | ## Installation
6 |
7 | Add Seafile to the environment configuration (a config sketch appears at the end of this page). Add secret.template.yml to your environment's secrets.yml in the default namespace.
8 |
9 | After starting the container for the first time, you should be able to access the login page, but won't be able to log in. You need to create an admin user account, and make some changes to the configuration.
10 |
11 | ```bash
12 | # Access the container
13 | kubectl exec -it deployment/seafile -- /bin/bash
14 | # Create the admin user
15 | /opt/seafile/seafile-server-latest/reset-admin.sh
16 | # The memcached server name is hard-coded and must be updated
17 | sed -i 's/memcached:11211/localhost:11211/' conf/seahub_settings.py
18 | # I also choose to disable search. Note that ElasticSearch is not in the manifests.
19 | sed -i '/\[INDEX FILES\]/,/\[.*\]/ s/^enabled = true/enabled = false/' conf/seafevents.conf
20 | ```
21 |
22 | After doing that, restart the deployment using `kubectl rollout restart deployment seafile`. Then you can log in normally and set up the instance the way you like. Make sure to do these two things:
23 |
24 | - Set the `SERVICE_URL` and `FILE_SERVER_URL` in the system settings.
25 | - Delete the empty default user.
26 |
27 |
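28 | For reference, a sketch of the config entry described under Installation; `image_tag` is optional and defaults to `12.0-latest`:
29 |
30 | ```jsonnet
31 | // env/<name>/config.libsonnet (fragment)
32 | workloads: {
33 |   seafile: {},  // or: { image_tag: '12.0-latest' }
34 | },
35 | ```
36 |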
--------------------------------------------------------------------------------
/workloads/cert-manager/README.md:
--------------------------------------------------------------------------------
1 | # cert-manager
2 |
3 | [cert-manager](https://cert-manager.io/docs/) is used to issue TLS certificates for all subdomains. By default, it uses the http challenge and LetsEncrypt staging.
4 |
5 | ## Configuration
6 |
7 | ```jsonnet
8 | // config.libsonnet
9 | 'cert-manager': {
10 |   email: 'you@example.com',
11 |   staging: false,  // Required to issue real certificates
12 | }
13 | ```
14 |
15 | ## DNS01 and Route53
16 |
17 | You'll need to create an AWS access key with the following policy. Make sure that you replace `$HOSTED_ZONE_ID` with the actual ID from Route 53. It looks like `Z08479911R6V57QW3SS8R`.
18 |
19 | ```json
20 | {
21 | "Version": "2012-10-17",
22 | "Statement": [
23 | {
24 | "Effect": "Allow",
25 | "Action": "route53:GetChange",
26 | "Resource": "arn:aws:route53:::change/*"
27 | },
28 | {
29 | "Effect": "Allow",
30 | "Action": [
31 | "route53:ChangeResourceRecordSets",
32 | "route53:ListResourceRecordSets"
33 | ],
34 | "Resource": "arn:aws:route53:::hostedzone/$HOSTED_ZONE_ID",
35 | "Condition": {
36 | "ForAllValues:StringEquals": {
37 | "route53:ChangeResourceRecordSetsRecordTypes": ["TXT"]
38 | }
39 | }
40 | }
41 | ]
42 | }
43 | ```
44 |
45 | Next add the AWS access key to a secret; use `secret.template.yml` as an example. Finally, set the top-level config key to enable wildcard certificates.
46 |
47 | ```jsonnet
48 | // config.libsonnet
49 | {
50 |   domain: 'example.com',
51 |   wildcardCertificate: true,
52 |   workloads: {
53 |     'cert-manager': {
54 |       email: 'you@example.com',
55 |       staging: false,
56 |       hostedZoneID: '$HOSTED_ZONE_ID',
57 |     },
58 |   },
59 | }
60 | ```
61 |
62 |
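63 | For reference, one way to provision the access key with the AWS CLI. The user name here is arbitrary, and `policy.json` is assumed to contain the policy above with `$HOSTED_ZONE_ID` substituted:
64 |
65 | ```bash
66 | aws iam create-user --user-name cert-manager-dns01
67 | aws iam put-user-policy --user-name cert-manager-dns01 \
68 |   --policy-name route53-dns01 --policy-document file://policy.json
69 | # Prints the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY pair for the secret.
70 | aws iam create-access-key --user-name cert-manager-dns01
71 | ```
72 |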
--------------------------------------------------------------------------------
/workloads/utils.test.jsonnet:
--------------------------------------------------------------------------------
1 | local utils = import 'utils.libsonnet';
2 |
3 | local test(name, actual, expected) =
4 | if actual != expected then
5 | error 'Test failed! ' + name + '\n Actual: ' + actual + '\n Expected: ' + expected
6 | else
7 | null;
8 |
9 | local testSuite(config) =
10 | local results = std.prune([
11 | if config[x].actual == config[x].expect then
12 | null
13 | else
14 | { name: x } + config[x]
15 | for x in std.objectFields(config)
16 | ]);
17 | if results == [] then
18 | std.length(config) + ' tests passed'
19 | else
20 | local format(arr, acc=[]) =
21 | if arr == [] then
22 | acc
23 | else
24 | local result = '\tTest failed: ' + arr[0].name + '\n\t\tActual: ' + arr[0].actual + '\n\t\tExpected: ' + arr[0].expect;
25 | format(arr[1:], acc + [result]);
26 | error std.length(results) + ' tests failed.\n\n' + std.join('\n', format(results));
27 |
28 | testSuite({
29 | 'no substitutions': {
30 | actual: utils.varSubstitute('the happy fox', { animal: 'hound' }),
31 | expect: 'the happy fox',
32 | },
33 | 'escaping $': {
34 | actual: utils.varSubstitute('the $${money} fox', { animal: 'hound' }),
35 | expect: 'the ${money} fox',
36 | },
37 | 'extra $': {
38 | actual: utils.varSubstitute('the $money fox', { animal: 'hound' }),
39 | expect: 'the $money fox',
40 | },
41 | substitution: {
42 | actual: utils.varSubstitute('the happy ${animal}!', { animal: 'hound' }),
43 | expect: 'the happy hound!',
44 | },
45 | 'back-to-back': {
46 | actual: utils.varSubstitute('${animal}${animal}$$${animal}', { animal: 'hound' }),
47 | expect: 'houndhound$hound',
48 | },
49 | recursive: {
50 | actual: utils.varSubstitute('the happy ${animal}', { animal: '${animal}' }),
51 | expect: 'the happy ${animal}',
52 | },
53 | })
54 |
--------------------------------------------------------------------------------
/workloads/dashboard/main.libsonnet:
--------------------------------------------------------------------------------
1 | local core = import 'core/main.libsonnet';
2 | local utils = import 'utils.libsonnet';
3 |
4 | local index_html = (
5 | function()
6 | local template = importstr 'index.html';
7 | template
8 | )();
9 |
10 | {
11 | priority: 100,
12 |
13 | manifests(_config): {
14 | local module = self,
15 | local config = {} + _config,
16 |
17 | deployment: {
18 | apiVersion: 'apps/v1',
19 | kind: 'Deployment',
20 | metadata: {
21 | name: 'dashboard',
22 | },
23 | spec: {
24 | replicas: 1,
25 | selector: {
26 | matchLabels: {
27 | app: 'dashboard',
28 | },
29 | },
30 | template: {
31 | metadata: {
32 | labels: {
33 | app: 'dashboard',
34 | },
35 | },
36 | spec: {
37 | containers: [
38 | {
39 | name: 'dashboard',
40 | image: 'halverneus/static-file-server',
41 | volumeMounts: [
42 | { name: 'web-content', mountPath: '/web' },
43 | ],
44 | resources: {
45 | limits: {
46 | memory: '50Mi',
47 | },
48 | },
49 | },
50 | ],
51 | volumes: [
52 | {
53 | name: 'web-content',
54 | configMap: { name: module.configMap.metadata.name },
55 | },
56 | ],
57 | },
58 | },
59 | },
60 | },
61 |
62 | configMap: utils.immutable_config_map({
63 | apiVersion: 'v1',
64 | kind: 'ConfigMap',
65 | metadata: {
66 | name: 'dashboard-files-',
67 | },
68 | data: {
69 | 'index.html': utils.varSubstitute(index_html, {
70 | domain: config.domain,
71 | }),
72 | },
73 | }),
74 |
75 | serviceIngress: utils.simple_service(config, { app: 'dashboard', port: 8080, host: config.domain, middlewares: [core.auth_middleware] }),
76 | },
77 | }
78 |
--------------------------------------------------------------------------------
/workloads/main.jsonnet:
--------------------------------------------------------------------------------
1 | local config = import 'config.libsonnet';
2 |
3 | // Config for kapp. This isn't actually applied to the cluster.
4 | // https://carvel.dev/kapp/docs/v0.64.x/config/
5 | local kappConfig = {
6 | apiVersion: 'kapp.k14s.io/v1alpha1',
7 | kind: 'Config',
8 | local pvcIgnoreAnnotations = ['volume.kubernetes.io/selected-node', 'volume.kubernetes.io/storage-provisioner'],
9 | diffAgainstExistingFieldExclusionRules: [
10 | {
11 | path: ['metadata', 'annotations', annotation],
12 | resourceMatchers: [
13 | { apiVersionKindMatcher: { apiVersion: 'v1', kind: 'PersistentVolumeClaim' } },
14 | ],
15 | }
16 | for annotation in pvcIgnoreAnnotations
17 | ],
18 | };
19 |
20 | local decls = {
21 | backup: import 'backup/main.libsonnet',
22 | core: import 'core/main.libsonnet',
23 | 'cert-manager': import 'cert-manager/main.libsonnet',
24 | chess2online: import 'chess2online/main.libsonnet',
25 | dashboard: import 'dashboard/main.libsonnet',
26 | forgejo: import 'forgejo/main.libsonnet',
27 | 'open-webui': import 'open-webui/main.libsonnet',
28 | romm: import 'romm/main.libsonnet',
29 | seafile: import 'seafile/main.libsonnet',
30 | whoami: import 'whoami/main.libsonnet',
31 | };
32 |
33 | local extractManifests(obj) =
34 | if std.isObject(obj) then
35 | if std.objectHas(obj, 'apiVersion') && std.objectHas(obj, 'kind') then
36 | [obj]
37 | else
38 | std.flattenArrays([extractManifests(x) for x in std.objectValues(obj)])
39 | else if std.isArray(obj) then
40 | std.flattenArrays([extractManifests(x) for x in obj])
41 | else
42 | [];
43 |
44 | local manifests(workload) =
45 | local module = std.get(decls, workload, error 'Invalid manifest: ' + workload);
46 | if !std.objectHas(config.workloads, workload) then
47 | error 'Manifest is disabled for environment: ' + workload
48 | else
49 | local globalConfig = {
50 | domain: config.domain,
51 | wildcardCertificate: std.get(config, 'wildcardCertificate', false),
52 | tcp_ports: std.get(config, 'tcp_ports', {}),
53 | };
54 | local moduleConfig = globalConfig + config.workloads[workload];
55 | local manifestTree = module.manifests(moduleConfig);
56 | extractManifests(manifestTree) + [kappConfig];
57 |
58 | {
59 | decls: decls,
60 | config: config,
61 | manifests: manifests,
62 | }
63 |
--------------------------------------------------------------------------------
/workloads/romm/README.md:
--------------------------------------------------------------------------------
1 | # RomM
2 |
3 | A beautiful, powerful, self-hosted rom manager and player. [Website](https://romm.app/)
4 |
5 | ## Installation
6 |
7 | Add RomM to the environment configuration. Add secret.template.yml to your environment's secrets.yml in the default namespace.
8 |
9 | ### Enabling SSO
10 |
11 | ```bash
12 | # Generate a client secret and hash it.
13 | kubectl exec -it -n admin deployment/authelia -- authelia crypto hash generate --random
14 | ```
15 |
16 | ```jsonnet
17 | // Environment config
18 | {
19 | local config = self,
20 | domain: 'lvh.me',
21 | // Optional.
22 | tcp_ports: { ssh: 2222 },
23 | workloads: {
24 | core: {
25 | authelia_config: {
26 | identity_providers: {
27 | oidc: {
28 | clients: [
29 | {
30 | client_id: 'romm',
31 | client_name: 'RomM',
32 | client_secret: '$argon2id$v=19$m=65536,t=3,p=4$UyhbhLOY3A1ewbVo+W1v+w$8gstH/JMx9QKvK0H0Xub7sufjZDouXl8CJu6eGsm58s',
33 | consent_mode: 'implicit',
34 | authorization_policy: 'one_factor',
35 | redirect_uris: [
36 | 'https://romm.' + config.domain + '/api/oauth/openid',
37 | ],
38 | scopes: ['openid', 'email', 'profile'],
39 | claims_policy: 'romm',
40 | },
41 | ],
42 | claims_policies: {
43 | // https://github.com/rommapp/romm/issues/1927
44 | romm: {
45 | id_token: ['email', 'email_verified', 'alt_emails', 'preferred_username', 'name'],
46 | },
47 | },
48 | },
49 | },
50 | },
51 | },
52 | romm: {
53 | sso: true,
54 | },
55 | }
56 | }
57 | ```
58 |
59 | When you open the app for the first time, it will put you into the new-user experience. Create a new user whose email address matches your Authelia user's (the username and password don't matter). After completing the new-user experience, you will be directed to the SSO login.
60 |
61 | ## Maintenance
62 |
63 | You can get a database shell for doing manual maintenance using:
64 |
65 | ```bash
66 | kubectl exec -it deployments/romm -c mariadb -- mariadb romm
67 | SELECT platforms.name, COUNT(*) FROM platforms INNER JOIN roms ON roms.platform_id = platforms.id GROUP BY platforms.id;
68 | ```
69 |
--------------------------------------------------------------------------------
/workloads/dashboard/index.html:
--------------------------------------------------------------------------------
(markup stripped when this dump was generated; the original is a small HTML dashboard page titled "My Own Cluster" with a matching top-level heading)
--------------------------------------------------------------------------------
/workloads/backup/main.libsonnet:
--------------------------------------------------------------------------------
1 | local utils = import 'utils.libsonnet';
2 |
3 | {
4 | priority: 20,
5 |
6 | manifests(_config): {
7 | local module = self,
8 |
9 | cronJob: {
10 | apiVersion: 'batch/v1',
11 | kind: 'CronJob',
12 | metadata: {
13 | name: 'backup',
14 | namespace: 'admin',
15 | },
16 | spec: {
17 | schedule: '@monthly',
18 | jobTemplate: {
19 | spec: {
20 | backoffLimit: 4,
21 | template: {
22 | metadata: {
23 | name: 'backup',
24 | },
25 | spec: {
26 | restartPolicy: 'Never',
27 | containers: [
28 | {
29 | name: 'backup',
30 | image: 'alpine:latest',
31 |
32 | env: [
33 | { name: 'RESTIC_CACHE_DIR', value: '/cache' },
34 | ],
35 | envFrom: [
36 | { secretRef: { name: 'backup-secrets' } },
37 | ],
38 | volumeMounts: [
39 | { name: 'backup-script', mountPath: '/app' },
40 | { name: 'var-lib-rancher', mountPath: '/var/lib/rancher' },
41 | { name: 'opt-backup-cache', mountPath: '/cache' },
42 | ],
43 |
44 | command: ['/app/backup.sh'],
45 | },
46 | ],
47 | volumes: [
48 | {
49 | name: 'backup-script',
50 | configMap: {
51 | name: module.configMap.metadata.name,
52 | defaultMode: std.parseOctal('0755'),
53 | },
54 | },
55 | {
56 | name: 'var-lib-rancher',
57 | hostPath: { path: '/var/lib/rancher' },
58 | },
59 | {
60 | name: 'opt-backup-cache',
61 | hostPath: {
62 | path: '/opt/backup-cache',
63 | type: 'DirectoryOrCreate',
64 | },
65 | },
66 | ],
67 | },
68 | },
69 | },
70 | },
71 | },
72 | },
73 |
74 | configMap: utils.immutable_config_map({
75 | apiVersion: 'v1',
76 | kind: 'ConfigMap',
77 | metadata: {
78 | namespace: 'admin',
79 | name: 'backup-script-',
80 | },
81 | data: {
82 | 'backup.sh': importstr 'backup.sh',
83 | },
84 | }),
85 | },
86 | }
87 |
--------------------------------------------------------------------------------
/workloads/cert-manager/main.libsonnet:
--------------------------------------------------------------------------------
1 | {
2 | priority: 10,
3 |
4 | manifests(_config): {
5 | local module = self,
6 | local config = {
7 | email: error 'email required for LetsEncrypt',
8 | selfSigned: false,
9 | staging: true,
10 | hostedZoneID: error 'hostedZoneID is required when using wildcardCertificate',
11 | } + _config,
12 | local manifests = std.parseYaml(importstr 'cert-manager.yml'),
13 | local server = if config.staging then
14 | 'https://acme-staging-v02.api.letsencrypt.org/directory'
15 | else
16 | 'https://acme-v02.api.letsencrypt.org/directory',
17 | local issuerName = if config.selfSigned then 'selfsigned' else 'letsencrypt',
18 |
19 |
20 | vendor: manifests,
21 |
22 | clusterIssuer: {
23 | apiVersion: 'cert-manager.io/v1',
24 | kind: 'ClusterIssuer',
25 | metadata: {
26 | name: issuerName,
27 | },
28 | spec: if config.selfSigned then { selfSigned: {} } else {
29 | acme: {
30 | email: config.email,
31 | server: server,
32 | privateKeySecretRef: { name: 'cert-manager-key' },
33 | solvers:
34 | [{
35 | http01: {
36 | ingress: { ingressClassName: 'traefik' },
37 | },
38 | }] +
39 | if config.wildcardCertificate then
40 | [{
41 | selector: {
42 | dnsNames: ['*.' + config.domain, config.domain],
43 | },
44 | dns01: {
45 | route53: {
46 | region: 'eu-central-1',
47 | hostedZoneID: config.hostedZoneID,
48 | accessKeyIDSecretRef: {
49 | name: 'aws-access-key',
50 | key: 'AWS_ACCESS_KEY_ID',
51 | },
52 | secretAccessKeySecretRef: {
53 | name: 'aws-access-key',
54 | key: 'AWS_SECRET_ACCESS_KEY',
55 | },
56 | },
57 | },
58 | }]
59 | else [],
60 | },
61 | },
62 | },
63 |
64 | // The IngressRoute CRD that the Traefik dashboard uses doesn't cause
65 | // cert-manager to request certificates, so we do that part manually.
66 | traefikCertificate: if config.wildcardCertificate then {} else {
67 | apiVersion: 'cert-manager.io/v1',
68 | kind: 'Certificate',
69 | metadata: {
70 | name: 'traefik-tls',
71 | namespace: 'kube-system',
72 | },
73 | spec: {
74 | secretName: 'traefik-tls',
75 | dnsNames: ['traefik.' + config.domain],
76 | issuerRef: { name: issuerName },
77 | },
78 | },
79 | },
80 | }
81 |
--------------------------------------------------------------------------------
/workloads/chess2online/main.libsonnet:
--------------------------------------------------------------------------------
1 | local utils = import 'utils.libsonnet';
2 |
3 | {
4 | priority: 100,
5 |
6 | manifests(_config): {
7 | local module = self,
8 | local config = {} + _config,
9 |
10 | deployment: {
11 | apiVersion: 'apps/v1',
12 | kind: 'Deployment',
13 | metadata: {
14 | name: 'chess2online',
15 | },
16 | spec: {
17 | replicas: 1,
18 | selector: {
19 | matchLabels: {
20 | app: 'chess2online',
21 | },
22 | },
23 | template: {
24 | metadata: {
25 | labels: {
26 | app: 'chess2online',
27 | },
28 | },
29 | spec: {
30 | containers: [
31 | {
32 | name: 'chess2online',
33 | image: 'registry.gitlab.com/cgamesplay/chess2online:latest',
34 | ports: [
35 | {
36 | containerPort: 4000,
37 | },
38 | ],
39 | resources: {
40 | limits: {
41 | memory: '128Mi',
42 | },
43 | },
44 | volumeMounts: [
45 | {
46 | name: 'config',
47 | mountPath: '/app/config/production.json',
48 | subPath: 'production.json',
49 | },
50 | {
51 | name: 'data',
52 | mountPath: '/app/db',
53 | },
54 | ],
55 | },
56 | ],
57 | imagePullSecrets: [
58 | {
59 | name: 'chess2online-registry',
60 | },
61 | ],
62 | volumes: [
63 | {
64 | name: 'config',
65 | secret: {
66 | secretName: 'chess2online-config',
67 | },
68 | },
69 | {
70 | name: 'data',
71 | persistentVolumeClaim: {
72 | claimName: 'chess2online-data',
73 | },
74 | },
75 | ],
76 | },
77 | },
78 | },
79 | },
80 |
81 | pvc: {
82 | apiVersion: 'v1',
83 | kind: 'PersistentVolumeClaim',
84 | metadata: {
85 | name: 'chess2online-data',
86 | },
87 | spec: {
88 | accessModes: ['ReadWriteOnce'],
89 | resources: {
90 | requests: {
91 | storage: '1Gi',
92 | },
93 | },
94 | },
95 | },
96 |
97 | serviceIngress: utils.simple_service(config, { app: 'chess2online', port: 4000, host: 'api.chess2online.com' }),
98 | },
99 | }
100 |
--------------------------------------------------------------------------------
/workloads/forgejo/README.md:
--------------------------------------------------------------------------------
1 | # Forgejo
2 |
3 | ## Installation
4 |
5 | Add Forgejo to the environment configuration, and enable the SSH port ingress. You can use any port in place of 2222. Add secret.template.yml to your environment's secrets.yml in the default namespace.
6 |
7 | ```bash
8 | # Generate a client secret and hash it.
9 | kubectl exec -it -n admin deployment/authelia -- authelia crypto hash generate --random
10 | ```
11 |
12 | ```jsonnet
13 | // Environment config
14 | {
15 | local config = self,
16 | domain: 'lvh.me',
17 | // Optional.
18 | tcp_ports: { ssh: 2222 },
19 | workloads: {
20 | core: {
21 | authelia_config: {
22 | identity_providers: {
23 | oidc: {
24 | clients: [
25 | {
26 | client_id: 'forgejo',
27 | client_name: 'Forgejo',
28 | client_secret: '$argon2id$v=19$m=65536,t=3,p=4$4xa2WF3Kja9F8MwGX/FKRg$1UuuCHv4vYX1SHd4Yma18ZOCHVjueHIQuC+63a9QO3I',
29 | consent_mode: 'implicit',
30 | authorization_policy: 'one_factor',
31 | pkce_challenge_method: 'S256',
32 | redirect_uris: [
33 | 'https://code.' + config.domain + '/user/oauth2/authelia/callback',
34 | ],
35 | scopes: ['openid', 'email', 'profile', 'groups'],
36 | },
37 | ],
38 | },
39 | },
40 | },
41 | },
42 | forgejo: {},
43 | }
44 | }
45 | ```
46 |
47 | Once deployed, you then need to activate the OIDC client.
48 |
49 | ```bash
50 | DOMAIN=lvh.me
51 | CLIENT_SECRET=value-from-before
52 | kubectl exec deployment/forgejo -- su git -- \
53 | forgejo admin auth add-oauth \
54 | --provider=openidConnect \
55 | --name=authelia \
56 | --key=forgejo \
57 | --secret="$CLIENT_SECRET" \
58 | --auto-discover-url="https://auth.$DOMAIN/.well-known/openid-configuration" \
59 | --scopes='openid email profile groups' \
60 | --group-claim-name='groups' \
61 | --admin-group='admins'
62 | ```
63 |
64 | ## Note for local clusters
65 |
66 | When using `lvh.me` as the domain, you need to override the DNS and add the self-signed certificate to Forgejo for the auto-discover URL to work correctly. cert-manager MUST be enabled for this to work, and should be configured to use self-signed certificates.
67 |
68 | ```bash
69 | kubectl edit configmap coredns -n kube-system
70 | # Add this line after errors/health/ready
71 | # rewrite name auth.lvh.me traefik.kube-system.svc.cluster.local
72 | # Then restart coredns
73 | kubectl rollout restart -n kube-system deployment/coredns
74 |
75 | # Add the self-signed certificate for the Forgejo volume
76 | kubectl get -n admin secret/authelia-tls -o jsonpath="{.data['tls\.crt']}" | base64 -d |\
77 | kubectl exec -i deployments/forgejo -- tee -a /data/ssl.crt
78 | # Update the deployment to add the CA certificate.
79 | kubectl edit deployment/forgejo
80 | # Add this to the env section:
81 | # - name: SSL_CERT_FILE
82 | # value: /data/ssl.crt
83 | ```
84 |
--------------------------------------------------------------------------------
/driver/lima:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # @describe Set up a local cluster with Lima
3 |
4 | set -eu
5 |
6 | # @cmd Initialize a new environment
7 | # @flag --validate-args-only Don't do anything other than validate arguments
8 | # @meta require-tools limactl,kubectl,age-keygen
9 | init() {
10 | env_name=${1:?Environment name is required}
11 | if [ -n "$(limactl disk list --json "$env_name" 2>/dev/null)" ]; then
12 | echo "limactl: disk $env_name already exists" >&2
13 | exit 1
14 | fi
15 | if limactl list "$env_name" >/dev/null 2>&1; then
16 | echo "limactl: machine $env_name already exists" >&2
17 | exit 1
18 | fi
19 |
20 | [ ${argc_validate_args_only+1} ] && exit 0
21 |
22 | limactl disk create "$env_name" --format raw --size $(( 5 * 1024 * 1024 * 1024 ))
23 | limactl create \
24 | --name="$env_name" \
25 | --disk=20 \
26 | --yes \
27 | ../../driver/lima-template.yml \
28 | --set '.additionalDisks += [{"name": "'"$env_name"'", "format": false}]'
29 | limactl start "$env_name"
30 |
31 | limactl shell "$env_name" sudo \
32 | DISK_PASSWORD="${DISK_PASSWORD:?}" \
33 | BLOCK_DEVICE=/dev/vdb \
34 | FORMAT_DRIVE=1 \
35 | INSTALL_K3S_CHANNEL="${INSTALL_K3S_CHANNEL:?}" \
36 | sh < ../../driver/bootstrap.sh
37 | limactl shell "$env_name" sudo cat /etc/rancher/k3s/k3s.yaml > kubeconfig.yml
38 | limactl shell "$env_name" cat /tmp/sops-age-recipient.txt > sops-age-recipient.txt
39 | }
40 |
41 | # @cmd Unseal the cluster
42 | unseal() {
43 | env_name=${1:?Environment name is required}
44 | limactl shell "$env_name" sudo unseal
45 | }
46 |
47 | # @cmd Replace the cluster's server with a new one
48 | #
49 | # The Lima driver is designed for development use, so it does not support
50 | # snapshotting to roll back a failed deployment. To drive this home,
51 | # the --delete-existing flag is required.
52 | # @flag --delete-existing Delete the existing server before creating the new one
53 | upgrade() {
54 | env_name=${1:?Environment name is required}
55 | if [ ! ${argc_delete_existing+1} ]; then
56 | echo "--delete-existing: required" >&2
57 | exit 1
58 | fi
59 | limactl stop "$env_name"
60 | limactl delete "$env_name"
61 | limactl create \
62 | --name="$env_name" \
63 | --disk=20 \
64 | --yes \
65 | ../../driver/lima-template.yml \
66 | --set '.additionalDisks += [{"name": "'"$env_name"'", "format": false}]'
67 | limactl start "$env_name"
68 |
69 | limactl shell "$env_name" sudo \
70 | BLOCK_DEVICE=/dev/vdb \
71 | INSTALL_K3S_CHANNEL="${INSTALL_K3S_CHANNEL:?}" \
72 | sh < ../../driver/bootstrap.sh
73 | }
74 |
75 | # @cmd Delete the cluster
76 | destroy() {
77 | env_name=${1:?Environment name is required}
78 | if limactl list "$env_name" >/dev/null 2>&1; then
79 | limactl delete --force "$env_name"
80 | fi
81 | if [ -n "$(limactl disk list --json "$env_name" 2>/dev/null)" ]; then
82 | limactl disk delete "$env_name"
83 | fi
84 | }
85 |
86 | if ! command -v argc >/dev/null; then
87 | echo "This command requires argc. Install from https://github.com/sigoden/argc" >&2
88 | exit 100
89 | fi
90 | eval "$(argc --argc-eval "$0" "$@")"
91 |
--------------------------------------------------------------------------------
/workloads/core/README.md:
--------------------------------------------------------------------------------
1 | # Core Workload
2 |
3 | This workload sets up core services: sops, traefik, and authelia.
4 |
5 | ## Authelia
6 |
7 | Authelia is automatically set up, but can be customized with additional configuration.
8 |
9 | ### User management
10 |
11 | To manage users, you need to edit the users.yml database manually (an example entry is sketched at the end of this README). Authelia initially creates a database with a user `authelia` / `authelia`. This can safely be removed.
12 |
13 | ```bash
14 | POD=$(kubectl get pods -n admin -l app=authelia -o jsonpath='{.items[0].metadata.name}')
15 | # Download the users database
16 | kubectl cp -n admin "$POD:/var/lib/authelia/users.yml" users.yml
17 | # Generate a password hash
18 | kubectl exec -it -n admin "$POD" -- authelia crypto hash generate
19 | # Upload the modified file
20 | kubectl cp -n admin users.yml "$POD:/var/lib/authelia/users.yml"
21 | ```
22 |
23 | ### Access control
24 |
25 | By default, any workload protected by Authelia will allow any user. To further restrict some workloads, use Authelia's [access control configuration](https://www.authelia.com/configuration/security/access-control/). The following example allows only users in the admin group to access the Traefik dashboard.
26 |
27 | ```jsonnet
28 | {
29 | workloads: {
30 | core: {
31 | authelia_config: {
32 | access_control: {
33 | default_policy: 'one_factor',
34 | rules: [
35 | {
36 | domain: 'traefik.' + config.domain,
37 | policy: 'one_factor',
38 | subject: 'group:admins',
39 | },
40 | {
41 | domain: 'traefik.' + config.domain,
42 | policy: 'deny',
43 | },
44 | ],
45 | },
46 | }
47 | }
48 | }
49 | }
50 | ```
51 |
52 | ### OpenID Provider
53 |
54 | You can configure OpenID clients by updating the environment configuration. The configuration values will vary by client capabilities, and are documented [here](https://www.authelia.com/configuration/identity-providers/openid-connect/clients/).
55 |
56 | ```jsonnet
57 | {
58 | workloads: {
59 | core: {
60 | authelia_config: {
61 | identity_providers: {
62 | oidc: {
63 | clients: [
64 | // Place client configuration here.
65 | {
66 | client_id: 'my_client',
67 | client_name: 'My Client',
68 | client_secret: '$argon2id$v=19$m=65536,t=3,p=4$4xa2WF3Kja9F8MwGX/FKRg$1UuuCHv4vYX1SHd4Yma18ZOCHVjueHIQuC+63a9QO3I',
69 | consent_mode: 'implicit',
70 | authorization_policy: 'one_factor',
71 | pkce_challenge_method: 'S256',
72 | redirect_uris: [
73 | 'https://code.lvh.me/user/oauth2/authelia/callback',
74 | ],
75 | scopes: ['openid', 'email', 'profile', 'groups'],
76 | },
77 | ]
78 | }
79 | }
80 | }
81 | }
82 | }
83 | }
84 | ```
85 |
86 | ```bash
87 | # Generate a client secret and hash
88 | kubectl exec -it -n admin deployments/authelia -- authelia crypto hash generate --random
89 | ```
90 |
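91 | ### Example users.yml entry
92 |
93 | For reference when editing the users database above, Authelia's file backend uses entries of this shape. The values here are placeholders; generate the password hash with `authelia crypto hash generate`:
94 |
95 | ```yaml
96 | users:
97 |   ryan:
98 |     displayname: "Ryan"
99 |     password: "$argon2id$v=19$m=65536,t=3,p=4$..."
100 |     email: ryan@example.com
101 |     groups:
102 |       - admins
103 | ```
104 |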
--------------------------------------------------------------------------------
/env/prod/config.libsonnet:
--------------------------------------------------------------------------------
1 | {
2 | local config = self,
3 | domain: 'cluster.cgamesplay.com',
4 | wildcardCertificate: true,
5 | tcp_ports: { ssh: 2222 },
6 |
7 | workloads: {
8 | backup: {},
9 | 'cert-manager': {
10 | email: 'ry@cgamesplay.com',
11 | staging: false,
12 | hostedZoneID: 'Z06017189PYZQUONKTV4',
13 | },
14 | core: {
15 | secrets: importstr 'secrets.yml',
16 | mailer+: {
17 | enabled: true,
18 | sender: 'Authelia ',
19 | address: 'submission://smtp.mailgun.org:587',
20 | username: 'forgejo@mail.cgamesplay.com',
21 | },
22 | authelia_config: {
23 | access_control: {
24 | default_policy: 'one_factor',
25 | rules: [
26 | {
27 | domain: 'traefik.' + config.domain,
28 | policy: 'one_factor',
29 | subject: 'group:admins',
30 | },
31 | {
32 | domain: 'traefik.' + config.domain,
33 | policy: 'deny',
34 | },
35 | ],
36 | },
37 | identity_providers: {
38 | oidc: {
39 | clients: [
40 | {
41 | client_id: 'forgejo',
42 | client_name: 'Forgejo',
43 | client_secret: '$argon2id$v=19$m=65536,t=3,p=4$8SIHs236AJDJSCZ7Our3ag$IdeLVKaIvf4ddpAut2rYN9E+jpCCUzl3+4I6DIbXnv0',
44 | consent_mode: 'implicit',
45 | authorization_policy: 'one_factor',
46 | pkce_challenge_method: 'S256',
47 | redirect_uris: [
48 | 'https://code.' + config.domain + '/user/oauth2/authelia/callback',
49 | ],
50 | scopes: ['openid', 'email', 'profile', 'groups'],
51 | },
52 | {
53 | client_id: 'romm',
54 | client_name: 'RomM',
55 | client_secret: '$argon2id$v=19$m=65536,t=3,p=4$UyhbhLOY3A1ewbVo+W1v+w$8gstH/JMx9QKvK0H0Xub7sufjZDouXl8CJu6eGsm58s',
56 | consent_mode: 'implicit',
57 | authorization_policy: 'one_factor',
58 | redirect_uris: [
59 | 'https://romm.' + config.domain + '/api/oauth/openid',
60 | ],
61 | scopes: ['openid', 'email', 'profile'],
62 | claims_policy: 'romm',
63 | },
64 | ],
65 | claims_policies: {
66 | // https://github.com/rommapp/romm/issues/1927
67 | romm: {
68 | id_token: ['email', 'email_verified', 'alt_emails', 'preferred_username', 'name'],
69 | },
70 | },
71 | },
72 | },
73 | },
74 | },
75 | chess2online: {},
76 | dashboard: {},
77 | forgejo: {
78 | image_tag: '12.0.4',
79 | mailer+: {
80 | enabled: true,
81 | from: '"Forgejo" ',
82 | smtp_addr: 'smtp.mailgun.org',
83 | user: 'forgejo@mail.cgamesplay.com',
84 | },
85 | },
86 | romm: {
87 | sso: true,
88 | },
89 | seafile: {},
90 | },
91 | }
92 |
--------------------------------------------------------------------------------
/integration-test:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # @describe Run an integration test
3 | #
4 | # Creates a new cluster, ensures it can reboot and upgrade the server
5 | # without anything breaking.
6 | # @option --environment=test Environment name to use for the test
7 | # @meta inherit-flag-options
8 | # @meta require-tools http
9 |
10 | set -euo pipefail
11 |
12 | init_args=()
13 | upgrade_args=()
14 | ip_command=()
15 | reboot_command=()
16 | ready_command=false
17 |
18 | # @cmd Run the integration test on Lima
19 | lima() {
20 | init_args=(--driver=lima "$argc_environment")
21 | upgrade_args=("$argc_environment" --delete-existing)
22 | ip_command=(echo 127.0.0.1)
23 | reboot_command=(limactl restart "$argc_environment")
24 | ready_command=true
25 | integration_test
26 | }
27 |
28 | # @cmd Run the integration test on Hetzner
29 | hetzner() {
30 | init_args=(--driver=hetzner "$argc_environment" --location nbg1 --type cx22 --size 20)
31 | upgrade_args=("$argc_environment" --type cx22)
32 | ip_command=(hcloud server ip "$argc_environment")
33 | reboot_command=(hcloud server reboot "$argc_environment")
34 | ready_command=hetzner_ready
35 | integration_test
36 | }
37 |
38 | hetzner_ready() {
39 | # shellcheck disable=SC2016
40 | timeout 300 sh -c 'until nc -z $0 $1; do sleep 1; done' "$1" 22
41 | }
42 |
43 | integration_test() {
44 | if [ -d "env/${argc_environment:?}" ]; then
45 | echo "${argc_environment:?}: already exists" >&2; exit 1
46 | fi
47 | set -x
48 | disk_password=$(argc init "${init_args[@]}" | tee /dev/stderr | grep -A3 "DISK ENCRYPTION PASSWORD" | tail -1)
49 | ip=$("${ip_command[@]}")
50 |
51 | echo "Disk password is $disk_password"
52 | echo "IP address is $ip"
53 | export KUBECONFIG="env/$argc_environment/kubeconfig.yml"
54 |
55 | argc sync "$argc_environment" -y
56 | kubectl wait --for=jsonpath='{.subsets[*].addresses[0].ip}' -n kube-system endpoints/traefik --timeout=30s
57 | kubectl wait --for=jsonpath='{.subsets[*].addresses[0].ip}' -n admin endpoints/authelia --timeout=30s
58 | assert_service_is_up
59 |
60 | "${reboot_command[@]}"
61 | "$ready_command" "$ip"
62 |
63 | # Sanity check, server is offline
64 | if http -v --verify no --headers "https://$ip" Host:authelia.lvh.me; then
65 | echo "Sanity check failed; service reachable after reboot before unseal" >&2
66 | exit 1
67 | fi
68 |
69 | echo "$disk_password" | argc unseal "$argc_environment"
70 | kubectl wait --for=jsonpath='{.subsets[*].addresses[0].ip}' -n kube-system endpoints/traefik --timeout=30s
71 | kubectl wait --for=jsonpath='{.subsets[*].addresses[0].ip}' -n admin endpoints/authelia --timeout=30s
72 | assert_service_is_up
73 |
74 | argc upgrade "$argc_environment" "${upgrade_args[@]}"
75 |
76 | # Sanity check, server is offline
77 | if http -v --verify no --headers "https://$ip" Host:authelia.lvh.me; then
78 | echo "Sanity check failed; service reachable after upgrade before unseal" >&2
79 | exit 1
80 | fi
81 |
82 | echo "$disk_password" | argc unseal "$argc_environment"
83 | kubectl wait --for=jsonpath='{.subsets[*].addresses[0].ip}' -n kube-system endpoints/traefik --timeout=30s
84 | kubectl wait --for=jsonpath='{.subsets[*].addresses[0].ip}' -n admin endpoints/authelia --timeout=30s
85 | assert_service_is_up
86 |
87 | argc destroy "$argc_environment"
88 | }
89 |
90 | assert_service_is_up() {
91 | tries=0
92 | while ! http -v --verify no --check-status --headers "https://$ip/" Host:authelia.lvh.me; do
93 | if [ $tries -ge 5 ]; then
94 | echo "Failed to access Authelia" >&2
95 | exit 1
96 | fi
97 | sleep 5
98 | tries=$((tries + 1))
99 | done
100 | }
101 |
102 | if ! command -v argc >/dev/null; then
103 | echo "This command requires argc. Install from https://github.com/sigoden/argc" >&2
104 | exit 100
105 | fi
106 | eval "$(argc --argc-eval "$0" "$@")"
107 |
--------------------------------------------------------------------------------
/driver/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -eux
3 |
4 | apt-get update
5 | apt-get install -y age
6 |
7 | curl -sfL https://get.k3s.io | \
8 | INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:?} \
9 | INSTALL_K3S_SKIP_START=true \
10 | K3S_NODE_NAME=main \
11 | sh -
12 |
13 | mkdir -p /etc/rancher/k3s
14 |
15 | cat >/etc/rancher/k3s/config.yaml <<-EOF
16 | disable: metrics-server
17 | EOF
18 |
19 | if [ -n "$(ip -6 addr show scope global)" ]; then
20 | cat >>/etc/rancher/k3s/config.yaml <<-EOF
21 | cluster-cidr: 10.42.0.0/16,fd00:cafe:42::/56
22 | service-cidr: 10.43.0.0/16,fd00:cafe:43::/112
23 | flannel-ipv6-masq: true
24 | EOF
25 | fi
26 |
27 | mkdir -p /etc/systemd/system/k3s.service.d/
28 | cat >/etc/systemd/system/k3s.service.d/override.conf <<-EOF
29 | [Unit]
30 | After=var-lib-rancher.mount
31 | Requires=var-lib-rancher.mount
32 | EOF
33 |
34 | cat >/usr/local/bin/unseal <<-EOF
35 | #!/bin/sh
36 | set -e
37 | cryptsetup luksOpen "${BLOCK_DEVICE:?}" data
38 | echo "Drive opened successfully. Starting k3s..."
39 | systemctl start k3s
40 | EOF
41 | chmod +x /usr/local/bin/unseal
42 |
43 | echo "/dev/mapper/data /var/lib/rancher ext4 noauto 0 0" >>/etc/fstab
44 | echo "data ${BLOCK_DEVICE:?} none noauto,headless=true" >>/etc/crypttab
45 |
46 | # Set up a local (ephemeral) containerd storage area
47 | mkdir -m 700 /opt/containerd
48 |
49 | if [ ${FORMAT_DRIVE:+1} ]; then
50 | KEY_FILE=/run/disk.key
51 | set +x
52 | printf "%s" "${DISK_PASSWORD:?}" > "$KEY_FILE"
53 | set -x
54 |
55 | cryptsetup luksFormat "$BLOCK_DEVICE" -d "$KEY_FILE"
56 | cryptsetup luksOpen "$BLOCK_DEVICE" data -d "$KEY_FILE"
57 | rm "$KEY_FILE"
58 | mkfs.ext4 /dev/mapper/data
59 | systemctl daemon-reload
60 | mkdir /var/lib/rancher
61 | mount "/var/lib/rancher"
62 |
63 | mkdir -m 700 /var/lib/rancher/k3s
64 | mkdir -m 700 /var/lib/rancher/k3s/server
65 | mkdir -m 700 /var/lib/rancher/k3s/server/static
66 | mkdir -m 700 /var/lib/rancher/k3s/server/static/charts
67 | mkdir -m 700 /var/lib/rancher/k3s/server/manifests
68 | mkdir -m 700 /var/lib/rancher/k3s/agent
69 | ln -s /opt/containerd /var/lib/rancher/k3s/agent/containerd
70 |
71 | # Need to vendor the helm chart to ensure that it doesn't get
72 | # tampered with.
73 | # https://isindir.github.io/sops-secrets-operator/index.yaml
74 | curl -fsSL https://isindir.github.io/sops-secrets-operator/sops-secrets-operator-0.22.0.tgz -o /var/lib/rancher/k3s/server/static/charts/sops-secrets-operator-0.22.0.tgz
75 | shasum -c <<-EOF
76 | 28ebe7da0812a9f6cabc9d655dec2f7bb4ad7af789751afdb998eb0f570d1543 /var/lib/rancher/k3s/server/static/charts/sops-secrets-operator-0.22.0.tgz
77 | EOF
78 |
79 | # Drop it in as an add-on. We also want to pin the digest here
80 | # since it processes all of our secrets. This hash can be found
81 | # through: docker pull isindir/sops-secrets-operator:0.16.0
82 | cat >/var/lib/rancher/k3s/server/manifests/sops-secrets-operator.yaml <<-EOF
83 | apiVersion: helm.cattle.io/v1
84 | kind: HelmChart
85 | metadata:
86 | name: sops-secrets-operator
87 | namespace: kube-system
88 | spec:
89 | chart: https://%{KUBERNETES_API}%/static/charts/sops-secrets-operator-0.22.0.tgz
90 | targetNamespace: kube-system
91 | valuesContent: |-
92 | image:
93 | repository: isindir/sops-secrets-operator
94 | tag: 0.16.0@sha256:252fc938071a3087b532f5fe4465aff0967c822d5fd4ba271fbb586c522311a6
95 | secretsAsFiles:
96 | - mountPath: /etc/sops-age-key-file
97 | name: sops-age-key-file
98 | secretName: sops-age-key-file
99 | extraEnv:
100 | - name: SOPS_AGE_KEY_FILE
101 | value: /etc/sops-age-key-file/key
102 | EOF
103 |
104 | service k3s start
105 |
106 | # Create the age key
107 | age-keygen -o /run/age.key
108 | age-keygen -y /run/age.key > /tmp/sops-age-recipient.txt
109 | kubectl create secret generic -n kube-system sops-age-key-file --from-file=key=/run/age.key
110 | rm -f /run/age.key
111 |
112 | # Wait for k3s to finish its install procedure.
113 | while ! kubectl wait --for condition=established --timeout=10s crd/ingressroutes.traefik.io; do
114 | sleep 1
115 | done
116 |
117 | else
118 | systemctl daemon-reload
119 | echo "The server will start normally once unseal is complete." >&2
120 | fi
121 |
--------------------------------------------------------------------------------
/workloads/utils.libsonnet:
--------------------------------------------------------------------------------
1 | {
2 | // Declares the typical Ingress needed to expose a service via Traefik. The
3 | // matching Service object must expose a port named 'http'.
4 | local traefik_ingress(module_config, _ingress_config) = {
5 | local domain = module_config.domain,
6 | local ingress_config = {
7 | // Required. Name of the workload.
8 | app: error 'App name required',
9 | // Optional. Override namespace.
10 | namespace: null,
11 | // Optional. Override the public host.
12 | host: _ingress_config.app + '.' + domain,
13 | // Optional. Traefik middlewares to apply.
14 | middlewares: [],
15 | } + _ingress_config,
16 |
17 | apiVersion: 'networking.k8s.io/v1',
18 | kind: 'Ingress',
19 | metadata: {
20 | name: ingress_config.app,
21 | annotations: {
22 | 'cert-manager.io/cluster-issuer': 'letsencrypt',
23 | } + if ingress_config.middlewares != [] then
24 | { 'traefik.ingress.kubernetes.io/router.middlewares': std.join(',', ingress_config.middlewares) }
25 | else {},
26 | } + if ingress_config.namespace != null then { namespace: ingress_config.namespace } else {},
27 | spec: {
28 | rules: [
29 | {
30 | host: ingress_config.host,
31 | http: {
32 | paths: [
33 | {
34 | path: '/',
35 | pathType: 'Prefix',
36 | backend: {
37 | service: {
38 | name: ingress_config.app,
39 | port: { name: 'http' },
40 | },
41 | },
42 | },
43 | ],
44 | },
45 | },
46 | ],
47 | tls: [
48 | if module_config.wildcardCertificate && (std.endsWith(ingress_config.host, '.' + domain) || ingress_config.host == domain) then
49 | {
50 | secretName: 'tls-' + domain,
51 | hosts: [domain, '*.' + domain],
52 | }
53 | else
54 | {
55 | secretName: ingress_config.app + '-tls',
56 | hosts: [ingress_config.host],
57 | },
58 | ],
59 | },
60 | },
61 | traefik_ingress: traefik_ingress,
62 |
63 | // Typical service with optional ingress configuration.
64 | simple_service(module_config, _service_config):
65 | local domain = module_config.domain;
66 | local service_config = {
67 | // Required. Name of the workload.
68 | app: error 'App name required',
69 | // Optional. Override namespace.
70 | namespace: null,
71 | // Required. Port to connect to.
72 | port: error 'Port required',
73 | // Optional. Disable the default ingress route.
74 | ingress: true,
75 | } + _service_config;
76 | {
77 | service: {
78 | apiVersion: 'v1',
79 | kind: 'Service',
80 | metadata: {
81 | name: service_config.app,
82 | namespace: service_config.namespace,
83 | },
84 | spec: {
85 | selector: {
86 | app: service_config.app,
87 | },
88 | ports: [
89 | { name: 'http', port: service_config.port },
90 | ],
91 | },
92 | },
93 | ingress: if service_config.ingress then traefik_ingress(module_config, service_config) else {},
94 | },
95 |
96 | // Helper to hash config data for use as an immutable ConfigMap object.
97 | // The existing metadata.name is treated as a prefix and should end in
98 | // "-".
99 | immutable_config_map(manifest): manifest {
100 | metadata+: {
101 | name: manifest.metadata.name + std.md5(std.manifestJson(manifest.data)),
102 | },
103 | immutable: true,
104 | },
105 |
106 | // This function substitutes all occurrences of `${foo}` with
107 | // `vars.foo` in the template.
108 | varSubstitute(template, vars):
109 | local subNext(prefix, rest) =
110 | local parts = std.splitLimit(rest, '$', 1);
111 | if std.length(parts) == 1 then
112 | // No more substitutions in string
113 | prefix + rest
114 | else if parts[1][0] == '$' then
115 | // Escaped $
116 | subNext(prefix + parts[0] + '$', parts[1][1:])
117 | else if parts[1][0] == '{' then
118 | // Make a substitution
119 | local parts2 = std.splitLimit(parts[1][1:], '}', 1);
120 | subNext(prefix + parts[0] + vars[parts2[0]], parts2[1])
121 | else
122 | // Unescaped $
123 | subNext(prefix + parts[0] + '$', parts[1]);
124 | subNext('', template),
125 |
126 | // This is really useful if you want to make an array out of
127 | // constituent parts which may be lists or optional.
128 | //
129 | // Returns the passed array with:
130 | // 1. Nulls removed
131 | // 2. Any elements that are arrays flattened in, e.g. join([1, null, [2, 3]]) == [1, 2, 3].
132 | join(a):
133 | local notNull(i) = i != null;
134 | local maybeFlatten(acc, i) = if std.type(i) == 'array' then acc + i else acc + [i];
135 | std.foldl(maybeFlatten, std.filter(notNull, a), []),
136 | }
137 |
--------------------------------------------------------------------------------
/workloads/open-webui/main.libsonnet:
--------------------------------------------------------------------------------
1 | local utils = import '../utils.libsonnet';
2 |
3 | {
4 | priority: 100,
5 |
6 | manifests(_config): {
7 | local module = self,
8 | local config = {
9 | image_tag: '0.6.15',
10 | } + _config,
11 |
12 | deployment: {
13 | apiVersion: 'apps/v1',
14 | kind: 'Deployment',
15 | metadata: {
16 | name: 'open-webui',
17 | },
18 | spec: {
19 | replicas: 1,
20 | selector: {
21 | matchLabels: {
22 | app: 'open-webui',
23 | },
24 | },
25 | template: {
26 | metadata: {
27 | labels: {
28 | app: 'open-webui',
29 | },
30 | },
31 | spec: {
32 | containers: [
33 | {
34 | name: 'open-webui',
35 | image: 'ghcr.io/open-webui/open-webui:' + config.image_tag,
36 | ports: [
37 | {
38 | containerPort: 8080,
39 | },
40 | ],
41 | env: [
42 | {
43 | name: 'WEBUI_SECRET_KEY',
44 | valueFrom: {
45 | secretKeyRef: {
46 | name: 'open-webui-secrets',
47 | key: 'WEBUI_SECRET_KEY',
48 | },
49 | },
50 | },
51 | {
52 | name: 'ANTHROPIC_API_KEY',
53 | valueFrom: {
54 | secretKeyRef: {
55 | name: 'open-webui-secrets',
56 | key: 'ANTHROPIC_API_KEY',
57 | },
58 | },
59 | },
60 | {
61 | name: 'OPENAI_API_KEY',
62 | valueFrom: {
63 | secretKeyRef: {
64 | name: 'open-webui-secrets',
65 | key: 'OPENAI_API_KEY',
66 | },
67 | },
68 | },
69 | {
70 | name: 'GOOGLE_PSE_API_KEY',
71 | valueFrom: {
72 | secretKeyRef: {
73 | name: 'open-webui-secrets',
74 | key: 'GOOGLE_PSE_API_KEY',
75 | },
76 | },
77 | },
78 | {
79 | name: 'GOOGLE_PSE_ENGINE_ID',
80 | valueFrom: {
81 | secretKeyRef: {
82 | name: 'open-webui-secrets',
83 | key: 'GOOGLE_PSE_ENGINE_ID',
84 | },
85 | },
86 | },
87 | {
88 | name: 'WEBUI_URL',
89 | value: 'https://open-webui.' + config.domain + '/',
90 | },
91 | {
92 | name: 'ENABLE_OLLAMA_API',
93 | value: 'False',
94 | },
95 | {
96 | name: 'RAG_EMBEDDING_ENGINE',
97 | value: 'openai',
98 | },
99 | {
100 | name: 'AUDIO_STT_ENGINE',
101 | value: 'openai',
102 | },
103 | {
104 | name: 'ENABLE_RAG_WEB_SEARCH',
105 | value: 'True',
106 | },
107 | {
108 | name: 'RAG_WEB_SEARCH_ENGINE',
109 | value: 'google_pse',
110 | },
111 | {
112 | name: 'RAG_WEB_SEARCH_RESULT_COUNT',
113 | value: '3',
114 | },
115 | {
116 | name: 'RAG_WEB_SEARCH_CONCURRENT_REQUESTS',
117 | value: '10',
118 | },
119 | ],
120 | volumeMounts: [
121 | {
122 | name: 'data',
123 | mountPath: '/app/backend/data',
124 | },
125 | ],
126 | resources: {
127 | requests: {
128 | memory: '512Mi',
129 | },
130 | limits: {
131 | memory: '1Gi',
132 | },
133 | },
134 | livenessProbe: {
135 | httpGet: {
136 | path: '/health',
137 | port: 8080,
138 | },
139 | initialDelaySeconds: 30,
140 | periodSeconds: 30,
141 | timeoutSeconds: 10,
142 | },
143 | },
144 | ],
145 | volumes: [
146 | {
147 | name: 'data',
148 | persistentVolumeClaim: {
149 | claimName: 'open-webui-data',
150 | },
151 | },
152 | ],
153 | },
154 | },
155 | },
156 | },
157 |
158 | pvc: {
159 | apiVersion: 'v1',
160 | kind: 'PersistentVolumeClaim',
161 | metadata: {
162 | name: 'open-webui-data',
163 | },
164 | spec: {
165 | accessModes: ['ReadWriteOnce'],
166 | resources: {
167 | requests: {
168 | storage: '10Gi',
169 | },
170 | },
171 | },
172 | },
173 |
174 | serviceIngress: utils.simple_service(config, { app: 'open-webui', port: 8080 }),
175 | },
176 | }
177 |
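178 | // Deployment sketch (illustrative): enable this workload from an
179 | // environment's config.libsonnet; image_tag is optional and defaults to
180 | // the value above:
181 | //
182 | //   workloads: { 'open-webui': { image_tag: '0.6.15' } },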
--------------------------------------------------------------------------------
/workloads/seafile/main.libsonnet:
--------------------------------------------------------------------------------
1 | local utils = import '../utils.libsonnet';
2 |
3 | {
4 | priority: 100,
5 |
6 | manifests(_config):
7 | local config = {
8 | // was 10.0.15
9 | image_tag: '12.0-latest',
10 | } + _config;
11 | {
12 | local module = self,
13 |
14 | configMap: utils.immutable_config_map({
15 | apiVersion: 'v1',
16 | kind: 'ConfigMap',
17 | metadata: {
18 | name: 'seafile-config-',
19 | },
20 | data: {
21 | MYSQL_ALLOW_EMPTY_PASSWORD: 'true',
22 | MYSQL_LOG_CONSOLE: 'true',
23 | },
24 | }),
25 |
26 | deployment: {
27 | apiVersion: 'apps/v1',
28 | kind: 'Deployment',
29 | metadata: {
30 | name: 'seafile',
31 | },
32 | spec: {
33 | replicas: 1,
34 | strategy: {
35 | // Seafile refuses to start if another server has a lock on the
36 | // data directory.
37 | type: 'Recreate',
38 | },
39 | selector: {
40 | matchLabels: {
41 | app: 'seafile',
42 | },
43 | },
44 | template: {
45 | metadata: {
46 | labels: {
47 | app: 'seafile',
48 | },
49 | },
50 | spec: {
51 | imagePullSecrets: [
52 | { name: 'seafile-registry-secret' },
53 | ],
54 | containers: [
55 | {
56 | name: 'seafile',
57 | image: 'seafileltd/seafile-pro-mc:' + config.image_tag,
58 | ports: [
59 | {
60 | containerPort: 80,
61 | name: 'http',
62 | },
63 | ],
64 | resources: {
65 | requests: {
66 | memory: '768Mi',
67 | },
68 | limits: {
69 | memory: '2Gi',
70 | },
71 | },
72 | volumeMounts: [
73 | {
74 | name: 'seafile-data',
75 | mountPath: '/shared',
76 | },
77 | ],
78 | env: [
79 | {
80 | name: 'JWT_PRIVATE_KEY',
81 | valueFrom: {
82 | secretKeyRef: {
83 | name: 'seafile',
84 | key: 'JWT_PRIVATE_KEY',
85 | },
86 | },
87 | },
88 | ],
89 | livenessProbe: {
90 | httpGet: {
91 | path: '/api2/ping/',
92 | port: 80,
93 | },
94 | initialDelaySeconds: 30,
95 | periodSeconds: 30,
96 | timeoutSeconds: 2,
97 | failureThreshold: 3,
98 | },
99 | readinessProbe: {
100 | httpGet: {
101 | path: '/api2/ping/',
102 | port: 80,
103 | },
104 | initialDelaySeconds: 5,
105 | periodSeconds: 5,
106 | timeoutSeconds: 2,
107 | },
108 | },
109 | {
110 | name: 'mariadb',
111 | image: 'mariadb:10.11',
112 | args: ['--datadir=/shared/mariadb'],
113 | envFrom: [
114 | {
115 | configMapRef: {
116 | name: module.configMap.metadata.name,
117 | },
118 | },
119 | ],
120 | resources: {
121 | requests: {
122 | memory: '200Mi',
123 | },
124 | limits: {
125 | memory: '200Mi',
126 | },
127 | },
128 | volumeMounts: [
129 | {
130 | name: 'mariadb-data',
131 | mountPath: '/shared/mariadb',
132 | },
133 | ],
134 | },
135 | {
136 | name: 'memcached',
137 | image: 'memcached:1.6.18',
138 | command: ['memcached', '-m', '60'],
139 | resources: {
140 | requests: {
141 | memory: '64Mi',
142 | },
143 | limits: {
144 | memory: '64Mi',
145 | },
146 | },
147 | },
148 | ],
149 | volumes: [
150 | {
151 | name: 'seafile-data',
152 | persistentVolumeClaim: {
153 | claimName: 'seafile-data',
154 | },
155 | },
156 | {
157 | name: 'mariadb-data',
158 | persistentVolumeClaim: {
159 | claimName: 'mariadb-data',
160 | },
161 | },
162 | ],
163 | },
164 | },
165 | },
166 | },
167 |
168 | seafileDataPvc: {
169 | apiVersion: 'v1',
170 | kind: 'PersistentVolumeClaim',
171 | metadata: {
172 | name: 'seafile-data',
173 | },
174 | spec: {
175 | accessModes: ['ReadWriteOnce'],
176 | resources: {
177 | requests: {
178 | storage: '10Gi',
179 | },
180 | },
181 | },
182 | },
183 |
184 | mariadbDataPvc: {
185 | apiVersion: 'v1',
186 | kind: 'PersistentVolumeClaim',
187 | metadata: {
188 | name: 'mariadb-data',
189 | },
190 | spec: {
191 | accessModes: ['ReadWriteOnce'],
192 | resources: {
193 | requests: {
194 | storage: '5Gi',
195 | },
196 | },
197 | },
198 | },
199 |
200 | serviceIngress: utils.simple_service(config, { app: 'seafile', port: 80 }),
201 | },
202 | }
203 |
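204 | // Note: utils.immutable_config_map appends an md5 of the data to
205 | // metadata.name, so 'seafile-config-' renders as 'seafile-config-<hash>',
206 | // and the mariadb container's envFrom follows renames automatically via
207 | // module.configMap.metadata.name.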
--------------------------------------------------------------------------------
/workloads/core/main.libsonnet:
--------------------------------------------------------------------------------
1 | local autheliaConfig = import './authelia.libsonnet';
2 | local utils = import 'utils.libsonnet';
3 |
4 | {
5 | auth_middleware: 'admin-authelia@kubernetescrd',
6 |
7 | priority: 0,
8 |
9 | manifests(_config): {
10 | local module = self,
11 | local config = {
12 | verbose: false,
13 | authelia_tag: '4.39',
14 | // Additional mixin for the Authelia configuration.yml
15 | authelia_config: {},
16 | // Can be used to allocate additional TCP listeners, key is label, value
17 | // is port number.
18 | tcp_ports: {},
19 | // Use mailer+: { enabled: true, ... } to enable, and set the
20 | // smtp_passwd secret.
21 | mailer: {
22 | enabled: false,
23 | sender: 'Authelia <noreply@%s>' % _config.domain,
24 | address: error 'address is required',
25 | username: error 'username is required',
26 | identifier: _config.domain,
27 | },
28 | } + _config,
29 |
30 | sopsSecrets: {
31 | [std.get(x.metadata, 'namespace', 'default') + ':' + x.metadata.name]: x
32 | for x in std.parseYaml(config.secrets)
33 | },
34 |
35 | traefikChartConfig: {
36 | apiVersion: 'helm.cattle.io/v1',
37 | kind: 'HelmChartConfig',
38 | metadata: {
39 | name: 'traefik',
40 | namespace: 'kube-system',
41 | },
42 | spec: {
43 | valuesContent: std.manifestYamlDoc({
44 | ports: {
45 | web: {
46 | redirections: {
47 | entryPoint: {
48 | to: 'websecure',
49 | scheme: 'https',
50 | permanent: true,
51 | },
52 | },
53 | },
54 | websecure: {
55 | asDefault: true,
56 | },
57 | metrics: null,
58 | } + {
59 | [name]: {
60 | port: port,
61 | expose: {
62 | default: true,
63 | },
64 | exposedPort: port,
65 | }
66 | for name in std.objectFields(config.tcp_ports)
67 | for port in [config.tcp_ports[name]]
68 | },
69 | service: {
70 | spec: {
71 | externalTrafficPolicy: 'Local',
72 | },
73 | },
74 | ingressRoute: {
75 | dashboard: {
76 | enabled: true,
77 | matchRule: 'Host(`traefik.' + config.domain + '`)',
78 | entryPoints: ['websecure'],
79 | middlewares: [{ name: $.auth_middleware }],
80 | // NOTE: certificate is manually requested in cert-manager
81 | // workload.
82 | tls: if config.wildcardCertificate then
83 | { secretName: 'tls-' + config.domain }
84 | else
85 | { secretName: 'traefik-tls' },
86 | },
87 | },
88 | providers: {
89 | kubernetesCRD: {
90 | allowCrossNamespace: true,
91 | },
92 | },
93 | metrics: {
94 | prometheus: null,
95 | },
96 | globalArguments: null,
97 | logs: {
98 | general: {
99 | level: if config.verbose then 'DEBUG' else 'INFO',
100 | },
101 | access: {
102 | enabled: true,
103 | },
104 | },
105 | }),
106 | },
107 | },
108 |
109 | adminNamespace: {
110 | apiVersion: 'v1',
111 | kind: 'Namespace',
112 | metadata: {
113 | name: 'admin',
114 | },
115 | },
116 |
117 | autheliaConfig: utils.immutable_config_map({
118 | apiVersion: 'v1',
119 | kind: 'ConfigMap',
120 | metadata: {
121 | namespace: 'admin',
122 | name: 'authelia-',
123 | },
124 | data: {
125 | 'configuration.yml': std.manifestYamlDoc(autheliaConfig(config) + config.authelia_config),
126 | },
127 | }),
128 |
129 | autheliaVolume: {
130 | apiVersion: 'v1',
131 | kind: 'PersistentVolumeClaim',
132 | metadata: {
133 | namespace: 'admin',
134 | name: 'authelia',
135 | },
136 | spec: {
137 | accessModes: ['ReadWriteOnce'],
138 | resources: {
139 | requests: {
140 | storage: '1Gi',
141 | },
142 | },
143 | },
144 | },
145 |
146 | autheliaDeployment: {
147 | apiVersion: 'apps/v1',
148 | kind: 'Deployment',
149 | metadata: {
150 | name: 'authelia',
151 | namespace: 'admin',
152 | },
153 | spec: {
154 | replicas: 1,
155 | selector: {
156 | matchLabels: {
157 | app: 'authelia',
158 | },
159 | },
160 | template: {
161 | metadata: {
162 | labels: {
163 | app: 'authelia',
164 | },
165 | },
166 | spec: {
167 | enableServiceLinks: false,
168 | containers: [
169 | {
170 | name: 'authelia',
171 | image: 'docker.io/authelia/authelia:' + config.authelia_tag,
172 | env: [
173 | { name: 'X_AUTHELIA_CONFIG', value: '/etc/authelia' },
174 | ],
175 | envFrom: [
176 | { secretRef: { name: 'authelia' } },
177 | ],
178 | volumeMounts: [
179 | {
180 | name: 'secrets',
181 | mountPath: '/etc/authelia/configuration.secret.yml',
182 | subPath: 'configuration.secret.yml',
183 | },
184 | {
185 | name: 'config',
186 | mountPath: '/etc/authelia/configuration.yml',
187 | subPath: 'configuration.yml',
188 | },
189 | { name: 'data', mountPath: '/var/lib/authelia' },
190 | ],
191 | resources: {
192 | limits: {
193 | memory: '512Mi',
194 | },
195 | },
196 | },
197 | ],
198 | volumes: [
199 | {
200 | name: 'config',
201 | configMap: { name: module.autheliaConfig.metadata.name },
202 | },
203 | {
204 | name: 'secrets',
205 | secret: { secretName: 'authelia' },
206 | },
207 | {
208 | name: 'data',
209 | persistentVolumeClaim: { claimName: module.autheliaVolume.metadata.name },
210 | },
211 | ],
212 | },
213 | },
214 | },
215 | },
216 |
217 | autheliaServiceIngress: utils.simple_service(config, { app: 'authelia', namespace: 'admin', port: 9091, host: 'auth.' + config.domain }),
218 |
219 | autheliaMiddleware: {
220 | apiVersion: 'traefik.io/v1alpha1',
221 | kind: 'Middleware',
222 | metadata: {
223 | name: 'authelia',
224 | namespace: 'admin',
225 | },
226 | spec: {
227 | forwardAuth: {
228 | address: 'http://authelia.admin.svc.cluster.local:9091/api/authz/forward-auth',
229 | authResponseHeaders: ['Remote-User', 'Remote-Groups', 'Remote-Name', 'Remote-Email'],
230 | },
231 | },
232 | },
233 | },
234 | }
235 |
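236 | // Configuration sketch (illustrative): an environment can allocate extra
237 | // Traefik TCP entrypoints and mix settings into Authelia's
238 | // configuration.yml, e.g.:
239 | //
240 | //   tcp_ports: { ssh: 2222 },
241 | //   authelia_config: { log: { level: 'debug' } },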
--------------------------------------------------------------------------------
/workloads/romm/main.libsonnet:
--------------------------------------------------------------------------------
1 | local core = import 'core/main.libsonnet';
2 | local utils = import 'utils.libsonnet';
3 |
4 | local app_ini = (
5 | function()
6 | local template = importstr 'app.ini';
7 | template
8 | )();
9 |
10 | {
11 | priority: 100,
12 |
13 | manifests(_config):
14 | local config = {
15 | image_tag: '4',
16 | sso: false,
17 | } + _config;
18 | {
19 | local module = self,
20 |
21 | deployment: {
22 | apiVersion: 'apps/v1',
23 | kind: 'Deployment',
24 | metadata: {
25 | name: 'romm',
26 | },
27 | spec: {
28 | replicas: 1,
29 | strategy: {
30 | type: 'Recreate',
31 | },
32 | selector: {
33 | matchLabels: {
34 | app: 'romm',
35 | },
36 | },
37 | template: {
38 | metadata: {
39 | labels: {
40 | app: 'romm',
41 | },
42 | },
43 | spec: {
44 | enableServiceLinks: false,
45 | initContainers: [
46 | {
47 | name: 'init-config',
48 | image: 'busybox:latest',
49 | command: ['sh', '-c', 'touch /romm/config/config.yml'],
50 | volumeMounts: [
51 | {
52 | name: 'assets',
53 | mountPath: '/romm/config',
54 | subPath: 'config',
55 | },
56 | ],
57 | },
58 | ],
59 | containers: [
60 | {
61 | name: 'romm',
62 | image: 'rommapp/romm:' + config.image_tag,
63 | ports: [
64 | { containerPort: 8080 },
65 | ],
66 | volumeMounts: [
67 | {
68 | name: 'cache',
69 | mountPath: '/romm/resources',
70 | subPath: 'resources',
71 | },
72 | {
73 | name: 'cache',
74 | mountPath: '/redis-data',
75 | subPath: 'redis-data',
76 | },
77 | {
78 | name: 'assets',
79 | mountPath: '/romm/library',
80 | subPath: 'library',
81 | },
82 | {
83 | name: 'assets',
84 | mountPath: '/romm/assets',
85 | subPath: 'assets',
86 | },
87 | {
88 | name: 'assets',
89 | mountPath: '/romm/config',
90 | subPath: 'config',
91 | },
92 | ],
93 | envFrom: [
94 | { secretRef: { name: 'romm' } },
95 | ],
96 | env: utils.join([
97 | [
98 | { name: 'DB_HOST', value: '127.0.0.1' },
99 | { name: 'DB_NAME', value: 'romm' },
100 | { name: 'DB_USER', value: 'romm' },
101 | { name: 'DB_PASSWD', value: 'romm' },
102 | ],
103 | if config.sso then [
104 | { name: 'DISABLE_USERPASS_LOGIN', value: 'true' },
105 | { name: 'OIDC_ENABLED', value: 'true' },
106 | { name: 'OIDC_PROVIDER', value: 'authelia' },
107 | { name: 'OIDC_REDIRECT_URI', value: 'https://romm.' + config.domain + '/api/oauth/openid' },
108 | { name: 'OIDC_SERVER_APPLICATION_URL', value: 'https://auth.' + config.domain + '/' },
109 | ],
110 | ]),
111 | resources: {
112 | requests: {
113 | memory: '200Mi',
114 | },
115 | limits: {
116 | memory: '512Mi',
117 | },
118 | },
119 | startupProbe: {
120 | httpGet: {
121 | path: '/auth/logout',
122 | port: 8080,
123 | },
124 | initialDelaySeconds: 45,
125 | periodSeconds: 10,
126 | timeoutSeconds: 5,
127 | failureThreshold: 3,
128 | },
129 | },
130 | {
131 | name: 'mariadb',
132 | image: 'mariadb:12',
133 | env: [
134 | { name: 'MARIADB_ALLOW_EMPTY_ROOT_PASSWORD', value: '1' },
135 | { name: 'MARIADB_USER', value: 'romm' },
136 | { name: 'MARIADB_PASSWORD', value: 'romm' },
137 | { name: 'MARIADB_DATABASE', value: 'romm' },
138 | ],
139 | resources: {
140 | requests: {
141 | memory: '200Mi',
142 | },
143 | limits: {
144 | memory: '200Mi',
145 | },
146 | },
147 | volumeMounts: [
148 | { name: 'mariadb', mountPath: '/var/lib/mysql' },
149 | ],
150 | },
151 | ],
152 | volumes: [
153 | {
154 | name: 'assets',
155 | persistentVolumeClaim: { claimName: 'romm-assets' },
156 | },
157 | {
158 | name: 'cache',
159 | persistentVolumeClaim: { claimName: 'romm-cache' },
160 | },
161 | {
162 | name: 'mariadb',
163 | persistentVolumeClaim: { claimName: 'romm-mariadb' },
164 | },
165 | ],
166 | },
167 | },
168 | },
169 | },
170 |
171 | pvcs: {
172 | assets: {
173 | apiVersion: 'v1',
174 | kind: 'PersistentVolumeClaim',
175 | metadata: {
176 | name: 'romm-assets',
177 | },
178 | spec: {
179 | accessModes: ['ReadWriteOnce'],
180 | resources: {
181 | requests: {
182 | storage: '10Gi',
183 | },
184 | },
185 | },
186 | },
187 |
188 | cache: {
189 | apiVersion: 'v1',
190 | kind: 'PersistentVolumeClaim',
191 | metadata: {
192 | name: 'romm-cache',
193 | },
194 | spec: {
195 | accessModes: ['ReadWriteOnce'],
196 | resources: {
197 | requests: {
198 | storage: '10Gi',
199 | },
200 | },
201 | },
202 | },
203 |
204 | mariadb: {
205 | apiVersion: 'v1',
206 | kind: 'PersistentVolumeClaim',
207 | metadata: {
208 | name: 'romm-mariadb',
209 | },
210 | spec: {
211 | accessModes: ['ReadWriteOnce'],
212 | resources: {
213 | requests: {
214 | storage: '1Gi',
215 | },
216 | },
217 | },
218 | },
219 | },
220 |
221 | serviceIngress: utils.simple_service(config, { app: 'romm', port: 8080 }),
222 | },
223 | }
224 |
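225 | // Configuration sketch (illustrative): setting sso: true switches logins
226 | // to Authelia via the OIDC_* variables above; the client credentials are
227 | // read from the 'romm' secret:
228 | //
229 | //   workloads: { romm: { sso: true } },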
--------------------------------------------------------------------------------
/driver/hetzner:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # @describe Set up a production cluster with Hetzner
3 | set -euo pipefail
4 |
5 | # @cmd Initialize a new environment
6 | # @flag --validate-args-only Don't do anything other than validate arguments
7 | # @option --location! Location to create the cluster in
8 | # @option --size! Size for data volume
9 | # @option --type! Server type to use
10 | # @option --image=ubuntu-24.04 Image to use for server
11 | # @option --ssh-key=`default_ssh_key` SSH key to use
12 | # @meta require-tools kubectl,terraform
13 | # @meta require-tools hcloud,jq,sops,age-keygen,nc
14 | init() {
15 | env_name=${1:?Environment name is required}
16 |
17 | if hcloud firewall describe "$env_name" >/dev/null 2>&1; then
18 | echo "hcloud: firewall $env_name already exists" >&2
19 | exit 1
20 | fi
21 | if hcloud volume describe "data-$env_name" >/dev/null 2>&1; then
22 | echo "hcloud: volume data-$env_name already exists" >&2
23 | exit 1
24 | fi
25 |
26 | [ ${argc_validate_args_only+1} ] && exit 0
27 |
28 | hcloud firewall create \
29 | --name "$env_name" \
30 | --label env="$env_name" \
31 | --rules-file ../../driver/hetzner-firewall.json
32 | volume=$(hcloud volume create \
33 | --name "data-$env_name" \
34 | --label env="$env_name" \
35 | --size "${argc_size:?}" \
36 | --location "${argc_location:?}" \
37 | -o json | jq '.volume.id')
38 | user_data=$(cat <<-EOF
39 | #cloud-config
40 | timezone: UTC
41 | EOF
42 | )
43 | args=(
44 | --name "$env_name"
45 | --type "${argc_type:?}"
46 | --location "${argc_location:?}"
47 | --image "${argc_image:?}"
48 | --firewall "$env_name"
49 | --ssh-key "${argc_ssh_key:?}"
50 | --volume "$volume"
51 | --user-data-from-file <(echo "$user_data")
52 | --label env="$env_name"
53 | )
54 | server=$(hcloud server create -o json "${args[@]}")
55 | server_ip=$(echo "$server" | jq -r '.server.public_net.ipv4.ip')
56 |
57 | # Mark the primary IPs as belonging to this environment
58 | ipv4_id=$(echo "$server" | jq -r '.server.public_net.ipv4.id')
59 | if [ "$ipv4_id" ] && [ "$ipv4_id" != "null" ]; then  # jq prints "null" when absent
60 | hcloud primary-ip add-label "$ipv4_id" env="$env_name"
61 | hcloud primary-ip update "$ipv4_id" --auto-delete=false
62 | fi
63 | ipv6_id=$(echo "$server" | jq -r '.server.public_net.ipv6.id')
64 | if [ "$ipv6_id" ] && [ "$ipv6_id" != "null" ]; then
65 | hcloud primary-ip add-label "$ipv6_id" env="$env_name"
66 | hcloud primary-ip update "$ipv6_id" --auto-delete=false
67 | fi
68 |
69 | wait_for_ssh "$server_ip"
70 | # shellcheck disable=SC2087
71 | ssh -o StrictHostKeyChecking=no \
72 | -l root "$server_ip" \
73 | DISK_PASSWORD="${DISK_PASSWORD:?}" \
74 | BLOCK_DEVICE="/dev/disk/by-id/scsi-SHC_Volume_$volume" \
75 | FORMAT_DRIVE=1 \
76 | INSTALL_K3S_CHANNEL="${INSTALL_K3S_CHANNEL:?}" \
77 | sh <<-EOF
78 | cloud-init status -w
79 | $(cat ../../driver/bootstrap.sh)
80 | EOF
81 | ssh -l root "$server_ip" cat /etc/rancher/k3s/k3s.yaml | sed -e "s@https://127.0.0.1:6443@https://$server_ip:6443@" > kubeconfig.yml
82 | ssh -l root "$server_ip" cat /tmp/sops-age-recipient.txt > sops-age-recipient.txt
83 | }
84 |
85 | # @cmd Unseal the cluster
86 | unseal() {
87 | env_name=${1:?Environment name is required}
88 | tty=
89 | if [ -t 0 ]; then
90 | tty=1
91 | fi
92 | ssh -l root ${tty:+-t} "$(hcloud server ip "$env_name")" -- unseal
93 | }
94 |
95 | # @cmd Replace the cluster's server with a new one
96 | #
97 | # In case something goes wrong, delete the old server and primary IPs,
98 | # then use --ignore-existing.
99 | # @flag --ignore-existing Do not attempt to reuse an old server
100 | # @option --type! Server type to use
101 | # @option --image=ubuntu-24.04 Image to use for server
102 | # @option --ssh-key=`default_ssh_key` SSH key to use
103 | upgrade() {
104 | env_name=${1:?Environment name is required}
105 | volume=$(hcloud volume describe "data-$env_name" -o json)
106 | server_create_args=()
107 | if [ ${argc_ignore_existing+1} ]; then
108 | server_create_args+=(--name "$env_name")
109 | else
110 | old_server_id=$(echo "$volume" | jq -r '.server')
111 | old_server=$(hcloud server describe "$old_server_id" -o json)
112 | server_name=$(echo "$old_server" | jq -r '.name')
113 | server_create_args+=(--name "$server_name")
114 | ipv4_id=$(echo "$old_server" | jq -r '.public_net.ipv4.id')
115 | if [ "$ipv4_id" ] && [ "$ipv4_id" != "null" ]; then
116 | server_create_args+=(--primary-ipv4 "$ipv4_id")
117 | fi
118 | ipv6_id=$(echo "$old_server" | jq -r '.public_net.ipv6.id')
119 | if [ "$ipv6_id" ] && [ "$ipv6_id" != "null" ]; then
120 | server_create_args+=(--primary-ipv6 "$ipv6_id")
121 | fi
122 | hcloud server shutdown --wait "$env_name"
123 | hcloud server delete "$env_name"
124 | fi
125 | user_data=$(cat <<-EOF
126 | #cloud-config
127 | timezone: UTC
128 | EOF
129 | )
130 | server_create_args+=(
131 | --type "${argc_type:?}"
132 | --location "$(echo "$volume" | jq -r '.location.name')"
133 | --image "${argc_image:?}"
134 | --firewall "$env_name"
135 | --ssh-key "${argc_ssh_key:?}"
136 | --volume "$(echo "$volume" | jq -r '.id')"
137 | --user-data-from-file <(echo "$user_data")
138 | --label env="$env_name"
139 | )
140 |
141 | server=$(hcloud server create -o json "${server_create_args[@]}")
142 | server_ip=$(echo "$server" | jq -r '.server.public_net.ipv4.ip')
143 |
144 | if [ ${argc_ignore_existing+1} ]; then
145 | # Mark the primary IPs as belonging to this environment
146 | ipv4_id=$(echo "$server" | jq -r '.server.public_net.ipv4.id')
147 | if [ "$ipv4_id" ] && [ "$ipv4_id" != "null" ]; then
148 | hcloud primary-ip add-label "$ipv4_id" env="$env_name"
149 | hcloud primary-ip update "$ipv4_id" --auto-delete=false
150 | fi
151 | ipv6_id=$(echo "$server" | jq -r '.server.public_net.ipv6.id')
152 | if [ "$ipv6_id" ] && [ "$ipv6_id" != "null" ]; then
153 | hcloud primary-ip add-label "$ipv6_id" env="$env_name"
154 | hcloud primary-ip update "$ipv6_id" --auto-delete=false
155 | fi
156 | fi
157 |
158 | ssh-keygen -R "$server_ip"
159 | wait_for_ssh "$server_ip"
160 | # shellcheck disable=SC2087
161 | ssh -o StrictHostKeyChecking=no \
162 | -l root "$server_ip" \
163 | BLOCK_DEVICE="/dev/disk/by-id/scsi-SHC_Volume_$(echo "$volume" | jq -r '.id')" \
164 | INSTALL_K3S_CHANNEL="${INSTALL_K3S_CHANNEL:?}" \
165 | sh <<-EOF
166 | cloud-init status -w
167 | $(cat ../../driver/bootstrap.sh)
168 | EOF
169 |
170 | cat <<-EOF
171 | Upgrade completed. Unseal the server and verify that everything is
172 | still working properly.
173 | EOF
174 | }
175 |
176 | # @cmd Delete the cluster
177 | destroy() {
178 | env_name=${1:?Environment name is required}
179 | ssh-keygen -R "$(hcloud server ip "$env_name")"
180 | for resource in server firewall primary-ip volume; do
181 | hcloud "$resource" list -l env="$env_name" -o noheader -o columns=id | xargs hcloud "$resource" delete
182 | done
183 | }
184 |
185 | # Find any SSH key which is loaded in the local ssh-agent.
186 | default_ssh_key() {
187 | patterns=()
188 | for key in $(ssh-add -lE md5 | cut -d: -f2- | awk '{ print $1 }'); do
189 | patterns+=(-e "$key")
190 | done
191 | hcloud ssh-key list | grep "${patterns[@]}" | awk '{ print $1 }'
192 | if [[ "${PIPESTATUS[1]}" -ne 0 ]]; then
193 | echo "No suitable ssh key found and none specified" >&2
194 | return 1
195 | fi
196 | }
197 |
198 | wait_for_ssh() {
199 | # shellcheck disable=SC2016
200 | timeout 300 sh -c 'until nc -z $0 $1; do sleep 1; done' "$1" 22
201 | }
202 |
203 | if ! command -v argc >/dev/null; then
204 | echo "This command requires argc. Install from https://github.com/sigoden/argc" >&2
205 | exit 100
206 | fi
207 | eval "$(argc --argc-eval "$0" "$@")"
208 |
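209 | # Example invocation through the root Argcfile (illustrative values):
210 | #
211 | #   argc init --driver=hetzner --age "$AGE_PUBLIC_KEY" prod \
212 | #     --location nbg1 --type cx22 --size 50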
--------------------------------------------------------------------------------
/Argcfile.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # @describe Cluster management
3 |
4 | set -eu
5 |
6 | # @cmd Prepare a new cluster
7 | #
8 | # Note that --driver and --age must come *before* the environment name
9 | # and all driver arguments.
10 | #
11 | # @arg name Name of the cluster
12 | # @arg args~ Arguments for driver
13 | # @option --age $AGE_PUBLIC_KEY Admin's age public key to use
14 | # @option --k3s-channel=stable K3s channel to use
15 | # @option --driver![lima|hetzner] Type of cluster to create
16 | # @flag --age-generate-key Make a new age key to use
17 | # @flag --driver-help Show help for the driver
18 | # @meta require-tools sops,terraform,kubectl
19 | init() {
20 | if [ ${argc_driver_help+1} ]; then
21 | exec "./driver/${argc_driver:?}" init --help
22 | fi
23 | "./driver/${argc_driver:?}" init --validate-args-only "${argc_name:?}" ${argc_args+"${argc_args[@]}"}
24 | mkdir "env/${argc_name:?}"
25 | cd "env/${argc_name:?}"
26 | ln -s "../../driver/${argc_driver:?}" driver
27 | DISK_PASSWORD=$(head -c 32 /dev/urandom | base64)
28 | export DISK_PASSWORD
29 | export INSTALL_K3S_CHANNEL="${argc_k3s_channel:?}"
30 |
31 | ./driver init "${argc_name:?}" ${argc_args+"${argc_args[@]}"}
32 |
33 | CLUSTER_AGE_PUBLIC_KEY=$(cat sops-age-recipient.txt)
34 | age_keys="${argc_age:-}${argc_age:+,}$CLUSTER_AGE_PUBLIC_KEY"
35 | if [[ ${argc_age_generate_key+1} ]]; then
36 | age-keygen -o age.key
37 | age_keys="${age_keys},$(age-keygen -y age.key)"
38 | fi
39 | sops --encrypt --age "$age_keys" --encrypted-suffix Templates --input-type yaml --output-type yaml /dev/stdin > secrets.yml </dev/null
202 | if ! command -v argc >/dev/null; then
203 | echo "This command requires argc. Install from https://github.com/sigoden/argc" >&2
204 | exit 100
205 | fi
206 | eval "$(argc --argc-eval "$0" "$@")"
207 | # vim:set ts=4
208 |
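209 | # Example (illustrative): create a local test cluster with a freshly
210 | # generated admin age key:
211 | #
212 | #   argc init --driver=lima --age-generate-key local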
--------------------------------------------------------------------------------
/workloads/forgejo/main.libsonnet:
--------------------------------------------------------------------------------
1 | local core = import 'core/main.libsonnet';
2 | local utils = import 'utils.libsonnet';
3 |
4 | local app_ini = (
5 | function()
6 | local template = importstr 'app.ini';
7 | template
8 | )();
9 |
10 | {
11 | priority: 100,
12 |
13 | manifests(_config):
14 | local config = {
15 | image_tag: '12',
16 | // Use mailer+: { enabled: true, ... } to enable, and set the
17 | // mailer_passwd secret.
18 | mailer: {
19 | enabled: false,
20 | from: '"Forgejo" <noreply@%s>' % _config.domain,
21 | smtp_protocol: '',
22 | smtp_addr: error 'smtp_addr is required',
23 | smtp_port: 587,
24 | user: error 'user is required',
25 | },
26 | } + _config;
27 | {
28 | local module = self,
29 |
30 | deployment: {
31 | apiVersion: 'apps/v1',
32 | kind: 'Deployment',
33 | metadata: {
34 | name: 'forgejo',
35 | },
36 | spec: {
37 | replicas: 1,
38 | selector: {
39 | matchLabels: {
40 | app: 'forgejo',
41 | },
42 | },
43 | template: {
44 | metadata: {
45 | labels: {
46 | app: 'forgejo',
47 | },
48 | },
49 | spec: {
50 | enableServiceLinks: false,
51 | containers: [
52 | {
53 | name: 'forgejo',
54 | image: 'codeberg.org/forgejo/forgejo:' + config.image_tag,
55 | ports: [
56 | { containerPort: 3000 },
57 | { containerPort: 22 },
58 | ],
59 | volumeMounts: [
60 | { name: 'secrets', mountPath: '/etc/gitea' },
61 | { name: 'data', mountPath: '/data' },
62 | ],
63 | env: utils.join([
64 | [
65 | { name: 'FORGEJO____APP_NAME', value: 'Forgejo' },
66 | { name: 'FORGEJO__admin__SEND_NOTIFICATION_EMAIL_ON_NEW_USER', value: 'true' },
67 | { name: 'FORGEJO__cron__ENABLED', value: 'true' },
68 | { name: 'FORGEJO__indexer__REPO_INDEXER_ENABLED', value: 'true' },
69 | { name: 'FORGEJO__mailer__ENABLED', value: std.toString(config.mailer.enabled) },
70 | { name: 'FORGEJO__oauth2_client__ACCOUNT_LINKING', value: 'auto' },
71 | { name: 'FORGEJO__oauth2_client__ENABLE_AUTO_REGISTRATION', value: 'true' },
72 | { name: 'FORGEJO__oauth2_client__UPDATE_AVATAR', value: 'true' },
73 | { name: 'FORGEJO__openid__WHITELISTED_URIS', value: 'auth.' + config.domain },
74 | { name: 'FORGEJO__repository__ENABLE_PUSH_CREATE_ORG', value: 'true' },
75 | { name: 'FORGEJO__repository__ENABLE_PUSH_CREATE_USER', value: 'true' },
76 | { name: 'FORGEJO__security__INSTALL_LOCK', value: 'true' },
77 | { name: 'FORGEJO__security__SECRET_KEY__FILE', value: '/etc/gitea/secret_key' },
78 | { name: 'FORGEJO__server__DISABLE_SSH', value: std.toString(!std.objectHas(config.tcp_ports, 'ssh')) },
79 | { name: 'FORGEJO__server__DOMAIN', value: 'code.' + config.domain },
80 | { name: 'FORGEJO__server__LANDING_PAGE', value: '/user/oauth2/authelia' },
81 | { name: 'FORGEJO__server__LFS_START_SERVER', value: 'true' },
82 | { name: 'FORGEJO__server__ROOT_URL', value: 'https://code.' + config.domain + '/' },
83 | { name: 'FORGEJO__server__SSH_DOMAIN', value: '%(DOMAIN)s' },
84 | { name: 'FORGEJO__service__ALLOWED_USER_VISIBILITY_MODES', value: 'limited,private' },
85 | { name: 'FORGEJO__service__ALLOW_ONLY_EXTERNAL_REGISTRATION', value: 'true' },
86 | { name: 'FORGEJO__service__DEFAULT_ORG_VISIBILITY', value: 'limited' },
87 | { name: 'FORGEJO__service__DEFAULT_USER_VISIBILITY', value: 'limited' },
88 | { name: 'FORGEJO__service__ENABLE_INTERNAL_SIGNIN', value: 'false' },
89 | { name: 'FORGEJO__service__ENABLE_NOTIFY_MAIL', value: 'true' },
90 | { name: 'FORGEJO__service__REQUIRE_SIGNIN_VIEW', value: 'true' },
91 | { name: 'FORGEJO__service__SHOW_REGISTRATION_BUTTON', value: 'false' },
92 | { name: 'FORGEJO__session__PROVIDER', value: 'db' },
93 | ],
94 | if std.objectHas(config.tcp_ports, 'ssh') then [
95 | { name: 'FORGEJO__server__SSH_PORT', value: std.toString(config.tcp_ports.ssh) },
96 | ],
97 | if config.mailer.enabled then [
98 | { name: 'FORGEJO__mailer__FROM', value: config.mailer.from },
99 | { name: 'FORGEJO__mailer__PROTOCOL', value: config.mailer.smtp_protocol },
100 | { name: 'FORGEJO__mailer__SMTP_ADDR', value: config.mailer.smtp_addr },
101 | { name: 'FORGEJO__mailer__SMTP_PORT', value: std.toString(config.mailer.smtp_port) },
102 | { name: 'FORGEJO__mailer__USER', value: config.mailer.user },
103 | { name: 'FORGEJO__mailer__PASSWD__FILE', value: '/etc/gitea/mailer_passwd' },
104 | ],
105 | ]),
106 | resources: {
107 | requests: {
108 | memory: '512Mi',
109 | },
110 | limits: {
111 | memory: '1Gi',
112 | },
113 | },
114 | livenessProbe: {
115 | httpGet: {
116 | path: '/api/healthz',
117 | port: 3000,
118 | },
119 | initialDelaySeconds: 30,
120 | periodSeconds: 30,
121 | timeoutSeconds: 10,
122 | },
123 | },
124 | ],
125 | volumes: [
126 | {
127 | name: 'secrets',
128 | secret: { secretName: 'forgejo' },
129 | },
130 | {
131 | name: 'data',
132 | persistentVolumeClaim: { claimName: 'forgejo-data' },
133 | },
134 | ],
135 | },
136 | },
137 | },
138 | },
139 |
140 | pvc: {
141 | apiVersion: 'v1',
142 | kind: 'PersistentVolumeClaim',
143 | metadata: {
144 | name: 'forgejo-data',
145 | },
146 | spec: {
147 | accessModes: ['ReadWriteOnce'],
148 | resources: {
149 | requests: {
150 | storage: '10Gi',
151 | },
152 | },
153 | },
154 | },
155 |
156 | service: {
157 | apiVersion: 'v1',
158 | kind: 'Service',
159 | metadata: { name: 'forgejo' },
160 | spec: {
161 | selector: {
162 | app: 'forgejo',
163 | },
164 | ports: [
165 | { name: 'http', port: 3000 },
166 | ] + if std.objectHas(config.tcp_ports, 'ssh') then [
167 | { name: 'ssh', port: 22 },
168 | ] else [],
169 | },
170 | },
171 |
172 | ingress: utils.traefik_ingress(config, {
173 | app: 'forgejo',
174 | port: 3000,
175 | host: 'code.' + config.domain,
176 | middlewares: [core.auth_middleware],
177 | }),
178 |
179 | [if std.objectHas(config.tcp_ports, 'ssh') then 'sshIngress']: {
180 | apiVersion: 'traefik.io/v1alpha1',
181 | kind: 'IngressRouteTCP',
182 | metadata: {
183 | name: 'forgejo-ssh',
184 | },
185 | spec: {
186 | entryPoints: ['ssh'],
187 | routes: [{
188 | match: 'HostSNI(`*`)',
189 | services: [{
190 | name: 'forgejo',
191 | port: 'ssh',
192 | }],
193 | }],
194 | },
195 | },
196 | },
197 | }
198 |
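199 | // Configuration sketch (illustrative): exposing Forgejo SSH requires a
200 | // 'ssh' entry in tcp_ports (allocated by the core workload), which also
201 | // drives the SSH env vars and the IngressRouteTCP defined above:
202 | //
203 | //   tcp_ports: { ssh: 2222 },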
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Personal Cloud IaC
2 |
3 | This is the repo I use for [my personal cloud server](https://cgamesplay.com/post/2021/10/27/creating-my-personal-cloud-with-hashicorp/), hosted as a VPS.
4 |
5 | **Features:**
6 |
7 | - Single-node [K3s](https://k3s.io) installation.
8 | - Encryption at rest for Kubernetes secrets, etcd, and all container persistent volumes.
9 | - Atomic upgrades by storing all stateful data on an external volume.
10 | - Easily create local environments for testing.
11 | - Automatic SSL certificates via [LetsEncrypt](https://letsencrypt.org).
12 | - IPv4 and IPv6 support.
13 | - A [variety of workloads](./workloads) that I've deployed. Some highlights:
14 | - [backup](./workloads/backup) - back up the system using [Restic](https://restic.net) on a periodic basis.
15 | - See the full list [here](./workloads).
16 |
17 | ## System components
18 |
19 | **K3s**
20 |
21 | [K3s](https://k3s.io) is a lightweight Kubernetes distribution which includes useful single-node-cluster features like host path volumes, a LoadBalancer, and [Traefik](https://traefik.io/traefik/).
22 |
23 | **Jsonnet**
24 |
25 | [Jsonnet](https://jsonnet.org/) is used to declare the desired Kubernetes workloads. The configuration boils down to a series of manifest files which are applied using either kubectl or [kapp](https://carvel.dev/kapp/).
26 |
27 | **SOPS**
28 |
29 | [Mozilla SOPS](https://getsops.io/docs/) is used to encrypt Kubernetes secrets in this repository, and is combined with [sops-secrets-operator](https://github.com/isindir/sops-secrets-operator/) to decrypt them on the cluster. We use [Age](https://age-encryption.org/) as the encryption provider.
30 |
31 | ## Installation
32 |
33 | ### 1. Choose Technology
34 |
35 | Choose the technology you will deploy to:
36 |
37 | - [Lima](https://lima-vm.io) is available for quickly spinning up test clusters on a local macOS machine.
38 | - [Hetzner Cloud](https://www.hetzner.com) is available for creating a cloud-hosted cluster.
39 |
40 | ### 2. Configure Dependencies
41 |
42 | You'll need [argc](https://github.com/sigoden/argc/) installed, as well as a variety of other utilities that will be printed when you use a command that requires them.
43 |
44 | - The Lima driver requires that `limactl` is installed.
45 | - The Hetzner driver requires that `hcloud` is installed. For Hetzner, you should also create an empty project to host the resources you will use. Set up `hcloud` using `hcloud context` or by setting `HCLOUD_TOKEN`.
46 |
47 | Generate an age key if you don't already have one: `age-keygen -o development.key`. The public key will be printed to the console; it should look like `age1qal59j7k2hphhmnmurg4ymj9n32sz5dgnx5teks3ch72n4wjfevsupgahc`.
48 |
49 | ### 3. Initialize Cluster
50 |
51 | Run `argc init --driver=lima --age $AGE_PUBLIC_KEY local` to create a cluster named `local` using the Lima driver. `$AGE_PUBLIC_KEY` should be your age public key. This command should take a few minutes to run and should stream logs throughout the process.
52 |
53 | At the end, the script will print the disk encryption password. It is important that you store this somewhere safe; it is required in order to reboot or upgrade the server.
54 |
55 | To use the Hetzner driver, run `argc init --help` and `argc init --driver=hetzner --driver-help` to see the arguments you need to pass. At a minimum, you'll need to use `--location`, `--type`, and `--size`.
56 |
57 | You can create any number of clusters. Each stores its configuration in a subdirectory of `env/`. Looking at the local cluster in `env/local/`, we see these files:
58 |
59 | - `kubeconfig.yml` is the kubeconfig you can use to access the cluster.
60 | - `sops-age-recipient.txt` is the public key of the cluster's sops-secrets-operator.
61 | - `config.libsonnet` contains the configuration for the workloads.
62 | - `secrets.yml` contains the environment-specific SOPS-encrypted secrets. Each document in this YAML file should be a SopsSecret object, and you need to use a separate object for each namespace you want to add secrets to.
63 | - `authelia-users.yml` contains a sample Authelia users database.
64 |
65 | ### 4. Use Cluster
66 |
67 | The default cluster configuration is an empty k3s installation. Use `argc sync` to deploy the workloads from `config.libsonnet` to the cluster.
68 |
69 | - [Traefik Dashboard](https://traefik.lvh.me/) - served with a self-signed certificate. Log in with authelia / authelia.
70 | - `eval "$(argc activate $ENVIRONMENT)"` - set up the `KUBECONFIG` variable and others in the current terminal session. Useful to put this in your `.envrc` for use with direnv.
71 | - `kubectl` - Use `env/local/kubeconfig.yml` to access the cluster.
72 | - `kapp` - Use `env/local/kubeconfig.yml` to access the cluster.
73 | - `argc sync` - Run this to sync all workloads in `config.libsonnet`. This is equivalent to running `argc apply $WORKLOAD` for each workload configured.
74 | - `argc render $WORKLOAD` - Show the rendered manifest for the given workload.
75 | - `argc diff $WORKLOAD` - Show a diff of the rendered manifest and the current cluster state.
76 | - `argc apply $WORKLOAD` - Apply the rendered manifest to the cluster.
77 |
78 | Workloads are managed using kapp, and can be deleted using `kapp delete`. There is presently no support for automatically pruning workloads that you remove from `config.libsonnet`.
79 |
80 | #### Deprovisioning Workloads
81 |
82 | You can use `kapp delete -a $NAME` to delete all resources associated with a workload. Note that the default reclaim policy of dynamically-provisioned PersistentVolumes (e.g. local-path PVs) is "Delete". You may want to change this to "Retain". Since the PersistentVolume isn't specified in the jsonnet configuration, you should [do this using kubectl](https://kubernetes.io/docs/tasks/administer-cluster/change-pv-reclaim-policy/#changing-the-reclaim-policy-of-a-persistentvolume).
83 |
84 | To reuse this volume at a later date, you should patch it again to set a claimRef matching the original PersistentVolumeClaim, then deploy the workload as usual.
85 |
86 | ```bash
87 | kubectl patch pv "$PV_NAME" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
88 | kubectl get pv # Verify that change has been applied.
89 |
90 | # Untested commands
91 | kubectl patch pv "$PV_NAME" -p '{"spec":{"claimRef":{"namespace":"default","name":"your-pvc-name"}}}'
92 | ```
93 |
94 | ### 5. Upgrade The Server
95 |
96 | **Option A: My Server is a "pet"**
97 |
98 | You can follow the normal [K3s upgrade guide](https://docs.k3s.io/upgrades), as well as the normal [Ubuntu upgrade guide](https://documentation.ubuntu.com/server/how-to/software/upgrade-your-release/index.html).
99 |
100 | **Option B: My server is "cattle"**
101 |
102 | It is also possible to simply swap out the server for a new one using the same data drive. This method gives a fresh install of k3s from a known-good image.
103 |
104 | To use this second approach, see `argc upgrade --help` and `argc upgrade --driver-help $ENVIRONMENT` for the available options. The basic approach looks like this:
105 |
106 | 1. Create a snapshot of the current server to roll back to if something happens: `hcloud server create-image`
107 | 2. Replace your server with a new one using `argc upgrade`
108 | 3. Unseal the server with `argc unseal`
109 | 4. Verify everything works. If you need to roll back to the previous version, use the snapshot you created in step 1 (e.g. `argc upgrade $ENVIRONMENT --image=my-snapshot-id`).
110 | 5. Delete the snapshot once you are happy with the upgrade.
111 |
112 | ### 6. Clean Up
113 |
114 | Once you no longer need an environment, use `argc destroy` to remove it. This will delete all local/cloud resources and remove the environment's subdirectory under `env/`.
115 |
116 | ### 7. Prepare for Production
117 |
118 | Here is a checklist of things you should do when you are ready to deploy your cluster to production.
119 |
120 | 1. Turn on accidental deletion protection for the volume and primary IPs: `hcloud volume enable-protection` and `hcloud primary-ip enable-protection`.
121 | 1. Configure DNS for the main domain and subdomains.
122 |
123 | You may want to set up your SSH config for access to the server. This is fine, but please note that `argc upgrade` will cause the SSH host key to change. You can avoid host-key warnings by using the `Hostname` directive in your SSH config, since the `argc upgrade` script automatically replaces the known host key for that address when the upgrade is performed.
124 |
126 | ```
126 | # Example configuration for SSH
127 | Host my.cluster.dns
128 | Hostname 188.245.147.159
129 | User root
130 | ```
131 |
132 | ## Repo Organization
133 |
134 | Here are the main directories in this repository:
135 |
136 | - `env/$ENVIRONMENT` describes a single environment. My production deployment is checked in here, which you can see as an example.
137 | - `driver/` is a directory containing the scripts to manage the infrastructure powering the cluster. These are not meant to be run directly; they are accessed through the root `Argcfile.sh`.
138 | - `workloads/` is the main Jsonnet directory.
139 | - Subdirectories here correspond to individual workloads which can be enabled and configured using the environment's `config.libsonnet` file.
140 |
141 | ## Using Kubernetes
142 |
143 | ### Basic Maintenance
144 |
145 | These are some basic commands that can be used for troubleshooting:
146 |
147 | ```bash
148 | # View node status
149 | kubectl get nodes
150 | # Check control plane components
151 | kubectl get componentstatuses
152 | # Review Kubernetes events
153 | kubectl get events -A
154 | # List deployments (check for anything not fully ready)
155 | kubectl get deployments -A
156 | # Look for failed jobs
157 | kubectl get job -A
158 | ```
159 |
160 | ### Deploying from CI/CD
161 |
162 | Unclear!
163 |
164 | ### Common Issues
165 |
166 | - If you change the server's primary IP addresses in the cloud provider console, it may be necessary to run `cloud-init clean -c network` and reboot in order for the server to detect the changes. Failing to do this may result in a partially updated network (e.g. IPv4 works but IPv6 does not).
167 |
168 | ## Security Model
169 |
170 | This is a toy project for personal use. As a result, the security model has been simplified from the normal one that you would encounter in a production system. At its core, the difference is that a single-node system is fully compromised if root access is gained on that node. The key implication: **if a job escapes its sandbox, everything is compromised.** Specifically:
171 |
172 | - Root access on the host system can be used to read the unencrypted contents of the cluster's drive.
173 | - Access to kube-apiserver can be used to run an arbitrary pod with root privileges on the host system.
174 | - Helm charts installed from URLs can be modified at any time in the future to run arbitrary pods with root privileges on the host system.
175 |
176 | The steps required to make this setup "production-ready" are:
177 |
178 | 1. Set up [Pod Security Admissions](https://kubernetes.io/docs/concepts/security/pod-security-admission/) to prevent pods from being able to access resources that they shouldn't (host system resources, kube-system namespace, etc).
179 | 2. Follow the [K3s CIS Hardening Guide](https://docs.k3s.io/security/hardening-guide).
180 | - Note: the Kubernetes-native secrets encryption is not used; instead the entire etcd store is encrypted using full disk encryption.
181 |
182 | ## Changes
183 |
184 | - 2025-05-30: The infrastructure underwent a substantial change from Nomad to Kubernetes. The older version can be found [here](https://github.com/CGamesPlay/infra/tree/35120ca5e04795cad60536bc5f91c0c6f89f4d15). It uses Nomad, Consul, and Vault, as well as Ansible for managing the configuration of the server.
185 |
--------------------------------------------------------------------------------
/env/prod/secrets.yml:
--------------------------------------------------------------------------------
1 | apiVersion: isindir.github.com/v1alpha3
2 | kind: SopsSecret
3 | metadata:
4 | name: sops-secrets
5 | namespace: admin
6 | spec:
7 | # https://github.com/isindir/sops-secrets-operator/blob/147febf336f14bb2546eec020680ce1b2a2e96f1/api/v1alpha3/sopssecret_types.go#L33
8 | secretTemplates:
9 | - name: ENC[AES256_GCM,data:nKVW1E3R1kQ=,iv:xCDOlXCUnQk8+voaVnrsyGcU1UYex7Yd8CViH26uRPE=,tag:O17M183KGJz0PGorzTLfnw==,type:str]
10 | stringData:
11 | configuration.secret.yml: ENC[AES256_GCM,data:yAmZoLww1xzBBKpkLzH6V5w8Zz7rUzvHLtRtLbixys0NqBzUDP4DJACTkNDatuFPDeZ96vd22VZXY5aPab19Xn4pTzSy8uCojcUJPydQwWq67QAWaLaMxCs4hJYtfc+vVBTzN3kPJt8djtQx8O7foH6+9hTDfJc7c1DnT86KdI1OiKdp7gqcSegadlVgoNbhLWOeFy1fVz8Z5Dk5wEKclggtcw3I/y0Sk5XTTxtuNveQysw5PsOiI/DQQK8Z7Pe/SF86V99xd//kGkM7XIqUoixdA33WgFK720reJ8lrIUIGnXw5gV4v5mO3o0crGsMxFyfj/e25ONH27+m3sGdX/i3pPSa2kVDmfPX9lJYTB2Ek8JEaXe7y1jK/2TO2qkjSjVoUSEj+CBuo0+p/v1ptzCYdd4SiKbYz63eUtX+X7OPSSTpHGbNrFow2K4HXn/uCRuHGoAZvqFstVvlp0m3l8sDwh3v6CswbBvYkz199hbfCKeSLcrjjPWI3l6lnVDMooBL3As/5DTU8W4xr8jfFEcwIk8gkJQjEmkmXx/xyjThgfbHPsxbzkK34aXmG2rzgPufScZJWzKTJY4zzoF6PMudnAQj9Umax4ukXBmLdsTD7QfSi8LIxIOdMYGphXPxnRWCnXDsoE3mPRTTQKfNB7oRrKMUf9xF48JTCoRgrLFEmjJb5AU3JbmmVTX0C4q7bUodsPU8EUyOwWirviyZAq9w5523DXof7rCVd+H0CeDaz7bOzHtLSBBxwWXESJF9vIXBtaq6m4Nj9jFskm1cWF+eKUSZHA858unNjNjiO4ZDneX57C+r8ePLxW6ATGpt3rxMfMT0JYrYjPtELyCoAqauDTypPMIarvrg2fl9jV6AayKo+MClzuS0dUM1xrsU2PIZEIqZTZmEehgJl4danhmhupJSV+U0IMJ8uR8cazZj0Jb7NlOVIrZAxdWeRUBf0HezYkvbumWrm/tnPrs8/jZK+K0b8EGkhVDt28S3D674jPKXa3cwO93+dXutYcge4gtN45NHWUsO/8OQv1SQQiOzAGDqH2cg1WP3Wdafj5YKlDQvi5LGGhzDEXbo2I/+BoYOJIhBZrYC7h3HCbf/bytIqVfmuomWzLpxCfOKxDshShPI5G/PtK5W4KJpaO9xBDUbKX8qm9Gdh3sDM/s42q6CLvkUUIubg3lpIRR5Y53YB2PamfdLDVU4O0X6mCsJgHPYFgykdzt6nN4h1Q77cpldtZV0td5uT/5R9FGzd3bexLVtFT5AkUh4mjUShkuVSPiD0fgyTT5u37ZB2UjSOYO2DJgAS7h4//Zfm/azkSa29iZ3ChwyEnXAq563S+mqOi10vREFEGi1vvnPTM71Uj2FVrvp5+1shNnAD1POfeMM/TgGYoo5XwW/jV9Vz7UPkJicvpeAUT6ZQ7TuQkTCRFRCh0qt9kXreI/7o8qnTx0NO6KP61v35VVCaeHEG7b7DToV8eL8v2npuIkI2p1Fxo7EBBKyODuARjoh9DHbA99Dgq097OnMGatUifZMCDSFZFbuGqt+Qbn/oJKd8rX8GfT4NiiKQHbIUWe3+JqgeSmpuUoeiJqIbSiGHn8MJtF3J66b44dCT3fZEl3UK9h8TRGhX63TM3zxN3awpwL9LJid3EQ1rdnEJjY5+15e6ShhMhP5beaseqzwGCG1DxGIwaRm81YBRSlUSQV/vh5wQ/eJ5Vsib3B8C4n4DagjnfY+K0PEkkTSvekkz9NJw2iA+0Lt5lSU0gfCZ5csTiYhrZtf6xQo9Y1MxbFRaRifb6k9AwwYTr8x0FQ/kBzLfg5Ee+U7GFosQYYj8PjF1SBxKS600D1HbUYFxP8Vpz/d/ejyaGuRefZzEbaT+urhA56CapaPGZK/4irhXnAcCj5m2SuCsLyqm4S6wH5pSffqCCfsreoQqkZqbz2OAUT45SKah1nYixg/h3wpra8ILAq+4uHpnH1qNlb58YCP24TR3lODWkoG5qNGTSuITbRXkF0AWmuMOihLX+hoGlf0wsYhPIiVz+V1pufLkB9eRxIeYhOxplR292sIeI8AT8Djx6rloq/aoXSaAQz3s+P2efZNTZIuiUdc75EdaUpbpeaCl8ZmsUHSY3+W/VGzeA2fGCE6sOcKKVx7RMkAfq0TmTe7heV336jcwgwm9NYMFKoQmbACAiX5R8QsIdsKhTGDDx2i6Ye3WUkyrdJur6X7C8a1gS2vMW/0LnmwxugB9NoaIsYDblUKwxKT84fUN9Sc91X9ACK/7kdZgq6IhYt2f1nHRdGI3BLfYo/ElQaJkHvBqBYUwn+f/79kzgsfREMaR4Zy83EEH4wILEESz2acVzvgZCG0E3tW9ny3T26e4tuhMOnViIsy+znqngj5RfA6H+ChpDzP91jmcDqRQ0ipSF2X6kA/ujufDwF7CiPor1PG81K6RMb+e70N67tFYQpMLFbIjzDrb0CF3AQ98HuKWcnunkUhF4vZq+IYoD7nhqpndpgSSCPK72Kiy0weMfjJtzT5XW/nfFb8qsFP4aR3xhC+VfiDJfw8c9TyMLLPFe8v9ZSgrKdJJm2h6L0OfJIsLBtsYKBHTnmIVx5Xd5UC4jxTCUjgGiWHZlSlSa9vRNkuqLSmpN+YlQjkhcikjDK50Wjux/m2JV7Z6if/aHxwaoimSn49o3RG3FkU0Q42TkS15Oq/3MLkjO9n3JHHW4cb3VjrZ3QfF/Tu/w4IqmQIKpHcXV6zKPFWvnHkdrQW895+MJBCLD0G0lpwgwQS3Cn7bzWiNvDeUhUjehk9RFXX19aEh1YRyzNDXXdzotNxuvNWBaAUtKhlibS3o9cYtQbMhavv4hmzhdZONPu0I1mzvzgjMEAzLvezDi0uX8klVKrleqrB795REZuEzvVlQbAXfQ+5W3rXZkod5Bhm2jZQVb7PsCIjXHrsY+QF/PLSmrKjdXRtzA8/gjyc0cc6qdqLn4rLd56zfNhc2vXPGMUYyVMMlEUWe6XijsT9SQAjxPjSyKTKTNBSz2+fai+GdUzdlUBItQMCEcfaEl3+gaevP+zxTakLglhcWj8hx3w8t5zosDbveJAiv6gQpLcNC4AQxO+5UQsjiFHtskRadiCrInyiv7qOJ0HzcbCZWoe3IKpjlU8BZ7o6BzGJuOh1mT3h+gi8uysud98QxuVBQb1bwi5rUfvwMimXk4dI+T3kWSAMt7napgv5bBWw60TWWv3sA3yB/lhZ/,iv:qTLJqCO9BSoVnjVXu9s7oAMRxjzgjfxIzOuf9yZrLdA=,tag:I3tRF5006Y/zOfDi6iXj5A==,type:str]
12 | - name: ENC[AES256_GCM,data:AwzNqAywh6UgtvLzmao=,iv:521LhNJ2PM1Gy4lKZygXhglo7FdcnqlTUffv4fDkOSM=,tag:1R2kiDJiHFDwNMAg9PdeBg==,type:str]
13 | stringData:
14 | AWS_ACCESS_KEY_ID: ENC[AES256_GCM,data:+R0Ra8kI2yI5FcnXMfPa00KxnfE=,iv:UjWw6q+goGD9SlP5njEe2sybLGzAG6BGz4Yug7B3BjA=,tag:uvie+IA4kLxVDEIniQMPoQ==,type:str]
15 | AWS_SECRET_ACCESS_KEY: ENC[AES256_GCM,data:XSsfP9DfPqJtKKwpUF1DwzGNqj3j/srFfEIDV280MP1TIwyR4uKgvw==,iv:GMulgmJHb+jyWlC36ijyG1kefzGuLuw0Kow1c+V4wIQ=,tag:F1R5NUurNzCFEyEZwuG3PQ==,type:str]
16 | RESTIC_REPOSITORY: ENC[AES256_GCM,data:ozGssvXpjcvpknkQ5TWg8PUJeQk62B7fsip/r1NH+EYIDQGAR2sBn7paAzlIPCY=,iv:85xSAINv3ig2ORVXdVleSvuTZ2Wv7TVLUS/tmzuuEto=,tag:XiV3q0kcE73obiKHZ7bJvw==,type:str]
17 | RESTIC_PASSWORD: ENC[AES256_GCM,data:Cpj1ZDTz8a8xYdjRVdfpYT9eR4BKlQBu207AN9DxrvoQDSui0EkkJFdvz6Y=,iv:NPlLyaFlxR2m8rhoxBfPqUTXJlXSfU6LsOEqu1tNzjo=,tag:U+/YNhAqT+JrKYI5KZ8JDg==,type:str]
18 | sops:
19 | age:
20 | - recipient: age1se1qd0xew8nru2cmy7u9mvy9wr8lxj2zs6k0fysthpdj8kjuuryn6vwv6fsh04
21 | enc: |
22 | -----BEGIN AGE ENCRYPTED FILE-----
23 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IHBpdi1wMjU2IHZ5Ry9wUSBBMEZRVEZX
24 | aXlYQU45aW5admI5YkpiMWRIMGxzektGbXNWaGxoa3VxOGxRTwpvVmNGYTRobURB
25 | cm5EeGl0NEJzNHJQcHRFblp2VHp4aWdxSllhWk1nTkRzCi0tLSAxRkRtTDBQa1E5
26 | TnVRa3dkTHhHNWxaSUtMWEZ3SktUaDVWbWZTWDA0QjE4Cpg3CVP53vpwIUC3DKft
27 | C7OK6R3QxlBIDEoxAe+nTDiuaXFf4agOJLRO91wGYYDbLfYInX4J2nUSg3Y+3R3a
28 | oZQ=
29 | -----END AGE ENCRYPTED FILE-----
30 | - recipient: age1ye7q4uvexwhruvm5p9svw9z5tu58v9uk6r9pv4aue8j7v0lmpqus5ug9az
31 | enc: |
32 | -----BEGIN AGE ENCRYPTED FILE-----
33 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmUGtRMC9oaExVTHVzdEJO
34 | cFBjV1BuajlzNFRIengxNGtTQWFDNUJqWUNjCnlUWG9JT2FsTnRZMHVyYjlpQXY2
35 | dmtFemcza3NaK1VVZ3kvanhCVkdCTkEKLS0tICtEeDdQUUMxUzFTbTlzZkhRaEww
36 | ejhGd0V5ZzU0clVVc084Mm1QN1RPSmMKXMN8IpwaHXj2TC5gBoh5dKsn1JW4kfc/
37 | 60S856zHsdx+l8lw7OudAywLm97z7M0dWP1SrFZunaDMnONw/mFsCQ==
38 | -----END AGE ENCRYPTED FILE-----
39 | lastmodified: "2025-10-28T13:12:14Z"
40 | mac: ENC[AES256_GCM,data:y5qDcno+eyrtGJp+lTkFb50caCgJT/zp9pu4rnXjVHzYGL43WdUMV41AUnOWjTWhWoYJQT3FDlEpgHFKaVSFIJMPoorF+HPDQdjm5XKfuFgn3wuoT7SshZvM6QpIRkYvMa4tDArzZJLLaykwlg0O2TrGg7v7Z6rL8H635rQBSCs=,iv:21bCm5M9caZJwG7u4tWxrjcLnkS9mDRrl63AwWxxuTA=,tag:oTIIVDqfHm8x+3OQ3vv4eA==,type:str]
41 | encrypted_suffix: Templates
42 | version: 3.10.2
43 | ---
44 | apiVersion: isindir.github.com/v1alpha3
45 | kind: SopsSecret
46 | metadata:
47 | name: sops-secrets
48 | namespace: cert-manager
49 | spec:
50 | secretTemplates:
51 | - name: ENC[AES256_GCM,data:+vztK5fiprvA4eD+hds=,iv:BXIqN/p/tlXDn0GnJagIZDtHjdGpEyJRd/pWTlQu3r4=,tag:FwvYa8tDVhTu0r+u/FBJMQ==,type:str]
52 | stringData:
53 | AWS_ACCESS_KEY_ID: ENC[AES256_GCM,data:PGSoeHHu+BqHnWhv3moI/F5gfko=,iv:N/gPw7erqLcKpRQCpDST60cWkMBXSsE5tP5pQnwNbDw=,tag:UyEUCh8LPo043CXtVc2XkQ==,type:str]
54 | AWS_SECRET_ACCESS_KEY: ENC[AES256_GCM,data:5qFhusPdd9VqdkdkNf2bAujcVIr+Zb2AM5HhVthTg9OajeBB3yiG9w==,iv:FWKR4Cw5Fc+GSh9VrXpymzsGyfPv6ZxozygveCUdgmU=,tag:FDNWOCllWWyEmjJ99rWuVg==,type:str]
55 | sops:
56 | age:
57 | - recipient: age1se1qd0xew8nru2cmy7u9mvy9wr8lxj2zs6k0fysthpdj8kjuuryn6vwv6fsh04
58 | enc: |
59 | -----BEGIN AGE ENCRYPTED FILE-----
60 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IHBpdi1wMjU2IHZ5Ry9wUSBBMEZRVEZX
61 | aXlYQU45aW5admI5YkpiMWRIMGxzektGbXNWaGxoa3VxOGxRTwpvVmNGYTRobURB
62 | cm5EeGl0NEJzNHJQcHRFblp2VHp4aWdxSllhWk1nTkRzCi0tLSAxRkRtTDBQa1E5
63 | TnVRa3dkTHhHNWxaSUtMWEZ3SktUaDVWbWZTWDA0QjE4Cpg3CVP53vpwIUC3DKft
64 | C7OK6R3QxlBIDEoxAe+nTDiuaXFf4agOJLRO91wGYYDbLfYInX4J2nUSg3Y+3R3a
65 | oZQ=
66 | -----END AGE ENCRYPTED FILE-----
67 | - recipient: age1ye7q4uvexwhruvm5p9svw9z5tu58v9uk6r9pv4aue8j7v0lmpqus5ug9az
68 | enc: |
69 | -----BEGIN AGE ENCRYPTED FILE-----
70 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmUGtRMC9oaExVTHVzdEJO
71 | cFBjV1BuajlzNFRIengxNGtTQWFDNUJqWUNjCnlUWG9JT2FsTnRZMHVyYjlpQXY2
72 | dmtFemcza3NaK1VVZ3kvanhCVkdCTkEKLS0tICtEeDdQUUMxUzFTbTlzZkhRaEww
73 | ejhGd0V5ZzU0clVVc084Mm1QN1RPSmMKXMN8IpwaHXj2TC5gBoh5dKsn1JW4kfc/
74 | 60S856zHsdx+l8lw7OudAywLm97z7M0dWP1SrFZunaDMnONw/mFsCQ==
75 | -----END AGE ENCRYPTED FILE-----
76 | lastmodified: "2025-10-28T13:12:14Z"
77 | mac: ENC[AES256_GCM,data:y5qDcno+eyrtGJp+lTkFb50caCgJT/zp9pu4rnXjVHzYGL43WdUMV41AUnOWjTWhWoYJQT3FDlEpgHFKaVSFIJMPoorF+HPDQdjm5XKfuFgn3wuoT7SshZvM6QpIRkYvMa4tDArzZJLLaykwlg0O2TrGg7v7Z6rL8H635rQBSCs=,iv:21bCm5M9caZJwG7u4tWxrjcLnkS9mDRrl63AwWxxuTA=,tag:oTIIVDqfHm8x+3OQ3vv4eA==,type:str]
78 | encrypted_suffix: Templates
79 | version: 3.10.2
80 | ---
81 | apiVersion: isindir.github.com/v1alpha3
82 | kind: SopsSecret
83 | metadata:
84 | name: sops-secrets
85 | spec:
86 | # https://github.com/isindir/sops-secrets-operator/blob/147febf336f14bb2546eec020680ce1b2a2e96f1/api/v1alpha3/sopssecret_types.go#L33
87 | secretTemplates:
88 | - name: ENC[AES256_GCM,data:a+fPUmxfYg==,iv:G15dV3Fn3TBP7la3pX6tBbi7KnzEI+9riFuudUwjTeQ=,tag:2Gs+Lp4TiSZhIqtuXLBUCw==,type:str]
89 | stringData:
90 | secret_key: ENC[AES256_GCM,data:p8Al0QaszjFQOaMtrF/hegrSd8MZd+YnR6yS5H2DvwdPZZNTVu9fvJuWHKU=,iv:BCdi6YcLgTC58fdlhnU6aEbclHSf2Y6Yjn5oGwqvkf8=,tag:MRb8AXfZzJYV3s65Gh4Bqg==,type:str]
91 | mailer_passwd: ENC[AES256_GCM,data:N5MmTPRLiR2rcVng9QBs17O6wAMOxkdNdh0CL1jzECtRUmqltDud7MA/GtinD8D4iPM=,iv:SRzUeicnAhj63wnSjrivDSwbVqSM0yyE4LzbRqatskg=,tag:/rzjE2h3ZM3NeKiyJ46Dag==,type:str]
92 | - name: ENC[AES256_GCM,data:fKNU3gSWqg==,iv:rzYYxGlArs8ZSjzFJwL0/hgosAxD6Rf3ERO2klAappc=,tag:+o+ykQtI3HaO1/gV+wkl6A==,type:str]
93 | stringData:
94 | JWT_PRIVATE_KEY: ENC[AES256_GCM,data:9mcobWD+VExwdQ2Z4WCh1pVPfYFGeQ0KhsmVhKtFFraKt+qCP1aJ2Ej8GHxCvzAQFQ==,iv:Lf6ohFKz+GxJcWTqPSLMPaH/AeV6/7NOOr5KuG/mEzQ=,tag:SXEa2DQ5P7BqukR8PwzqrA==,type:str]
95 | - name: ENC[AES256_GCM,data:pXVLP0tAIAFyscVIS8lWMcv8,iv:H7eQ9ijQRjmDoRpRXM65A47XlA7GF3gz7u0xk+ZoXu8=,tag:nIaGzSuvts2VMCQxHY1I9A==,type:str]
96 | stringData:
97 | ANTHROPIC_API_KEY: ENC[AES256_GCM,data:FtHkdo4vYYWaAlPyypwqo+EbO0YMwQAK75LAWzW0gvx/ypNkXRMFlMmrLZnTrkVP6wuF2gd3DFBfp2pFJVuzho4QWZ1SR8PjtCh7/8SiSvHlaso5H9+8rRExtqeLY8ifu6V56/p1mM5XjlRm,iv:FPIonoWcWVPAjIwmUcrnYZrg0KALxQfwr5Y3CG+x8RA=,tag:zQslyXi3CTehU/cDDXNY1Q==,type:str]
98 | GOOGLE_PSE_API_KEY: ENC[AES256_GCM,data:0rFYiPTB3tx4hp/+GnfaGm2Hw4hdkFAj6aJOgeCWTLHHzRx9QAzP,iv:UwA3qq40v7msBkINKsLGkmi8Zlv8dFa9N37j+s94IEc=,tag:+oLtWCpI2Y63jG28MByw0w==,type:str]
99 | GOOGLE_PSE_ENGINE_ID: ENC[AES256_GCM,data:9KA/QiFsInKb1AOpPGGzvjw=,iv:tWbcz2tiQtb4Zd4bQjT2cvWwCGGxGWfmCLdYbuvBPbM=,tag:/dRuhQUduWLCWhT5kTXy+g==,type:str]
100 | OPENAI_API_KEY: ENC[AES256_GCM,data:9CgpEB+sRfziihaVAcO3yny5H2+48HT9Sjk8q0N3YULYyV4Mk/z8UCbQfziNry0AocCQ,iv:i++G4/sdNbqAPG4JwCehi3aAOp47pSTQ+6zm1FHVOKo=,tag:v9JDv5yIW1VAcKPF9gMMow==,type:str]
101 | SERPLY_API_KEY: ENC[AES256_GCM,data:lQH+xpOdBBHTs10fD04whAu/F6pz/25s,iv:zTOXcc9Q5vH+a+A7FL14sFCbJ1rlHxF0oneE5u+qRjQ=,tag:/1Rk/awOFfTGQvL9V9PnCA==,type:str]
102 | WEBUI_SECRET_KEY: ENC[AES256_GCM,data:v4fJeBB4M1I/RWqKT+44HwoNVlOsHTElytecpsOrT800PWcHvsaHD3TAqV4=,iv:BjP3xVBTP8YTjBa0SSStrNDDtPGc8IC3uGRNyFu0xgs=,tag:0ZVDm4q0zbRp4tzSQbHY8Q==,type:str]
103 | - name: ENC[AES256_GCM,data:Iziy36xBOxouVtfjFkOi0Wo6KA==,iv:uukWLZpAdFxmXdcJf6Nmm5FGtyceMPLttLG/p6YP2oo=,tag:GXYxzv7riwbgL3v7FFTS+g==,type:str]
104 | stringData:
105 | production.json: ENC[AES256_GCM,data:dGd33/Zllfoieh8df5redDM3Ij/Ab+R5zFccc16UQbxZ6WD2MBS9p4iZbG0vZ84Q1xO8tWsVZmlA5eb4GUoHc1ga0ne/H1ZxuIkJnvEu5l+qVvoLQBG/m3SXh/wQG1Obp2KWFZ01h6q0kGFZcY5uQp1F2ZJ0cKzmt3ioH4yO4EbCCqEqkjRNlkDnAZoTW8Q4xuB0+BtpG6jq1bsX0KWJ9NOvtAitqXzG+zvWnMpx81DqzLKLb/uHufEEMZZ9Kk/E7N/fr4lHWKx5PtaRJb7ff6s5IsRMSOZ3yCgu1IP1nuRm+g==,iv:sOnB+Yvpqgp8AsIo0fGUyxnKkTtWOLDjF5Pxp6c1PwM=,tag:eIhOc0ljo8TbvrfKVEX0Zg==,type:str]
106 | - name: ENC[AES256_GCM,data:uwlsdKDsaGrPTOXIdI7n/KHyLDGS,iv:WDkqSHO6XzxRsJW/ZBvc3OwcXridw2I/JSZpoVdD8h0=,tag:RFY2b9Z1aQqUbmwiw/8HvQ==,type:str]
107 | type: ENC[AES256_GCM,data:smzgGA/19CtQvLF5VGdNfsgrjJ2r9qYJjMI/ypLx,iv:qf6M0wMw5H/kyehanJBZigOzk1g++C6a27ezxLKS6Q0=,tag:px2t8RL81i3K58jYbdZDQw==,type:str]
108 | stringData:
109 | .dockerconfigjson: ENC[AES256_GCM,data:NxnojJQoXT4ncfiGyx0IP6r6eU8qakgNWFmpsxq98YRraBqT8kpfGWs9MuqYxE2ZMkulDsaJgR3Or9/KBE6d5uG9i7yQDeiaPQPFgJyoymQVE7yH1AMIzqGBpC06/4GjM1yYY1uq+HoLZYjKAZKEFzekeZiqBsGDz/lqBAg=,iv:KftjTzGLptn46iBccW1yd/iBSJwkX/u9tqMzEAxUMzI=,tag:6rO/ADluroaQbnbgehHliQ==,type:str]
110 | - name: ENC[AES256_GCM,data:VhyEGQ==,iv:oR9lunq0CG3tJBDednb7YSPJnnq9ArJ8zvo/vOHmONs=,tag:WIDAfFwdUjj0X3JicgkqSA==,type:str]
111 | stringData:
112 | OIDC_CLIENT_ID: ENC[AES256_GCM,data:39GJUQ==,iv:csWgqDiFnGYNq5DvrdHD+i41LMbzfZVHbh41OEwcmpQ=,tag:kf/ZCMg2x9VmgJ6hnUQrRA==,type:str]
113 | OIDC_CLIENT_SECRET: ENC[AES256_GCM,data:nEMjV8i0jGmlKVeCyWsRcklZ3QT50lIhZWhli22sa/RBjgxhLb4SVc8TEIdN581oA6XYVmyaY3fqoFUCndH1L7/MDdjxgpZS,iv:ifr/+xZ4BrIGOIkik6F9NJ0UtCoOhzO4cu2mtgYcUG4=,tag:aLt3lVDYA/aqrr32ruvR9g==,type:str]
114 | ROMM_AUTH_SECRET_KEY: ENC[AES256_GCM,data:Xi8Bx6JEQYDa5ywNU5x16BbXDdtj7CK2jOYVjzmPYf+2zUohXFlDYZ7Uux6u34yMZRl17+PP0H4BpTE5MrN3UQ==,iv:AM2NVLl5Faaqi7h/rO1l7PbXa6MDekD1LlcgljfH5g4=,tag:2R7SMIFQVzwZctSeYiPbnQ==,type:str]
115 | SCREENSCRAPER_USER: ENC[AES256_GCM,data:zvDViOI6ZeWZKw==,iv:ki6mSLW6eevkIL0GV7ndkNrJkyfTLNBdEnlg4VSLaYc=,tag:fFS25OF19sd0DieF5qL6KQ==,type:str]
116 | SCREENSCRAPER_PASSWORD: ENC[AES256_GCM,data:gnW+b44/k9ayqYUDh24iIV7I,iv:0H3AfbZ+QsQ/WCItFG6ATg5kfJp7jtWRLSDKAfoa41c=,tag:XRizrZr4H9UGGCCWjQIhwA==,type:str]
117 | HASHEOUS_API_ENABLED: ENC[AES256_GCM,data:WbaAlQ==,iv:m9ordx10Efw4oFUTPddIRDyVPN9c8Ri1dp8DaGjlip8=,tag:rnOcpJQULiNSqRFmuMF08w==,type:str]
118 | sops:
119 | age:
120 | - recipient: age1se1qd0xew8nru2cmy7u9mvy9wr8lxj2zs6k0fysthpdj8kjuuryn6vwv6fsh04
121 | enc: |
122 | -----BEGIN AGE ENCRYPTED FILE-----
123 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IHBpdi1wMjU2IHZ5Ry9wUSBBMEZRVEZX
124 | aXlYQU45aW5admI5YkpiMWRIMGxzektGbXNWaGxoa3VxOGxRTwpvVmNGYTRobURB
125 | cm5EeGl0NEJzNHJQcHRFblp2VHp4aWdxSllhWk1nTkRzCi0tLSAxRkRtTDBQa1E5
126 | TnVRa3dkTHhHNWxaSUtMWEZ3SktUaDVWbWZTWDA0QjE4Cpg3CVP53vpwIUC3DKft
127 | C7OK6R3QxlBIDEoxAe+nTDiuaXFf4agOJLRO91wGYYDbLfYInX4J2nUSg3Y+3R3a
128 | oZQ=
129 | -----END AGE ENCRYPTED FILE-----
130 | - recipient: age1ye7q4uvexwhruvm5p9svw9z5tu58v9uk6r9pv4aue8j7v0lmpqus5ug9az
131 | enc: |
132 | -----BEGIN AGE ENCRYPTED FILE-----
133 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBmUGtRMC9oaExVTHVzdEJO
134 | cFBjV1BuajlzNFRIengxNGtTQWFDNUJqWUNjCnlUWG9JT2FsTnRZMHVyYjlpQXY2
135 | dmtFemcza3NaK1VVZ3kvanhCVkdCTkEKLS0tICtEeDdQUUMxUzFTbTlzZkhRaEww
136 | ejhGd0V5ZzU0clVVc084Mm1QN1RPSmMKXMN8IpwaHXj2TC5gBoh5dKsn1JW4kfc/
137 | 60S856zHsdx+l8lw7OudAywLm97z7M0dWP1SrFZunaDMnONw/mFsCQ==
138 | -----END AGE ENCRYPTED FILE-----
139 | lastmodified: "2025-10-28T13:12:14Z"
140 | mac: ENC[AES256_GCM,data:y5qDcno+eyrtGJp+lTkFb50caCgJT/zp9pu4rnXjVHzYGL43WdUMV41AUnOWjTWhWoYJQT3FDlEpgHFKaVSFIJMPoorF+HPDQdjm5XKfuFgn3wuoT7SshZvM6QpIRkYvMa4tDArzZJLLaykwlg0O2TrGg7v7Z6rL8H635rQBSCs=,iv:21bCm5M9caZJwG7u4tWxrjcLnkS9mDRrl63AwWxxuTA=,tag:oTIIVDqfHm8x+3OQ3vv4eA==,type:str]
141 | encrypted_suffix: Templates
142 | version: 3.10.2
143 |
--------------------------------------------------------------------------------