├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug.md
│   │   ├── chore.md
│   │   ├── config.yml
│   │   └── feature.md
│   ├── dependabot.yml
│   └── workflows
│       ├── CI-pipeline.yml
│       ├── automatic-docs-update.yml
│       ├── release-docs.yml
│       └── release.yml
├── .gitignore
├── .golangci.yml
├── CODE_OF_CONDUCT.md
├── LICENSE
├── Makefile
├── README.md
├── SECURITY.md
├── docs
│   ├── CHANGELOG
│   │   ├── changelog-0.1.x.md
│   │   ├── changelog-0.2.x.md
│   │   ├── changelog-0.3.x.md
│   │   ├── changelog-0.4.x.md
│   │   ├── changelog-0.5.x.md
│   │   ├── changelog-0.6.x.md
│   │   ├── changelog-0.7.x.md
│   │   ├── changelog-0.8.x.md
│   │   └── changelog-0.9.x.md
│   ├── CNAME
│   ├── autoscaling
│   │   ├── autoscaling.drawio
│   │   ├── autoscaling.md
│   │   └── autoscaling.png
│   ├── claudie-workflow
│   │   ├── claudie-diagram.drawio
│   │   ├── claudie-diagram.png
│   │   ├── claudie-workflow.md
│   │   ├── done_error_state.png
│   │   ├── manager.excalidraw
│   │   ├── pending_state.png
│   │   ├── rolling_update.png
│   │   └── scheduled_state.png
│   ├── commands
│   │   └── commands.md
│   ├── contributing
│   │   ├── contributing.md
│   │   ├── local-testing.md
│   │   └── release.md
│   ├── creating-claudie-backup
│   │   └── creating-claudie-backup.md
│   ├── docs-guides
│   │   ├── deployment-workflow.md
│   │   └── development.md
│   ├── faq
│   │   └── FAQ.md
│   ├── feedback
│   │   └── feedback-form.md
│   ├── getting-started
│   │   ├── detailed-guide.md
│   │   └── get-started-using-claudie.md
│   ├── hardening
│   │   └── hardening.md
│   ├── hexagon_blue.png
│   ├── http-proxy
│   │   └── http-proxy.md
│   ├── index.md
│   ├── infra-diagram.drawio
│   ├── infra-diagram.png
│   ├── input-manifest
│   │   ├── api-reference.md
│   │   ├── example.md
│   │   ├── external-templates.md
│   │   ├── gpu-example.md
│   │   └── providers
│   │       ├── aws.md
│   │       ├── azure.md
│   │       ├── cloudflare.md
│   │       ├── gcp.md
│   │       ├── genesiscloud.md
│   │       ├── hetzner.md
│   │       ├── oci.md
│   │       └── on-prem.md
│   ├── latency-limitations
│   │   └── latency-limitations.md
│   ├── loadbalancing
│   │   ├── lb-architecture.png
│   │   └── loadbalancing-solution.md
│   ├── logo claudie_blue_no_BG.svg
│   ├── monitoring
│   │   └── grafana.md
│   ├── overrides
│   │   └── main.html
│   ├── roadmap
│   │   └── roadmap.md
│   ├── sitemap
│   │   └── sitemap.md
│   ├── storage
│   │   └── storage-solution.md
│   ├── troubleshooting
│   │   └── troubleshooting.md
│   ├── update
│   │   └── update.md
│   ├── use-cases
│   │   └── use-cases.md
│   └── version-matrix
│       └── version-matrix.md
├── go.mod
├── go.sum
├── internal
│   ├── clusters
│   │   ├── clusters.go
│   │   ├── ping4.go
│   │   └── ping4_test.go
│   ├── command
│   │   ├── cmd.go
│   │   └── cmd_test.go
│   ├── concurrent
│   │   └── exec.go
│   ├── envs
│   │   └── envs.go
│   ├── fileutils
│   │   └── file.go
│   ├── generics
│   │   └── generic.go
│   ├── grpcutils
│   │   └── grpc.go
│   ├── hash
│   │   └── checksum.go
│   ├── healthcheck
│   │   ├── health_checker.go
│   │   └── healthcheck.go
│   ├── kubectl
│   │   └── kubectl.go
│   ├── loggerutils
│   │   ├── log.go
│   │   └── log_test.go
│   ├── manifest
│   │   ├── check_future_domain.go
│   │   ├── manifest.go
│   │   ├── state.go
│   │   ├── state_string.go
│   │   ├── utils.go
│   │   ├── utils_test.go
│   │   ├── validate.go
│   │   ├── validate_kubernetes.go
│   │   ├── validate_load_balancer.go
│   │   ├── validate_node_pool.go
│   │   ├── validate_provider.go
│   │   └── validate_test.go
│   ├── metrics
│   │   └── middleware.go
│   ├── nodepools
│   │   ├── iter.go
│   │   └── nodepools.go
│   ├── nodes
│   │   ├── arch_resolver.go
│   │   └── metadata.go
│   ├── sanitise
│   │   └── sanitise.go
│   ├── spectesting
│   │   ├── spec.go
│   │   └── spec_definitions.go
│   ├── templateUtils
│   │   ├── templates.go
│   │   └── templates_test.go
│   └── worker
│       ├── error.go
│       └── worker.go
├── manifests
│   ├── README.md
│   ├── claudie
│   │   ├── .env
│   │   ├── ansibler.yaml
│   │   ├── builder.yaml
│   │   ├── cluster-rbac
│   │   │   ├── clusterrole.yaml
│   │   │   ├── clusterrolebinding.yaml
│   │   │   └── kustomization.yaml
│   │   ├── crd
│   │   │   ├── claudie.io_inputmanifests.yaml
│   │   │   └── kustomization.yaml
│   │   ├── dynamo
│   │   │   ├── cm.yaml
│   │   │   ├── dynamodb.yaml
│   │   │   ├── job.yaml
│   │   │   ├── kustomization.yaml
│   │   │   └── secrets
│   │   │       ├── access-key
│   │   │       └── secret-key
│   │   ├── kube-eleven.yaml
│   │   ├── kuber.yaml
│   │   ├── kustomization.yaml
│   │   ├── manager.yaml
│   │   ├── minio
│   │   │   ├── cm.yaml
│   │   │   ├── job.yaml
│   │   │   ├── kustomization.yaml
│   │   │   ├── secrets
│   │   │   │   ├── access-key
│   │   │   │   └── secret-key
│   │   │   ├── sts.yaml
│   │   │   └── svc.yaml
│   │   ├── mongo
│   │   │   ├── kustomization.yaml
│   │   │   ├── mongodb.yaml
│   │   │   └── secrets
│   │   │       ├── connectionString
│   │   │       ├── password
│   │   │       └── username
│   │   ├── ns.yaml
│   │   ├── operator.yaml
│   │   └── terraformer.yaml
│   ├── kustomization.yaml
│   ├── network-policies
│   │   ├── network-policy-cilium.yaml
│   │   └── network-policy.yaml
│   └── testing-framework
│       ├── README.md
│       ├── kustomization.yaml
│       ├── test-sets
│       │   ├── autoscaling-1
│       │   │   ├── 1.yaml
│       │   │   ├── 2.yaml
│       │   │   └── 3.yaml
│       │   ├── autoscaling-2
│       │   │   ├── 1.yaml
│       │   │   └── 2.yaml
│       │   ├── proxy-with-hetzner
│       │   │   ├── 1.yaml
│       │   │   ├── 2.yaml
│       │   │   ├── 3.yaml
│       │   │   ├── 4.yaml
│       │   │   ├── 5.yaml
│       │   │   ├── 6.yaml
│       │   │   ├── 7.yaml
│       │   │   └── 8.yaml
│       │   ├── proxy-without-hetzner
│       │   │   ├── 1.yaml
│       │   │   ├── 10.yaml
│       │   │   ├── 2.yaml
│       │   │   ├── 3.yaml
│       │   │   ├── 4.yaml
│       │   │   ├── 5.yaml
│       │   │   ├── 6.yaml
│       │   │   ├── 7.yaml
│       │   │   ├── 8.yaml
│       │   │   └── 9.yaml
│       │   ├── rolling-update-2
│       │   │   ├── 1.yaml
│       │   │   └── 2.yaml
│       │   ├── rolling-update
│       │   │   ├── 1.yaml
│       │   │   └── 2.yaml
│       │   ├── succeeds-on-last-1
│       │   │   ├── 1.yaml
│       │   │   └── 2.yaml
│       │   ├── succeeds-on-last-2
│       │   │   ├── 1.yaml
│       │   │   ├── 2.yaml
│       │   │   └── 3.yaml
│       │   ├── succeeds-on-last-3
│       │   │   ├── 1.yaml
│       │   │   └── 2.yaml
│       │   ├── succeeds-on-last-4
│       │   │   ├── 1.yaml
│       │   │   ├── 2.yaml
│       │   │   └── 3.yaml
│       │   ├── test-set1
│       │   │   ├── 1.yaml
│       │   │   └── 2.yaml
│       │   ├── test-set2
│       │   │   ├── 1.yaml
│       │   │   ├── 2.yaml
│       │   │   └── 3.yaml
│       │   ├── test-set3
│       │   │   ├── 1.yaml
│       │   │   ├── 2.yaml
│       │   │   ├── 3.yaml
│       │   │   └── 4.yaml
│       │   ├── test-set4
│       │   │   ├── 1.yaml
│       │   │   ├── 2.yaml
│       │   │   └── 3.yaml
│       │   └── test-set5
│       │       ├── 1.yaml
│       │       ├── 2.yaml
│       │       └── 3.yaml
│       └── testing-framework.yaml
├── mkdocs.yml
├── proto
│   ├── ansibler.proto
│   ├── claudie-operator.proto
│   ├── kubeEleven.proto
│   ├── kuber.proto
│   ├── manager.proto
│   ├── pb
│   │   ├── ansibler.pb.go
│   │   ├── ansibler_grpc.pb.go
│   │   ├── claudie-operator.pb.go
│   │   ├── claudie-operator_grpc.pb.go
│   │   ├── kubeEleven.pb.go
│   │   ├── kubeEleven_grpc.pb.go
│   │   ├── kuber.pb.go
│   │   ├── kuber_grpc.pb.go
│   │   ├── manager.pb.go
│   │   ├── manager_grpc.pb.go
│   │   ├── spec
│   │   │   ├── dns.pb.go
│   │   │   ├── manifest.pb.go
│   │   │   ├── nodepool.pb.go
│   │   │   ├── provider.pb.go
│   │   │   └── utils.go
│   │   ├── terraformer.pb.go
│   │   └── terraformer_grpc.pb.go
│   ├── spec
│   │   ├── dns.proto
│   │   ├── manifest.proto
│   │   ├── nodepool.proto
│   │   └── provider.proto
│   └── terraformer.proto
├── requirements.txt
└── services
    ├── ansibler
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── client
    │   │   └── client.go
    │   ├── server
    │   │   ├── adapters
    │   │   │   └── inbound
    │   │   │       └── grpc
    │   │   │           ├── adapter.go
    │   │   │           └── ansibler_service.go
    │   │   ├── ansible-playbooks
    │   │   │   ├── apiEndpointChange.yml
    │   │   │   ├── longhorn-req.yml
    │   │   │   ├── proxy
    │   │   │   │   ├── commit-proxy-envs-changes.yml
    │   │   │   │   ├── populate-proxy-envs.yml
    │   │   │   │   └── remove-proxy-envs.yml
    │   │   │   ├── wireguard-uninstall.yml
    │   │   │   ├── wireguard.yml
    │   │   │   └── wireguard
    │   │   │       ├── .travis.yml
    │   │   │       ├── README.md
    │   │   │       ├── defaults
    │   │   │       │   └── main.yml
    │   │   │       ├── handlers
    │   │   │       │   └── main.yml
    │   │   │       ├── meta
    │   │   │       │   └── main.yml
    │   │   │       ├── tasks
    │   │   │       │   ├── configure.yml
    │   │   │       │   ├── install.yml
    │   │   │       │   ├── kill_unattended_upgrades.yml
    │   │   │       │   └── main.yml
    │   │   │       ├── templates
    │   │   │       │   ├── wg-dynamic.conf.j2
    │   │   │       │   └── wg-static.conf.j2
    │   │   │       ├── tests
    │   │   │       │   ├── inventory
    │   │   │       │   └── test.yml
    │   │   │       └── vars
    │   │   │           └── main.yml
    │   │   ├── domain
    │   │   │   └── usecases
    │   │   │       ├── determine_api_endpoint_change.go
    │   │   │       ├── install_node_requirements.go
    │   │   │       ├── install_vpn.go
    │   │   │       ├── remove_utilities.go
    │   │   │       ├── setup_loadbalancers.go
    │   │   │       ├── update_api_endpoint.go
    │   │   │       ├── update_envs_k8s_services.go
    │   │   │       ├── update_proxy_envs_on_nodes.go
    │   │   │       └── usecases.go
    │   │   ├── main.go
    │   │   └── utils
    │   │       ├── ansible.go
    │   │       ├── api_endpoint.go
    │   │       ├── loadbalancers.go
    │   │       └── proxy.go
    │   └── templates
    │       ├── all-node-inventory.goini
    │       ├── conf.gotpl
    │       ├── lb-inventory.goini
    │       ├── nginx.goyml
    │       ├── node-exporter.goyml
    │       ├── proxy-envs.goini
    │       └── templates.go
    ├── autoscaler-adapter
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── claudie_provider
    │   │   ├── claudie_node_groups.go
    │   │   ├── claudie_provider.go
    │   │   └── node_template.go
    │   ├── main.go
    │   └── node_manager
    │       ├── cloud_provider.go
    │       ├── node_manager.go
    │       └── utils.go
    ├── builder
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── adapters
    │   │   └── outbound
    │   │       ├── ansibler_connector.go
    │   │       ├── kube_eleven_connector.go
    │   │       ├── kuber_connector.go
    │   │       └── terraformer_connector.go
    │   ├── domain
    │   │   ├── ports
    │   │   │   ├── ansibler_port.go
    │   │   │   ├── kube_eleven_port.go
    │   │   │   ├── kuber_port.go
    │   │   │   └── terraformer_port.go
    │   │   └── usecases
    │   │       ├── ansibler_caller.go
    │   │       ├── config_processor_v2.go
    │   │       ├── kube_eleven_caller.go
    │   │       ├── kuber_caller.go
    │   │       ├── metrics
    │   │       │   └── metrics.go
    │   │       ├── terraformer_caller.go
    │   │       ├── usecases.go
    │   │       └── workflow_helpers.go
    │   ├── internal
    │   │   ├── builder_context.go
    │   │   └── proxy.go
    │   └── main.go
    ├── claudie-operator
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── client
    │   │   └── client.go
    │   ├── main.go
    │   ├── pkg
    │   │   ├── api
    │   │   │   └── v1beta1
    │   │   │       ├── groupversion_info.go
    │   │   │       ├── inputmanifest_helpers.go
    │   │   │       ├── inputmanifest_types.go
    │   │   │       └── zz_generated.deepcopy.go
    │   │   └── controller
    │   │       ├── controler_helpers.go
    │   │       ├── controller.go
    │   │       ├── controller_types.go
    │   │       └── validator.go
    │   └── server
    │       ├── adapters
    │       │   └── inbound
    │       │       └── grpc
    │       │           ├── adapter.go
    │       │           └── operator_service.go
    │       └── domain
    │           └── usecases
    │               ├── process_manifest_files.go
    │               ├── send_autoscaler_event.go
    │               └── usecases.go
    ├── kube-eleven
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── client
    │   │   └── client.go
    │   ├── server
    │   │   ├── adapters
    │   │   │   └── inbound
    │   │   │       └── grpc
    │   │   │           ├── adapter.go
    │   │   │           └── kube_eleven_service.go
    │   │   ├── domain
    │   │   │   ├── usecases
    │   │   │   │   ├── build_cluster.go
    │   │   │   │   ├── destroy_cluster.go
    │   │   │   │   └── usecases.go
    │   │   │   └── utils
    │   │   │       ├── kube-eleven
    │   │   │       │   ├── kube_eleven.go
    │   │   │       │   ├── types.go
    │   │   │       │   └── utils.go
    │   │   │       └── kubeone
    │   │   │           └── kubeone.go
    │   │   └── main.go
    │   └── templates
    │       ├── kubeone.tpl
    │       └── templates.go
    ├── kuber
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── client
    │   │   └── client.go
    │   ├── server
    │   │   ├── adapters
    │   │   │   └── inbound
    │   │   │       └── grpc
    │   │   │           ├── adapter.go
    │   │   │           └── kuber_service.go
    │   │   ├── domain
    │   │   │   ├── usecases
    │   │   │   │   ├── cilium_rollout_restart.go
    │   │   │   │   ├── delete_cluster_metadata.go
    │   │   │   │   ├── delete_kubeconfig.go
    │   │   │   │   ├── delete_nodes.go
    │   │   │   │   ├── destroy_cluster_autoscaler.go
    │   │   │   │   ├── patch_cluster_info_configmap.go
    │   │   │   │   ├── patch_kube_proxy.go
    │   │   │   │   ├── patch_kubeadm.go
    │   │   │   │   ├── patch_nodes.go
    │   │   │   │   ├── remove_lb_scrape_config.go
    │   │   │   │   ├── setup_cluster_autoscaler.go
    │   │   │   │   ├── setup_storage.go
    │   │   │   │   ├── store_cluster_metadata.go
    │   │   │   │   ├── store_kubeconfig.go
    │   │   │   │   ├── store_lb_scrape_config.go
    │   │   │   │   └── usecases.go
    │   │   │   └── utils
    │   │   │       ├── autoscaler
    │   │   │       │   ├── autoscaler_test.go
    │   │   │       │   ├── cluster_autoscaler.go
    │   │   │       │   └── cluster_autoscaler_test.go
    │   │   │       ├── longhorn
    │   │   │       │   └── longhorn.go
    │   │   │       ├── nodes
    │   │   │       │   ├── delete.go
    │   │   │       │   ├── patch.go
    │   │   │       │   └── pvc_replication_utils.go
    │   │   │       ├── scrape-config
    │   │   │       │   └── scrape_config.go
    │   │   │       ├── secret
    │   │   │       │   └── secret.go
    │   │   │       └── utils.go
    │   │   ├── main.go
    │   │   └── manifests
    │   │       ├── claudie-defaults.yaml
    │   │       └── longhorn.yaml
    │   └── templates
    │       ├── cluster-autoscaler.goyaml
    │       ├── enable-ca.goyaml
    │       ├── scrape-config-manifest.goyaml
    │       ├── scrape-config.goyaml
    │       ├── storage-class.goyaml
    │       └── templates.go
    ├── manager
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── client
    │   │   ├── api.go
    │   │   ├── client.go
    │   │   ├── crud_api.go
    │   │   ├── manifest_api.go
    │   │   ├── retry.go
    │   │   ├── retry_test.go
    │   │   ├── state_api.go
    │   │   └── task_api.go
    │   ├── cmd
    │   │   └── api-server
    │   │       └── main.go
    │   └── internal
    │       ├── service
    │       │   ├── create_desired_state.go
    │       │   ├── create_desired_state_test.go
    │       │   ├── existing_state.go
    │       │   ├── existing_state_test.go
    │       │   ├── grpc.go
    │       │   ├── handler_get_config.go
    │       │   ├── handler_list_configs.go
    │       │   ├── handler_mark_for_deletion.go
    │       │   ├── handler_next_task.go
    │       │   ├── handler_task_complete.go
    │       │   ├── handler_task_update.go
    │       │   ├── handler_update_nodepool.go
    │       │   ├── handler_update_nodepool_test.go
    │       │   ├── handler_upsert_manifest.go
    │       │   ├── metrics.go
    │       │   ├── rolling_update.go
    │       │   ├── rolling_update_lbs.go
    │       │   ├── rolling_update_lbs_test.go
    │       │   ├── rolling_update_test.go
    │       │   ├── schedule_tasks.go
    │       │   ├── schedule_tasks_test.go
    │       │   ├── watchers.go
    │       │   └── watchers_test.go
    │       └── store
    │           ├── api.go
    │           ├── convert_db_grpc.go
    │           ├── convert_db_grpc_test.go
    │           ├── in_memory_store.go
    │           └── mongo_client.go
    ├── terraformer
    │   ├── .dockerignore
    │   ├── Dockerfile
    │   ├── client
    │   │   └── client.go
    │   └── server
    │       ├── adapters
    │       │   ├── inbound
    │       │   │   └── grpc
    │       │   │       ├── adapter.go
    │       │   │       └── terraformer_service.go
    │       │   └── outbound
    │       │       ├── aws_envs.go
    │       │       ├── dynamodb_adapter.go
    │       │       ├── immutable_endpoint_resolver.go
    │       │       └── s3_adapter.go
    │       ├── domain
    │       │   ├── ports
    │       │   │   ├── dynamodb_port.go
    │       │   │   └── statestorage_port.go
    │       │   ├── usecases
    │       │   │   ├── build_infrastructure.go
    │       │   │   ├── destroy_infrastructure.go
    │       │   │   └── usecases.go
    │       │   └── utils
    │       │       ├── cluster-builder
    │       │       │   ├── cluster_builder.go
    │       │       │   └── cluster_builder_test.go
    │       │       ├── kubernetes
    │       │       │   └── kubernetes.go
    │       │       ├── loadbalancer
    │       │       │   ├── dns.go
    │       │       │   └── loadbalancer.go
    │       │       ├── templates
    │       │       │   ├── backend.go
    │       │       │   ├── backend.tpl
    │       │       │   ├── doc.go
    │       │       │   ├── provider.go
    │       │       │   ├── providers.tpl
    │       │       │   ├── structures.go
    │       │       │   ├── templates.go
    │       │       │   └── templates_test.go
    │       │       └── terraform
    │       │           └── terraform.go
    │       └── main.go
    └── testing-framework
        ├── .dockerignore
        ├── Dockerfile
        ├── claudie_test.go
        ├── inputmanifest.go
        ├── test_autoscaler.go
        ├── test_longhorn.go
        └── utils.go
/.github/ISSUE_TEMPLATE/bug.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug
3 | about: Report a bug
4 | title: 'Bug: '
5 | labels: bug
6 | assignees: ''
7 | ---
8 |
9 |
12 |
13 | ### Current Behaviour
14 |
15 |
16 | ### Expected Behaviour
17 |
18 |
19 | ### Steps To Reproduce
20 |
27 |
28 | ### Anything else to note
29 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/chore.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Chore
3 | about: Log a new chore
4 | title: 'Chore: '
5 | labels: chore
6 | assignees: ''
7 | ---
8 |
9 |
12 |
13 | ### Description
14 |
15 |
16 | ### Exit criteria
17 |
18 | - [ ] Task...
19 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/config.yml:
--------------------------------------------------------------------------------
1 | blank_issues_enabled: true
2 | contact_links:
3 | - name: Ask question
4 | url: https://github.com/berops/claudie/discussions/new
5 | about: We use Discussions for questions
6 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature
3 | about: Create a feature request
4 | title: 'Feature: '
5 | labels: feature
6 | assignees: ''
7 | ---
8 |
9 |
12 |
13 | ### Motivation
14 |
15 |
16 | ### Description
17 |
18 |
19 | ### Exit criteria
20 |
21 | - [ ] Task...
22 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea/
2 | *.ini
3 | .terraform*
4 | playground
5 | clusters
6 | .vscode
7 | mongodb
8 | *.tfstate*
9 | *.pem
10 | kubeone.yaml
11 | cluster.tar.gz
12 | cluster-kubeconfig
13 | *.conf
14 | __debug_bin
15 | services/testing-framework/test-sets
16 | services/terraformer/templates
17 | /venv/
18 | /site/
19 | bin
20 | .manifest*
21 | .DS_Store
--------------------------------------------------------------------------------
/.golangci.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | linters:
3 | default: none
4 | enable:
5 | - asciicheck
6 | - bidichk
7 | - bodyclose
8 | - dogsled
9 | - durationcheck
10 | - errcheck
11 | - errname
12 | - errorlint
13 | - goconst
14 | - gomodguard
15 | - goprintffuncname
16 | - govet
17 | - ineffassign
18 | - misspell
19 | - noctx
20 | - predeclared
21 | - staticcheck
22 | - unconvert
23 | - unparam
24 | - unused
25 | - wastedassign
26 | - whitespace
27 | settings:
28 | govet:
29 | disable:
30 | - fieldalignment
31 | - shadow
32 | enable-all: true
33 | exclusions:
34 | generated: lax
35 | presets:
36 | - comments
37 | - common-false-positives
38 | - legacy
39 | - std-error-handling
40 | paths:
41 | - third_party$
42 | - builtin$
43 | - examples$
44 | issues:
45 | max-same-issues: 50
46 | formatters:
47 | enable:
48 | - gofmt
49 | exclusions:
50 | generated: lax
51 | paths:
52 | - third_party$
53 | - builtin$
54 | - examples$
55 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security policy
2 |
3 | Thanks for helping to make this project safe for everyone.
4 |
5 | ## Reporting security issues
6 |
7 | If you discover a security issue in this repo, please report it to us at
8 | **security@berops.com**.
9 |
10 | You will receive a response from us within a few days.
11 |
12 | If the issue is confirmed, we will release a patch as soon as possible.
13 |
--------------------------------------------------------------------------------
/docs/CHANGELOG/changelog-0.5.x.md:
--------------------------------------------------------------------------------
1 | # Claudie `v0.5`
2 |
3 | !!! warning "Due to a breaking change caused by swapping the CNI used in the Kubernetes cluster, `v0.5.x` is not backwards compatible with `v0.4.x`"
4 |
5 | ## Deployment
6 |
7 | To deploy Claudie `v0.5.X`, please:
8 |
9 | 1. Download the archive and checksums from the [release page](https://github.com/berops/claudie/releases)
10 |
11 | 2. Verify the archive against the `sha256` checksums (optional)
12 |
13 | ```sh
14 | sha256sum -c --ignore-missing checksums.txt
15 | ```
16 |
17 | If the archive is valid, the output is one of the following, depending on which archive you downloaded:
18 |
19 | ```sh
20 | claudie.tar.gz: OK
21 | ```
22 |
23 | or
24 |
25 | ```sh
26 | claudie.zip: OK
27 | ```
28 |
29 | or both.
30 |
31 | 3. Lastly, unpack the archive and deploy using `kubectl`
32 |
33 | > We strongly recommend changing the default credentials for MongoDB, MinIO and DynamoDB before you deploy Claudie. To do this, change the contents of the files in `mongo/secrets`, `minio/secrets` and `dynamo/secrets` respectively.
34 |
35 | ```sh
36 | kubectl apply -k .
37 | ```
38 |
39 | ## v0.5.0
40 |
41 | ### Features
42 |
43 | - Use cilium as the cluster CNI [#984](https://github.com/berops/claudie/pull/984)
44 | - Update to the latest longhorn version v1.5.1 [#984](https://github.com/berops/claudie/pull/984)
45 |
46 | ### Known issues
47 |
48 | - No known issues since the last release
49 |
50 | ## v0.5.1
51 |
52 | ### Bug fixes
53 |
54 | - Fix an issue where node deletion from the cluster wasn't idempotent [#1008](https://github.com/berops/claudie/pull/1008)
55 |
--------------------------------------------------------------------------------
/docs/CNAME:
--------------------------------------------------------------------------------
1 | docs.claudie.io
--------------------------------------------------------------------------------
/docs/autoscaling/autoscaling.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/autoscaling/autoscaling.png
--------------------------------------------------------------------------------
/docs/claudie-workflow/claudie-diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/claudie-workflow/claudie-diagram.png
--------------------------------------------------------------------------------
/docs/claudie-workflow/done_error_state.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/claudie-workflow/done_error_state.png
--------------------------------------------------------------------------------
/docs/claudie-workflow/pending_state.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/claudie-workflow/pending_state.png
--------------------------------------------------------------------------------
/docs/claudie-workflow/rolling_update.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/claudie-workflow/rolling_update.png
--------------------------------------------------------------------------------
/docs/claudie-workflow/scheduled_state.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/claudie-workflow/scheduled_state.png
--------------------------------------------------------------------------------
/docs/contributing/contributing.md:
--------------------------------------------------------------------------------
1 | # Contributing
2 |
3 | ## Bug reports
4 |
5 | When you encounter a bug, please create a new [issue](https://github.com/berops/claudie/issues/new/choose) and use our bug template.
6 | Before you submit, please check:
7 |
8 | - ...that the issue you want to open is not a duplicate
9 | - ...that you submitted the logs/screenshots of any errors and a concise way to reproduce the issue
10 | - ...that you included the input manifest you used
11 |
12 | !!! warning "be careful not to include your cloud credentials"
13 |
--------------------------------------------------------------------------------
/docs/contributing/release.md:
--------------------------------------------------------------------------------
1 | # How to release a new version of Claudie
2 |
3 | The release process of Claudie consists of a few manual steps and a few automated steps.
4 |
5 | ## Manual steps
6 |
7 | Whoever is responsible for creating a new release has to:
8 |
9 | 1. Write a new entry to a relevant [Changelog document](https://github.com/berops/claudie/tree/master/docs/CHANGELOG)
10 | 2. Add release notes to the Releases page
11 | 3. Publish a release
12 |
13 | ## Automated steps
14 |
15 | After a new release is published, a [release pipeline](https://github.com/berops/claudie/blob/master/.github/workflows/release.yml) and a [release-docs pipeline](https://github.com/berops/claudie/blob/master/.github/workflows/release-docs.yml) run.
16 |
17 | A [release pipeline](https://github.com/berops/claudie/blob/master/.github/workflows/release.yml) consists of the following steps:
18 |
19 | 1. Build new images tagged with the release tag
20 | 2. Push them to the container registry where anyone can pull them
21 | 3. Add Claudie manifest files to the release assets, with image tags referencing this release
22 |
23 | A [release-docs pipeline](https://github.com/berops/claudie/blob/master/.github/workflows/release-docs.yml) consists of the following steps:
24 |
25 | 1. If there is a new Changelog file:
26 | 1. Check out a new feature branch
27 | 2. Add a reference to the new Changelog file in [mkdocs.yml](https://github.com/berops/claudie/blob/master/mkdocs.yml)
28 | 3. Create a PR to merge the changes from the new feature branch into master (a PR is required to get the changes into the `master` branch while complying with branch protection)
29 | 2. Deploy the new version of the docs on [docs.claudie.io](https://docs.claudie.io)
--------------------------------------------------------------------------------
/docs/feedback/feedback-form.md:
--------------------------------------------------------------------------------
1 |
2 |
16 |
17 |
18 |
23 |
--------------------------------------------------------------------------------
/docs/getting-started/get-started-using-claudie.md:
--------------------------------------------------------------------------------
1 | {%
2 | include-markdown "../../README.md"
3 | start=""
4 | end=""
5 | %}
6 |
--------------------------------------------------------------------------------
/docs/hardening/hardening.md:
--------------------------------------------------------------------------------
1 | # Claudie Hardening
2 |
3 | In this section we'll describe how to further harden the security of the default
4 | claudie deployment.
5 |
6 | ## Passwords
7 |
8 | When deployed from the default manifests, claudie uses simple default passwords for MongoDB, DynamoDB
9 | and MinIO.
10 |
11 | You can find the passwords at these paths:
12 |
13 | ```
14 | manifests/claudie/mongo/secrets
15 | manifests/claudie/minio/secrets
16 | manifests/claudie/dynamo/secrets
17 | ```
18 |
19 | It is highly recommended that you change these passwords to more secure ones.
20 |
21 | ## Network Policies
22 |
23 | The default deployment of claudie comes without any network policies, as network policies
24 | may not be fully supported, depending on the CNI used on the Management cluster.
25 |
26 | We have a set of network policies pre-defined that can be found in:
27 |
28 | ```
29 | manifests/network-policies
30 | ```
31 |
32 | Currently, we have a cilium-specific network policy that uses `CiliumNetworkPolicy`, and another that
33 | uses the standard `NetworkPolicy`, which should be supported by most network plugins.
34 |
35 | To install the network policies, simply execute one of the following commands:
36 |
37 | ```
38 | # for clusters using cilium as their CNI
39 | kubectl apply -f https://github.com/berops/claudie/releases/latest/download/network-policy-cilium.yaml
40 | ```
41 |
42 | ```
43 | # other
44 | kubectl apply -f https://github.com/berops/claudie/releases/latest/download/network-policy.yaml
45 | ```
--------------------------------------------------------------------------------
/docs/hexagon_blue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/hexagon_blue.png
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | # What is Claudie
2 |
3 | Claudie is a platform for managing multi-cloud and hybrid-cloud Kubernetes clusters. These Kubernetes clusters can mix and match nodepools from various cloud providers, e.g. a single cluster can have a nodepool in AWS, another in GCP and another one on-premises. This is our opinionated way to build multi-cloud and hybrid-cloud Kubernetes infrastructure. On top of that Claudie supports Cluster Autoscaler on the managed clusters.
4 |
5 | ## Vision
6 |
7 | The purpose of Claudie is to become the final Kubernetes engine you'll ever need. It aims to build clusters that leverage features and costs across multiple cloud vendors and on-prem datacenters. A Kubernetes that you won't ever need to migrate away from.
8 |
9 | ## Use cases
10 |
11 | Claudie has been built as an answer to the following Kubernetes challenges:
12 |
13 | * Cost savings
14 | * Data locality & compliance (e.g. GDPR)
15 | * Managed Kubernetes for providers that do not offer it
16 | * Cloud bursting
17 | * Service interconnect
18 |
19 | You can read more [here](./use-cases/use-cases.md).
20 |
21 | ## Features
22 |
23 | Claudie provides the following features and functionalities:
24 |
25 | * Manage multi-cloud and hybrid-cloud Kubernetes clusters
26 | * Management via IaC
27 | * Fast scale-up/scale-down of your infrastructure
28 | * Loadbalancing
29 | * Persistent storage volumes
30 |
31 | See more in the How Claudie works sections.
32 |
33 | ## What to do next
34 |
35 | In case you are not sure where to go next, you can simply start with our [Getting Started Guide](./getting-started/get-started-using-claudie.md) or read our documentation [sitemap](./sitemap/sitemap.md).
36 |
37 | If you need help or want to have a chat with us, feel free to join our channel on [kubernetes Slack workspace](https://kubernetes.slack.com/archives/C05SW4GKPL3) (get invite [here](https://communityinviter.com/apps/kubernetes/community)).
38 |
--------------------------------------------------------------------------------
/docs/infra-diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/infra-diagram.png
--------------------------------------------------------------------------------
/docs/loadbalancing/lb-architecture.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/berops/claudie/926f566e5226bf6adf9a82214b4e59a8673d91aa/docs/loadbalancing/lb-architecture.png
--------------------------------------------------------------------------------
/docs/overrides/main.html:
--------------------------------------------------------------------------------
1 | {% extends "base.html" %}
2 |
3 | {% block header %}
4 | {{ super() }}
5 |
6 | {% endblock %}
--------------------------------------------------------------------------------
/docs/roadmap/roadmap.md:
--------------------------------------------------------------------------------
1 | # Roadmap for Claudie
2 |
3 | - [ ] CLI read-only interface
4 | - [ ] Allow different Terraform template versions for a single provider
5 | - [ ] Override for all Terraform templates
6 | - [ ] Override for all manifest defaults
7 | - [ ] Service type: loadbalancer
8 | - [ ] Support for Spot & preemptible instances
9 | - [ ] Roadwarrior/Edge mode (on-prem node behind a NAT)
10 |
11 | v0.8.1:
12 | - [x] Support for more cloud providers
13 | - [x] OCI
14 | - [x] AWS
15 | - [x] Azure
16 | - [x] Cloudflare
17 | - [x] GenesisCloud
18 | - [x] Hybrid-cloud support (on-premises)
19 | - [x] `arm64` support for the nodepools
20 | - [x] App-level metrics
21 | - [x] Autoscaler
22 |
--------------------------------------------------------------------------------
/docs/version-matrix/version-matrix.md:
--------------------------------------------------------------------------------
1 | # Version matrix
2 |
3 | In the following table, you can find the supported Kubernetes and OS versions for the latest Claudie versions.
4 |
5 | | Claudie Version | Kubernetes versions | OS versions |
6 | | --------------- | ------------------- | ----------- |
7 | | v0.6.x | 1.24.x, 1.25.x, 1.26.x | Ubuntu 22.04 |
8 | | v0.7.0 | 1.24.x, 1.25.x, 1.26.x | Ubuntu 22.04 |
9 | | v0.7.1-x | 1.25.x, 1.26.x, 1.27.x | Ubuntu 22.04 |
10 | | v0.8.0 | 1.25.x, 1.26.x, 1.27.x | Ubuntu 22.04 |
11 | | v0.8.1 | 1.27.x, 1.28.x, 1.29.x | Ubuntu 22.04 |
12 | | v0.9.0 | 1.27.x, 1.28.x, 1.29.x, 1.30.x | Ubuntu 22.04 (Ubuntu 24.04 on Hetzner and Azure) |
13 | | v0.9.1 | 1.29.x, 1.30.x, 1.31.x | Ubuntu 22.04 (Ubuntu 24.04 on Hetzner and Azure) |
14 |
--------------------------------------------------------------------------------
/internal/concurrent/exec.go:
--------------------------------------------------------------------------------
1 | package concurrent
2 |
3 | import "golang.org/x/sync/errgroup"
4 |
5 | func Exec[K any](items []K, f func(index int, item K) error) error {
6 | group := errgroup.Group{}
7 |
8 | for i, item := range items {
9 | group.Go(func() error {
10 | return f(i, item)
11 | })
12 | }
13 |
14 | return group.Wait()
15 | }
16 |
--------------------------------------------------------------------------------
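
A minimal usage sketch for `concurrent.Exec` (the `github.com/berops/claudie/internal/concurrent` import path is inferred from the module layout; as an `internal` package it is only importable from within this repository, and the closure's capture of `i` and `item` relies on Go 1.22+ per-iteration loop-variable semantics):

```go
package main

import (
	"fmt"

	"github.com/berops/claudie/internal/concurrent"
)

func main() {
	clusters := []string{"dev", "staging", "prod"} // hypothetical work items

	// Each callback runs in its own goroutine; Exec blocks until all of them
	// finish and returns the first non-nil error, if any.
	err := concurrent.Exec(clusters, func(i int, name string) error {
		fmt.Printf("reconciling cluster %d: %s\n", i, name)
		return nil
	})
	if err != nil {
		fmt.Println("reconciliation failed:", err)
	}
}
```
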
/internal/fileutils/file.go:
--------------------------------------------------------------------------------
1 | package fileutils
2 |
3 | import (
4 | "os"
5 | "path/filepath"
6 | )
7 |
8 | // CreateKey writes the given key to a file.
9 | // The key filename is specified by its outputPath and KeyName operands.
10 | func CreateKey(key string, outputPath string, keyName string) error {
11 | keyFileName := filepath.Join(outputPath, keyName)
12 | return os.WriteFile(keyFileName, []byte(key), 0600)
13 | }
14 |
15 | func DirectoryExists(dir string) bool {
16 | _, err := os.Stat(dir)
17 | return err == nil
18 | }
19 |
20 | func CreateDirectory(dir string) error {
21 | if _, err := os.Stat(dir); os.IsNotExist(err) {
22 | if err := os.MkdirAll(dir, os.ModePerm); err != nil {
23 | return err
24 | }
25 | }
26 | return nil
27 | }
28 |
--------------------------------------------------------------------------------
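
A short usage sketch of the helpers above, with hypothetical paths and key material (the `internal/` import path is again inferred from the module layout):

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/berops/claudie/internal/fileutils"
)

func main() {
	dir := filepath.Join("clusters", "my-cluster") // hypothetical output directory

	// CreateDirectory is a no-op when the directory already exists.
	if err := fileutils.CreateDirectory(dir); err != nil {
		log.Fatal(err)
	}

	// CreateKey writes the file with 0600 permissions, which is what SSH
	// clients expect for private keys.
	if err := fileutils.CreateKey("<private-key-material>", dir, "private.pem"); err != nil {
		log.Fatal(err)
	}

	log.Println("directory exists:", fileutils.DirectoryExists(dir))
}
```
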
/internal/generics/generic.go:
--------------------------------------------------------------------------------
1 | package generics
2 |
3 | import (
4 | "cmp"
5 | "iter"
6 | "maps"
7 | "slices"
8 |
9 | "golang.org/x/exp/constraints"
10 | )
11 |
12 | type inorder interface {
13 | constraints.Ordered
14 | comparable
15 | }
16 |
17 | func IterateMapInOrder[M ~map[K]V, K inorder, V any](m M) iter.Seq2[K, V] {
18 | keys := slices.Collect(maps.Keys(m))
19 | slices.SortStableFunc(keys, func(first, second K) int { return cmp.Compare(first, second) })
20 | return func(yield func(K, V) bool) {
21 | for _, k := range keys {
22 | if !yield(k, m[k]) {
23 | return
24 | }
25 | }
26 | }
27 | }
28 |
29 | // MergeMaps merges two or more maps into a single map; keys in later maps override earlier ones.
30 | func MergeMaps[M ~map[K]V, K comparable, V any](maps ...M) M {
31 | merged := make(M)
32 | for _, m := range maps {
33 | for k, v := range m {
34 | merged[k] = v
35 | }
36 | }
37 | return merged
38 | }
39 |
40 | func RemoveDuplicates[K comparable](slice []K) []K {
41 | keys := make(map[K]bool)
42 | list := []K{}
43 | for _, entry := range slice {
44 | if _, value := keys[entry]; !value {
45 | keys[entry] = true
46 | list = append(list, entry)
47 | }
48 | }
49 | return list
50 | }
51 |
--------------------------------------------------------------------------------
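
A usage sketch of the three helpers (ranging over `IterateMapInOrder` uses Go 1.23+ range-over-func; the import path is assumed from the module layout):

```go
package main

import (
	"fmt"

	"github.com/berops/claudie/internal/generics"
)

func main() {
	base := map[string]int{"a": 1, "b": 2}
	override := map[string]int{"b": 20, "c": 3}

	// Later maps win on key collisions, so "b" resolves to 20.
	merged := generics.MergeMaps(base, override)

	// IterateMapInOrder yields entries sorted by key, giving deterministic
	// output despite Go's randomized map iteration order.
	for k, v := range generics.IterateMapInOrder(merged) {
		fmt.Printf("%s=%d\n", k, v) // a=1, b=20, c=3
	}

	// RemoveDuplicates keeps the first occurrence of each element.
	fmt.Println(generics.RemoveDuplicates([]string{"x", "y", "x"})) // [x y]
}
```
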
/internal/hash/checksum.go:
--------------------------------------------------------------------------------
1 | package hash
2 |
3 | import (
4 | "crypto/sha512"
5 | "math/rand"
6 | "time"
7 | )
8 |
9 | const (
10 | charset = "abcdefghijklmnopqrstuvwxyz" + "0123456789"
11 |
12 | // Length is the length of the randomly generated "hash" that is used throughout claudie in different places.
13 | // be CAUTIOUS when changing this value as this will break backwards compatibility and also invariants within claudie.
14 | // Dynamic nodepools are assigned a randomly generated hash. Node pool names have a max constraint of 15 characters;
15 | // changing the hash length will invalidate this.
16 | Length = 7
17 | )
18 |
19 | var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))
20 |
21 | func Digest(data string) []byte {
22 | digest := sha512.Sum512_256([]byte(data))
23 | return digest[:]
24 | }
25 |
26 | func Digest128(data string) []byte { return Digest(data)[:16] }
27 |
28 | func Create(length int) string {
29 | b := make([]byte, length)
30 | for i := range b {
31 | b[i] = charset[seededRand.Intn(len(charset))]
32 | }
33 | return string(b)
34 | }
35 |
--------------------------------------------------------------------------------
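
A usage sketch showing the random-suffix and checksum helpers above (import path assumed from the module layout):

```go
package main

import (
	"encoding/hex"
	"fmt"

	"github.com/berops/claudie/internal/hash"
)

func main() {
	// Create produces a random lowercase-alphanumeric string; hash.Length (7)
	// is the length used for the suffixes of dynamic nodepool names.
	suffix := hash.Create(hash.Length)
	fmt.Println("nodepool suffix:", suffix)

	// Digest is a deterministic SHA-512/256 checksum of its input.
	fmt.Println("digest:", hex.EncodeToString(hash.Digest("input-manifest")))
}
```
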
/internal/healthcheck/health_checker.go:
--------------------------------------------------------------------------------
1 | package healthcheck
2 |
3 | type HealthChecker interface {
4 | // HealthCheck checks whether the underlying connection is still healthy.
5 | HealthCheck() error
6 | }
7 |
--------------------------------------------------------------------------------
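
Since `HealthChecker` is a single-method interface, a hypothetical implementation (not taken from this repository) might wrap a gRPC client connection like this:

```go
package example

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"

	"github.com/berops/claudie/internal/healthcheck"
)

// grpcHealthChecker treats the wrapped gRPC connection as healthy while it
// is in the Ready or Idle state.
type grpcHealthChecker struct {
	conn *grpc.ClientConn
}

// Compile-time check that the interface is satisfied.
var _ healthcheck.HealthChecker = (*grpcHealthChecker)(nil)

func (g *grpcHealthChecker) HealthCheck() error {
	if s := g.conn.GetState(); s != connectivity.Ready && s != connectivity.Idle {
		return fmt.Errorf("grpc connection unhealthy: %s", s)
	}
	return nil
}
```
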
/internal/manifest/state_string.go:
--------------------------------------------------------------------------------
1 | // Code generated by "stringer -type=State"; DO NOT EDIT.
2 |
3 | package manifest
4 |
5 | import "strconv"
6 |
7 | func _() {
8 | // An "invalid array index" compiler error signifies that the constant values have changed.
9 | // Re-run the stringer command to generate them again.
10 | var x [1]struct{}
11 | _ = x[Pending-0]
12 | _ = x[Scheduled-1]
13 | _ = x[Done-2]
14 | _ = x[Error-3]
15 | }
16 |
17 | const _State_name = "PendingScheduledDoneError"
18 |
19 | var _State_index = [...]uint8{0, 7, 16, 20, 25}
20 |
21 | func (i State) String() string {
22 | if i < 0 || i >= State(len(_State_index)-1) {
23 | return "State(" + strconv.FormatInt(int64(i), 10) + ")"
24 | }
25 | return _State_name[_State_index[i]:_State_index[i+1]]
26 | }
27 |
--------------------------------------------------------------------------------
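
For reference, the generated stringer above implies that `state.go` declares the enum roughly as follows (a sketch reconstructed from the `_State_name`/`_State_index` tables, not the authoritative definition):

```go
package manifest

// State describes the lifecycle stage of an input manifest.
// Values implied by the generated stringer: Pending=0, Scheduled=1, Done=2, Error=3.
type State int

const (
	Pending State = iota
	Scheduled
	Done
	Error
)
```
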
/internal/manifest/utils_test.go:
--------------------------------------------------------------------------------
1 | package manifest
2 |
3 | import (
4 | "github.com/berops/claudie/proto/pb/spec"
5 | "testing"
6 | )
7 |
8 | func Test_commitHash(t *testing.T) {
9 | type args struct {
10 | tmpl *spec.TemplateRepository
11 | }
12 | tests := []struct {
13 | name string
14 | args args
15 | want string
16 | wantErr bool
17 | }{
18 | {
19 | name: "ok-tag-0.8.1",
20 | args: args{
21 | tmpl: &spec.TemplateRepository{
22 | Repository: "https://github.com/berops/claudie",
23 | Tag: func() *string { s := "v0.8.1"; return &s }(),
24 | },
25 | },
26 | want: "dc323eb49b5023306a5a70789d5a192f68e0a3a1",
27 | wantErr: false,
28 | },
29 | }
30 |
31 | for _, tt := range tests {
32 | t.Run(tt.name, func(t *testing.T) {
33 | err := FetchCommitHash(tt.args.tmpl)
34 | if (err != nil) != tt.wantErr {
35 | t.Errorf("commitHash() error = %v, wantErr %v", err, tt.wantErr)
36 | return
37 | }
38 | if tt.args.tmpl.CommitHash != tt.want {
39 | t.Errorf("commitHash() got = %v, want %v", tt.args.tmpl.CommitHash, tt.want)
40 | }
41 | })
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/internal/metrics/middleware.go:
--------------------------------------------------------------------------------
1 | package metrics
2 |
3 | import (
4 | "context"
5 | "time"
6 |
7 | "github.com/prometheus/client_golang/prometheus"
8 | "google.golang.org/grpc"
9 | )
10 |
11 | const (
12 | MethodLabel = "method"
13 | )
14 |
15 | var (
16 | RequestsInFlight = prometheus.NewGauge(
17 | prometheus.GaugeOpts{
18 | Name: "claudie_grpc_request_in_flight",
19 | Help: "Number of grpc requests currently handled",
20 | },
21 | )
22 |
23 | RequestCount = prometheus.NewCounterVec(
24 | prometheus.CounterOpts{
25 | Name: "claudie_grpc_request_count",
26 | Help: "Total number of gRPC API calls.",
27 | },
28 | []string{MethodLabel},
29 | )
30 |
31 | Latency = prometheus.NewHistogramVec(
32 | prometheus.HistogramOpts{
33 | Name: "claudie_grpc_call_latency_seconds",
34 | Help: "Latency of gRPC API calls in seconds.",
35 | Buckets: []float64{
36 | .005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, // up to 10sec
37 | 20, 30, 60, 300, 600, // up to 10 min
38 | 1200, 1800, 2700, 3600, // up to 1hour
39 | },
40 | },
41 | []string{MethodLabel},
42 | )
43 |
44 | ErrorCount = prometheus.NewCounterVec(
45 | prometheus.CounterOpts{
46 | Name: "grpc_error_count",
47 | Help: "Total number of gRPC API call errors.",
48 | },
49 | []string{MethodLabel},
50 | )
51 | )
52 |
53 | func MetricsMiddleware(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
54 | RequestsInFlight.Inc()
55 |
56 | now := time.Now()
57 | resp, err := handler(ctx, req)
58 | duration := time.Since(now)
59 |
60 | RequestCount.WithLabelValues(info.FullMethod).Inc()
61 | Latency.WithLabelValues(info.FullMethod).Observe(duration.Seconds())
62 |
63 | if err != nil {
64 | ErrorCount.WithLabelValues(info.FullMethod).Inc()
65 | }
66 |
67 | RequestsInFlight.Dec()
68 | return resp, err
69 | }
70 |
71 | func MustRegisterCounters() {
72 | prometheus.MustRegister(RequestCount)
73 | prometheus.MustRegister(Latency)
74 | prometheus.MustRegister(ErrorCount)
75 | prometheus.MustRegister(RequestsInFlight)
76 | }
77 |
--------------------------------------------------------------------------------
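
A wiring sketch: `MetricsMiddleware` has exactly the `grpc.UnaryServerInterceptor` signature, so it can be chained into a server while the default Prometheus registry is exposed over HTTP (the ports below are illustrative; `50055` mirrors `MANAGER_PORT` from `manifests/claudie/.env`):

```go
package main

import (
	"net"
	"net/http"

	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"

	"github.com/berops/claudie/internal/metrics"
)

func main() {
	// Register the collectors once at startup; MustRegister panics on
	// duplicate registration.
	metrics.MustRegisterCounters()

	// Every unary RPC now updates the in-flight gauge, request counter,
	// latency histogram and error counter.
	srv := grpc.NewServer(grpc.ChainUnaryInterceptor(metrics.MetricsMiddleware))

	// Expose the default Prometheus registry for scraping.
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	go func() { _ = http.ListenAndServe(":9090", mux) }()

	lis, err := net.Listen("tcp", ":50055")
	if err != nil {
		panic(err)
	}
	_ = srv.Serve(lis)
}
```
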
/internal/worker/error.go:
--------------------------------------------------------------------------------
1 | package worker
2 |
3 | import "github.com/rs/zerolog/log"
4 |
5 | // ErrorLogger function defines a callback for handling errors
6 | func ErrorLogger(err error) {
7 | log.Err(err).Send()
8 | }
9 |
--------------------------------------------------------------------------------
/internal/worker/worker.go:
--------------------------------------------------------------------------------
1 | package worker
2 |
3 | import (
4 | "context"
5 | "time"
6 | )
7 |
8 | // Worker invokes fn on every tick and reports any error to errorHandler.
9 | type Worker struct {
10 | tick *time.Ticker
11 | fn func() error
12 | errorHandler func(err error)
13 | ctx context.Context
14 | }
15 |
16 | // NewWorker creates a new Worker structure
17 | func NewWorker(ctx context.Context, d time.Duration, fn func() error, eh func(err error)) *Worker {
18 | return &Worker{
19 | ctx: ctx,
20 | fn: fn,
21 | errorHandler: eh,
22 | tick: time.NewTicker(d),
23 | }
24 | }
25 |
26 | // Run starts the handling loop
27 | func (w *Worker) Run() {
28 | for {
29 | select {
30 | case <-w.tick.C:
31 | if err := w.fn(); err != nil {
32 | if w.errorHandler != nil {
33 | w.errorHandler(err)
34 | }
35 | }
36 | case <-w.ctx.Done():
37 | w.tick.Stop()
38 | return
39 | }
40 | }
41 | }
42 |
--------------------------------------------------------------------------------
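
A usage sketch combining `NewWorker`, `Run` and the `ErrorLogger` callback from `error.go` (the timeout and tick interval are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/berops/claudie/internal/worker"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Tick once per second until the context is cancelled; errors returned
	// by the work function are passed to worker.ErrorLogger.
	w := worker.NewWorker(ctx, time.Second, func() error {
		fmt.Println("polling for work")
		return nil
	}, worker.ErrorLogger)

	// Run blocks until ctx is done, so callers usually invoke it as the
	// last step or in a dedicated goroutine.
	w.Run()
}
```
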
/manifests/README.md:
--------------------------------------------------------------------------------
1 | ## Warning
2 | Running `kustomize` in this directory will deploy `testing-framework` along with the other Claudie components.
3 |
4 | `testing-framework` is not part of `claudie`; it is used only for testing the platform itself during development.
5 |
--------------------------------------------------------------------------------
/manifests/claudie/.env:
--------------------------------------------------------------------------------
1 | GOLANG_LOG=info
2 |
3 | TERRAFORMER_HOSTNAME=terraformer
4 | TERRAFORMER_PORT=50052
5 |
6 | ANSIBLER_HOSTNAME=ansibler
7 | ANSIBLER_PORT=50053
8 |
9 | KUBE_ELEVEN_HOSTNAME=kube-eleven
10 | KUBE_ELEVEN_PORT=50054
11 |
12 | MANAGER_HOSTNAME=manager
13 | MANAGER_PORT=50055
14 |
15 | KUBER_HOSTNAME=kuber
16 | KUBER_PORT=50057
17 |
18 | OPERATOR_HOSTNAME=claudie-operator
19 | OPERATOR_PORT=50058
20 |
21 | BUCKET_URL=http://minio:9000
22 | BUCKET_NAME=claudie-tf-state-files
23 |
24 | DYNAMO_URL=http://dynamo:8000
25 | DYNAMO_TABLE_NAME=claudie
26 |
27 | AWS_REGION=local
28 |
29 |
--------------------------------------------------------------------------------
/manifests/claudie/cluster-rbac/clusterrole.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 | name: claudie-operator-role
5 | labels:
6 | app.kubernetes.io/part-of: claudie
7 | app.kubernetes.io/name: claudie-operator
8 | rules:
9 | - apiGroups: [""]
10 | resources: ["secrets"]
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - apiGroups: [""]
16 | resources: ["events"]
17 | verbs:
18 | - create
19 | - list
20 | - watch
21 | - patch
22 | - apiGroups: ["claudie.io"]
23 | resources: ["inputmanifests"]
24 | verbs:
25 | - create
26 | - delete
27 | - get
28 | - list
29 | - patch
30 | - update
31 | - watch
32 | - apiGroups: ["claudie.io"]
33 | resources: ["inputmanifests/finalizers"]
34 | verbs:
35 | - update
36 | - apiGroups: ["claudie.io"]
37 | resources: ["inputmanifests/status"]
38 | verbs:
39 | - get
40 | - patch
41 | - update
--------------------------------------------------------------------------------
/manifests/claudie/cluster-rbac/clusterrolebinding.yaml:
--------------------------------------------------------------------------------
1 | kind: ClusterRoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: claudie-operator-role-binding
5 | labels:
6 | app.kubernetes.io/part-of: claudie
7 | app.kubernetes.io/name: claudie-operator
8 | subjects:
9 | - kind: ServiceAccount
10 | name: claudie-operator-sa
11 | roleRef:
12 | kind: ClusterRole
13 | name: claudie-operator-role
14 | apiGroup: rbac.authorization.k8s.io
15 |
--------------------------------------------------------------------------------
/manifests/claudie/cluster-rbac/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | metadata:
3 | name: clusterroles
4 | resources:
5 | - clusterrole.yaml
6 | - clusterrolebinding.yaml
7 |
--------------------------------------------------------------------------------
/manifests/claudie/crd/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | metadata:
3 | name: crd
4 | resources:
5 | - claudie.io_inputmanifests.yaml
6 |
--------------------------------------------------------------------------------
/manifests/claudie/dynamo/cm.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: dynamodb-cm
5 | labels:
6 | app.kubernetes.io/part-of: claudie
7 | app.kubernetes.io/name: dynamo
8 | data:
9 | initialize: |
10 | aws dynamodb describe-table \
11 | --table-name $DYNAMO_TABLE_NAME \
12 | --endpoint-url $DYNAMO_URL > /dev/null 2>&1
13 |
14 | if [ $? -ne 0 ]; then
15 | echo "Creating table '$DYNAMO_TABLE_NAME'."
16 | aws dynamodb create-table \
17 | --table-name $DYNAMO_TABLE_NAME \
18 | --attribute-definitions AttributeName=LockID,AttributeType=S \
19 | --key-schema AttributeName=LockID,KeyType=HASH \
20 | --provisioned-throughput ReadCapacityUnits=1,WriteCapacityUnits=1 \
21 | --endpoint-url $DYNAMO_URL \
22 | --region $AWS_REGION \
23 | --output json
24 | else
25 | echo "Table '$DYNAMO_TABLE_NAME' already exists."
26 | fi
27 |
--------------------------------------------------------------------------------
/manifests/claudie/dynamo/job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: create-table-job
5 | labels:
6 | app.kubernetes.io/part-of: claudie
7 | app.kubernetes.io/name: dynamo
8 | spec:
9 | backoffLimit: 8
10 | ttlSecondsAfterFinished: 3600
11 | template:
12 | metadata:
13 | labels:
14 | app.kubernetes.io/part-of: claudie
15 | app.kubernetes.io/name: dynamo
16 | spec:
17 | restartPolicy: OnFailure
18 | volumes:
19 | - name: dynamodb-configuration
20 | projected:
21 | sources:
22 | - configMap:
23 | name: dynamodb-cm
24 | containers:
25 | - name: aws-cli
26 | image: amazon/aws-cli
27 | command: ["/bin/sh", "/config/initialize"]
28 | env:
29 | - name: DYNAMO_URL
30 | valueFrom:
31 | configMapKeyRef:
32 | name: env
33 | key: DYNAMO_URL
34 | - name: DYNAMO_TABLE_NAME
35 | valueFrom:
36 | configMapKeyRef:
37 | name: env
38 | key: DYNAMO_TABLE_NAME
39 | - name: AWS_ACCESS_KEY_ID
40 | valueFrom:
41 | secretKeyRef:
42 | name: dynamo-secret
43 | key: AWS_ACCESS_KEY_ID
44 | - name: AWS_SECRET_ACCESS_KEY
45 | valueFrom:
46 | secretKeyRef:
47 | name: dynamo-secret
48 | key: AWS_SECRET_ACCESS_KEY
49 | - name: AWS_REGION
50 | valueFrom:
51 | configMapKeyRef:
52 | name: env
53 | key: AWS_REGION
54 | volumeMounts:
55 | - name: dynamodb-configuration
56 | mountPath: /config
57 | resources:
58 | requests:
59 | memory: 128Mi
60 | cpu: 50m
61 | limits:
62 | memory: 200Mi
63 | cpu: 100m
64 |
--------------------------------------------------------------------------------
/manifests/claudie/dynamo/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | metadata:
3 | name: dynamo
4 | resources:
5 | - dynamodb.yaml
6 | - job.yaml
7 | - cm.yaml
8 | secretGenerator:
9 | - name: dynamo-secret
10 | files:
11 | - AWS_ACCESS_KEY_ID=secrets/access-key
12 | - AWS_SECRET_ACCESS_KEY=secrets/secret-key
13 | options:
14 | labels:
15 | app.kubernetes.io/part-of: claudie
16 |
--------------------------------------------------------------------------------
/manifests/claudie/dynamo/secrets/access-key:
--------------------------------------------------------------------------------
1 | client-admin
--------------------------------------------------------------------------------
/manifests/claudie/dynamo/secrets/secret-key:
--------------------------------------------------------------------------------
1 | client-password
--------------------------------------------------------------------------------
/manifests/claudie/kustomization.yaml:
--------------------------------------------------------------------------------
1 | namespace: claudie
2 | resources:
3 | - crd
4 | - ns.yaml
5 | - operator.yaml
6 | - builder.yaml
7 | - terraformer.yaml
8 | - ansibler.yaml
9 | - kube-eleven.yaml
10 | - kuber.yaml
11 | - manager.yaml
12 | - cluster-rbac
13 | - mongo
14 | - minio
15 | - dynamo
16 |
17 | # Alter ValidatingWebhookConfiguration and Certificate fields, so they will match the generated namespace
18 | replacements:
19 | - source:
20 | fieldPath: metadata.name
21 | kind: Namespace
22 | targets:
23 | - fieldPaths:
24 | - webhooks.*.namespaceSelector.matchLabels.kubernetes\.io/metadata\.name
25 | - webhooks.*.clientConfig.service.namespace
26 | select:
27 | kind: ValidatingWebhookConfiguration
28 | name: claudie-webhook
29 | - source:
30 | fieldPath: metadata.name
31 | kind: Namespace
32 | targets:
33 | - fieldPaths:
34 | - metadata.annotations.cert-manager\.io/inject-ca-from
35 | options:
36 | delimiter: /
37 | select:
38 | kind: ValidatingWebhookConfiguration
39 | name: claudie-webhook
40 | - fieldPaths:
41 | - spec.dnsNames.*
42 | options:
43 | delimiter: .
44 | index: 1
45 | select:
46 | kind: Certificate
47 | name: claudie-webhook-certificate
48 |
49 | configMapGenerator:
50 | - envs:
51 | - .env
52 | name: env
53 | options:
54 | labels:
55 | app.kubernetes.io/part-of: claudie
56 | apiVersion: kustomize.config.k8s.io/v1beta1
57 | kind: Kustomization
58 | images:
59 | - name: ghcr.io/berops/claudie/ansibler
60 | newTag: 1f9c783-3347
61 | - name: ghcr.io/berops/claudie/autoscaler-adapter
62 | newTag: 891740b-3320
63 | - name: ghcr.io/berops/claudie/builder
64 | newTag: 891740b-3320
65 | - name: ghcr.io/berops/claudie/claudie-operator
66 | newTag: f3f2ff6-3338
67 | - name: ghcr.io/berops/claudie/kube-eleven
68 | newTag: 891740b-3320
69 | - name: ghcr.io/berops/claudie/kuber
70 | newTag: 891740b-3320
71 | - name: ghcr.io/berops/claudie/manager
72 | newTag: 891740b-3320
73 | - name: ghcr.io/berops/claudie/terraformer
74 | newTag: d76bdf8-3344
75 |
--------------------------------------------------------------------------------
/manifests/claudie/minio/job.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: make-bucket-job
5 | labels:
6 | app.kubernetes.io/part-of: claudie
7 | app.kubernetes.io/name: minio
8 | spec:
9 | backoffLimit: 8
10 | ttlSecondsAfterFinished: 3600
11 | template:
12 | metadata:
13 | labels:
14 | app.kubernetes.io/part-of: claudie
15 | app.kubernetes.io/name: minio
16 | spec:
17 | restartPolicy: OnFailure
18 | volumes:
19 | - name: minio-configuration
20 | projected:
21 | sources:
22 | - configMap:
23 | name: minio-cm
24 | containers:
25 | - name: minio-mc
26 | image: quay.io/minio/mc
27 | command: ["/bin/sh", "/config/initialize"]
28 | env:
29 | - name: MINIO_ENDPOINT
30 | value: minio
31 | - name: MINIO_PORT
32 | value: "9000"
33 | - name: MINIO_ROOT_USER
34 | valueFrom:
35 | secretKeyRef:
36 | name: minio-secret
37 | key: AWS_ACCESS_KEY_ID
38 | - name: MINIO_ROOT_PASSWORD
39 | valueFrom:
40 | secretKeyRef:
41 | name: minio-secret
42 | key: AWS_SECRET_ACCESS_KEY
43 | volumeMounts:
44 | - name: minio-configuration
45 | mountPath: /config
46 | resources:
47 | requests:
48 | memory: 128Mi
49 | cpu: 10m
50 | limits:
51 | memory: 200Mi
52 | cpu: 35m
53 |
--------------------------------------------------------------------------------
/manifests/claudie/minio/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | metadata:
3 | name: minio
4 | resources:
5 | - sts.yaml
6 | - svc.yaml
7 | - cm.yaml
8 | - job.yaml
9 | secretGenerator:
10 | - name: minio-secret
11 | files:
12 | - AWS_ACCESS_KEY_ID=secrets/access-key
13 | - AWS_SECRET_ACCESS_KEY=secrets/secret-key
14 | options:
15 | labels:
16 | app.kubernetes.io/part-of: claudie
17 |
18 |
--------------------------------------------------------------------------------
/manifests/claudie/minio/secrets/access-key:
--------------------------------------------------------------------------------
1 | client-admin
--------------------------------------------------------------------------------
/manifests/claudie/minio/secrets/secret-key:
--------------------------------------------------------------------------------
1 | client-password
--------------------------------------------------------------------------------
/manifests/claudie/minio/svc.yaml:
--------------------------------------------------------------------------------
1 | kind: Service
2 | apiVersion: v1
3 | metadata:
4 | name: minio
5 | labels:
6 | app.kubernetes.io/part-of: claudie
7 | app.kubernetes.io/name: minio
8 | spec:
9 | publishNotReadyAddresses: true
10 | type: ClusterIP
11 | clusterIP: None
12 | selector:
13 | app.kubernetes.io/part-of: claudie
14 | app.kubernetes.io/name: minio
15 | ports:
16 | - protocol: TCP
17 | name: minio
18 | port: 9000
19 | targetPort: 9000
20 | - protocol: TCP
21 | name: minio-console
22 | port: 9001
23 | targetPort: 9001
24 |
--------------------------------------------------------------------------------
/manifests/claudie/mongo/kustomization.yaml:
--------------------------------------------------------------------------------
1 | kind: Kustomization
2 | metadata:
3 | name: mongo
4 | resources:
5 | - "mongodb.yaml"
6 | secretGenerator:
7 | - name: mongo-secret
8 | files:
9 | - "DATABASE_URL=./secrets/connectionString"
10 | - "MONGO_INITDB_ROOT_USERNAME=./secrets/username"
11 | - "MONGO_INITDB_ROOT_PASSWORD=./secrets/password"
12 | options:
13 | labels:
14 | app.kubernetes.io/part-of: claudie
--------------------------------------------------------------------------------
/manifests/claudie/mongo/secrets/connectionString:
--------------------------------------------------------------------------------
1 | mongodb://client-admin:client-password@mongodb:27017
--------------------------------------------------------------------------------
/manifests/claudie/mongo/secrets/password:
--------------------------------------------------------------------------------
1 | client-password
--------------------------------------------------------------------------------
/manifests/claudie/mongo/secrets/username:
--------------------------------------------------------------------------------
1 | client-admin
--------------------------------------------------------------------------------
/manifests/claudie/ns.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: claudie
5 | labels:
6 | app.kubernetes.io/part-of: claudie
7 |
--------------------------------------------------------------------------------
/manifests/kustomization.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.config.k8s.io/v1beta1
2 | kind: Kustomization
3 | resources:
4 | - claudie
5 | - testing-framework
6 |
--------------------------------------------------------------------------------
/manifests/network-policies/network-policy-cilium.yaml:
--------------------------------------------------------------------------------
1 | kind: NetworkPolicy
2 | apiVersion: networking.k8s.io/v1
3 | metadata:
4 | name: deny-from-other-namespaces
5 | namespace: claudie
6 | labels:
7 | app.kubernetes.io/part-of: claudie
8 | spec:
9 | podSelector:
10 | matchLabels:
11 | ingress:
12 | - from:
13 | - podSelector: { }
14 | ---
15 | apiVersion: cilium.io/v2
16 | kind: CiliumNetworkPolicy
17 | metadata:
18 | name: claudie-webhook
19 | namespace: claudie
20 | labels:
21 | app.kubernetes.io/part-of: claudie
22 | spec:
23 | endpointSelector:
24 | matchLabels:
25 | app.kubernetes.io/name: claudie-operator
26 | app.kubernetes.io/part-of: claudie
27 | ingress:
28 | - fromEntities:
29 | - world
30 | - cluster
31 | egress:
32 | - toEntities:
33 | - world
34 | toPorts:
35 | - ports:
36 | - port: "53"
37 | protocol: UDP
38 | - port: "53"
39 | protocol: TCP
40 | - toEntities:
41 | - cluster
42 | toPorts:
43 | - ports:
44 | - port: "443"
45 | protocol: TCP
46 | - ports:
47 | - port: "6443"
48 | protocol: TCP
49 | - toEndpoints:
50 | - matchLabels:
51 | app.kubernetes.io/name: manager
52 | app.kubernetes.io/part-of: claudie
53 | matchExpressions:
54 | - key: io.kubernetes.pod.namespace
55 | operator: Exists
56 | toPorts:
57 | - ports:
58 | - port: "50055"
59 |
--------------------------------------------------------------------------------
/manifests/network-policies/network-policy.yaml:
--------------------------------------------------------------------------------
1 | kind: NetworkPolicy
2 | apiVersion: networking.k8s.io/v1
3 | metadata:
4 | name: deny-from-other-namespaces
5 | namespace: claudie
6 | labels:
7 | app.kubernetes.io/part-of: claudie
8 | spec:
9 | podSelector:
10 |     matchLabels: { } # empty selector: applies to every pod in the namespace
11 | ingress:
12 | - from:
13 | - podSelector: { }
14 | ---
15 | apiVersion: networking.k8s.io/v1
16 | kind: NetworkPolicy
17 | metadata:
18 | name: claudie-webhook
19 | namespace: claudie
20 | labels:
21 | app.kubernetes.io/part-of: claudie
22 | spec:
23 | podSelector:
24 | matchLabels:
25 | app.kubernetes.io/name: claudie-operator
26 | app.kubernetes.io/part-of: claudie
27 | policyTypes:
28 | - Ingress
29 | - Egress
30 | ingress:
31 | - from:
32 | - ipBlock:
33 | cidr: 0.0.0.0/0
34 | egress:
35 | - to:
36 | - ipBlock:
37 | cidr: 0.0.0.0/0
38 | ports:
39 | - port: 53
40 | protocol: UDP
41 | - port: 53
42 | protocol: TCP
43 | - to:
44 | - namespaceSelector: { }
45 | ports:
46 | - port: 443
47 | protocol: TCP
48 | - port: 6443
49 | protocol: TCP
50 | - to:
51 | - namespaceSelector: { }
52 | podSelector:
53 | matchLabels:
54 | app.kubernetes.io/name: manager
55 | app.kubernetes.io/part-of: claudie
56 | ports:
57 |       - port: 50055 # claudie manager service port
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/README.md:
--------------------------------------------------------------------------------
1 | ## Testing Framework
2 | 
3 | `testing-framework` is a custom service used to run end-to-end tests: it applies pre-defined manifests and then monitors the cluster creation process, as well as the clusters' health after successful creation.
4 | 
5 | Please make sure you use the testing framework **only** for development purposes. \
6 | You almost certainly don't want to deploy the testing framework as a regular user.
7 | 
8 | ## Adding new test sets
9 | To add another test set, create a directory in `./test-sets` named after the new test scenario. In that directory, create `.yaml` manifests of the `InputManifest` resource type, where `.metadata.name` matches the test-set directory name.
10 | For example, to create a new test set named `test-gcp-aws-1`, create the directory (`mkdir ./test-sets/test-gcp-aws-1`) and define a `.yaml` manifest in it:
11 | ```
12 | # 1.yaml
13 | apiVersion: claudie.io/v1beta1
14 | kind: InputManifest
15 | metadata:
16 | name: test-gcp-aws-1
17 | spec:
18 |
19 | ...
20 | ```
21 | **Do not define a `.metadata.namespace` since each test run will have a different Namespace.**
22 |
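23 | A test set may contain several numbered manifests (`1.yaml`, `2.yaml`, ...). Each file describes the next desired state of the same `InputManifest`, so a multi-step scenario (create, then update) is expressed as a sequence of files; see e.g. `test-sets/autoscaling-1` for a three-step set. A sketch of the resulting layout (file names are illustrative):
24 | ```
25 | test-sets/
26 | └── test-gcp-aws-1
27 |     ├── 1.yaml   # initial cluster definition
28 |     └── 2.yaml   # follow-up update (e.g. a changed node count)
29 | ```
30 | 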
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/autoscaling-1/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: autoscaling-1
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | templates:
9 | repository: "https://github.com/berops/claudie-config"
10 | tag: v0.9.8
11 | path: "templates/terraformer/gcp"
12 | providerType: gcp
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | - name: gcp-2
17 | templates:
18 | repository: "https://github.com/berops/claudie-config"
19 | tag: v0.9.8
20 | path: "templates/terraformer/gcp"
21 | providerType: gcp
22 | secretRef:
23 | name: gcp-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: gcp-ctrl-nodes
28 | providerSpec:
29 | name: gcp-1
30 | region: europe-west1
31 | zone: europe-west1-c
32 | count: 1
33 | serverType: e2-medium
34 | image: ubuntu-minimal-2404-noble-amd64-v20241116
35 | storageDiskSize: 50
36 | - name: gcp-cmpt-nodes
37 | providerSpec:
38 | name: gcp-2
39 | region: europe-west2
40 | zone: europe-west2-a
41 | autoscaler:
42 | min: 1
43 | max: 5
44 | serverType: e2-small
45 | image: ubuntu-minimal-2404-noble-amd64-v20241116
46 | storageDiskSize: 50
47 | labels:
48 | test-set: autoscaling-1
49 | annotations:
50 | test-set: autoscaling-1
51 |
52 | kubernetes:
53 | clusters:
54 | - name: autoscaling-cluster-test-set
55 | version: 1.29.0
56 | network: 192.168.2.0/24
57 | pools:
58 | control:
59 | - gcp-ctrl-nodes
60 | compute:
61 | - gcp-cmpt-nodes
62 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/autoscaling-1/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: autoscaling-1
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | templates:
9 | repository: "https://github.com/berops/claudie-config"
10 | tag: v0.9.8
11 | path: "templates/terraformer/gcp"
12 | providerType: gcp
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | - name: gcp-2
17 | templates:
18 | repository: "https://github.com/berops/claudie-config"
19 | tag: v0.9.8
20 | path: "templates/terraformer/gcp"
21 | providerType: gcp
22 | secretRef:
23 | name: gcp-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: gcp-ctrl-nodes
28 | providerSpec:
29 | name: gcp-1
30 | region: europe-west1
31 | zone: europe-west1-c
32 | count: 1
33 | serverType: e2-medium
34 | image: ubuntu-minimal-2404-noble-amd64-v20241116
35 | storageDiskSize: 50
36 | - name: gcp-cmpt-nodes
37 | providerSpec:
38 | name: gcp-2
39 | region: europe-west2
40 | zone: europe-west2-a
41 | count: 1
42 | serverType: e2-small
43 | image: ubuntu-minimal-2404-noble-amd64-v20241116
44 | storageDiskSize: 50
45 | labels:
46 | test-set: autoscaling-1-new
47 | annotations:
48 | test-set: autoscaling-1-new
49 |
50 | kubernetes:
51 | clusters:
52 | - name: autoscaling-cluster-test-set
53 | version: 1.29.0
54 | network: 192.168.2.0/24
55 | pools:
56 | control:
57 | - gcp-ctrl-nodes
58 | compute:
59 | - gcp-cmpt-nodes
60 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/autoscaling-1/3.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: autoscaling-1
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | templates:
9 | repository: "https://github.com/berops/claudie-config"
10 | tag: v0.9.8
11 | path: "templates/terraformer/gcp"
12 | providerType: gcp
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | - name: gcp-2
17 | templates:
18 | repository: "https://github.com/berops/claudie-config"
19 | tag: v0.9.8
20 | path: "templates/terraformer/gcp"
21 | providerType: gcp
22 | secretRef:
23 | name: gcp-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: gcp-ctrl-nodes
28 | providerSpec:
29 | name: gcp-1
30 | region: europe-west1
31 | zone: europe-west1-c
32 | count: 1
33 | serverType: e2-medium
34 | image: ubuntu-minimal-2404-noble-amd64-v20241116
35 | storageDiskSize: 50
36 | - name: gcp-cmpt-nodes
37 | providerSpec:
38 | name: gcp-2
39 | region: europe-west2
40 | zone: europe-west2-a
41 | autoscaler:
42 | min: 1
43 | max: 5
44 | serverType: e2-small
45 | image: ubuntu-minimal-2404-noble-amd64-v20241116
46 | storageDiskSize: 50
47 |
48 | kubernetes:
49 | clusters:
50 | - name: autoscaling-cluster-test-set
51 | version: 1.29.0
52 | network: 192.168.2.0/24
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/autoscaling-2/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: autoscaling-2
5 | spec:
6 | providers:
7 | - name: aws-1
8 | templates:
9 | repository: "https://github.com/berops/claudie-config"
10 | tag: "v0.9.1"
11 | path: "templates/terraformer/aws"
12 | providerType: aws
13 | secretRef:
14 | name: aws-secret
15 | namespace: e2e-secrets
16 | - name: aws-2
17 | templates:
18 | repository: "https://github.com/berops/claudie-config"
19 | tag: "v0.9.1"
20 | path: "templates/terraformer/aws"
21 | providerType: aws
22 | secretRef:
23 | name: aws-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: aws-ctrl-nodes
28 | providerSpec:
29 | name: aws-1
30 | region: eu-north-1
31 | zone: eu-north-1a
32 | count: 1
33 | serverType: t3.small
34 |         # ubuntu
35 | image: ami-08eb150f611ca277f
36 | taints:
37 | - key: test
38 | value: test
39 | effect: NoSchedule
40 |
41 | - name: aws-cmpt-nodes
42 | providerSpec:
43 | name: aws-2
44 | region: eu-west-3
45 | zone: eu-west-3a
46 | autoscaler:
47 | min: 1
48 | max: 5
49 | serverType: t3.small
50 |         # ubuntu
51 | image: ami-045a8ab02aadf4f88
52 | storageDiskSize: 50
53 | labels:
54 | test-set: autoscaling-2
55 | annotations:
56 | test-set: autoscaling-2
57 |
58 | kubernetes:
59 | clusters:
60 | - name: autoscaling-cluster-test-002
61 | version: 1.30.0
62 | network: 192.168.2.0/24
63 | pools:
64 | control:
65 | - aws-ctrl-nodes
66 | compute:
67 | - aws-cmpt-nodes
68 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/autoscaling-2/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: autoscaling-2
5 | spec:
6 | providers:
7 | - name: aws-1
8 | templates:
9 | repository: "https://github.com/berops/claudie-config"
10 | # performs a rolling update
11 | tag: "v0.9.8"
12 | path: "templates/terraformer/aws"
13 | providerType: aws
14 | secretRef:
15 | name: aws-secret
16 | namespace: e2e-secrets
17 | - name: aws-2
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | # performs a rolling update
21 | tag: "v0.9.8"
22 | path: "templates/terraformer/aws"
23 | providerType: aws
24 | secretRef:
25 | name: aws-secret
26 | namespace: e2e-secrets
27 | nodePools:
28 | dynamic:
29 | - name: aws-ctrl-nodes
30 | providerSpec:
31 | name: aws-1
32 | region: eu-north-1
33 | zone: eu-north-1a
34 | count: 1
35 | serverType: t3.small
36 |         # ubuntu
37 | image: ami-08eb150f611ca277f
38 | taints:
39 | - key: test
40 | value: test
41 | effect: NoSchedule
42 |
43 | - name: aws-cmpt-nodes
44 | providerSpec:
45 | name: aws-2
46 | region: eu-west-3
47 | zone: eu-west-3a
48 | autoscaler:
49 | min: 1
50 | max: 5
51 | serverType: t3.small
52 |         # ubuntu
53 | image: ami-045a8ab02aadf4f88
54 | storageDiskSize: 50
55 | labels:
56 | test-set: autoscaling-2
57 | annotations:
58 | test-set: autoscaling-2
59 |
60 | kubernetes:
61 | clusters:
62 | - name: autoscaling-cluster-test-002
63 | version: 1.30.0
64 | network: 192.168.2.0/24
65 | pools:
66 | control:
67 | - aws-ctrl-nodes
68 | compute:
69 | - aws-cmpt-nodes
70 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-with-hetzner/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-with-hetzner
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: htz-ctrl-nodes
19 | providerSpec:
20 | name: hetzner-1
21 | region: fsn1
22 | zone: fsn1-dc14
23 | count: 1
24 | serverType: cpx11
25 | image: ubuntu-24.04
26 | storageDiskSize: 50
27 | labels:
28 | test-set: proxy-with-hetzner
29 | annotations:
30 | claudie.io/example-annotation: >
31 | ["proxy-with-hetzner"]
32 | - name: htz-cmpt-nodes
33 | providerSpec:
34 | name: hetzner-1
35 | region: fsn1
36 | zone: fsn1-dc14
37 | serverType: cpx11
38 | image: ubuntu-24.04
39 | storageDiskSize: 50
40 | count: 1
41 | labels:
42 | test-set: proxy-with-hetzner
43 | annotations:
44 | claudie.io/example-annotation: >
45 | ["proxy-with-hetzner"]
46 | kubernetes:
47 | clusters:
48 | - name: proxy-with-hetzner
49 | version: "1.30.0"
50 | network: 192.168.2.0/24
51 | installationProxy:
52 | mode: "default"
53 | endpoint: http://proxy.claudie.io:8880
54 | pools:
55 | control:
56 | - htz-ctrl-nodes
57 | compute:
58 | - htz-cmpt-nodes
59 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-with-hetzner/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-with-hetzner
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: htz-ctrl-nodes
19 | providerSpec:
20 | name: hetzner-1
21 | region: fsn1
22 | zone: fsn1-dc14
23 | count: 1
24 | serverType: cpx11
25 | image: ubuntu-24.04
26 | storageDiskSize: 50
27 | labels:
28 | test-set: proxy-with-hetzner
29 | annotations:
30 | claudie.io/example-annotation: >
31 | ["proxy-with-hetzner"]
32 | - name: htz-cmpt-nodes
33 | providerSpec:
34 | name: hetzner-1
35 | region: fsn1
36 | zone: fsn1-dc14
37 | serverType: cpx11
38 | image: ubuntu-24.04
39 | storageDiskSize: 50
40 | count: 1
41 | labels:
42 | test-set: proxy-with-hetzner
43 | annotations:
44 | claudie.io/example-annotation: >
45 | ["proxy-with-hetzner"]
46 | kubernetes:
47 | clusters:
48 | - name: proxy-with-hetzner
49 | version: "1.30.0"
50 | network: 192.168.2.0/24
51 | installationProxy:
52 | mode: "off"
53 | endpoint: http://proxy.claudie.io:8880
54 | pools:
55 | control:
56 | - htz-ctrl-nodes
57 | compute:
58 | - htz-cmpt-nodes
59 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-with-hetzner/3.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-with-hetzner
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: htz-ctrl-nodes
19 | providerSpec:
20 | name: hetzner-1
21 | region: fsn1
22 | zone: fsn1-dc14
23 | count: 1
24 | serverType: cpx11
25 | image: ubuntu-24.04
26 | storageDiskSize: 50
27 | labels:
28 | test-set: proxy-with-hetzner
29 | annotations:
30 | claudie.io/example-annotation: >
31 | ["proxy-with-hetzner"]
32 | - name: htz-cmpt-nodes
33 | providerSpec:
34 | name: hetzner-1
35 | region: fsn1
36 | zone: fsn1-dc14
37 | serverType: cpx11
38 | image: ubuntu-24.04
39 | storageDiskSize: 50
40 | count: 1
41 | labels:
42 | test-set: proxy-with-hetzner
43 | annotations:
44 | claudie.io/example-annotation: >
45 | ["proxy-with-hetzner"]
46 | kubernetes:
47 | clusters:
48 | - name: proxy-with-hetzner
49 | version: "1.30.0"
50 | network: 192.168.2.0/24
51 | installationProxy:
52 | mode: "default"
53 | endpoint: http://proxy.claudie.io:8880
54 | pools:
55 | control:
56 | - htz-ctrl-nodes
57 | compute:
58 | - htz-cmpt-nodes
59 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-with-hetzner/4.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-with-hetzner
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: htz-ctrl-nodes
19 | providerSpec:
20 | name: hetzner-1
21 | region: fsn1
22 | zone: fsn1-dc14
23 | count: 1
24 | serverType: cpx11
25 | image: ubuntu-24.04
26 | storageDiskSize: 50
27 | labels:
28 | test-set: proxy-with-hetzner
29 | annotations:
30 | claudie.io/example-annotation: >
31 | ["proxy-with-hetzner"]
32 | - name: htz-cmpt-nodes
33 | providerSpec:
34 | name: hetzner-1
35 | region: fsn1
36 | zone: fsn1-dc14
37 | serverType: cpx11
38 | image: ubuntu-24.04
39 | storageDiskSize: 50
40 | count: 1
41 | labels:
42 | test-set: proxy-with-hetzner
43 | annotations:
44 | claudie.io/example-annotation: >
45 | ["proxy-with-hetzner"]
46 | kubernetes:
47 | clusters:
48 | - name: proxy-with-hetzner
49 | version: "1.30.0"
50 | network: 192.168.2.0/24
51 | installationProxy:
52 | mode: "on"
53 | endpoint: http://proxy.claudie.io:8880
54 | pools:
55 | control:
56 | - htz-ctrl-nodes
57 | compute:
58 | - htz-cmpt-nodes
59 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-with-hetzner/5.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-with-hetzner
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | - name: gcp-1
17 | providerType: gcp
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | path: "templates/terraformer/gcp"
21 | secretRef:
22 | name: gcp-secret
23 | namespace: e2e-secrets
24 | nodePools:
25 | dynamic:
26 | - name: htz-ctrl-nodes
27 | providerSpec:
28 | name: hetzner-1
29 | region: fsn1
30 | zone: fsn1-dc14
31 | count: 1
32 | serverType: cpx11
33 | image: ubuntu-24.04
34 | storageDiskSize: 50
35 | labels:
36 | test-set: proxy-with-hetzner
37 | annotations:
38 | claudie.io/example-annotation: >
39 | ["proxy-with-hetzner"]
40 | - name: gcp-cmpt-nodes
41 | providerSpec:
42 | name: gcp-1
43 | region: europe-west1
44 | zone: europe-west1-b
45 | count: 1
46 | serverType: e2-small
47 | image: ubuntu-minimal-2404-noble-amd64-v20241116
48 | storageDiskSize: 50
49 | labels:
50 | test-set: proxy-with-hetzner
51 | annotations:
52 | claudie.io/example-annotation: >
53 | ["proxy-with-hetzner"]
54 | kubernetes:
55 | clusters:
56 | - name: proxy-with-hetzner
57 | version: "1.30.0"
58 | network: 192.168.2.0/24
59 | installationProxy:
60 | mode: "default"
61 | endpoint: http://proxy.claudie.io:8880
62 | pools:
63 | control:
64 | - htz-ctrl-nodes
65 | compute:
66 | - gcp-cmpt-nodes
67 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-with-hetzner/8.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-with-hetzner
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | - name: gcp-1
17 | providerType: gcp
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | path: "templates/terraformer/gcp"
21 | secretRef:
22 | name: gcp-secret
23 | namespace: e2e-secrets
24 | nodePools:
25 | dynamic:
26 | - name: gcp-ctrl-nodes
27 | providerSpec:
28 | name: hetzner-1
29 | region: fsn1
30 | zone: fsn1-dc14
31 | count: 1
32 | serverType: cpx11
33 | image: ubuntu-24.04
34 | storageDiskSize: 50
35 | labels:
36 | test-set: proxy-with-hetzner
37 | annotations:
38 | claudie.io/example-annotation: >
39 | ["proxy-with-hetzner"]
40 | - name: htz-cmpt-nodes
41 | providerSpec:
42 | name: hetzner-1
43 | region: fsn1
44 | zone: fsn1-dc14
45 | serverType: cpx11
46 | image: ubuntu-24.04
47 | storageDiskSize: 50
48 | count: 1
49 | labels:
50 | test-set: proxy-with-hetzner
51 | annotations:
52 | claudie.io/example-annotation: >
53 | ["proxy-with-hetzner"]
54 | kubernetes:
55 | clusters:
56 | - name: proxy-with-hetzner
57 | version: "1.30.0"
58 | network: 192.168.2.0/24
59 | installationProxy:
60 | mode: "default"
61 | endpoint: http://proxy.claudie.io:8880
62 | pools:
63 | control:
64 | - gcp-ctrl-nodes
65 | compute:
66 | - htz-cmpt-nodes
67 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 1
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "default"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/10.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 2
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "default"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 1
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "off"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/3.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 1
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "default"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/4.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 1
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "on"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/5.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 1
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "off"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/6.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 2
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "off"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/7.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 2
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "on"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/8.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 1
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "on"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/proxy-without-hetzner/9.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: proxy-without-hetzner
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: gcp-ctrl-nodes
19 | providerSpec:
20 | name: gcp-1
21 | region: europe-west2
22 | zone: europe-west2-c
23 | count: 1
24 | serverType: e2-medium
25 | image: ubuntu-minimal-2404-noble-amd64-v20241116
26 | labels:
27 | test-set: proxy-without-hetzner
28 | annotations:
29 | claudie.io/example-annotation: >
30 | ["proxy-without-hetzner"]
31 | - name: gcp-cmpt-nodes
32 | providerSpec:
33 | name: gcp-1
34 | region: europe-west1
35 | zone: europe-west1-b
36 | count: 1
37 | serverType: e2-small
38 | image: ubuntu-minimal-2404-noble-amd64-v20241116
39 | storageDiskSize: 50
40 | labels:
41 | test-set: proxy-without-hetzner
42 | annotations:
43 | claudie.io/example-annotation: >
44 | ["proxy-without-hetzner"]
45 | kubernetes:
46 | clusters:
47 | - name: proxy-without-hetzner
48 | version: "1.30.0"
49 | network: 192.168.2.0/24
50 | installationProxy:
51 | mode: "default"
52 | endpoint: http://proxy.claudie.io:8880
53 | pools:
54 | control:
55 | - gcp-ctrl-nodes
56 | compute:
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/rolling-update-2/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: rolling-update-2
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: "v0.9.8"
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | - name: aws-2
17 | providerType: aws
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | tag: "v0.9.8"
21 | path: "templates/terraformer/aws"
22 | secretRef:
23 | name: aws-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 |     static: [] # no static nodepools in this test set
27 | dynamic:
28 | - name: htz-ctrl-nodes
29 | providerSpec:
30 | name: hetzner-1
31 | region: nbg1
32 | zone: nbg1-dc3
33 | count: 1
34 | serverType: cpx11
35 | image: ubuntu-24.04
36 | labels:
37 | test-set: rolling-update-2-test
38 | annotations:
39 | claudie.io/example-annotation: >
40 | ["rolling-update-2-test"]
41 |
42 | - name: aws-cmpt-nodes
43 | providerSpec:
44 | name: aws-2
45 | region: eu-west-3
46 | zone: eu-west-3a
47 | count: 1
48 | serverType: t3.small
49 |         # ubuntu
50 | image: ami-045a8ab02aadf4f88
51 | storageDiskSize: 50
52 |
53 | kubernetes:
54 | clusters:
55 | - name: ts-rolling-update-nodepools1
56 | version: 1.29.0
57 | network: 192.168.2.0/24
58 | pools:
59 | control:
60 | - htz-ctrl-nodes
61 | compute:
62 | - aws-cmpt-nodes
63 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/succeeds-on-last-1/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: succeeds-on-last-1
5 | spec:
6 | providers:
7 | - name: aws-1
8 | providerType: aws
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/aws"
13 | secretRef:
14 | name: aws-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: aws-cmpt-00001
19 | providerSpec:
20 | name: aws-1
21 | region: eu-west-3
22 | zone: eu-west-3a
23 | count: 1
24 | serverType: t3.small
25 |         # ubuntu - this image intentionally doesn't exist
26 | image: ami-00000000000000001
27 | storageDiskSize: 50
28 | kubernetes:
29 | clusters:
30 | - name: succeeds-on-last-1-cluster-0
31 | version: 1.29.0
32 | network: 192.168.2.0/24
33 | pools:
34 | control:
35 | - aws-cmpt-00001
36 | compute:
37 | - aws-cmpt-00001
38 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/succeeds-on-last-1/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: succeeds-on-last-1
5 | spec:
6 | providers:
7 | - name: aws-1
8 | providerType: aws
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/aws"
13 | secretRef:
14 | name: aws-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: aws-cmpt-00002
19 | providerSpec:
20 | name: aws-1
21 | region: eu-west-3
22 | zone: eu-west-3a
23 | count: 1
24 | serverType: t3.small
25 |         # ubuntu
26 | image: ami-045a8ab02aadf4f88
27 | storageDiskSize: 50
28 | kubernetes:
29 | clusters:
30 | - name: succeeds-on-last-1-cluster-0
31 | version: 1.29.0
32 | network: 192.168.2.0/24
33 | pools:
34 | control:
35 | - aws-cmpt-00002
36 | compute:
37 | - aws-cmpt-00002
38 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/succeeds-on-last-2/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: succeeds-on-last-2
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: htz-nodes
19 | providerSpec:
20 | name: hetzner-1
21 | region: nbg1
22 | zone: nbg1-dc3
23 | count: 1
24 | serverType: cpx11
25 | image: ubuntu-24.04
26 | kubernetes:
27 | clusters:
28 | - name: succeeds-on-last-2-cluster-0
29 | version: 1.29.0
30 | network: 192.168.2.0/24
31 | pools:
32 | control:
33 | - htz-nodes
34 | compute:
35 | - htz-nodes
36 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/succeeds-on-last-2/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: succeeds-on-last-2
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | - name: oci-1
17 | providerType: oci
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | tag: v0.9.8
21 | path: "templates/terraformer/oci"
22 | secretRef:
23 | name: oci-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: htz-nodes
28 | providerSpec:
29 | name: hetzner-1
30 | region: nbg1
31 | zone: nbg1-dc3
32 | count: 1
33 | serverType: cpx11
34 | image: ubuntu-24.04
35 | - name: oci-cmpt-00001
36 | providerSpec:
37 | name: oci-1
38 | region: eu-frankfurt-1
39 | zone: hsVQ:EU-FRANKFURT-1-AD-1
40 | count: 1
41 | serverType: VM.Standard1.1
42 | # ubuntu minimal - wrong image
43 | # https://docs.oracle.com/en-us/iaas/images/ubuntu-2404/canonical-ubuntu-24-04-2024-08-28-0.htm
44 | image: ocid1.image.oc1.eu-frankfurt-1.000000000000000000000000000000000000000000000000000000000001
45 | storageDiskSize: 50
46 | kubernetes:
47 | clusters:
48 | - name: succeeds-on-last-2-cluster-0
49 | version: 1.29.0
50 | network: 192.168.2.0/24
51 | pools:
52 | control:
53 | - htz-nodes
54 | compute:
55 | - oci-cmpt-00001
56 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/succeeds-on-last-2/3.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: succeeds-on-last-2
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | - name: oci-1
17 | providerType: oci
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | tag: v0.9.8
21 | path: "templates/terraformer/oci"
22 | secretRef:
23 | name: oci-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: htz-nodes
28 | providerSpec:
29 | name: hetzner-1
30 | region: nbg1
31 | zone: nbg1-dc3
32 | count: 1
33 | serverType: cpx11
34 | image: ubuntu-24.04
35 | - name: oci-cmpt-00002
36 | providerSpec:
37 | name: oci-1
38 | region: eu-frankfurt-1
39 | zone: hsVQ:EU-FRANKFURT-1-AD-1
40 | count: 1
41 | serverType: VM.Standard1.1
42 | # ubuntu minimal
43 | # https://docs.oracle.com/en-us/iaas/images/ubuntu-2404/canonical-ubuntu-24-04-2024-08-28-0.htm
44 | image: ocid1.image.oc1.eu-frankfurt-1.aaaaaaaa7hxwyz4qiasffo7n7s4ep5lywpzwgkc2am65frqrqinoyitmxxla
45 | storageDiskSize: 50
46 | kubernetes:
47 | clusters:
48 | - name: succeeds-on-last-2-cluster-0
49 | version: 1.29.0
50 | network: 192.168.2.0/24
51 | pools:
52 | control:
53 | - htz-nodes
54 | compute:
55 | - oci-cmpt-00002
56 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/succeeds-on-last-3/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: succeeds-on-last-3
5 | spec:
6 | providers:
7 | - name: azure-sponsor-1
8 | providerType: azure
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/azure"
13 | secretRef:
14 | name: azure-sponsorship-secret
15 | namespace: e2e-secrets
16 | - name: hetznerdns-1
17 | providerType: hetznerdns
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | tag: v0.9.8
21 | path: "templates/terraformer/hetznerdns"
22 | secretRef:
23 | name: hetznerdns-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: azr-nodes
28 | providerSpec:
29 | name: azure-sponsor-1
30 | region: Germany West Central
31 | zone: "3"
32 | count: 1
33 | serverType: Standard_B2s
34 | image: Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts:22.04.202212120
35 | storageDiskSize: 50
36 | kubernetes:
37 | clusters:
38 | - name: succeeds-on-last-3-cluster-0
39 | version: 1.29.0
40 | network: 192.168.2.0/24
41 | pools:
42 | control:
43 | - azr-nodes
44 | compute:
45 | - azr-nodes
46 | loadBalancers:
47 | roles:
48 | - name: apiserver-lb-hetzner
49 | protocol: tcp
50 | port: 6443
51 | targetPort: 6443
52 | targetPools:
53 | - azr-nodes
54 | clusters:
55 | - name: succeeds-on-last-3-lbcluster
56 | roles:
57 | - apiserver-lb-hetzner
58 | dns:
59 | dnsZone: claudie.dev
60 | provider: hetznerdns-1
61 | targetedK8s: succeeds-on-last-3-cluster-0
62 | pools:
63 | - azr-nodes
64 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/succeeds-on-last-3/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: succeeds-on-last-3
5 | spec:
6 | providers:
7 | - name: azure-sponsor-1
8 | providerType: azure
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/azure"
13 | secretRef:
14 | name: azure-sponsorship-secret
15 | namespace: e2e-secrets
16 | - name: hetznerdns-1
17 | providerType: hetznerdns
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | tag: v0.9.8
21 | path: "templates/terraformer/hetznerdns"
22 | secretRef:
23 | name: hetznerdns-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: azr-nodes
28 | providerSpec:
29 | name: azure-sponsor-1
30 | region: Germany West Central
31 | zone: "3"
32 | count: 1
33 | serverType: Standard_B2s
34 | image: Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts:22.04.202212120
35 | storageDiskSize: 50
36 | kubernetes:
37 | clusters:
38 | - name: succeeds-on-last-3-cluster-0
39 | version: 1.29.0
40 | network: 192.168.2.0/24
41 | pools:
42 | control:
43 | - azr-nodes
44 | compute:
45 | - azr-nodes
46 | loadBalancers:
47 | roles:
48 | - name: apiserver-lb-hetzner
49 | protocol: tcp
50 | port: 6443
51 | targetPort: 6443
52 | targetPools:
53 | - azr-nodes
54 | clusters:
55 | - name: succeeds-on-last-3-lbcluster
56 | roles:
57 | - apiserver-lb-hetzner
58 | dns:
59 | dnsZone: claudie.org
60 | provider: hetznerdns-1
61 | targetedK8s: succeeds-on-last-3-cluster-0
62 | pools:
63 | - azr-nodes
64 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/succeeds-on-last-4/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: succeeds-on-last-4
5 | spec:
6 | providers:
7 | - name: hetzner-1
8 | providerType: hetzner
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/hetzner"
13 | secretRef:
14 | name: hetzner-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: htz-ctrl-nodes
19 | providerSpec:
20 | name: hetzner-1
21 | region: nbg1
22 | zone: nbg1-dc3
23 | count: 1
24 | serverType: cpx11
25 | image: ubuntu-24.04
26 | kubernetes:
27 | clusters:
28 | - name: succeeds-on-last-4-cluster-0
29 | version: 1.29.0
30 | network: 192.168.2.0/24
31 | pools:
32 | control:
33 | - htz-ctrl-nodes
34 | compute:
35 | - htz-ctrl-nodes
36 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/test-set3/2.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: test-set3
5 | spec:
6 | providers:
7 | - name: gcp-1
8 | providerType: gcp
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/gcp"
13 | secretRef:
14 | name: gcp-secret
15 | namespace: e2e-secrets
16 | - name: hetzner-1
17 | providerType: hetzner
18 | templates:
19 | repository: "https://github.com/berops/claudie-config"
20 | tag: v0.9.8
21 | path: "templates/terraformer/hetzner"
22 | secretRef:
23 | name: hetzner-secret
24 | namespace: e2e-secrets
25 | nodePools:
26 | dynamic:
27 | - name: htz-cmpt-nodes
28 | providerSpec:
29 | name: hetzner-1
30 | region: nbg1
31 | zone: nbg1-dc3
32 | count: 1
33 | serverType: cpx11
34 | image: ubuntu-24.04
35 | storageDiskSize: 50
36 |
37 | - name: gcp-cmpt-nodes
38 | providerSpec:
39 | name: gcp-1
40 | region: europe-west1
41 | zone: europe-west1-b
42 | count: 1
43 | serverType: e2-small
44 | image: ubuntu-minimal-2404-noble-amd64-v20241116
45 | storageDiskSize: 50
46 |
47 | kubernetes:
48 | clusters:
49 | - name: ts3-c-1-cluster-test-set-no3
50 | version: 1.29.0
51 | network: 192.168.2.0/24
52 | pools:
53 | control:
54 | - htz-cmpt-nodes
55 | compute:
56 | - htz-cmpt-nodes
57 | - gcp-cmpt-nodes
58 |
--------------------------------------------------------------------------------
/manifests/testing-framework/test-sets/test-set4/1.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: claudie.io/v1beta1
2 | kind: InputManifest
3 | metadata:
4 | name: test-set4
5 | spec:
6 | providers:
7 | - name: oci-1
8 | providerType: oci
9 | templates:
10 | repository: "https://github.com/berops/claudie-config"
11 | tag: v0.9.8
12 | path: "templates/terraformer/oci"
13 | secretRef:
14 | name: oci-secret
15 | namespace: e2e-secrets
16 | nodePools:
17 | dynamic:
18 | - name: oci-kube-nodes
19 | providerSpec:
20 | name: oci-1
21 | region: eu-frankfurt-1
22 | zone: hsVQ:EU-FRANKFURT-1-AD-1
23 | count: 1
24 | serverType: VM.Standard1.1
25 | # ubuntu minimal
26 | # https://docs.oracle.com/en-us/iaas/images/ubuntu-2404/canonical-ubuntu-24-04-2024-08-28-0.htm
27 | image: ocid1.image.oc1.eu-frankfurt-1.aaaaaaaa7hxwyz4qiasffo7n7s4ep5lywpzwgkc2am65frqrqinoyitmxxla
28 | storageDiskSize: 50
29 |
30 | kubernetes:
31 | clusters:
32 | - name: ts4-c-1-cluster-test-set-no4
33 | version: 1.29.0
34 | network: 192.168.2.0/24
35 | installationProxy:
36 | mode: "on"
37 | pools:
38 | control:
39 | - oci-kube-nodes
40 | compute:
41 | - oci-kube-nodes
42 |
--------------------------------------------------------------------------------
/manifests/testing-framework/testing-framework.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: testing-framework
5 | spec:
6 | backoffLimit: 0
7 | template:
8 | spec:
9 | terminationGracePeriodSeconds: 7200 # 2 hours
10 | containers:
11 | - name: testing-framework
12 | imagePullPolicy: Always
13 | image: ghcr.io/berops/claudie/testing-framework
14 | env:
15 | - name: MANAGER_PORT
16 | valueFrom:
17 | configMapKeyRef:
18 | name: env
19 | key: MANAGER_PORT
20 | - name: MANAGER_HOSTNAME
21 | valueFrom:
22 | configMapKeyRef:
23 | name: env
24 | key: MANAGER_HOSTNAME
25 | - name: GOLANG_LOG
26 | valueFrom:
27 | configMapKeyRef:
28 | name: env
29 | key: GOLANG_LOG
30 | - name: NAMESPACE
31 | valueFrom:
32 | fieldRef:
33 | fieldPath: metadata.namespace
34 | - name: AUTO_CLEAN_UP
35 | valueFrom:
36 | configMapKeyRef:
37 | name: env
38 | key: AUTO_CLEAN_UP
39 | optional: true
40 | restartPolicy: Never
41 | serviceAccountName: testing-framework
42 | ---
43 | apiVersion: v1
44 | kind: ServiceAccount
45 | metadata:
46 | name: testing-framework
47 | ---
48 | apiVersion: rbac.authorization.k8s.io/v1
49 | kind: ClusterRole
50 | metadata:
51 | name: testing-framework
52 | rules:
53 | - apiGroups: ["claudie.io"]
54 | resources: ["inputmanifests"]
55 | verbs: ["create", "patch", "update", "get", "list", "delete", "watch"]
56 | ---
57 | kind: ClusterRoleBinding
58 | apiVersion: rbac.authorization.k8s.io/v1
59 | metadata:
60 | name: testing-framework
61 | roleRef:
62 | kind: ClusterRole
63 | name: testing-framework
64 | apiGroup: rbac.authorization.k8s.io
65 | subjects:
66 | - kind: ServiceAccount
67 | name: testing-framework
68 |
--------------------------------------------------------------------------------
/proto/claudie-operator.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | package claudie;
3 |
4 | option go_package = "proto/pb";
5 |
6 | message SendAutoscalerEventRequest {
7 | string InputManifestName = 1;
8 | string InputManifestNamespace = 2;
9 | }
10 |
11 | message SendAutoscalerEventResponse {}
12 |
13 | service OperatorService {
14 | // SendAutoscalerEvent sends events to claudie-operator when a scale up/down occurs
15 | rpc SendAutoscalerEvent(SendAutoscalerEventRequest) returns (SendAutoscalerEventResponse);
16 | }
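17 | 
18 | // Illustrative usage (assuming the standard generated Go client): call
19 | //   client.SendAutoscalerEvent(ctx, &pb.SendAutoscalerEventRequest{
20 | //     InputManifestName: "my-manifest", InputManifestNamespace: "my-namespace"})
21 | // so that claudie-operator can react to the scaling event for that InputManifest.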
--------------------------------------------------------------------------------
/proto/kubeEleven.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | package claudie;
3 |
4 | import "spec/manifest.proto";
5 |
6 | option go_package = "proto/pb";
7 |
8 | message BuildClusterRequest {
9 | spec.K8scluster desired = 1;
10 | string projectName = 4;
11 |   // LoadBalancerEndpoint specifies the endpoint
12 |   // to use when it is on a loadbalancer. If empty,
13 |   // the endpoint is one of the nodes supplied
14 |   // as part of the desired state.
15 | string loadBalancerEndpoint = 5;
16 | }
17 |
18 | message BuildClusterResponse {
19 | spec.K8scluster desired = 1;
20 | }
21 |
22 | message DestroyClusterRequest {
23 | string projectName = 1;
24 |
25 | spec.K8scluster current = 2;
26 |   // LoadBalancerEndpoint specifies the endpoint
27 |   // to use when it is on a loadbalancer. If empty,
28 |   // the endpoint is one of the nodes supplied
29 |   // as part of the current state.
30 | string loadBalancerEndpoint = 3;
31 |
32 | }
33 |
34 | message DestroyClusterResponse {
35 | spec.K8scluster current = 1;
36 | }
37 |
38 | service KubeElevenService {
39 | // BuildCluster builds the kubernetes clusters specified in the provided config.
40 | rpc BuildCluster(BuildClusterRequest) returns (BuildClusterResponse);
41 | // DestroyCluster destroys the kubernetes clusters specified in the provided config.
42 | rpc DestroyCluster(DestroyClusterRequest) returns (DestroyClusterResponse);
43 | }
44 |
--------------------------------------------------------------------------------
/proto/spec/dns.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 |
3 | package spec;
4 | option go_package = "github.com/berops/claudie/proto/pb/spec";
5 |
6 | import "spec/provider.proto";
7 |
8 | message AlternativeName {
9 | string hostname = 1;
10 | string endpoint = 2;
11 | }
12 |
13 | // DNS holds general information about the DNS records.
14 | message DNS {
15 | // DNS zone for the DNS records.
16 | string dnsZone = 1;
17 | // User specified hostname. (optional)
18 | string hostname = 2;
19 | // Provider for the DNS records.
20 | Provider provider = 3;
21 | // The whole hostname of the DNS record.
22 | string endpoint = 4;
23 | // alternative names for which A records will be created in addition to the hostname.
24 | repeated AlternativeName alternativeNames = 5;
25 | }
26 |
--------------------------------------------------------------------------------
/proto/spec/provider.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | package spec;
3 | option go_package = "github.com/berops/claudie/proto/pb/spec";
4 |
5 | message GCPProvider {
6 | string key = 1;
7 | string project = 2;
8 | }
9 |
10 | message HetznerProvider {
11 | string token = 1;
12 | }
13 |
14 | message HetznerDNSProvider {
15 | string token = 1;
16 | }
17 |
18 | message OCIProvider {
19 | string userOCID = 1;
20 | string tenancyOCID = 2;
21 | string keyFingerprint = 3;
22 | string compartmentOCID = 4;
23 | string privateKey = 5;
24 | }
25 |
26 | message AWSProvider {
27 | string secretKey = 1;
28 | string accessKey = 2;
29 | }
30 |
31 | message AzureProvider {
32 | string subscriptionID = 1;
33 | string tenantID = 2;
34 | string clientID = 3;
35 | string clientSecret = 4;
36 | }
37 |
38 | message CloudflareProvider {
39 | string token = 1;
40 | }
41 |
42 | message GenesisCloudProvider {
43 | string token = 1;
44 | }
45 |
46 | message Provider {
47 | string specName = 1;
48 | string cloudProviderName = 2;
49 |
50 | oneof ProviderType {
51 | GCPProvider gcp = 3;
52 | HetznerProvider hetzner = 4;
53 | HetznerDNSProvider hetznerdns = 5;
54 | OCIProvider oci = 6;
55 | AWSProvider aws = 7;
56 | AzureProvider azure = 8;
57 | CloudflareProvider cloudflare = 9;
58 | GenesisCloudProvider genesiscloud = 10;
59 | }
60 |
61 | TemplateRepository templates = 13;
62 | }
63 |
64 | message TemplateRepository {
65 | string repository = 1;
66 | optional string tag = 2;
67 | string path = 3;
68 |
69 | string commitHash = 4;
70 | }
71 |
--------------------------------------------------------------------------------
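
Provider carries exactly one set of cloud credentials via the ProviderType oneof. A sketch of constructing the Hetzner variant, assuming standard protoc-gen-go oneof wrapper names (Provider_Hetzner and friends); the spec name and template repository values are placeholders.

package example

import "github.com/berops/claudie/proto/pb/spec"

// hetznerProvider builds a Provider with the Hetzner oneof variant set.
func hetznerProvider(token string) *spec.Provider {
	return &spec.Provider{
		SpecName:          "hetzner-1", // placeholder spec name
		CloudProviderName: "hetzner",
		// Only one oneof variant can be set at a time; assigning a
		// different wrapper would replace this one.
		ProviderType: &spec.Provider_Hetzner{
			Hetzner: &spec.HetznerProvider{Token: token},
		},
		Templates: &spec.TemplateRepository{
			Repository: "https://example.com/templates.git", // placeholder
			Path:       "terraformer/hetzner",               // placeholder
		},
	}
}

--------------------------------------------------------------------------------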
/proto/terraformer.proto:
--------------------------------------------------------------------------------
1 | syntax = "proto3";
2 | package claudie;
3 | import "spec/manifest.proto";
4 |
5 | option go_package = "proto/pb";
6 |
7 | message BuildInfrastructureRequest {
8 | spec.K8scluster current = 1;
9 | spec.K8scluster desired = 2;
10 |
11 | repeated spec.LBcluster currentLbs = 3;
12 | repeated spec.LBcluster desiredLbs = 4;
13 |
14 | string projectName = 5;
15 |
16 | uint64 options = 6;
17 | }
18 |
19 | message BuildInfrastructureResponse {
20 | message InfrastructureData {
21 | spec.K8scluster desired = 1;
22 | repeated spec.LBcluster desiredLbs = 2;
23 | repeated string failed = 3;
24 | }
25 |
26 | oneof Response {
27 | InfrastructureData ok = 6;
28 | InfrastructureData fail = 7;
29 | }
30 | }
31 |
32 | message DestroyInfrastructureRequest {
33 | string projectName = 1;
34 |
35 | spec.K8scluster current = 2;
36 | repeated spec.LBcluster currentLbs = 3;
37 | }
38 |
39 | message DestroyInfrastructureResponse {
40 | spec.K8scluster current = 1;
41 | repeated spec.LBcluster currentLbs = 2;
42 | }
43 |
44 | service TerraformerService {
45 | // BuildInfrastructure builds the infrastructure based on the provided desired state (includes addition/deletion of *stuff*).
46 | rpc BuildInfrastructure(BuildInfrastructureRequest) returns (BuildInfrastructureResponse);
47 | // DestroyInfrastructure destroys the infrastructure completely.
48 | rpc DestroyInfrastructure(DestroyInfrastructureRequest) returns (DestroyInfrastructureResponse);
49 | }
50 |
--------------------------------------------------------------------------------
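
BuildInfrastructureResponse reports both success and partial failure through the same InfrastructureData shape; the oneof tells them apart, and `failed` names the clusters that could not be built. A sketch of handling it on the caller side, assuming standard protoc-gen-go oneof wrappers.

package example

import (
	"fmt"

	"github.com/berops/claudie/proto/pb"
)

// handleBuildResponse distinguishes the ok/fail variants of the response.
func handleBuildResponse(resp *pb.BuildInfrastructureResponse) error {
	switch r := resp.Response.(type) {
	case *pb.BuildInfrastructureResponse_Ok:
		// Everything was provisioned; the wrapper holds the updated desired state.
		return nil
	case *pb.BuildInfrastructureResponse_Fail:
		// r.Fail.Failed lists the clusters whose infrastructure failed to build.
		return fmt.Errorf("infrastructure build failed for: %v", r.Fail.Failed)
	default:
		return fmt.Errorf("unexpected response variant: %T", r)
	}
}

--------------------------------------------------------------------------------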
/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2025.1.31
2 | charset-normalizer==3.4.1
3 | click==8.1.8
4 | colorama==0.4.6
5 | fontawesome==5.10.1.post1
6 | ghp-import==2.1.0
7 | idna==3.10
8 | Jinja2==3.1.6
9 | Markdown==3.7
10 | MarkupSafe==3.0.2
11 | material-plausible-plugin==0.3.0
12 | mergedeep==1.3.4
13 | mike==2.1.3
14 | mkdocs==1.6.1
15 | mkdocs-glightbox==0.4.0
16 | mkdocs-include-markdown-plugin==7.1.5
17 | mkdocs-material==9.6.9
18 | mkdocs-material-extensions==1.3.1
19 | packaging==24.2
20 | Pygments==2.19.1
21 | pymdown-extensions==10.14.3
22 | python-dateutil==2.9.0.post0
23 | PyYAML==6.0.2
24 | pyyaml_env_tag==0.1
25 | regex==2024.11.6
26 | requests==2.32.3
27 | six==1.17.0
28 | urllib3==2.3.0
29 | verspec==0.1.0
30 | watchdog==6.0.0
31 |
--------------------------------------------------------------------------------
/services/ansibler/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/terraformer
8 | services/kube-eleven
9 | services/builder
10 | services/testing-framework
11 | services/claudie-operator
12 | services/kuber
13 | services/autoscaler-adapter
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/ansibler/server/adapters/inbound/grpc/ansibler_service.go:
--------------------------------------------------------------------------------
1 | package grpc
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/berops/claudie/proto/pb"
7 | "github.com/berops/claudie/services/ansibler/server/domain/usecases"
8 | )
9 |
10 | type AnsiblerGrpcService struct {
11 | pb.UnimplementedAnsiblerServiceServer
12 |
13 | usecases *usecases.Usecases
14 | }
15 |
16 | func (a *AnsiblerGrpcService) RemoveClaudieUtilities(_ context.Context, request *pb.RemoveClaudieUtilitiesRequest) (*pb.RemoveClaudieUtilitiesResponse, error) {
17 | return a.usecases.RemoveUtilities(request)
18 | }
19 |
20 | func (a *AnsiblerGrpcService) UpdateAPIEndpoint(_ context.Context, request *pb.UpdateAPIEndpointRequest) (*pb.UpdateAPIEndpointResponse, error) {
21 | return a.usecases.UpdateAPIEndpoint(request)
22 | }
23 |
24 | func (a *AnsiblerGrpcService) UpdateProxyEnvsK8SServices(_ context.Context, request *pb.UpdateProxyEnvsK8SServicesRequest) (*pb.UpdateProxyEnvsK8SServicesResponse, error) {
25 | return a.usecases.UpdateProxyEnvsK8sServices(request)
26 | }
27 |
28 | func (a *AnsiblerGrpcService) UpdateProxyEnvsOnNodes(_ context.Context, request *pb.UpdateProxyEnvsOnNodesRequest) (*pb.UpdateProxyEnvsOnNodesResponse, error) {
29 | return a.usecases.UpdateProxyEnvsOnNodes(request)
30 | }
31 |
32 | func (a *AnsiblerGrpcService) InstallNodeRequirements(_ context.Context, request *pb.InstallRequest) (*pb.InstallResponse, error) {
33 | return a.usecases.InstallNodeRequirements(request)
34 | }
35 |
36 | func (a *AnsiblerGrpcService) InstallVPN(_ context.Context, request *pb.InstallRequest) (*pb.InstallResponse, error) {
37 | return a.usecases.InstallVPN(request)
38 | }
39 |
40 | func (a *AnsiblerGrpcService) SetUpLoadbalancers(_ context.Context, request *pb.SetUpLBRequest) (*pb.SetUpLBResponse, error) {
41 | return a.usecases.SetUpLoadbalancers(request)
42 | }
43 |
44 | func (a *AnsiblerGrpcService) DetermineApiEndpointChange(ctx context.Context, request *pb.DetermineApiEndpointChangeRequest) (*pb.DetermineApiEndpointChangeResponse, error) {
45 | return a.usecases.DetermineApiEndpointChange(ctx, request)
46 | }
47 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/proxy/remove-proxy-envs.yml:
--------------------------------------------------------------------------------
1 | - hosts: all
2 | gather_facts: true
3 | become: true
4 | tasks:
5 | - name: Deleting proxy.conf from /etc/apt/apt.conf.d/
6 | ansible.builtin.file:
7 | path: "/etc/apt/apt.conf.d/proxy.conf"
8 | state: absent
9 |     - name: Deleting http-proxy.conf from /etc/systemd/system for containerd, kubelet and docker
10 | ansible.builtin.file:
11 | path: "/etc/systemd/system/{{ item }}.service.d/http-proxy.conf"
12 | state: absent
13 | loop:
14 | - docker
15 | - containerd
16 | - kubelet
17 |     - name: Delete HTTP_PROXY, HTTPS_PROXY and NO_PROXY entries from /etc/environment
18 | ansible.builtin.lineinfile:
19 | path: /etc/environment
20 | regexp: "^{{ item.key }}=.*$"
21 | state: absent
22 | loop:
23 | - { key: "HTTP_PROXY" }
24 | - { key: "http_proxy" }
25 | - { key: "HTTPS_PROXY" }
26 | - { key: "https_proxy" }
27 | - { key: "NO_PROXY" }
28 | - { key: "no_proxy" }
29 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard-uninstall.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | gather_facts: false
4 | become: yes
5 |
6 | pre_tasks:
7 |     # Gather facts manually after we have made sure the VMs are accessible
8 | - name: Gather facts from nodes
9 | ansible.builtin.setup:
10 |
11 | tasks:
12 | - name: Run kubeadm reset
13 | shell: |
14 | kubeadm reset -f
15 | ignore_errors: yes
16 |
17 | - name: Delete /etc/kubernetes directory
18 | ansible.builtin.file:
19 | path: "/etc/kubernetes"
20 | state: absent
21 |
22 | - name: Delete /etc/cni directory
23 | ansible.builtin.file:
24 | path: "/etc/cni"
25 | state: absent
26 |
27 | - name: Bring down WireGuard interface
28 | ansible.builtin.command: wg-quick down wg0
29 | ignore_errors: yes
30 |
31 | - name: Stop and disable systemd service for wg-quick@wg0
32 | ansible.builtin.systemd:
33 | name: "wg-quick@wg0"
34 | enabled: false
35 | ignore_errors: yes
36 |
37 | - name: Delete WireGuard configuration directory
38 | ansible.builtin.file:
39 | path: "/etc/wireguard"
40 | state: absent
41 |
42 | - name: Remove WireGuard package via apt
43 | ansible.builtin.apt:
44 | pkg:
45 | - wireguard
46 | - wireguard-tools
47 | state: absent
48 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | gather_facts: false
4 | become: yes
5 |
6 | pre_tasks:
7 | - name: Wait 100 seconds for target connection to become reachable/usable
8 | wait_for_connection:
9 | timeout: 100
10 |
11 |     # Gather facts manually after we have made sure the VMs are accessible
12 | - name: Gather facts from nodes
13 | ansible.builtin.setup:
14 |
15 | # abort playbook on any fatal error, the golang code will trigger a retry
16 | any_errors_fatal: true
17 | roles:
18 | - role: "./wireguard"
19 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 | language: python
3 | python: "2.7"
4 |
5 | # Use the new container infrastructure
6 | sudo: false
7 |
8 | # Install ansible
9 | addons:
10 | apt:
11 | packages:
12 | - python-pip
13 |
14 | install:
15 | # Install ansible
16 | - pip install ansible
17 |
18 | # Check ansible version
19 | - ansible --version
20 |
21 | # Create ansible.cfg with correct roles_path
22 | - printf '[defaults]\nroles_path=../' >ansible.cfg
23 |
24 | script:
25 | # Basic role syntax check
26 | - ansible-playbook tests/test.yml -i tests/inventory --syntax-check
27 |
28 | notifications:
29 | webhooks: https://galaxy.ansible.com/api/v1/notifications/
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/README.md:
--------------------------------------------------------------------------------
1 | Role Name
2 | =========
3 |
4 | A brief description of the role goes here.
5 |
6 | Requirements
7 | ------------
8 |
9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
10 |
11 | Role Variables
12 | --------------
13 |
14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
15 |
16 | Dependencies
17 | ------------
18 |
19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 |
21 | Example Playbook
22 | ----------------
23 |
24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
25 |
26 | - hosts: servers
27 | roles:
28 | - { role: username.rolename, x: 42 }
29 |
30 | License
31 | -------
32 |
33 | BSD
34 |
35 | Author Information
36 | ------------------
37 |
38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed).
39 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | wg_conf_dir: "/etc/wireguard"
3 | wg_listen_port: 51820
4 | wg_interface_name: "wg0"
5 | mtu_size: 1380
6 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for wireguard
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: Samuel Stolicny
3 | description: DevOps
4 | company: Berops
5 |
6 | # If the issue tracker for your role is not on github, uncomment the
7 | # next line and provide a value
8 | # issue_tracker_url: http://example.com/issue/tracker
9 |
10 | # Choose a valid license ID from https://spdx.org - some suggested licenses:
11 | # - BSD-3-Clause (default)
12 | # - MIT
13 | # - GPL-2.0-or-later
14 | # - GPL-3.0-only
15 | # - Apache-2.0
16 | # - CC-BY-4.0
17 | license: license (GPL-2.0-or-later, MIT, etc)
18 |
19 | min_ansible_version: 2.9
20 |
21 | # If this a Container Enabled role, provide the minimum Ansible Container version.
22 | # min_ansible_container_version:
23 |
24 | #
25 | # Provide a list of supported platforms, and for each platform a list of versions.
26 | # If you don't wish to enumerate all versions for a particular platform, use 'all'.
27 | # To view available platforms and versions (or releases), visit:
28 | # https://galaxy.ansible.com/api/v1/platforms/
29 | #
30 | # platforms:
31 | # - name: Fedora
32 | # versions:
33 | # - all
34 | # - 25
35 | # - name: SomePlatform
36 | # versions:
37 | # - all
38 | # - 1.0
39 | # - 7
40 | # - 99.99
41 |
42 | galaxy_tags: []
43 | # List tags for your role here, one per line. A tag is a keyword that describes
44 | # and categorizes the role. Users find roles by searching for tags. Be sure to
45 | # remove the '[]' above, if you add tags to this list.
46 | #
47 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
48 | # Maximum 20 tags per role.
49 |
50 | dependencies: []
51 | # List your role dependencies here, one per line. Be sure to remove the '[]' above,
52 | # if you add dependencies to this list.
53 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/tasks/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install Wireguard and dependencies
3 | ansible.builtin.apt:
4 | pkg:
5 | - wireguard
6 | - pipx
7 | - net-tools
8 | state: present
9 | update_cache: true
10 | retries: 10
11 | delay: 10
12 | register: res
13 | until: res is not failed
14 |
15 | - name: Install wireguard via pipx
16 | community.general.pipx:
17 | name: wireguard
18 | ...
19 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/tasks/kill_unattended_upgrades.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Check if unattended-upgrades.service is present"
3 | ansible.builtin.command: |
4 | systemctl list-unit-files --quiet unattended-upgrades.service
5 | register: unattended_upgrades_service_exists
6 |   # This task may fail if the service is not present. That is not an issue,
7 |   # since we only use this information for further decisions.
8 | ignore_errors: true
9 | # don't count this as a change.
10 | changed_when: false
11 |
12 | - name: "Disable unattended (unintended) upgrades systemd service"
13 | ansible.builtin.systemd:
14 | name: unattended-upgrades
15 | state: stopped
16 | enabled: false
17 | when: |
18 | unattended_upgrades_service_exists is not failed
19 |
20 | - name: "Make sure unattended upgrades package is not installed"
21 | ansible.builtin.apt:
22 | name: "unattended-upgrades"
23 | state: "absent"
24 | purge: true
25 | update_cache: true
26 | force_apt_get: true
27 | ...
28 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: kill_unattended_upgrades.yml
3 | tags: install
4 |
5 | - include_tasks: install.yml
6 | tags: install
7 |
8 | - include_tasks: configure.yml
9 | tags: configure
10 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/templates/wg-dynamic.conf.j2:
--------------------------------------------------------------------------------
1 | [Interface]
2 | Address = {{ private_ip }}/24
3 | PrivateKey = {{ privatekey.stdout }}
4 | ListenPort = {{ wg_listen_port }}
5 |
6 | {% for host in groups['dynamic'] %}
7 | {% if publickey.stdout != hostvars[host].publickey.stdout %}
8 | [Peer]
9 | PublicKey = {{ hostvars[host].publickey.stdout }}
10 | Endpoint = {{ hostvars[host].ansible_host }}:{{ wg_listen_port }}
11 | AllowedIps = {{ hostvars[host].private_ip }}/32
12 | PersistentKeepalive = 30
13 | {% endif %}
14 | {% endfor %}
15 |
16 | {% for host in groups['static'] %}
17 | {% if publickey.stdout != hostvars[host].publickey.stdout %}
18 | [Peer]
19 | PublicKey = {{ hostvars[host].publickey.stdout }}
20 | AllowedIps = {{ hostvars[host].private_ip }}/32
21 | {% endif %}
22 | {% endfor %}
--------------------------------------------------------------------------------
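
For orientation, a hypothetical rendering of the template above for one dynamic and one static peer (all addresses and keys are placeholders): peers from the dynamic group get an Endpoint plus PersistentKeepalive, while peers from the static group are listed without an Endpoint and are expected to initiate the connection themselves.

[Interface]
Address = 192.168.2.1/24
PrivateKey = <this-node-private-key>
ListenPort = 51820

[Peer]
PublicKey = <dynamic-peer-public-key>
Endpoint = 203.0.113.10:51820
AllowedIps = 192.168.2.2/32
PersistentKeepalive = 30

[Peer]
PublicKey = <static-peer-public-key>
AllowedIps = 192.168.2.3/32

--------------------------------------------------------------------------------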
/services/ansibler/server/ansible-playbooks/wireguard/templates/wg-static.conf.j2:
--------------------------------------------------------------------------------
1 | [Interface]
2 | Address = {{ private_ip }}/24
3 | PrivateKey = {{ privatekey.stdout }}
4 | ListenPort = {{ wg_listen_port }}
5 |
6 | {% for host in groups['dynamic'] %}
7 | {% if publickey.stdout != hostvars[host].publickey.stdout %}
8 | [Peer]
9 | PublicKey = {{ hostvars[host].publickey.stdout }}
10 | Endpoint = {{ hostvars[host].ansible_host }}:{{ wg_listen_port }}
11 | AllowedIps = {{ hostvars[host].private_ip }}/32
12 | PersistentKeepalive = 60
13 | {% endif %}
14 | {% endfor %}
15 |
16 | {% for host in groups['static'] %}
17 | {% if publickey.stdout != hostvars[host].publickey.stdout %}
18 | [Peer]
19 | PublicKey = {{ hostvars[host].publickey.stdout }}
20 | Endpoint = {{ hostvars[host].ansible_host }}:{{ wg_listen_port }}
21 | AllowedIps = {{ hostvars[host].private_ip }}/32
22 | {% endif %}
23 | {% endfor %}
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
3 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | remote_user: root
4 | gather_facts: yes
5 | become: yes
6 |
7 | roles:
8 | - role: "./wireguard"
9 |
--------------------------------------------------------------------------------
/services/ansibler/server/ansible-playbooks/wireguard/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | config: {}
--------------------------------------------------------------------------------
/services/ansibler/server/domain/usecases/usecases.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "github.com/berops/claudie/services/ansibler/server/utils"
5 |
6 | "golang.org/x/sync/semaphore"
7 | )
8 |
9 | const (
10 |   // baseDirectory is the ansibler base directory
11 |   baseDirectory = "services/ansibler/server"
12 |   // outputDirectory is the directory used to generate ansible playbooks/inventories.
13 | outputDirectory = "clusters"
14 | // SpawnProcessLimit is the number of processes concurrently executing ansible.
15 | SpawnProcessLimit = 5
16 | )
17 |
18 | type Usecases struct {
19 | // SpawnProcessLimit limits the number of spawned ansible processes.
20 | SpawnProcessLimit *semaphore.Weighted
21 | }
22 |
23 | type (
24 | NodepoolsInfo struct {
25 | Nodepools utils.NodePools
26 | ClusterID string
27 | ClusterNetwork string
28 | }
29 |
30 | AllNodesInventoryData struct {
31 | NodepoolsInfo []*NodepoolsInfo
32 | }
33 | )
34 |
--------------------------------------------------------------------------------
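
The weighted semaphore above caps how many ansible processes run concurrently. A minimal sketch of the intended acquire/release pattern (the helper name is hypothetical):

package example

import (
	"context"

	"golang.org/x/sync/semaphore"
)

// runWithLimit blocks until one of the process slots is free, then runs fn.
func runWithLimit(ctx context.Context, sem *semaphore.Weighted, fn func() error) error {
	if err := sem.Acquire(ctx, 1); err != nil {
		return err
	}
	defer sem.Release(1)
	return fn()
}

// Construction mirrors the constant above:
//   sem := semaphore.NewWeighted(SpawnProcessLimit)

--------------------------------------------------------------------------------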
/services/ansibler/server/utils/api_endpoint.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 |
6 | "golang.org/x/sync/semaphore"
7 | )
8 |
9 | const apiChangePlaybookFilePath = "../../ansible-playbooks/apiEndpointChange.yml"
10 |
11 | // ChangeAPIEndpoint will change the kubeadm configuration.
12 | // It will set the Api endpoint of the cluster to the public IP of the
13 | // newly selected ApiEndpoint node.
14 | func ChangeAPIEndpoint(clusterName, oldEndpoint, newEndpoint, directory string, spawnProcessLimit *semaphore.Weighted) error {
15 | ansible := Ansible{
16 | Playbook: apiChangePlaybookFilePath,
17 | Inventory: InventoryFileName,
18 | Flags: fmt.Sprintf("--extra-vars \"NewEndpoint=%s OldEndpoint=%s\"", newEndpoint, oldEndpoint),
19 | Directory: directory,
20 | SpawnProcessLimit: spawnProcessLimit,
21 | }
22 |
23 | if err := ansible.RunAnsiblePlaybook(fmt.Sprintf("EP - %s", clusterName)); err != nil {
24 |     return fmt.Errorf("error while running ansible: %w", err)
25 | }
26 |
27 | return nil
28 | }
29 |
--------------------------------------------------------------------------------
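
A hypothetical call site for ChangeAPIEndpoint, with placeholder endpoints and inventory directory; in practice the semaphore is shared with the other ansible invocations rather than created per call.

package example

import (
	"golang.org/x/sync/semaphore"

	"github.com/berops/claudie/services/ansibler/server/utils"
)

func changeEndpointExample() error {
	sem := semaphore.NewWeighted(5) // normally shared across the service
	// All values below are placeholders.
	return utils.ChangeAPIEndpoint(
		"example-cluster",          // cluster name, used in the log prefix
		"203.0.113.10",             // old API endpoint
		"203.0.113.20",             // new API endpoint
		"clusters/example-cluster", // directory holding the generated inventory
		sem,
	)
}

--------------------------------------------------------------------------------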
/services/ansibler/server/utils/proxy.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | type (
4 | ProxyInventoryFileParameters struct {
5 | K8sNodepools NodePools
6 | ClusterID string
7 | NoProxyList string
8 | HttpProxyUrl string
9 | }
10 | )
11 |
--------------------------------------------------------------------------------
/services/ansibler/templates/all-node-inventory.goini:
--------------------------------------------------------------------------------
1 | [dynamic]
2 | {{- range $nodepoolInfo := .NodepoolsInfo }}
3 | {{- range $nodepool := $nodepoolInfo.Nodepools.Dynamic }}
4 | {{- range $node := $nodepool.Nodes }}
5 | {{ trimPrefix $node.Name (printf "%s-" $nodepoolInfo.ClusterID) }} ansible_user=root ansible_host={{ $node.Public }} private_ip={{ $node.Private }} netmask={{ extractNetmaskFromCIDR $nodepoolInfo.ClusterNetwork }} ansible_ssh_private_key_file={{ $nodepool.Name }}.pem ansible_ssh_extra_args="-o IdentitiesOnly=yes"
6 | {{- end }}
7 | {{- end }}
8 | {{- end }}
9 |
10 | [static]
11 | {{- range $nodepoolInfo := .NodepoolsInfo }}
12 | {{- range $nodepool := $nodepoolInfo.Nodepools.Static }}
13 | {{- range $node := $nodepool.Nodes }}
14 | {{ $node.Name }} ansible_user={{ $node.Username }} ansible_host={{ $node.Public }} private_ip={{ $node.Private }} netmask={{ extractNetmaskFromCIDR $nodepoolInfo.ClusterNetwork }} ansible_ssh_private_key_file={{ $node.Name }}.pem ansible_ssh_extra_args="-o IdentitiesOnly=yes"
15 | {{- end }}
16 | {{- end }}
17 | {{- end }}
18 |
19 |
--------------------------------------------------------------------------------
/services/ansibler/templates/conf.gotpl:
--------------------------------------------------------------------------------
1 | stream{
2 | {{- range $role := .Roles }}
3 | upstream {{ $role.Role.Name }}{
4 | {{- if $role.Role.Settings.StickySessions }}
5 | hash $remote_addr consistent;
6 | {{- end }}
7 |
8 | {{- range $node := $role.TargetNodes}}
9 | server {{$node.Private}}:{{$role.Role.TargetPort}} max_fails=3 fail_timeout=10s;
10 | {{- end}}
11 | }
12 |
13 | server {
14 | listen {{ $role.Role.Port }};
15 | proxy_pass {{ $role.Role.Name}};
16 | proxy_next_upstream on;
17 |
18 | {{- if $role.Role.Settings.ProxyProtocol }}
19 | {{- if ne $role.Role.Port 6443 }}
20 | proxy_protocol on;
21 | {{- end }}
22 | {{- end }}
23 | }
24 | {{- end }}
25 | }
26 |
--------------------------------------------------------------------------------
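
A hypothetical rendering of the template above for a single role named "apiserver" forwarding port 6443 to two target nodes (addresses are placeholders). Note that proxy_protocol is deliberately omitted for port 6443 even when the role enables it, per the template's condition.

stream{
    upstream apiserver{
        server 192.168.2.1:6443 max_fails=3 fail_timeout=10s;
        server 192.168.2.2:6443 max_fails=3 fail_timeout=10s;
    }

    server {
        listen 6443;
        proxy_pass apiserver;
        proxy_next_upstream on;
    }
}

--------------------------------------------------------------------------------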
/services/ansibler/templates/nginx.goyml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: {{ .Loadbalancer }}
3 | gather_facts: no
4 | become: yes
5 | tasks:
6 | - name: ensure nginx is at the latest version
7 | apt:
8 | name: nginx
9 | state: latest
10 | update_cache: yes
11 | - name: install stream module for nginx
12 | ansible.builtin.apt:
13 | name: libnginx-mod-stream
14 | state: present
15 | update_cache: true
16 | - name: copy config files
17 | copy:
18 | src: lb.conf
19 | dest: /etc/nginx/passthrough.conf
20 | - name: include passthrough conf in nginx.conf
21 | lineinfile:
22 | path: /etc/nginx/nginx.conf
23 | line: "include /etc/nginx/passthrough.conf;"
24 | insertafter: EOF
25 | become: yes
26 | - name: delete default HTTP server conf (sites-available)
27 | file:
28 | path: "/etc/nginx/sites-available/default"
29 | state: absent
30 | - name: delete default HTTP server conf (sites-enabled)
31 | file:
32 | path: "/etc/nginx/sites-enabled/default"
33 | state: absent
34 | - name: Increase worker connections
35 | ansible.builtin.lineinfile:
36 | path: /etc/nginx/nginx.conf
37 | regexp: '^(\s*)worker_connections (\d+);'
38 | backrefs: yes
39 | line: '\1worker_connections 65535;'
40 | - name: Ensure worker_rlimit_nofile is set
41 | ansible.builtin.blockinfile:
42 | path: /etc/nginx/nginx.conf
43 | block: |
44 | worker_rlimit_nofile 65535;
45 | insertbefore: '^events {'
46 | marker: "# {mark} ANSIBLE MANAGED BLOCK"
47 | - name: restart nginx
48 | service:
49 | name: nginx
50 | state: restarted
51 | become: yes
52 |
--------------------------------------------------------------------------------
/services/ansibler/templates/proxy-envs.goini:
--------------------------------------------------------------------------------
1 | [control]
2 | {{- range $nodepool := .K8sNodepools.Dynamic }}
3 | {{- if $nodepool.IsControl }}
4 | {{- range $node := $nodepool.Nodes }}
5 | {{ trimPrefix $node.Name (printf "%s-" $.ClusterID) }} ansible_user=root ansible_host={{ $node.Public }} private_ip={{ $node.Private }} no_proxy_list={{ $.NoProxyList }} http_proxy_url={{ $.HttpProxyUrl }} ansible_ssh_private_key_file={{ $nodepool.Name }}.pem ansible_ssh_extra_args="-o IdentitiesOnly=yes"
6 | {{- end }}
7 | {{- end }}
8 | {{- end }}
9 | {{- range $nodepool := .K8sNodepools.Static }}
10 | {{- if $nodepool.IsControl }}
11 | {{- range $node := $nodepool.Nodes }}
12 | {{ $node.Name }} ansible_user={{ $node.Username }} ansible_host={{ $node.Public }} private_ip={{ $node.Private }} no_proxy_list={{ $.NoProxyList }} http_proxy_url={{ $.HttpProxyUrl }} ansible_ssh_private_key_file={{ $node.Name }}.pem ansible_ssh_extra_args="-o IdentitiesOnly=yes"
13 | {{- end }}
14 | {{- end }}
15 | {{- end }}
16 |
17 | [compute]
18 | {{- range $nodepool := .K8sNodepools.Dynamic }}
19 | {{- if not $nodepool.IsControl }}
20 | {{- range $node := $nodepool.Nodes }}
21 | {{ trimPrefix $node.Name (printf "%s-" $.ClusterID) }} ansible_user=root ansible_host={{ $node.Public }} private_ip={{ $node.Private }} no_proxy_list={{ $.NoProxyList }} http_proxy_url={{ $.HttpProxyUrl }} ansible_ssh_private_key_file={{ $nodepool.Name }}.pem ansible_ssh_extra_args="-o IdentitiesOnly=yes"
22 | {{- end }}
23 | {{- end }}
24 | {{- end }}
25 | {{- range $nodepool := .K8sNodepools.Static }}
26 | {{- if not $nodepool.IsControl }}
27 | {{- range $node := $nodepool.Nodes }}
28 | {{ $node.Name }} ansible_user={{ $node.Username }} ansible_host={{ $node.Public }} private_ip={{ $node.Private }} no_proxy_list={{ $.NoProxyList }} http_proxy_url={{ $.HttpProxyUrl }} ansible_ssh_private_key_file={{ $node.Name }}.pem ansible_ssh_extra_args="-o IdentitiesOnly=yes"
29 | {{- end }}
30 | {{- end }}
31 | {{- end }}
32 |
--------------------------------------------------------------------------------
/services/ansibler/templates/templates.go:
--------------------------------------------------------------------------------
1 | package templates
2 |
3 | import _ "embed"
4 |
5 | var (
6 | //go:embed all-node-inventory.goini
7 | AllNodesInventoryTemplate string
8 |
9 | //go:embed conf.gotpl
10 | NginxConfigTemplate string
11 |
12 | //go:embed lb-inventory.goini
13 | LoadbalancerInventoryTemplate string
14 |
15 | //go:embed nginx.goyml
16 | NginxPlaybookTemplate string
17 |
18 | //go:embed node-exporter.goyml
19 | NodeExporterPlaybookTemplate string
20 |
21 | //go:embed proxy-envs.goini
22 | ProxyEnvsInventoryTemplate string
23 | )
24 |
--------------------------------------------------------------------------------
/services/autoscaler-adapter/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/terraformer
8 | services/kube-eleven
9 | services/ansibler
10 | services/testing-framework
11 | services/claudie-operator
12 | services/kuber
13 | services/builder
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/autoscaler-adapter/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/golang:1.24.1 AS build
2 |
3 | #Unset the GOPATH
4 | ENV GOPATH=
5 |
6 | #First, copy go.mod and go.sum to prevent unnecessary download of modules
7 | COPY go.mod .
8 | COPY go.sum .
9 |
10 | #Check if any modules need downloading
11 | RUN go mod download
12 |
13 |
14 | WORKDIR /go
15 | #Copy all files apart from the ones in .dockerignore
16 | COPY . .
17 | #Change the directory
18 | WORKDIR /go/services/autoscaler-adapter
19 |
20 | #Compile the golang code, CGO_ENABLED=0 removes cross-compile dependencies
21 | RUN CGO_ENABLED=0 go build -o claudie-autoscaler-adapter
22 |
23 | #Use alpine image to copy certs needed for cloud libraries
24 | FROM alpine:latest AS certs
25 | RUN apk --update add ca-certificates
26 |
27 | #Use empty base image
28 | FROM scratch
29 | #Add repository label
30 | LABEL org.opencontainers.image.source "https://github.com/berops/claudie"
31 | #Add base image name as a label
32 | LABEL org.opencontainers.image.base.name "scratch"
33 | #Add description to the image
34 | LABEL org.opencontainers.image.description "Image for external gRPC provider for Cluster Autoscaler by Claudie"
35 |
36 | #Copy the binaries & certs to empty base image
37 | ENV PATH=/bin
38 | COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
39 | COPY --from=build /go/services/autoscaler-adapter/claudie-autoscaler-adapter /bin/claudie-autoscaler-adapter
40 |
41 | WORKDIR /bin
42 |
43 | ENTRYPOINT [ "./claudie-autoscaler-adapter"]
44 |
--------------------------------------------------------------------------------
/services/autoscaler-adapter/main.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "net"
7 | "os"
8 |
9 | "github.com/berops/claudie/internal/grpcutils"
10 | "github.com/berops/claudie/internal/loggerutils"
11 | "github.com/berops/claudie/services/autoscaler-adapter/claudie_provider"
12 | "github.com/rs/zerolog/log"
13 |
14 | "google.golang.org/grpc"
15 |
16 | "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/externalgrpc/protos"
17 | )
18 |
19 | func main() {
20 | projectName := os.Getenv("PROJECT_NAME")
21 | clusterName := os.Getenv("CLUSTER_NAME")
22 | port := os.Getenv("ADAPTER_PORT")
23 |
24 | if projectName == "" || clusterName == "" || port == "" {
25 |     log.Fatal().Msgf("Env vars PROJECT_NAME, CLUSTER_NAME and ADAPTER_PORT must be specified")
26 | }
27 |
28 | loggerutils.Init(fmt.Sprintf("%s-%s", "autoscaler-adapter", clusterName))
29 |
30 | server := grpcutils.NewGRPCServer(
31 | grpc.ChainUnaryInterceptor(grpcutils.PeerInfoInterceptor(&log.Logger)),
32 | )
33 |
34 | // Listen
35 | serviceAddr := net.JoinHostPort("0.0.0.0", port)
36 | lis, err := net.Listen("tcp", serviceAddr)
37 | if err != nil {
38 | log.Fatal().Msgf("failed to listen: %s", err)
39 | }
40 |
41 | // Serve
42 | srv := claudie_provider.NewClaudieCloudProvider(context.Background(), projectName, clusterName)
43 | protos.RegisterCloudProviderServer(server, srv)
44 | log.Info().Msgf("Server ready at: %s", port)
45 | if err := server.Serve(lis); err != nil {
46 | log.Fatal().Msgf("failed to serve: %v", err)
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/services/builder/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/terraformer
8 | services/kube-eleven
9 | services/ansibler
10 | services/testing-framework
11 | services/claudie-operator
12 | services/kuber
13 | services/autoscaler-adapter
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/builder/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/golang:1.24.1 AS build
2 |
3 | #Unset the GOPATH
4 | ENV GOPATH=
5 |
6 | #First, copy go.mod and go.sum to prevent unnecessary download of modules
7 | COPY go.mod .
8 | COPY go.sum .
9 |
10 | #Check if any modules need downloading
11 | RUN go mod download
12 |
13 | #Copy all files apart from the ones in .dockerignore
14 | COPY . .
15 |
16 | #Change the directory
17 | WORKDIR /go/services/builder
18 |
19 | #Compile the golang code, CGO_ENABLED=0 removes cross-compile dependencies
20 | RUN CGO_ENABLED=0 go build
21 |
22 | FROM scratch
23 | #Add repository label
24 | LABEL org.opencontainers.image.source "https://github.com/berops/claudie"
25 | #Add image name as a label
26 | LABEL org.opencontainers.image.base.name "scratch"
27 | #Add description to the image
28 | LABEL org.opencontainers.image.description "Image for Builder from Claudie"
29 |
30 | #Copy the binaries to empty base image
31 | COPY --from=build /go/services/builder/builder /bin/services/builder/builder
32 |
33 | #Run server
34 | WORKDIR /bin
35 | ENTRYPOINT [ "./services/builder/builder" ]
36 |
--------------------------------------------------------------------------------
/services/builder/domain/ports/ansibler_port.go:
--------------------------------------------------------------------------------
1 | package ports
2 |
3 | import (
4 | "github.com/berops/claudie/proto/pb"
5 | "github.com/berops/claudie/proto/pb/spec"
6 | builder "github.com/berops/claudie/services/builder/internal"
7 | )
8 |
9 | type AnsiblerPort interface {
10 | InstallNodeRequirements(builderCtx *builder.Context, ansiblerGrpcClient pb.AnsiblerServiceClient) (*pb.InstallResponse, error)
11 | InstallVPN(builderCtx *builder.Context, ansiblerGrpcClient pb.AnsiblerServiceClient) (*pb.InstallResponse, error)
12 | SetUpLoadbalancers(builderCtx *builder.Context, ansiblerGrpcClient pb.AnsiblerServiceClient) (*pb.SetUpLBResponse, error)
13 | DetermineApiEndpointChange(builderCtx *builder.Context, cid string, did string, stt spec.ApiEndpointChangeState, ansiblerGrpcClient pb.AnsiblerServiceClient) (*pb.DetermineApiEndpointChangeResponse, error)
14 | UpdateAPIEndpoint(builderCtx *builder.Context, nodepool, node string, ansiblerGrpcClient pb.AnsiblerServiceClient) (*pb.UpdateAPIEndpointResponse, error)
15 | UpdateProxyEnvsK8SServices(builderCtx *builder.Context, ansiblerGrpcClient pb.AnsiblerServiceClient) error
16 | UpdateProxyEnvsOnNodes(builderCtx *builder.Context, ansiblerGrpcClient pb.AnsiblerServiceClient) error
17 | RemoveClaudieUtilities(builderCtx *builder.Context, ansiblerGrpcClient pb.AnsiblerServiceClient) error
18 |
19 | PerformHealthCheck() error
20 | GetClient() pb.AnsiblerServiceClient
21 | }
22 |
--------------------------------------------------------------------------------
/services/builder/domain/ports/kube_eleven_port.go:
--------------------------------------------------------------------------------
1 | package ports
2 |
3 | import (
4 | "github.com/berops/claudie/proto/pb"
5 | builder "github.com/berops/claudie/services/builder/internal"
6 | )
7 |
8 | type KubeElevenPort interface {
9 | BuildCluster(builderCtx *builder.Context, loadBalancerEndpoint string, kubeElevenGrpcClient pb.KubeElevenServiceClient) (*pb.BuildClusterResponse, error)
10 | DestroyCluster(builderCtx *builder.Context, loadBalancerEndpoint string, kubeElevenGrpcClient pb.KubeElevenServiceClient) (*pb.DestroyClusterResponse, error)
11 |
12 | PerformHealthCheck() error
13 | GetClient() pb.KubeElevenServiceClient
14 | }
15 |
--------------------------------------------------------------------------------
/services/builder/domain/ports/kuber_port.go:
--------------------------------------------------------------------------------
1 | package ports
2 |
3 | import (
4 | "github.com/berops/claudie/proto/pb"
5 | "github.com/berops/claudie/proto/pb/spec"
6 | builder "github.com/berops/claudie/services/builder/internal"
7 | )
8 |
9 | type KuberPort interface {
10 | SetUpStorage(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) (*pb.SetUpStorageResponse, error)
11 | StoreLBScrapeConfig(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
12 | RemoveLBScrapeConfig(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
13 | StoreClusterMetadata(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
14 | DeleteClusterMetadata(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
15 | StoreKubeconfig(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
16 | DeleteKubeconfig(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
17 | SetUpClusterAutoscaler(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
18 | DestroyClusterAutoscaler(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
19 | PatchClusterInfoConfigMap(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
20 | PatchKubeProxyConfigMap(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
21 | PatchKubeadmConfigMap(builderCtx *builder.Context, lbEndpoint string, kuberGrpcClient pb.KuberServiceClient) error
22 | PatchNodes(builderCtx *builder.Context, kuberGrpcClient pb.KuberServiceClient) error
23 | DeleteNodes(cluster *spec.K8Scluster, nodepools map[string]*spec.DeletedNodes, kuberGrpcClient pb.KuberServiceClient) (*pb.DeleteNodesResponse, error)
24 | CiliumRolloutRestart(cluster *spec.K8Scluster, kuberGrpcClient pb.KuberServiceClient) error
25 |
26 | PerformHealthCheck() error
27 | GetClient() pb.KuberServiceClient
28 | }
29 |
--------------------------------------------------------------------------------
/services/builder/domain/ports/terraformer_port.go:
--------------------------------------------------------------------------------
1 | package ports
2 |
3 | import (
4 | "github.com/berops/claudie/proto/pb"
5 | builder "github.com/berops/claudie/services/builder/internal"
6 | )
7 |
8 | type TerraformerPort interface {
9 | BuildInfrastructure(builderCtx *builder.Context, terraformerGrpcClient pb.TerraformerServiceClient) (*pb.BuildInfrastructureResponse, error)
10 | DestroyInfrastructure(builderCtx *builder.Context, terraformerGrpcClient pb.TerraformerServiceClient) (*pb.DestroyInfrastructureResponse, error)
11 |
12 | PerformHealthCheck() error
13 | GetClient() pb.TerraformerServiceClient
14 | }
15 |
--------------------------------------------------------------------------------
/services/builder/domain/usecases/usecases.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "github.com/berops/claudie/services/builder/domain/ports"
5 | managerclient "github.com/berops/claudie/services/manager/client"
6 | )
7 |
8 | type Usecases struct {
9 | // Manager client to perform tasks related to manager
10 | Manager managerclient.ClientAPI
11 | // Terraformer connector to perform tasks related to Terraformer
12 | Terraformer ports.TerraformerPort
13 | // Ansibler connector to perform tasks related to Ansibler
14 | Ansibler ports.AnsiblerPort
15 | // KubeEleven connector to perform tasks related to KubeEleven
16 | KubeEleven ports.KubeElevenPort
17 | // Kuber connector to perform tasks related to Kuber
18 | Kuber ports.KuberPort
19 | }
20 |
--------------------------------------------------------------------------------
/services/claudie-operator/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/terraformer
8 | services/kube-eleven
9 | services/builder
10 | services/testing-framework
11 | services/ansibler
12 | services/kuber
13 | services/autoscaler-adapter
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/claudie-operator/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/golang:1.24.1 AS build
2 |
3 | ARG TARGETARCH
4 |
5 | #Unset the GOPATH
6 | ENV GOPATH=
7 |
8 | #First, copy go.mod and go.sum to prevent unnecessary download of modules
9 | COPY go.mod .
10 | COPY go.sum .
11 |
12 | #Check if any modules need downloading
13 | RUN go mod download
14 |
15 | #Copy all files apart from the ones in .dockerignore
16 | COPY . .
17 |
18 | #Change the directory
19 | WORKDIR /go/services/claudie-operator
20 |
21 | #Compile the golang code, CGO_ENABLED=0 removes cross-compile dependencies
22 | RUN CGO_ENABLED=0 go build
23 |
24 | #Use empty base image
25 | FROM scratch
26 | #Add repository label
27 | LABEL org.opencontainers.image.source "https://github.com/berops/claudie"
28 | #Add image name as a label
29 | LABEL org.opencontainers.image.base.name "scratch"
30 | #Add description to the image
31 | LABEL org.opencontainers.image.description "Image for Claudie-operator from Claudie"
32 |
33 | #Copy the binaries to empty base image
34 | COPY --from=build /go/services/claudie-operator/claudie-operator /bin/services/claudie-operator/claudie-operator
35 |
36 | WORKDIR /bin
37 | #Run server
38 | ENTRYPOINT [ "./services/claudie-operator/claudie-operator" ]
39 |
--------------------------------------------------------------------------------
/services/claudie-operator/client/client.go:
--------------------------------------------------------------------------------
1 | package client
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/berops/claudie/proto/pb"
7 | )
8 |
9 | // SendAutoscalerEvent will send the information about an autoscaler scale up/down event to the claudie-operator.
10 | func SendAutoscalerEvent(c pb.OperatorServiceClient, req *pb.SendAutoscalerEventRequest) error {
11 | _, err := c.SendAutoscalerEvent(context.Background(), req)
12 | return err
13 | }
14 |
--------------------------------------------------------------------------------
/services/claudie-operator/pkg/api/v1beta1/groupversion_info.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2023 berops.com.
3 |
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 |
8 | http://www.apache.org/licenses/LICENSE-2.0
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 |
17 | // Package v1beta1 contains API Schema definitions for the v1beta1 API group
18 | // +kubebuilder:object:generate=true
19 | // +groupName=claudie.io
20 | package v1beta1
21 |
22 | import (
23 | "k8s.io/apimachinery/pkg/runtime/schema"
24 | "sigs.k8s.io/controller-runtime/pkg/scheme"
25 | )
26 |
27 | var (
28 | // GroupVersion is group version used to register these objects
29 | GroupVersion = schema.GroupVersion{Group: "claudie.io", Version: "v1beta1"}
30 |
31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme
32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
33 |
34 | // AddToScheme adds the types in this group-version to the given scheme.
35 | AddToScheme = SchemeBuilder.AddToScheme
36 | )
37 |
--------------------------------------------------------------------------------
/services/claudie-operator/server/adapters/inbound/grpc/adapter.go:
--------------------------------------------------------------------------------
1 | package grpc
2 |
3 | import (
4 | "fmt"
5 | "net"
6 |
7 | "github.com/rs/zerolog/log"
8 | "google.golang.org/grpc"
9 |
10 | "github.com/berops/claudie/internal/envs"
11 | "github.com/berops/claudie/internal/grpcutils"
12 | "github.com/berops/claudie/proto/pb"
13 | "github.com/berops/claudie/services/claudie-operator/server/domain/usecases"
14 | )
15 |
16 | const (
17 | defaultOperatorPort = 50058
18 | )
19 |
20 | type GrpcAdapter struct {
21 | tcpListener net.Listener
22 | server *grpc.Server
23 | }
24 |
25 | // Init will create the underlying gRPC server and the gRPC healthcheck server
26 | func (g *GrpcAdapter) Init(usecases *usecases.Usecases) {
27 | port := envs.GetOrDefault("OPERATOR_PORT", fmt.Sprint(defaultOperatorPort))
28 | listeningAddress := net.JoinHostPort("0.0.0.0", port)
29 |
30 | tcpListener, err := net.Listen("tcp", listeningAddress)
31 | if err != nil {
32 | log.Fatal().Msgf("Failed to start Grpc server for claudie-operator at %s: %v", listeningAddress, err)
33 | }
34 | g.tcpListener = tcpListener
35 |
36 | log.Info().Msgf("Claudie-operator bound to %s", listeningAddress)
37 |
38 | g.server = grpcutils.NewGRPCServer(
39 | grpc.ChainUnaryInterceptor(grpcutils.PeerInfoInterceptor(&log.Logger)),
40 | )
41 |
42 | pb.RegisterOperatorServiceServer(g.server, &OperatorGrpcService{usecases: usecases})
43 | }
44 |
45 | // Serve will create a service goroutine for each connection
46 | func (g *GrpcAdapter) Serve() error {
47 | if err := g.server.Serve(g.tcpListener); err != nil {
48 | return fmt.Errorf("claudie-operator grpc server failed to serve: %w", err)
49 | }
50 |
51 | log.Info().Msgf("Finished listening for incoming gRPC connections")
52 | return nil
53 | }
54 |
55 | // Stop will gracefully shutdown the gRPC server
56 | func (g *GrpcAdapter) Stop() {
57 | g.server.GracefulStop()
58 | }
59 |
--------------------------------------------------------------------------------
/services/claudie-operator/server/adapters/inbound/grpc/operator_service.go:
--------------------------------------------------------------------------------
1 | package grpc
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/berops/claudie/proto/pb"
7 | "github.com/berops/claudie/services/claudie-operator/server/domain/usecases"
8 | )
9 |
10 | type OperatorGrpcService struct {
11 | pb.UnimplementedOperatorServiceServer
12 | usecases *usecases.Usecases
13 | }
14 |
15 | func (f *OperatorGrpcService) SendAutoscalerEvent(ctx context.Context, request *pb.SendAutoscalerEventRequest) (*pb.SendAutoscalerEventResponse, error) {
16 | return f.usecases.SendAutoscalerEvent(request)
17 | }
18 |
--------------------------------------------------------------------------------
/services/claudie-operator/server/domain/usecases/send_autoscaler_event.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "github.com/berops/claudie/proto/pb"
5 | "github.com/berops/claudie/services/claudie-operator/pkg/api/v1beta1"
6 | "sigs.k8s.io/controller-runtime/pkg/event"
7 | )
8 |
9 | // SendAutoscalerEvent will receive an autoscaler event, and send it to the autoscaler channel
10 | func (u *Usecases) SendAutoscalerEvent(request *pb.SendAutoscalerEventRequest) (*pb.SendAutoscalerEventResponse, error) {
11 | im := v1beta1.InputManifest{}
12 | im.SetName(request.InputManifestName)
13 | im.SetNamespace(request.InputManifestNamespace)
14 | u.SaveAutoscalerEvent <- event.GenericEvent{Object: &im}
15 | return &pb.SendAutoscalerEventResponse{}, nil
16 | }
17 |
--------------------------------------------------------------------------------
/services/claudie-operator/server/domain/usecases/usecases.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 |
6 | "sigs.k8s.io/controller-runtime/pkg/event"
7 |
8 | managerclient "github.com/berops/claudie/services/manager/client"
9 | )
10 |
11 | type Usecases struct {
12 |     // Manager is a connector used to query requests from the manager.
13 | Manager managerclient.ClientAPI
14 |
15 |     // Context which, when cancelled, will close all channels/goroutines.
16 | Context context.Context
17 |
18 |     // SaveAutoscalerEvent is the channel used to pass autoscaler events to the controller.
19 | SaveAutoscalerEvent chan event.GenericEvent
20 | }
21 |
--------------------------------------------------------------------------------
/services/kube-eleven/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/terraformer
8 | services/ansibler
9 | services/builder
10 | services/testing-framework
11 | services/claudie-operator
12 | services/kuber
13 | services/autoscaler-adapter
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/kube-eleven/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/golang:1.24.1 AS build
2 |
3 | ARG TARGETARCH
4 |
5 | # download and unzip kube-one binary
6 | RUN apt-get -qq update && apt-get -qq install unzip
7 | RUN KUBEONE_V=1.9.0 && \
8 | wget -q https://github.com/kubermatic/kubeone/releases/download/v${KUBEONE_V}/kubeone_${KUBEONE_V}_linux_$TARGETARCH.zip && \
9 | unzip -qq kubeone_${KUBEONE_V}_linux_$TARGETARCH.zip -d kubeone_dir
10 |
11 | #Unset the GOPATH
12 | ENV GOPATH=
13 |
14 | #First, copy go.mod and go.sum to prevent unnecessary download of modules
15 | COPY go.mod .
16 | COPY go.sum .
17 |
18 | #Check if any modules need downloading
19 | RUN go mod download
20 |
21 | COPY . .
22 |
23 | #Change the directory
24 | WORKDIR /go/services/kube-eleven/server
25 |
26 | #Compile the golang code, CGO_ENABLED=0 removes cross-compile dependencies
27 | RUN CGO_ENABLED=0 go build
28 |
29 | FROM docker.io/library/alpine:3.20
30 | #Add repository label
31 | LABEL org.opencontainers.image.source "https://github.com/berops/claudie"
32 | #Add image name as a label
33 | LABEL org.opencontainers.image.base.name "docker.io/library/alpine"
34 | #Add description to the image
35 | LABEL org.opencontainers.image.description "Image for Kube-eleven from Claudie"
36 |
37 | RUN apk update
38 | RUN apk add -q bash
39 |
40 | COPY --from=build /go/kubeone_dir/kubeone /usr/local/bin
41 | COPY --from=build /go/services/kube-eleven/server/server /bin/services/kube-eleven/server/server
42 |
43 | #Run server
44 | WORKDIR /bin
45 | ENTRYPOINT [ "./services/kube-eleven/server/server" ]
46 |
47 | #NOTE: We cannot use a scratch image for this Dockerfile since the code shells out to external commands
48 |
--------------------------------------------------------------------------------
/services/kube-eleven/client/client.go:
--------------------------------------------------------------------------------
1 | package kubeElevenClient
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/rs/zerolog/log"
7 |
8 | "github.com/berops/claudie/proto/pb"
9 | )
10 |
11 | // BuildCluster uses the KubeEleven service client to deploy the cluster
12 | func BuildCluster(c pb.KubeElevenServiceClient, req *pb.BuildClusterRequest) (*pb.BuildClusterResponse, error) {
13 | res, err := c.BuildCluster(context.Background(), req)
14 | if err != nil {
15 | log.Err(err).Msgf("Error building cluster")
16 | return res, err
17 | }
18 | return res, nil
19 | }
20 |
21 | func DestroyCluster(c pb.KubeElevenServiceClient, req *pb.DestroyClusterRequest) (*pb.DestroyClusterResponse, error) {
22 | resp, err := c.DestroyCluster(context.Background(), req)
23 | if err != nil {
24 |     log.Err(err).Msgf("Error destroying cluster")
25 | return resp, err
26 | }
27 |
28 | return resp, nil
29 | }
30 |
--------------------------------------------------------------------------------
/services/kube-eleven/server/adapters/inbound/grpc/kube_eleven_service.go:
--------------------------------------------------------------------------------
1 | package grpc
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/berops/claudie/proto/pb"
7 | "github.com/berops/claudie/services/kube-eleven/server/domain/usecases"
8 | )
9 |
10 | type KubeElevenGrpcService struct {
11 | pb.UnimplementedKubeElevenServiceServer
12 |
13 | usecases *usecases.Usecases
14 | }
15 |
16 | func (k *KubeElevenGrpcService) BuildCluster(_ context.Context, request *pb.BuildClusterRequest) (*pb.BuildClusterResponse, error) {
17 | return k.usecases.BuildCluster(request)
18 | }
19 |
20 | func (k *KubeElevenGrpcService) DestroyCluster(_ context.Context, request *pb.DestroyClusterRequest) (*pb.DestroyClusterResponse, error) {
21 | return k.usecases.DestroyCluster(request)
22 | }
23 |
--------------------------------------------------------------------------------
/services/kube-eleven/server/domain/usecases/build_cluster.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/berops/claudie/internal/loggerutils"
7 | "github.com/berops/claudie/proto/pb"
8 | kube_eleven "github.com/berops/claudie/services/kube-eleven/server/domain/utils/kube-eleven"
9 | )
10 |
11 | // BuildCluster builds the kubernetes cluster defined in the desired state
12 | func (u *Usecases) BuildCluster(req *pb.BuildClusterRequest) (*pb.BuildClusterResponse, error) {
13 | logger := loggerutils.WithProjectAndCluster(req.ProjectName, req.Desired.ClusterInfo.Id())
14 |
15 | logger.Info().Msgf("Building kubernetes cluster")
16 |
17 | k := kube_eleven.KubeEleven{
18 | K8sCluster: req.Desired,
19 | LoadBalancerEndpoint: req.LoadBalancerEndpoint,
20 | SpawnProcessLimit: u.SpawnProcessLimit,
21 | }
22 |
23 | if err := k.BuildCluster(); err != nil {
24 | logger.Error().Msgf("Error while building a cluster: %s", err)
25 | return nil, fmt.Errorf("error while building cluster %s for project %s : %w", req.Desired.ClusterInfo.Name, req.ProjectName, err)
26 | }
27 |
28 | logger.Info().Msgf("Kubernetes cluster was successfully build")
29 | return &pb.BuildClusterResponse{Desired: req.Desired}, nil
30 | }
31 |
--------------------------------------------------------------------------------
/services/kube-eleven/server/domain/usecases/destroy_cluster.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/berops/claudie/internal/loggerutils"
7 | "github.com/berops/claudie/proto/pb"
8 | kube_eleven "github.com/berops/claudie/services/kube-eleven/server/domain/utils/kube-eleven"
9 | )
10 |
11 | func (u *Usecases) DestroyCluster(req *pb.DestroyClusterRequest) (*pb.DestroyClusterResponse, error) {
12 | if req.Current == nil {
13 | return &pb.DestroyClusterResponse{Current: req.Current}, nil
14 | }
15 |
16 | logger := loggerutils.WithProjectAndCluster(req.ProjectName, req.Current.ClusterInfo.Id())
17 |
18 | logger.Info().Msgf("Destroying kubernetes cluster")
19 |
20 | k := kube_eleven.KubeEleven{
21 | K8sCluster: req.Current,
22 | LoadBalancerEndpoint: req.LoadBalancerEndpoint,
23 | SpawnProcessLimit: u.SpawnProcessLimit,
24 | }
25 |
26 | if err := k.DestroyCluster(); err != nil {
27 | logger.Error().Msgf("Error while destroying cluster: %s", err)
28 | return nil, fmt.Errorf("error while destroying cluster %s for project %s: %w", req.Current.ClusterInfo.Name, req.ProjectName, err)
29 | }
30 |
31 | logger.Info().Msgf("Kubernetes cluster was successfully destroyed")
32 | return &pb.DestroyClusterResponse{Current: req.Current}, nil
33 | }
34 |
--------------------------------------------------------------------------------
/services/kube-eleven/server/domain/usecases/usecases.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import "golang.org/x/sync/semaphore"
4 |
5 | const (
6 | // SpawnProcessLimit is the number of processes concurrently executing kubeone.
7 | SpawnProcessLimit = 5
8 | )
9 |
10 | type Usecases struct {
11 | // SpawnProcessLimit limits the number of concurrently spawned kubeone processes.
12 | SpawnProcessLimit *semaphore.Weighted
13 | }
14 |
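
A minimal sketch of the intended pattern (the wrapper function is hypothetical): each kubeone invocation acquires one slot from the weighted semaphore and releases it when done, so at most SpawnProcessLimit processes run concurrently.

package main

import (
	"context"

	"golang.org/x/sync/semaphore"
)

// runKubeone is a hypothetical wrapper: acquire one slot before spawning
// kubeone, release it afterwards, so at most `limit` processes run at once.
func runKubeone(ctx context.Context, limit *semaphore.Weighted) error {
	if err := limit.Acquire(ctx, 1); err != nil {
		return err
	}
	defer limit.Release(1)

	// ... exec kubeone here ...
	return nil
}

func main() {
	limit := semaphore.NewWeighted(5) // mirrors the SpawnProcessLimit constant above
	_ = runKubeone(context.Background(), limit)
}
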
--------------------------------------------------------------------------------
/services/kube-eleven/server/domain/utils/kube-eleven/types.go:
--------------------------------------------------------------------------------
1 | package kube_eleven
2 |
3 | import (
4 | "github.com/berops/claudie/proto/pb/spec"
5 | )
6 |
7 | type (
8 | // NodeInfo struct holds data necessary to define a node in the node pool.
9 | NodeInfo struct {
10 | Node *spec.Node
11 | Name string
12 | }
13 |
14 | // NodepoolInfo struct holds data necessary to define nodes in kubeone
15 | // manifest.
16 | NodepoolInfo struct {
17 | Nodes []*NodeInfo
18 | IsDynamic bool
19 | NodepoolName string
20 | Region string
21 | Zone string
22 | CloudProviderName string
23 | ProviderName string
24 | }
25 |
26 | // templateData struct holds the data which will be used in creating
27 | // the Kubeone files from templates.
28 | templateData struct {
29 | APIEndpoint string
30 | AlternativeNames []string
31 | KubernetesVersion string
32 | ClusterName string
33 | Nodepools []*NodepoolInfo
34 | }
35 | )
36 |
--------------------------------------------------------------------------------
/services/kube-eleven/server/domain/utils/kube-eleven/utils.go:
--------------------------------------------------------------------------------
1 | package kube_eleven
2 |
3 | import (
4 | "fmt"
5 | "os"
6 | )
7 |
8 | // readKubeconfigFromFile reads kubeconfig from a file and returns it as a string
9 | func readKubeconfigFromFile(path string) (string, error) {
10 | kubeconfigAsByte, err := os.ReadFile(path)
11 | if err != nil {
12 | return "", fmt.Errorf("error while reading kubeconfig from file %s : %w", path, err)
13 | }
14 |
15 | return string(kubeconfigAsByte), nil
16 | }
17 |
--------------------------------------------------------------------------------
/services/kube-eleven/templates/templates.go:
--------------------------------------------------------------------------------
1 | package templates
2 |
3 | import _ "embed"
4 |
5 | //go:embed kubeone.tpl
6 | var KubeOneTemplate string
7 |
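
A minimal sketch of rendering the embedded template, assuming kubeone.tpl (not shown here) uses only built-in text/template actions. The local struct mirrors the unexported templateData from types.go and the values are made up.

package main

import (
	"os"
	"text/template"

	kubeonetemplates "github.com/berops/claudie/services/kube-eleven/templates"
)

// data mirrors the unexported templateData struct for illustration only.
type data struct {
	APIEndpoint       string
	AlternativeNames  []string
	KubernetesVersion string
	ClusterName       string
	Nodepools         []any
}

func main() {
	tpl := template.Must(template.New("kubeone").Parse(kubeonetemplates.KubeOneTemplate))
	if err := tpl.Execute(os.Stdout, data{
		APIEndpoint:       "10.0.0.1",
		KubernetesVersion: "v1.30.0",
		ClusterName:       "demo-cluster",
	}); err != nil {
		panic(err)
	}
}
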
--------------------------------------------------------------------------------
/services/kuber/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/terraformer
8 | services/kube-eleven
9 | services/builder
10 | services/testing-framework
11 | services/claudie-operator
12 | services/ansibler
13 | services/autoscaler-adapter
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/kuber/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/golang:1.24.1 AS build
2 |
3 | ARG TARGETARCH
4 |
5 | #Install kubectl
6 | RUN KC_VERSION=v1.27.0 && \
7 | wget -q https://storage.googleapis.com/kubernetes-release/release/${KC_VERSION}/bin/linux/$TARGETARCH/kubectl
8 |
9 | #Unset the GOPATH
10 | ENV GOPATH=
11 |
12 | #First, copy go.mod and go.sum to prevent unnecessary download of modules
13 | COPY go.mod .
14 | COPY go.sum .
15 |
16 | #Check if any modules need downloading
17 | RUN go mod download
18 |
19 | COPY . .
20 |
21 | #Change the directory
22 | WORKDIR /go/services/kuber/server
23 |
24 | #Compile the golang code, CGO_ENABLED=0 removes cross-compile dependencies
25 | RUN CGO_ENABLED=0 go build
26 |
27 | FROM docker.io/library/alpine:3.20
28 | #Add repository label
29 | LABEL org.opencontainers.image.source "https://github.com/berops/claudie"
30 | #Add image name as a label
31 | LABEL org.opencontainers.image.base.name "docker.io/library/alpine"
32 | #Add description to the image
33 | LABEL org.opencontainers.image.description "Image for Kuber from Claudie"
34 |
35 | #Copy the binaries to empty base image
36 | COPY --from=build /go/kubectl /usr/local/bin/kubectl
37 | COPY --from=build /go/services/kuber/server/server /bin/services/kuber/server/server
38 | COPY --from=build /go/services/kuber/server/manifests /bin/services/kuber/server/manifests
39 | RUN chmod +x /usr/local/bin/kubectl && apk add -q bash
40 |
41 | #Run server
42 | WORKDIR /bin
43 | ENTRYPOINT [ "./services/kuber/server/server" ]
44 |
45 | #NOTE: We cannot use a scratch image for this Dockerfile since the code shells out to external commands
46 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/cilium_rollout_restart.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/berops/claudie/internal/kubectl"
7 | "github.com/berops/claudie/internal/loggerutils"
8 | "github.com/berops/claudie/proto/pb"
9 | )
10 |
11 | func (u *Usecases) CiliumRolloutRestart(request *pb.CiliumRolloutRestartRequest) (*pb.CiliumRolloutRestartResponse, error) {
12 | clusterID := request.Cluster.ClusterInfo.Id()
13 | logger := loggerutils.WithClusterName(clusterID)
14 |
15 | logger.Info().Msgf("Performing a rollout of the cilium daemonset")
16 | kc := kubectl.Kubectl{
17 | Kubeconfig: request.Cluster.Kubeconfig,
18 | MaxKubectlRetries: 5,
19 | }
20 |
21 | if err := kc.RolloutRestart("daemonset", "cilium", "-n kube-system"); err != nil {
22 | return nil, fmt.Errorf("failed to rollout restart daemonset for cilium: %w", err)
23 | }
24 |
25 | return &pb.CiliumRolloutRestartResponse{}, nil
26 | }
27 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/delete_cluster_metadata.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | comm "github.com/berops/claudie/internal/command"
8 | "github.com/berops/claudie/internal/envs"
9 | "github.com/berops/claudie/internal/kubectl"
10 | "github.com/berops/claudie/internal/loggerutils"
11 | "github.com/berops/claudie/proto/pb"
12 | )
13 |
14 | // DeleteClusterMetadata deletes the K8s secret (from the management cluster) containing cluster
15 | // metadata for the given K8s cluster.
16 | func (u *Usecases) DeleteClusterMetadata(ctx context.Context, request *pb.DeleteClusterMetadataRequest) (*pb.DeleteClusterMetadataResponse, error) {
17 | namespace := envs.Namespace
18 | if namespace == "" {
19 | // If kuber is deployed locally (no namespace set), return early.
20 | return &pb.DeleteClusterMetadataResponse{}, nil
21 | }
22 | clusterID := request.Cluster.ClusterInfo.Id()
23 |
24 | logger := loggerutils.WithClusterName(clusterID)
25 | var err error
26 | // Log success/error message.
27 | defer func() {
28 | if err != nil {
29 | logger.Warn().Msgf("Failed to remove cluster metadata, secret most likely already removed : %v", err)
30 | } else {
31 | logger.Info().Msgf("Deleted cluster metadata secret")
32 | }
33 | }()
34 |
35 | logger.Info().Msgf("Deleting cluster metadata secret")
36 | kc := kubectl.Kubectl{MaxKubectlRetries: 3}
37 | kc.Stdout = comm.GetStdOut(clusterID)
38 | kc.Stderr = comm.GetStdErr(clusterID)
39 |
40 | // Save the error for the deferred logger; it is deliberately not returned.
41 | err = kc.KubectlDeleteResource("secret", fmt.Sprintf("%s-metadata", clusterID), "-n", namespace)
42 | return &pb.DeleteClusterMetadataResponse{}, nil
43 | }
44 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/delete_kubeconfig.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | comm "github.com/berops/claudie/internal/command"
8 | "github.com/berops/claudie/internal/envs"
9 | "github.com/berops/claudie/internal/kubectl"
10 | "github.com/berops/claudie/internal/loggerutils"
11 | "github.com/berops/claudie/proto/pb"
12 | )
13 |
14 | // DeleteKubeconfig deletes the K8s secret (in the management cluster) containing kubeconfig
15 | // for the given K8s cluster.
16 | func (u *Usecases) DeleteKubeconfig(ctx context.Context, request *pb.DeleteKubeconfigRequest) (*pb.DeleteKubeconfigResponse, error) {
17 | namespace := envs.Namespace
18 | if namespace == "" {
19 | // If kuber is deployed locally (no namespace set), return early.
20 | return &pb.DeleteKubeconfigResponse{}, nil
21 | }
22 | clusterID := request.Cluster.ClusterInfo.Id()
23 | logger := loggerutils.WithClusterName(clusterID)
24 | var err error
25 | // Log success/error message.
26 | defer func() {
27 | if err != nil {
28 | logger.Warn().Msgf("Failed to remove kubeconfig, secret most likely already removed : %v", err)
29 | } else {
30 | logger.Info().Msgf("Deleted kubeconfig secret")
31 | }
32 | }()
33 |
34 | logger.Info().Msgf("Deleting kubeconfig secret")
35 | kc := kubectl.Kubectl{MaxKubectlRetries: 3}
36 | kc.Stdout = comm.GetStdOut(clusterID)
37 | kc.Stderr = comm.GetStdErr(clusterID)
38 |
39 | // Save the error for the deferred logger; it is deliberately not returned.
40 | err = kc.KubectlDeleteResource("secret", fmt.Sprintf("%s-kubeconfig", clusterID), "-n", namespace)
41 | return &pb.DeleteKubeconfigResponse{}, nil
42 | }
43 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/delete_nodes.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/berops/claudie/internal/loggerutils"
8 | "github.com/berops/claudie/internal/nodepools"
9 | "github.com/berops/claudie/proto/pb"
10 | "github.com/berops/claudie/services/kuber/server/domain/utils/nodes"
11 | )
12 |
13 | // DeleteNodes gracefully removes nodes from specified cluster.
14 | func (u *Usecases) DeleteNodes(ctx context.Context, request *pb.DeleteNodesRequest) (*pb.DeleteNodesResponse, error) {
15 | logger := loggerutils.WithClusterName(request.Cluster.ClusterInfo.Id())
16 |
17 | var (
18 | master []string
19 | worker []string
20 | keepNodepools = make(map[string]struct{})
21 | )
22 |
23 | for np, deleted := range request.Nodepools {
24 | if nodepools.FindByName(np, request.Cluster.GetClusterInfo().GetNodePools()).GetIsControl() {
25 | master = append(master, deleted.Nodes...)
26 | } else {
27 | worker = append(worker, deleted.Nodes...)
28 | }
29 | if deleted.KeepNodePoolIfEmpty {
30 | keepNodepools[np] = struct{}{}
31 | }
32 | }
33 |
34 | logger.Info().Msgf("Deleting nodes - control nodes [%d], compute nodes[%d]", len(master), len(worker))
35 | deleter := nodes.NewDeleter(master, worker, request.Cluster, keepNodepools)
36 | c, err := deleter.DeleteNodes()
37 | if err != nil {
38 | logger.Err(err).Msgf("Error while deleting nodes")
39 | return nil, fmt.Errorf("error while deleting nodes for cluster %s : %w", request.Cluster.ClusterInfo.Id(), err)
40 | }
41 | logger.Info().Msgf("Nodes were successfully deleted")
42 | return &pb.DeleteNodesResponse{Cluster: c}, nil
43 | }
44 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/destroy_cluster_autoscaler.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "path/filepath"
7 |
8 | "github.com/berops/claudie/internal/fileutils"
9 | "github.com/berops/claudie/internal/hash"
10 | "github.com/berops/claudie/internal/loggerutils"
11 | "github.com/berops/claudie/proto/pb"
12 | "github.com/berops/claudie/services/kuber/server/domain/utils/autoscaler"
13 | )
14 |
15 | // DestroyClusterAutoscaler removes deployment of Cluster Autoscaler from the management cluster for given k8s cluster.
16 | func (u *Usecases) DestroyClusterAutoscaler(ctx context.Context, request *pb.DestroyClusterAutoscalerRequest) (*pb.DestroyClusterAutoscalerResponse, error) {
17 | logger := loggerutils.WithClusterName(request.Cluster.ClusterInfo.Id())
18 |
19 | var err error
20 | // Log success/error message.
21 | defer func() {
22 | if err != nil {
23 | logger.Err(err).Msgf("Error while destroying cluster autoscaler")
24 | } else {
25 | logger.Info().Msgf("Cluster autoscaler successfully destroyed")
26 | }
27 | }()
28 |
29 | // Create output dir
30 | tempClusterID := fmt.Sprintf("%s-%s", request.Cluster.ClusterInfo.Name, hash.Create(5))
31 | clusterDir := filepath.Join(outputDir, tempClusterID)
32 | if err = fileutils.CreateDirectory(clusterDir); err != nil {
33 | return nil, fmt.Errorf("error while creating directory %s : %w", clusterDir, err)
34 | }
35 |
36 | // Destroy cluster autoscaler.
37 | logger.Info().Msgf("Destroying Cluster Autoscaler deployment")
38 | autoscalerManager := autoscaler.NewAutoscalerManager(request.ProjectName, request.Cluster, clusterDir)
39 | if err := autoscalerManager.DestroyClusterAutoscaler(); err != nil {
40 | logger.Debug().Msgf("Ignoring Destroy Autoscaler error: %v", err.Error())
41 | }
42 |
43 | return &pb.DestroyClusterAutoscalerResponse{}, nil
44 | }
45 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/patch_nodes.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/berops/claudie/internal/loggerutils"
8 | "github.com/berops/claudie/proto/pb"
9 | "github.com/berops/claudie/services/kuber/server/domain/utils/nodes"
10 | )
11 |
12 | // PatchNodes uses kube API patch to set correct metadata for nodes.
13 | func (u *Usecases) PatchNodes(ctx context.Context, request *pb.PatchNodesRequest) (*pb.PatchNodesResponse, error) {
14 | clusterID := request.Cluster.ClusterInfo.Id()
15 | logger := loggerutils.WithClusterName(clusterID)
16 |
17 | patcher := nodes.NewPatcher(request.Cluster, logger)
18 |
19 | if err := patcher.PatchProviderID(); err != nil {
20 | logger.Err(err).Msgf("Error while patching node provider ID")
21 | return nil, fmt.Errorf("error while patching providerID on nodes for %s : %w", clusterID, err)
22 | }
23 |
24 | if err := patcher.PatchAnnotations(); err != nil {
25 | logger.Err(err).Msgf("Error while patching node annotations")
26 | return nil, fmt.Errorf("error while patching annotations on nodes for %s : %w", clusterID, err)
27 | }
28 |
29 | if err := patcher.PatchLabels(); err != nil {
30 | logger.Err(err).Msgf("Error while patching node labels")
31 | return nil, fmt.Errorf("error while patching labels on nodes for %s : %w", clusterID, err)
32 | }
33 |
34 | if err := patcher.PatchTaints(); err != nil {
35 | logger.Err(err).Msgf("Error while patching node taints")
36 | return nil, fmt.Errorf("error while patching taints on nodes for %s : %w", clusterID, err)
37 | }
38 |
39 | logger.Info().Msgf("Nodes were successfully patched")
40 | return &pb.PatchNodesResponse{}, nil
41 | }
42 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/remove_lb_scrape_config.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "path/filepath"
7 |
8 | "github.com/berops/claudie/internal/hash"
9 | "github.com/berops/claudie/internal/loggerutils"
10 | "github.com/berops/claudie/proto/pb"
11 | scrapeconfig "github.com/berops/claudie/services/kuber/server/domain/utils/scrape-config"
12 | )
13 |
14 | // RemoveLBScrapeConfig deletes the Kubernetes secret containing Prometheus scrape config related to
15 | // the LB clusters attached to given K8s cluster.
16 | func (u *Usecases) RemoveLBScrapeConfig(ctx context.Context, request *pb.RemoveLBScrapeConfigRequest) (*pb.RemoveLBScrapeConfigResponse, error) {
17 | clusterID := request.Cluster.ClusterInfo.Id()
18 | logger := loggerutils.WithClusterName(clusterID)
19 |
20 | logger.Info().Msgf("Deleting load balancer scrape-config")
21 |
22 | tempClusterID := fmt.Sprintf("%s-%s", request.Cluster.ClusterInfo.Name, hash.Create(5))
23 | clusterDir := filepath.Join(outputDir, tempClusterID)
24 |
25 | sc := scrapeconfig.ScrapeConfig{
26 | Cluster: request.Cluster,
27 | Directory: clusterDir,
28 | }
29 |
30 | if err := sc.RemoveLBScrapeConfig(); err != nil {
31 | logger.Err(err).Msgf("Error while removing scrape config for Loadbalancer nodes")
32 | return nil, fmt.Errorf("error while removing old loadbalancer scrape-config for %s : %w", clusterID, err)
33 | }
34 | logger.Info().Msgf("Load balancer scrape-config successfully deleted")
35 |
36 | return &pb.RemoveLBScrapeConfigResponse{}, nil
37 | }
38 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/setup_cluster_autoscaler.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "path/filepath"
7 |
8 | "github.com/berops/claudie/internal/fileutils"
9 | "github.com/berops/claudie/internal/hash"
10 | "github.com/berops/claudie/internal/loggerutils"
11 | "github.com/berops/claudie/proto/pb"
12 | "github.com/berops/claudie/services/kuber/server/domain/utils/autoscaler"
13 | )
14 |
15 | func (u *Usecases) SetUpClusterAutoscaler(ctx context.Context, request *pb.SetUpClusterAutoscalerRequest) (*pb.SetUpClusterAutoscalerResponse, error) {
16 | clusterID := request.Cluster.ClusterInfo.Id()
17 | logger := loggerutils.WithClusterName(clusterID)
18 | var err error
19 | // Log success/error message.
20 | defer func() {
21 | if err != nil {
22 | logger.Err(err).Msgf("Error while setting up cluster autoscaler")
23 | } else {
24 | logger.Info().Msgf("Cluster autoscaler successfully set up")
25 | }
26 | }()
27 |
28 | // Create output dir
29 | tempClusterID := fmt.Sprintf("%s-%s", request.Cluster.ClusterInfo.Name, hash.Create(5))
30 | clusterDir := filepath.Join(outputDir, tempClusterID)
31 | if err = fileutils.CreateDirectory(clusterDir); err != nil { // assign (not :=) so the deferred log above sees the error
32 | return nil, fmt.Errorf("error while creating directory %s : %w", clusterDir, err)
33 | }
34 |
35 | // Set up cluster autoscaler.
36 | autoscalerManager := autoscaler.NewAutoscalerManager(request.ProjectName, request.Cluster, clusterDir)
37 | if err = autoscalerManager.SetUpClusterAutoscaler(); err != nil {
38 | return nil, fmt.Errorf("error while setting up cluster autoscaler for %s : %w", clusterID, err)
39 | }
40 | return &pb.SetUpClusterAutoscalerResponse{}, nil
41 | }
42 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/setup_storage.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "path/filepath"
7 |
8 | "github.com/berops/claudie/internal/loggerutils"
9 | "github.com/berops/claudie/proto/pb"
10 | "github.com/berops/claudie/services/kuber/server/domain/utils/longhorn"
11 | )
12 |
13 | // SetUpStorage installs and configures Longhorn in the given K8s cluster.
14 | // (Installation of Longhorn prerequisites has already been taken care in the ansibler microservice.)
15 | func (u *Usecases) SetUpStorage(ctx context.Context, request *pb.SetUpStorageRequest) (*pb.SetUpStorageResponse, error) {
16 | clusterID := request.DesiredCluster.ClusterInfo.Id()
17 | logger := loggerutils.WithClusterName(clusterID)
18 |
19 | clusterDir := filepath.Join(outputDir, clusterID)
20 |
21 | logger.Info().Msgf("Setting up the longhorn")
22 | longhorn := longhorn.Longhorn{Cluster: request.DesiredCluster, Directory: clusterDir}
23 | if err := longhorn.SetUp(); err != nil {
24 | logger.Err(err).Msgf("Error while setting up the longhorn")
25 | return nil, fmt.Errorf("error while setting up the longhorn for %s : %w", clusterID, err)
26 | }
27 | logger.Info().Msgf("Longhorn successfully set up")
28 |
29 | return &pb.SetUpStorageResponse{DesiredCluster: request.DesiredCluster}, nil
30 | }
31 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/store_kubeconfig.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "encoding/base64"
6 | "fmt"
7 | "path/filepath"
8 |
9 | "github.com/berops/claudie/internal/envs"
10 | "github.com/berops/claudie/internal/loggerutils"
11 | "github.com/berops/claudie/proto/pb"
12 | "github.com/berops/claudie/services/kuber/server/domain/utils"
13 | "github.com/berops/claudie/services/kuber/server/domain/utils/secret"
14 | )
15 |
16 | func (u *Usecases) StoreKubeconfig(ctx context.Context, request *pb.StoreKubeconfigRequest) (*pb.StoreKubeconfigResponse, error) {
17 | clusterID := request.Cluster.ClusterInfo.Id()
18 | logger := loggerutils.WithClusterName(clusterID)
19 |
20 | if envs.Namespace == "" {
21 | //NOTE: DEBUG print
22 | // logger.Info().Msgf("The kubeconfig for %s\n%s:", clusterID, cluster.Kubeconfig)
23 | return &pb.StoreKubeconfigResponse{}, nil
24 | }
25 |
26 | logger.Info().Msgf("Storing kubeconfig")
27 |
28 | clusterDir := filepath.Join(outputDir, clusterID)
29 | sec := secret.New(clusterDir, secret.NewYaml(
30 | utils.GetSecretMetadata(request.Cluster.ClusterInfo, request.ProjectName, utils.KubeconfigSecret),
31 | map[string]string{"kubeconfig": base64.StdEncoding.EncodeToString([]byte(request.Cluster.Kubeconfig))},
32 | ))
33 |
34 | if err := sec.Apply(envs.Namespace, ""); err != nil {
35 | logger.Err(err).Msgf("Failed to store kubeconfig")
36 | return nil, fmt.Errorf("error while creating the kubeconfig secret for %s", clusterID)
37 | }
38 |
39 | logger.Info().Msgf("Kubeconfig was successfully stored")
40 | return &pb.StoreKubeconfigResponse{}, nil
41 | }
42 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/usecases/store_lb_scrape_config.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "context"
5 | "fmt"
6 | "path/filepath"
7 |
8 | "github.com/berops/claudie/internal/loggerutils"
9 | "github.com/berops/claudie/proto/pb"
10 | scrapeconfig "github.com/berops/claudie/services/kuber/server/domain/utils/scrape-config"
11 | )
12 |
13 | func (u *Usecases) StoreLBScrapeConfig(ctx context.Context, req *pb.StoreLBScrapeConfigRequest) (*pb.StoreLBScrapeConfigResponse, error) {
14 | clusterID := req.Cluster.ClusterInfo.Id()
15 | logger := loggerutils.WithClusterName(clusterID)
16 |
17 | clusterDir := filepath.Join(outputDir, clusterID)
18 | logger.Info().Msgf("Storing loadbalancer scrape-config")
19 |
20 | sc := scrapeconfig.ScrapeConfig{
21 | Cluster: req.GetCluster(),
22 | LBClusters: req.GetDesiredLoadbalancers(),
23 | Directory: clusterDir,
24 | }
25 |
26 | if err := sc.GenerateAndApplyScrapeConfig(); err != nil {
27 | logger.Err(err).Msgf("Error while applying scrape config for Loadbalancers")
28 | return nil, fmt.Errorf("error while setting up the loadbalancer scrape-config for %s : %w", clusterID, err)
29 | }
30 | logger.Info().Msgf("Loadbalancer scrape-config successfully set up")
31 |
32 | return &pb.StoreLBScrapeConfigResponse{}, nil
33 | }
34 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/utils/autoscaler/cluster_autoscaler_test.go:
--------------------------------------------------------------------------------
1 | package autoscaler
2 |
3 | import (
4 | "testing"
5 |
6 | "github.com/stretchr/testify/assert"
7 | )
8 |
9 | func Test_getK8sVersion(t *testing.T) {
10 | out, err := getK8sVersion("v1.29.4")
11 | assert.Nil(t, err)
12 | assert.Equal(t, "v1.29.5", out)
13 |
14 | out, err = getK8sVersion("1.29.4")
15 | assert.Nil(t, err)
16 | assert.Equal(t, "v1.29.5", out)
17 | }
18 |
--------------------------------------------------------------------------------
/services/kuber/server/domain/utils/nodes/pvc_replication_utils.go:
--------------------------------------------------------------------------------
1 | package nodes
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 |
7 | "github.com/berops/claudie/internal/kubectl"
8 | "gopkg.in/yaml.v3"
9 | )
10 |
11 | type ReplicaList struct {
12 | Items []LonghornReplica `yaml:"items"`
13 | }
14 |
15 | type LonghornReplica struct {
16 | Metadata struct {
17 | Name string `yaml:"name"`
18 | } `yaml:"metadata"`
19 |
20 | Status struct {
21 | InstanceManagerName string `yaml:"instanceManagerName"`
22 | CurrentState string `yaml:"currentState"`
23 | Started bool `yaml:"started"`
24 | } `yaml:"status"`
25 |
26 | Spec struct {
27 | NodeID string `yaml:"nodeID"`
28 | FailedAt string `yaml:"failedAt"`
29 | } `yaml:"spec"`
30 | }
31 |
32 | func removeReplicasOnDeletedNode(kc kubectl.Kubectl, node string) error {
33 | out, err := kc.KubectlGet("replicas.longhorn.io", "-n", longhornNamespace, "-o", "yaml")
34 | if err != nil {
35 | return fmt.Errorf("failed to list all replicas : %w", err)
36 | }
37 |
38 | var replicaList ReplicaList
39 | if err := yaml.Unmarshal(out, &replicaList); err != nil {
40 | return fmt.Errorf("failed unmarshal kubectl output : %w", err)
41 | }
42 |
43 | var errAll error
44 | for _, replica := range replicaList.Items {
45 | // https://github.com/longhorn/longhorn/blob/6cc47ec5e942f33b10f644a5eaf0970b650e27a7/deploy/longhorn.yaml#L3048
46 | // spec.nodeID is the node the replica is scheduled on; it should
47 | // match the deleted node.
48 | del := replica.Spec.NodeID == node
49 | del = del && replica.Status.CurrentState == "stopped"
50 | del = del && !replica.Status.Started
51 | del = del && replica.Status.InstanceManagerName == ""
52 | del = del && replica.Spec.FailedAt != ""
53 |
54 | if del {
55 | err := kc.KubectlDeleteResource("replicas.longhorn.io", replica.Metadata.Name, "-n", longhornNamespace)
56 | if err != nil {
57 | errAll = errors.Join(errAll, fmt.Errorf("failed to delete replica %s: %w", replica.Metadata.Name, err))
58 | }
59 | }
60 | }
61 |
62 | return errAll
63 | }
64 |
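
To make the yaml tags above concrete, here is a small check against a trimmed, hypothetical piece of `kubectl get replicas.longhorn.io -o yaml` output. It reuses the ReplicaList/LonghornReplica types from this file, and the replica shown satisfies every deletion criterion checked in removeReplicasOnDeletedNode.

func exampleUnmarshal() error {
	const out = `
items:
  - metadata:
      name: pvc-123-r-abcd
    spec:
      nodeID: old-node-1
      failedAt: "2024-01-01T00:00:00Z"
    status:
      instanceManagerName: ""
      currentState: stopped
      started: false
`
	var rl ReplicaList
	if err := yaml.Unmarshal([]byte(out), &rl); err != nil {
		return err
	}
	// rl.Items[0].Spec.NodeID == "old-node-1", CurrentState == "stopped",
	// Started == false, InstanceManagerName == "" and FailedAt != "",
	// so this replica would be deleted when draining "old-node-1".
	return nil
}
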
--------------------------------------------------------------------------------
/services/kuber/server/domain/utils/utils.go:
--------------------------------------------------------------------------------
1 | package utils
2 |
3 | import (
4 | "fmt"
5 |
6 | "github.com/berops/claudie/proto/pb/spec"
7 | "github.com/berops/claudie/services/kuber/server/domain/utils/secret"
8 | )
9 |
10 | type OutputType string
11 |
12 | const (
13 | KubeconfigSecret OutputType = "kubeconfig"
14 | MetadataSecret OutputType = "metadata"
15 | )
16 |
17 | // GetSecretMetadata returns metadata for secrets created in the management cluster as a Claudie output.
18 | func GetSecretMetadata(ci *spec.ClusterInfo, projectName string, outputType OutputType) secret.Metadata {
19 | return secret.Metadata{
20 | Name: fmt.Sprintf("%s-%s", ci.Id(), outputType),
21 | Labels: map[string]string{
22 | "claudie.io/project": projectName,
23 | "claudie.io/cluster": ci.Name,
24 | "claudie.io/cluster-id": ci.Id(),
25 | "claudie.io/output": string(outputType),
26 | "app.kubernetes.io/part-of": "claudie",
27 | },
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/services/kuber/server/manifests/claudie-defaults.yaml:
--------------------------------------------------------------------------------
1 | # Make sure longhorn is deployed only on compute nodes
2 | apiVersion: longhorn.io/v1beta1
3 | kind: Setting
4 | metadata:
5 | name: system-managed-components-node-selector
6 | namespace: longhorn-system
7 | value: claudie.io/node-type:compute
8 | ---
9 | # Make sure Longhorn blocks drain/eviction when the last replica of a volume is on the node being deleted.
10 | apiVersion: longhorn.io/v1beta1
11 | kind: Setting
12 | metadata:
13 | name: node-drain-policy
14 | namespace: longhorn-system
15 | value: block-for-eviction-if-last-replica
16 | ---
17 | # Default path to use for storing data on a host
18 | apiVersion: longhorn.io/v1beta1
19 | kind: Setting
20 | metadata:
21 | name: default-data-path
22 | namespace: longhorn-system
23 | value: /opt/claudie/data
24 |
--------------------------------------------------------------------------------
/services/kuber/templates/enable-ca.goyaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Enable CA functionality
3 | apiVersion: longhorn.io/v1beta1
4 | kind: Setting
5 | metadata:
6 | name: kubernetes-cluster-autoscaler-enabled
7 | namespace: longhorn-system
8 | value: "{{ .IsAutoscaled }}"
9 |
--------------------------------------------------------------------------------
/services/kuber/templates/scrape-config-manifest.goyaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: {{ .Namespace }}
5 | ---
6 | apiVersion: v1
7 | kind: Secret
8 | metadata:
9 | name: loadbalancers-scrape-config
10 | namespace: {{ .Namespace }}
11 | type: Opaque
12 | data:
13 | scrape-config.yaml: {{ .ScrapeConfigB64 }}
--------------------------------------------------------------------------------
/services/kuber/templates/scrape-config.goyaml:
--------------------------------------------------------------------------------
1 | - job_name: "claudie-loadbalancers"
2 |
3 | honor_timestamps: true
4 |
5 | scrape_interval: 30s
6 | scrape_timeout: 10s
7 | metrics_path: /metrics
8 | follow_redirects: true
9 |
10 | static_configs:
11 | {{- range $lbCluster := .LBClusters }}
12 | {{- range $lbNodepool := $lbCluster.NodePools.Dynamic }}
13 | {{- range $lbNode := $lbNodepool.Nodes }}
14 | - targets:
15 | - {{ $lbNode.Private }}:9100
16 | labels:
17 | instance: {{ $lbNode.Name }}
18 | component: claudie
19 | service: loadbalancer
20 | {{- end }}
21 | {{- end }}
22 | {{- range $lbNodepool := $lbCluster.NodePools.Static }}
23 | {{- range $lbNode := $lbNodepool.Nodes }}
24 | - targets:
25 | - {{ $lbNode.Private }}:9100
26 | labels:
27 | instance: {{ $lbNode.Name }}
28 | component: claudie
29 | service: loadbalancer
30 | {{- end }}
31 | {{- end }}
32 | {{- end }}
--------------------------------------------------------------------------------
/services/kuber/templates/storage-class.goyaml:
--------------------------------------------------------------------------------
1 | apiVersion: storage.k8s.io/v1
2 | kind: StorageClass
3 | metadata:
4 | name: {{ .StorageClassName }}
5 | labels:
6 | claudie.io/storage-class: {{ .StorageClassName }}
7 | provisioner: driver.longhorn.io
8 | parameters:
9 | fromBackup: ""
10 | nodeSelector: {{ .ZoneName }}
11 | fsType: xfs
12 | numberOfReplicas: "3"
13 | staleReplicaTimeout: "28800"
14 | reclaimPolicy: Delete
15 | allowVolumeExpansion: true
16 | volumeBindingMode: Immediate
17 |
--------------------------------------------------------------------------------
/services/kuber/templates/templates.go:
--------------------------------------------------------------------------------
1 | package templates
2 |
3 | import (
4 | _ "embed"
5 | )
6 |
7 | var (
8 | //go:embed cluster-autoscaler.goyaml
9 | ClusterAutoscalerTemplate string
10 |
11 | //go:embed enable-ca.goyaml
12 | EnableClusterAutoscalerTemplate string
13 |
14 | //go:embed scrape-config-manifest.goyaml
15 | ScrapeConfigManifestTemplate string
16 |
17 | //go:embed scrape-config.goyaml
18 | ScrapeConfigTemplate string
19 |
20 | //go:embed storage-class.goyaml
21 | StorageClassTemplate string
22 | )
23 |
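
A minimal sketch of rendering one of the embedded templates above; the struct fields follow the placeholders in storage-class.goyaml and the values are invented.

package main

import (
	"os"
	"text/template"

	kubertemplates "github.com/berops/claudie/services/kuber/templates"
)

func main() {
	tpl := template.Must(template.New("sc").Parse(kubertemplates.StorageClassTemplate))
	// Field names match the {{ .StorageClassName }} / {{ .ZoneName }} placeholders.
	err := tpl.Execute(os.Stdout, struct {
		StorageClassName string
		ZoneName         string
	}{
		StorageClassName: "longhorn-zone-1",
		ZoneName:         "zone-1",
	})
	if err != nil {
		panic(err)
	}
}
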
--------------------------------------------------------------------------------
/services/manager/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/ansibler
8 | services/kube-eleven
9 | services/builder
10 | services/testing-framework
11 | services/claudie-operator
12 | services/kuber
13 | services/autoscaler-adapter
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/manager/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/golang:1.24.1 AS build
2 |
3 | ARG TARGETARCH
4 |
5 | #Install kubectl
6 | RUN KC_VERSION=v1.27.0 && \
7 | wget -q https://storage.googleapis.com/kubernetes-release/release/${KC_VERSION}/bin/linux/$TARGETARCH/kubectl
8 |
9 | #Unset the GOPATH
10 | ENV GOPATH=
11 |
12 | #First, copy go.mod and go.sum to prevent unnecessary download of modules
13 | COPY go.mod .
14 | COPY go.sum .
15 |
16 | #Check if any modules need downloading
17 | RUN go mod download
18 |
19 | #Copy all files apart from the ones in .dockerignore
20 | COPY . .
21 |
22 | #Change the directory
23 | WORKDIR /go/services/manager/cmd/api-server
24 |
25 | #Compile the golang code, CGO_ENABLED=0 removes cross-compile dependencies
26 | RUN CGO_ENABLED=0 go build
27 |
28 | #Use alpine image to copy certs needed for cloud libraries
29 | FROM alpine:latest AS certs
30 | RUN apk --update add ca-certificates
31 |
32 | FROM docker.io/library/alpine:3.20
33 | #Add repository label
34 | LABEL org.opencontainers.image.source "https://github.com/berops/claudie"
35 | #Add base image name as a label
36 | LABEL org.opencontainers.image.base.name "docker.io/library/alpine"
37 | #Add description to the image
38 | LABEL org.opencontainers.image.description "Image for Manager from Claudie"
39 |
40 | #Copy the binaries to empty base image
41 | COPY --from=build /go/kubectl /usr/local/bin/kubectl
42 | COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
43 | COPY --from=build /go/services/manager/cmd/api-server/api-server /bin/services/manager/api-server
44 |
45 | WORKDIR /bin
46 |
47 | RUN chmod +x /usr/local/bin/kubectl && apk add -q bash
48 |
49 | #Run server
50 | ENTRYPOINT [ "./services/manager/api-server" ]
51 |
--------------------------------------------------------------------------------
/services/manager/client/api.go:
--------------------------------------------------------------------------------
1 | package managerclient
2 |
3 | import (
4 | "errors"
5 | "io"
6 |
7 | "github.com/berops/claudie/internal/healthcheck"
8 | )
9 |
10 | var (
11 | // ErrVersionMismatch is returned when the requested operation errors out due to a mismatch in the document version.
12 | // Two writes using the same document version occurred, but this write failed because the document was modified by the other write.
13 | ErrVersionMismatch = errors.New("requested operation failed due to document version mismatch")
14 |
15 | // ErrNotFound is returned when the requested resource, i.e. Config, cluster, task etc. is not found.
16 | ErrNotFound = errors.New("not found")
17 | )
18 |
19 | // ClientAPI wraps all manager apis into a single interface.
20 | type ClientAPI interface {
21 | io.Closer
22 | healthcheck.HealthChecker
23 |
24 | TaskAPI
25 | ManifestAPI
26 | CrudAPI
27 | StateAPI
28 | }
29 |
--------------------------------------------------------------------------------
/services/manager/client/crud_api.go:
--------------------------------------------------------------------------------
1 | package managerclient
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/berops/claudie/proto/pb/spec"
7 | )
8 |
9 | type CrudAPI interface {
10 | // GetConfig will query the config with the specified name. If the requested
11 | // config is not found the ErrNotFound error is returned.
12 | GetConfig(ctx context.Context, request *GetConfigRequest) (*GetConfigResponse, error)
13 | // ListConfigs will query all the configs the manager handles.
14 | ListConfigs(ctx context.Context, request *ListConfigRequest) (*ListConfigResponse, error)
15 | }
16 |
17 | type GetConfigRequest struct{ Name string }
18 | type GetConfigResponse struct{ Config *spec.Config }
19 |
20 | type ListConfigRequest struct{}
21 | type ListConfigResponse struct{ Config []*spec.Config }
22 |
--------------------------------------------------------------------------------
/services/manager/client/manifest_api.go:
--------------------------------------------------------------------------------
1 | package managerclient
2 |
3 | import "context"
4 |
5 | type ManifestAPI interface {
6 | // UpsertManifest will update the [store.Manifest] and [store.KubernetesContext] of an existing
7 | // config or will create a new config (if not present) from the passed in values.
8 | // The function returns the ErrVersionMismatch error to indicate a dirty write;
9 | // the application code should then execute the Read/Update/Write cycle again to resolve the merge conflict.
10 | UpsertManifest(ctx context.Context, request *UpsertManifestRequest) error
11 |
12 | // MarkForDeletion will mark the Infrastructure of the specified Config to be deleted.
13 | // If the config exists but the specified version does not match, the ErrVersionMismatch
14 | // error is returned, indicating a dirty write. On a dirty write the application code
15 | // should execute the Read/Update/Write cycle again and retry with the refreshed version.
16 | // If the requested config for deletion is not found at all, the ErrNotFound error
17 | // is returned.
18 | MarkForDeletion(ctx context.Context, request *MarkForDeletionRequest) error
19 | }
20 |
21 | type UpsertManifestRequest struct {
22 | Name string
23 | Manifest *Manifest
24 | K8sCtx *KubernetesContext
25 | }
26 |
27 | type Manifest struct{ Raw string }
28 | type KubernetesContext struct{ Name, Namespace string }
29 |
30 | type MarkForDeletionRequest struct{ Name string }
31 |
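
A hedged sketch of the error handling these doc comments prescribe; the function and its surrounding package are hypothetical, and `c` stands for any ClientAPI implementation.

package example

import (
	"context"
	"errors"

	managerclient "github.com/berops/claudie/services/manager/client"
)

func markForDeletion(ctx context.Context, c managerclient.ClientAPI, name string) error {
	err := c.MarkForDeletion(ctx, &managerclient.MarkForDeletionRequest{Name: name})
	switch {
	case err == nil:
		return nil
	case errors.Is(err, managerclient.ErrNotFound):
		return nil // nothing to delete
	case errors.Is(err, managerclient.ErrVersionMismatch):
		// Dirty write: the caller should re-run the Read/Update/Write
		// cycle, e.g. by wrapping this call in Retry (see retry.go).
		return err
	default:
		return err
	}
}
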
--------------------------------------------------------------------------------
/services/manager/client/retry.go:
--------------------------------------------------------------------------------
1 | package managerclient
2 |
3 | import (
4 | "errors"
5 | "fmt"
6 | "math/rand/v2"
7 | "time"
8 |
9 | "github.com/rs/zerolog"
10 | )
11 |
12 | // TolerateDirtyWrites defines how many dirty writes the client should tolerate before giving up
13 | // on writing the value to the manager.
14 | const TolerateDirtyWrites = 20
15 |
16 | // ErrRetriesExhausted is returned when all the TolerateDirtyWrites retries fail due to a dirty write.
17 | var ErrRetriesExhausted = errors.New("exhausted all retries")
18 |
19 | // Retry retries the passed-in function up to TolerateDirtyWrites times. On the first error that is not
20 | // ErrVersionMismatch the underlying non-retryable error is returned; if all TolerateDirtyWrites attempts
21 | // fail with a dirty write, the returned error is wrapped with ErrRetriesExhausted.
22 | func Retry(logger *zerolog.Logger, description string, fn func() error) error {
23 | var err error
24 | var retries int
25 |
26 | for i := range TolerateDirtyWrites {
27 | retries++
28 | if i > 0 {
29 | wait := time.Duration(50+rand.IntN(300)) * time.Millisecond
30 | logger.Debug().Msgf("retry[%v/%v] %q failed due to dirty write: %v, retrying again in %s ms", i, TolerateDirtyWrites, description, err, wait)
31 | time.Sleep(wait)
32 | }
33 |
34 | err = fn()
35 | if err == nil || !errors.Is(err, ErrVersionMismatch) {
36 | break
37 | }
38 | }
39 |
40 | if err != nil && retries == TolerateDirtyWrites { // don't wrap a nil error when the final attempt succeeds
41 | err = fmt.Errorf("%w: %w", err, ErrRetriesExhausted)
42 | }
43 |
44 | if err != nil {
45 | return err
46 | }
47 |
48 | return nil
49 | }
50 |
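
A minimal usage sketch for Retry; the closure is a stand-in for any Read/Update/Write operation that can fail with ErrVersionMismatch.

package main

import (
	"os"

	"github.com/rs/zerolog"

	managerclient "github.com/berops/claudie/services/manager/client"
)

func main() {
	logger := zerolog.New(os.Stderr)

	err := managerclient.Retry(&logger, "upsert manifest", func() error {
		// Read the current document, apply the change, write it back.
		// Returning ErrVersionMismatch triggers another attempt, up to
		// TolerateDirtyWrites times; any other error stops the retries.
		return nil
	})
	if err != nil {
		logger.Err(err).Msg("giving up after repeated dirty writes")
	}
}
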
--------------------------------------------------------------------------------
/services/manager/client/retry_test.go:
--------------------------------------------------------------------------------
1 | package managerclient
2 |
3 | import (
4 | "bytes"
5 | "github.com/stretchr/testify/assert"
6 | "testing"
7 |
8 | "github.com/rs/zerolog"
9 | )
10 |
11 | func TestRetry(t *testing.T) {
12 | type args struct {
13 | logger *zerolog.Logger
14 | description string
15 | fn func() error
16 | }
17 | tests := []struct {
18 | name string
19 | args args
20 | validate func(t *testing.T, err error)
21 | }{
22 | {
23 | name: "ok-on-3rd-retry",
24 | args: args{
25 | logger: func() *zerolog.Logger { k := zerolog.New(new(bytes.Buffer)); return &k }(),
26 | description: "testing",
27 | fn: func() func() error {
28 | retry := 3
29 | return func() error {
30 | retry--
31 | if retry == 0 {
32 | return nil
33 | }
34 | return ErrVersionMismatch
35 | }
36 | }(),
37 | },
38 | validate: func(t *testing.T, err error) { assert.Nil(t, err) },
39 | },
40 | {
41 | name: "fail-all-retries",
42 | args: args{
43 | logger: func() *zerolog.Logger { k := zerolog.New(new(bytes.Buffer)); return &k }(),
44 | description: "testing",
45 | fn: func() func() error {
46 | retry := TolerateDirtyWrites
47 | return func() error {
48 | if retry == 0 {
49 | return nil
50 | }
51 | retry--
52 | return ErrVersionMismatch
53 | }
54 | }(),
55 | },
56 | validate: func(t *testing.T, err error) {
57 | assert.ErrorIs(t, err, ErrVersionMismatch)
58 | assert.ErrorIs(t, err, ErrRetriesExhausted)
59 | },
60 | },
61 | }
62 | for _, tt := range tests {
63 | t.Run(tt.name, func(t *testing.T) {
64 | t.Parallel()
65 | err := Retry(tt.args.logger, tt.args.description, tt.args.fn)
66 | tt.validate(t, err)
67 | })
68 | }
69 | }
70 |
--------------------------------------------------------------------------------
/services/manager/client/state_api.go:
--------------------------------------------------------------------------------
1 | package managerclient
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/berops/claudie/proto/pb/spec"
7 | )
8 |
9 | type StateAPI interface {
10 | // UpdateNodePool will update the nodepool of a cluster within a config. If during an update a dirty
11 | // write occurs the ErrVersionMismatch error is returned. On a dirty write the application code should execute
12 | // the Read/Update/Write cycle again. If either one of nodepool, cluster, config is not found the ErrNotFound
13 | // error is returned.
14 | UpdateNodePool(ctx context.Context, request *UpdateNodePoolRequest) error
15 | }
16 |
17 | type UpdateNodePoolRequest struct {
18 | Config string
19 | Cluster string
20 | NodePool *spec.NodePool
21 | }
22 |
--------------------------------------------------------------------------------
/services/manager/internal/service/handler_get_config.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "context"
5 | "errors"
6 |
7 | "google.golang.org/grpc/codes"
8 | "google.golang.org/grpc/status"
9 |
10 | "github.com/berops/claudie/proto/pb"
11 | "github.com/berops/claudie/services/manager/internal/store"
12 | "github.com/rs/zerolog/log"
13 | )
14 |
15 | func (g *GRPC) GetConfig(ctx context.Context, request *pb.GetConfigRequest) (*pb.GetConfigResponse, error) {
16 | log.Debug().Msgf("Received request for config: %q", request.Name)
17 |
18 | cfg, err := g.Store.GetConfig(ctx, request.Name)
19 | if err != nil {
20 | if !errors.Is(err, store.ErrNotFoundOrDirty) {
21 | return nil, status.Errorf(codes.Internal, "failed to check existence for config %q: %v", request.Name, err)
22 | }
23 | return nil, status.Errorf(codes.NotFound, "no config with name %q found", request.Name)
24 | }
25 |
26 | resp, err := store.ConvertToGRPC(cfg)
27 | if err != nil {
28 | return nil, status.Errorf(codes.Internal, "failed to convert database representation for config %q to grpc: %v", request.Name, err)
29 | }
30 |
31 | return &pb.GetConfigResponse{Config: resp}, nil
32 | }
33 |
--------------------------------------------------------------------------------
/services/manager/internal/service/handler_list_configs.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/berops/claudie/proto/pb"
7 | "github.com/berops/claudie/proto/pb/spec"
8 | "github.com/berops/claudie/services/manager/internal/store"
9 |
10 | "google.golang.org/grpc/codes"
11 | "google.golang.org/grpc/status"
12 | )
13 |
14 | func (g *GRPC) ListConfigs(ctx context.Context, _ *pb.ListConfigRequest) (*pb.ListConfigResponse, error) {
15 | cfgs, err := g.Store.ListConfigs(ctx, nil)
16 | if err != nil {
17 | return nil, status.Errorf(codes.Internal, "failed to query all configs: %v", err)
18 | }
19 |
20 | var out []*spec.Config
21 | for _, cfg := range cfgs {
22 | grpc, err := store.ConvertToGRPC(cfg)
23 | if err != nil {
24 | return nil, status.Errorf(codes.Internal, "failed to convert database representation for config %q to grpc: %v", cfg.Name, err)
25 | }
26 | out = append(out, grpc)
27 | }
28 |
29 | return &pb.ListConfigResponse{Configs: out}, nil
30 | }
31 |
--------------------------------------------------------------------------------
/services/manager/internal/service/handler_mark_for_deletion.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "context"
5 | "errors"
6 |
7 | "google.golang.org/grpc/codes"
8 | "google.golang.org/grpc/status"
9 |
10 | "github.com/berops/claudie/proto/pb"
11 | "github.com/berops/claudie/services/manager/internal/store"
12 | "github.com/rs/zerolog/log"
13 | )
14 |
15 | func (g *GRPC) MarkForDeletion(ctx context.Context, request *pb.MarkForDeletionRequest) (*pb.MarkForDeletionResponse, error) {
16 | log.Debug().Msgf("Marking config %q with version %v for deletion", request.Name, request.Version)
17 |
18 | if err := g.Store.MarkForDeletion(ctx, request.Name, request.Version); err != nil {
19 | if !errors.Is(err, store.ErrNotFoundOrDirty) {
20 | return nil, status.Errorf(codes.Internal, "failed to mark config %q with version %v for deletion: %s", request.Name, request.Version, err.Error())
21 | }
22 |
23 | if _, err := g.Store.GetConfig(ctx, request.Name); err != nil {
24 | if !errors.Is(err, store.ErrNotFoundOrDirty) {
25 | return nil, status.Errorf(codes.Internal, "failed to check existence of config %q: %v", request.Name, err)
26 | }
27 | return nil, status.Errorf(codes.NotFound, "no config with name %q exists", request.Name)
28 | }
29 |
30 | log.Warn().Msgf("Couldn't mark config %q with version %v for deletion, dirty write", request.Name, request.Version)
31 |
32 | return nil, status.Errorf(codes.Aborted, "config %q with version %v could not be marked for deletion due to a concurrent modification", request.Name, request.Version)
33 | }
34 |
35 | log.Info().Msgf("Config %q with version %v successfully marked for deletion", request.Name, request.Version)
36 | return &pb.MarkForDeletionResponse{Name: request.Name, Version: request.Version}, nil
37 | }
38 |
--------------------------------------------------------------------------------
/services/manager/internal/service/metrics.go:
--------------------------------------------------------------------------------
1 | package service
2 |
3 | import (
4 | "github.com/prometheus/client_golang/prometheus"
5 | )
6 |
7 | var (
8 | TasksScheduled = prometheus.NewCounter(prometheus.CounterOpts{
9 | Name: "claudie_input_manifests_tasks_scheduled",
10 | Help: "Total number of tasks scheduled for builder service to work on",
11 | })
12 |
13 | TasksFinishedOk = prometheus.NewCounter(prometheus.CounterOpts{
14 | Name: "claudie_input_manifests_tasks_completed",
15 | Help: "Total number of tasks completed by the builder service",
16 | })
17 |
18 | TasksFinishedErr = prometheus.NewCounter(prometheus.CounterOpts{
19 | Name: "claudie_input_manifests_tasks_errored",
20 | Help: "Total number of tasks errored while processing by the builder service",
21 | })
22 | )
23 |
24 | func MustRegisterCounters() {
25 | prometheus.MustRegister(TasksScheduled)
26 | prometheus.MustRegister(TasksFinishedOk)
27 | prometheus.MustRegister(TasksFinishedErr)
28 | }
29 |
--------------------------------------------------------------------------------
/services/terraformer/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/ansibler
8 | services/kube-eleven
9 | services/builder
10 | services/testing-framework
11 | services/claudie-operator
12 | services/kuber
13 | services/autoscaler-adapter
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/terraformer/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/golang:1.24.1 AS build
2 |
3 | ARG TARGETARCH
4 |
5 | #Download and unzip the terraform binary
6 | RUN apt-get -qq update && apt-get -qq install unzip
7 | RUN VERSION=1.5.7 && \
8 | wget -q https://releases.hashicorp.com/terraform/${VERSION}/terraform_${VERSION}_linux_$TARGETARCH.zip && \
9 | unzip -qq terraform_${VERSION}_linux_$TARGETARCH.zip -d terraform
10 |
11 | #Unset the GOPATH
12 | ENV GOPATH=
13 |
14 | #First, copy go.mod and go.sum to prevent unnecessary download of modules
15 | COPY go.mod .
16 | COPY go.sum .
17 |
18 | #Check if any modules need downloading
19 | RUN go mod download
20 |
21 | #Copy all files apart from the ones in .dockerignore
22 | COPY . .
23 |
24 | #Change the directory
25 | WORKDIR /go/services/terraformer/server
26 |
27 | #Compile the golang code, CGO_ENABLED=0 removes cross-compile dependencies
28 | RUN CGO_ENABLED=0 go build
29 |
30 | FROM docker.io/library/alpine:3.20
31 | #Add repository label
32 | LABEL org.opencontainers.image.source "https://github.com/berops/claudie"
33 | #Add image name as a label
34 | LABEL org.opencontainers.image.base.name "docker.io/library/alpine"
35 | #Add description to the image
36 | LABEL org.opencontainers.image.description "Image for Terraformer from Claudie"
37 |
38 | #Copy the binaries to empty base image
39 | COPY --from=build /go/terraform/terraform /usr/local/bin
40 |
41 | COPY --from=build /go/services/terraformer/server/server /bin/services/terraformer/server/server
42 |
43 | #git is provided by apk below; a git binary copied from the Debian-based build stage would not run on musl-based alpine
44 |
45 | RUN apk --no-cache add bash git
46 |
47 | #Run server
48 | WORKDIR /bin
49 | ENTRYPOINT [ "./services/terraformer/server/server" ]
50 |
51 | #NOTE: We cannot use a scratch image for this Dockerfile since the code shells out to external commands
52 |
--------------------------------------------------------------------------------
/services/terraformer/client/client.go:
--------------------------------------------------------------------------------
1 | package terraformer
2 |
3 | import (
4 | "context"
5 | "fmt"
6 |
7 | "github.com/berops/claudie/proto/pb"
8 | )
9 |
10 | // BuildInfrastructure uses TerraformServiceClient to build/deploy the infrastructure
11 | func BuildInfrastructure(c pb.TerraformerServiceClient, req *pb.BuildInfrastructureRequest) (*pb.BuildInfrastructureResponse, error) {
12 | res, err := c.BuildInfrastructure(context.Background(), req) //sending request to the server and receiving response
13 | if err != nil {
14 | return nil, fmt.Errorf("error while calling BuildInfrastructure on Terraformer: %w", err)
15 | }
16 | return res, nil
17 | }
18 |
19 | // DestroyInfrastructure uses TerraformServiceClient to destroy infrastructure
20 | func DestroyInfrastructure(c pb.TerraformerServiceClient, req *pb.DestroyInfrastructureRequest) (*pb.DestroyInfrastructureResponse, error) {
21 | res, err := c.DestroyInfrastructure(context.Background(), req)
22 | if err != nil {
23 | return res, fmt.Errorf("error while calling DestroyInfrastructure on Terraformer: %w", err)
24 | }
25 | return res, nil
26 | }
27 |
--------------------------------------------------------------------------------
/services/terraformer/server/adapters/inbound/grpc/terraformer_service.go:
--------------------------------------------------------------------------------
1 | package grpc
2 |
3 | import (
4 | "context"
5 |
6 | "github.com/berops/claudie/proto/pb"
7 | "github.com/berops/claudie/services/terraformer/server/domain/usecases"
8 | )
9 |
10 | type TerraformerGrpcService struct {
11 | pb.UnimplementedTerraformerServiceServer
12 |
13 | usecases *usecases.Usecases
14 | }
15 |
16 | func (t *TerraformerGrpcService) BuildInfrastructure(ctx context.Context, request *pb.BuildInfrastructureRequest) (*pb.BuildInfrastructureResponse, error) {
17 | return t.usecases.BuildInfrastructure(request)
18 | }
19 |
20 | func (t *TerraformerGrpcService) DestroyInfrastructure(ctx context.Context, request *pb.DestroyInfrastructureRequest) (*pb.DestroyInfrastructureResponse, error) {
21 | return t.usecases.DestroyInfrastructure(ctx, request)
22 | }
23 |
--------------------------------------------------------------------------------
/services/terraformer/server/adapters/outbound/aws_envs.go:
--------------------------------------------------------------------------------
1 | package outboundAdapters
2 |
3 | import (
4 | "errors"
5 |
6 | "github.com/berops/claudie/internal/envs"
7 | )
8 |
9 | var (
10 | awsRegion = envs.AwsRegion
11 | awsAccessKeyId = envs.AwsAccesskeyId
12 | awsSecretAccessKey = envs.AwsSecretAccessKey
13 | )
14 |
15 | var (
16 | // ErrKeyNotExists is returned when the key is not present in the object storage.
17 | ErrKeyNotExists = errors.New("key is not present in bucket")
18 | )
19 |
--------------------------------------------------------------------------------
/services/terraformer/server/adapters/outbound/immutable_endpoint_resolver.go:
--------------------------------------------------------------------------------
1 | package outboundAdapters
2 |
3 | import (
4 | "context"
5 | "net/url"
6 |
7 | "github.com/aws/aws-sdk-go-v2/service/s3"
8 | "github.com/aws/smithy-go/endpoints"
9 | )
10 |
11 | type immutableResolver struct{ endpoint string }
12 |
13 | // ResolveEndpoint always resolves to the statically configured endpoint and
14 | // appends the bucket name to the URL path. Note that the smithy-go "endpoints"
15 | // package declares `package transport`, hence the transport.Endpoint return type.
16 | func (i *immutableResolver) ResolveEndpoint(_ context.Context, params s3.EndpointParameters) (transport.Endpoint, error) {
17 | u, err := url.Parse(i.endpoint)
18 | if err != nil {
19 | return transport.Endpoint{}, err
20 | }
21 |
22 | u.Path += "/" + *params.Bucket
23 | return transport.Endpoint{URI: *u}, nil
24 | }
25 |
--------------------------------------------------------------------------------
/services/terraformer/server/domain/ports/dynamodb_port.go:
--------------------------------------------------------------------------------
1 | package ports
2 |
3 | import (
4 | "context"
5 | )
6 |
7 | type DynamoDBPort interface {
8 | // DeleteLockFile removes the lock file for the terraform state file from DynamoDB.
9 | DeleteLockFile(ctx context.Context, projectName, clusterId string, keyFormat string) error
10 | }
11 |
--------------------------------------------------------------------------------
/services/terraformer/server/domain/ports/statestorage_port.go:
--------------------------------------------------------------------------------
1 | package ports
2 |
3 | import "context"
4 |
5 | type StateStoragePort interface {
6 | // DeleteStateFile removes terraform state file from MinIO.
7 | DeleteStateFile(ctx context.Context, projectName, clusterId string, keyFormat string) error
8 | // Stat checks whether the object exists.
9 | Stat(ctx context.Context, projectName, clusterId, keyFormat string) error
10 | // Healthcheck checks whether the storage bucket exists or not.
11 | Healthcheck() error
12 | }
13 |
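For unit-testing the usecases without real object storage, a minimal in-memory fake can satisfy this port. The keyFormat handling mirrors the fmt-style assumption above and is purely illustrative:

type fakeStateStorage struct {
	objects map[string]struct{} // keys of "stored" state files
}

func (f *fakeStateStorage) DeleteStateFile(ctx context.Context, projectName, clusterId, keyFormat string) error {
	delete(f.objects, fmt.Sprintf(keyFormat, projectName, clusterId))
	return nil
}

func (f *fakeStateStorage) Stat(ctx context.Context, projectName, clusterId, keyFormat string) error {
	if _, ok := f.objects[fmt.Sprintf(keyFormat, projectName, clusterId)]; !ok {
		return errors.New("key is not present in bucket")
	}
	return nil
}

func (f *fakeStateStorage) Healthcheck() error { return nil }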
--------------------------------------------------------------------------------
/services/terraformer/server/domain/usecases/usecases.go:
--------------------------------------------------------------------------------
1 | package usecases
2 |
3 | import (
4 | "github.com/berops/claudie/services/terraformer/server/domain/ports"
5 | "github.com/rs/zerolog"
6 |
7 | "golang.org/x/sync/semaphore"
8 | )
9 |
10 | const (
11 | // SpawnProcessLimit is the number of processes concurrently executing terraform.
12 | SpawnProcessLimit = 5
13 | )
14 |
15 | type Usecases struct {
16 | // DynamoDB connector.
17 | DynamoDB ports.DynamoDBPort
18 | // Minio connector.
19 | StateStorage ports.StateStoragePort
20 | // SpawnProcessLimit limits the number of spawned terraform processes.
21 | SpawnProcessLimit *semaphore.Weighted
22 | }
23 |
24 | type Cluster interface {
25 | // Build builds the cluster.
26 | Build(logger zerolog.Logger) error
27 | // Destroy destroys the cluster.
28 | Destroy(logger zerolog.Logger) error
29 | // Id returns a cluster ID for the cluster.
30 | Id() string
31 | // UpdateCurrentState sets the current state equal to the desired state.
32 | UpdateCurrentState()
33 | }
34 |
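The weighted semaphore is what enforces SpawnProcessLimit: each terraform invocation acquires one slot and releases it when done, so at most five processes run concurrently. A sketch of the intended pattern; construction and the guarded call are illustrative, and dynamo/storage stand for any implementations of the ports:

u := &Usecases{
	DynamoDB:          dynamo,  // some ports.DynamoDBPort
	StateStorage:      storage, // some ports.StateStoragePort
	SpawnProcessLimit: semaphore.NewWeighted(SpawnProcessLimit),
}

// Around each terraform execution:
if err := u.SpawnProcessLimit.Acquire(ctx, 1); err != nil {
	return err // ctx was cancelled while waiting for a free slot
}
defer u.SpawnProcessLimit.Release(1)
// ... spawn terraform; at most SpawnProcessLimit run at once ...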
--------------------------------------------------------------------------------
/services/terraformer/server/domain/utils/cluster-builder/cluster_builder_test.go:
--------------------------------------------------------------------------------
1 | package cluster_builder
2 |
3 | import (
4 | "reflect"
5 | "testing"
6 |
7 | "github.com/berops/claudie/services/terraformer/server/domain/utils/templates"
8 | )
9 |
10 | func Test_readIPs(t *testing.T) {
11 | type args struct {
12 | data string
13 | }
14 | tests := []struct {
15 | name string
16 | args args
17 | want templates.NodepoolIPs
18 | wantErr bool
19 | }{
20 | {
21 | name: "test-01",
22 | args: args{
23 | data: "{\"test-cluster-compute1\":\"0.0.0.65\",\n\"test-cluster-compute2\":\"0.0.0.512\", \"test-cluster-control1\":\"0.0.0.72\",\n\"test-cluster-control2\":\"0.0.0.65\"}",
24 | },
25 | want: templates.NodepoolIPs{
26 | IPs: map[string]any{
27 | "test-cluster-compute1": "0.0.0.65",
28 | "test-cluster-compute2": "0.0.0.512",
29 | "test-cluster-control1": "0.0.0.72",
30 | "test-cluster-control2": "0.0.0.65",
31 | },
32 | },
33 | wantErr: false,
34 | },
35 | }
36 | for _, tt := range tests {
37 | t.Run(tt.name, func(t *testing.T) {
38 | got, err := readIPs(tt.args.data)
39 | if (err != nil) != tt.wantErr {
40 | t.Errorf("readIPs() error = %v, wantErr %v", err, tt.wantErr)
41 | return
42 | }
43 | if !reflect.DeepEqual(got, tt.want) {
44 | t.Errorf("readIPs() got = %v, want %v", got, tt.want)
45 | }
46 | })
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/services/terraformer/server/domain/utils/templates/backend.go:
--------------------------------------------------------------------------------
1 | package templates
2 |
3 | import (
4 | _ "embed"
5 | "fmt"
6 |
7 | "github.com/berops/claudie/internal/envs"
8 | "github.com/berops/claudie/internal/templateUtils"
9 | )
10 |
11 | //go:embed backend.tpl
12 | var backendTemplate string
13 |
14 | var (
15 | bucketName = envs.BucketName
16 | bucketURL = envs.BucketEndpoint
17 | dynamoTable = envs.DynamoTable
18 | dynamoURL = envs.DynamoEndpoint
19 | awsAccessKey = envs.AwsAccesskeyId
20 | awsSecretAccessKey = envs.AwsSecretAccessKey
21 | region = envs.AwsRegion
22 | )
23 |
24 | type Backend struct {
25 | ProjectName string
26 | ClusterName string
27 | Directory string
28 | }
29 |
30 | // CreateTFFile renders the backend.tf file into the specified Directory.
31 | func (b Backend) CreateTFFile() error {
32 | template := templateUtils.Templates{Directory: b.Directory}
33 |
34 | tpl, err := templateUtils.LoadTemplate(backendTemplate)
35 | if err != nil {
36 | return fmt.Errorf("failed to load template file external_backend.tpl for %s : %w", b.ClusterName, err)
37 | }
38 |
39 | data := struct {
40 | ProjectName string
41 | ClusterName string
42 | BucketURL string
43 | BucketName string
44 | DynamoURL string
45 | DynamoTable string
46 | Region string
47 | AccessKey string
48 | SecretKey string
49 | }{
50 | ProjectName: b.ProjectName,
51 | ClusterName: b.ClusterName,
52 | BucketURL: bucketURL,
53 | BucketName: bucketName,
54 | DynamoURL: dynamoURL,
55 | DynamoTable: dynamoTable,
56 | AccessKey: awsAccessKey,
57 | SecretKey: awsSecretAccessKey,
58 | Region: region,
59 | }
60 |
61 | if err := template.Generate(tpl, "backend.tf", data); err != nil {
62 | return fmt.Errorf("failed to generate backend files for %s : %w", b.ClusterName, err)
63 | }
64 |
65 | return nil
66 | }
67 |
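Usage is a single call; all values below are placeholders:

b := Backend{
	ProjectName: "my-project",
	ClusterName: "my-cluster-abc123",
	Directory:   "/tmp/my-cluster-abc123",
}
// Renders backend.tf (from the embedded backend.tpl) into Directory.
if err := b.CreateTFFile(); err != nil {
	return err
}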
--------------------------------------------------------------------------------
/services/terraformer/server/domain/utils/templates/backend.tpl:
--------------------------------------------------------------------------------
1 | terraform {
2 | backend "s3" {
3 | key = "{{ .ProjectName }}/{{ .ClusterName }}"
4 | region = "{{or .Region "main" }}"
5 | bucket = "{{ .BucketName }}"
6 | dynamodb_table = "{{ .DynamoTable }}"
7 |
8 | access_key = "{{ .AccessKey }}"
9 | secret_key = "{{ .SecretKey }}"
10 |
11 | {{if .BucketURL }}endpoint = "{{ .BucketURL }}"{{ end }}
12 | {{if .DynamoURL }}dynamodb_endpoint = "{{ .DynamoURL }}"{{end}}
13 |
14 | skip_credentials_validation = true
15 | skip_metadata_api_check = true
16 | skip_region_validation = true
17 | force_path_style = true
18 | }
19 | }
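With hypothetical values (bucket and table names, credentials, and endpoints below are placeholders), the template renders roughly as follows. Note that the endpoint lines appear only when the corresponding env-derived URLs are non-empty, and region falls back to "main" when unset:

terraform {
  backend "s3" {
    key = "my-project/my-cluster-abc123"
    region = "main"
    bucket = "claudie-bucket"
    dynamodb_table = "claudie-table"

    access_key = "EXAMPLEACCESSKEY"
    secret_key = "EXAMPLESECRETKEY"

    endpoint = "http://minio:9000"
    dynamodb_endpoint = "http://dynamo:8000"

    skip_credentials_validation = true
    skip_metadata_api_check = true
    skip_region_validation = true
    force_path_style = true
  }
}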
--------------------------------------------------------------------------------
/services/terraformer/server/domain/utils/templates/providers.tpl:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | {{- if .Hetzner }}
4 | hcloud = {
5 | source = "hetznercloud/hcloud"
6 | version = "1.38.1"
7 | }
8 | {{- end }}
9 | {{- if .Gcp }}
10 | google = {
11 | source = "hashicorp/google"
12 | version = "4.59.0"
13 | }
14 | {{- end }}
15 | {{- if .Aws }}
16 | aws = {
17 | source = "hashicorp/aws"
18 | version = "4.61.0"
19 | }
20 | {{- end }}
21 | {{- if .Oci }}
22 | oci = {
23 | source = "oracle/oci"
24 | version = "4.114.0"
25 | }
26 | {{- end }}
27 | {{- if .Azure }}
28 | azurerm = {
29 | source = "hashicorp/azurerm"
30 | version = "3.50.0"
31 | }
32 | {{- end }}
33 | {{- if .Cloudflare }}
34 | cloudflare = {
35 | source = "cloudflare/cloudflare"
36 | version = "4.2.0"
37 | }
38 | {{- end }}
39 | {{- if .HetznerDNS }}
40 | hetznerdns = {
41 | source = "timohirt/hetznerdns"
42 | version = "2.2.0"
43 | }
44 | {{- end }}
45 | {{- if .GenesisCloud }}
46 | genesiscloud = {
47 | source = "genesiscloud/genesiscloud"
48 | version = "1.1.12"
49 | }
50 | {{- end }}
51 | }
52 | }
53 |
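For example, with only .Hetzner set, the rendered requirements block pins just the hcloud provider:

terraform {
  required_providers {
    hcloud = {
      source = "hetznercloud/hcloud"
      version = "1.38.1"
    }
  }
}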
--------------------------------------------------------------------------------
/services/testing-framework/.dockerignore:
--------------------------------------------------------------------------------
1 | *.md
2 | .gitignore
3 | Makefile
4 | LICENSE
5 | *.yaml
6 | .git
7 | services/terraformer
8 | services/kube-eleven
9 | services/builder
10 | services/ansibler
11 | services/claudie-operator
12 | services/kuber
13 | services/autoscaler-adapter
14 | .github
15 | .golangci.yml
16 | .gitattributes
17 | manifests
18 | docs
19 |
--------------------------------------------------------------------------------
/services/testing-framework/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM docker.io/library/golang:1.24.1 AS build
2 |
3 | ARG TARGETARCH
4 |
5 | #Install kubectl
6 | RUN KC_VERSION=v1.24.0 && \
7 | wget -q https://storage.googleapis.com/kubernetes-release/release/${KC_VERSION}/bin/linux/$TARGETARCH/kubectl
8 |
9 | #Unset the GOPATH
10 | ENV GOPATH=
11 |
12 | #First, copy go.mod and go.sum to prevent unnecessary download of modules
13 | COPY go.mod .
14 | COPY go.sum .
15 |
16 | #Check if any modules need downloading
17 | RUN go mod download
18 |
19 | #Copy all files apart from the ones in .dockerignore
20 | COPY . .
21 |
22 | #Change the directory
23 | WORKDIR /go/services/testing-framework
24 | RUN CGO_ENABLED=0 go test -c -run TestClaudie
25 |
26 | FROM docker.io/library/alpine:3.20
27 | #Add repository label
28 | LABEL org.opencontainers.image.source "https://github.com/berops/claudie"
29 | #Add image name as a label
30 | LABEL org.opencontainers.image.base.name "docker.io/library/alpine"
31 | #Add description to the image
32 | LABEL org.opencontainers.image.description "Image for Testing-framework from Claudie"
33 |
34 | COPY --from=build /go/kubectl /usr/local/bin/kubectl
35 | COPY --from=build /go/services/testing-framework/testing-framework.test /go/services/testing-framework/testing-framework.test
36 |
37 | RUN chmod +x /usr/local/bin/kubectl && \
38 | apk add -q bash
39 |
40 | #Run the compiled test binary
41 | WORKDIR /go/services/testing-framework
42 | ENTRYPOINT [ "./testing-framework.test", "-test.run", "TestClaudie", "-test.timeout=25000s", "-test.v", "./..." ]
43 |
--------------------------------------------------------------------------------