├── .dockerignore ├── .gitignore ├── Dockerfile ├── Dockerfile.osd-daemon ├── Makefile ├── PROJECT ├── README.md ├── agent ├── Dockerfile ├── requirements.txt └── vitastor-agent.py ├── api └── v1 │ ├── groupversion_info.go │ ├── vitastorcluster_types.go │ ├── vitastornode_types.go │ ├── vitastorosd_types.go │ ├── vitastorpool_types.go │ └── zz_generated.deepcopy.go ├── config ├── crd │ ├── bases │ │ ├── control.vitastor.io_vitastorclusters.yaml │ │ ├── control.vitastor.io_vitastornodes.yaml │ │ ├── control.vitastor.io_vitastorosds.yaml │ │ ├── control.vitastor.io_vitastorpools.yaml │ │ └── pool.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_vitastorclusters.yaml │ │ ├── cainjection_in_vitastornodes.yaml │ │ ├── cainjection_in_vitastorosds.yaml │ │ ├── cainjection_in_vitastorpools.yaml │ │ ├── webhook_in_vitastorclusters.yaml │ │ ├── webhook_in_vitastornodes.yaml │ │ ├── webhook_in_vitastorosds.yaml │ │ └── webhook_in_vitastorpools.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ └── manager_config_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── role.yaml │ ├── role_binding.yaml │ ├── service_account.yaml │ ├── vitastorcluster_editor_role.yaml │ ├── vitastorcluster_viewer_role.yaml │ ├── vitastornode_editor_role.yaml │ ├── vitastornode_viewer_role.yaml │ ├── vitastorosd_editor_role.yaml │ ├── vitastorosd_viewer_role.yaml │ ├── vitastorpool_editor_role.yaml │ └── vitastorpool_viewer_role.yaml └── samples │ ├── control_v1_vitastorcluster.yaml │ ├── control_v1_vitastornode.yaml │ ├── control_v1_vitastorosd.yaml │ └── control_v1_vitastorpool.yaml ├── controllers ├── suite_test.go ├── vitastorcluster_controller.go ├── vitastornode_controller.go ├── vitastorosd_controller.go └── vitastorpool_controller.go ├── deploy ├── 000-vitastor-csi-namespace.yaml ├── 001-vitastor-configmap-osd.yaml ├── 002-csi.yaml ├── 003-vitastor-crd.yaml ├── 004-vitastor-operator-deployment.yaml ├── 005-sample-vitastor-cluster.yaml ├── 006-sample-vitastor-pool.yaml ├── 007-test-pvc.yaml └── 008-test-pod.yaml ├── dist └── Operator.yaml ├── go.mod ├── go.sum ├── hack └── boilerplate.go.txt └── main.go /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore build and test binaries. 
3 | bin/
4 | testbin/
5 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | 
2 | # Binaries for programs and plugins
3 | *.exe
4 | *.exe~
5 | *.dll
6 | *.so
7 | *.dylib
8 | bin
9 | testbin/*
10 | Dockerfile.cross
11 | 
12 | # Test binary, build with `go test -c`
13 | *.test
14 | 
15 | # Output of the go coverage tool, specifically when used with LiteIDE
16 | *.out
17 | 
18 | # Kubernetes Generated files - skip generated files, except for vendored files
19 | 
20 | !vendor/**/zz_generated.*
21 | 
22 | # editor and IDE paraphernalia
23 | .idea
24 | *.swp
25 | *.swo
26 | *~
27 | 
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | # Build the manager binary
2 | FROM golang:1.19 as builder
3 | ARG TARGETOS
4 | ARG TARGETARCH
5 | 
6 | WORKDIR /workspace
7 | # Copy the Go Modules manifests
8 | COPY go.mod go.mod
9 | COPY go.sum go.sum
10 | # cache deps before building and copying source so that we don't need to re-download as much
11 | # and so that source changes don't invalidate our downloaded layer
12 | RUN go mod download
13 | 
14 | # Copy the go source
15 | COPY main.go main.go
16 | COPY api/ api/
17 | COPY controllers/ controllers/
18 | 
19 | # Build
20 | # GOARCH has no default value, so the binary is built for the platform of the host where the command
21 | # was called. For example, if we call make docker-build on an Apple Silicon (M1) machine,
22 | # the docker BUILDPLATFORM arg will be linux/arm64, while on Apple x86 it will be linux/amd64. Therefore,
23 | # by leaving it empty we ensure that the container and the binary shipped in it share the same platform.
24 | RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go
25 | 
26 | # Use distroless as minimal base image to package the manager binary
27 | # Refer to https://github.com/GoogleContainerTools/distroless for more details
28 | FROM gcr.io/distroless/static:nonroot
29 | WORKDIR /
30 | COPY --from=builder /workspace/manager .
31 | USER 65532:65532
32 | 
33 | ENTRYPOINT ["/manager"]
34 | 
--------------------------------------------------------------------------------
/Dockerfile.osd-daemon:
--------------------------------------------------------------------------------
1 | FROM debian:bullseye
2 | 
3 | RUN apt-get update && apt-get install -y wget gnupg && \
4 |     wget -q -O - https://vitastor.io/debian/pubkey | apt-key add - && \
5 |     echo 'deb https://vitastor.io/debian bullseye main' >> /etc/apt/sources.list && \
6 |     apt-get update && apt-get install -y vitastor lp-solve && apt-get clean
7 | 
8 | ENTRYPOINT ["vitastor-disk", "exec-osd"]
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | 
2 | # Image URL to use all building/pushing image targets
3 | IMG ?= controller:latest
4 | # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
5 | ENVTEST_K8S_VERSION = 1.25.0
6 | 
7 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
8 | ifeq (,$(shell go env GOBIN))
9 | GOBIN=$(shell go env GOPATH)/bin
10 | else
11 | GOBIN=$(shell go env GOBIN)
12 | endif
13 | 
14 | # Setting SHELL to bash allows bash commands to be executed by recipes.
15 | # Options are set to exit when a recipe line exits non-zero or a piped command fails.
16 | SHELL = /usr/bin/env bash -o pipefail
17 | .SHELLFLAGS = -ec
18 | 
19 | .PHONY: all
20 | all: build
21 | 
22 | ##@ General
23 | 
24 | # The help target prints out all targets with their descriptions organized
25 | # beneath their categories. The categories are represented by '##@' and the
26 | # target descriptions by '##'. The awk command is responsible for reading the
27 | # entire set of makefiles included in this invocation, looking for lines of the
28 | # file as xyz: ## something, and then pretty-formats the target and help. Then,
29 | # if there's a line with ##@ something, that gets pretty-printed as a category.
30 | # More info on the usage of ANSI control characters for terminal formatting:
31 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters
32 | # More info on the awk command:
33 | # http://linuxcommand.org/lc3_adv_awk.php
34 | 
35 | .PHONY: help
36 | help: ## Display this help.
37 | 	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n  make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf "  \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
38 | 
39 | ##@ Development
40 | 
41 | .PHONY: manifests
42 | manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects.
43 | 	$(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases
44 | 
45 | .PHONY: generate
46 | generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations.
47 | 	$(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./..."
48 | 
49 | .PHONY: fmt
50 | fmt: ## Run go fmt against code.
51 | 	go fmt ./...
52 | 
53 | .PHONY: vet
54 | vet: ## Run go vet against code.
55 | 	go vet ./...
56 | 
57 | .PHONY: test
58 | test: manifests generate fmt vet envtest ## Run tests.
59 | 	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out
60 | 
61 | ##@ Build
62 | 
63 | .PHONY: build
64 | build: generate fmt vet ## Build manager binary.
65 | 	go build -o bin/manager main.go
66 | 
67 | .PHONY: run
68 | run: manifests generate fmt vet ## Run a controller from your host.
69 | 	go run ./main.go
70 | 
71 | # If you wish to build the manager image targeting other platforms you can use the --platform flag.
72 | # (i.e. docker build --platform linux/arm64). However, you must enable Docker BuildKit for it.
73 | # More info: https://docs.docker.com/develop/develop-images/build_enhancements/
74 | .PHONY: docker-build
75 | docker-build: test ## Build docker image with the manager.
76 | 	docker build -t ${IMG} .
77 | 
78 | .PHONY: docker-push
79 | docker-push: ## Push docker image with the manager.
80 | 	docker push ${IMG}
81 | 
82 | # PLATFORMS defines the target platforms the manager image is built for, to provide support for multiple
83 | # architectures (i.e. make docker-buildx IMG=myregistry/myoperator:0.0.1). To use this option you need to:
84 | # - be able to use docker buildx. More info: https://docs.docker.com/build/buildx/
85 | # - have BuildKit enabled. More info: https://docs.docker.com/develop/develop-images/build_enhancements/
86 | # - be able to push the image to your registry (i.e. if you do not provide a valid value via IMG=<myregistry/image:tag> then the export will fail)
87 | # To properly provide solutions that support more than one platform you should use this option.
88 | PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x,linux/ppc64le
89 | .PHONY: docker-buildx
90 | docker-buildx: test ## Build and push docker image for the manager for cross-platform support
91 | # copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
92 | 	sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' Dockerfile > Dockerfile.cross
93 | 	- docker buildx create --name project-v3-builder
94 | 	docker buildx use project-v3-builder
95 | 	- docker buildx build --push --platform=$(PLATFORMS) --tag ${IMG} -f Dockerfile.cross
96 | 	- docker buildx rm project-v3-builder
97 | 	rm Dockerfile.cross
98 | 
99 | ##@ Deployment
100 | 
101 | ifndef ignore-not-found
102 |   ignore-not-found = false
103 | endif
104 | 
105 | .PHONY: install
106 | install: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config.
107 | 	$(KUSTOMIZE) build config/crd | kubectl apply -f -
108 | 
109 | .PHONY: uninstall
110 | uninstall: manifests kustomize ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
111 | 	$(KUSTOMIZE) build config/crd | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
112 | 
113 | .PHONY: deploy
114 | deploy: manifests kustomize ## Deploy controller to the K8s cluster specified in ~/.kube/config.
115 | 	cd config/manager && $(KUSTOMIZE) edit set image controller=${IMG}
116 | 	$(KUSTOMIZE) build config/default | kubectl apply -f -
117 | 
118 | .PHONY: undeploy
119 | undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. Call with ignore-not-found=true to ignore resource not found errors during deletion.
120 | 	$(KUSTOMIZE) build config/default | kubectl delete --ignore-not-found=$(ignore-not-found) -f -
121 | 
122 | ##@ Build Dependencies
123 | 
124 | ## Location to install dependencies to
125 | LOCALBIN ?= $(shell pwd)/bin
126 | $(LOCALBIN):
127 | 	mkdir -p $(LOCALBIN)
128 | 
129 | ## Tool Binaries
130 | KUSTOMIZE ?= $(LOCALBIN)/kustomize
131 | CONTROLLER_GEN ?= $(LOCALBIN)/controller-gen
132 | ENVTEST ?= $(LOCALBIN)/setup-envtest
133 | 
134 | ## Tool Versions
135 | KUSTOMIZE_VERSION ?= v3.8.7
136 | CONTROLLER_TOOLS_VERSION ?= v0.14.0
137 | 
138 | KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh"
139 | .PHONY: kustomize
140 | kustomize: $(KUSTOMIZE) ## Download kustomize locally if necessary.
141 | $(KUSTOMIZE): $(LOCALBIN)
142 | 	test -s $(LOCALBIN)/kustomize || { curl -Ss $(KUSTOMIZE_INSTALL_SCRIPT) | bash -s -- $(subst v,,$(KUSTOMIZE_VERSION)) $(LOCALBIN); }
143 | 
144 | .PHONY: controller-gen
145 | controller-gen: $(CONTROLLER_GEN) ## Download controller-gen locally if necessary.
146 | $(CONTROLLER_GEN): $(LOCALBIN)
147 | 	test -s $(LOCALBIN)/controller-gen || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-tools/cmd/controller-gen@$(CONTROLLER_TOOLS_VERSION)
148 | 
149 | .PHONY: envtest
150 | envtest: $(ENVTEST) ## Download envtest-setup locally if necessary.
151 | $(ENVTEST): $(LOCALBIN)
152 | 	test -s $(LOCALBIN)/setup-envtest || GOBIN=$(LOCALBIN) go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
153 | 
--------------------------------------------------------------------------------
/PROJECT:
--------------------------------------------------------------------------------
1 | domain: vitastor.io
2 | layout:
3 | - go.kubebuilder.io/v3
4 | projectName: vitastor-operator
5 | repo: gitlab.com/Antilles7227/vitastor-operator
6 | resources:
7 | - api:
8 |     crdVersion: v1
9 |     namespaced: true
10 |   controller: true
11 |   domain: vitastor.io
12 |   group: control
13 |   kind: VitastorNode
14 |   path: gitlab.com/Antilles7227/vitastor-operator/api/v1
15 |   version: v1
16 | - api:
17 |     crdVersion: v1
18 |     namespaced: true
19 |   controller: true
20 |   domain: vitastor.io
21 |   group: control
22 |   kind: VitastorOSD
23 |   path: gitlab.com/Antilles7227/vitastor-operator/api/v1
24 |   version: v1
25 | - api:
26 |     crdVersion: v1
27 |     namespaced: true
28 |   controller: true
29 |   domain: vitastor.io
30 |   group: control
31 |   kind: VitastorCluster
32 |   path: gitlab.com/Antilles7227/vitastor-operator/api/v1
33 |   version: v1
34 | - api:
35 |     crdVersion: v1
36 |     namespaced: true
37 |   controller: true
38 |   domain: vitastor.io
39 |   group: control
40 |   kind: VitastorPool
41 |   path: gitlab.com/Antilles7227/vitastor-operator/api/v1
42 |   version: v1
43 | version: "3"
44 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # vitastor-operator
2 | Kubernetes operator for Vitastor - software-defined block storage.
3 | 
4 | ## Description
5 | Vitastor is a distributed block SDS, a direct replacement for Ceph RBD and the internal SDSs of public clouds.
6 | 
7 | Vitastor is architecturally similar to Ceph, which means strong consistency, primary replication, symmetric clustering and automatic data distribution over any number of drives of any size with configurable redundancy (replication or erasure codes/XOR).
8 | 
9 | Vitastor primarily targets SSD and SSD+HDD clusters with at least a 10 Gbit/s network, supports TCP and RDMA, and can achieve 4 KB read and write latency as low as ~0.1 ms with proper hardware, which is ~10 times faster than other popular SDSs like Ceph or the internal systems of public clouds.
10 | 
11 | The operator is built with Kubebuilder - a framework for building Kubernetes APIs with CRDs.
12 | 
13 | ## Getting Started
14 | You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster.
15 | **Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows).
16 | 
17 | You also need an etcd cluster for Vitastor, with some additional tuning (see the example after this list):
18 | * max-txn-ops should be at least 100000
19 | * it's recommended to increase quota-backend-bytes (especially for large clusters); if you hit the quota limit, the whole etcd cluster stops accepting writes.
20 | 
21 | You can use a bare-metal etcd cluster, or use [Etcd Operator](https://github.com/aenix-io/etcd-operator) to keep all your Vitastor stuff inside your cluster.
22 | 
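For reference, a minimal sketch of the corresponding etcd flags (the quota value is an illustrative assumption, not a project recommendation; adapt it to your cluster size):

```sh
# Hypothetical etcd tuning for Vitastor (illustrative values, not defaults).
# --max-txn-ops raises the per-transaction operation limit;
# --quota-backend-bytes raises the backend DB quota (here 4 GiB) so etcd
# does not switch to read-only "NOSPACE" mode when the keyspace grows.
# Combine these with the rest of your usual etcd flags (name, peers, data dir).
etcd --max-txn-ops=100000 --quota-backend-bytes=4294967296
```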
23 | ### Running on the cluster
24 | 1. Create a namespace for Vitastor and a ConfigMap with vitastor.conf:
25 | 
26 | ```sh
27 | kubectl apply -f deploy/000-vitastor-csi-namespace.yaml
28 | kubectl apply -f deploy/001-vitastor-configmap-osd.yaml
29 | ```
30 | 
31 | 2. Install the CSI driver pods and the CSI provisioner:
32 | 
33 | ```sh
34 | kubectl apply -f deploy/002-csi.yaml
35 | ```
36 | 
37 | 3. Install the Custom Resource Definitions:
38 | 
39 | ```sh
40 | kubectl apply -f deploy/003-vitastor-crd.yaml
41 | ```
42 | 
43 | 4. Deploy the controller to the cluster (adjust the image name/tag if you want to use self-hosted images):
44 | 
45 | ```sh
46 | kubectl apply -f deploy/004-vitastor-operator-deployment.yaml
47 | ```
48 | 
49 | 5. Check the cluster manifest (e.g. set the proper number of monitors, labels and image names if needed) and apply it:
50 | 
51 | ```sh
52 | kubectl apply -f deploy/005-sample-vitastor-cluster.yaml
53 | ```
54 | 
55 | 6. Apply labels to the nodes with disks (so the operator can deploy agents to them and discover their empty disks):
56 | 
57 | ```sh
58 | kubectl label nodes <node-name> vitastor-node=true
59 | ```
60 | 
61 | 7. Prepare your disks for OSDs using the Agent API (right now it can be done through `curl`; in the future there will be a kubectl plugin for operational tasks).
62 | 
63 | **Note:** If you are using an NVMe disk bigger than 2 TiB, it's recommended to run more than one OSD per disk (for better IO utilization). If not, you may omit the `osd_num` field in the `curl` request.
64 | 
65 | ```sh
66 | # Use the IPs from the output to prepare disks
67 | kubectl get pods -n vitastor-system -o custom-columns=NAME:.metadata.name,IP:.status.podIP | grep vitastor-agent
68 | # Make a request for every disk you want to use for Vitastor
69 | curl -XPOST -H 'Content-Type: application/json' -d '{"disk": "<disk>", "osd_num": <osd-per-disk>}' http://<agent-pod-ip>:8000/disk/prepare
70 | ```
71 | 
72 | 8. Create a Pool for PVs. You may change some options of that pool, e.g. the redundancy scheme (`xor`, `replicated` or `ec`), the number of PGs, the failure domain etc. Check the [Vitastor docs](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/config/pool.en.md) for more info (not all options are supported right now):
73 | 
74 | ```sh
75 | kubectl apply -f deploy/006-sample-vitastor-pool.yaml
76 | ```
77 | 
78 | That's it! Now you can use the created pool as a StorageClass for your PVCs, for example as sketched below.
79 | 
80 | 
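A minimal sketch of such a PVC (the StorageClass name below is a placeholder assumption; take the actual name from your pool/StorageClass manifests - `deploy/007-test-pvc.yaml` and `deploy/008-test-pod.yaml` in this repo are the canonical examples):

```sh
# Hypothetical PVC bound to a Vitastor-backed StorageClass; adjust
# storageClassName to whatever your pool's StorageClass is called.
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: vitastor-test-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi
  storageClassName: <vitastor-storageclass>
EOF
```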
81 | 9. Optional: By default, the operator creates an additional placement level `dc` (datacenter).
82 | You can label your nodes with `fd.vitastor.io/<level>: <value>` and the operator will link the node to that failure domain (useful if you use a [complex failure domain](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/config/pool.en.md#level_placement)):
83 | ```sh
84 | kubectl label node <node-name> fd.vitastor.io/dc=DCNAME
85 | ```
86 | 
87 | ## TODO
88 | 
89 | * kubectl plugin for operator & vitastor maintenance
90 | * More options for cluster tuning
91 | * Automatic decommissioning and replacement of disks
92 | 
93 | ## Contacts
94 | 
95 | If you have any questions about this Operator and/or Vitastor itself, feel free to join our Telegram chat: [@vitastor](https://t.me/vitastor)
--------------------------------------------------------------------------------
/agent/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.10-bullseye
2 | 
3 | WORKDIR /app
4 | RUN wget -q -O - https://vitastor.io/debian/pubkey | apt-key add - && \
5 |     echo 'deb https://vitastor.io/debian bullseye main' >> /etc/apt/sources.list && \
6 |     apt update && apt install -y vitastor lp-solve && apt clean
7 | 
8 | COPY . /app
9 | RUN pip install -r requirements.txt
10 | 
11 | CMD [ "uvicorn", "--host", "0.0.0.0", "vitastor-agent:app" ]
--------------------------------------------------------------------------------
/agent/requirements.txt:
--------------------------------------------------------------------------------
1 | fastapi==0.79.0
2 | uvicorn==0.18.2
--------------------------------------------------------------------------------
/agent/vitastor-agent.py:
--------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | from typing import Tuple, Optional
3 | import logging
4 | import os
5 | import sys
6 | import asyncio
7 | from fastapi import FastAPI, HTTPException
8 | from pydantic import BaseModel
9 | import json
10 | import re
11 | 
12 | class Partition(BaseModel):
13 |     partuuid: Optional[str]
14 |     name: str
15 |     fstype: Optional[str]
16 | 
17 | 
18 | class Disk(BaseModel):
19 |     name: str
20 |     type: str
21 |     children: Optional[list[Partition]]
22 | 
23 | 
24 | class LsblkOutput(BaseModel):
25 |     blockdevices: list[Disk]
26 | 
27 | 
28 | class VitastorParameters(BaseModel):
29 |     data_device: str
30 |     bitmap_granularity: int
31 |     block_size: int
32 |     osd_num: int
33 |     disable_data_fsync: bool
34 |     disable_device_lock: bool
35 |     immediate_commit: str
36 |     disk_alignment: int
37 |     journal_block_size: int
38 |     meta_block_size: int
39 |     journal_no_same_sector_overwrites: bool
40 |     journal_sector_buffer_count: int
41 | 
42 | class OSDPrepareParameters(BaseModel):
43 |     disk: str
44 |     osd_num: Optional[int]
45 | 
46 | 
47 | app = FastAPI()
48 | 
49 | 
50 | async def exec_shell(script: str) -> Tuple[int, str, str]:
51 |     proc = await asyncio.create_subprocess_shell(script, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
52 |     stdout, stderr = await proc.communicate()
53 |     # Default to empty strings so both values are always bound, even when
54 |     # the subprocess produced no output on one of the streams.
55 |     res_stdout = stdout.decode() if stdout else ""
56 |     res_stderr = stderr.decode() if stderr else ""
57 |     return proc.returncode, res_stdout, res_stderr
58 | 
59 | async def get_system_disks(disk_mask: str = "") -> list[Disk]:
60 |     system_disks: list[Disk] = []
61 |     proc = await asyncio.create_subprocess_shell(f"lsblk -p -J -o NAME,PARTUUID,FSTYPE,TYPE {disk_mask}",
62 |                                                  stdout=asyncio.subprocess.PIPE,
63 |                                                  stderr=asyncio.subprocess.PIPE)
64 |     stdout, stderr = await proc.communicate()
65 |     if not stdout:
66 |         # lsblk produced no output (e.g. the device mask matched nothing)
67 |         return system_disks
68 |     result: LsblkOutput = LsblkOutput.parse_raw(stdout.decode())
69 |     for block in result.blockdevices:
70 |         if block.type != "disk":
71 |             continue
72 |         system_disks.append(block)
73 |     return system_disks
74 | 
75 | @app.on_event('startup')
76 | async def app_startup():
77 |     pass
78 | 
79 | 
80 | @app.on_event('shutdown')
81 | async def app_shutdown():
82 |     pass
83 | 
84 | @app.get("/disk")
85 | async def get_disks() -> list[Disk]:
86 |     result = await get_system_disks()
87 |     return result
88 | 
89 | 
90 | @app.get("/disk/empty")
91 | async def get_empty_disks() -> list[Disk]:
92 |     all_disks = await get_system_disks()
93 |     empty_disks: list[Disk] = []
94 |     for disk in all_disks:
95 |         # A disk counts as empty if it has no partitions and is not an NBD device
96 |         if not disk.children and ("nbd" not in disk.name):
97 |             empty_disks.append(disk)
98 |     return empty_disks
99 | 
100 | @app.get("/disk/osd")
101 | async def get_osd_disks() -> Optional[list[VitastorParameters]]:
102 |     osd_info: list[VitastorParameters] = []
103 |     osd_list = os.listdir("/dev/disk/by-partuuid")
104 |     if len(osd_list) == 0:
105 |         return None
106 |     for osd in osd_list:
107 |         osd_info_script = f"vitastor-disk read-sb '/dev/disk/by-partuuid/{osd}'"
108 |         osd_info_proc = await asyncio.create_subprocess_shell(osd_info_script, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
109 |         stdout, stderr = await osd_info_proc.communicate()
110 |         if osd_info_proc.returncode != 0:
111 |             logging.error(f"Error while reading the superblock; looks like this is not an OSD partition: {stderr.decode()}")
112 |             continue
113 |         logging.debug(f"OSD info: {stdout.decode()}")
114 |         osd_info.append(VitastorParameters.parse_raw(stdout.decode()))
115 |     return osd_info
116 | 
117 | 
118 | @app.post("/disk/prepare")
119 | async def prepare_disk_for_vitastor(device: OSDPrepareParameters) -> Optional[list[VitastorParameters]]:
120 |     aligned_disks = await get_system_disks(device.disk)
121 |     if not aligned_disks or aligned_disks[0].children is not None:
122 |         raise HTTPException(status_code=409, detail="Disk not found or already partitioned; it can't be prepared automatically. Please check it manually")
123 |     vitastor_parameters: list[VitastorParameters] = []
124 |     vitastor_prepare_script = f"vitastor-disk prepare {device.disk}"
125 |     if device.osd_num:
126 |         vitastor_prepare_script = f"vitastor-disk prepare {device.disk} --osd_per_disk {device.osd_num}"
127 | 
128 |     vitastor_prepare_proc = await asyncio.create_subprocess_shell(vitastor_prepare_script, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
129 |     stdout, stderr = await vitastor_prepare_proc.communicate()
130 |     if vitastor_prepare_proc.returncode != 0:
131 |         logging.error(f"Something went wrong while preparing the disk for OSD: {stderr.decode()}")
132 |         raise HTTPException(status_code=500, detail="Error during preparation")
133 |     # Re-read the disk: vitastor-disk prepare has created one partition per OSD
134 |     disk_list = await get_system_disks(device.disk)
135 |     for d in disk_list[0].children:
136 |         osd_info_script = f"vitastor-disk read-sb {d.name}"
137 |         osd_info_proc = await asyncio.create_subprocess_shell(osd_info_script, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
138 |         stdout, stderr = await osd_info_proc.communicate()
139 |         if osd_info_proc.returncode != 0:
140 |             logging.error(f"Something went wrong while gathering OSD info: {stderr.decode()}")
141 |             continue
142 |         vitastor_parameters.append(VitastorParameters.parse_raw(stdout.decode()))
143 |     return vitastor_parameters
--------------------------------------------------------------------------------
/api/v1/groupversion_info.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2022.
3 | 
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 | 
8 |     http://www.apache.org/licenses/LICENSE-2.0
9 | 
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */ 16 | 17 | // Package v1 contains API Schema definitions for the control v1 API group 18 | // +kubebuilder:object:generate=true 19 | // +groupName=control.vitastor.io 20 | package v1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "control.vitastor.io", Version: "v1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /api/v1/vitastorcluster_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 21 | ) 22 | 23 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 24 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
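// For illustration, a VitastorCluster object serialized from the Spec below
// might look like this manifest (a sketch only: the image names and the node
// label are placeholder assumptions, not project defaults; see
// deploy/005-sample-vitastor-cluster.yaml for the shipped sample):
//
//	apiVersion: control.vitastor.io/v1
//	kind: VitastorCluster
//	metadata:
//	  name: vitastor
//	spec:
//	  monitorReplicaNum: 3
//	  vitastorNodeLabel: vitastor-node
//	  disksReconciligPeriod: 60
//	  agentImage: <registry>/vitastor-agent:latest
//	  monitorImage: <registry>/vitastor-monitor:latest
//	  osdImage: <registry>/vitastor-osd:latest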
25 | 26 | // VitastorClusterSpec defines the desired state of VitastorCluster 27 | type VitastorClusterSpec struct { 28 | // Number of replicas for monitors 29 | MonitorReplicaNum int `json:"monitorReplicaNum"` 30 | // Node label for Agent DaemonSet nodeSelector 31 | VitastorNodeLabel string `json:"vitastorNodeLabel"` 32 | // Reconcile period in seconds 33 | DisksReconciligPeriod int `json:"disksReconciligPeriod"` 34 | // Agent image name/tag 35 | AgentImage string `json:"agentImage"` 36 | // Monitor image name/tag 37 | MonitorImage string `json:"monitorImage"` 38 | // OSD image name/tag 39 | OSDImage string `json:"osdImage"` 40 | } 41 | 42 | // VitastorClusterStatus defines the observed state of VitastorCluster 43 | type VitastorClusterStatus struct { 44 | } 45 | 46 | // +kubebuilder:object:root=true 47 | // +kubebuilder:subresource:status 48 | // +kubebuilder:resource:scope=Cluster 49 | // VitastorCluster is the Schema for the vitastorclusters API 50 | type VitastorCluster struct { 51 | metav1.TypeMeta `json:",inline"` 52 | metav1.ObjectMeta `json:"metadata,omitempty"` 53 | 54 | Spec VitastorClusterSpec `json:"spec,omitempty"` 55 | Status VitastorClusterStatus `json:"status,omitempty"` 56 | } 57 | 58 | //+kubebuilder:object:root=true 59 | 60 | // VitastorClusterList contains a list of VitastorCluster 61 | type VitastorClusterList struct { 62 | metav1.TypeMeta `json:",inline"` 63 | metav1.ListMeta `json:"metadata,omitempty"` 64 | Items []VitastorCluster `json:"items"` 65 | } 66 | 67 | func init() { 68 | SchemeBuilder.Register(&VitastorCluster{}, &VitastorClusterList{}) 69 | } 70 | -------------------------------------------------------------------------------- /api/v1/vitastornode_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1 18 | 19 | import ( 20 | //corev1 "k8s.io/api/core/v1" 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 25 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
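// For illustration, a VitastorNode object serialized from the Spec below might
// look like this manifest (a sketch; the node name and image are placeholders):
//
//	apiVersion: control.vitastor.io/v1
//	kind: VitastorNode
//	metadata:
//	  name: worker-1
//	spec:
//	  nodeName: worker-1
//	  osdImage: <registry>/vitastor-osd:latest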
26 | 
27 | // VitastorNodeSpec defines the desired state of VitastorNode
28 | type VitastorNodeSpec struct {
29 | 	// Name of the node that has disks for OSDs
30 | 	NodeName string `json:"nodeName"`
31 | 	// OSD image name/tag
32 | 	OSDImage string `json:"osdImage"`
33 | }
34 | 
35 | // VitastorNodeStatus defines the observed state of VitastorNode
36 | type VitastorNodeStatus struct {
37 | 	// List of disks on that node
38 | 	// +optional
39 | 	Disks []string `json:"disks"`
40 | 
41 | 	// List of empty disks (without any partition) on that node
42 | 	// +optional
43 | 	EmptyDisks []string `json:"emptyDisks"`
44 | 
45 | 	// List of Vitastor OSDs on that node
46 | 	// +optional
47 | 	VitastorDisks []string `json:"vitastorDisks"`
48 | }
49 | 
50 | //+kubebuilder:object:root=true
51 | //+kubebuilder:subresource:status
52 | //+kubebuilder:resource:scope=Cluster
53 | 
54 | // VitastorNode is the Schema for the vitastornodes API
55 | type VitastorNode struct {
56 | 	metav1.TypeMeta   `json:",inline"`
57 | 	metav1.ObjectMeta `json:"metadata,omitempty"`
58 | 
59 | 	Spec   VitastorNodeSpec   `json:"spec,omitempty"`
60 | 	Status VitastorNodeStatus `json:"status,omitempty"`
61 | }
62 | 
63 | //+kubebuilder:object:root=true
64 | 
65 | // VitastorNodeList contains a list of VitastorNode
66 | type VitastorNodeList struct {
67 | 	metav1.TypeMeta `json:",inline"`
68 | 	metav1.ListMeta `json:"metadata,omitempty"`
69 | 	Items           []VitastorNode `json:"items"`
70 | }
71 | 
72 | func init() {
73 | 	SchemeBuilder.Register(&VitastorNode{}, &VitastorNodeList{})
74 | }
75 | 
--------------------------------------------------------------------------------
/api/v1/vitastorosd_types.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2022.
3 | 
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 | 
8 |     http://www.apache.org/licenses/LICENSE-2.0
9 | 
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 | 
17 | package v1
18 | 
19 | import (
20 | 	//corev1 "k8s.io/api/core/v1"
21 | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
22 | )
23 | 
24 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
25 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
26 | 
27 | // VitastorOSDSpec defines the desired state of VitastorOSD
28 | type VitastorOSDSpec struct {
29 | 	// Name of node
30 | 	NodeName string `json:"nodeName"`
31 | 	// Path to OSD disk (i.e. /dev/disk/by-partuuid/<...>)
32 | 	OSDPath string `json:"osdPath"`
33 | 	// Number allocated to OSD
34 | 	OSDNumber int `json:"osdNumber"`
35 | 	// OSD container image
36 | 	OSDImage string `json:"osdImage"`
37 | 	// Tags applied to the OSD (comma-separated, e.g. "hostN,nvme,dcN")
38 | 	// +optional
39 | 	OSDTags string `json:"osdTags"`
40 | }
41 | 
42 | // VitastorOSDStatus defines the observed state of VitastorOSD
43 | type VitastorOSDStatus struct {
44 | }
45 | 
46 | //+kubebuilder:object:root=true
47 | //+kubebuilder:subresource:status
48 | //+kubebuilder:resource:scope=Cluster
49 | 
50 | // VitastorOSD is the Schema for the vitastorosds API
51 | type VitastorOSD struct {
52 | 	metav1.TypeMeta   `json:",inline"`
53 | 	metav1.ObjectMeta `json:"metadata,omitempty"`
54 | 
55 | 	Spec   VitastorOSDSpec   `json:"spec,omitempty"`
56 | 	Status VitastorOSDStatus `json:"status,omitempty"`
57 | }
58 | 
59 | //+kubebuilder:object:root=true
60 | 
61 | // VitastorOSDList contains a list of VitastorOSD
62 | type VitastorOSDList struct {
63 | 	metav1.TypeMeta `json:",inline"`
64 | 	metav1.ListMeta `json:"metadata,omitempty"`
65 | 	Items           []VitastorOSD `json:"items"`
66 | }
67 | 
68 | func init() {
69 | 	SchemeBuilder.Register(&VitastorOSD{}, &VitastorOSDList{})
70 | }
71 | 
--------------------------------------------------------------------------------
/api/v1/vitastorpool_types.go:
--------------------------------------------------------------------------------
1 | /*
2 | Copyright 2022.
3 | 
4 | Licensed under the Apache License, Version 2.0 (the "License");
5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at
7 | 
8 |     http://www.apache.org/licenses/LICENSE-2.0
9 | 
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and
14 | limitations under the License.
15 | */
16 | 
17 | package v1
18 | 
19 | import (
20 | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
21 | )
22 | 
23 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
24 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
25 | 
26 | // VitastorPoolSpec defines the desired state of VitastorPool
27 | type VitastorPoolSpec struct {
28 | 	// Unique ID of the pool
29 | 	ID string `json:"id"`
30 | 	Scheme string `json:"scheme"`
31 | 	PGSize int32 `json:"pgSize"`
32 | 	ParityChunks int32 `json:"parityChunks,omitempty"`
33 | 	PGMinSize int32 `json:"pgMinSize"`
34 | 	PGCount int32 `json:"pgCount"`
35 | 	FailureDomain string `json:"failureDomain"`
36 | 	MaxOSDCombinations int32 `json:"maxOSDCombinations,omitempty"`
37 | 	BlockSize int32 `json:"blockSize,omitempty"`
38 | 	ImmediateCommit string `json:"immediateCommit,omitempty"`
39 | 	OSDTags string `json:"osdTags,omitempty"`
40 | }
41 | 
42 | // VitastorPoolStatus defines the observed state of VitastorPool
43 | type VitastorPoolStatus struct {
44 | 	// INSERT ADDITIONAL STATUS FIELD - define observed state of cluster
45 | 	// Important: Run "make" to regenerate code after modifying this file
46 | }
47 | 
48 | //+kubebuilder:object:root=true
49 | //+kubebuilder:subresource:status
50 | //+kubebuilder:resource:scope=Cluster
51 | 
52 | // VitastorPool is the Schema for the vitastorpools API
53 | type VitastorPool struct {
54 | 	metav1.TypeMeta   `json:",inline"`
55 | 	metav1.ObjectMeta `json:"metadata,omitempty"`
56 | 
57 | 	Spec   VitastorPoolSpec   `json:"spec,omitempty"`
58 | 	Status VitastorPoolStatus `json:"status,omitempty"`
59 | }
60 | 
61 | //+kubebuilder:object:root=true
62 | 
63 | // VitastorPoolList contains a list of VitastorPool
64 | type VitastorPoolList struct {
65 | 	metav1.TypeMeta `json:",inline"`
66 | 	metav1.ListMeta `json:"metadata,omitempty"`
67 | 	Items           []VitastorPool `json:"items"`
68 | }
69 | 
70 | func init() {
71 | 	SchemeBuilder.Register(&VitastorPool{}, &VitastorPoolList{})
72 | }
73 | 
--------------------------------------------------------------------------------
/api/v1/zz_generated.deepcopy.go:
--------------------------------------------------------------------------------
1 | //go:build !ignore_autogenerated
2 | 
3 | /*
4 | Copyright 2022.
5 | 
6 | Licensed under the Apache License, Version 2.0 (the "License");
7 | you may not use this file except in compliance with the License.
8 | You may obtain a copy of the License at
9 | 
10 |     http://www.apache.org/licenses/LICENSE-2.0
11 | 
12 | Unless required by applicable law or agreed to in writing, software
13 | distributed under the License is distributed on an "AS IS" BASIS,
14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 | */
18 | 
19 | // Code generated by controller-gen. DO NOT EDIT.
20 | 
21 | package v1
22 | 
23 | import (
24 | 	runtime "k8s.io/apimachinery/pkg/runtime"
25 | )
26 | 
27 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
28 | func (in *VitastorCluster) DeepCopyInto(out *VitastorCluster) {
29 | 	*out = *in
30 | 	out.TypeMeta = in.TypeMeta
31 | 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
32 | 	out.Spec = in.Spec
33 | 	out.Status = in.Status
34 | }
35 | 
36 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorCluster.
37 | func (in *VitastorCluster) DeepCopy() *VitastorCluster {
38 | 	if in == nil {
39 | 		return nil
40 | 	}
41 | 	out := new(VitastorCluster)
42 | 	in.DeepCopyInto(out)
43 | 	return out
44 | }
45 | 
46 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
47 | func (in *VitastorCluster) DeepCopyObject() runtime.Object { 48 | if c := in.DeepCopy(); c != nil { 49 | return c 50 | } 51 | return nil 52 | } 53 | 54 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 55 | func (in *VitastorClusterList) DeepCopyInto(out *VitastorClusterList) { 56 | *out = *in 57 | out.TypeMeta = in.TypeMeta 58 | in.ListMeta.DeepCopyInto(&out.ListMeta) 59 | if in.Items != nil { 60 | in, out := &in.Items, &out.Items 61 | *out = make([]VitastorCluster, len(*in)) 62 | for i := range *in { 63 | (*in)[i].DeepCopyInto(&(*out)[i]) 64 | } 65 | } 66 | } 67 | 68 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorClusterList. 69 | func (in *VitastorClusterList) DeepCopy() *VitastorClusterList { 70 | if in == nil { 71 | return nil 72 | } 73 | out := new(VitastorClusterList) 74 | in.DeepCopyInto(out) 75 | return out 76 | } 77 | 78 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 79 | func (in *VitastorClusterList) DeepCopyObject() runtime.Object { 80 | if c := in.DeepCopy(); c != nil { 81 | return c 82 | } 83 | return nil 84 | } 85 | 86 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 87 | func (in *VitastorClusterSpec) DeepCopyInto(out *VitastorClusterSpec) { 88 | *out = *in 89 | } 90 | 91 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorClusterSpec. 92 | func (in *VitastorClusterSpec) DeepCopy() *VitastorClusterSpec { 93 | if in == nil { 94 | return nil 95 | } 96 | out := new(VitastorClusterSpec) 97 | in.DeepCopyInto(out) 98 | return out 99 | } 100 | 101 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 102 | func (in *VitastorClusterStatus) DeepCopyInto(out *VitastorClusterStatus) { 103 | *out = *in 104 | } 105 | 106 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorClusterStatus. 107 | func (in *VitastorClusterStatus) DeepCopy() *VitastorClusterStatus { 108 | if in == nil { 109 | return nil 110 | } 111 | out := new(VitastorClusterStatus) 112 | in.DeepCopyInto(out) 113 | return out 114 | } 115 | 116 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 117 | func (in *VitastorNode) DeepCopyInto(out *VitastorNode) { 118 | *out = *in 119 | out.TypeMeta = in.TypeMeta 120 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 121 | out.Spec = in.Spec 122 | in.Status.DeepCopyInto(&out.Status) 123 | } 124 | 125 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorNode. 126 | func (in *VitastorNode) DeepCopy() *VitastorNode { 127 | if in == nil { 128 | return nil 129 | } 130 | out := new(VitastorNode) 131 | in.DeepCopyInto(out) 132 | return out 133 | } 134 | 135 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 136 | func (in *VitastorNode) DeepCopyObject() runtime.Object { 137 | if c := in.DeepCopy(); c != nil { 138 | return c 139 | } 140 | return nil 141 | } 142 | 143 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
144 | func (in *VitastorNodeList) DeepCopyInto(out *VitastorNodeList) { 145 | *out = *in 146 | out.TypeMeta = in.TypeMeta 147 | in.ListMeta.DeepCopyInto(&out.ListMeta) 148 | if in.Items != nil { 149 | in, out := &in.Items, &out.Items 150 | *out = make([]VitastorNode, len(*in)) 151 | for i := range *in { 152 | (*in)[i].DeepCopyInto(&(*out)[i]) 153 | } 154 | } 155 | } 156 | 157 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorNodeList. 158 | func (in *VitastorNodeList) DeepCopy() *VitastorNodeList { 159 | if in == nil { 160 | return nil 161 | } 162 | out := new(VitastorNodeList) 163 | in.DeepCopyInto(out) 164 | return out 165 | } 166 | 167 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 168 | func (in *VitastorNodeList) DeepCopyObject() runtime.Object { 169 | if c := in.DeepCopy(); c != nil { 170 | return c 171 | } 172 | return nil 173 | } 174 | 175 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 176 | func (in *VitastorNodeSpec) DeepCopyInto(out *VitastorNodeSpec) { 177 | *out = *in 178 | } 179 | 180 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorNodeSpec. 181 | func (in *VitastorNodeSpec) DeepCopy() *VitastorNodeSpec { 182 | if in == nil { 183 | return nil 184 | } 185 | out := new(VitastorNodeSpec) 186 | in.DeepCopyInto(out) 187 | return out 188 | } 189 | 190 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 191 | func (in *VitastorNodeStatus) DeepCopyInto(out *VitastorNodeStatus) { 192 | *out = *in 193 | if in.Disks != nil { 194 | in, out := &in.Disks, &out.Disks 195 | *out = make([]string, len(*in)) 196 | copy(*out, *in) 197 | } 198 | if in.EmptyDisks != nil { 199 | in, out := &in.EmptyDisks, &out.EmptyDisks 200 | *out = make([]string, len(*in)) 201 | copy(*out, *in) 202 | } 203 | if in.VitastorDisks != nil { 204 | in, out := &in.VitastorDisks, &out.VitastorDisks 205 | *out = make([]string, len(*in)) 206 | copy(*out, *in) 207 | } 208 | } 209 | 210 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorNodeStatus. 211 | func (in *VitastorNodeStatus) DeepCopy() *VitastorNodeStatus { 212 | if in == nil { 213 | return nil 214 | } 215 | out := new(VitastorNodeStatus) 216 | in.DeepCopyInto(out) 217 | return out 218 | } 219 | 220 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 221 | func (in *VitastorOSD) DeepCopyInto(out *VitastorOSD) { 222 | *out = *in 223 | out.TypeMeta = in.TypeMeta 224 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 225 | out.Spec = in.Spec 226 | out.Status = in.Status 227 | } 228 | 229 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorOSD. 230 | func (in *VitastorOSD) DeepCopy() *VitastorOSD { 231 | if in == nil { 232 | return nil 233 | } 234 | out := new(VitastorOSD) 235 | in.DeepCopyInto(out) 236 | return out 237 | } 238 | 239 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 240 | func (in *VitastorOSD) DeepCopyObject() runtime.Object { 241 | if c := in.DeepCopy(); c != nil { 242 | return c 243 | } 244 | return nil 245 | } 246 | 247 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
248 | func (in *VitastorOSDList) DeepCopyInto(out *VitastorOSDList) { 249 | *out = *in 250 | out.TypeMeta = in.TypeMeta 251 | in.ListMeta.DeepCopyInto(&out.ListMeta) 252 | if in.Items != nil { 253 | in, out := &in.Items, &out.Items 254 | *out = make([]VitastorOSD, len(*in)) 255 | for i := range *in { 256 | (*in)[i].DeepCopyInto(&(*out)[i]) 257 | } 258 | } 259 | } 260 | 261 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorOSDList. 262 | func (in *VitastorOSDList) DeepCopy() *VitastorOSDList { 263 | if in == nil { 264 | return nil 265 | } 266 | out := new(VitastorOSDList) 267 | in.DeepCopyInto(out) 268 | return out 269 | } 270 | 271 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 272 | func (in *VitastorOSDList) DeepCopyObject() runtime.Object { 273 | if c := in.DeepCopy(); c != nil { 274 | return c 275 | } 276 | return nil 277 | } 278 | 279 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 280 | func (in *VitastorOSDSpec) DeepCopyInto(out *VitastorOSDSpec) { 281 | *out = *in 282 | } 283 | 284 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorOSDSpec. 285 | func (in *VitastorOSDSpec) DeepCopy() *VitastorOSDSpec { 286 | if in == nil { 287 | return nil 288 | } 289 | out := new(VitastorOSDSpec) 290 | in.DeepCopyInto(out) 291 | return out 292 | } 293 | 294 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 295 | func (in *VitastorOSDStatus) DeepCopyInto(out *VitastorOSDStatus) { 296 | *out = *in 297 | } 298 | 299 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorOSDStatus. 300 | func (in *VitastorOSDStatus) DeepCopy() *VitastorOSDStatus { 301 | if in == nil { 302 | return nil 303 | } 304 | out := new(VitastorOSDStatus) 305 | in.DeepCopyInto(out) 306 | return out 307 | } 308 | 309 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 310 | func (in *VitastorPool) DeepCopyInto(out *VitastorPool) { 311 | *out = *in 312 | out.TypeMeta = in.TypeMeta 313 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 314 | out.Spec = in.Spec 315 | out.Status = in.Status 316 | } 317 | 318 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorPool. 319 | func (in *VitastorPool) DeepCopy() *VitastorPool { 320 | if in == nil { 321 | return nil 322 | } 323 | out := new(VitastorPool) 324 | in.DeepCopyInto(out) 325 | return out 326 | } 327 | 328 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 329 | func (in *VitastorPool) DeepCopyObject() runtime.Object { 330 | if c := in.DeepCopy(); c != nil { 331 | return c 332 | } 333 | return nil 334 | } 335 | 336 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
337 | func (in *VitastorPoolList) DeepCopyInto(out *VitastorPoolList) { 338 | *out = *in 339 | out.TypeMeta = in.TypeMeta 340 | in.ListMeta.DeepCopyInto(&out.ListMeta) 341 | if in.Items != nil { 342 | in, out := &in.Items, &out.Items 343 | *out = make([]VitastorPool, len(*in)) 344 | for i := range *in { 345 | (*in)[i].DeepCopyInto(&(*out)[i]) 346 | } 347 | } 348 | } 349 | 350 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorPoolList. 351 | func (in *VitastorPoolList) DeepCopy() *VitastorPoolList { 352 | if in == nil { 353 | return nil 354 | } 355 | out := new(VitastorPoolList) 356 | in.DeepCopyInto(out) 357 | return out 358 | } 359 | 360 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 361 | func (in *VitastorPoolList) DeepCopyObject() runtime.Object { 362 | if c := in.DeepCopy(); c != nil { 363 | return c 364 | } 365 | return nil 366 | } 367 | 368 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 369 | func (in *VitastorPoolSpec) DeepCopyInto(out *VitastorPoolSpec) { 370 | *out = *in 371 | } 372 | 373 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorPoolSpec. 374 | func (in *VitastorPoolSpec) DeepCopy() *VitastorPoolSpec { 375 | if in == nil { 376 | return nil 377 | } 378 | out := new(VitastorPoolSpec) 379 | in.DeepCopyInto(out) 380 | return out 381 | } 382 | 383 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 384 | func (in *VitastorPoolStatus) DeepCopyInto(out *VitastorPoolStatus) { 385 | *out = *in 386 | } 387 | 388 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VitastorPoolStatus. 389 | func (in *VitastorPoolStatus) DeepCopy() *VitastorPoolStatus { 390 | if in == nil { 391 | return nil 392 | } 393 | out := new(VitastorPoolStatus) 394 | in.DeepCopyInto(out) 395 | return out 396 | } 397 | -------------------------------------------------------------------------------- /config/crd/bases/control.vitastor.io_vitastorclusters.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.14.0 7 | name: vitastorclusters.control.vitastor.io 8 | spec: 9 | group: control.vitastor.io 10 | names: 11 | kind: VitastorCluster 12 | listKind: VitastorClusterList 13 | plural: vitastorclusters 14 | singular: vitastorcluster 15 | scope: Cluster 16 | versions: 17 | - name: v1 18 | schema: 19 | openAPIV3Schema: 20 | description: VitastorCluster is the Schema for the vitastorclusters API 21 | properties: 22 | apiVersion: 23 | description: |- 24 | APIVersion defines the versioned schema of this representation of an object. 25 | Servers should convert recognized schemas to the latest internal value, and 26 | may reject unrecognized values. 27 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 28 | type: string 29 | kind: 30 | description: |- 31 | Kind is a string value representing the REST resource this object represents. 32 | Servers may infer this from the endpoint the client submits requests to. 33 | Cannot be updated. 34 | In CamelCase. 
35 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 36 | type: string 37 | metadata: 38 | type: object 39 | spec: 40 | description: VitastorClusterSpec defines the desired state of VitastorCluster 41 | properties: 42 | agentImage: 43 | description: Agent image name/tag 44 | type: string 45 | disksReconciligPeriod: 46 | description: Reconcile period in seconds 47 | type: integer 48 | monitorImage: 49 | description: Monitor image name/tag 50 | type: string 51 | monitorReplicaNum: 52 | description: Number of replicas for monitors 53 | type: integer 54 | osdImage: 55 | description: OSD image name/tag 56 | type: string 57 | vitastorNodeLabel: 58 | description: Node label for Agent DaemonSet nodeSelector 59 | type: string 60 | required: 61 | - agentImage 62 | - disksReconciligPeriod 63 | - monitorImage 64 | - monitorReplicaNum 65 | - osdImage 66 | - vitastorNodeLabel 67 | type: object 68 | status: 69 | description: VitastorClusterStatus defines the observed state of VitastorCluster 70 | type: object 71 | type: object 72 | served: true 73 | storage: true 74 | subresources: 75 | status: {} 76 | -------------------------------------------------------------------------------- /config/crd/bases/control.vitastor.io_vitastornodes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.14.0 7 | name: vitastornodes.control.vitastor.io 8 | spec: 9 | group: control.vitastor.io 10 | names: 11 | kind: VitastorNode 12 | listKind: VitastorNodeList 13 | plural: vitastornodes 14 | singular: vitastornode 15 | scope: Cluster 16 | versions: 17 | - name: v1 18 | schema: 19 | openAPIV3Schema: 20 | description: VitastorNode is the Schema for the vitastornodes API 21 | properties: 22 | apiVersion: 23 | description: |- 24 | APIVersion defines the versioned schema of this representation of an object. 25 | Servers should convert recognized schemas to the latest internal value, and 26 | may reject unrecognized values. 27 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 28 | type: string 29 | kind: 30 | description: |- 31 | Kind is a string value representing the REST resource this object represents. 32 | Servers may infer this from the endpoint the client submits requests to. 33 | Cannot be updated. 34 | In CamelCase. 
35 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 36 | type: string 37 | metadata: 38 | type: object 39 | spec: 40 | description: VitastorNodeSpec defines the desired state of VitastorNode 41 | properties: 42 | nodeName: 43 | description: Name of node that have disks for OSDs 44 | type: string 45 | osdImage: 46 | description: OSD image name/tag 47 | type: string 48 | required: 49 | - nodeName 50 | - osdImage 51 | type: object 52 | status: 53 | description: VitastorNodeStatus defines the observed state of VitastorNode 54 | properties: 55 | disks: 56 | description: List of disks on that node 57 | items: 58 | type: string 59 | type: array 60 | emptyDisks: 61 | description: List of empty disks (without any partition) on that node 62 | items: 63 | type: string 64 | type: array 65 | vitastorDisks: 66 | description: List of Vitastor OSDs on that node 67 | items: 68 | type: string 69 | type: array 70 | type: object 71 | type: object 72 | served: true 73 | storage: true 74 | subresources: 75 | status: {} 76 | -------------------------------------------------------------------------------- /config/crd/bases/control.vitastor.io_vitastorosds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.14.0 7 | name: vitastorosds.control.vitastor.io 8 | spec: 9 | group: control.vitastor.io 10 | names: 11 | kind: VitastorOSD 12 | listKind: VitastorOSDList 13 | plural: vitastorosds 14 | singular: vitastorosd 15 | scope: Cluster 16 | versions: 17 | - name: v1 18 | schema: 19 | openAPIV3Schema: 20 | description: VitastorOSD is the Schema for the vitastorosds API 21 | properties: 22 | apiVersion: 23 | description: |- 24 | APIVersion defines the versioned schema of this representation of an object. 25 | Servers should convert recognized schemas to the latest internal value, and 26 | may reject unrecognized values. 27 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 28 | type: string 29 | kind: 30 | description: |- 31 | Kind is a string value representing the REST resource this object represents. 32 | Servers may infer this from the endpoint the client submits requests to. 33 | Cannot be updated. 34 | In CamelCase. 35 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 36 | type: string 37 | metadata: 38 | type: object 39 | spec: 40 | description: VitastorOSDSpec defines the desired state of VitastorOSD 41 | properties: 42 | nodeName: 43 | description: Name of node 44 | type: string 45 | osdImage: 46 | description: OSD container image 47 | type: string 48 | osdNumber: 49 | description: Number allocated to OSD 50 | type: integer 51 | osdPath: 52 | description: Path to OSD disk (i.e. /dev/disk/by-partuuid/<...>) 53 | type: string 54 | osdTags: 55 | description: // Tags that applied to OSD (divided by comma, i.e. 
"hostN,nvme,dcN") 56 | type: string 57 | required: 58 | - nodeName 59 | - osdImage 60 | - osdNumber 61 | - osdPath 62 | type: object 63 | status: 64 | description: VitastorOSDStatus defines the observed state of VitastorOSD 65 | type: object 66 | type: object 67 | served: true 68 | storage: true 69 | subresources: 70 | status: {} 71 | -------------------------------------------------------------------------------- /config/crd/bases/control.vitastor.io_vitastorpools.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.14.0 7 | name: vitastorpools.control.vitastor.io 8 | spec: 9 | group: control.vitastor.io 10 | names: 11 | kind: VitastorPool 12 | listKind: VitastorPoolList 13 | plural: vitastorpools 14 | singular: vitastorpool 15 | scope: Namespaced 16 | versions: 17 | - name: v1 18 | schema: 19 | openAPIV3Schema: 20 | description: VitastorPool is the Schema for the vitastorpools API 21 | properties: 22 | apiVersion: 23 | description: |- 24 | APIVersion defines the versioned schema of this representation of an object. 25 | Servers should convert recognized schemas to the latest internal value, and 26 | may reject unrecognized values. 27 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources 28 | type: string 29 | kind: 30 | description: |- 31 | Kind is a string value representing the REST resource this object represents. 32 | Servers may infer this from the endpoint the client submits requests to. 33 | Cannot be updated. 34 | In CamelCase. 35 | More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds 36 | type: string 37 | metadata: 38 | type: object 39 | spec: 40 | description: VitastorPoolSpec defines the desired state of VitastorPool 41 | properties: 42 | blockSize: 43 | format: int32 44 | type: integer 45 | failureDomain: 46 | type: string 47 | id: 48 | description: Foo is an example field of VitastorPool. 
50 | type: string 51 | immediateCommit: 52 | type: string 53 | maxOSDCombinations: 54 | format: int32 55 | type: integer 56 | osdTags: 57 | type: string 58 | parityChunks: 59 | format: int32 60 | type: integer 61 | pgCount: 62 | format: int32 63 | type: integer 64 | pgMinSize: 65 | format: int32 66 | type: integer 67 | pgSize: 68 | format: int32 69 | type: integer 70 | scheme: 71 | type: string 72 | required: 73 | - failureDomain 74 | - id 75 | - pgCount 76 | - pgMinSize 77 | - pgSize 78 | - scheme 79 | type: object 80 | status: 81 | description: VitastorPoolStatus defines the observed state of VitastorPool 82 | type: object 83 | type: object 84 | served: true 85 | storage: true 86 | subresources: 87 | status: {} 88 | -------------------------------------------------------------------------------- /config/crd/bases/pool.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.9.2 7 | creationTimestamp: null 8 | name: vitastorpools.control.vitastor.io 9 | spec: 10 | group: control.vitastor.io 11 | names: 12 | kind: VitastorPool 13 | listKind: VitastorPoolList 14 | plural: vitastorpools 15 | singular: vitastorpool 16 | scope: Namespaced 17 | versions: 18 | - name: v1 19 | schema: 20 | openAPIV3Schema: 21 | description: VitastorPool is the Schema for the vitastorpools API 22 | properties: 23 | apiVersion: 24 | description: 'APIVersion defines the versioned schema of this representation 25 | of an object. Servers should convert recognized schemas to the latest 26 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 27 | type: string 28 | kind: 29 | description: 'Kind is a string value representing the REST resource this 30 | object represents. Servers may infer this from the endpoint the client 31 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 32 | type: string 33 | metadata: 34 | type: object 35 | spec: 36 | description: VitastorPoolSpec defines the desired state of VitastorPool 37 | properties: 38 | blockSize: 39 | format: int32 40 | type: integer 41 | failureDomain: 42 | type: string 43 | id: 44 | description: ID of the pool
46 | type: string 47 | immediateCommit: 48 | type: string 49 | maxOSDCombinations: 50 | format: int32 51 | type: integer 52 | osdTags: 53 | type: string 54 | parityChunks: 55 | format: int32 56 | type: integer 57 | pgCount: 58 | format: int32 59 | type: integer 60 | pgMinSize: 61 | format: int32 62 | type: integer 63 | pgSize: 64 | format: int32 65 | type: integer 66 | scheme: 67 | type: string 68 | required: 69 | - failureDomain 70 | - id 71 | - pgCount 72 | - pgMinSize 73 | - pgSize 74 | - scheme 75 | type: object 76 | status: 77 | description: VitastorPoolStatus defines the observed state of VitastorPool 78 | type: object 79 | type: object 80 | served: true 81 | storage: true 82 | subresources: 83 | status: {} 84 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/control.vitastor.io_vitastornodes.yaml 6 | - bases/control.vitastor.io_vitastorosds.yaml 7 | - bases/control.vitastor.io_vitastorclusters.yaml 8 | - bases/control.vitastor.io_vitastorpools.yaml 9 | #+kubebuilder:scaffold:crdkustomizeresource 10 | 11 | patchesStrategicMerge: 12 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 13 | # patches here are for enabling the conversion webhook for each CRD 14 | #- patches/webhook_in_vitastornodes.yaml 15 | #- patches/webhook_in_vitastorosds.yaml 16 | #- patches/webhook_in_vitastorclusters.yaml 17 | #- patches/webhook_in_vitastorpools.yaml 18 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 19 | 20 | # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. 21 | # patches here are for enabling the CA injection for each CRD 22 | #- patches/cainjection_in_vitastornodes.yaml 23 | #- patches/cainjection_in_vitastorosds.yaml 24 | #- patches/cainjection_in_vitastorclusters.yaml 25 | #- patches/cainjection_in_vitastorpools.yaml 26 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 27 | 28 | # the following config is for teaching kustomize how to do kustomization for CRDs.
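# For reference, a minimal VitastorCluster exercising the CRD in
# bases/control.vitastor.io_vitastorclusters.yaml might look like the sketch
# below (image references are placeholders; `disksReconciligPeriod` is spelled
# exactly as the CRD declares it):
#
#   apiVersion: control.vitastor.io/v1
#   kind: VitastorCluster
#   metadata:
#     name: vitastor
#   spec:
#     agentImage: registry.example.com/vitastor-agent:v0.1
#     monitorImage: registry.example.com/vitastor-monitor:v0.1
#     osdImage: registry.example.com/vitastor-osd:v0.1
#     monitorReplicaNum: 3
#     disksReconciligPeriod: 60
#     vitastorNodeLabel: vitastor-node
#
# In a standard kubebuilder layout these bases are installed via something like
# `kustomize build config/crd | kubectl apply -f -` (the Makefile `install`
# target), not applied file-by-file.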
29 | configurations: 30 | - kustomizeconfig.yaml 31 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_vitastorclusters.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: vitastorclusters.control.vitastor.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_vitastornodes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: vitastornodes.control.vitastor.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_vitastorosds.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: vitastorosds.control.vitastor.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_vitastorpools.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: vitastorpools.control.vitastor.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_vitastorclusters.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: vitastorclusters.control.vitastor.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 
15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_vitastornodes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: vitastornodes.control.vitastor.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_vitastorosds.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: vitastorosds.control.vitastor.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_vitastorpools.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: vitastorpools.control.vitastor.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | conversionReviewVersions: 16 | - v1 17 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: vitastor-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: vitastor-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | #- ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | #- ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | #- ../prometheus 26 | 27 | patchesStrategicMerge: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 
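# (Note: newer kustomize releases deprecate `patchesStrategicMerge` in favor of
# `patches`; if you hit deprecation warnings, the equivalent form is roughly:
#   patches:
#     - path: manager_auth_proxy_patch.yaml
# The scaffolded field is kept below.)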
31 | - manager_auth_proxy_patch.yaml 32 | 33 | 34 | 35 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 36 | # crd/kustomization.yaml 37 | #- manager_webhook_patch.yaml 38 | 39 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 40 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 41 | # 'CERTMANAGER' needs to be enabled to use ca injection 42 | #- webhookcainjection_patch.yaml 43 | 44 | # the following config is for teaching kustomize how to do var substitution 45 | vars: 46 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 47 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 48 | # objref: 49 | # kind: Certificate 50 | # group: cert-manager.io 51 | # version: v1 52 | # name: serving-cert # this name should match the one in certificate.yaml 53 | # fieldref: 54 | # fieldpath: metadata.namespace 55 | #- name: CERTIFICATE_NAME 56 | # objref: 57 | # kind: Certificate 58 | # group: cert-manager.io 59 | # version: v1 60 | # name: serving-cert # this name should match the one in certificate.yaml 61 | #- name: SERVICE_NAMESPACE # namespace of the service 62 | # objref: 63 | # kind: Service 64 | # version: v1 65 | # name: webhook-service 66 | # fieldref: 67 | # fieldpath: metadata.namespace 68 | #- name: SERVICE_NAME 69 | # objref: 70 | # kind: Service 71 | # version: v1 72 | # name: webhook-service 73 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch injects a sidecar container which is an HTTP proxy for the 2 | # controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
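# With this patch applied, metrics are only reachable through the proxy on port
# 8443, and callers need a ServiceAccount token that passes a
# SubjectAccessReview (the metrics-reader ClusterRole under config/rbac grants
# the required `get` on /metrics). A manual scrape could then look roughly like
# (a sketch; the ServiceAccount name is a placeholder):
#   TOKEN=$(kubectl create token <serviceaccount-bound-to-metrics-reader>)
#   curl -k -H "Authorization: Bearer $TOKEN" https://<pod-or-service-ip>:8443/metrics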
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | affinity: 12 | nodeAffinity: 13 | requiredDuringSchedulingIgnoredDuringExecution: 14 | nodeSelectorTerms: 15 | - matchExpressions: 16 | - key: kubernetes.io/arch 17 | operator: In 18 | values: 19 | - amd64 20 | - arm64 21 | - ppc64le 22 | - s390x 23 | - key: kubernetes.io/os 24 | operator: In 25 | values: 26 | - linux 27 | containers: 28 | - name: kube-rbac-proxy 29 | securityContext: 30 | allowPrivilegeEscalation: false 31 | capabilities: 32 | drop: 33 | - "ALL" 34 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.0 35 | args: 36 | - "--secure-listen-address=0.0.0.0:8443" 37 | - "--upstream=http://127.0.0.1:8080/" 38 | - "--logtostderr=true" 39 | - "--v=0" 40 | ports: 41 | - containerPort: 8443 42 | protocol: TCP 43 | name: https 44 | resources: 45 | limits: 46 | cpu: 500m 47 | memory: 128Mi 48 | requests: 49 | cpu: 5m 50 | memory: 64Mi 51 | - name: manager 52 | args: 53 | - "--health-probe-bind-address=:8081" 54 | - "--metrics-bind-address=127.0.0.1:8080" 55 | - "--leader-elect" 56 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: namespace 7 | app.kubernetes.io/instance: system 8 | app.kubernetes.io/component: manager 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: system 13 | --- 14 | apiVersion: apps/v1 15 | kind: Deployment 16 | metadata: 17 | name: controller-manager 18 | namespace: system 19 | labels: 20 | control-plane: controller-manager 21 | app.kubernetes.io/name: deployment 22 | app.kubernetes.io/instance: controller-manager 23 | app.kubernetes.io/component: manager 24 | app.kubernetes.io/created-by: vitastor-operator 25 | app.kubernetes.io/part-of: vitastor-operator 26 | app.kubernetes.io/managed-by: kustomize 27 | spec: 28 | selector: 29 | matchLabels: 30 | control-plane: controller-manager 31 | replicas: 1 32 | template: 33 | metadata: 34 | annotations: 35 | kubectl.kubernetes.io/default-container: manager 36 | labels: 37 | control-plane: controller-manager 38 | spec: 39 | # TODO(user): Uncomment the following code to configure the nodeAffinity expression 40 | # according to the platforms which are supported by your solution. 41 | # It is considered best practice to support multiple architectures. You can 42 | # build your manager image using the makefile target docker-buildx. 
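# A sketch of such a multi-arch build with the scaffolded target (the image
# name is a placeholder):
#   make docker-buildx IMG=registry.example.com/vitastor-operator:v0.1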
43 | # affinity: 44 | # nodeAffinity: 45 | # requiredDuringSchedulingIgnoredDuringExecution: 46 | # nodeSelectorTerms: 47 | # - matchExpressions: 48 | # - key: kubernetes.io/arch 49 | # operator: In 50 | # values: 51 | # - amd64 52 | # - arm64 53 | # - ppc64le 54 | # - s390x 55 | # - key: kubernetes.io/os 56 | # operator: In 57 | # values: 58 | # - linux 59 | securityContext: 60 | runAsNonRoot: true 61 | # TODO(user): For common cases that do not require escalating privileges 62 | # it is recommended to ensure that all your Pods/Containers are restrictive. 63 | # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted 64 | # Please uncomment the following code if your project does NOT have to work on old Kubernetes 65 | # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). 66 | # seccompProfile: 67 | # type: RuntimeDefault 68 | containers: 69 | - command: 70 | - /manager 71 | args: 72 | - --leader-elect 73 | image: controller:latest 74 | name: manager 75 | securityContext: 76 | allowPrivilegeEscalation: false 77 | capabilities: 78 | drop: 79 | - "ALL" 80 | livenessProbe: 81 | httpGet: 82 | path: /healthz 83 | port: 8081 84 | initialDelaySeconds: 15 85 | periodSeconds: 20 86 | readinessProbe: 87 | httpGet: 88 | path: /readyz 89 | port: 8081 90 | initialDelaySeconds: 5 91 | periodSeconds: 10 92 | # TODO(user): Configure the resources accordingly based on the project requirements. 93 | # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ 94 | resources: 95 | limits: 96 | cpu: 500m 97 | memory: 128Mi 98 | requests: 99 | cpu: 10m 100 | memory: 64Mi 101 | serviceAccountName: controller-manager 102 | terminationGracePeriodSeconds: 10 103 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | app.kubernetes.io/name: servicemonitor 9 | app.kubernetes.io/instance: controller-manager-metrics-monitor 10 | app.kubernetes.io/component: metrics 11 | app.kubernetes.io/created-by: vitastor-operator 12 | app.kubernetes.io/part-of: vitastor-operator 13 | app.kubernetes.io/managed-by: kustomize 14 | name: controller-manager-metrics-monitor 15 | namespace: system 16 | spec: 17 | endpoints: 18 | - path: /metrics 19 | port: https 20 | scheme: https 21 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 22 | tlsConfig: 23 | insecureSkipVerify: true 24 | selector: 25 | matchLabels: 26 | control-plane: controller-manager 27 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: metrics-reader 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: vitastor-operator 9 | 
app.kubernetes.io/part-of: vitastor-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: metrics-reader 12 | rules: 13 | - nonResourceURLs: 14 | - "/metrics" 15 | verbs: 16 | - get 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrole 6 | app.kubernetes.io/instance: proxy-role 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: vitastor-operator 9 | app.kubernetes.io/part-of: vitastor-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-role 12 | rules: 13 | - apiGroups: 14 | - authentication.k8s.io 15 | resources: 16 | - tokenreviews 17 | verbs: 18 | - create 19 | - apiGroups: 20 | - authorization.k8s.io 21 | resources: 22 | - subjectaccessreviews 23 | verbs: 24 | - create 25 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: proxy-rolebinding 7 | app.kubernetes.io/component: kube-rbac-proxy 8 | app.kubernetes.io/created-by: vitastor-operator 9 | app.kubernetes.io/part-of: vitastor-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: proxy-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: proxy-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | app.kubernetes.io/name: service 7 | app.kubernetes.io/instance: controller-manager-metrics-service 8 | app.kubernetes.io/component: kube-rbac-proxy 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: controller-manager-metrics-service 13 | namespace: system 14 | spec: 15 | ports: 16 | - name: https 17 | port: 8443 18 | protocol: TCP 19 | targetPort: https 20 | selector: 21 | control-plane: controller-manager 22 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | # All RBAC will be applied under this service account in 3 | # the deployment namespace. You may comment out this resource 4 | # if your manager will use a service account that exists at 5 | # runtime. Be sure to update RoleBinding and ClusterRoleBinding 6 | # subjects if changing service account names. 7 | - service_account.yaml 8 | - role.yaml 9 | - role_binding.yaml 10 | - leader_election_role.yaml 11 | - leader_election_role_binding.yaml 12 | # Comment the following 4 lines if you want to disable 13 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 14 | # which protects your /metrics endpoint. 
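# If you do disable them, also remove the kube-rbac-proxy sidecar patch
# (manager_auth_proxy_patch.yaml) in config/default/kustomization.yaml so the
# manager Deployment no longer references the proxy.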
15 | - auth_proxy_service.yaml 16 | - auth_proxy_role.yaml 17 | - auth_proxy_role_binding.yaml 18 | - auth_proxy_client_clusterrole.yaml 19 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: role 7 | app.kubernetes.io/instance: leader-election-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: leader-election-role 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - configmaps 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - create 23 | - update 24 | - patch 25 | - delete 26 | - apiGroups: 27 | - coordination.k8s.io 28 | resources: 29 | - leases 30 | verbs: 31 | - get 32 | - list 33 | - watch 34 | - create 35 | - update 36 | - patch 37 | - delete 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - events 42 | verbs: 43 | - create 44 | - patch 45 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: rolebinding 6 | app.kubernetes.io/instance: leader-election-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: vitastor-operator 9 | app.kubernetes.io/part-of: vitastor-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: leader-election-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: Role 15 | name: leader-election-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: manager-role 6 | rules: 7 | - apiGroups: 8 | - apps 9 | resources: 10 | - daemonsets 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - apps 21 | resources: 22 | - daemonsets/status 23 | verbs: 24 | - get 25 | - apiGroups: 26 | - apps 27 | resources: 28 | - deployments 29 | verbs: 30 | - create 31 | - delete 32 | - get 33 | - list 34 | - patch 35 | - update 36 | - watch 37 | - apiGroups: 38 | - apps 39 | resources: 40 | - deployments/status 41 | verbs: 42 | - get 43 | - apiGroups: 44 | - apps 45 | resources: 46 | - statefulsets 47 | verbs: 48 | - create 49 | - delete 50 | - get 51 | - list 52 | - patch 53 | - update 54 | - watch 55 | - apiGroups: 56 | - apps 57 | resources: 58 | - statefulsets/status 59 | verbs: 60 | - get 61 | - apiGroups: 62 | - control.vitastor.io 63 | resources: 64 | - vitastorclusters 65 | verbs: 66 | - create 67 | - delete 68 | - get 69 | - list 70 | - patch 71 | - update 72 | - watch 73 | - apiGroups: 74 | - control.vitastor.io 75 | resources: 76 | - vitastorclusters/finalizers 77 | verbs: 78 | - update 79 | - apiGroups: 80 | - control.vitastor.io 81 | resources: 82 | - 
vitastorclusters/status 83 | verbs: 84 | - get 85 | - patch 86 | - update 87 | - apiGroups: 88 | - control.vitastor.io 89 | resources: 90 | - vitastornodes 91 | verbs: 92 | - create 93 | - delete 94 | - get 95 | - list 96 | - patch 97 | - update 98 | - watch 99 | - apiGroups: 100 | - control.vitastor.io 101 | resources: 102 | - vitastornodes/finalizers 103 | verbs: 104 | - update 105 | - apiGroups: 106 | - control.vitastor.io 107 | resources: 108 | - vitastornodes/status 109 | verbs: 110 | - get 111 | - patch 112 | - update 113 | - apiGroups: 114 | - control.vitastor.io 115 | resources: 116 | - vitastorosds 117 | verbs: 118 | - create 119 | - delete 120 | - get 121 | - list 122 | - patch 123 | - update 124 | - watch 125 | - apiGroups: 126 | - control.vitastor.io 127 | resources: 128 | - vitastorosds/finalizers 129 | verbs: 130 | - update 131 | - apiGroups: 132 | - control.vitastor.io 133 | resources: 134 | - vitastorosds/status 135 | verbs: 136 | - get 137 | - patch 138 | - update 139 | - apiGroups: 140 | - control.vitastor.io 141 | resources: 142 | - vitastorpools 143 | verbs: 144 | - create 145 | - delete 146 | - get 147 | - list 148 | - patch 149 | - update 150 | - watch 151 | - apiGroups: 152 | - control.vitastor.io 153 | resources: 154 | - vitastorpools/finalizers 155 | verbs: 156 | - update 157 | - apiGroups: 158 | - control.vitastor.io 159 | resources: 160 | - vitastorpools/status 161 | verbs: 162 | - get 163 | - patch 164 | - update 165 | - apiGroups: 166 | - storage.k8s.io 167 | resources: 168 | - storageclasses 169 | verbs: 170 | - create 171 | - delete 172 | - get 173 | - list 174 | - patch 175 | - update 176 | - watch 177 | - apiGroups: 178 | - "" 179 | resources: 180 | - configmaps 181 | verbs: 182 | - get 183 | - list 184 | - apiGroups: 185 | - "" 186 | resources: 187 | - pods 188 | verbs: 189 | - get 190 | - list 191 | - watch 192 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: clusterrolebinding 6 | app.kubernetes.io/instance: manager-rolebinding 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: vitastor-operator 9 | app.kubernetes.io/part-of: vitastor-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: manager-rolebinding 12 | roleRef: 13 | apiGroup: rbac.authorization.k8s.io 14 | kind: ClusterRole 15 | name: manager-role 16 | subjects: 17 | - kind: ServiceAccount 18 | name: controller-manager 19 | namespace: system 20 | -------------------------------------------------------------------------------- /config/rbac/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: serviceaccount 6 | app.kubernetes.io/instance: controller-manager 7 | app.kubernetes.io/component: rbac 8 | app.kubernetes.io/created-by: vitastor-operator 9 | app.kubernetes.io/part-of: vitastor-operator 10 | app.kubernetes.io/managed-by: kustomize 11 | name: controller-manager 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/vitastorcluster_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit
vitastorclusters. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: vitastorcluster-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: vitastorcluster-editor-role 13 | rules: 14 | - apiGroups: 15 | - control.vitastor.io 16 | resources: 17 | - vitastorclusters 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - control.vitastor.io 28 | resources: 29 | - vitastorclusters/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/vitastorcluster_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view vitastorclusters. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: vitastorcluster-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: vitastorcluster-viewer-role 13 | rules: 14 | - apiGroups: 15 | - control.vitastor.io 16 | resources: 17 | - vitastorclusters 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - control.vitastor.io 24 | resources: 25 | - vitastorclusters/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/vitastornode_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit vitastornodes. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: vitastornode-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: vitastornode-editor-role 13 | rules: 14 | - apiGroups: 15 | - control.vitastor.io 16 | resources: 17 | - vitastornodes 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - control.vitastor.io 28 | resources: 29 | - vitastornodes/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/vitastornode_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view vitastornodes. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: vitastornode-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: vitastornode-viewer-role 13 | rules: 14 | - apiGroups: 15 | - control.vitastor.io 16 | resources: 17 | - vitastornodes 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - control.vitastor.io 24 | resources: 25 | - vitastornodes/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/vitastorosd_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit vitastorosds. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: vitastorosd-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: vitastorosd-editor-role 13 | rules: 14 | - apiGroups: 15 | - control.vitastor.io 16 | resources: 17 | - vitastorosds 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - control.vitastor.io 28 | resources: 29 | - vitastorosds/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/vitastorosd_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view vitastorosds. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: vitastorosd-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: vitastorosd-viewer-role 13 | rules: 14 | - apiGroups: 15 | - control.vitastor.io 16 | resources: 17 | - vitastorosds 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - control.vitastor.io 24 | resources: 25 | - vitastorosds/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/rbac/vitastorpool_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit vitastorpools. 
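# A sketch of handing this role to a user once deployed (config/default adds a
# `vitastor-operator-` name prefix; the user name is a placeholder):
#   kubectl create clusterrolebinding vitastorpool-editors \
#     --clusterrole=vitastor-operator-vitastorpool-editor-role \
#     --user=jane@example.com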
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: vitastorpool-editor-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: vitastorpool-editor-role 13 | rules: 14 | - apiGroups: 15 | - control.vitastor.io 16 | resources: 17 | - vitastorpools 18 | verbs: 19 | - create 20 | - delete 21 | - get 22 | - list 23 | - patch 24 | - update 25 | - watch 26 | - apiGroups: 27 | - control.vitastor.io 28 | resources: 29 | - vitastorpools/status 30 | verbs: 31 | - get 32 | -------------------------------------------------------------------------------- /config/rbac/vitastorpool_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view vitastorpools. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: clusterrole 7 | app.kubernetes.io/instance: vitastorpool-viewer-role 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | app.kubernetes.io/managed-by: kustomize 12 | name: vitastorpool-viewer-role 13 | rules: 14 | - apiGroups: 15 | - control.vitastor.io 16 | resources: 17 | - vitastorpools 18 | verbs: 19 | - get 20 | - list 21 | - watch 22 | - apiGroups: 23 | - control.vitastor.io 24 | resources: 25 | - vitastorpools/status 26 | verbs: 27 | - get 28 | -------------------------------------------------------------------------------- /config/samples/control_v1_vitastorcluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: control.vitastor.io/v1 2 | kind: VitastorCluster 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: vitastorcluster 6 | app.kubernetes.io/instance: vitastorcluster-sample 7 | app.kubernetes.io/part-of: vitastor-operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: vitastor-operator 10 | name: vitastorcluster-sample 11 | spec: 12 | # TODO(user): Add fields here 13 | -------------------------------------------------------------------------------- /config/samples/control_v1_vitastornode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: control.vitastor.io/v1 2 | kind: VitastorNode 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: vitastornode 6 | app.kubernetes.io/instance: vitastornode-sample 7 | app.kubernetes.io/part-of: vitastor-operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: vitastor-operator 10 | name: vitastornode-sample 11 | spec: 12 | nodeName: "rke-node1" 13 | -------------------------------------------------------------------------------- /config/samples/control_v1_vitastorosd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: control.vitastor.io/v1 2 | kind: VitastorOSD 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: vitastorosd 6 | app.kubernetes.io/instance: vitastorosd-sample 7 | app.kubernetes.io/part-of: vitastor-operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: vitastor-operator 10 | name: vitastorosd-sample 11 | spec: 12 | nodeName: "rke-node1" 13 | osdPath:
'/dev/disk/by-partuuid/29f9c4ac-f9a0-a945-ac4a-96a7b3994fe2' 14 | 15 | -------------------------------------------------------------------------------- /config/samples/control_v1_vitastorpool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: control.vitastor.io/v1 2 | kind: VitastorPool 3 | metadata: 4 | labels: 5 | app.kubernetes.io/name: vitastorpool 6 | app.kubernetes.io/instance: vitastorpool-sample 7 | app.kubernetes.io/part-of: vitastor-operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: vitastor-operator 10 | name: vitastorpool-sample 11 | spec: 12 | # TODO(user): Add fields here 13 | -------------------------------------------------------------------------------- /controllers/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "path/filepath" 21 | "testing" 22 | 23 | . "github.com/onsi/ginkgo/v2" 24 | . "github.com/onsi/gomega" 25 | 26 | "k8s.io/client-go/kubernetes/scheme" 27 | "k8s.io/client-go/rest" 28 | "sigs.k8s.io/controller-runtime/pkg/client" 29 | "sigs.k8s.io/controller-runtime/pkg/envtest" 30 | logf "sigs.k8s.io/controller-runtime/pkg/log" 31 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 32 | 33 | controlv1 "gitlab.com/Antilles7227/vitastor-operator/api/v1" 34 | //+kubebuilder:scaffold:imports 35 | ) 36 | 37 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 38 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 39 | 40 | var cfg *rest.Config 41 | var k8sClient client.Client 42 | var testEnv *envtest.Environment 43 | 44 | func TestAPIs(t *testing.T) { 45 | RegisterFailHandler(Fail) 46 | 47 | RunSpecs(t, "Controller Suite") 48 | } 49 | 50 | var _ = BeforeSuite(func() { 51 | logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) 52 | 53 | By("bootstrapping test environment") 54 | testEnv = &envtest.Environment{ 55 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, 56 | ErrorIfCRDPathMissing: true, 57 | } 58 | 59 | var err error 60 | // cfg is defined in this file globally.
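// Note: testEnv.Start() launches a local kube-apiserver and etcd, so the
// envtest control-plane binaries must be available; they are typically
// provisioned with setup-envtest and located via the KUBEBUILDER_ASSETS
// environment variable when the suite is run through `make test`.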
61 | cfg, err = testEnv.Start() 62 | Expect(err).NotTo(HaveOccurred()) 63 | Expect(cfg).NotTo(BeNil()) 64 | 65 | err = controlv1.AddToScheme(scheme.Scheme) 66 | Expect(err).NotTo(HaveOccurred()) 67 | 68 | //+kubebuilder:scaffold:scheme 69 | 70 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 71 | Expect(err).NotTo(HaveOccurred()) 72 | Expect(k8sClient).NotTo(BeNil()) 73 | 74 | }) 75 | 76 | var _ = AfterSuite(func() { 77 | By("tearing down the test environment") 78 | err := testEnv.Stop() 79 | Expect(err).NotTo(HaveOccurred()) 80 | }) 81 | -------------------------------------------------------------------------------- /controllers/vitastorcluster_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "context" 21 | "encoding/json" 22 | "os" 23 | "time" 24 | 25 | clientv3 "go.etcd.io/etcd/client/v3" 26 | "k8s.io/apimachinery/pkg/api/errors" 27 | "k8s.io/apimachinery/pkg/api/resource" 28 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 29 | "k8s.io/apimachinery/pkg/runtime" 30 | "k8s.io/apimachinery/pkg/types" 31 | "k8s.io/apimachinery/pkg/util/intstr" 32 | 33 | appsv1 "k8s.io/api/apps/v1" 34 | corev1 "k8s.io/api/core/v1" 35 | ctrl "sigs.k8s.io/controller-runtime" 36 | "sigs.k8s.io/controller-runtime/pkg/client" 37 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 38 | "sigs.k8s.io/controller-runtime/pkg/log" 39 | 40 | controlv1 "gitlab.com/Antilles7227/vitastor-operator/api/v1" 41 | ) 42 | 43 | // VitastorClusterReconciler reconciles a VitastorCluster object 44 | type VitastorClusterReconciler struct { 45 | client.Client 46 | Scheme *runtime.Scheme 47 | } 48 | 49 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorclusters,verbs=get;list;watch;create;update;patch;delete 50 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorclusters/status,verbs=get;update;patch 51 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorclusters/finalizers,verbs=update 52 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastornodes,verbs=get;list;watch;create;update;patch;delete 53 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastornodes/finalizers,verbs=update 54 | //+kubebuilder:rbac:groups=apps,resources=daemonsets,verbs=get;list;watch;create;update;patch;delete 55 | //+kubebuilder:rbac:groups=apps,resources=daemonsets/status,verbs=get 56 | //+kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete 57 | //+kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get 58 | //+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list 59 | 60 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 61 | // move the current state of the cluster closer to the desired state.
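// In outline, the implementation below: loads /etc/vitastor/vitastor.conf and
// connects to etcd, writes the dc/host/osd placement_levels under the
// configured etcd prefix, then ensures the vitastor-monitor Deployment and the
// vitastor-agent DaemonSet exist and match the VitastorCluster spec, and
// finally creates a VitastorNode CR for every agent pod that lacks one.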
62 | func (r *VitastorClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { 63 | var log = log.FromContext(ctx) 64 | config, err := loadConfiguration(ctx, "/etc/vitastor/vitastor.conf") 65 | if err != nil { 66 | log.Error(err, "Unable to load vitastor.conf") 67 | return ctrl.Result{}, err 68 | } 69 | cli, err := clientv3.New(clientv3.Config{ 70 | Endpoints: config.VitastorEtcdUrls, 71 | DialTimeout: 5 * time.Second, 72 | }) 73 | if err != nil { 74 | log.Error(err, "Unable to connect to etcd") 75 | return ctrl.Result{}, err 76 | } 77 | defer cli.Close() 78 | namespace, isEmpty := os.LookupEnv("NAMESPACE") 79 | if !isEmpty { 80 | namespace = "vitastor-system" 81 | } 82 | 83 | // Check placement_levels to fix 84 | placementLevels := map[string]int32{ 85 | "dc": 99, 86 | "host": 100, 87 | "osd": 101, 88 | } 89 | placementLevelBytes, err := json.Marshal(placementLevels) 90 | if err != nil { 91 | log.Error(err, "Unable to marshal placement level block") 92 | return ctrl.Result{}, err 93 | } 94 | nodePlacementPath := config.VitastorPrefix + "/config/placement_levels" 95 | placementLevelResp, err := cli.Put(ctx, nodePlacementPath, string(placementLevelBytes)) 96 | if err != nil { 97 | log.Error(err, "Unable to update placement level list") 98 | return ctrl.Result{}, err 99 | } 100 | log.Info(placementLevelResp.Header.String()) 101 | 102 | var vitastorCluster controlv1.VitastorCluster 103 | if err := r.Get(ctx, types.NamespacedName{Namespace: corev1.NamespaceAll, Name: req.Name}, &vitastorCluster); err != nil { 104 | log.Error(err, "Unable to fetch VitastorCluster, skipping") 105 | return ctrl.Result{}, client.IgnoreNotFound(err) 106 | } 107 | monitorReplicas := int32(vitastorCluster.Spec.MonitorReplicaNum) 108 | 109 | // Check monitor deployment 110 | monitorDeployment := &appsv1.Deployment{} 111 | if err := r.Get(ctx, types.NamespacedName{Name: "vitastor-monitor", Namespace: namespace}, monitorDeployment); err != nil { 112 | if errors.IsNotFound(err) { 113 | // Deployment is not found - creating new one 114 | log.Info("Deployment is not found, creating new one") 115 | depl, err := r.getMonitorConfiguration(&vitastorCluster) 116 | if err != nil { 117 | log.Error(err, "Failed to create monitor deployment") 118 | return ctrl.Result{}, err 119 | } 120 | if err := controllerutil.SetControllerReference(&vitastorCluster, depl, r.Scheme); err != nil { 121 | log.Error(err, "Failed to set owner deployment") 122 | return ctrl.Result{}, err 123 | } 124 | if err := r.Create(ctx, depl); err != nil { 125 | log.Error(err, "Failed to create new Deployment") 126 | return ctrl.Result{}, err 127 | } 128 | return ctrl.Result{Requeue: true}, nil 129 | } 130 | log.Error(err, "Failed to fetch monitor deployment") 131 | return ctrl.Result{}, err 132 | } 133 | 134 | // Check monitor image 135 | if monitorDeployment.Spec.Template.Spec.Containers[0].Image != vitastorCluster.Spec.MonitorImage { 136 | log.Info("Monitor image mismatch, updating") 137 | monitorDeployment.Spec.Template.Spec.Containers[0].Image = vitastorCluster.Spec.MonitorImage 138 | if err := r.Update(ctx, monitorDeployment); err != nil { 139 | log.Error(err, "Failed to update monitor deployment") 140 | return ctrl.Result{}, err 141 | } 142 | } 143 | // Check monitor replicas 144 | if *monitorDeployment.Spec.Replicas != int32(vitastorCluster.Spec.MonitorReplicaNum) { 145 | log.Info("Number of monitor replicas mismatch, updating") 146 | monitorDeployment.Spec.Replicas = &monitorReplicas 147 | if err := r.Update(ctx, 
monitorDeployment); err != nil { 148 | log.Error(err, "Failed to update monitor deployment") 149 | return ctrl.Result{}, err 150 | } 151 | } 152 | 153 | // Check agent daemonset 154 | agentDaemonSet := &appsv1.DaemonSet{} 155 | if err := r.Get(ctx, types.NamespacedName{Name: "vitastor-agent", Namespace: namespace}, agentDaemonSet); err != nil { 156 | if errors.IsNotFound(err) { 157 | // Daemonset is not found - creating new one 158 | log.Info("Daemonset is not found, creating new one") 159 | ds, err := r.getAgentConfiguration(&vitastorCluster) 160 | if err != nil { 161 | log.Error(err, "Failed to create agent daemonset") 162 | return ctrl.Result{}, err 163 | } 164 | if err := controllerutil.SetControllerReference(&vitastorCluster, ds, r.Scheme); err != nil { 165 | log.Error(err, "Failed to set owner for agent daemonset") 166 | return ctrl.Result{}, err 167 | } 168 | if err := r.Create(ctx, ds); err != nil { 169 | log.Error(err, "Failed to create new Daemonset") 170 | return ctrl.Result{}, err 171 | } 172 | return ctrl.Result{RequeueAfter: time.Duration(5) * time.Minute}, nil 173 | } 174 | log.Error(err, "Failed to fetch agent daemonset") 175 | return ctrl.Result{}, err 176 | } 177 | 178 | // Check agent image 179 | if agentDaemonSet.Spec.Template.Spec.Containers[0].Image != vitastorCluster.Spec.AgentImage { 180 | log.Info("Agent image mismatch") 181 | agentDaemonSet.Spec.Template.Spec.Containers[0].Image = vitastorCluster.Spec.AgentImage 182 | if err := r.Update(ctx, agentDaemonSet); err != nil { 183 | log.Error(err, "Failed to update agent daemonset") 184 | return ctrl.Result{}, err 185 | } 186 | } 187 | 188 | agentList := &corev1.PodList{} 189 | getOpts := []client.ListOption{ 190 | client.InNamespace(namespace), 191 | client.MatchingLabels{"app": "vitastor-agent"}, 192 | } 193 | log.Info("Fetching agents...") 194 | if err := r.List(ctx, agentList, getOpts...); err != nil { 195 | log.Error(err, "unable to fetch Vitastor agents") 196 | return ctrl.Result{}, client.IgnoreNotFound(err) 197 | } 198 | for _, agent := range agentList.Items { 199 | vitastorNode := &controlv1.VitastorNode{} 200 | if err := r.Get(ctx, types.NamespacedName{Namespace: corev1.NamespaceAll, Name: agent.Spec.NodeName}, vitastorNode); err != nil { 201 | if errors.IsNotFound(err) { 202 | // VitastorNode CRD for that node is not found - creating new one 203 | log.Error(err, "VitastorNode CRD is not found, creating new one", "NodeName", agent.Spec.NodeName) 204 | newVitastorNode, err := r.getVitastorNodeConfiguration(agent.Spec.NodeName, vitastorCluster.Spec.OSDImage) 205 | if err != nil { 206 | log.Error(err, "Unable to get vitastorNode configuration") 207 | return ctrl.Result{}, err 208 | } 209 | log.Info("Created new VitastorNode CRD", "VitastorNode.name", newVitastorNode.Name, "VitastorNode.Spec.NodeName", newVitastorNode.Spec.NodeName) 210 | if err := controllerutil.SetControllerReference(&vitastorCluster, newVitastorNode, r.Scheme); err != nil { 211 | log.Error(err, "Failed to set owner for vitastorNode CRD") 212 | return ctrl.Result{}, err 213 | } 214 | if err := r.Create(ctx, newVitastorNode); err != nil { 215 | log.Error(err, "Unable to create VitastorNode CRD") 216 | return ctrl.Result{}, err 217 | } 218 | } 219 | } 220 | } 221 | 222 | return ctrl.Result{}, nil 223 | } 224 | 225 | func (r *VitastorClusterReconciler) getVitastorNodeConfiguration(nodeName string, image string) (*controlv1.VitastorNode, error) { 226 | vitastorNode := &controlv1.VitastorNode{ 227 | ObjectMeta: ctrl.ObjectMeta{ 228 | Name: nodeName, 
229 | }, 230 | Spec: controlv1.VitastorNodeSpec{ 231 | NodeName: nodeName, 232 | OSDImage: image, 233 | }, 234 | } 235 | return vitastorNode, nil 236 | } 237 | 238 | func (r *VitastorClusterReconciler) getMonitorConfiguration(cluster *controlv1.VitastorCluster) (*appsv1.Deployment, error) { 239 | namespace, isEmpty := os.LookupEnv("NAMESPACE") 240 | if !isEmpty { 241 | namespace = "vitastor-system" 242 | } 243 | monitorReplicas := int32(cluster.Spec.MonitorReplicaNum) 244 | labels := map[string]string{"app": "vitastor-monitor"} 245 | 246 | depl := appsv1.Deployment{ 247 | ObjectMeta: ctrl.ObjectMeta{ 248 | Namespace: namespace, 249 | Name: "vitastor-monitor", 250 | }, 251 | Spec: appsv1.DeploymentSpec{ 252 | Replicas: &monitorReplicas, 253 | Selector: &v1.LabelSelector{ 254 | MatchLabels: labels, 255 | }, 256 | Template: corev1.PodTemplateSpec{ 257 | ObjectMeta: v1.ObjectMeta{ 258 | Labels: labels, 259 | }, 260 | Spec: corev1.PodSpec{ 261 | Containers: []corev1.Container{ 262 | { 263 | Name: "vitastor-monitor", 264 | Image: cluster.Spec.MonitorImage, 265 | Resources: corev1.ResourceRequirements{ 266 | Limits: corev1.ResourceList{ 267 | corev1.ResourceCPU: resource.MustParse("1000m"), 268 | corev1.ResourceMemory: resource.MustParse("1024Mi"), 269 | }, 270 | }, 271 | VolumeMounts: []corev1.VolumeMount{ 272 | { 273 | Name: "vitastor-config", 274 | MountPath: "/etc/vitastor", 275 | }, 276 | }, 277 | }, 278 | }, 279 | Volumes: []corev1.Volume{ 280 | { 281 | Name: "vitastor-config", 282 | VolumeSource: corev1.VolumeSource{ 283 | ConfigMap: &corev1.ConfigMapVolumeSource{ 284 | LocalObjectReference: corev1.LocalObjectReference{Name: "vitastor-config"}, 285 | }, 286 | }, 287 | }, 288 | }, 289 | }, 290 | }, 291 | Strategy: appsv1.DeploymentStrategy{ 292 | Type: appsv1.RollingUpdateDeploymentStrategyType, 293 | RollingUpdate: &appsv1.RollingUpdateDeployment{ 294 | MaxUnavailable: &intstr.IntOrString{IntVal: 1}, 295 | MaxSurge: &intstr.IntOrString{IntVal: 1}, 296 | }, 297 | }, 298 | }, 299 | } 300 | return &depl, nil 301 | } 302 | 303 | func (r *VitastorClusterReconciler) getAgentConfiguration(cluster *controlv1.VitastorCluster) (*appsv1.DaemonSet, error) { 304 | namespace, isEmpty := os.LookupEnv("NAMESPACE") 305 | if !isEmpty { 306 | namespace = "vitastor-system" 307 | } 308 | privilegedContainer := true 309 | dsLabels := map[string]string{"app": "vitastor-agent"} 310 | nodeLabels := map[string]string{cluster.Spec.VitastorNodeLabel: "true"} 311 | 312 | ds := appsv1.DaemonSet{ 313 | ObjectMeta: ctrl.ObjectMeta{ 314 | Namespace: namespace, 315 | Name: "vitastor-agent", 316 | }, 317 | Spec: appsv1.DaemonSetSpec{ 318 | Selector: &v1.LabelSelector{ 319 | MatchLabels: dsLabels, 320 | }, 321 | Template: corev1.PodTemplateSpec{ 322 | ObjectMeta: v1.ObjectMeta{ 323 | Labels: dsLabels, 324 | }, 325 | Spec: corev1.PodSpec{ 326 | NodeSelector: nodeLabels, 327 | Containers: []corev1.Container{ 328 | { 329 | Name: "vitastor-agent", 330 | Image: cluster.Spec.AgentImage, 331 | SecurityContext: &corev1.SecurityContext{ 332 | Privileged: &privilegedContainer, 333 | }, 334 | Resources: corev1.ResourceRequirements{ 335 | Limits: corev1.ResourceList{ 336 | corev1.ResourceCPU: resource.MustParse("1000m"), 337 | corev1.ResourceMemory: resource.MustParse("1024Mi"), 338 | }, 339 | }, 340 | Ports: []corev1.ContainerPort{{ContainerPort: 8000}}, 341 | VolumeMounts: []corev1.VolumeMount{ 342 | { 343 | Name: "vitastor-config", 344 | MountPath: "/etc/vitastor", 345 | }, 346 | { 347 | Name: "host-dev", 348 | MountPath: "/dev", 349 
| }, 350 | { 351 | Name: "host-sys", 352 | MountPath: "/sys", 353 | }, 354 | { 355 | Name: "host-lib-modules", 356 | MountPath: "/lib/modules", 357 | }, 358 | }, 359 | }, 360 | }, 361 | Volumes: []corev1.Volume{ 362 | { 363 | Name: "host-dev", 364 | VolumeSource: corev1.VolumeSource{ 365 | HostPath: &corev1.HostPathVolumeSource{ 366 | Path: "/dev", 367 | }, 368 | }, 369 | }, 370 | { 371 | Name: "host-sys", 372 | VolumeSource: corev1.VolumeSource{ 373 | HostPath: &corev1.HostPathVolumeSource{ 374 | Path: "/sys", 375 | }, 376 | }, 377 | }, 378 | { 379 | Name: "host-lib-modules", 380 | VolumeSource: corev1.VolumeSource{ 381 | HostPath: &corev1.HostPathVolumeSource{ 382 | Path: "/lib/modules", 383 | }, 384 | }, 385 | }, 386 | { 387 | Name: "vitastor-config", 388 | VolumeSource: corev1.VolumeSource{ 389 | ConfigMap: &corev1.ConfigMapVolumeSource{ 390 | LocalObjectReference: corev1.LocalObjectReference{Name: "vitastor-config"}, 391 | }, 392 | }, 393 | }, 394 | }, 395 | }, 396 | }, 397 | }, 398 | } 399 | return &ds, nil 400 | } 401 | 402 | // SetupWithManager sets up the controller with the Manager. 403 | func (r *VitastorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { 404 | 405 | return ctrl.NewControllerManagedBy(mgr). 406 | For(&controlv1.VitastorCluster{}). 407 | Owns(&controlv1.VitastorNode{}). 408 | Owns(&appsv1.DaemonSet{}). 409 | Owns(&appsv1.Deployment{}). 410 | Complete(r) 411 | } 412 | -------------------------------------------------------------------------------- /controllers/vitastornode_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "context" 21 | "encoding/json" 22 | "io" 23 | "net/http" 24 | "os" 25 | "strconv" 26 | "strings" 27 | "time" 28 | 29 | "github.com/google/go-cmp/cmp" 30 | "github.com/google/go-cmp/cmp/cmpopts" 31 | "go.etcd.io/etcd/client/v3" 32 | corev1 "k8s.io/api/core/v1" 33 | "k8s.io/apimachinery/pkg/runtime" 34 | "k8s.io/apimachinery/pkg/types" 35 | ctrl "sigs.k8s.io/controller-runtime" 36 | "sigs.k8s.io/controller-runtime/pkg/client" 37 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 38 | "sigs.k8s.io/controller-runtime/pkg/log" 39 | 40 | controlv1 "gitlab.com/Antilles7227/vitastor-operator/api/v1" 41 | ) 42 | 43 | // VitastorNodeReconciler reconciles a VitastorNode object 44 | type VitastorNodeReconciler struct { 45 | client.Client 46 | Scheme *runtime.Scheme 47 | } 48 | 49 | type SystemPartition struct { 50 | Name string `json:"name"` 51 | FStype string `json:"fstype,omitempty"` 52 | PartUUID string `json:"partuuid,omitempty"` 53 | } 54 | 55 | type SystemDisk struct { 56 | Name string `json:"name"` 57 | Type string `json:"type,omitempty"` 58 | Children []SystemPartition `json:"children,omitempty"` 59 | } 60 | 61 | type OSDPartition struct { 62 | DataDevice string `json:"data_device"` 63 | OSDNumber int `json:"osd_num"` 64 | ImmediateCommit string `json:"immediate_commit"` 65 | } 66 | 67 | type VitastorConfig struct { 68 | VitastorEtcdUrls []string `json:"etcd_address"` 69 | VitastorPrefix string `json:"etcd_prefix"` 70 | } 71 | 72 | type VitastorNodePlacement struct { 73 | Level string `json:"level,omitempty"` 74 | Parent string `json:"parent,omitempty"` 75 | } 76 | 77 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastornodes,verbs=get;list;watch;create;update;patch;delete 78 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastornodes/status,verbs=get;update;patch 79 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastornodes/finalizers,verbs=update 80 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorosds,verbs=get;list;watch;create;update;patch;delete 81 | //+kubebuilder:rbac:groups=v1,resources=pods,verbs=get;list;watch 82 | 83 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 84 | // move the current state of the cluster closer to the desired state. 85 | // TODO(user): Modify the Reconcile function to compare the state specified by 86 | // the VitastorNode object against the actual cluster state, and then 87 | // perform operations to make the cluster state reflect the state specified by 88 | // the user. 
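//
// In practice this reconciler (1) mirrors the node into the Vitastor placement
// tree stored at <etcd_prefix>/config/node_placement, (2) polls the node's
// vitastor-agent HTTP API for disk and OSD state, and (3) creates or deletes
// VitastorOSD objects to match the disks the agent reports. A sketch of the
// node_placement JSON it maintains (values are illustrative, not taken from a
// real cluster):
//
//	{
//	  "dc1":   {"level": "dc"},
//	  "node1": {"level": "host", "parent": "dc1"},
//	  "12":    {"level": "osd", "parent": "node1"}
//	}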
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile
func (r *VitastorNodeReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    var log = log.FromContext(ctx)
    config, err := loadConfiguration(ctx, "/etc/vitastor/vitastor.conf")
    if err != nil {
        log.Error(err, "Unable to load vitastor.conf")
        return ctrl.Result{}, err
    }
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   config.VitastorEtcdUrls,
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        log.Error(err, "Unable to connect to etcd")
        return ctrl.Result{}, err
    }
    defer cli.Close()
    namespace, isEmpty := os.LookupEnv("NAMESPACE")
    if !isEmpty {
        namespace = "vitastor-system"
    }
    updateIntervalRaw, isEmpty := os.LookupEnv("UPDATE_INTERVAL")
    if !isEmpty {
        updateIntervalRaw = "15"
    }
    updateInterval, err := strconv.Atoi(updateIntervalRaw)
    if err != nil {
        return ctrl.Result{}, err
    }
    placementLevelsStatic := []string{"dc"} // TODO: that list should live inside the VitastorCluster CRD

    var vitastorNode controlv1.VitastorNode
    if err := r.Get(ctx, types.NamespacedName{Namespace: corev1.NamespaceAll, Name: req.Name}, &vitastorNode); err != nil {
        log.Error(err, "unable to fetch VitastorNode, skipping")
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    var k8sNode corev1.Node
    if err := r.Get(ctx, types.NamespacedName{Namespace: corev1.NamespaceAll, Name: req.Name}, &k8sNode); err != nil {
        log.Error(err, "unable to fetch Node, skipping")
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    // Check node placement and set it if empty
    log.Info("Checking node_placement config")
    nodePlacementPath := config.VitastorPrefix + "/config/node_placement"
    placementLevelRaw, err := cli.Get(ctx, nodePlacementPath)
    if err != nil {
        log.Error(err, "Unable to retrieve placement tree")
        return ctrl.Result{}, err
    }
    var placementLevel map[string]VitastorNodePlacement
    if placementLevelRaw.Count != 0 {
        err = json.Unmarshal(placementLevelRaw.Kvs[0].Value, &placementLevel)
        if err != nil {
            log.Error(err, "Unable to parse placement level block")
            return ctrl.Result{}, err
        }
    } else {
        placementLevel = make(map[string]VitastorNodePlacement)
    }
    placementLevel[vitastorNode.Name] = VitastorNodePlacement{Level: "host"}
    // Check whether the node carries fd.vitastor.io labels
    for label, value := range k8sNode.Labels {
        if strings.Contains(label, "fd.vitastor.io") {
            splitLabel := strings.Split(label, "/")
            if contains_str_list(placementLevelsStatic, splitLabel[1]) {
                // Node is labeled properly - check whether that label already exists in the placement tree
                if _, ok := placementLevel[value]; !ok {
                    placementLevel[value] = VitastorNodePlacement{Level: "dc"}
                }
                // Update the placement entry with the proper parent
                placementLevel[vitastorNode.Name] = VitastorNodePlacement{Level: "host", Parent: value}
            }
        }
    }

    placementLevelBytes, err := json.Marshal(placementLevel)
    if err != nil {
        log.Error(err, "Unable to marshal placement level block")
        return ctrl.Result{}, err
    }
    placementLevelResp, err := cli.Put(ctx, nodePlacementPath, string(placementLevelBytes))
    if err != nil {
        log.Error(err, "Unable to update placement level tree")
        return ctrl.Result{}, err
    }
    log.Info(placementLevelResp.Header.String())

    // Update the VitastorNode status with disk/OSD state reported by the agent
    nodeStatusUpdated := false
    agentList := &corev1.PodList{}
    getOpts := []client.ListOption{
        client.InNamespace(namespace),
        client.MatchingLabels{"app": "vitastor-agent"},
        client.MatchingFields{".spec.node": vitastorNode.Name},
    }
    log.Info("Fetching agent for that VitastorNode...")
    if err := r.List(ctx, agentList, getOpts...); err != nil {
        log.Error(err, "unable to fetch agent for that VitastorNode CRD")
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }
    if len(agentList.Items) == 0 {
        log.Info("Agent pod does not seem to be running, rescheduling reconciliation...")
        return ctrl.Result{RequeueAfter: time.Duration(updateInterval) * time.Minute}, nil
    }
    agentIP := agentList.Items[0].Status.PodIP
    systemDisksURL := "http://" + agentIP + ":8000/disk"
    emptyDisksURL := systemDisksURL + "/empty"
    osdURL := systemDisksURL + "/osd"

    // Getting all disks on that node
    resp, err := http.Get(systemDisksURL)
    if err != nil {
        log.Error(err, "Unable to get system disks")
        return ctrl.Result{}, err
    }
    body, err := io.ReadAll(resp.Body)
    resp.Body.Close()
    if err != nil {
        log.Error(err, "Unable to read body of response")
        return ctrl.Result{}, err
    }
    var systemDisks []SystemDisk
    if err := json.Unmarshal(body, &systemDisks); err != nil {
        log.Error(err, "Unable to parse list of system disks")
        return ctrl.Result{}, err
    }
    // Note: allocate with zero length; appending to a slice pre-sized with
    // make([]string, len(...)) would leave empty leading entries
    systemDisksPaths := make([]string, 0, len(systemDisks))
    for _, disk := range systemDisks {
        systemDisksPaths = append(systemDisksPaths, disk.Name)
    }
    if !compareArrays(systemDisksPaths, vitastorNode.Status.Disks) {
        log.Info("Status of system disks is stale, updating")
        vitastorNode.Status.Disks = systemDisksPaths
        nodeStatusUpdated = true
    }

    // Getting empty disks on that node
    resp, err = http.Get(emptyDisksURL)
    if err != nil {
        log.Error(err, "Unable to get empty disks")
        return ctrl.Result{}, err
    }
    body, err = io.ReadAll(resp.Body)
    resp.Body.Close()
    if err != nil {
        log.Error(err, "Unable to read body of response")
        return ctrl.Result{}, err
    }
    var emptyDisks []SystemDisk
    if err := json.Unmarshal(body, &emptyDisks); err != nil {
        log.Error(err, "Unable to parse list of empty disks")
        return ctrl.Result{}, err
    }
    emptyDisksPaths := make([]string, 0, len(emptyDisks))
    for _, disk := range emptyDisks {
        emptyDisksPaths = append(emptyDisksPaths, disk.Name)
    }
    if !compareArrays(emptyDisksPaths, vitastorNode.Status.EmptyDisks) {
        log.Info("Status of empty disks is stale, updating")
        vitastorNode.Status.EmptyDisks = emptyDisksPaths
        nodeStatusUpdated = true
    }

    // Getting OSDs on that node
    resp, err = http.Get(osdURL)
    if err != nil {
        log.Error(err, "Unable to get OSDs")
        return ctrl.Result{}, err
    }
    body, err = io.ReadAll(resp.Body)
    resp.Body.Close()
    if err != nil {
        log.Error(err, "Unable to read body of response")
        return ctrl.Result{}, err
    }
    var osds []OSDPartition
    if err := json.Unmarshal(body, &osds); err != nil {
        log.Error(err, "Unable to parse list of OSDs")
        return ctrl.Result{}, err
    }
    osdPaths := make([]string, 0, len(osds))
    for _, disk := range osds {
        osdPaths = append(osdPaths, disk.DataDevice)
    }
    if !compareArrays(osdPaths, vitastorNode.Status.VitastorDisks) {
        log.Info("Status of OSDs is stale, updating")
        vitastorNode.Status.VitastorDisks = osdPaths
        nodeStatusUpdated = true
    }

    if nodeStatusUpdated {
        log.Info("Updating node status")
        err := r.Status().Update(ctx, &vitastorNode)
        if err != nil {
            log.Error(err, "Unable to update status of vitastorNode")
            return ctrl.Result{}, err
        }
    }

    log.Info("Fetching OSDs for that Node")
    osdList := &controlv1.VitastorOSDList{}
    listOpts := []client.ListOption{
        client.MatchingFields{".spec.nodeName": vitastorNode.Name},
    }
    if err := r.List(ctx, osdList, listOpts...); err != nil {
        log.Error(err, "Unable to list OSDs")
        return ctrl.Result{}, err
    }

    // Checking existing VitastorOSD objects, creating new OSDs where needed
    log.Info("Checking existing VitastorOSD for creating new OSDs")
    for _, osd := range osds {
        if contains(osdList.Items, osd.DataDevice) {
            // That disk is already serving the cluster: refresh its node placement and skip
            placementLevelRaw, err := cli.Get(ctx, nodePlacementPath)
            if err != nil {
                log.Error(err, "Unable to retrieve placement tree")
                return ctrl.Result{}, err
            }
            var placementLevel map[string]VitastorNodePlacement
            err = json.Unmarshal(placementLevelRaw.Kvs[0].Value, &placementLevel)
            if err != nil {
                log.Error(err, "Unable to parse placement level block")
                return ctrl.Result{}, err
            }
            placementLevel[strconv.Itoa(osd.OSDNumber)] = VitastorNodePlacement{Level: "osd", Parent: vitastorNode.Spec.NodeName}
            placementLevelBytes, err := json.Marshal(placementLevel)
            if err != nil {
                log.Error(err, "Unable to marshal placement level block")
                return ctrl.Result{}, err
            }
            placementLevelResp, err := cli.Put(ctx, nodePlacementPath, string(placementLevelBytes))
            if err != nil {
                log.Error(err, "Unable to update placement level tree")
                return ctrl.Result{}, err
            }
            log.Info(placementLevelResp.Header.String())
            continue
        } else {
            // No OSD is deployed for that disk yet, create the CRD
            newOSD := r.getConfiguration(osd.DataDevice, osd.OSDNumber, &vitastorNode)
            if err := controllerutil.SetControllerReference(&vitastorNode, newOSD, r.Scheme); err != nil {
                log.Error(err, "Failed to set owner for osd")
                return ctrl.Result{}, err
            }
            log.Info("Deploying new OSD", "osdName", newOSD.Name)
            err := r.Create(ctx, newOSD)
            if err != nil {
                log.Error(err, "Failed to create new OSD")
                return ctrl.Result{}, err
            }

            // Check node placement and set it if empty
            placementLevelRaw, err := cli.Get(ctx, nodePlacementPath)
            if err != nil {
                log.Error(err, "Unable to retrieve placement tree")
                return ctrl.Result{}, err
            }
            var placementLevel map[string]VitastorNodePlacement
            err = json.Unmarshal(placementLevelRaw.Kvs[0].Value, &placementLevel)
            if err != nil {
                log.Error(err, "Unable to parse placement level block")
                return ctrl.Result{}, err
            }
            placementLevel[strconv.Itoa(newOSD.Spec.OSDNumber)] = VitastorNodePlacement{Level: "osd", Parent: vitastorNode.Spec.NodeName}
            placementLevelBytes, err := json.Marshal(placementLevel)
            if err != nil {
                log.Error(err, "Unable to marshal placement level block")
                return ctrl.Result{}, err
            }
            placementLevelResp, err := cli.Put(ctx, nodePlacementPath, string(placementLevelBytes))
            if err != nil {
                log.Error(err, "Unable to update placement level tree")
                return ctrl.Result{}, err
            }
            log.Info(placementLevelResp.Header.String())
        }
    }

    // Checking existing VitastorOSD objects, deleting OSDs whose disks are gone
    log.Info("Checking existing VitastorOSD for deleting disabled OSDs")
    for _, osd := range osdList.Items {
        if contains_str_list(osdPaths, osd.Spec.OSDPath) {
            // That disk is still serving the cluster - check its image
            if osd.Spec.OSDImage != vitastorNode.Spec.OSDImage {
                log.Info("Updating OSD image", "osdName", osd.Name, "oldImage", osd.Spec.OSDImage, "newImage", vitastorNode.Spec.OSDImage)
                osd.Spec.OSDImage = vitastorNode.Spec.OSDImage
                if err := r.Update(ctx, &osd); err != nil {
                    log.Error(err, "Failed to update OSD object")
                    return ctrl.Result{}, err
                }
                log.Info("Updated OSD image, waiting for changes to apply", "osdName", osd.Name, "image", osd.Spec.OSDImage)
                return ctrl.Result{RequeueAfter: 3 * time.Minute}, nil // TODO: make the sync duration a parameter; 3 minutes seems okay for a start
            }
            continue
        } else {
            // The disk behind that OSD disappeared, deleting the OSD
            log.Info("Deleting OSD...", "osdName", osd.Name)
            err := r.Delete(ctx, &osd)
            if err != nil {
                log.Error(err, "Failed to delete OSD")
                return ctrl.Result{RequeueAfter: time.Duration(updateInterval) * time.Minute}, err
            }
        }
    }
    log.Info("Reconciling is done")
    return ctrl.Result{RequeueAfter: time.Duration(updateInterval) * time.Minute}, nil
}

func compareArrays(x, y []string) bool {
    less := func(a, b string) bool { return a < b }
    return cmp.Equal(x, y, cmpopts.SortSlices(less))
}

func contains(s []controlv1.VitastorOSD, str string) bool {
    for _, v := range s {
        if v.Spec.OSDPath == str {
            return true
        }
    }
    return false
}

func contains_str_list(s []string, str string) bool {
    for _, v := range s {
        if v == str {
            return true
        }
    }
    return false
}

func (r *VitastorNodeReconciler) getConfiguration(osdPath string, osdNumber int, node *controlv1.VitastorNode) *controlv1.VitastorOSD {
    osd := &controlv1.VitastorOSD{
        ObjectMeta: ctrl.ObjectMeta{
            Name: "vitastor-osd-" + strconv.Itoa(osdNumber),
        },
        Spec: controlv1.VitastorOSDSpec{
            NodeName:  node.Spec.NodeName,
            OSDPath:   osdPath,
            OSDNumber: osdNumber,
            OSDImage:  node.Spec.OSDImage,
        },
    }
    return osd
}

func loadConfiguration(ctx context.Context, file string) (VitastorConfig, error) {
    log := log.FromContext(ctx)
    var config VitastorConfig
    configFile, err := os.Open(file)
    if err != nil {
        log.Error(err, "Unable to open config")
        return VitastorConfig{}, err
    }
    defer configFile.Close()
    // Do not ignore decode errors: a malformed vitastor.conf would otherwise
    // silently yield an empty configuration
    if err := json.NewDecoder(configFile).Decode(&config); err != nil {
        log.Error(err, "Unable to parse config")
        return VitastorConfig{}, err
    }
    return config, nil
}

// SetupWithManager sets up the controller with the Manager.
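// Two field indexes are registered below so that Reconcile can query with
// client.MatchingFields: ".spec.nodeName" on VitastorOSD (listing the OSDs
// that belong to a node) and ".spec.node" on Pod (finding the vitastor-agent
// pod scheduled to that node).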
452 | func (r *VitastorNodeReconciler) SetupWithManager(mgr ctrl.Manager) error { 453 | if err := mgr.GetFieldIndexer().IndexField(context.Background(), &controlv1.VitastorOSD{}, ".spec.nodeName", func(rawObj client.Object) []string { 454 | osd := rawObj.(*controlv1.VitastorOSD) 455 | return []string{osd.Spec.NodeName} 456 | }); err != nil { 457 | return err 458 | } 459 | 460 | if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, ".spec.node", func(rawObj client.Object) []string { 461 | agent := rawObj.(*corev1.Pod) 462 | return []string{agent.Spec.NodeName} 463 | }); err != nil { 464 | return err 465 | } 466 | 467 | return ctrl.NewControllerManagedBy(mgr). 468 | For(&controlv1.VitastorNode{}). 469 | Owns(&controlv1.VitastorOSD{}). 470 | Complete(r) 471 | } 472 | -------------------------------------------------------------------------------- /controllers/vitastorosd_controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "context" 21 | "os" 22 | "strconv" 23 | 24 | appsv1 "k8s.io/api/apps/v1" 25 | corev1 "k8s.io/api/core/v1" 26 | "k8s.io/apimachinery/pkg/api/errors" 27 | "k8s.io/apimachinery/pkg/api/resource" 28 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 29 | "k8s.io/apimachinery/pkg/runtime" 30 | "k8s.io/apimachinery/pkg/types" 31 | ctrl "sigs.k8s.io/controller-runtime" 32 | "sigs.k8s.io/controller-runtime/pkg/client" 33 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 34 | "sigs.k8s.io/controller-runtime/pkg/log" 35 | 36 | controlv1 "gitlab.com/Antilles7227/vitastor-operator/api/v1" 37 | ) 38 | 39 | // VitastorOSDReconciler reconciles a VitastorOSD object 40 | type VitastorOSDReconciler struct { 41 | client.Client 42 | Scheme *runtime.Scheme 43 | } 44 | 45 | func (r *VitastorOSDReconciler) getConfiguration(osd *controlv1.VitastorOSD) (*appsv1.StatefulSet, error) { 46 | namespace, isEmpty := os.LookupEnv("NAMESPACE") 47 | if !isEmpty { 48 | namespace = "vitastor-system" 49 | } 50 | imageName := osd.Spec.OSDImage 51 | containerPortRaw, isEmpty := os.LookupEnv("CONTAINER_PORT") 52 | if !isEmpty { 53 | containerPortRaw = "5666" 54 | } 55 | cp, err := strconv.Atoi(containerPortRaw) 56 | if err != nil { 57 | return nil, err 58 | } 59 | containerPort := int32(cp) 60 | privilegedContainer := true 61 | nodeName := osd.Spec.NodeName 62 | osdPath := osd.Spec.OSDPath 63 | replicas := int32(1) 64 | 65 | labels := map[string]string{"osdName": osd.Name, "node": nodeName} 66 | 67 | depl := appsv1.StatefulSet{ 68 | ObjectMeta: ctrl.ObjectMeta{ 69 | Namespace: namespace, 70 | Name: osd.Name, 71 | }, 72 | Spec: appsv1.StatefulSetSpec{ 73 | Replicas: &replicas, 74 | Selector: &v1.LabelSelector{ 75 | MatchLabels: labels, 76 | }, 77 | Template: corev1.PodTemplateSpec{ 78 | ObjectMeta: v1.ObjectMeta{ 79 | Labels: labels, 80 | }, 81 | Spec: corev1.PodSpec{ 82 | NodeName: nodeName, 
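                    // Pinning the pod via spec.nodeName bypasses the scheduler
                    // on purpose: the OSD can only run on the node that
                    // physically holds the disk referenced by osdPath.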
83 | Containers: []corev1.Container{ 84 | { 85 | Name: "vitastor-osd", 86 | Image: imageName, 87 | Command: []string{"vitastor-disk"}, 88 | Args: []string{"exec-osd", osdPath}, 89 | Resources: corev1.ResourceRequirements{ 90 | Limits: corev1.ResourceList{ 91 | corev1.ResourceCPU: resource.MustParse("1000m"), 92 | }, 93 | }, 94 | VolumeMounts: []corev1.VolumeMount{ 95 | { 96 | Name: "vitastor-config", 97 | MountPath: "/etc/vitastor", 98 | }, 99 | { 100 | Name: "host-dev", 101 | MountPath: "/dev", 102 | }, 103 | { 104 | Name: "host-sys", 105 | MountPath: "/sys", 106 | }, 107 | { 108 | Name: "host-lib-modules", 109 | MountPath: "/lib/modules", 110 | }, 111 | }, 112 | SecurityContext: &corev1.SecurityContext{ 113 | Privileged: &privilegedContainer, 114 | }, 115 | Ports: []corev1.ContainerPort{{ContainerPort: containerPort}}, 116 | }, 117 | }, 118 | PriorityClassName: "system-cluster-critical", 119 | Volumes: []corev1.Volume{ 120 | { 121 | Name: "host-dev", 122 | VolumeSource: corev1.VolumeSource{ 123 | HostPath: &corev1.HostPathVolumeSource{ 124 | Path: "/dev", 125 | }, 126 | }, 127 | }, 128 | { 129 | Name: "host-sys", 130 | VolumeSource: corev1.VolumeSource{ 131 | HostPath: &corev1.HostPathVolumeSource{ 132 | Path: "/sys", 133 | }, 134 | }, 135 | }, 136 | { 137 | Name: "host-lib-modules", 138 | VolumeSource: corev1.VolumeSource{ 139 | HostPath: &corev1.HostPathVolumeSource{ 140 | Path: "/lib/modules", 141 | }, 142 | }, 143 | }, 144 | { 145 | Name: "vitastor-config", 146 | VolumeSource: corev1.VolumeSource{ 147 | ConfigMap: &corev1.ConfigMapVolumeSource{ 148 | LocalObjectReference: corev1.LocalObjectReference{Name: "vitastor-config"}, 149 | }, 150 | }, 151 | }, 152 | }, 153 | }, 154 | }, 155 | }, 156 | } 157 | return &depl, nil 158 | } 159 | 160 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorosds,verbs=get;list;watch;create;update;patch;delete 161 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorosds/status,verbs=get;update;patch 162 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorosds/finalizers,verbs=update 163 | //+kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete 164 | //+kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get 165 | //+kubebuilder:rbac:groups=v1,resources=configmaps,verbs=get;list 166 | 167 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 168 | // move the current state of the cluster closer to the desired state. 
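//
// For a VitastorOSD this means: make sure a single-replica StatefulSet with
// the same name exists in the operator namespace, and keep its container
// image in sync with spec.osdImage.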
func (r *VitastorOSDReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    var log = log.FromContext(ctx)
    namespace, isEmpty := os.LookupEnv("NAMESPACE")
    if !isEmpty {
        namespace = "vitastor-system"
    }

    var vitastorOSD controlv1.VitastorOSD
    err := r.Get(ctx, types.NamespacedName{Namespace: corev1.NamespaceAll, Name: req.Name}, &vitastorOSD)
    if err != nil {
        log.Error(err, "unable to fetch VitastorOSD, it seems to be destroyed")
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    // Check if the StatefulSet already exists; if not, create a new one
    foundSts := &appsv1.StatefulSet{}
    err = r.Get(ctx, types.NamespacedName{Name: vitastorOSD.Name, Namespace: namespace}, foundSts)
    if err != nil {
        if errors.IsNotFound(err) {
            // StatefulSet is not found - creating new one
            log.Info("StatefulSet is not found, creating new one")
            sts, err := r.getConfiguration(&vitastorOSD)
            if err != nil {
                log.Error(err, "Failed to build new StatefulSet", "StatefulSet.Namespace", namespace, "StatefulSet.Name", vitastorOSD.Name)
                return ctrl.Result{}, err
            }
            if err := controllerutil.SetControllerReference(&vitastorOSD, sts, r.Scheme); err != nil {
                log.Error(err, "Failed to set owner for StatefulSet")
                return ctrl.Result{}, err
            }
            err = r.Create(ctx, sts)
            if err != nil {
                log.Error(err, "Failed to create new StatefulSet", "StatefulSet.Namespace", sts.Namespace, "StatefulSet.Name", sts.Name)
                return ctrl.Result{}, err
            }
            return ctrl.Result{Requeue: true}, nil
        }
        log.Error(err, "Failed to fetch osd statefulset")
        return ctrl.Result{}, err
    }

    // Check OSD image
    if foundSts.Spec.Template.Spec.Containers[0].Image != vitastorOSD.Spec.OSDImage {
        log.Info("OSD image mismatch")
        foundSts.Spec.Template.Spec.Containers[0].Image = vitastorOSD.Spec.OSDImage
        if err := r.Update(ctx, foundSts); err != nil {
            log.Error(err, "Failed to update OSD statefulset")
            return ctrl.Result{}, err
        }
    }

    return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *VitastorOSDReconciler) SetupWithManager(mgr ctrl.Manager) error {
    return ctrl.NewControllerManagedBy(mgr).
        For(&controlv1.VitastorOSD{}).
        Owns(&appsv1.StatefulSet{}).
        Complete(r)
}
--------------------------------------------------------------------------------
/controllers/vitastorpool_controller.go:
--------------------------------------------------------------------------------
/*
Copyright 2022.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
15 | */ 16 | 17 | package controllers 18 | 19 | import ( 20 | "context" 21 | "encoding/json" 22 | "go.etcd.io/etcd/client/v3" 23 | corev1 "k8s.io/api/core/v1" 24 | storage "k8s.io/api/storage/v1" 25 | "k8s.io/apimachinery/pkg/api/errors" 26 | "k8s.io/apimachinery/pkg/runtime" 27 | "k8s.io/apimachinery/pkg/types" 28 | ctrl "sigs.k8s.io/controller-runtime" 29 | "sigs.k8s.io/controller-runtime/pkg/client" 30 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 31 | "sigs.k8s.io/controller-runtime/pkg/log" 32 | "time" 33 | 34 | controlv1 "gitlab.com/Antilles7227/vitastor-operator/api/v1" 35 | ) 36 | 37 | // VitastorPoolReconciler reconciles a VitastorPool object 38 | type VitastorPoolReconciler struct { 39 | client.Client 40 | Scheme *runtime.Scheme 41 | } 42 | 43 | type VitastorPoolConfig struct { 44 | Name string `json:"name"` 45 | Scheme string `json:"scheme"` 46 | PGSize int32 `json:"pg_size"` 47 | ParityChunks int32 `json:"parity_chunks,omitempty"` 48 | PGMinSize int32 `json:"pg_minsize"` 49 | PGCount int32 `json:"pg_count"` 50 | FailureDomain string `json:"failure_domain,omitempty"` 51 | MaxOSDCombinations int32 `json:"max_osd_combinations,omitempty"` 52 | BlockSize int32 `json:"block_size,omitempty"` 53 | ImmediateCommit string `json:"immediate_commit,omitempty"` 54 | OSDTags string `json:"osd_tags,omitempty"` 55 | } 56 | 57 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorpools,verbs=get;list;watch;create;update;patch;delete 58 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorpools/status,verbs=get;update;patch 59 | //+kubebuilder:rbac:groups=control.vitastor.io,resources=vitastorpools/finalizers,verbs=update 60 | //+kubebuilder:rbac:groups=storage.k8s.io,resources=storageclasses,verbs=get;list;watch;create;update;patch;delete 61 | 62 | // Reconcile is part of the main kubernetes reconciliation loop which aims to 63 | // move the current state of the cluster closer to the desired state. 64 | // TODO(user): Modify the Reconcile function to compare the state specified by 65 | // the VitastorPool object against the actual cluster state, and then 66 | // perform operations to make the cluster state reflect the state specified by 67 | // the user. 
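//
// Concretely, the reconciler upserts this pool into the JSON object stored at
// <etcd_prefix>/config/pools (keyed by pool ID) and then ensures a
// StorageClass with provisioner csi.vitastor.io exists for it. A sketch of
// the stored JSON (illustrative values only):
//
//	{"1": {"name": "pool-1", "scheme": "replicated", "pg_size": 3,
//	       "pg_minsize": 2, "pg_count": 256, "failure_domain": "host"}}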
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.13.0/pkg/reconcile
func (r *VitastorPoolReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    var log = log.FromContext(ctx)
    config, err := loadConfiguration(ctx, "/etc/vitastor/vitastor.conf")
    if err != nil {
        log.Error(err, "Unable to load vitastor.conf")
        return ctrl.Result{}, err
    }
    cli, err := clientv3.New(clientv3.Config{
        Endpoints:   config.VitastorEtcdUrls,
        DialTimeout: 5 * time.Second,
    })
    if err != nil {
        log.Error(err, "Unable to connect to etcd")
        return ctrl.Result{}, err
    }
    defer cli.Close()

    var vitastorPool controlv1.VitastorPool
    if err := r.Get(ctx, types.NamespacedName{Namespace: corev1.NamespaceAll, Name: req.Name}, &vitastorPool); err != nil {
        log.Error(err, "unable to fetch VitastorPool, skipping")
        return ctrl.Result{}, client.IgnoreNotFound(err)
    }

    // Check pool tree
    log.Info("Checking pools config")
    poolsPath := config.VitastorPrefix + "/config/pools"
    poolsConfigRaw, err := cli.Get(ctx, poolsPath)
    if err != nil {
        log.Error(err, "Unable to retrieve pools config")
        return ctrl.Result{}, err
    }
    var pools map[string]VitastorPoolConfig
    if poolsConfigRaw.Count != 0 {
        err = json.Unmarshal(poolsConfigRaw.Kvs[0].Value, &pools)
        if err != nil {
            log.Error(err, "Unable to parse pools config block")
            return ctrl.Result{}, err
        }
    } else {
        pools = make(map[string]VitastorPoolConfig)
    }
    pools[vitastorPool.Spec.ID] = VitastorPoolConfig{
        Name:          vitastorPool.Name,
        Scheme:        vitastorPool.Spec.Scheme,
        PGSize:        vitastorPool.Spec.PGSize,
        PGMinSize:     vitastorPool.Spec.PGMinSize,
        ParityChunks:  vitastorPool.Spec.ParityChunks,
        PGCount:       vitastorPool.Spec.PGCount,
        FailureDomain: vitastorPool.Spec.FailureDomain,
    }
    poolsBytes, err := json.Marshal(pools)
    if err != nil {
        log.Error(err, "Unable to marshal pools config block")
        return ctrl.Result{}, err
    }
    poolsResp, err := cli.Put(ctx, poolsPath, string(poolsBytes))
    if err != nil {
        log.Error(err, "Unable to update pools tree")
        return ctrl.Result{}, err
    }
    log.Info(poolsResp.Header.String())

    var storageClass storage.StorageClass
    if err := r.Get(ctx, types.NamespacedName{Namespace: corev1.NamespaceAll, Name: req.Name}, &storageClass); err != nil {
        if errors.IsNotFound(err) {
            // StorageClass for that pool not found - creating new one
            log.Info("StorageClass is not found, creating new one")
            sc, err := r.getStorageClassConfig(&vitastorPool, &config)
            if err != nil {
                log.Error(err, "Failed to build StorageClass for that pool")
                return ctrl.Result{}, err
            }
            if err := controllerutil.SetControllerReference(&vitastorPool, sc, r.Scheme); err != nil {
                log.Error(err, "Failed to set owner for storageclass")
                return ctrl.Result{}, err
            }
            if err := r.Create(ctx, sc); err != nil {
                log.Error(err, "Failed to create StorageClass")
                return ctrl.Result{}, err
            }
            return ctrl.Result{Requeue: true}, nil
        }
        log.Error(err, "Unable to get StorageClass")
        return ctrl.Result{}, err
    }

    return ctrl.Result{}, nil
}

func (r
*VitastorPoolReconciler) getStorageClassConfig(pool *controlv1.VitastorPool, config *VitastorConfig) (*storage.StorageClass, error) { 162 | 163 | storageClassParameters := map[string]string{ 164 | "etcdVolumePrefix": "", 165 | "poolId": pool.Spec.ID, 166 | } 167 | 168 | storageClass := storage.StorageClass{ 169 | ObjectMeta: ctrl.ObjectMeta{ 170 | Name: pool.Name, 171 | }, 172 | Provisioner: "csi.vitastor.io", 173 | Parameters: storageClassParameters, 174 | } 175 | return &storageClass, nil 176 | } 177 | 178 | // SetupWithManager sets up the controller with the Manager. 179 | func (r *VitastorPoolReconciler) SetupWithManager(mgr ctrl.Manager) error { 180 | return ctrl.NewControllerManagedBy(mgr). 181 | For(&controlv1.VitastorPool{}). 182 | Owns(&storage.StorageClass{}). 183 | Complete(r) 184 | } 185 | -------------------------------------------------------------------------------- /deploy/000-vitastor-csi-namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: vitastor-system 6 | -------------------------------------------------------------------------------- /deploy/001-vitastor-configmap-osd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | data: 5 | vitastor.conf: |- 6 | {"etcd_address":[":"], 7 | "etcd_prefix": "/vitastor", 8 | "osd_network": "", 9 | "bind_port": "5666" 10 | } 11 | metadata: 12 | namespace: vitastor-system 13 | name: vitastor-config 14 | -------------------------------------------------------------------------------- /deploy/002-csi.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | namespace: vitastor-system 6 | name: vitastor-csi-nodeplugin 7 | --- 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | metadata: 11 | namespace: vitastor-system 12 | name: vitastor-csi-nodeplugin 13 | rules: 14 | - apiGroups: [""] 15 | resources: ["nodes"] 16 | verbs: ["get"] 17 | # allow to read Vault Token and connection options from the Tenants namespace 18 | - apiGroups: [""] 19 | resources: ["secrets"] 20 | verbs: ["get"] 21 | - apiGroups: [""] 22 | resources: ["configmaps"] 23 | verbs: ["get"] 24 | --- 25 | kind: ClusterRoleBinding 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | metadata: 28 | namespace: vitastor-system 29 | name: vitastor-csi-nodeplugin 30 | subjects: 31 | - kind: ServiceAccount 32 | name: vitastor-csi-nodeplugin 33 | namespace: vitastor-system 34 | roleRef: 35 | kind: ClusterRole 36 | name: vitastor-csi-nodeplugin 37 | apiGroup: rbac.authorization.k8s.io 38 | 39 | --- 40 | kind: DaemonSet 41 | apiVersion: apps/v1 42 | metadata: 43 | namespace: vitastor-system 44 | name: csi-vitastor 45 | spec: 46 | selector: 47 | matchLabels: 48 | app: csi-vitastor 49 | template: 50 | metadata: 51 | namespace: vitastor-system 52 | labels: 53 | app: csi-vitastor 54 | spec: 55 | serviceAccountName: vitastor-csi-nodeplugin 56 | hostNetwork: true 57 | hostPID: true 58 | priorityClassName: system-node-critical 59 | # to use e.g. 
Rook orchestrated cluster, and mons' FQDN is 60 | # resolved through k8s service, set dns policy to cluster first 61 | dnsPolicy: ClusterFirstWithHostNet 62 | containers: 63 | - name: driver-registrar 64 | # This is necessary only for systems with SELinux, where 65 | # non-privileged sidecar containers cannot access unix domain socket 66 | # created by privileged CSI driver container. 67 | securityContext: 68 | privileged: true 69 | image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.2.0 70 | args: 71 | - "--v=5" 72 | - "--csi-address=/csi/csi.sock" 73 | - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi.vitastor.io/csi.sock" 74 | env: 75 | - name: KUBE_NODE_NAME 76 | valueFrom: 77 | fieldRef: 78 | fieldPath: spec.nodeName 79 | resources: 80 | limits: 81 | cpu: 1000m 82 | memory: 1000Mi 83 | requests: 84 | cpu: 100m 85 | memory: 100Mi 86 | volumeMounts: 87 | - name: socket-dir 88 | mountPath: /csi 89 | - name: registration-dir 90 | mountPath: /registration 91 | - name: csi-vitastor 92 | securityContext: 93 | privileged: true 94 | capabilities: 95 | add: ["SYS_ADMIN"] 96 | allowPrivilegeEscalation: true 97 | image: antilles/vitastor-csi:1.6.0 98 | args: 99 | - "--node=$(NODE_ID)" 100 | - "--endpoint=$(CSI_ENDPOINT)" 101 | env: 102 | - name: NODE_ID 103 | valueFrom: 104 | fieldRef: 105 | fieldPath: spec.nodeName 106 | - name: CSI_ENDPOINT 107 | value: unix:///csi/csi.sock 108 | imagePullPolicy: "IfNotPresent" 109 | ports: 110 | - containerPort: 9898 111 | name: healthz 112 | protocol: TCP 113 | resources: 114 | limits: 115 | cpu: 1000m 116 | memory: 1000Mi 117 | requests: 118 | cpu: 100m 119 | memory: 100Mi 120 | livenessProbe: 121 | failureThreshold: 5 122 | httpGet: 123 | path: /healthz 124 | port: healthz 125 | initialDelaySeconds: 10 126 | timeoutSeconds: 3 127 | periodSeconds: 2 128 | volumeMounts: 129 | - name: run-csi 130 | mountPath: /run/vitastor-csi 131 | - name: socket-dir 132 | mountPath: /csi 133 | - mountPath: /dev 134 | name: host-dev 135 | - mountPath: /sys 136 | name: host-sys 137 | - mountPath: /run/mount 138 | name: host-mount 139 | - mountPath: /lib/modules 140 | name: lib-modules 141 | readOnly: true 142 | - name: vitastor-config 143 | mountPath: /etc/vitastor 144 | - name: plugin-dir 145 | mountPath: /var/lib/kubelet/plugins 146 | mountPropagation: "Bidirectional" 147 | - name: mountpoint-dir 148 | mountPath: /var/lib/kubelet/pods 149 | mountPropagation: "Bidirectional" 150 | - name: liveness-probe 151 | securityContext: 152 | privileged: true 153 | image: k8s.gcr.io/sig-storage/livenessprobe:v2.5.0 154 | args: 155 | - "--csi-address=$(CSI_ENDPOINT)" 156 | - "--health-port=9898" 157 | env: 158 | - name: CSI_ENDPOINT 159 | value: unix:///csi/csi.sock 160 | resources: 161 | limits: 162 | cpu: 1000m 163 | memory: 1000Mi 164 | requests: 165 | cpu: 100m 166 | memory: 100Mi 167 | volumeMounts: 168 | - mountPath: /csi 169 | name: socket-dir 170 | volumes: 171 | - name: run-csi 172 | hostPath: 173 | path: /run/vitastor-csi 174 | type: Directory 175 | - name: socket-dir 176 | hostPath: 177 | path: /var/lib/kubelet/plugins/csi.vitastor.io 178 | type: DirectoryOrCreate 179 | - name: plugin-dir 180 | hostPath: 181 | path: /var/lib/kubelet/plugins 182 | type: Directory 183 | - name: mountpoint-dir 184 | hostPath: 185 | path: /var/lib/kubelet/pods 186 | type: DirectoryOrCreate 187 | - name: registration-dir 188 | hostPath: 189 | path: /var/lib/kubelet/plugins_registry/ 190 | type: Directory 191 | - name: host-dev 192 | hostPath: 193 | path: /dev 194 | - name: host-sys 
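        # The hostPath volumes here (/dev, /sys, /run/mount, /lib/modules) give
        # the privileged csi-vitastor container direct access to host block
        # devices; they must match the volumeMounts declared above.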
195 | hostPath: 196 | path: /sys 197 | - name: host-mount 198 | hostPath: 199 | path: /run/mount 200 | - name: lib-modules 201 | hostPath: 202 | path: /lib/modules 203 | - name: vitastor-config 204 | configMap: 205 | name: vitastor-config 206 | 207 | 208 | --- 209 | apiVersion: v1 210 | kind: ServiceAccount 211 | metadata: 212 | namespace: vitastor-system 213 | name: vitastor-csi-provisioner 214 | 215 | --- 216 | kind: ClusterRole 217 | apiVersion: rbac.authorization.k8s.io/v1 218 | metadata: 219 | namespace: vitastor-system 220 | name: vitastor-external-provisioner-runner 221 | rules: 222 | - apiGroups: [""] 223 | resources: ["nodes"] 224 | verbs: ["get", "list", "watch"] 225 | - apiGroups: [""] 226 | resources: ["secrets"] 227 | verbs: ["get", "list", "watch"] 228 | - apiGroups: [""] 229 | resources: ["events"] 230 | verbs: ["list", "watch", "create", "update", "patch"] 231 | - apiGroups: [""] 232 | resources: ["persistentvolumes"] 233 | verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] 234 | - apiGroups: [""] 235 | resources: ["persistentvolumeclaims"] 236 | verbs: ["get", "list", "watch", "update"] 237 | - apiGroups: [""] 238 | resources: ["persistentvolumeclaims/status"] 239 | verbs: ["update", "patch"] 240 | - apiGroups: ["storage.k8s.io"] 241 | resources: ["storageclasses"] 242 | verbs: ["get", "list", "watch"] 243 | - apiGroups: ["snapshot.storage.k8s.io"] 244 | resources: ["volumesnapshots"] 245 | verbs: ["get", "list"] 246 | - apiGroups: ["snapshot.storage.k8s.io"] 247 | resources: ["volumesnapshotcontents"] 248 | verbs: ["create", "get", "list", "watch", "update", "delete"] 249 | - apiGroups: ["snapshot.storage.k8s.io"] 250 | resources: ["volumesnapshotclasses"] 251 | verbs: ["get", "list", "watch"] 252 | - apiGroups: ["storage.k8s.io"] 253 | resources: ["volumeattachments"] 254 | verbs: ["get", "list", "watch", "update", "patch"] 255 | - apiGroups: ["storage.k8s.io"] 256 | resources: ["volumeattachments/status"] 257 | verbs: ["patch"] 258 | - apiGroups: ["storage.k8s.io"] 259 | resources: ["csinodes"] 260 | verbs: ["get", "list", "watch"] 261 | - apiGroups: ["snapshot.storage.k8s.io"] 262 | resources: ["volumesnapshotcontents/status"] 263 | verbs: ["update"] 264 | - apiGroups: [""] 265 | resources: ["configmaps"] 266 | verbs: ["get"] 267 | --- 268 | kind: ClusterRoleBinding 269 | apiVersion: rbac.authorization.k8s.io/v1 270 | metadata: 271 | namespace: vitastor-system 272 | name: vitastor-csi-provisioner-role 273 | subjects: 274 | - kind: ServiceAccount 275 | name: vitastor-csi-provisioner 276 | namespace: vitastor-system 277 | roleRef: 278 | kind: ClusterRole 279 | name: vitastor-external-provisioner-runner 280 | apiGroup: rbac.authorization.k8s.io 281 | 282 | --- 283 | kind: Role 284 | apiVersion: rbac.authorization.k8s.io/v1 285 | metadata: 286 | namespace: vitastor-system 287 | name: vitastor-external-provisioner-cfg 288 | rules: 289 | - apiGroups: [""] 290 | resources: ["configmaps"] 291 | verbs: ["get", "list", "watch", "create", "update", "delete"] 292 | - apiGroups: ["coordination.k8s.io"] 293 | resources: ["leases"] 294 | verbs: ["get", "watch", "list", "delete", "update", "create"] 295 | 296 | --- 297 | kind: RoleBinding 298 | apiVersion: rbac.authorization.k8s.io/v1 299 | metadata: 300 | name: vitastor-csi-provisioner-role-cfg 301 | namespace: vitastor-system 302 | subjects: 303 | - kind: ServiceAccount 304 | name: vitastor-csi-provisioner 305 | namespace: vitastor-system 306 | roleRef: 307 | kind: Role 308 | name: 
vitastor-external-provisioner-cfg 309 | apiGroup: rbac.authorization.k8s.io 310 | 311 | --- 312 | kind: Service 313 | apiVersion: v1 314 | metadata: 315 | namespace: vitastor-system 316 | name: csi-vitastor-provisioner 317 | labels: 318 | app: csi-metrics 319 | spec: 320 | selector: 321 | app: csi-vitastor-provisioner 322 | ports: 323 | - name: http-metrics 324 | port: 8080 325 | protocol: TCP 326 | targetPort: 8680 327 | 328 | --- 329 | kind: Deployment 330 | apiVersion: apps/v1 331 | metadata: 332 | namespace: vitastor-system 333 | name: csi-vitastor-provisioner 334 | spec: 335 | replicas: 3 336 | selector: 337 | matchLabels: 338 | app: csi-vitastor-provisioner 339 | template: 340 | metadata: 341 | namespace: vitastor-system 342 | labels: 343 | app: csi-vitastor-provisioner 344 | spec: 345 | affinity: 346 | podAntiAffinity: 347 | requiredDuringSchedulingIgnoredDuringExecution: 348 | - labelSelector: 349 | matchExpressions: 350 | - key: app 351 | operator: In 352 | values: 353 | - csi-vitastor-provisioner 354 | topologyKey: "kubernetes.io/hostname" 355 | serviceAccountName: vitastor-csi-provisioner 356 | priorityClassName: system-cluster-critical 357 | containers: 358 | - name: csi-provisioner 359 | image: k8s.gcr.io/sig-storage/csi-provisioner:v2.2.2 360 | args: 361 | - "--csi-address=$(ADDRESS)" 362 | - "--v=5" 363 | - "--timeout=150s" 364 | - "--retry-interval-start=500ms" 365 | - "--leader-election=true" 366 | # set it to true to use topology based provisioning 367 | - "--feature-gates=Topology=false" 368 | # if fstype is not specified in storageclass, ext4 is default 369 | - "--default-fstype=ext4" 370 | - "--extra-create-metadata=true" 371 | env: 372 | - name: ADDRESS 373 | value: unix:///csi/csi-provisioner.sock 374 | resources: 375 | limits: 376 | cpu: 1000m 377 | memory: 1000Mi 378 | requests: 379 | cpu: 100m 380 | memory: 100Mi 381 | imagePullPolicy: "IfNotPresent" 382 | volumeMounts: 383 | - name: socket-dir 384 | mountPath: /csi 385 | - name: csi-snapshotter 386 | image: k8s.gcr.io/sig-storage/csi-snapshotter:v4.1.1 387 | args: 388 | - "--csi-address=$(ADDRESS)" 389 | - "--v=5" 390 | - "--timeout=150s" 391 | - "--leader-election=true" 392 | env: 393 | - name: ADDRESS 394 | value: unix:///csi/csi-provisioner.sock 395 | resources: 396 | limits: 397 | cpu: 1000m 398 | memory: 1000Mi 399 | requests: 400 | cpu: 100m 401 | memory: 100Mi 402 | imagePullPolicy: "IfNotPresent" 403 | securityContext: 404 | privileged: true 405 | volumeMounts: 406 | - name: socket-dir 407 | mountPath: /csi 408 | - name: csi-attacher 409 | image: k8s.gcr.io/sig-storage/csi-attacher:v3.2.1 410 | args: 411 | - "--v=5" 412 | - "--csi-address=$(ADDRESS)" 413 | - "--leader-election=true" 414 | - "--retry-interval-start=500ms" 415 | env: 416 | - name: ADDRESS 417 | value: /csi/csi-provisioner.sock 418 | resources: 419 | limits: 420 | cpu: 1000m 421 | memory: 1000Mi 422 | requests: 423 | cpu: 100m 424 | memory: 100Mi 425 | imagePullPolicy: "IfNotPresent" 426 | volumeMounts: 427 | - name: socket-dir 428 | mountPath: /csi 429 | - name: csi-resizer 430 | image: k8s.gcr.io/sig-storage/csi-resizer:v1.4.0 431 | args: 432 | - "--csi-address=$(ADDRESS)" 433 | - "--v=5" 434 | - "--timeout=150s" 435 | - "--leader-election" 436 | - "--retry-interval-start=500ms" 437 | - "--handle-volume-inuse-error=false" 438 | env: 439 | - name: ADDRESS 440 | value: unix:///csi/csi-provisioner.sock 441 | resources: 442 | limits: 443 | cpu: 1000m 444 | memory: 1000Mi 445 | requests: 446 | cpu: 100m 447 | memory: 100Mi 448 | 
imagePullPolicy: "IfNotPresent" 449 | volumeMounts: 450 | - name: socket-dir 451 | mountPath: /csi 452 | - name: csi-vitastor 453 | securityContext: 454 | privileged: true 455 | capabilities: 456 | add: ["SYS_ADMIN"] 457 | image: antilles/vitastor-csi:1.6.0 458 | args: 459 | - "--node=$(NODE_ID)" 460 | - "--endpoint=$(CSI_ENDPOINT)" 461 | env: 462 | - name: NODE_ID 463 | valueFrom: 464 | fieldRef: 465 | fieldPath: spec.nodeName 466 | - name: CSI_ENDPOINT 467 | value: unix:///csi/csi-provisioner.sock 468 | resources: 469 | limits: 470 | cpu: 1000m 471 | memory: 1000Mi 472 | requests: 473 | cpu: 100m 474 | memory: 100Mi 475 | imagePullPolicy: "IfNotPresent" 476 | volumeMounts: 477 | - name: socket-dir 478 | mountPath: /csi 479 | - mountPath: /dev 480 | name: host-dev 481 | - mountPath: /sys 482 | name: host-sys 483 | - mountPath: /lib/modules 484 | name: lib-modules 485 | readOnly: true 486 | - name: vitastor-config 487 | mountPath: /etc/vitastor 488 | volumes: 489 | - name: host-dev 490 | hostPath: 491 | path: /dev 492 | - name: host-sys 493 | hostPath: 494 | path: /sys 495 | - name: lib-modules 496 | hostPath: 497 | path: /lib/modules 498 | - name: socket-dir 499 | emptyDir: { 500 | medium: "Memory" 501 | } 502 | - name: vitastor-config 503 | configMap: 504 | name: vitastor-config 505 | 506 | --- 507 | # if Kubernetes version is less than 1.18 change 508 | # apiVersion to storage.k8s.io/v1betav1 509 | apiVersion: storage.k8s.io/v1 510 | kind: CSIDriver 511 | metadata: 512 | namespace: vitastor-system 513 | name: csi.vitastor.io 514 | spec: 515 | attachRequired: true 516 | podInfoOnMount: false 517 | -------------------------------------------------------------------------------- /deploy/003-vitastor-crd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | controller-gen.kubebuilder.io/version: v0.9.2 7 | creationTimestamp: null 8 | name: vitastorclusters.control.vitastor.io 9 | spec: 10 | group: control.vitastor.io 11 | names: 12 | kind: VitastorCluster 13 | listKind: VitastorClusterList 14 | plural: vitastorclusters 15 | singular: vitastorcluster 16 | scope: Cluster 17 | versions: 18 | - name: v1 19 | schema: 20 | openAPIV3Schema: 21 | description: VitastorCluster is the Schema for the vitastorclusters API 22 | properties: 23 | apiVersion: 24 | description: 'APIVersion defines the versioned schema of this representation 25 | of an object. Servers should convert recognized schemas to the latest 26 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 27 | type: string 28 | kind: 29 | description: 'Kind is a string value representing the REST resource this 30 | object represents. Servers may infer this from the endpoint the client 31 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 32 | type: string 33 | metadata: 34 | type: object 35 | spec: 36 | description: VitastorClusterSpec defines the desired state of VitastorCluster 37 | properties: 38 | agentImage: 39 | description: Agent image name/tag 40 | type: string 41 | disksReconciligPeriod: 42 | description: Reconcile period in seconds 43 | type: integer 44 | monitorImage: 45 | description: Monitor image name/tag 46 | type: string 47 | monitorReplicaNum: 48 | description: Number of replicas for monitors 49 | type: integer 50 | osdImage: 51 | description: OSD image name/tag 52 | type: string 53 | vitastorNodeLabel: 54 | description: Node label for Agent DaemonSet nodeSelector 55 | type: string 56 | required: 57 | - agentImage 58 | - disksReconciligPeriod 59 | - monitorImage 60 | - monitorReplicaNum 61 | - osdImage 62 | - vitastorNodeLabel 63 | type: object 64 | status: 65 | description: VitastorClusterStatus defines the observed state of VitastorCluster 66 | type: object 67 | type: object 68 | served: true 69 | storage: true 70 | subresources: 71 | status: {} 72 | 73 | --- 74 | apiVersion: apiextensions.k8s.io/v1 75 | kind: CustomResourceDefinition 76 | metadata: 77 | annotations: 78 | controller-gen.kubebuilder.io/version: v0.9.2 79 | creationTimestamp: null 80 | name: vitastorosds.control.vitastor.io 81 | spec: 82 | group: control.vitastor.io 83 | names: 84 | kind: VitastorOSD 85 | listKind: VitastorOSDList 86 | plural: vitastorosds 87 | singular: vitastorosd 88 | scope: Cluster 89 | versions: 90 | - name: v1 91 | schema: 92 | openAPIV3Schema: 93 | description: VitastorOSD is the Schema for the vitastorosds API 94 | properties: 95 | apiVersion: 96 | description: 'APIVersion defines the versioned schema of this representation 97 | of an object. Servers should convert recognized schemas to the latest 98 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 99 | type: string 100 | kind: 101 | description: 'Kind is a string value representing the REST resource this 102 | object represents. Servers may infer this from the endpoint the client 103 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 104 | type: string 105 | metadata: 106 | type: object 107 | spec: 108 | description: VitastorOSDSpec defines the desired state of VitastorOSD 109 | properties: 110 | nodeName: 111 | description: Name of node 112 | type: string 113 | osdImage: 114 | description: OSD container image 115 | type: string 116 | osdNumber: 117 | description: Number allocated to OSD 118 | type: integer 119 | osdPath: 120 | description: Path to OSD disk (i.e. /dev/disk/by-partuuid/<...>) 121 | type: string 122 | osdTags: 123 | description: // Tags that applied to OSD (divided by comma, i.e. 
"hostN,nvme,dcN") 124 | type: string 125 | required: 126 | - nodeName 127 | - osdImage 128 | - osdNumber 129 | - osdPath 130 | type: object 131 | status: 132 | description: VitastorOSDStatus defines the observed state of VitastorOSD 133 | type: object 134 | type: object 135 | served: true 136 | storage: true 137 | subresources: 138 | status: {} 139 | 140 | --- 141 | apiVersion: apiextensions.k8s.io/v1 142 | kind: CustomResourceDefinition 143 | metadata: 144 | annotations: 145 | controller-gen.kubebuilder.io/version: v0.9.2 146 | creationTimestamp: null 147 | name: vitastornodes.control.vitastor.io 148 | spec: 149 | group: control.vitastor.io 150 | names: 151 | kind: VitastorNode 152 | listKind: VitastorNodeList 153 | plural: vitastornodes 154 | singular: vitastornode 155 | scope: Cluster 156 | versions: 157 | - name: v1 158 | schema: 159 | openAPIV3Schema: 160 | description: VitastorNode is the Schema for the vitastornodes API 161 | properties: 162 | apiVersion: 163 | description: 'APIVersion defines the versioned schema of this representation 164 | of an object. Servers should convert recognized schemas to the latest 165 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 166 | type: string 167 | kind: 168 | description: 'Kind is a string value representing the REST resource this 169 | object represents. Servers may infer this from the endpoint the client 170 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 171 | type: string 172 | metadata: 173 | type: object 174 | spec: 175 | description: VitastorNodeSpec defines the desired state of VitastorNode 176 | properties: 177 | nodeName: 178 | description: Name of node that have disks for OSDs 179 | type: string 180 | osdImage: 181 | description: OSD image name/tag 182 | type: string 183 | required: 184 | - nodeName 185 | - osdImage 186 | type: object 187 | status: 188 | description: VitastorNodeStatus defines the observed state of VitastorNode 189 | properties: 190 | disks: 191 | description: List of disks on that node 192 | items: 193 | type: string 194 | type: array 195 | emptyDisks: 196 | description: List of empty disks (without any partition) on that node 197 | items: 198 | type: string 199 | type: array 200 | vitastorDisks: 201 | description: List of Vitastor OSDs on that node 202 | items: 203 | type: string 204 | type: array 205 | type: object 206 | type: object 207 | served: true 208 | storage: true 209 | subresources: 210 | status: {} 211 | 212 | --- 213 | apiVersion: apiextensions.k8s.io/v1 214 | kind: CustomResourceDefinition 215 | metadata: 216 | annotations: 217 | controller-gen.kubebuilder.io/version: v0.9.2 218 | name: vitastorpools.control.vitastor.io 219 | spec: 220 | group: control.vitastor.io 221 | names: 222 | kind: VitastorPool 223 | listKind: VitastorPoolList 224 | plural: vitastorpools 225 | singular: vitastorpool 226 | scope: Cluster 227 | versions: 228 | - name: v1 229 | schema: 230 | openAPIV3Schema: 231 | description: VitastorPool is the Schema for the vitastorpools API 232 | properties: 233 | apiVersion: 234 | description: 'APIVersion defines the versioned schema of this representation 235 | of an object. Servers should convert recognized schemas to the latest 236 | internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 237 | type: string 238 | kind: 239 | description: 'Kind is a string value representing the REST resource this 240 | object represents. Servers may infer this from the endpoint the client 241 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 242 | type: string 243 | metadata: 244 | type: object 245 | spec: 246 | description: VitastorPoolSpec defines the desired state of VitastorPool 247 | properties: 248 | blockSize: 249 | format: int32 250 | type: integer 251 | failureDomain: 252 | type: string 253 | id: 254 | description: ID of the pool (must be unique across 255 | pools in the cluster) 256 | type: string 257 | immediateCommit: 258 | type: string 259 | maxOSDCombinations: 260 | format: int32 261 | type: integer 262 | osdTags: 263 | type: string 264 | parityChunks: 265 | format: int32 266 | type: integer 267 | pgCount: 268 | format: int32 269 | type: integer 270 | pgMinSize: 271 | format: int32 272 | type: integer 273 | pgSize: 274 | format: int32 275 | type: integer 276 | scheme: 277 | type: string 278 | required: 279 | - failureDomain 280 | - id 281 | - pgCount 282 | - pgMinSize 283 | - pgSize 284 | - scheme 285 | type: object 286 | status: 287 | description: VitastorPoolStatus defines the observed state of VitastorPool 288 | type: object 289 | type: object 290 | served: true 291 | storage: true 292 | subresources: 293 | status: {} 294 | -------------------------------------------------------------------------------- /deploy/004-vitastor-operator-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | labels: 6 | app.kubernetes.io/name: serviceaccount 7 | app.kubernetes.io/instance: vitastor-controller-manager 8 | app.kubernetes.io/component: rbac 9 | app.kubernetes.io/created-by: vitastor-operator 10 | app.kubernetes.io/part-of: vitastor-operator 11 | name: vitastor-controller-manager 12 | namespace: vitastor-system 13 | 14 | --- 15 | apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRole 17 | metadata: 18 | name: vitastor-manager-role 19 | rules: 20 | - apiGroups: 21 | - apps 22 | resources: 23 | - daemonsets 24 | verbs: 25 | - create 26 | - delete 27 | - get 28 | - list 29 | - patch 30 | - update 31 | - watch 32 | - apiGroups: 33 | - apps 34 | resources: 35 | - daemonsets/status 36 | verbs: 37 | - get 38 | - apiGroups: 39 | - apps 40 | resources: 41 | - deployments 42 | verbs: 43 | - create 44 | - delete 45 | - get 46 | - list 47 | - patch 48 | - update 49 | - watch 50 | - apiGroups: 51 | - apps 52 | resources: 53 | - deployments/status 54 | verbs: 55 | - get 56 | - apiGroups: 57 | - apps 58 | resources: 59 | - statefulsets 60 | verbs: 61 | - create 62 | - delete 63 | - get 64 | - list 65 | - patch 66 | - update 67 | - watch 68 | - apiGroups: 69 | - apps 70 | resources: 71 | - statefulsets/status 72 | verbs: 73 | - get 74 | - apiGroups: 75 | - control.vitastor.io 76 | resources: 77 | - vitastorclusters 78 | verbs: 79 | - create 80 | - delete 81 | - get 82 | - list 83 | - patch 84 | - update 85 | - watch 86 | - apiGroups: 87 | - control.vitastor.io 88 | resources: 89 | - vitastorclusters/finalizers 90 | verbs: 91 | - update 92 | - apiGroups: 93 | - control.vitastor.io 94 | resources: 95 | - vitastorclusters/status 96 | verbs:
97 | - get 98 | - patch 99 | - update 100 | - apiGroups: 101 | - control.vitastor.io 102 | resources: 103 | - vitastornodes 104 | verbs: 105 | - create 106 | - delete 107 | - get 108 | - list 109 | - patch 110 | - update 111 | - watch 112 | - apiGroups: 113 | - control.vitastor.io 114 | resources: 115 | - vitastornodes/finalizers 116 | verbs: 117 | - update 118 | - apiGroups: 119 | - control.vitastor.io 120 | resources: 121 | - vitastornodes/status 122 | verbs: 123 | - get 124 | - patch 125 | - update 126 | - apiGroups: 127 | - control.vitastor.io 128 | resources: 129 | - vitastorosds 130 | verbs: 131 | - create 132 | - delete 133 | - get 134 | - list 135 | - patch 136 | - update 137 | - watch 138 | - apiGroups: 139 | - control.vitastor.io 140 | resources: 141 | - vitastorosds/finalizers 142 | verbs: 143 | - update 144 | - apiGroups: 145 | - control.vitastor.io 146 | resources: 147 | - vitastorosds/status 148 | verbs: 149 | - get 150 | - patch 151 | - update 152 | - apiGroups: 153 | - control.vitastor.io 154 | resources: 155 | - vitastorpools 156 | verbs: 157 | - create 158 | - delete 159 | - get 160 | - list 161 | - patch 162 | - update 163 | - watch 164 | - apiGroups: 165 | - control.vitastor.io 166 | resources: 167 | - vitastorpools/finalizers 168 | verbs: 169 | - update 170 | - apiGroups: 171 | - control.vitastor.io 172 | resources: 173 | - vitastorpools/status 174 | verbs: 175 | - get 176 | - patch 177 | - update 178 | - apiGroups: 179 | - storage.k8s.io 180 | resources: 181 | - storageclasses 182 | verbs: 183 | - create 184 | - delete 185 | - get 186 | - list 187 | - patch 188 | - update 189 | - watch 190 | - apiGroups: 191 | - "" 192 | resources: 193 | - pods 194 | - configmaps 195 | verbs: 196 | - get 197 | - list 198 | - watch 199 | 200 | --- 201 | apiVersion: rbac.authorization.k8s.io/v1 202 | kind: ClusterRoleBinding 203 | metadata: 204 | labels: 205 | app.kubernetes.io/name: clusterrolebinding 206 | app.kubernetes.io/instance: vitastor-manager-rolebinding 207 | app.kubernetes.io/component: rbac 208 | app.kubernetes.io/created-by: vitastor-operator 209 | app.kubernetes.io/part-of: vitastor-operator 210 | name: vitastor-manager-rolebinding 211 | roleRef: 212 | apiGroup: rbac.authorization.k8s.io 213 | kind: ClusterRole 214 | name: vitastor-manager-role 215 | subjects: 216 | - kind: ServiceAccount 217 | name: vitastor-controller-manager 218 | namespace: vitastor-system 219 | 220 | --- 221 | apiVersion: apps/v1 222 | kind: Deployment 223 | metadata: 224 | name: vitastor-operator 225 | namespace: vitastor-system 226 | spec: 227 | replicas: 1 228 | selector: 229 | matchLabels: 230 | app: vitastor-operator 231 | template: 232 | metadata: 233 | labels: 234 | app: vitastor-operator 235 | spec: 236 | containers: 237 | - name: vitastor-operator 238 | image: antilles/vitastor-operator:1.0.0 239 | imagePullPolicy: IfNotPresent 240 | env: 241 | - name: "CONTAINER_PORT" 242 | value: "5666" 243 | resources: 244 | limits: 245 | cpu: 1000m 246 | memory: 1000Mi 247 | volumeMounts: 248 | - name: vitastor-config 249 | mountPath: /etc/vitastor 250 | volumes: 251 | - name: vitastor-config 252 | configMap: 253 | name: vitastor-config 254 | serviceAccountName: vitastor-controller-manager 255 | -------------------------------------------------------------------------------- /deploy/005-sample-vitastor-cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: control.vitastor.io/v1 2 | kind: VitastorCluster 3 | metadata: 4 | labels: 5 | 
app.kubernetes.io/name: vitastorcluster 6 | app.kubernetes.io/instance: vitastorcluster-sample 7 | app.kubernetes.io/part-of: vitastor-operator 8 | app.kubernetes.io/managed-by: kustomize 9 | app.kubernetes.io/created-by: vitastor-operator 10 | name: vitastorcluster-test 11 | spec: 12 | monitorReplicaNum: 3 13 | vitastorNodeLabel: "vitastor-node" 14 | disksReconciligPeriod: 60 15 | agentImage: "antilles/vitastor-agent:1.6.0" 16 | monitorImage: "antilles/vitastor-monitor:1.6.0" 17 | osdImage: "antilles/vitastor-osd:1.6.0" 18 | -------------------------------------------------------------------------------- /deploy/006-sample-vitastor-pool.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: control.vitastor.io/v1 3 | kind: VitastorPool 4 | metadata: 5 | name: test-pool 6 | spec: 7 | scheme: "ec" 8 | failureDomain: "osd" 9 | id: "1" 10 | pgCount: 128 11 | pgMinSize: 2 12 | pgSize: 3 13 | parityChunks: 1 -------------------------------------------------------------------------------- /deploy/007-test-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: test-vitastor-pvc-block 6 | namespace: vitastor-system 7 | spec: 8 | storageClassName: test-pool # Same as VitastorPool name 9 | 10 | volumeMode: Block 11 | accessModes: 12 | - ReadWriteMany 13 | resources: 14 | requests: 15 | storage: 100Gi -------------------------------------------------------------------------------- /deploy/008-test-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: vitastor-pvc-test 5 | namespace: vitastor-system 6 | spec: 7 | restartPolicy: Always 8 | nodeName: rbkube-node-ord3.i 9 | containers: 10 | - name: vitastor-test-perf 11 | image: antilles/vitastor-osd:0.8 12 | command: [ "/bin/bash", "-c", "--" ] 13 | args: 14 | - "while true; do sleep 30; done;" 15 | resources: 16 | limits: 17 | memory: "512Mi" 18 | cpu: "8000m" 19 | volumeMounts: 20 | - name: vitastor-config 21 | mountPath: /etc/vitastor 22 | volumeDevices: 23 | - name: vitastor-block 24 | devicePath: /dev/xvda 25 | 26 | volumes: 27 | - name: vitastor-config 28 | configMap: 29 | name: vitastor-config 30 | - name: vitastor-block 31 | persistentVolumeClaim: 32 | claimName: test-vitastor-pvc-block 33 | readOnly: false 34 | -------------------------------------------------------------------------------- /dist/Operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: vitastor-operator-deployment 5 | namespace: vitastor-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: vitastor-operator-controller 11 | template: 12 | metadata: 13 | labels: 14 | app: vitastor-operator-controller 15 | spec: 16 | containers: 17 | - name: vitastor-operator-controller 18 | image: antilles/vitastor-operator-controller:0.1 19 | imagePullPolicy: Always 20 | env: 21 | - name: "CONTAINER_PORT" 22 | value: "5666" 23 | - name: "OSD_IMAGE" 24 | value: "antilles/vitastor-osd:0.1" 25 | resources: 26 | limits: 27 | cpu: 1000m 28 | memory: 1000Mi 29 | serviceAccountName: demo-operator-svc -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module
gitlab.com/Antilles7227/vitastor-operator 2 | 3 | go 1.19 4 | 5 | require ( 6 | github.com/google/go-cmp v0.5.8 7 | github.com/onsi/ginkgo/v2 v2.1.4 8 | github.com/onsi/gomega v1.19.0 9 | go.etcd.io/etcd/client/v3 v3.5.7 10 | k8s.io/api v0.25.0 11 | k8s.io/apimachinery v0.25.0 12 | k8s.io/client-go v0.25.0 13 | sigs.k8s.io/controller-runtime v0.13.0 14 | ) 15 | 16 | require ( 17 | cloud.google.com/go v0.97.0 // indirect 18 | github.com/Azure/go-autorest v14.2.0+incompatible // indirect 19 | github.com/Azure/go-autorest/autorest v0.11.27 // indirect 20 | github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect 21 | github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect 22 | github.com/Azure/go-autorest/logger v0.2.1 // indirect 23 | github.com/Azure/go-autorest/tracing v0.6.0 // indirect 24 | github.com/PuerkitoBio/purell v1.1.1 // indirect 25 | github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect 26 | github.com/beorn7/perks v1.0.1 // indirect 27 | github.com/cespare/xxhash/v2 v2.1.2 // indirect 28 | github.com/coreos/go-semver v0.3.0 // indirect 29 | github.com/coreos/go-systemd/v22 v22.3.2 // indirect 30 | github.com/davecgh/go-spew v1.1.1 // indirect 31 | github.com/emicklei/go-restful/v3 v3.8.0 // indirect 32 | github.com/evanphx/json-patch/v5 v5.6.0 // indirect 33 | github.com/fsnotify/fsnotify v1.5.4 // indirect 34 | github.com/go-logr/logr v1.2.3 // indirect 35 | github.com/go-logr/zapr v1.2.3 // indirect 36 | github.com/go-openapi/jsonpointer v0.19.5 // indirect 37 | github.com/go-openapi/jsonreference v0.19.5 // indirect 38 | github.com/go-openapi/swag v0.19.14 // indirect 39 | github.com/gogo/protobuf v1.3.2 // indirect 40 | github.com/golang-jwt/jwt/v4 v4.2.0 // indirect 41 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 42 | github.com/golang/protobuf v1.5.2 // indirect 43 | github.com/google/gnostic v0.5.7-v3refs // indirect 44 | github.com/google/gofuzz v1.1.0 // indirect 45 | github.com/google/uuid v1.1.2 // indirect 46 | github.com/imdario/mergo v0.3.12 // indirect 47 | github.com/josharian/intern v1.0.0 // indirect 48 | github.com/json-iterator/go v1.1.12 // indirect 49 | github.com/mailru/easyjson v0.7.6 // indirect 50 | github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect 51 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect 52 | github.com/modern-go/reflect2 v1.0.2 // indirect 53 | github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect 54 | github.com/pkg/errors v0.9.1 // indirect 55 | github.com/prometheus/client_golang v1.12.2 // indirect 56 | github.com/prometheus/client_model v0.2.0 // indirect 57 | github.com/prometheus/common v0.32.1 // indirect 58 | github.com/prometheus/procfs v0.7.3 // indirect 59 | github.com/spf13/pflag v1.0.5 // indirect 60 | go.etcd.io/etcd/api/v3 v3.5.7 // indirect 61 | go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect 62 | go.uber.org/atomic v1.7.0 // indirect 63 | go.uber.org/multierr v1.6.0 // indirect 64 | go.uber.org/zap v1.21.0 // indirect 65 | golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd // indirect 66 | golang.org/x/net v0.0.0-20220722155237-a158d28d115b // indirect 67 | golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect 68 | golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect 69 | golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect 70 | golang.org/x/text v0.3.7 // indirect 71 | golang.org/x/time 
v0.0.0-20220609170525-579cf78fd858 // indirect 72 | gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect 73 | google.golang.org/appengine v1.6.7 // indirect 74 | google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21 // indirect 75 | google.golang.org/grpc v1.47.0 // indirect 76 | google.golang.org/protobuf v1.28.0 // indirect 77 | gopkg.in/inf.v0 v0.9.1 // indirect 78 | gopkg.in/yaml.v2 v2.4.0 // indirect 79 | gopkg.in/yaml.v3 v3.0.1 // indirect 80 | k8s.io/apiextensions-apiserver v0.25.0 // indirect 81 | k8s.io/component-base v0.25.0 // indirect 82 | k8s.io/klog/v2 v2.70.1 // indirect 83 | k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect 84 | k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect 85 | sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect 86 | sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect 87 | sigs.k8s.io/yaml v1.3.0 // indirect 88 | ) 89 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2022. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | "flag" 21 | "os" 22 | 23 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 24 | // to ensure that exec-entrypoint and run can make use of them. 
25 | // appsv1 "k8s.io/api/apps/v1" 26 | // corev1 "k8s.io/api/core/v1" 27 | "k8s.io/apimachinery/pkg/runtime" 28 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 29 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 30 | _ "k8s.io/client-go/plugin/pkg/client/auth" 31 | ctrl "sigs.k8s.io/controller-runtime" 32 | 33 | // "sigs.k8s.io/controller-runtime/pkg/client" 34 | "sigs.k8s.io/controller-runtime/pkg/healthz" 35 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 36 | 37 | controlv1 "gitlab.com/Antilles7227/vitastor-operator/api/v1" 38 | "gitlab.com/Antilles7227/vitastor-operator/controllers" 39 | //+kubebuilder:scaffold:imports 40 | ) 41 | 42 | var ( 43 | scheme = runtime.NewScheme() 44 | setupLog = ctrl.Log.WithName("setup") 45 | ) 46 | 47 | func init() { 48 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 49 | 50 | utilruntime.Must(controlv1.AddToScheme(scheme)) 51 | //+kubebuilder:scaffold:scheme 52 | } 53 | 54 | func main() { 55 | var metricsAddr string 56 | var enableLeaderElection bool 57 | var probeAddr string 58 | flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") 59 | flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") 60 | flag.BoolVar(&enableLeaderElection, "leader-elect", false, 61 | "Enable leader election for controller manager. "+ 62 | "Enabling this will ensure there is only one active controller manager.") 63 | opts := zap.Options{ 64 | Development: true, 65 | } 66 | opts.BindFlags(flag.CommandLine) 67 | flag.Parse() 68 | 69 | ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts))) 70 | 71 | mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ 72 | Scheme: scheme, 73 | MetricsBindAddress: metricsAddr, 74 | Port: 9443, 75 | HealthProbeBindAddress: probeAddr, 76 | LeaderElection: enableLeaderElection, 77 | LeaderElectionID: "ff08a114.vitastor.io", 78 | // LeaderElectionReleaseOnCancel defines if the leader should step down voluntarily 79 | // when the Manager ends. This requires the binary to immediately end when the 80 | // Manager is stopped; otherwise, this setting is unsafe. Setting this significantly 81 | // speeds up voluntary leader transitions as the new leader doesn't have to wait 82 | // LeaseDuration time first. 83 | // 84 | // In the default scaffold provided, the program ends immediately after 85 | // the manager stops, so it would be fine to enable this option. However, 86 | // if you are doing, or intend to do, any operation such as performing cleanups 87 | // after the manager stops, then its usage might be unsafe.
88 | // LeaderElectionReleaseOnCancel: true, 89 | // ClientDisableCacheFor: []client.Object{&appsv1.Deployment{}, &corev1.Node{}, &controlv1.VitastorOSD{}}, 90 | }) 91 | if err != nil { 92 | setupLog.Error(err, "unable to start manager") 93 | os.Exit(1) 94 | } 95 | 96 | if err = (&controllers.VitastorNodeReconciler{ 97 | Client: mgr.GetClient(), 98 | Scheme: mgr.GetScheme(), 99 | }).SetupWithManager(mgr); err != nil { 100 | setupLog.Error(err, "unable to create controller", "controller", "VitastorNode") 101 | os.Exit(1) 102 | } 103 | if err = (&controllers.VitastorOSDReconciler{ 104 | Client: mgr.GetClient(), 105 | Scheme: mgr.GetScheme(), 106 | }).SetupWithManager(mgr); err != nil { 107 | setupLog.Error(err, "unable to create controller", "controller", "VitastorOSD") 108 | os.Exit(1) 109 | } 110 | if err = (&controllers.VitastorClusterReconciler{ 111 | Client: mgr.GetClient(), 112 | Scheme: mgr.GetScheme(), 113 | }).SetupWithManager(mgr); err != nil { 114 | setupLog.Error(err, "unable to create controller", "controller", "VitastorCluster") 115 | os.Exit(1) 116 | } 117 | if err = (&controllers.VitastorPoolReconciler{ 118 | Client: mgr.GetClient(), 119 | Scheme: mgr.GetScheme(), 120 | }).SetupWithManager(mgr); err != nil { 121 | setupLog.Error(err, "unable to create controller", "controller", "VitastorPool") 122 | os.Exit(1) 123 | } 124 | //+kubebuilder:scaffold:builder 125 | 126 | if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { 127 | setupLog.Error(err, "unable to set up health check") 128 | os.Exit(1) 129 | } 130 | if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { 131 | setupLog.Error(err, "unable to set up ready check") 132 | os.Exit(1) 133 | } 134 | 135 | setupLog.Info("starting manager") 136 | if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { 137 | setupLog.Error(err, "problem running manager") 138 | os.Exit(1) 139 | } 140 | } 141 | --------------------------------------------------------------------------------
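A note on the CRDs above: deploy/ ships sample manifests for VitastorCluster (005) and VitastorPool (006), but none for VitastorOSD. The sketch below shows what an object conforming to the VitastorOSD CRD would look like. All concrete values in it (node name, OSD number, partition UUID, tags) are hypothetical placeholders, not taken from this repository; judging by the RBAC rules that grant the manager create/delete on vitastorosds, such objects are presumably created by the operator and agent rather than written by hand.

apiVersion: control.vitastor.io/v1
kind: VitastorOSD
metadata:
  name: osd-1                              # the CRD is cluster-scoped, so no namespace
spec:
  nodeName: worker-1                       # hypothetical node name
  osdImage: antilles/vitastor-osd:1.6.0    # same image as the sample cluster spec
  osdNumber: 1                             # hypothetical OSD number
  osdPath: /dev/disk/by-partuuid/11111111-2222-3333-4444-555555555555  # placeholder UUID
  osdTags: "worker-1,nvme"                 # optional, comma-separated (not in the required list)

Once the CRDs are applied, such objects can be listed with kubectl get vitastorosds, the plural name registered by the CRD.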