├── .dockerignore ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd │ ├── bases │ │ ├── cache │ │ │ ├── datafuse.datafuselabs.io_datafusecomputeinstances.yaml │ │ │ └── datafuse.datafuselabs.io_datafusecomputesets.yaml │ │ ├── datafuse.datafuselabs.io_datafusecomputegroups.yaml │ │ └── datafuse.datafuselabs.io_datafuseoperators.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_datafuseclusters.yaml │ │ ├── cainjection_in_datafusecomputegroups.yaml │ │ ├── cainjection_in_datafusecomputeinstances.yaml │ │ ├── cainjection_in_datafusecomputesets.yaml │ │ ├── cainjection_in_datafuseoperators.yaml │ │ ├── webhook_in_datafuseclusters.yaml │ │ ├── webhook_in_datafusecomputegroups.yaml │ │ ├── webhook_in_datafusecomputeinstances.yaml │ │ ├── webhook_in_datafusecomputesets.yaml │ │ └── webhook_in_datafuseoperators.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ └── manager_config_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_client_clusterrole.yaml │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── datafusecluster_editor_role.yaml │ ├── datafusecluster_viewer_role.yaml │ ├── datafusecomputegroup_editor_role.yaml │ ├── datafusecomputegroup_viewer_role.yaml │ ├── datafusecomputeinstance_editor_role.yaml │ ├── datafusecomputeinstance_viewer_role.yaml │ ├── datafusecomputeset_editor_role.yaml │ ├── datafusecomputeset_viewer_role.yaml │ ├── datafuseoperator_editor_role.yaml │ ├── datafuseoperator_viewer_role.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── role.yaml │ └── role_binding.yaml └── samples │ ├── datafuse_v1alpha1_datafusecomputegroup.yaml │ ├── datafuse_v1alpha1_datafusecomputeinstance.yaml │ ├── datafuse_v1alpha1_datafusecomputeset.yaml │ ├── datafuse_v1alpha1_datafuseoperator.yaml │ └── default │ ├── datafuse_v1alpha1_datafusecluster.yaml │ ├── datafuse_v1alpha1_datafusecomputegroup.yaml │ ├── datafuse_v1alpha1_datafusecomputeinstance.yaml │ └── datafuse_v1alpha1_datafusecomputeset.yaml ├── docs ├── development-guide.md ├── quick-start.md └── user-guide.md ├── examples ├── demo.yaml └── operator.yaml ├── go.mod ├── go.sum ├── hack ├── boilerplate.go.txt ├── tools.go └── update-codegen.sh ├── main.go ├── manifests ├── crds │ ├── datafuse.datafuselabs.io_datafusecomputegroups.yaml │ └── datafuse.datafuselabs.io_datafuseoperators.yaml ├── datafuse-operator.yaml └── datafuse-rbac.yaml ├── pkg ├── apis │ └── datafuse │ │ └── v1alpha1 │ │ ├── datafusecomputegroup_types.go │ │ ├── datafusecomputeinstance_types.go │ │ ├── datafusecomputeset_types.go │ │ ├── datafuseoperator_types.go │ │ ├── defaults.go │ │ ├── defaults_test.go │ │ ├── groupversion_info.go │ │ ├── register.go │ │ └── zz_generated.deepcopy.go ├── client │ ├── clientset │ │ └── versioned │ │ │ ├── clientset.go │ │ │ ├── doc.go │ │ │ ├── fake │ │ │ ├── clientset_generated.go │ │ │ ├── doc.go │ │ │ └── register.go │ │ │ ├── scheme │ │ │ ├── doc.go │ │ │ └── register.go │ │ │ └── typed │ │ │ └── datafuse │ │ │ └── v1alpha1 │ │ │ ├── datafuse_client.go │ │ │ ├── datafusecomputegroup.go │ │ │ ├── datafuseoperator.go │ │ │ ├── doc.go │ │ │ ├── fake │ │ │ ├── doc.go │ │ │ ├── fake_datafuse_client.go │ │ │ ├── 
fake_datafusecomputegroup.go │ │ │ └── fake_datafuseoperator.go │ │ │ └── generated_expansion.go │ ├── informers │ │ └── externalversions │ │ │ ├── datafuse │ │ │ ├── interface.go │ │ │ └── v1alpha1 │ │ │ │ ├── datafusecomputegroup.go │ │ │ │ ├── datafuseoperator.go │ │ │ │ └── interface.go │ │ │ ├── factory.go │ │ │ ├── generic.go │ │ │ └── internalinterfaces │ │ │ └── factory_interfaces.go │ └── listers │ │ └── datafuse │ │ └── v1alpha1 │ │ ├── datafusecomputegroup.go │ │ ├── datafuseoperator.go │ │ └── expansion_generated.go ├── config │ └── constants.go ├── controllers │ ├── main_controller.go │ ├── operator │ │ ├── operator_controller.go │ │ └── operator_controller_test.go │ ├── register │ │ ├── register.go │ │ ├── register_controller.go │ │ ├── register_controller_test.go │ │ ├── register_test.go │ │ └── register_test_utils.go │ └── utils │ │ ├── operator_utils.go │ │ └── operator_utils_test.go └── scheduler │ ├── interface │ └── interface.go │ └── scheduler_factory.go ├── result.txt ├── tests ├── e2e │ ├── README.md │ ├── clients │ │ ├── clickhouse-client.yaml │ │ └── mysql-client.yaml │ ├── main_test.go │ ├── sqlfiles │ │ ├── clickhouse │ │ │ └── test1.result │ │ ├── mysql │ │ │ └── test1.result │ │ └── test1.sql │ ├── testfiles │ │ ├── default_follower.yaml │ │ ├── default_generated_deploy.yaml │ │ ├── default_operator.yaml │ │ ├── default_service.yaml │ │ └── leader_with_2workers.yaml │ └── validate │ │ └── valid_cluster.sql ├── framework │ ├── cluster_role.go │ ├── cluster_role_binding.go │ ├── crd.go │ ├── deployment.go │ ├── framework.go │ ├── helpers.go │ ├── namespace.go │ ├── operator.go │ ├── pod.go │ ├── role.go │ ├── role_binding.go │ └── service_account.go └── utils │ ├── convert │ └── convert.go │ ├── retry │ ├── retry.go │ └── retry_test.go │ ├── sql │ └── sql_utils.go │ ├── ssa │ ├── ssa.go │ └── ssa_test.go │ └── utils.go └── utils ├── kube.go ├── kube_test.go └── signals.go /.dockerignore: -------------------------------------------------------------------------------- 1 | # More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file 2 | # Ignore all files which are not go type 3 | !**/*.go 4 | !**/*.mod 5 | !**/*.sum 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | testbin/* 10 | tmp 11 | 12 | # Test binary, build with `go test -c` 13 | *.test 14 | 15 | # Output of the go coverage tool, specifically when used with LiteIDE 16 | *.out 17 | 18 | # Kubernetes Generated files - skip generated files, except for vendored files 19 | 20 | !vendor/**/zz_generated.* 21 | 22 | # editor and IDE paraphernalia 23 | .idea 24 | *.swp 25 | *.swo 26 | *~ 27 | vendor 28 | ./hack/tools/go 29 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.16 as builder 3 | 4 | WORKDIR /workspace 5 | # Copy the Go Modules manifests 6 | COPY go.mod go.mod 7 | COPY go.sum go.sum 8 | # cache deps before building and copying source so that we don't need to re-download as much 9 | # and so that source changes don't invalidate our downloaded layer 10 | RUN go mod download 11 | 12 | # Copy the go source 13 | COPY main.go main.go 14 | COPY pkg/ pkg/ 15 | COPY utils/ utils/ 16 | 17 | 
# Build 18 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go 19 | 20 | # Use distroless as minimal base image to package the manager binary 21 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 22 | FROM gcr.io/distroless/static:nonroot 23 | WORKDIR / 24 | COPY --from=builder /workspace/manager . 25 | USER 65532:65532 26 | 27 | ENTRYPOINT ["/manager"] 28 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | HUB ?= datafusedev 2 | TAG ?= latest 3 | # Image URL to use all building/pushing image targets 4 | IMG ?= ${HUB}/datafuse-operator:${TAG} 5 | # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) 6 | CRD_OPTIONS ?= "crd:trivialVersions=true,preserveUnknownFields=false" 7 | 8 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 9 | ifeq (,$(shell go env GOBIN)) 10 | GOBIN=$(shell go env GOPATH)/bin 11 | else 12 | GOBIN=$(shell go env GOBIN) 13 | endif 14 | 15 | all: build 16 | 17 | ##@ General 18 | 19 | # The help target prints out all targets with their descriptions organized 20 | # beneath their categories. The categories are represented by '##@' and the 21 | # target descriptions by '##'. The awk commands is responsible for reading the 22 | # entire set of makefiles included in this invocation, looking for lines of the 23 | # file as xyz: ## something, and then pretty-format the target and help. Then, 24 | # if there's a line with ##@ something, that gets pretty-printed as a category. 25 | # More info on the usage of ANSI control characters for terminal formatting: 26 | # https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters 27 | # More info on the awk command: 28 | # http://linuxcommand.org/lc3_adv_awk.php 29 | 30 | help: ## Display this help. 31 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 32 | 33 | ##@ Development 34 | 35 | manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. 36 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./pkg/apis/..." output:crd:artifacts:config=config/crd/bases 37 | 38 | generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. 39 | $(CONTROLLER_GEN) object:headerFile="hack/boilerplate.go.txt" paths="./pkg/apis/..." 40 | 41 | fmt: ## Run go fmt against code. 42 | go fmt ./... 43 | 44 | vet: ## Run go vet against code. 45 | go vet ./... 46 | 47 | ##@ Build 48 | 49 | build: generate fmt ## Build manager binary. 50 | go build -o bin/manager main.go 51 | 52 | run: manifests generate fmt vet ## Run a controller from your host. 53 | go run ./main.go 54 | 55 | docker-build: unit-test ## Build docker image with the manager. 56 | docker build -t ${IMG} . 57 | 58 | docker-push: ## Push docker image with the manager. 59 | docker push ${IMG} 60 | 61 | ##@ Deployment 62 | 63 | generate-template: manifests kustomize ## Install CRDs into the K8s cluster specified in ~/.kube/config. 
64 | $(KUSTOMIZE) build config/crd > config/generated/crd_template.yaml 65 | 66 | install: 67 | kubectl apply -f config/generated/crd_template.yaml 68 | 69 | uninstall: 70 | kubectl delete -f config/generated/crd_template.yaml 71 | 72 | CONTROLLER_GEN = $(shell pwd)/bin/controller-gen 73 | controller-gen: ## Download controller-gen locally if necessary. 74 | $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.4.1) 75 | 76 | KUSTOMIZE = $(shell pwd)/bin/kustomize 77 | kustomize: ## Download kustomize locally if necessary. 78 | $(call go-get-tool,$(KUSTOMIZE),sigs.k8s.io/kustomize/kustomize/v3@v3.8.7) 79 | 80 | 81 | unit-test: 82 | go test ./pkg/... 83 | integration: 84 | go test ./tests/e2e/... 85 | deployfile: 86 | kubectl apply -f config/crd/bases 87 | mkdir -p tmp 88 | kustomize build config/default > ./tmp/deploy.yaml 89 | ./bin/kflit -f ./tmp/deploy.yaml -i group=rbac.authorization.k8s.io > manifests/datafuse-rbac.yaml 90 | ./bin/kflit -f ./tmp/deploy.yaml -i kind=Deployment > manifests/datafuse-operator.yaml 91 | ./bin/kflit -f ./tmp/deploy.yaml -i kind=CustomResourceDefinition,name=datafusecomputegroups.datafuse.datafuselabs.io > manifests/crds/datafuse.datafuselabs.io_datafusecomputegroups.yaml 92 | ./bin/kflit -f ./tmp/deploy.yaml -i kind=CustomResourceDefinition,name=datafuseoperators.datafuse.datafuselabs.io > manifests/crds/datafuse.datafuselabs.io_datafuseoperators.yaml 93 | deploy: manifests 94 | kubectl apply -f config/crd/bases 95 | kustomize build config/default | kubectl apply -f - 96 | # go-get-tool will 'go get' any package $2 and install it to $1. 97 | PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) 98 | define go-get-tool 99 | @[ -f $(1) ] || { \ 100 | set -e ;\ 101 | TMP_DIR=$$(mktemp -d) ;\ 102 | cd $$TMP_DIR ;\ 103 | go mod init tmp ;\ 104 | echo "Downloading $(2)" ;\ 105 | GOBIN=$(PROJECT_DIR)/bin go get $(2) ;\ 106 | rm -rf $$TMP_DIR ;\ 107 | } 108 | endef 109 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | domain: datafuselabs.io 2 | layout: go.kubebuilder.io/v3 3 | multigroup: true 4 | projectName: new-operator 5 | repo: datafuselabs.io/datafuse-operator 6 | resources: 7 | - api: 8 | crdVersion: v1 9 | namespaced: true 10 | domain: datafuselabs.io 11 | group: datafuse 12 | kind: DatafuseComputeGroup 13 | path: datafuselabs.io/datafuse-operator/apis/datafuse/v1alpha1 14 | version: v1alpha1 15 | - api: 16 | crdVersion: v1 17 | namespaced: true 18 | domain: datafuselabs.io 19 | group: datafuse 20 | kind: DatafuseOperator 21 | path: datafuselabs.io/datafuse-operator/apis/datafuse/v1alpha1 22 | version: v1alpha1 23 | version: "3" 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Datafuse operator 2 | **NOTICE**: this project is no longer under active maintenance. If you are interested in how to deploy Databend on Kubernetes, please take a look at our helm chart (https://github.com/datafuselabs/helm-charts). 3 | 4 | DataFuse operator manages fuse-query and fuse-store clusters atop [Kubernetes](https://kubernetes.io/) using [CRDs](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/).
If you are looking to run FuseQuery on a bare-metal machine or in Docker, please refer to [How to Run](https://github.com/datafuselabs/datafuse/blob/master/docs/overview/building-and-running.md). 5 | 6 | ## Requirements 7 | * Kubernetes v1.15+ (with official CRD support) 8 | 9 | ## Principles 10 | * **Zero Configuration** 11 | - Support automated cluster provisioning. 12 | * **Whole process monitoring** 13 | - Support various integration tools for monitoring query/storage health and performance 14 | * **High Availability** 15 | - Aim for zero downtime and no single point of failure 16 | * **Expandability** 17 | - Support running on multi-cloud and hybrid environments 18 | 19 | ## Introduction 20 | 21 | 22 | ## Roadmap 23 | 24 | - [ ] 0.1 Support Basic Install (automated provisioning and configuration management) 25 | - [ ] 0.2 Support Seamless Upgrades (patch and minor version upgrades supported) 26 | - [ ] 0.3 Support Full Lifecycle Orchestration (high availability) 27 | - [ ] 0.5 Support Deep Insights (monitoring metrics, workload analysis) 28 | - [ ] 0.6 Support Auto Pilot (self-defined autoscaling based on metrics) 29 | 30 | ## Status 31 | 32 | #### General 33 | 34 | - [ ] CRD definition 35 | - [ ] Workload lifecycle controllers 36 | - [ ] Continuous integration tests 37 | - [ ] High availability for the operator cluster and the Datafuse cluster 38 | - [ ] Monitoring metrics: support Prometheus and Grafana for query time, CPU, and memory monitoring 39 | - [ ] Autoscaling based on monitoring metrics 40 | 41 | 42 | ## Contributing 43 | 44 | You can learn more about contributing to the Datafuse project by reading our [Contribution Guide](https://github.com/datafuselabs/datafuse/blob/master/docs/development/contributing.md) and by viewing our [Code of Conduct](https://github.com/datafuselabs/datafuse/blob/master/docs/policies/code-of-conduct.md). 45 | 46 | ## License 47 | 48 | Datafuse operator is licensed under [Apache 2.0](LICENSE). 49 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 2 | # More documentation can be found at https://docs.cert-manager.io 3 | # WARNING: Targets CertManager v1.0. Check https://cert-manager.io/docs/installation/upgrading/ for breaking changes.
4 | apiVersion: cert-manager.io/v1 5 | kind: Issuer 6 | metadata: 7 | name: selfsigned-issuer 8 | namespace: system 9 | spec: 10 | selfSigned: {} 11 | --- 12 | apiVersion: cert-manager.io/v1 13 | kind: Certificate 14 | metadata: 15 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml 16 | namespace: system 17 | spec: 18 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 19 | dnsNames: 20 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 22 | issuerRef: 23 | kind: Issuer 24 | name: selfsigned-issuer 25 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 26 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/crd/bases/cache/datafuse.datafuselabs.io_datafusecomputeinstances.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.4.1 8 | creationTimestamp: null 9 | name: datafusecomputeinstances.datafuse.datafuselabs.io 10 | spec: 11 | group: datafuse.datafuselabs.io 12 | names: 13 | kind: DatafuseComputeInstance 14 | listKind: DatafuseComputeInstanceList 15 | plural: datafusecomputeinstances 16 | singular: datafusecomputeinstance 17 | scope: Namespaced 18 | versions: 19 | - name: v1alpha1 20 | schema: 21 | openAPIV3Schema: 22 | description: DatafuseComputeInstance is the Schema for the datafusecomputeinstances 23 | API 24 | properties: 25 | apiVersion: 26 | description: 'APIVersion defines the versioned schema of this representation 27 | of an object. Servers should convert recognized schemas to the latest 28 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 29 | type: string 30 | kind: 31 | description: 'Kind is a string value representing the REST resource this 32 | object represents. Servers may infer this from the endpoint the client 33 | submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 34 | type: string 35 | metadata: 36 | type: object 37 | spec: 38 | description: DatafuseComputeInstanceSpec defines the desired state of 39 | DatafuseComputeInstance 40 | properties: 41 | clickhousePort: 42 | default: 9000 43 | description: Port open to Clickhouse Client connection 44 | format: int32 45 | type: integer 46 | coreLimit: 47 | default: 1200m 48 | description: CoreLimit specifies a hard limit on CPU cores for the 49 | instance. 50 | type: string 51 | cores: 52 | default: 1 53 | description: Num of cpus for the instance, 54 | format: int32 55 | minimum: 1 56 | type: integer 57 | httpPort: 58 | default: 8080 59 | description: 'Port for warp HTTP connection, can get cluster infomation 60 | and support to add/remove port We also use HTTP port for health 61 | check and readiness check TODO(zhihanz) docs on readiness check 62 | difference between leaders and workers example: https://github.com/datafuselabs/datafuse/blob/master/fusequery/example/cluster.sh' 63 | format: int32 64 | type: integer 65 | image: 66 | default: datafuselabs/fuse-query:latest 67 | description: Image is the container image to use. Overrides Spec.Image 68 | if set. 69 | type: string 70 | imagePullPolicy: 71 | default: Always 72 | description: ImagePullPolicy is the image pull policy for the driver, 73 | executor, and init-container. 74 | type: string 75 | labels: 76 | additionalProperties: 77 | type: string 78 | description: Labels are the Kubernetes labels to be added to the pod. 79 | type: object 80 | memory: 81 | default: 512m 82 | description: Memory is the amount of memory to request for the instance. 83 | type: string 84 | memorylimit: 85 | default: 512m 86 | description: MemoryLimit is the amount of memory limit for the instance. 
87 | in MiB 88 | type: string 89 | metricsPort: 90 | default: 7070 91 | description: Port for metrics exporter 92 | format: int32 93 | type: integer 94 | mysqlPort: 95 | default: 3307 96 | description: Port open to Mysql Client connection 97 | format: int32 98 | type: integer 99 | name: 100 | description: Name is the specific name of current instance 101 | type: string 102 | priority: 103 | default: 1 104 | description: Priority range from 1 - 10 inclusive, higher priority 105 | means more workload will be distributed to the instance 106 | format: int32 107 | maximum: 10 108 | minimum: 1 109 | type: integer 110 | rpcPort: 111 | default: 9090 112 | description: Port for gRPC communication 113 | format: int32 114 | type: integer 115 | required: 116 | - cores 117 | type: object 118 | status: 119 | description: DatafuseComputeInstanceStatus defines the observed state 120 | of DatafuseComputeInstance 121 | properties: 122 | status: 123 | description: 'INSERT ADDITIONAL STATUS FIELD - define observed state 124 | of cluster Important: Run "make" to regenerate code after modifying 125 | this file' 126 | type: string 127 | type: object 128 | type: object 129 | served: true 130 | storage: true 131 | subresources: 132 | status: {} 133 | status: 134 | acceptedNames: 135 | kind: "" 136 | plural: "" 137 | conditions: [] 138 | storedVersions: [] 139 | -------------------------------------------------------------------------------- /config/crd/bases/cache/datafuse.datafuselabs.io_datafusecomputesets.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.4.1 8 | creationTimestamp: null 9 | name: datafusecomputesets.datafuse.datafuselabs.io 10 | spec: 11 | group: datafuse.datafuselabs.io 12 | names: 13 | kind: DatafuseComputeSet 14 | listKind: DatafuseComputeSetList 15 | plural: datafusecomputesets 16 | singular: datafusecomputeset 17 | scope: Namespaced 18 | versions: 19 | - name: v1alpha1 20 | schema: 21 | openAPIV3Schema: 22 | description: DatafuseComputeSet is the Schema for the datafusecomputesets 23 | API 24 | properties: 25 | apiVersion: 26 | description: 'APIVersion defines the versioned schema of this representation 27 | of an object. Servers should convert recognized schemas to the latest 28 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 29 | type: string 30 | kind: 31 | description: 'Kind is a string value representing the REST resource this 32 | object represents. Servers may infer this from the endpoint the client 33 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 34 | type: string 35 | metadata: 36 | type: object 37 | spec: 38 | description: DatafuseComputeSetSpec defines the desired state of DatafuseComputeSet 39 | properties: 40 | clickhousePort: 41 | default: 9000 42 | description: Port open to Clickhouse Client connection 43 | format: int32 44 | type: integer 45 | coreLimit: 46 | default: 1200m 47 | description: CoreLimit specifies a hard limit on CPU cores for the 48 | instance. 
49 | type: string 50 | cores: 51 | default: 1 52 | description: Num of cpus for the instance, 53 | format: int32 54 | minimum: 1 55 | type: integer 56 | httpPort: 57 | default: 8080 58 | description: 'Port for warp HTTP connection, can get cluster infomation 59 | and support to add/remove port We also use HTTP port for health 60 | check and readiness check TODO(zhihanz) docs on readiness check 61 | difference between leaders and workers example: https://github.com/datafuselabs/datafuse/blob/master/fusequery/example/cluster.sh' 62 | format: int32 63 | type: integer 64 | image: 65 | default: datafuselabs/fuse-query:latest 66 | description: Image is the container image to use. Overrides Spec.Image 67 | if set. 68 | type: string 69 | imagePullPolicy: 70 | default: Always 71 | description: ImagePullPolicy is the image pull policy for the driver, 72 | executor, and init-container. 73 | type: string 74 | labels: 75 | additionalProperties: 76 | type: string 77 | description: Labels are the Kubernetes labels to be added to the pod. 78 | type: object 79 | memory: 80 | default: 512m 81 | description: Memory is the amount of memory to request for the instance. 82 | type: string 83 | memorylimit: 84 | default: 512m 85 | description: MemoryLimit is the amount of memory limit for the instance. 86 | in MiB 87 | type: string 88 | metricsPort: 89 | default: 7070 90 | description: Port for metrics exporter 91 | format: int32 92 | type: integer 93 | mysqlPort: 94 | default: 3307 95 | description: Port open to Mysql Client connection 96 | format: int32 97 | type: integer 98 | name: 99 | description: Name is the specific name of current instance 100 | type: string 101 | priority: 102 | default: 1 103 | description: Priority range from 1 - 10 inclusive, higher priority 104 | means more workload will be distributed to the instance 105 | format: int32 106 | maximum: 10 107 | minimum: 1 108 | type: integer 109 | replicas: 110 | description: Number of compute instances 111 | format: int32 112 | minimum: 0 113 | type: integer 114 | rpcPort: 115 | default: 9090 116 | description: Port for gRPC communication 117 | format: int32 118 | type: integer 119 | required: 120 | - cores 121 | type: object 122 | status: 123 | description: DatafuseComputeSetStatus defines the observed state of DatafuseComputeSet 124 | properties: 125 | instancestatus: 126 | additionalProperties: 127 | type: string 128 | type: object 129 | replicas: 130 | description: 'INSERT ADDITIONAL STATUS FIELD - define observed state 131 | of cluster Important: Run "make" to regenerate code after modifying 132 | this file' 133 | format: int32 134 | type: integer 135 | selector: 136 | type: string 137 | type: object 138 | type: object 139 | served: true 140 | storage: true 141 | subresources: 142 | scale: 143 | labelSelectorPath: .status.selector 144 | specReplicasPath: .spec.replicas 145 | statusReplicasPath: .status.replicas 146 | status: {} 147 | status: 148 | acceptedNames: 149 | kind: "" 150 | plural: "" 151 | conditions: [] 152 | storedVersions: [] 153 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 
3 | # It should be run by config/default 4 | resources: 5 | # - bases/datafuse.datafuselabs.io_datafusecomputeinstances.yaml 6 | # - bases/datafuse.datafuselabs.io_datafusecomputesets.yaml 7 | - bases/datafuse.datafuselabs.io_datafusecomputegroups.yaml 8 | - bases/datafuse.datafuselabs.io_datafuseoperators.yaml 9 | #+kubebuilder:scaffold:crdkustomizeresource 10 | 11 | patchesStrategicMerge: 12 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 13 | # patches here are for enabling the conversion webhook for each CRD 14 | #- patches/webhook_in_datafusecomputeinstances.yaml 15 | #- patches/webhook_in_datafusecomputesets.yaml 16 | #- patches/webhook_in_datafusecomputegroups.yaml 17 | #- patches/webhook_in_datafuseoperators.yaml 18 | #+kubebuilder:scaffold:crdkustomizewebhookpatch 19 | 20 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 21 | # patches here are for enabling the CA injection for each CRD 22 | #- patches/cainjection_in_datafusecomputeinstances.yaml 23 | #- patches/cainjection_in_datafusecomputesets.yaml 24 | #- patches/cainjection_in_datafusecomputegroups.yaml 25 | #- patches/cainjection_in_datafuseoperators.yaml 26 | #+kubebuilder:scaffold:crdkustomizecainjectionpatch 27 | 28 | # the following config is for teaching kustomize how to do kustomization for CRDs. 29 | configurations: 30 | - kustomizeconfig.yaml 31 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | version: v1 8 | group: apiextensions.k8s.io 9 | path: spec/conversion/webhook/clientConfig/service/name 10 | 11 | namespace: 12 | - kind: CustomResourceDefinition 13 | version: v1 14 | group: apiextensions.k8s.io 15 | path: spec/conversion/webhook/clientConfig/service/namespace 16 | create: false 17 | 18 | varReference: 19 | - path: metadata/annotations 20 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_datafuseclusters.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: datafuseclusters.datafuse.datafuselabs.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_datafusecomputegroups.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: datafusecomputegroups.datafuse.datafuselabs.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_datafusecomputeinstances.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a 
directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: datafusecomputeinstances.datafuse.datafuselabs.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_datafusecomputesets.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: datafusecomputesets.datafuse.datafuselabs.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_datafuseoperators.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | annotations: 6 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 7 | name: datafuseoperators.datafuse.datafuselabs.io 8 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_datafuseclusters.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: datafuseclusters.datafuse.datafuselabs.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_datafusecomputegroups.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: datafusecomputegroups.datafuse.datafuselabs.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_datafusecomputeinstances.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: datafusecomputeinstances.datafuse.datafuselabs.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_datafusecomputesets.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: 
CustomResourceDefinition 4 | metadata: 5 | name: datafusecomputesets.datafuse.datafuselabs.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_datafuseoperators.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables a conversion webhook for the CRD 2 | apiVersion: apiextensions.k8s.io/v1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: datafuseoperators.datafuse.datafuselabs.io 6 | spec: 7 | conversion: 8 | strategy: Webhook 9 | webhook: 10 | clientConfig: 11 | service: 12 | namespace: system 13 | name: webhook-service 14 | path: /convert 15 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: datafuse-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: datafuse- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 20 | # crd/kustomization.yaml 21 | #- ../webhook 22 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 23 | #- ../certmanager 24 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 25 | #- ../prometheus 26 | 27 | patchesStrategicMerge: 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # If you want your controller-manager to expose the /metrics 30 | # endpoint w/o any authn/z, please comment the following line. 31 | # - manager_auth_proxy_patch.yaml 32 | 33 | # Mount the controller config file for loading manager configurations 34 | # through a ComponentConfig type 35 | #- manager_config_patch.yaml 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in 38 | # crd/kustomization.yaml 39 | #- manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | #- webhookcainjection_patch.yaml 45 | 46 | # the following config is for teaching kustomize how to do var substitution 47 | vars: 48 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
49 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 50 | # objref: 51 | # kind: Certificate 52 | # group: cert-manager.io 53 | # version: v1 54 | # name: serving-cert # this name should match the one in certificate.yaml 55 | # fieldref: 56 | # fieldpath: metadata.namespace 57 | #- name: CERTIFICATE_NAME 58 | # objref: 59 | # kind: Certificate 60 | # group: cert-manager.io 61 | # version: v1 62 | # name: serving-cert # this name should match the one in certificate.yaml 63 | #- name: SERVICE_NAMESPACE # namespace of the service 64 | # objref: 65 | # kind: Service 66 | # version: v1 67 | # name: webhook-service 68 | # fieldref: 69 | # fieldpath: metadata.namespace 70 | #- name: SERVICE_NAME 71 | # objref: 72 | # kind: Service 73 | # version: v1 74 | # name: webhook-service 75 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the 2 | # controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--v=10" 19 | ports: 20 | - containerPort: 8443 21 | name: https 22 | # - name: manager 23 | # args: 24 | # - "--health-probe-bind-address=:8081" 25 | # - "--metrics-bind-address=127.0.0.1:8080" 26 | # - "--leader-elect" 27 | -------------------------------------------------------------------------------- /config/default/manager_config_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | args: 12 | - "--config=controller_manager_config.yaml" 13 | volumeMounts: 14 | - name: manager-config 15 | mountPath: /controller_manager_config.yaml 16 | subPath: controller_manager_config.yaml 17 | volumes: 18 | - name: manager-config 19 | configMap: 20 | name: manager-config 21 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | 4 | generatorOptions: 5 | disableNameSuffixHash: true 6 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane: controller-manager 19 | replicas: 1 20 | template: 21 | metadata: 22 | labels: 23 | control-plane: controller-manager 24 | spec: 25 | securityContext: 26 | runAsNonRoot: true 27 | containers: 28 | - command: 29 | - /manager 30 | # 
args: 31 | # - --leader-elect 32 | image: zhihanz/controller:latest 33 | name: manager 34 | securityContext: 35 | allowPrivilegeEscalation: false 36 | # livenessProbe: 37 | # httpGet: 38 | # path: /healthz 39 | # port: 8081 40 | # initialDelaySeconds: 15 41 | # periodSeconds: 20 42 | # readinessProbe: 43 | # httpGet: 44 | # path: /readyz 45 | # port: 8081 46 | # initialDelaySeconds: 5 47 | # periodSeconds: 10 48 | resources: 49 | limits: 50 | cpu: 100m 51 | memory: 30Mi 52 | requests: 53 | cpu: 100m 54 | memory: 20Mi 55 | terminationGracePeriodSeconds: 10 56 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | selector: 15 | matchLabels: 16 | control-plane: controller-manager 17 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_client_clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: metrics-reader 5 | rules: 6 | - nonResourceURLs: ["/metrics"] 7 | verbs: ["get"] 8 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | -------------------------------------------------------------------------------- /config/rbac/datafusecluster_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to 
edit datafuseclusters. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafusecluster-editor-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafuseclusters 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - datafuse.datafuselabs.io 21 | resources: 22 | - datafuseclusters/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/datafusecluster_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view datafuseclusters. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafusecluster-viewer-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafuseclusters 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - datafuse.datafuselabs.io 17 | resources: 18 | - datafuseclusters/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/datafusecomputegroup_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit datafusecomputegroups. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafusecomputegroup-editor-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafusecomputegroups 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - datafuse.datafuselabs.io 21 | resources: 22 | - datafusecomputegroups/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/datafusecomputegroup_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view datafusecomputegroups. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafusecomputegroup-viewer-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafusecomputegroups 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - datafuse.datafuselabs.io 17 | resources: 18 | - datafusecomputegroups/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/datafusecomputeinstance_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit datafusecomputeinstances. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafusecomputeinstance-editor-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafusecomputeinstances 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - datafuse.datafuselabs.io 21 | resources: 22 | - datafusecomputeinstances/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/datafusecomputeinstance_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view datafusecomputeinstances. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafusecomputeinstance-viewer-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafusecomputeinstances 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - datafuse.datafuselabs.io 17 | resources: 18 | - datafusecomputeinstances/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/datafusecomputeset_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit datafusecomputesets. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafusecomputeset-editor-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafusecomputesets 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - datafuse.datafuselabs.io 21 | resources: 22 | - datafusecomputesets/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/datafusecomputeset_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view datafusecomputesets. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafusecomputeset-viewer-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafusecomputesets 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - datafuse.datafuselabs.io 17 | resources: 18 | - datafusecomputesets/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/datafuseoperator_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to edit datafuseoperators. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafuseoperator-editor-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafuseoperators 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - datafuse.datafuselabs.io 21 | resources: 22 | - datafuseoperators/status 23 | verbs: 24 | - get 25 | -------------------------------------------------------------------------------- /config/rbac/datafuseoperator_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions for end users to view datafuseoperators. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: datafuseoperator-viewer-role 6 | rules: 7 | - apiGroups: 8 | - datafuse.datafuselabs.io 9 | resources: 10 | - datafuseoperators 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - datafuse.datafuselabs.io 17 | resources: 18 | - datafuseoperators/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | # Comment the following 4 lines if you want to disable 7 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 8 | # which protects your /metrics endpoint. 9 | - auth_proxy_service.yaml 10 | - auth_proxy_role.yaml 11 | - auth_proxy_role_binding.yaml 12 | - auth_proxy_client_clusterrole.yaml 13 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | - coordination.k8s.io 10 | resources: 11 | - configmaps 12 | - leases 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - create 18 | - update 19 | - patch 20 | - delete 21 | - apiGroups: 22 | - "" 23 | resources: 24 | - events 25 | verbs: 26 | - create 27 | - patch 28 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | creationTimestamp: null 7 | name: manager-role 8 | rules: 9 | - apiGroups: 10 | - "" 11 | resources: 12 | - events 13 | verbs: 14 | - create 15 | - get 16 | - list 17 | - patch 18 | - update 19 | - watch 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - pods 24 | verbs: 25 | - create 26 | - delete 27 | - get 28 | - list 29 | - patch 30 | - update 31 | - watch 32 | - apiGroups: 33 | - "" 34 | resources: 35 | - pods/status 36 | verbs: 37 | - get 38 | - apiGroups: 39 | - "" 40 | resources: 41 | - services 42 | verbs: 43 | - create 44 | - delete 45 | - get 46 | - list 47 | - patch 48 | - update 49 | - watch 50 | - apiGroups: 51 | - "" 52 | resources: 53 | - services/status 54 | verbs: 55 | - get 56 | - apiGroups: 57 | - apps 58 | resources: 59 | - deployments 60 | verbs: 61 | - create 62 | - delete 63 | - get 64 | - list 65 | - patch 66 | - update 67 | - watch 68 | - apiGroups: 69 | - apps 70 | resources: 71 | - deployments/status 72 | verbs: 73 | - get 74 | - apiGroups: 75 | - datafuse.datafuselabs.io 76 | resources: 77 | - datafusecomputegroups 78 | 
verbs: 79 | - create 80 | - delete 81 | - get 82 | - list 83 | - patch 84 | - update 85 | - watch 86 | - apiGroups: 87 | - datafuse.datafuselabs.io 88 | resources: 89 | - datafusecomputegroups/status 90 | verbs: 91 | - get 92 | - patch 93 | - update 94 | - apiGroups: 95 | - datafuse.datafuselabs.io 96 | resources: 97 | - datafuseoperators 98 | verbs: 99 | - create 100 | - delete 101 | - get 102 | - list 103 | - patch 104 | - update 105 | - watch 106 | - apiGroups: 107 | - datafuse.datafuselabs.io 108 | resources: 109 | - datafuseoperators/status 110 | verbs: 111 | - get 112 | - patch 113 | - update 114 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/samples/datafuse_v1alpha1_datafusecomputegroup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseComputeGroup 3 | metadata: 4 | name: datafusecomputegroup-sample 5 | namespace: default 6 | spec: 7 | # Add fields here 8 | leaders: 9 | spec: 10 | cores: 1 11 | workers: 12 | - spec: 13 | cores: 3 14 | - spec: 15 | cores: 2 16 | replicas: 2 -------------------------------------------------------------------------------- /config/samples/datafuse_v1alpha1_datafusecomputeinstance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseComputeInstance 3 | metadata: 4 | name: datafusecomputeinstance-sample 5 | spec: 6 | # Add fields here 7 | cores: 1 8 | -------------------------------------------------------------------------------- /config/samples/datafuse_v1alpha1_datafusecomputeset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseComputeSet 3 | metadata: 4 | name: datafusecomputeset-sample 5 | spec: 6 | # Add fields here 7 | replicas: 1 8 | cores: 1 -------------------------------------------------------------------------------- /config/samples/datafuse_v1alpha1_datafuseoperator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseOperator 3 | metadata: 4 | name: datafuseoperator-sample 5 | spec: 6 | # Add fields here 7 | foo: bar 8 | -------------------------------------------------------------------------------- /config/samples/default/datafuse_v1alpha1_datafusecluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseOperator 3 | metadata: 4 | name: datafusecluster-default 5 | namespace: default 6 | spec: 7 | computeGroups: 8 | - metadata: 9 | name: datafusegroup-1 10 | namespace: default 11 | spec: 12 | leaders: 13 | cores: 1 14 | # workers: 15 | # - spec: 16 | # cores: 1 17 | # - spec: 18 | # cores: 3 19 | # - spec: 20 | # leaders: 21 | # spec: 22 | # cores: 3 23 | # workers: 24 | # - spec: 25 | # cores: 3 
26 | # - spec: 27 | # cores: 5 -------------------------------------------------------------------------------- /config/samples/default/datafuse_v1alpha1_datafusecomputegroup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseComputeGroup 3 | metadata: 4 | name: datafusecomputegroup-default 5 | spec: 6 | # Add fields here 7 | leaders: 8 | spec: 9 | cores: 1 10 | # workers: 11 | # - spec: 12 | # cores: 1 13 | # - spec: 14 | # cores: 2 15 | -------------------------------------------------------------------------------- /config/samples/default/datafuse_v1alpha1_datafusecomputeinstance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseComputeInstance 3 | metadata: 4 | name: datafusecomputeinstance-default 5 | spec: 6 | # Add fields here 7 | cores: 1 8 | -------------------------------------------------------------------------------- /config/samples/default/datafuse_v1alpha1_datafusecomputeset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseComputeSet 3 | metadata: 4 | name: datafusecomputeset-sample 5 | spec: 6 | cores: 3 7 | -------------------------------------------------------------------------------- /docs/development-guide.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/databendlabs/datafuse-operator/f743e65d91153c1dd607850ba46d4596f3cdc2e6/docs/development-guide.md -------------------------------------------------------------------------------- /docs/quick-start.md: -------------------------------------------------------------------------------- 1 | # Quick Start Guide 2 | For a more detailed introduction to how the datafuse operator works, please take a look at the [User Guide](user-guide.md). 3 | If you want to contribute or change the API, please read the [Development Guide](development-guide.md) first. 4 | 5 | ## Table of Contents 6 | * [Installation](#installation) 7 | * [Bootstrapping a cluster](#bootstrap-a-cluster) 8 | * [Testing and Debugging](#testing-the-cluster) 9 | * [Configuration](#configuration) 10 | 11 | ## Installation 12 | ### Environment setup 13 | 1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/) 14 | 2. Install [Minikube](https://minikube.sigs.k8s.io/docs/start/). This guide uses Kubernetes 1.20.2 15 | ```bash 16 | minikube start --cpus 8 --memory 16384 --driver kvm2 17 | ``` 18 | 19 | ### Install the operator through kubectl 20 | The following command installs the CRDs and deploys datafuse-operator in the datafuse-operator-system namespace 21 | ```bash 22 | kubectl apply -f ./examples/operator.yaml 23 | ``` 24 | 25 | Wait until the operator is ready 26 | ```bash 27 | kubectl get deployments.apps -n datafuse-operator-system 28 | 29 | NAME READY UP-TO-DATE AVAILABLE AGE 30 | datafuse-controller-manager 1/1 1 1 17s 31 | ``` 32 | 33 | 34 | ## Bootstrap a cluster 35 | The following command installs an operator holding one compute group; 36 | this group has 4 pods.
37 | ```bash 38 | kubectl apply -f ./examples/demo.yaml 39 | ``` 40 | 41 | ## Testing the cluster 42 | Validate the readiness status of each running instance 43 | ```bash 44 | kubectl get pods 45 | 46 | NAME READY STATUS RESTARTS AGE 47 | multi-worker-leader-6897f84d44-wjdz7 1/1 Running 0 77s 48 | multi-worker-worker0-6f48985b-xbgrv 1/1 Running 0 77s 49 | multi-worker-worker1-9bb968997-6jlht 1/1 Running 0 77s 50 | multi-worker-worker1-9bb968997-h7trt 1/1 Running 0 77s 51 | ``` 52 | 53 | ### Query through the service from the local machine 54 | 55 | Check the compute group service 56 | ```bash 57 | kubectl get svc 58 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 59 | kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 18h 60 | multi-worker ClusterIP 10.105.246.63 <none> 8080/TCP,3307/TCP,9000/TCP,9090/TCP,7070/TCP 4m4s 61 | ``` 62 | 63 | Port-forward the MySQL port of the service 64 | ```bash 65 | kubectl port-forward service/multi-worker 3307:3307 66 | ``` 67 | 68 | Connect to the compute group from the local machine 69 | ```bash 70 | mysql -h localhost -P 3307 71 | ``` 72 | 73 | Check all nodes in the cluster 74 | ```bash 75 | mysql> SELECT * from system.clusters; 76 | 77 | +----------------------------------------------+-----------------+----------+ 78 | | name | address | priority | 79 | +----------------------------------------------+-----------------+----------+ 80 | | default/multi-worker-worker1-9bb968997-h7trt | 172.17.0.6:9090 | 1 | 81 | | default/multi-worker-worker1-9bb968997-6jlht | 172.17.0.7:9090 | 1 | 82 | | default/multi-worker-worker0-6f48985b-xbgrv | 172.17.0.5:9090 | 2 | 83 | | default/multi-worker-leader-6897f84d44-wjdz7 | 172.17.0.4:9090 | 1 | 84 | +----------------------------------------------+-----------------+----------+ 85 | ``` 86 | 87 | Run SQL queries 88 | ```bash 89 | mysql> SELECT sum(number) FROM numbers_mt(100000000000); 90 | +--------------------+ 91 | | sum(number) | 92 | +--------------------+ 93 | | 932355974711512064 | 94 | +--------------------+ 95 | 1 row in set (11.71 sec) 96 | ``` 97 | ### Check the logs of each compute instance 98 | Use the kubectl logs command to inspect the query execution plan on each node 99 | ```bash 100 | kubectl logs multi-worker-leader-6897f84d44-wjdz7 fusequery 101 | ``` -------------------------------------------------------------------------------- /docs/user-guide.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/databendlabs/datafuse-operator/f743e65d91153c1dd607850ba46d4596f3cdc2e6/docs/user-guide.md -------------------------------------------------------------------------------- /examples/demo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseOperator 3 | metadata: 4 | name: datafusecluster-default 5 | namespace: default 6 | spec: 7 | computeGroups: 8 | - leaders: 9 | replicas: 1 10 | cores: 1 11 | image: datafusedev/fuse-query:latest # Optional 12 | memory: "1Gi" # Optional 13 | memorylimit: "1Gi" # Optional 14 | priority: 1 # Optional 15 | namespace: default 16 | name: multi-worker 17 | workers: 18 | - cores: 2 19 | replicas: 1 20 | memory: "1Gi" # Optional 21 | memorylimit: "1Gi" # Optional 22 | image: datafusedev/fuse-query:latest # Optional 23 | priority: 2 # Optional 24 | - cores: 1 25 | replicas: 2 26 | memory: "1Gi" # Optional 27 | memorylimit: "1Gi" # Optional 28 | image: datafusedev/fuse-query:latest # Optional 29 | priority: 1 # Optional 30 | 
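31 | # Expected result (see docs/quick-start.md): applying this manifest should create four pods in the default namespace: 32 | # one leader (multi-worker-leader-*), one 2-core worker (multi-worker-worker0-*) and two 1-core workers (multi-worker-worker1-*), 33 | # plus a ClusterIP service named multi-worker exposing ports 8080, 3307, 9000, 9090 and 7070.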
-------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module datafuselabs.io/datafuse-operator 2 | 3 | go 1.16 4 | 5 | require ( 6 | cloud.google.com/go v0.81.0 // indirect 7 | github.com/Azure/go-autorest/autorest v0.11.18 // indirect 8 | github.com/emicklei/go-restful v2.15.0+incompatible // indirect 9 | github.com/ghodss/yaml v1.0.0 // indirect 10 | github.com/go-openapi/jsonpointer v0.19.5 // indirect 11 | github.com/go-openapi/jsonreference v0.19.5 // indirect 12 | github.com/go-openapi/swag v0.19.15 // indirect 13 | github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect 14 | github.com/google/gofuzz v1.2.0 // indirect 15 | github.com/google/uuid v1.2.0 // indirect 16 | github.com/googleapis/gnostic v0.5.5 // indirect 17 | github.com/imdario/mergo v0.3.12 // indirect 18 | github.com/mailru/easyjson v0.7.7 // indirect 19 | github.com/namsral/flag v1.7.4-pre 20 | github.com/pkg/errors v0.9.1 21 | github.com/prometheus/client_golang v1.9.0 // indirect 22 | github.com/prometheus/common v0.22.0 // indirect 23 | github.com/rs/zerolog v1.21.0 24 | github.com/stretchr/testify v1.7.0 25 | go.uber.org/multierr v1.5.0 // indirect 26 | golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b // indirect 27 | golang.org/x/mod v0.4.2 // indirect 28 | golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6 // indirect 29 | golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78 // indirect 30 | golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect 31 | golang.org/x/sys v0.0.0-20210426230700-d19ff857e887 // indirect 32 | golang.org/x/term v0.0.0-20210422114643-f5beecf764ed // indirect 33 | gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect 34 | k8s.io/api v0.21.0 35 | k8s.io/apiextensions-apiserver v0.21.0 36 | k8s.io/apimachinery v0.21.0 37 | k8s.io/client-go v0.21.0 38 | k8s.io/code-generator v0.21.0 39 | k8s.io/gengo v0.0.0-20210203185629-de9496dff47b // indirect 40 | k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e // indirect 41 | k8s.io/kubectl v0.21.0 42 | k8s.io/utils v0.0.0-20210305010621-2afb4311ab10 // indirect 43 | sigs.k8s.io/controller-runtime v0.8.3 44 | sigs.k8s.io/structured-merge-diff/v4 v4.1.1 // indirect 45 | ) 46 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ -------------------------------------------------------------------------------- /hack/tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | package tools 3 | 4 | import _ "k8s.io/code-generator" 5 | -------------------------------------------------------------------------------- /hack/update-codegen.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | # corresponding to go mod init 8 | MODULE=datafuselabs.io/datafuse-operator 9 | # api package 10 | APIS_PKG=pkg/apis 11 | # generated output package 12 | OUTPUT_PKG=pkg/client 13 | # group-version such as foo:v1alpha1 14 | GROUP_VERSION=datafuse:v1alpha1 15 | 16 | SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. 17 | CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)} 18 | mkdir -p ${SCRIPT_ROOT}/tmp 19 | mkdir -p ${SCRIPT_ROOT}/pkg 20 | mkdir -p ${SCRIPT_ROOT}/pkg/client 21 | # generate the code with: 22 | # --output-base because this script should also be able to run inside the vendor dir of 23 | # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir 24 | # instead of the $GOPATH directly. For normal projects this can be dropped. 25 | bash "${CODEGEN_PKG}"/generate-groups.sh all \ 26 | ${MODULE}/${OUTPUT_PKG} ${MODULE}/${APIS_PKG} \ 27 | ${GROUP_VERSION} \ 28 | --go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt \ 29 | --output-base "${SCRIPT_ROOT}/tmp" 30 | # --output-base "${SCRIPT_ROOT}/../../.." \ 31 | mv ${SCRIPT_ROOT}/tmp/${MODULE}/${OUTPUT_PKG}/* ${SCRIPT_ROOT}/pkg/client/ 32 | rm -rf ${SCRIPT_ROOT}/tmp -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package main 18 | 19 | import ( 20 | 21 | // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) 22 | // to ensure that exec-entrypoint and run can make use of them. 
23 | 24 | "os" 25 | "os/signal" 26 | "syscall" 27 | "time" 28 | 29 | "github.com/namsral/flag" 30 | "github.com/rs/zerolog/log" 31 | apiv1 "k8s.io/api/core/v1" 32 | "k8s.io/apimachinery/pkg/runtime" 33 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 34 | kubeinformers "k8s.io/client-go/informers" 35 | "k8s.io/client-go/kubernetes" 36 | clientgoscheme "k8s.io/client-go/kubernetes/scheme" 37 | _ "k8s.io/client-go/plugin/pkg/client/auth" 38 | "k8s.io/client-go/rest" 39 | "k8s.io/client-go/tools/clientcmd" 40 | 41 | //+kubebuilder:scaffold:imports 42 | datafusev1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 43 | crdclientset "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned" 44 | crdinformers "datafuselabs.io/datafuse-operator/pkg/client/informers/externalversions" 45 | "datafuselabs.io/datafuse-operator/pkg/controllers/operator" 46 | register "datafuselabs.io/datafuse-operator/pkg/controllers/register" 47 | "datafuselabs.io/datafuse-operator/pkg/controllers/utils" 48 | datafuseutils "datafuselabs.io/datafuse-operator/utils" 49 | ) 50 | 51 | var ( 52 | scheme = runtime.NewScheme() 53 | master = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") 54 | controllerThreads = flag.Int("controller-threads", 10, "Number of worker threads used by the SparkApplication controller.") 55 | resyncInterval = flag.Int("resync-interval", 30, "Informer resync interval in seconds.") 56 | namespace = flag.String("namespace", apiv1.NamespaceAll, "The Kubernetes namespace to manage. Will manage custom resource objects of the managed CRD types for the whole cluster if unset.") 57 | metricsAddr = flag.String("metrics-addr", ":8080", "The address the metric endpoint binds to.") 58 | enableLeaderElectio = flag.Bool("enable-leader-election", false, 59 | "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") 60 | ) 61 | 62 | func init() { 63 | utilruntime.Must(clientgoscheme.AddToScheme(scheme)) 64 | 65 | utilruntime.Must(datafusev1alpha1.AddToScheme(scheme)) 66 | //+kubebuilder:scaffold:scheme 67 | } 68 | 69 | func main() { 70 | flag.StringVar(&datafuseutils.KubeConfig, "kubeconfig", "", "Path to a kube config. 
Only required if out-of-cluster.") 71 | signalCh := make(chan os.Signal, 1) 72 | signal.Notify(signalCh, syscall.SIGINT, syscall.SIGTERM) 73 | flag.Parse() 74 | stopCh := datafuseutils.SetupSignalHandler() 75 | log.Info().Msg(datafuseutils.KubeConfig) 76 | kubeConfig, err := buildConfig(*master, datafuseutils.KubeConfig) 77 | if err != nil { 78 | log.Fatal().Msgf("Error building kubernetes config: %s", err) 79 | } 80 | kubeClient, err := kubernetes.NewForConfig(kubeConfig) 81 | if err != nil { 82 | log.Fatal().Msgf("Error building kubernetes client: %s", err) 83 | } 84 | crdClient, err := crdclientset.NewForConfig(kubeConfig) 85 | if err != nil { 86 | log.Fatal().Msgf("Error building crd client: %s", err) 87 | } 88 | // TODO prometheus etc controllers 89 | 90 | var ( 91 | kubeInformerFactory kubeinformers.SharedInformerFactory 92 | crdInformerFactory crdinformers.SharedInformerFactory 93 | ) 94 | if *namespace != apiv1.NamespaceAll { 95 | kubeInformerFactory = kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, time.Second*30, kubeinformers.WithNamespace(*namespace)) 96 | crdInformerFactory = crdinformers.NewSharedInformerFactoryWithOptions(crdClient, time.Second*30, crdinformers.WithNamespace(*namespace)) 97 | } else { 98 | kubeInformerFactory = kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30) 99 | crdInformerFactory = crdinformers.NewSharedInformerFactory(crdClient, time.Second*30) 100 | } 101 | // controller := cluster.NewController(kubeClient, crdClient, 102 | // kubeInformerFactory.Apps().V1().Deployments(), 103 | // kubeInformerFactory.Core().V1().Services(), 104 | // crdInformerFactory.Datafuse().V1alpha1().DatafuseComputeGroups(), 105 | // crdInformerFactory.Datafuse().V1alpha1().DatafuseOperators(), 106 | // ) 107 | controller := operator.NewController(&utils.OperatorSetter{K8sClient: kubeClient, Client: crdClient, AllNS: true}, 108 | kubeInformerFactory.Apps().V1().Deployments(), kubeInformerFactory.Core().V1().Services(), 109 | crdInformerFactory.Datafuse().V1alpha1().DatafuseComputeGroups(), crdInformerFactory.Datafuse().V1alpha1().DatafuseOperators()) 110 | rc, err := register.NewRegisterController(®ister.RegistSetter{AllNS: true, Namspaces: []string{}, K8sClient: kubeClient}) 111 | go kubeInformerFactory.Start(stopCh) 112 | go crdInformerFactory.Start(stopCh) 113 | go controller.Start(12, stopCh) 114 | rc.Run(stopCh) 115 | } 116 | 117 | func buildConfig(masterURL string, kubeConfig string) (*rest.Config, error) { 118 | if kubeConfig != "" { 119 | return clientcmd.BuildConfigFromFlags(masterURL, kubeConfig) 120 | } 121 | return rest.InClusterConfig() 122 | } 123 | -------------------------------------------------------------------------------- /manifests/datafuse-operator.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | control-plane: controller-manager 7 | name: datafuse-controller-manager 8 | namespace: datafuse-operator-system 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | control-plane: controller-manager 14 | template: 15 | metadata: 16 | labels: 17 | control-plane: controller-manager 18 | spec: 19 | containers: 20 | - command: 21 | - /manager 22 | image: zhihanz/controller:latest 23 | name: manager 24 | resources: 25 | limits: 26 | cpu: 100m 27 | memory: 30Mi 28 | requests: 29 | cpu: 100m 30 | memory: 20Mi 31 | securityContext: 32 | allowPrivilegeEscalation: false 33 | securityContext: 34 | runAsNonRoot: 
true 35 | terminationGracePeriodSeconds: 10 36 | 37 | -------------------------------------------------------------------------------- /manifests/datafuse-rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: datafuse-leader-election-role 6 | namespace: datafuse-operator-system 7 | rules: 8 | - apiGroups: 9 | - "" 10 | - coordination.k8s.io 11 | resources: 12 | - configmaps 13 | - leases 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | - create 19 | - update 20 | - patch 21 | - delete 22 | - apiGroups: 23 | - "" 24 | resources: 25 | - events 26 | verbs: 27 | - create 28 | - patch 29 | 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: ClusterRole 33 | metadata: 34 | creationTimestamp: null 35 | name: datafuse-manager-role 36 | rules: 37 | - apiGroups: 38 | - "" 39 | resources: 40 | - events 41 | verbs: 42 | - create 43 | - get 44 | - list 45 | - patch 46 | - update 47 | - watch 48 | - apiGroups: 49 | - "" 50 | resources: 51 | - pods 52 | verbs: 53 | - create 54 | - delete 55 | - get 56 | - list 57 | - patch 58 | - update 59 | - watch 60 | - apiGroups: 61 | - "" 62 | resources: 63 | - pods/status 64 | verbs: 65 | - get 66 | - apiGroups: 67 | - "" 68 | resources: 69 | - services 70 | verbs: 71 | - create 72 | - delete 73 | - get 74 | - list 75 | - patch 76 | - update 77 | - watch 78 | - apiGroups: 79 | - "" 80 | resources: 81 | - services/status 82 | verbs: 83 | - get 84 | - apiGroups: 85 | - apps 86 | resources: 87 | - deployments 88 | verbs: 89 | - create 90 | - delete 91 | - get 92 | - list 93 | - patch 94 | - update 95 | - watch 96 | - apiGroups: 97 | - apps 98 | resources: 99 | - deployments/status 100 | verbs: 101 | - get 102 | - apiGroups: 103 | - datafuse.datafuselabs.io 104 | resources: 105 | - datafusecomputegroups 106 | verbs: 107 | - create 108 | - delete 109 | - get 110 | - list 111 | - patch 112 | - update 113 | - watch 114 | - apiGroups: 115 | - datafuse.datafuselabs.io 116 | resources: 117 | - datafusecomputegroups/status 118 | verbs: 119 | - get 120 | - patch 121 | - update 122 | - apiGroups: 123 | - datafuse.datafuselabs.io 124 | resources: 125 | - datafuseoperators 126 | verbs: 127 | - create 128 | - delete 129 | - get 130 | - list 131 | - patch 132 | - update 133 | - watch 134 | - apiGroups: 135 | - datafuse.datafuselabs.io 136 | resources: 137 | - datafuseoperators/status 138 | verbs: 139 | - get 140 | - patch 141 | - update 142 | 143 | --- 144 | apiVersion: rbac.authorization.k8s.io/v1 145 | kind: ClusterRole 146 | metadata: 147 | name: datafuse-metrics-reader 148 | rules: 149 | - nonResourceURLs: 150 | - /metrics 151 | verbs: 152 | - get 153 | 154 | --- 155 | apiVersion: rbac.authorization.k8s.io/v1 156 | kind: ClusterRole 157 | metadata: 158 | name: datafuse-proxy-role 159 | rules: 160 | - apiGroups: 161 | - authentication.k8s.io 162 | resources: 163 | - tokenreviews 164 | verbs: 165 | - create 166 | - apiGroups: 167 | - authorization.k8s.io 168 | resources: 169 | - subjectaccessreviews 170 | verbs: 171 | - create 172 | 173 | --- 174 | apiVersion: rbac.authorization.k8s.io/v1 175 | kind: RoleBinding 176 | metadata: 177 | name: datafuse-leader-election-rolebinding 178 | namespace: datafuse-operator-system 179 | roleRef: 180 | apiGroup: rbac.authorization.k8s.io 181 | kind: Role 182 | name: datafuse-leader-election-role 183 | subjects: 184 | - kind: ServiceAccount 185 | name: default 186 | namespace: 
datafuse-operator-system 187 | 188 | --- 189 | apiVersion: rbac.authorization.k8s.io/v1 190 | kind: ClusterRoleBinding 191 | metadata: 192 | name: datafuse-manager-rolebinding 193 | roleRef: 194 | apiGroup: rbac.authorization.k8s.io 195 | kind: ClusterRole 196 | name: datafuse-manager-role 197 | subjects: 198 | - kind: ServiceAccount 199 | name: default 200 | namespace: datafuse-operator-system 201 | 202 | --- 203 | apiVersion: rbac.authorization.k8s.io/v1 204 | kind: ClusterRoleBinding 205 | metadata: 206 | name: datafuse-proxy-rolebinding 207 | roleRef: 208 | apiGroup: rbac.authorization.k8s.io 209 | kind: ClusterRole 210 | name: datafuse-proxy-role 211 | subjects: 212 | - kind: ServiceAccount 213 | name: default 214 | namespace: datafuse-operator-system 215 | 216 | -------------------------------------------------------------------------------- /pkg/apis/datafuse/v1alpha1/datafusecomputegroup_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // +kubebuilder:rbac:groups=datafuse.datafuselabs.io,resources=datafusecomputegroups,verbs=get;list;watch;create;update;patch;delete 17 | // +kubebuilder:rbac:groups=datafuse.datafuselabs.io,resources=datafusecomputegroups/status,verbs=get;update;patch 18 | package v1alpha1 19 | 20 | import ( 21 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 22 | ) 23 | 24 | type ComputeGroupState string 25 | 26 | const ( 27 | ComputeGroupDeployed ComputeGroupState = "Ready" 28 | ComputeGroupPending ComputeGroupState = "Pending" 29 | ComputeGroupCreated ComputeGroupState = "Created" 30 | ) 31 | 32 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 33 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
34 | 35 | // DatafuseComputeGroupSpec defines the desired state of DatafuseComputeGroup 36 | type DatafuseComputeGroupSpec struct { 37 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 38 | // Important: Run "make" to regenerate code after modifying this file 39 | 40 | // ComputeLeaders incorporates all workers to form a cluster, designed for HA purposes. 41 | // For performance reasons, 3 to 5 leaders are suggested. 42 | ComputeLeaders *DatafuseComputeSetSpec `json:"leaders,omitempty"` 43 | // ComputeWorkers are the worker sets of the cluster; workers within a set are identical 44 | ComputeWorkers []*DatafuseComputeSetSpec `json:"workers,omitempty"` 45 | // Define the name of the exposed service 46 | // +kubebuilder:validation:Type=string 47 | // +kubebuilder:validation:Required 48 | Name string `json:"name"` 49 | // Define the namespace where the group should exist 50 | // +kubebuilder:validation:Type=string 51 | // +kubebuilder:default=default 52 | Namespace string `json:"namespace,omitempty"` 53 | } 54 | 55 | // DatafuseComputeGroupStatus defines the observed state of DatafuseComputeGroup 56 | type DatafuseComputeGroupStatus struct { 57 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 58 | // Important: Run "make" to regenerate code after modifying this file 59 | ReadyComputeLeaders map[string]ComputeInstanceState `json:"readyleaders,omitempty"` 60 | ReadyComputeWorkers map[string]ComputeInstanceState `json:"readyworkers,omitempty"` 61 | // Define the current status of the group 62 | // +kubebuilder:validation:Type=string 63 | // +kubebuilder:default=Created 64 | Status ComputeGroupState `json:"status,omitempty"` 65 | GroupServiceStatus ComputeGroupServiceStatus `json:"service,omitempty"` 66 | } 67 | 68 | // ComputeGroupServiceStatus defines the service that the group is bound to 69 | type ComputeGroupServiceStatus struct { 70 | // Name is the name of the service for this group 71 | Name string `json:"name,omitempty"` 72 | } 73 | 74 | // +kubebuilder:object:root=true 75 | 76 | // DatafuseComputeGroup is the Schema for the datafusecomputegroups API 77 | //+k8s:openapi-gen=true 78 | //+genclient 79 | //+kubebuilder:resource:shortName=cg-df 80 | type DatafuseComputeGroup struct { 81 | metav1.TypeMeta `json:",inline"` 82 | metav1.ObjectMeta `json:"metadata,omitempty"` 83 | 84 | Spec DatafuseComputeGroupSpec `json:"spec,omitempty"` 85 | Status DatafuseComputeGroupStatus `json:"status,omitempty"` 86 | } 87 | 88 | // +kubebuilder:object:root=true 89 | 90 | // DatafuseComputeGroupList contains a list of DatafuseComputeGroup 91 | type DatafuseComputeGroupList struct { 92 | metav1.TypeMeta `json:",inline"` 93 | metav1.ListMeta `json:"metadata,omitempty"` 94 | Items []DatafuseComputeGroup `json:"items"` 95 | } 96 | 97 | func init() { 98 | SchemeBuilder.Register(&DatafuseComputeGroup{}, &DatafuseComputeGroupList{}) 99 | } 100 | -------------------------------------------------------------------------------- /pkg/apis/datafuse/v1alpha1/datafusecomputeinstance_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | package v1alpha1 18 | 19 | type ComputeInstanceState string 20 | 21 | const ( 22 | ComputeInstanceReadyState ComputeInstanceState = "READY" 23 | ComputeInstanceLivenessState ComputeInstanceState = "LIVE" 24 | ComputeInstancePendingState ComputeInstanceState = "Pending" 25 | ) 26 | 27 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 28 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 29 | 30 | // DatafuseComputeInstanceSpec defines the desired state of DatafuseComputeInstance 31 | type DatafuseComputeInstanceSpec struct { 32 | // Name is the specific name of current instance 33 | // +kubebuilder:validation:Type=string 34 | // +kubebuilder:validation:Optional 35 | Name *string `json:"name"` 36 | // Num of cpus for the instance, 37 | // +kubebuilder:validation:Type=integer 38 | // +kubebuilder:default=1 39 | // +kubebuilder:validation:Minimum=1 40 | // +kubebuilder:validation:Required 41 | Cores *int32 `json:"cores"` 42 | // CoreLimit specifies a hard limit on CPU cores for the instance. 43 | // +kubebuilder:validation:Type=string 44 | // +kubebuilder:default="1300m" 45 | // +kubebuilder:validation:Optional 46 | CoreLimit *string `json:"coreLimit"` 47 | // Memory is the amount of memory to request for the instance. 48 | // +kubebuilder:validation:Type=string 49 | // +kubebuilder:default="512Mi" 50 | // +kubebuilder:validation:Optional 51 | Memory *string `json:"memory"` 52 | // MemoryLimit is the amount of memory limit for the instance. in MiB 53 | // +kubebuilder:validation:Type=string 54 | // +kubebuilder:default="512Mi" 55 | // +kubebuilder:validation:Optional 56 | MemoryLimit *string `json:"memorylimit"` 57 | // Labels are the Kubernetes labels to be added to the pod. 58 | // +kubebuilder:validation:Optional 59 | Labels map[string]string `json:"labels"` 60 | // Image is the container image to use. Overrides Spec.Image if set. 61 | // +kubebuilder:validation:Type=string 62 | // +kubebuilder:default="datafusedev/fuse-query:latest" 63 | // +kubebuilder:validation:Optional 64 | Image *string `json:"image"` 65 | // ImagePullPolicy is the image pull policy for the driver, executor, and init-container. 
66 | // +kubebuilder:validation:Type=string 67 | // +kubebuilder:default=Always 68 | // +kubebuilder:validation:Optional 69 | ImagePullPolicy *string `json:"imagePullPolicy"` 70 | // Priority range from 1 - 10 inclusive, higher priority means more workload will be distributed to the instance 71 | // +kubebuilder:validation:Type=integer 72 | // +kubebuilder:default=1 73 | // +kubebuilder:validation:Minimum=1 74 | // +kubebuilder:validation:Maximum=10 75 | // +kubebuilder:validation:Optional 76 | Priority *int32 `json:"priority"` 77 | // Port open to Mysql Client connection 78 | // +kubebuilder:validation:Type=integer 79 | // +kubebuilder:default=3307 80 | // +kubebuilder:validation:Optional 81 | MysqlPort *int32 `json:"mysqlPort"` 82 | // Port open to Clickhouse Client connection 83 | // +kubebuilder:validation:Type=integer 84 | // +kubebuilder:default=9000 85 | // +kubebuilder:validation:Optional 86 | ClickhousePort *int32 `json:"clickhousePort"` 87 | // Port for warp HTTP connection, can get cluster infomation and support to add/remove port 88 | // We also use HTTP port for health check and readiness check 89 | //TODO(zhihanz) docs on readiness check difference between leaders and workers 90 | // example: https://github.com/datafuselabs/datafuse/blob/master/fusequery/example/cluster.sh 91 | // +kubebuilder:validation:Type=integer 92 | // +kubebuilder:default=8080 93 | // +kubebuilder:validation:Optional 94 | HTTPPort *int32 `json:"httpPort"` 95 | // Port for gRPC communication 96 | // +kubebuilder:validation:Type=integer 97 | // +kubebuilder:default=9090 98 | // +kubebuilder:validation:Optional 99 | RPCPort *int32 `json:"rpcPort"` 100 | // Port for metrics exporter 101 | // +kubebuilder:validation:Type=integer 102 | // +kubebuilder:default=7070 103 | // +kubebuilder:validation:Optional 104 | MetricsPort *int32 `json:"metricsPort"` 105 | } 106 | 107 | // DatafuseComputeInstanceStatus defines the observed state of DatafuseComputeInstance 108 | type DatafuseComputeInstanceStatus struct { 109 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 110 | // Important: Run "make" to regenerate code after modifying this file 111 | Status ComputeInstanceState `json:"status,omitempty"` 112 | } 113 | -------------------------------------------------------------------------------- /pkg/apis/datafuse/v1alpha1/datafusecomputeset_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | package v1alpha1 17 | 18 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 19 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
20 | 21 | // DatafuseComputeSetSpec defines the desired state of DatafuseComputeSet 22 | type DatafuseComputeSetSpec struct { 23 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 24 | // Important: Run "make" to regenerate code after modifying this file 25 | 26 | // Number of compute instances 27 | // +kubebuilder:validation:Type=integer 28 | // +kubebuilder:validation:Default=1 29 | // +kubebuilder:validation:Minimum=0 30 | // +kubebuilder:validation:Optional 31 | Replicas *int32 `json:"replicas,omitempty"` 32 | // Template for each idempotent instances 33 | DatafuseComputeInstanceSpec `json:",inline"` 34 | } 35 | 36 | // DatafuseComputeSetStatus defines the observed state of DatafuseComputeSet 37 | type DatafuseComputeSetStatus struct { 38 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 39 | // Important: Run "make" to regenerate code after modifying this file 40 | Replicas int32 `json:"replicas,omitempty"` 41 | Selector string `json:"selector,omitempty"` // this must be the string form of the selector 42 | InstancesStatus map[string]ComputeInstanceState `json:"instancestatus,omitempty"` // map from compute instance pod name to state 43 | } 44 | -------------------------------------------------------------------------------- /pkg/apis/datafuse/v1alpha1/datafuseoperator_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | // +kubebuilder:rbac:groups=datafuse.datafuselabs.io,resources=datafuseoperators,verbs=get;list;watch;create;update;patch;delete 18 | // +kubebuilder:rbac:groups=datafuse.datafuselabs.io,resources=datafuseoperators/status,verbs=get;update;patch 19 | // +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete 20 | // +kubebuilder:rbac:groups="",resources=events,verbs=get;list;watch;create;update;patch; 21 | // +kubebuilder:rbac:groups="",resources=pods/status,verbs=get 22 | // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete 23 | // +kubebuilder:rbac:groups="",resources=services/status,verbs=get 24 | // +kubebuilder:rbac:groups="apps",resources=deployments,verbs=get;list;watch;create;update;patch;delete 25 | // +kubebuilder:rbac:groups="apps",resources=deployments/status,verbs=get 26 | package v1alpha1 27 | 28 | import ( 29 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 | ) 31 | 32 | type OperatorState string 33 | 34 | const ( 35 | OperatorReady OperatorState = "Ready" 36 | OperatorPending OperatorState = "Pending" 37 | OperatorCreated OperatorState = "Created" 38 | ) 39 | 40 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 41 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
42 | 43 | // DatafuseOperatorSpec defines the desired state of DatafuseOperator 44 | type DatafuseOperatorSpec struct { 45 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 46 | // Important: Run "make" to regenerate code after modifying this file 47 | 48 | // Define a set of compute groups belongs to the cluster 49 | // +kubebuilder:validation:Required 50 | ComputeGroups []*DatafuseComputeGroupSpec `json:"computeGroups,omitempty"` 51 | // Fuse Query and Fuse Store will share the same version 52 | // +kubebuilder:validation:Type=string 53 | // +kubebuilder:validation:Default=latest 54 | // +kubebuilder:validation:Optional 55 | Version *string `json:"version,omitempty"` 56 | } 57 | 58 | // DatafuseOperatorStatus defines the observed state of DatafuseOperator 59 | type DatafuseOperatorStatus struct { 60 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 61 | // Important: Run "make" to regenerate code after modifying this file 62 | 63 | // state of each compute group. map from compute group name to their status 64 | ComputeGroupStates map[string]ComputeGroupState `json:"computeGroupStates,omitempty"` 65 | // +kubebuilder:validation:Enum=Created;Pending;Ready 66 | // +kubebuilder:validation:Default=Created 67 | // +kubebuilder:validation:Optional 68 | Status OperatorState `json:"status,omitempty"` 69 | } 70 | 71 | // +kubebuilder:object:root=true 72 | 73 | // DatafuseOperator is the Schema for the datafuseclusters API 74 | //+k8s:openapi-gen=true 75 | //+genclient 76 | //+kubebuilder:resource:shortName=op-df 77 | type DatafuseOperator struct { 78 | metav1.TypeMeta `json:",inline"` 79 | metav1.ObjectMeta `json:"metadata,omitempty"` 80 | 81 | Spec DatafuseOperatorSpec `json:"spec,omitempty"` 82 | Status DatafuseOperatorStatus `json:"status,omitempty"` 83 | } 84 | 85 | // +kubebuilder:object:root=true 86 | 87 | // DatafuseOperatorList contains a list of DatafuseCluster 88 | type DatafuseOperatorList struct { 89 | metav1.TypeMeta `json:",inline"` 90 | metav1.ListMeta `json:"metadata,omitempty"` 91 | Items []DatafuseOperator `json:"items"` 92 | } 93 | 94 | func init() { 95 | SchemeBuilder.Register(&DatafuseOperator{}, &DatafuseOperatorList{}) 96 | } 97 | -------------------------------------------------------------------------------- /pkg/apis/datafuse/v1alpha1/defaults.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | func SetDatafuseOperatorDefault(operator *DatafuseOperator) { 8 | if operator == nil { 9 | return 10 | } 11 | if operator.GetNamespace() == "" { 12 | operator.ObjectMeta.SetNamespace("default") 13 | } 14 | if operator.GetName() == "" { 15 | operator.ObjectMeta.SetName("default-operator") 16 | } 17 | if operator.Spec.ComputeGroups == nil { 18 | operator.Spec.ComputeGroups = make([]*DatafuseComputeGroupSpec, 0) 19 | } 20 | if operator.Status.Status == "" { 21 | operator.Status.Status = OperatorCreated 22 | } 23 | } 24 | 25 | func SetDatafuseComputeGroupDefaults(parentOperator *DatafuseOperator, group *DatafuseComputeGroup) { 26 | if parentOperator == nil || group == nil { 27 | return 28 | } 29 | if group.OwnerReferences == nil || !metav1.IsControlledBy(group, parentOperator) { 30 | group.OwnerReferences = []metav1.OwnerReference{} 31 | group.OwnerReferences = append(group.OwnerReferences, *metav1.NewControllerRef(parentOperator, SchemeGroupVersion.WithKind("DatafuseOperator"))) 32 | } 33 | if group.GetNamespace() == "" 
{ 34 | group.SetNamespace("default") 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /pkg/apis/datafuse/v1alpha1/defaults_test.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | func TestSetDatafuseComputeGroupDefaults(t *testing.T) { 11 | tests := []struct { 12 | name string 13 | parent *DatafuseOperator 14 | group *DatafuseComputeGroup 15 | expectedGroup *DatafuseComputeGroup 16 | }{ 17 | { 18 | name: "nil operator", 19 | parent: nil, 20 | group: nil, 21 | expectedGroup: nil, 22 | }, 23 | { 24 | name: "ok", 25 | parent: &DatafuseOperator{ 26 | ObjectMeta: metav1.ObjectMeta{ 27 | Name: "test", 28 | }, 29 | }, 30 | group: &DatafuseComputeGroup{ 31 | ObjectMeta: metav1.ObjectMeta{ 32 | Name: "test-group", 33 | }, 34 | }, 35 | expectedGroup: &DatafuseComputeGroup{ 36 | ObjectMeta: metav1.ObjectMeta{ 37 | Name: "test-group", 38 | }, 39 | }, 40 | }, 41 | } 42 | for _, tt := range tests { 43 | t.Run(tt.name, func(t *testing.T) { 44 | SetDatafuseComputeGroupDefaults(tt.parent, tt.group) 45 | if tt.group == nil || tt.parent == nil { 46 | return 47 | } 48 | assert.Equal(t, tt.expectedGroup.Name, tt.group.Name) 49 | assert.Equal(t, tt.group.OwnerReferences, []metav1.OwnerReference{*metav1.NewControllerRef(tt.parent, SchemeGroupVersion.WithKind("DatafuseOperator"))}) 50 | }) 51 | } 52 | } 53 | 54 | func TestSetDatafuseOperatorDefault(t *testing.T) { 55 | tests := []struct { 56 | name string 57 | parent *DatafuseOperator 58 | expectedOperator *DatafuseOperator 59 | }{ 60 | { 61 | name: "nil operator", 62 | parent: nil, 63 | expectedOperator: nil, 64 | }, 65 | { 66 | name: "ok", 67 | parent: &DatafuseOperator{ 68 | ObjectMeta: metav1.ObjectMeta{ 69 | Name: "test", 70 | }, 71 | }, 72 | expectedOperator: &DatafuseOperator{ 73 | ObjectMeta: metav1.ObjectMeta{ 74 | Name: "test", 75 | Namespace: "default", 76 | }, 77 | Spec: DatafuseOperatorSpec{ 78 | ComputeGroups: []*DatafuseComputeGroupSpec{}, 79 | }, 80 | Status: DatafuseOperatorStatus{ 81 | Status: OperatorCreated, 82 | }, 83 | }, 84 | }, 85 | } 86 | for _, tt := range tests { 87 | t.Run(tt.name, func(t *testing.T) { 88 | arg := tt.parent.DeepCopy() 89 | SetDatafuseOperatorDefault(arg) 90 | if tt.parent == nil { 91 | return 92 | } 93 | assert.Equal(t, tt.expectedOperator, arg) 94 | }) 95 | } 96 | } 97 | 98 | func strPtr(input string) *string { 99 | return &input 100 | } 101 | -------------------------------------------------------------------------------- /pkg/apis/datafuse/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | // Package v1alpha1 contains API Schema definitions for the datafuse v1alpha1 API group 18 | //+kubebuilder:object:generate=true 19 | //+groupName=datafuse.datafuselabs.io 20 | package v1alpha1 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime/schema" 24 | "sigs.k8s.io/controller-runtime/pkg/scheme" 25 | ) 26 | 27 | var ( 28 | // GroupVersion is group version used to register these objects 29 | GroupVersion = schema.GroupVersion{Group: "datafuse.datafuselabs.io", Version: "v1alpha1"} 30 | 31 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 32 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 33 | 34 | // AddToScheme adds the types in this group-version to the given scheme. 35 | AddToScheme = SchemeBuilder.AddToScheme 36 | ) 37 | -------------------------------------------------------------------------------- /pkg/apis/datafuse/v1alpha1/register.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/runtime/schema" 5 | ) 6 | 7 | // SchemeGroupVersion is group version used to register these objects. 8 | var SchemeGroupVersion = GroupVersion 9 | 10 | func Resource(resource string) schema.GroupResource { 11 | return SchemeGroupVersion.WithResource(resource).GroupResource() 12 | } 13 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/clientset.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | package versioned 19 | 20 | import ( 21 | "fmt" 22 | 23 | datafusev1alpha1 "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned/typed/datafuse/v1alpha1" 24 | discovery "k8s.io/client-go/discovery" 25 | rest "k8s.io/client-go/rest" 26 | flowcontrol "k8s.io/client-go/util/flowcontrol" 27 | ) 28 | 29 | type Interface interface { 30 | Discovery() discovery.DiscoveryInterface 31 | DatafuseV1alpha1() datafusev1alpha1.DatafuseV1alpha1Interface 32 | } 33 | 34 | // Clientset contains the clients for groups. Each group has exactly one 35 | // version included in a Clientset. 36 | type Clientset struct { 37 | *discovery.DiscoveryClient 38 | datafuseV1alpha1 *datafusev1alpha1.DatafuseV1alpha1Client 39 | } 40 | 41 | // DatafuseV1alpha1 retrieves the DatafuseV1alpha1Client 42 | func (c *Clientset) DatafuseV1alpha1() datafusev1alpha1.DatafuseV1alpha1Interface { 43 | return c.datafuseV1alpha1 44 | } 45 | 46 | // Discovery retrieves the DiscoveryClient 47 | func (c *Clientset) Discovery() discovery.DiscoveryInterface { 48 | if c == nil { 49 | return nil 50 | } 51 | return c.DiscoveryClient 52 | } 53 | 54 | // NewForConfig creates a new Clientset for the given config. 
55 | // If config's RateLimiter is not set and QPS and Burst are acceptable, 56 | // NewForConfig will generate a rate-limiter in configShallowCopy. 57 | func NewForConfig(c *rest.Config) (*Clientset, error) { 58 | configShallowCopy := *c 59 | if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { 60 | if configShallowCopy.Burst <= 0 { 61 | return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") 62 | } 63 | configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) 64 | } 65 | var cs Clientset 66 | var err error 67 | cs.datafuseV1alpha1, err = datafusev1alpha1.NewForConfig(&configShallowCopy) 68 | if err != nil { 69 | return nil, err 70 | } 71 | 72 | cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) 73 | if err != nil { 74 | return nil, err 75 | } 76 | return &cs, nil 77 | } 78 | 79 | // NewForConfigOrDie creates a new Clientset for the given config and 80 | // panics if there is an error in the config. 81 | func NewForConfigOrDie(c *rest.Config) *Clientset { 82 | var cs Clientset 83 | cs.datafuseV1alpha1 = datafusev1alpha1.NewForConfigOrDie(c) 84 | 85 | cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) 86 | return &cs 87 | } 88 | 89 | // New creates a new Clientset for the given RESTClient. 90 | func New(c rest.Interface) *Clientset { 91 | var cs Clientset 92 | cs.datafuseV1alpha1 = datafusev1alpha1.New(c) 93 | 94 | cs.DiscoveryClient = discovery.NewDiscoveryClient(c) 95 | return &cs 96 | } 97 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | // This package has the automatically generated clientset. 19 | package versioned 20 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/fake/clientset_generated.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 
17 | 18 | package fake 19 | 20 | import ( 21 | clientset "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned" 22 | datafusev1alpha1 "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned/typed/datafuse/v1alpha1" 23 | fakedatafusev1alpha1 "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned/typed/datafuse/v1alpha1/fake" 24 | "k8s.io/apimachinery/pkg/runtime" 25 | "k8s.io/apimachinery/pkg/watch" 26 | "k8s.io/client-go/discovery" 27 | fakediscovery "k8s.io/client-go/discovery/fake" 28 | "k8s.io/client-go/testing" 29 | ) 30 | 31 | // NewSimpleClientset returns a clientset that will respond with the provided objects. 32 | // It's backed by a very simple object tracker that processes creates, updates and deletions as-is, 33 | // without applying any validations and/or defaults. It shouldn't be considered a replacement 34 | // for a real clientset and is mostly useful in simple unit tests. 35 | func NewSimpleClientset(objects ...runtime.Object) *Clientset { 36 | o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder()) 37 | for _, obj := range objects { 38 | if err := o.Add(obj); err != nil { 39 | panic(err) 40 | } 41 | } 42 | 43 | cs := &Clientset{tracker: o} 44 | cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} 45 | cs.AddReactor("*", "*", testing.ObjectReaction(o)) 46 | cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { 47 | gvr := action.GetResource() 48 | ns := action.GetNamespace() 49 | watch, err := o.Watch(gvr, ns) 50 | if err != nil { 51 | return false, nil, err 52 | } 53 | return true, watch, nil 54 | }) 55 | 56 | return cs 57 | } 58 | 59 | // Clientset implements clientset.Interface. Meant to be embedded into a 60 | // struct to get a default implementation. This makes faking out just the method 61 | // you want to test easier. 62 | type Clientset struct { 63 | testing.Fake 64 | discovery *fakediscovery.FakeDiscovery 65 | tracker testing.ObjectTracker 66 | } 67 | 68 | func (c *Clientset) Discovery() discovery.DiscoveryInterface { 69 | return c.discovery 70 | } 71 | 72 | func (c *Clientset) Tracker() testing.ObjectTracker { 73 | return c.tracker 74 | } 75 | 76 | var _ clientset.Interface = &Clientset{} 77 | 78 | // DatafuseV1alpha1 retrieves the DatafuseV1alpha1Client 79 | func (c *Clientset) DatafuseV1alpha1() datafusev1alpha1.DatafuseV1alpha1Interface { 80 | return &fakedatafusev1alpha1.FakeDatafuseV1alpha1{Fake: &c.Fake} 81 | } 82 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/fake/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | // This package has the automatically generated fake clientset. 
19 | package fake 20 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/fake/register.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | package fake 19 | 20 | import ( 21 | datafusev1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 22 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | runtime "k8s.io/apimachinery/pkg/runtime" 24 | schema "k8s.io/apimachinery/pkg/runtime/schema" 25 | serializer "k8s.io/apimachinery/pkg/runtime/serializer" 26 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 27 | ) 28 | 29 | var scheme = runtime.NewScheme() 30 | var codecs = serializer.NewCodecFactory(scheme) 31 | 32 | var localSchemeBuilder = runtime.SchemeBuilder{ 33 | datafusev1alpha1.AddToScheme, 34 | } 35 | 36 | // AddToScheme adds all types of this clientset into the given scheme. This allows composition 37 | // of clientsets, like in: 38 | // 39 | // import ( 40 | // "k8s.io/client-go/kubernetes" 41 | // clientsetscheme "k8s.io/client-go/kubernetes/scheme" 42 | // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" 43 | // ) 44 | // 45 | // kclientset, _ := kubernetes.NewForConfig(c) 46 | // _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) 47 | // 48 | // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types 49 | // correctly. 50 | var AddToScheme = localSchemeBuilder.AddToScheme 51 | 52 | func init() { 53 | v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) 54 | utilruntime.Must(AddToScheme(scheme)) 55 | } 56 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/scheme/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | // This package contains the scheme of the automatically generated clientset. 19 | package scheme 20 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/scheme/register.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | package scheme 19 | 20 | import ( 21 | datafusev1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 22 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 23 | runtime "k8s.io/apimachinery/pkg/runtime" 24 | schema "k8s.io/apimachinery/pkg/runtime/schema" 25 | serializer "k8s.io/apimachinery/pkg/runtime/serializer" 26 | utilruntime "k8s.io/apimachinery/pkg/util/runtime" 27 | ) 28 | 29 | var Scheme = runtime.NewScheme() 30 | var Codecs = serializer.NewCodecFactory(Scheme) 31 | var ParameterCodec = runtime.NewParameterCodec(Scheme) 32 | var localSchemeBuilder = runtime.SchemeBuilder{ 33 | datafusev1alpha1.AddToScheme, 34 | } 35 | 36 | // AddToScheme adds all types of this clientset into the given scheme. This allows composition 37 | // of clientsets, like in: 38 | // 39 | // import ( 40 | // "k8s.io/client-go/kubernetes" 41 | // clientsetscheme "k8s.io/client-go/kubernetes/scheme" 42 | // aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" 43 | // ) 44 | // 45 | // kclientset, _ := kubernetes.NewForConfig(c) 46 | // _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) 47 | // 48 | // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types 49 | // correctly. 50 | var AddToScheme = localSchemeBuilder.AddToScheme 51 | 52 | func init() { 53 | v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) 54 | utilruntime.Must(AddToScheme(Scheme)) 55 | } 56 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/typed/datafuse/v1alpha1/datafuse_client.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | package v1alpha1 19 | 20 | import ( 21 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 22 | "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned/scheme" 23 | rest "k8s.io/client-go/rest" 24 | ) 25 | 26 | type DatafuseV1alpha1Interface interface { 27 | RESTClient() rest.Interface 28 | DatafuseComputeGroupsGetter 29 | DatafuseOperatorsGetter 30 | } 31 | 32 | // DatafuseV1alpha1Client is used to interact with features provided by the datafuse group. 
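// For illustration only (not generated code): callers normally build this client from a
// rest.Config via NewForConfig. The kubeconfig path below is an assumption; imports
// (clientcmd, context, metav1) are elided in this sketch.
//
//	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // k8s.io/client-go/tools/clientcmd
//	if err != nil {
//		panic(err)
//	}
//	client, err := NewForConfig(cfg)
//	if err != nil {
//		panic(err)
//	}
//	groups, err := client.DatafuseComputeGroups("default").List(context.TODO(), metav1.ListOptions{})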
33 | type DatafuseV1alpha1Client struct { 34 | restClient rest.Interface 35 | } 36 | 37 | func (c *DatafuseV1alpha1Client) DatafuseComputeGroups(namespace string) DatafuseComputeGroupInterface { 38 | return newDatafuseComputeGroups(c, namespace) 39 | } 40 | 41 | func (c *DatafuseV1alpha1Client) DatafuseOperators(namespace string) DatafuseOperatorInterface { 42 | return newDatafuseOperators(c, namespace) 43 | } 44 | 45 | // NewForConfig creates a new DatafuseV1alpha1Client for the given config. 46 | func NewForConfig(c *rest.Config) (*DatafuseV1alpha1Client, error) { 47 | config := *c 48 | if err := setConfigDefaults(&config); err != nil { 49 | return nil, err 50 | } 51 | client, err := rest.RESTClientFor(&config) 52 | if err != nil { 53 | return nil, err 54 | } 55 | return &DatafuseV1alpha1Client{client}, nil 56 | } 57 | 58 | // NewForConfigOrDie creates a new DatafuseV1alpha1Client for the given config and 59 | // panics if there is an error in the config. 60 | func NewForConfigOrDie(c *rest.Config) *DatafuseV1alpha1Client { 61 | client, err := NewForConfig(c) 62 | if err != nil { 63 | panic(err) 64 | } 65 | return client 66 | } 67 | 68 | // New creates a new DatafuseV1alpha1Client for the given RESTClient. 69 | func New(c rest.Interface) *DatafuseV1alpha1Client { 70 | return &DatafuseV1alpha1Client{c} 71 | } 72 | 73 | func setConfigDefaults(config *rest.Config) error { 74 | gv := v1alpha1.SchemeGroupVersion 75 | config.GroupVersion = &gv 76 | config.APIPath = "/apis" 77 | config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() 78 | 79 | if config.UserAgent == "" { 80 | config.UserAgent = rest.DefaultKubernetesUserAgent() 81 | } 82 | 83 | return nil 84 | } 85 | 86 | // RESTClient returns a RESTClient that is used to communicate 87 | // with API server by this client implementation. 88 | func (c *DatafuseV1alpha1Client) RESTClient() rest.Interface { 89 | if c == nil { 90 | return nil 91 | } 92 | return c.restClient 93 | } 94 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/typed/datafuse/v1alpha1/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | // This package has the automatically generated typed clients. 19 | package v1alpha1 20 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/typed/datafuse/v1alpha1/fake/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | // Package fake has the automatically generated clients. 19 | package fake 20 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/typed/datafuse/v1alpha1/fake/fake_datafuse_client.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 17 | 18 | package fake 19 | 20 | import ( 21 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned/typed/datafuse/v1alpha1" 22 | rest "k8s.io/client-go/rest" 23 | testing "k8s.io/client-go/testing" 24 | ) 25 | 26 | type FakeDatafuseV1alpha1 struct { 27 | *testing.Fake 28 | } 29 | 30 | func (c *FakeDatafuseV1alpha1) DatafuseComputeGroups(namespace string) v1alpha1.DatafuseComputeGroupInterface { 31 | return &FakeDatafuseComputeGroups{c, namespace} 32 | } 33 | 34 | func (c *FakeDatafuseV1alpha1) DatafuseOperators(namespace string) v1alpha1.DatafuseOperatorInterface { 35 | return &FakeDatafuseOperators{c, namespace} 36 | } 37 | 38 | // RESTClient returns a RESTClient that is used to communicate 39 | // with API server by this client implementation. 40 | func (c *FakeDatafuseV1alpha1) RESTClient() rest.Interface { 41 | var ret *rest.RESTClient 42 | return ret 43 | } 44 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/typed/datafuse/v1alpha1/fake/fake_datafuseoperator.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 
17 | 18 | package fake 19 | 20 | import ( 21 | "context" 22 | 23 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 24 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | labels "k8s.io/apimachinery/pkg/labels" 26 | schema "k8s.io/apimachinery/pkg/runtime/schema" 27 | types "k8s.io/apimachinery/pkg/types" 28 | watch "k8s.io/apimachinery/pkg/watch" 29 | testing "k8s.io/client-go/testing" 30 | ) 31 | 32 | // FakeDatafuseOperators implements DatafuseOperatorInterface 33 | type FakeDatafuseOperators struct { 34 | Fake *FakeDatafuseV1alpha1 35 | ns string 36 | } 37 | 38 | var datafuseoperatorsResource = schema.GroupVersionResource{Group: "datafuse", Version: "v1alpha1", Resource: "datafuseoperators"} 39 | 40 | var datafuseoperatorsKind = schema.GroupVersionKind{Group: "datafuse", Version: "v1alpha1", Kind: "DatafuseOperator"} 41 | 42 | // Get takes name of the datafuseOperator, and returns the corresponding datafuseOperator object, and an error if there is any. 43 | func (c *FakeDatafuseOperators) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.DatafuseOperator, err error) { 44 | obj, err := c.Fake. 45 | Invokes(testing.NewGetAction(datafuseoperatorsResource, c.ns, name), &v1alpha1.DatafuseOperator{}) 46 | 47 | if obj == nil { 48 | return nil, err 49 | } 50 | return obj.(*v1alpha1.DatafuseOperator), err 51 | } 52 | 53 | // List takes label and field selectors, and returns the list of DatafuseOperators that match those selectors. 54 | func (c *FakeDatafuseOperators) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.DatafuseOperatorList, err error) { 55 | obj, err := c.Fake. 56 | Invokes(testing.NewListAction(datafuseoperatorsResource, datafuseoperatorsKind, c.ns, opts), &v1alpha1.DatafuseOperatorList{}) 57 | 58 | if obj == nil { 59 | return nil, err 60 | } 61 | 62 | label, _, _ := testing.ExtractFromListOptions(opts) 63 | if label == nil { 64 | label = labels.Everything() 65 | } 66 | list := &v1alpha1.DatafuseOperatorList{ListMeta: obj.(*v1alpha1.DatafuseOperatorList).ListMeta} 67 | for _, item := range obj.(*v1alpha1.DatafuseOperatorList).Items { 68 | if label.Matches(labels.Set(item.Labels)) { 69 | list.Items = append(list.Items, item) 70 | } 71 | } 72 | return list, err 73 | } 74 | 75 | // Watch returns a watch.Interface that watches the requested datafuseOperators. 76 | func (c *FakeDatafuseOperators) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { 77 | return c.Fake. 78 | InvokesWatch(testing.NewWatchAction(datafuseoperatorsResource, c.ns, opts)) 79 | 80 | } 81 | 82 | // Create takes the representation of a datafuseOperator and creates it. Returns the server's representation of the datafuseOperator, and an error, if there is any. 83 | func (c *FakeDatafuseOperators) Create(ctx context.Context, datafuseOperator *v1alpha1.DatafuseOperator, opts v1.CreateOptions) (result *v1alpha1.DatafuseOperator, err error) { 84 | obj, err := c.Fake. 85 | Invokes(testing.NewCreateAction(datafuseoperatorsResource, c.ns, datafuseOperator), &v1alpha1.DatafuseOperator{}) 86 | 87 | if obj == nil { 88 | return nil, err 89 | } 90 | return obj.(*v1alpha1.DatafuseOperator), err 91 | } 92 | 93 | // Update takes the representation of a datafuseOperator and updates it. Returns the server's representation of the datafuseOperator, and an error, if there is any. 
94 | func (c *FakeDatafuseOperators) Update(ctx context.Context, datafuseOperator *v1alpha1.DatafuseOperator, opts v1.UpdateOptions) (result *v1alpha1.DatafuseOperator, err error) { 95 | obj, err := c.Fake. 96 | Invokes(testing.NewUpdateAction(datafuseoperatorsResource, c.ns, datafuseOperator), &v1alpha1.DatafuseOperator{}) 97 | 98 | if obj == nil { 99 | return nil, err 100 | } 101 | return obj.(*v1alpha1.DatafuseOperator), err 102 | } 103 | 104 | // UpdateStatus was generated because the type contains a Status member. 105 | // Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 106 | func (c *FakeDatafuseOperators) UpdateStatus(ctx context.Context, datafuseOperator *v1alpha1.DatafuseOperator, opts v1.UpdateOptions) (*v1alpha1.DatafuseOperator, error) { 107 | obj, err := c.Fake. 108 | Invokes(testing.NewUpdateSubresourceAction(datafuseoperatorsResource, "status", c.ns, datafuseOperator), &v1alpha1.DatafuseOperator{}) 109 | 110 | if obj == nil { 111 | return nil, err 112 | } 113 | return obj.(*v1alpha1.DatafuseOperator), err 114 | } 115 | 116 | // Delete takes name of the datafuseOperator and deletes it. Returns an error if one occurs. 117 | func (c *FakeDatafuseOperators) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { 118 | _, err := c.Fake. 119 | Invokes(testing.NewDeleteAction(datafuseoperatorsResource, c.ns, name), &v1alpha1.DatafuseOperator{}) 120 | 121 | return err 122 | } 123 | 124 | // DeleteCollection deletes a collection of objects. 125 | func (c *FakeDatafuseOperators) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { 126 | action := testing.NewDeleteCollectionAction(datafuseoperatorsResource, c.ns, listOpts) 127 | 128 | _, err := c.Fake.Invokes(action, &v1alpha1.DatafuseOperatorList{}) 129 | return err 130 | } 131 | 132 | // Patch applies the patch and returns the patched datafuseOperator. 133 | func (c *FakeDatafuseOperators) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.DatafuseOperator, err error) { 134 | obj, err := c.Fake. 135 | Invokes(testing.NewPatchSubresourceAction(datafuseoperatorsResource, c.ns, name, pt, data, subresources...), &v1alpha1.DatafuseOperator{}) 136 | 137 | if obj == nil { 138 | return nil, err 139 | } 140 | return obj.(*v1alpha1.DatafuseOperator), err 141 | } 142 | -------------------------------------------------------------------------------- /pkg/client/clientset/versioned/typed/datafuse/v1alpha1/generated_expansion.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by client-gen. DO NOT EDIT. 
17 | 18 | package v1alpha1 19 | 20 | type DatafuseComputeGroupExpansion interface{} 21 | 22 | type DatafuseOperatorExpansion interface{} 23 | -------------------------------------------------------------------------------- /pkg/client/informers/externalversions/datafuse/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by informer-gen. DO NOT EDIT. 17 | 18 | package datafuse 19 | 20 | import ( 21 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/client/informers/externalversions/datafuse/v1alpha1" 22 | internalinterfaces "datafuselabs.io/datafuse-operator/pkg/client/informers/externalversions/internalinterfaces" 23 | ) 24 | 25 | // Interface provides access to each of this group's versions. 26 | type Interface interface { 27 | // V1alpha1 provides access to shared informers for resources in V1alpha1. 28 | V1alpha1() v1alpha1.Interface 29 | } 30 | 31 | type group struct { 32 | factory internalinterfaces.SharedInformerFactory 33 | namespace string 34 | tweakListOptions internalinterfaces.TweakListOptionsFunc 35 | } 36 | 37 | // New returns a new Interface. 38 | func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { 39 | return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} 40 | } 41 | 42 | // V1alpha1 returns a new v1alpha1.Interface. 43 | func (g *group) V1alpha1() v1alpha1.Interface { 44 | return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) 45 | } 46 | -------------------------------------------------------------------------------- /pkg/client/informers/externalversions/datafuse/v1alpha1/datafusecomputegroup.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by informer-gen. DO NOT EDIT. 
17 | 18 | package v1alpha1 19 | 20 | import ( 21 | "context" 22 | time "time" 23 | 24 | datafusev1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 25 | versioned "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned" 26 | internalinterfaces "datafuselabs.io/datafuse-operator/pkg/client/informers/externalversions/internalinterfaces" 27 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/client/listers/datafuse/v1alpha1" 28 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 29 | runtime "k8s.io/apimachinery/pkg/runtime" 30 | watch "k8s.io/apimachinery/pkg/watch" 31 | cache "k8s.io/client-go/tools/cache" 32 | ) 33 | 34 | // DatafuseComputeGroupInformer provides access to a shared informer and lister for 35 | // DatafuseComputeGroups. 36 | type DatafuseComputeGroupInformer interface { 37 | Informer() cache.SharedIndexInformer 38 | Lister() v1alpha1.DatafuseComputeGroupLister 39 | } 40 | 41 | type datafuseComputeGroupInformer struct { 42 | factory internalinterfaces.SharedInformerFactory 43 | tweakListOptions internalinterfaces.TweakListOptionsFunc 44 | namespace string 45 | } 46 | 47 | // NewDatafuseComputeGroupInformer constructs a new informer for DatafuseComputeGroup type. 48 | // Always prefer using an informer factory to get a shared informer instead of getting an independent 49 | // one. This reduces memory footprint and number of connections to the server. 50 | func NewDatafuseComputeGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { 51 | return NewFilteredDatafuseComputeGroupInformer(client, namespace, resyncPeriod, indexers, nil) 52 | } 53 | 54 | // NewFilteredDatafuseComputeGroupInformer constructs a new informer for DatafuseComputeGroup type. 55 | // Always prefer using an informer factory to get a shared informer instead of getting an independent 56 | // one. This reduces memory footprint and number of connections to the server. 
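// For illustration only (not generated code): the factory route recommended above looks
// roughly like the sketch below; client, stopCh and the 30s resync period are assumptions.
//
//	factory := externalversions.NewSharedInformerFactory(client, 30*time.Second)
//	informer := factory.Datafuse().V1alpha1().DatafuseComputeGroups().Informer()
//	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
//		AddFunc: func(obj interface{}) { /* handle newly observed compute groups */ },
//	})
//	factory.Start(stopCh)
//	cache.WaitForCacheSync(stopCh, informer.HasSynced)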
57 | func NewFilteredDatafuseComputeGroupInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { 58 | return cache.NewSharedIndexInformer( 59 | &cache.ListWatch{ 60 | ListFunc: func(options v1.ListOptions) (runtime.Object, error) { 61 | if tweakListOptions != nil { 62 | tweakListOptions(&options) 63 | } 64 | return client.DatafuseV1alpha1().DatafuseComputeGroups(namespace).List(context.TODO(), options) 65 | }, 66 | WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { 67 | if tweakListOptions != nil { 68 | tweakListOptions(&options) 69 | } 70 | return client.DatafuseV1alpha1().DatafuseComputeGroups(namespace).Watch(context.TODO(), options) 71 | }, 72 | }, 73 | &datafusev1alpha1.DatafuseComputeGroup{}, 74 | resyncPeriod, 75 | indexers, 76 | ) 77 | } 78 | 79 | func (f *datafuseComputeGroupInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { 80 | return NewFilteredDatafuseComputeGroupInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) 81 | } 82 | 83 | func (f *datafuseComputeGroupInformer) Informer() cache.SharedIndexInformer { 84 | return f.factory.InformerFor(&datafusev1alpha1.DatafuseComputeGroup{}, f.defaultInformer) 85 | } 86 | 87 | func (f *datafuseComputeGroupInformer) Lister() v1alpha1.DatafuseComputeGroupLister { 88 | return v1alpha1.NewDatafuseComputeGroupLister(f.Informer().GetIndexer()) 89 | } 90 | -------------------------------------------------------------------------------- /pkg/client/informers/externalversions/datafuse/v1alpha1/datafuseoperator.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by informer-gen. DO NOT EDIT. 17 | 18 | package v1alpha1 19 | 20 | import ( 21 | "context" 22 | time "time" 23 | 24 | datafusev1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 25 | versioned "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned" 26 | internalinterfaces "datafuselabs.io/datafuse-operator/pkg/client/informers/externalversions/internalinterfaces" 27 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/client/listers/datafuse/v1alpha1" 28 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 29 | runtime "k8s.io/apimachinery/pkg/runtime" 30 | watch "k8s.io/apimachinery/pkg/watch" 31 | cache "k8s.io/client-go/tools/cache" 32 | ) 33 | 34 | // DatafuseOperatorInformer provides access to a shared informer and lister for 35 | // DatafuseOperators. 
36 | type DatafuseOperatorInformer interface { 37 | Informer() cache.SharedIndexInformer 38 | Lister() v1alpha1.DatafuseOperatorLister 39 | } 40 | 41 | type datafuseOperatorInformer struct { 42 | factory internalinterfaces.SharedInformerFactory 43 | tweakListOptions internalinterfaces.TweakListOptionsFunc 44 | namespace string 45 | } 46 | 47 | // NewDatafuseOperatorInformer constructs a new informer for DatafuseOperator type. 48 | // Always prefer using an informer factory to get a shared informer instead of getting an independent 49 | // one. This reduces memory footprint and number of connections to the server. 50 | func NewDatafuseOperatorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { 51 | return NewFilteredDatafuseOperatorInformer(client, namespace, resyncPeriod, indexers, nil) 52 | } 53 | 54 | // NewFilteredDatafuseOperatorInformer constructs a new informer for DatafuseOperator type. 55 | // Always prefer using an informer factory to get a shared informer instead of getting an independent 56 | // one. This reduces memory footprint and number of connections to the server. 57 | func NewFilteredDatafuseOperatorInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { 58 | return cache.NewSharedIndexInformer( 59 | &cache.ListWatch{ 60 | ListFunc: func(options v1.ListOptions) (runtime.Object, error) { 61 | if tweakListOptions != nil { 62 | tweakListOptions(&options) 63 | } 64 | return client.DatafuseV1alpha1().DatafuseOperators(namespace).List(context.TODO(), options) 65 | }, 66 | WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { 67 | if tweakListOptions != nil { 68 | tweakListOptions(&options) 69 | } 70 | return client.DatafuseV1alpha1().DatafuseOperators(namespace).Watch(context.TODO(), options) 71 | }, 72 | }, 73 | &datafusev1alpha1.DatafuseOperator{}, 74 | resyncPeriod, 75 | indexers, 76 | ) 77 | } 78 | 79 | func (f *datafuseOperatorInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { 80 | return NewFilteredDatafuseOperatorInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) 81 | } 82 | 83 | func (f *datafuseOperatorInformer) Informer() cache.SharedIndexInformer { 84 | return f.factory.InformerFor(&datafusev1alpha1.DatafuseOperator{}, f.defaultInformer) 85 | } 86 | 87 | func (f *datafuseOperatorInformer) Lister() v1alpha1.DatafuseOperatorLister { 88 | return v1alpha1.NewDatafuseOperatorLister(f.Informer().GetIndexer()) 89 | } 90 | -------------------------------------------------------------------------------- /pkg/client/informers/externalversions/datafuse/v1alpha1/interface.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by informer-gen. DO NOT EDIT. 17 | 18 | package v1alpha1 19 | 20 | import ( 21 | internalinterfaces "datafuselabs.io/datafuse-operator/pkg/client/informers/externalversions/internalinterfaces" 22 | ) 23 | 24 | // Interface provides access to all the informers in this group version. 25 | type Interface interface { 26 | // DatafuseComputeGroups returns a DatafuseComputeGroupInformer. 27 | DatafuseComputeGroups() DatafuseComputeGroupInformer 28 | // DatafuseOperators returns a DatafuseOperatorInformer. 29 | DatafuseOperators() DatafuseOperatorInformer 30 | } 31 | 32 | type version struct { 33 | factory internalinterfaces.SharedInformerFactory 34 | namespace string 35 | tweakListOptions internalinterfaces.TweakListOptionsFunc 36 | } 37 | 38 | // New returns a new Interface. 39 | func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { 40 | return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} 41 | } 42 | 43 | // DatafuseComputeGroups returns a DatafuseComputeGroupInformer. 44 | func (v *version) DatafuseComputeGroups() DatafuseComputeGroupInformer { 45 | return &datafuseComputeGroupInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} 46 | } 47 | 48 | // DatafuseOperators returns a DatafuseOperatorInformer. 49 | func (v *version) DatafuseOperators() DatafuseOperatorInformer { 50 | return &datafuseOperatorInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} 51 | } 52 | -------------------------------------------------------------------------------- /pkg/client/informers/externalversions/generic.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by informer-gen. DO NOT EDIT. 17 | 18 | package externalversions 19 | 20 | import ( 21 | "fmt" 22 | 23 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 24 | schema "k8s.io/apimachinery/pkg/runtime/schema" 25 | cache "k8s.io/client-go/tools/cache" 26 | ) 27 | 28 | // GenericInformer is type of SharedIndexInformer which will locate and delegate to other 29 | // sharedInformers based on type 30 | type GenericInformer interface { 31 | Informer() cache.SharedIndexInformer 32 | Lister() cache.GenericLister 33 | } 34 | 35 | type genericInformer struct { 36 | informer cache.SharedIndexInformer 37 | resource schema.GroupResource 38 | } 39 | 40 | // Informer returns the SharedIndexInformer. 41 | func (f *genericInformer) Informer() cache.SharedIndexInformer { 42 | return f.informer 43 | } 44 | 45 | // Lister returns the GenericLister. 
46 | func (f *genericInformer) Lister() cache.GenericLister { 47 | return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) 48 | } 49 | 50 | // ForResource gives generic access to a shared informer of the matching type 51 | // TODO extend this to unknown resources with a client pool 52 | func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { 53 | switch resource { 54 | // Group=datafuse, Version=v1alpha1 55 | case v1alpha1.SchemeGroupVersion.WithResource("datafusecomputegroups"): 56 | return &genericInformer{resource: resource.GroupResource(), informer: f.Datafuse().V1alpha1().DatafuseComputeGroups().Informer()}, nil 57 | case v1alpha1.SchemeGroupVersion.WithResource("datafuseoperators"): 58 | return &genericInformer{resource: resource.GroupResource(), informer: f.Datafuse().V1alpha1().DatafuseOperators().Informer()}, nil 59 | 60 | } 61 | 62 | return nil, fmt.Errorf("no informer found for %v", resource) 63 | } 64 | -------------------------------------------------------------------------------- /pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by informer-gen. DO NOT EDIT. 17 | 18 | package internalinterfaces 19 | 20 | import ( 21 | time "time" 22 | 23 | versioned "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned" 24 | v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 25 | runtime "k8s.io/apimachinery/pkg/runtime" 26 | cache "k8s.io/client-go/tools/cache" 27 | ) 28 | 29 | // NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer. 30 | type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer 31 | 32 | // SharedInformerFactory a small interface to allow for adding an informer without an import cycle 33 | type SharedInformerFactory interface { 34 | Start(stopCh <-chan struct{}) 35 | InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer 36 | } 37 | 38 | // TweakListOptionsFunc is a function that transforms a v1.ListOptions. 39 | type TweakListOptionsFunc func(*v1.ListOptions) 40 | -------------------------------------------------------------------------------- /pkg/client/listers/datafuse/v1alpha1/datafusecomputegroup.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by lister-gen. DO NOT EDIT. 17 | 18 | package v1alpha1 19 | 20 | import ( 21 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 22 | "k8s.io/apimachinery/pkg/api/errors" 23 | "k8s.io/apimachinery/pkg/labels" 24 | "k8s.io/client-go/tools/cache" 25 | ) 26 | 27 | // DatafuseComputeGroupLister helps list DatafuseComputeGroups. 28 | // All objects returned here must be treated as read-only. 29 | type DatafuseComputeGroupLister interface { 30 | // List lists all DatafuseComputeGroups in the indexer. 31 | // Objects returned here must be treated as read-only. 32 | List(selector labels.Selector) (ret []*v1alpha1.DatafuseComputeGroup, err error) 33 | // DatafuseComputeGroups returns an object that can list and get DatafuseComputeGroups. 34 | DatafuseComputeGroups(namespace string) DatafuseComputeGroupNamespaceLister 35 | DatafuseComputeGroupListerExpansion 36 | } 37 | 38 | // datafuseComputeGroupLister implements the DatafuseComputeGroupLister interface. 39 | type datafuseComputeGroupLister struct { 40 | indexer cache.Indexer 41 | } 42 | 43 | // NewDatafuseComputeGroupLister returns a new DatafuseComputeGroupLister. 44 | func NewDatafuseComputeGroupLister(indexer cache.Indexer) DatafuseComputeGroupLister { 45 | return &datafuseComputeGroupLister{indexer: indexer} 46 | } 47 | 48 | // List lists all DatafuseComputeGroups in the indexer. 49 | func (s *datafuseComputeGroupLister) List(selector labels.Selector) (ret []*v1alpha1.DatafuseComputeGroup, err error) { 50 | err = cache.ListAll(s.indexer, selector, func(m interface{}) { 51 | ret = append(ret, m.(*v1alpha1.DatafuseComputeGroup)) 52 | }) 53 | return ret, err 54 | } 55 | 56 | // DatafuseComputeGroups returns an object that can list and get DatafuseComputeGroups. 57 | func (s *datafuseComputeGroupLister) DatafuseComputeGroups(namespace string) DatafuseComputeGroupNamespaceLister { 58 | return datafuseComputeGroupNamespaceLister{indexer: s.indexer, namespace: namespace} 59 | } 60 | 61 | // DatafuseComputeGroupNamespaceLister helps list and get DatafuseComputeGroups. 62 | // All objects returned here must be treated as read-only. 63 | type DatafuseComputeGroupNamespaceLister interface { 64 | // List lists all DatafuseComputeGroups in the indexer for a given namespace. 65 | // Objects returned here must be treated as read-only. 66 | List(selector labels.Selector) (ret []*v1alpha1.DatafuseComputeGroup, err error) 67 | // Get retrieves the DatafuseComputeGroup from the indexer for a given namespace and name. 68 | // Objects returned here must be treated as read-only. 69 | Get(name string) (*v1alpha1.DatafuseComputeGroup, error) 70 | DatafuseComputeGroupNamespaceListerExpansion 71 | } 72 | 73 | // datafuseComputeGroupNamespaceLister implements the DatafuseComputeGroupNamespaceLister 74 | // interface. 75 | type datafuseComputeGroupNamespaceLister struct { 76 | indexer cache.Indexer 77 | namespace string 78 | } 79 | 80 | // List lists all DatafuseComputeGroups in the indexer for a given namespace. 
81 | func (s datafuseComputeGroupNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.DatafuseComputeGroup, err error) { 82 | err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { 83 | ret = append(ret, m.(*v1alpha1.DatafuseComputeGroup)) 84 | }) 85 | return ret, err 86 | } 87 | 88 | // Get retrieves the DatafuseComputeGroup from the indexer for a given namespace and name. 89 | func (s datafuseComputeGroupNamespaceLister) Get(name string) (*v1alpha1.DatafuseComputeGroup, error) { 90 | obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) 91 | if err != nil { 92 | return nil, err 93 | } 94 | if !exists { 95 | return nil, errors.NewNotFound(v1alpha1.Resource("datafusecomputegroup"), name) 96 | } 97 | return obj.(*v1alpha1.DatafuseComputeGroup), nil 98 | } 99 | -------------------------------------------------------------------------------- /pkg/client/listers/datafuse/v1alpha1/datafuseoperator.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by lister-gen. DO NOT EDIT. 17 | 18 | package v1alpha1 19 | 20 | import ( 21 | v1alpha1 "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 22 | "k8s.io/apimachinery/pkg/api/errors" 23 | "k8s.io/apimachinery/pkg/labels" 24 | "k8s.io/client-go/tools/cache" 25 | ) 26 | 27 | // DatafuseOperatorLister helps list DatafuseOperators. 28 | // All objects returned here must be treated as read-only. 29 | type DatafuseOperatorLister interface { 30 | // List lists all DatafuseOperators in the indexer. 31 | // Objects returned here must be treated as read-only. 32 | List(selector labels.Selector) (ret []*v1alpha1.DatafuseOperator, err error) 33 | // DatafuseOperators returns an object that can list and get DatafuseOperators. 34 | DatafuseOperators(namespace string) DatafuseOperatorNamespaceLister 35 | DatafuseOperatorListerExpansion 36 | } 37 | 38 | // datafuseOperatorLister implements the DatafuseOperatorLister interface. 39 | type datafuseOperatorLister struct { 40 | indexer cache.Indexer 41 | } 42 | 43 | // NewDatafuseOperatorLister returns a new DatafuseOperatorLister. 44 | func NewDatafuseOperatorLister(indexer cache.Indexer) DatafuseOperatorLister { 45 | return &datafuseOperatorLister{indexer: indexer} 46 | } 47 | 48 | // List lists all DatafuseOperators in the indexer. 49 | func (s *datafuseOperatorLister) List(selector labels.Selector) (ret []*v1alpha1.DatafuseOperator, err error) { 50 | err = cache.ListAll(s.indexer, selector, func(m interface{}) { 51 | ret = append(ret, m.(*v1alpha1.DatafuseOperator)) 52 | }) 53 | return ret, err 54 | } 55 | 56 | // DatafuseOperators returns an object that can list and get DatafuseOperators. 
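// For illustration only (not generated code): a typical read path through this lister,
// assuming an informer whose indexer has already been populated:
//
//	lister := NewDatafuseOperatorLister(informer.GetIndexer())
//	op, err := lister.DatafuseOperators("default").Get("my-operator")
//	if errors.IsNotFound(err) {
//		// the operator has not been created yet
//	}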
57 | func (s *datafuseOperatorLister) DatafuseOperators(namespace string) DatafuseOperatorNamespaceLister { 58 | return datafuseOperatorNamespaceLister{indexer: s.indexer, namespace: namespace} 59 | } 60 | 61 | // DatafuseOperatorNamespaceLister helps list and get DatafuseOperators. 62 | // All objects returned here must be treated as read-only. 63 | type DatafuseOperatorNamespaceLister interface { 64 | // List lists all DatafuseOperators in the indexer for a given namespace. 65 | // Objects returned here must be treated as read-only. 66 | List(selector labels.Selector) (ret []*v1alpha1.DatafuseOperator, err error) 67 | // Get retrieves the DatafuseOperator from the indexer for a given namespace and name. 68 | // Objects returned here must be treated as read-only. 69 | Get(name string) (*v1alpha1.DatafuseOperator, error) 70 | DatafuseOperatorNamespaceListerExpansion 71 | } 72 | 73 | // datafuseOperatorNamespaceLister implements the DatafuseOperatorNamespaceLister 74 | // interface. 75 | type datafuseOperatorNamespaceLister struct { 76 | indexer cache.Indexer 77 | namespace string 78 | } 79 | 80 | // List lists all DatafuseOperators in the indexer for a given namespace. 81 | func (s datafuseOperatorNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.DatafuseOperator, err error) { 82 | err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { 83 | ret = append(ret, m.(*v1alpha1.DatafuseOperator)) 84 | }) 85 | return ret, err 86 | } 87 | 88 | // Get retrieves the DatafuseOperator from the indexer for a given namespace and name. 89 | func (s datafuseOperatorNamespaceLister) Get(name string) (*v1alpha1.DatafuseOperator, error) { 90 | obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) 91 | if err != nil { 92 | return nil, err 93 | } 94 | if !exists { 95 | return nil, errors.NewNotFound(v1alpha1.Resource("datafuseoperator"), name) 96 | } 97 | return obj.(*v1alpha1.DatafuseOperator), nil 98 | } 99 | -------------------------------------------------------------------------------- /pkg/client/listers/datafuse/v1alpha1/expansion_generated.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | // Code generated by lister-gen. DO NOT EDIT. 17 | 18 | package v1alpha1 19 | 20 | // DatafuseComputeGroupListerExpansion allows custom methods to be added to 21 | // DatafuseComputeGroupLister. 22 | type DatafuseComputeGroupListerExpansion interface{} 23 | 24 | // DatafuseComputeGroupNamespaceListerExpansion allows custom methods to be added to 25 | // DatafuseComputeGroupNamespaceLister. 26 | type DatafuseComputeGroupNamespaceListerExpansion interface{} 27 | 28 | // DatafuseOperatorListerExpansion allows custom methods to be added to 29 | // DatafuseOperatorLister. 
30 | type DatafuseOperatorListerExpansion interface{} 31 | 32 | // DatafuseOperatorNamespaceListerExpansion allows custom methods to be added to 33 | // DatafuseOperatorNamespaceLister. 34 | type DatafuseOperatorNamespaceListerExpansion interface{} 35 | -------------------------------------------------------------------------------- /pkg/config/constants.go: -------------------------------------------------------------------------------- 1 | package config 2 | 3 | const ( 4 | LabelAnnotationPrefix = "datafuse.datafuselabs.io/" 5 | DatafuseGroupNameLabel = LabelAnnotationPrefix + "group-name" 6 | DatafuseRoleLabel = "datafuse-role" 7 | DatafuseLeaderLabel = "leader" 8 | DatafuseFollowerLabel = "follower" 9 | ) 10 | -------------------------------------------------------------------------------- /pkg/controllers/main_controller.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | 5 | package controller 6 | 7 | const ( 8 | ControllerAgentName = "datafuse-operator" 9 | // SuccessSynced is used as part of the Event 'reason' when a DatafuseOperator is synced 10 | SuccessSynced = "Synced" 11 | // ErrResourceExists is used as part of the Event 'reason' when a DatafuseOperator fails 12 | // to sync due to a Deployment of the same name already existing. 13 | ErrResourceExists = "ErrResourceExists" 14 | 15 | // MessageResourceSynced is the message used for an Event fired when an Operator 16 | // is synced successfully 17 | MessageResourceSynced = "datafuse operator synced successfully" 18 | queueTokenRefillRate = 50 19 | queueTokenBucketSize = 500 20 | OperatorLabel = "datafuse-operator" 21 | ComputeGroupLabel = "datafuse-computegroup" 22 | ComputeGroupRoleLabel = "datafuse-computegrouprole" 23 | ComputeGroupRoleLeader = "leader" 24 | ComputeGroupRoleFollower = "follower" 25 | InstanceContainerName = "fusequery" 26 | FUSE_QUERY_NUM_CPUS = "FUSE_QUERY_NUM_CPUS" 27 | FUSE_QUERY_MYSQL_HANDLER_HOST = "FUSE_QUERY_MYSQL_HANDLER_HOST" 28 | FUSE_QUERY_MYSQL_HANDLER_PORT = "FUSE_QUERY_MYSQL_HANDLER_PORT" 29 | FUSE_QUERY_CLICKHOUSE_HANDLER_HOST = "FUSE_QUERY_CLICKHOUSE_HANDLER_HOST" 30 | FUSE_QUERY_CLICKHOUSE_HANDLER_PORT = "FUSE_QUERY_CLICKHOUSE_HANDLER_PORT" 31 | FUSE_QUERY_RPC_API_ADDRESS = "FUSE_QUERY_FLIGHT_API_ADDRESS" 32 | FUSE_QUERY_HTTP_API_ADDRESS = "FUSE_QUERY_HTTP_API_ADDRESS" 33 | FUSE_QUERY_METRIC_API_ADDRESS = "FUSE_QUERY_METRIC_API_ADDRESS" 34 | FUSE_QUERY_PRIORITY = "FUSE_QUERY_PRIORITY" 35 | ContainerHTTPPort = "http" 36 | ContainerMetricsPort = "metrics" 37 | ContainerRPCPort = "rpc" 38 | ContainerMysqlPort = "mysql" 39 | ContainerClickhousePort = "clickhouse" 40 | ) 41 | -------------------------------------------------------------------------------- /pkg/controllers/register/register_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0.
4 | package register 5 | 6 | import ( 7 | "strconv" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/stretchr/testify/assert" 12 | v1 "k8s.io/api/core/v1" 13 | "k8s.io/kubectl/pkg/util/podutils" 14 | ) 15 | 16 | func TestBuildPodConfig(t *testing.T) { 17 | nodes := []ClusterNodeRequest{{Name: "foo", Address: "1.1.1.1", Priority: 1}} 18 | config := "foo" 19 | server := newHTTPTestServer(t, config, &nodes) 20 | defer server.Close() 21 | pod := makeMockPodFromServer(server, "default-default-group", "pod1", "default", true, true) 22 | url := strings.TrimPrefix(server.URL, "http://") 23 | str := strings.SplitN(url, ":", 2) 24 | pConfig, err := BuildPodConfig(pod) 25 | assert.NoError(t, err) 26 | assert.Equal(t, pConfig.MysqlPort, strconv.Itoa(int(*DefaultComputeSpec.MysqlPort))) 27 | assert.Equal(t, pConfig.ClickHousePort, strconv.Itoa(int(*DefaultComputeSpec.ClickhousePort))) 28 | assert.Equal(t, pConfig.MetricsPort, strconv.Itoa(int(*DefaultComputeSpec.MetricsPort))) 29 | assert.Equal(t, pConfig.RPCPort, strconv.Itoa(int(*DefaultComputeSpec.RPCPort))) 30 | assert.Equal(t, pConfig.Priority, strconv.Itoa(int(*DefaultComputeSpec.Priority))) 31 | assert.Equal(t, pConfig.HTTPPort, str[1]) 32 | } 33 | 34 | func TestGetConfig(t *testing.T) { 35 | nodes := []ClusterNodeRequest{} 36 | config := "foo" 37 | server := newHTTPTestServer(t, config, &nodes) 38 | defer server.Close() 39 | pod := makeMockPodFromServer(server, "default-default-group", "pod1", "default", true, true) 40 | client := fakeK8sClienset([]*v1.Pod{pod}) 41 | rs := RegistSetter{true, []string{}, client} 42 | str, err := rs.GetConfig(pod) 43 | assert.NoError(t, err) 44 | assert.Equal(t, config, str) 45 | } 46 | 47 | func TestGetClusterList(t *testing.T) { 48 | nodes := []ClusterNodeRequest{{Name: "foo", Address: "1.1.1.1", Priority: 1}} 49 | config := "foo" 50 | server := newHTTPTestServer(t, config, &nodes) 51 | defer server.Close() 52 | pod := makeMockPodFromServer(server, "default-default-group", "pod1", "default", true, true) 53 | client := fakeK8sClienset([]*v1.Pod{pod}) 54 | rs := RegistSetter{true, []string{}, client} 55 | str, err := rs.GetConfig(pod) 56 | assert.NoError(t, err) 57 | assert.Equal(t, config, str) 58 | list, err := rs.GetClusterList(pod) 59 | assert.NoError(t, err) 60 | assert.Equal(t, list, nodes) 61 | assert.Equal(t, true, podutils.IsPodReady(pod)) 62 | } 63 | 64 | func TestAddNode(t *testing.T) { 65 | nodes := []ClusterNodeRequest{{Name: "foo", Address: "1.1.1.1", Priority: 1}} 66 | leaderConfig := "foo" 67 | followerConfig := "bar" 68 | leaderServer := newHTTPTestServer(t, leaderConfig, &nodes) 69 | defer leaderServer.Close() 70 | followerServer := newHTTPTestServer(t, followerConfig, &[]ClusterNodeRequest{}) 71 | defer followerServer.Close() 72 | leader := makeMockPodFromServer(leaderServer, "default-default-group", "pod1", "default", true, true) 73 | follower := makeMockPodFromServer(followerServer, "default-default-group", "pod2", "default", false, true) 74 | client := fakeK8sClienset([]*v1.Pod{leader, follower}) 75 | rs := RegistSetter{true, []string{}, client} 76 | err := rs.AddNode(leader, follower) 77 | assert.NoError(t, err) 78 | str, err := rs.GetConfig(leader) 79 | assert.NoError(t, err) 80 | assert.Equal(t, leaderConfig, str) 81 | list, err := rs.GetClusterList(leader) 82 | assert.NoError(t, err) 83 | assert.Equal(t, 2, len(nodes)) 84 | assert.Equal(t, list, nodes) 85 | assert.Equal(t, true, rs.IsGroupLeader(*leader)) 86 | assert.Equal(t, true, rs.IsGroupWorker(*follower)) 87 | } 88 | 89 | func 
TestRemoveNode(t *testing.T) { 90 | nodes := []ClusterNodeRequest{{Name: "foo", Address: "1.1.1.1", Priority: 1}} 91 | leaderConfig := "foo" 92 | followerConfig := "bar" 93 | leaderServer := newHTTPTestServer(t, leaderConfig, &nodes) 94 | defer leaderServer.Close() 95 | followerServer := newHTTPTestServer(t, followerConfig, &[]ClusterNodeRequest{}) 96 | defer followerServer.Close() 97 | leader := makeMockPodFromServer(leaderServer, "default-default-group", "pod1", "default", true, true) 98 | follower := makeMockPodFromServer(followerServer, "default-default-group", "pod2", "default", false, true) 99 | client := fakeK8sClienset([]*v1.Pod{leader, follower}) 100 | rs := RegistSetter{true, []string{}, client} 101 | err := rs.AddNode(leader, follower) 102 | assert.NoError(t, err) 103 | str, err := rs.GetConfig(leader) 104 | assert.NoError(t, err) 105 | assert.Equal(t, leaderConfig, str) 106 | list, err := rs.GetClusterList(leader) 107 | assert.NoError(t, err) 108 | assert.Equal(t, 2, len(nodes)) 109 | assert.Equal(t, list, nodes) 110 | err = rs.RemoveNode(leader, follower) 111 | assert.NoError(t, err) 112 | list, err = rs.GetClusterList(leader) 113 | assert.NoError(t, err) 114 | assert.Equal(t, 1, len(nodes)) 115 | assert.Equal(t, list, nodes) 116 | } 117 | -------------------------------------------------------------------------------- /pkg/controllers/register/register_test_utils.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | package register 5 | 6 | import ( 7 | "encoding/json" 8 | "io" 9 | "net/http" 10 | "net/http/httptest" 11 | "strconv" 12 | "strings" 13 | "testing" 14 | 15 | "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 16 | "datafuselabs.io/datafuse-operator/pkg/controllers/utils" 17 | v1 "k8s.io/api/core/v1" 18 | "k8s.io/apimachinery/pkg/runtime" 19 | "k8s.io/client-go/kubernetes" 20 | "k8s.io/client-go/kubernetes/fake" 21 | ) 22 | 23 | type mockFuseQueryServer struct { 24 | Nodes []ClusterNodeRequest 25 | Config string 26 | Server *httptest.Server 27 | } 28 | 29 | func int32Ptr(i int32) *int32 { return &i } 30 | func strPtr(str string) *string { return &str } 31 | 32 | var ( 33 | DefaultComputeSpec = v1alpha1.DatafuseComputeSetSpec{ 34 | Replicas: int32Ptr(1), 35 | DatafuseComputeInstanceSpec: v1alpha1.DatafuseComputeInstanceSpec{ 36 | Cores: int32Ptr(1), 37 | CoreLimit: strPtr("1300m"), 38 | Memory: strPtr("512m"), 39 | MemoryLimit: strPtr("512m"), 40 | Image: strPtr("zhihanz/fuse-query:latest"), 41 | ImagePullPolicy: strPtr("Always"), 42 | HTTPPort: int32Ptr(8080), 43 | ClickhousePort: int32Ptr(9000), 44 | MysqlPort: int32Ptr(3306), 45 | RPCPort: int32Ptr(9091), 46 | MetricsPort: int32Ptr(9098), 47 | Priority: int32Ptr(1), 48 | }, 49 | } 50 | ) 51 | 52 | func newHTTPTestServer(t *testing.T, config string, nodes *[]ClusterNodeRequest) *httptest.Server { 53 | server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 54 | w.WriteHeader(http.StatusOK) 55 | if r.Method == "GET" && r.URL.EscapedPath() == string(CLUSTER_CONFIG) { 56 | w.Write([]byte(config)) 57 | } 58 | if r.Method == "GET" && r.URL.EscapedPath() == string(CLUSTER_LIST) { 59 | b, err := json.Marshal(*nodes) 60 | if err != nil { 61 | t.Fatal(err) 62 | } 63 | w.Write(b) 64 | } 65 | if r.Method == "POST" && r.URL.EscapedPath() == string(CLUSTER_ADD) { 66 | jsonbyte, err := io.ReadAll(r.Body) 67 | if err != nil { 68 | t.Fatal(err) 69 | 
} 70 | defer r.Body.Close() 71 | var node ClusterNodeRequest 72 | json.Unmarshal(jsonbyte, &node) 73 | for _, item := range *nodes { 74 | if item.Name == node.Name { 75 | return 76 | } 77 | } 78 | *nodes = append(*nodes, node) 79 | } 80 | if r.Method == "POST" && r.URL.EscapedPath() == string(CLUSTER_REMOVE) { 81 | jsonbyte, err := io.ReadAll(r.Body) 82 | if err != nil { 83 | t.Fatal(err) 84 | } 85 | defer r.Body.Close() 86 | var node ClusterNodeRequest 87 | json.Unmarshal(jsonbyte, &node) 88 | updatedList := []ClusterNodeRequest{} 89 | for _, item := range *nodes { 90 | if item.Name != node.Name { 91 | updatedList = append(updatedList, item) 92 | } 93 | } 94 | *nodes = updatedList 95 | } 96 | })) 97 | return server 98 | } 99 | 100 | func makeMockPod(groupKey, name, namespace, ip string, isLeader bool, isReady bool) *v1.Pod { 101 | pod := utils.MakeFuseQueryPod(&DefaultComputeSpec, name, namespace, groupKey, isLeader) 102 | if isReady { 103 | createMockStatus(pod, v1.PodReady) 104 | } 105 | pod.Status.PodIP = ip 106 | return pod 107 | } 108 | 109 | func makeIreleventMockPod(groupKey, name, namespace string, isLeader bool, isReady bool) *v1.Pod { 110 | pod := utils.MakeFuseQueryPod(&DefaultComputeSpec, name, namespace, groupKey, isLeader) 111 | pod.Labels = make(map[string]string) 112 | if isReady { 113 | createMockStatus(pod, v1.PodReady) 114 | } 115 | return pod 116 | } 117 | 118 | func makeMockPodFromServer(server *httptest.Server, groupKey, name, namespace string, isLeader, isReady bool) *v1.Pod { 119 | url := strings.TrimPrefix(server.URL, "http://") 120 | str := strings.SplitN(url, ":", 2) 121 | s, err := strconv.Atoi(str[1]) 122 | if err != nil { 123 | return nil 124 | } 125 | 126 | // we change the compute spec to comply with the mock server settings 127 | instance := DefaultComputeSpec.DeepCopy() 128 | 129 | instance.HTTPPort = int32Ptr(int32(s)) 130 | pod := utils.MakeFuseQueryPod(instance, name, namespace, groupKey, isLeader) 131 | if isReady { 132 | createMockStatus(pod, v1.PodReady) 133 | } 134 | pod.Status.PodIP = str[0] 135 | return pod 136 | } 137 | 138 | func fakeK8sClienset(pods []*v1.Pod) (client kubernetes.Interface) { 139 | var csObjs []runtime.Object 140 | for _, op := range pods { 141 | csObjs = append(csObjs, op.DeepCopy()) 142 | } 143 | 144 | client = fake.NewSimpleClientset(csObjs...) 145 | return client 146 | } 147 | 148 | func createMockStatus(pod *v1.Pod, conditionType v1.PodConditionType) { 149 | if pod.Status.Conditions == nil { 150 | pod.Status.Conditions = make([]v1.PodCondition, 0) 151 | } 152 | pod.Status.Conditions = append(pod.Status.Conditions, v1.PodCondition{Type: conditionType, Status: v1.ConditionTrue}) 153 | } 154 | -------------------------------------------------------------------------------- /pkg/scheduler/interface/interface.go: -------------------------------------------------------------------------------- 1 | package schedulerinterface 2 | 3 | type Scheduler interface { 4 | Name() string 5 | } 6 | -------------------------------------------------------------------------------- /pkg/scheduler/scheduler_factory.go: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright 2021. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License.
6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | package scheduler 17 | 18 | import ( 19 | "sync" 20 | 21 | "k8s.io/client-go/rest" 22 | ) 23 | 24 | type SchedulerFactory struct { 25 | sync.RWMutex 26 | config *rest.Config 27 | plugins map[string]string 28 | } 29 | 30 | func NewSchedulerFactory(config *rest.Config) {} 31 | -------------------------------------------------------------------------------- /result.txt: -------------------------------------------------------------------------------- 1 | avg(number) 2 | 499.5 3 | -------------------------------------------------------------------------------- /tests/e2e/README.md: -------------------------------------------------------------------------------- 1 | # E2E Testing 2 | 3 | End-to-end (e2e) testing is automated testing for real user scenarios. 4 | 5 | ## Build and Run Tests 6 | 7 | Prerequisites: 8 | - A running k8s cluster and kube config. We will need to pass kube config as arguments. 9 | - Have kubeconfig file ready. 10 | - Have a datafuse operator image ready. 11 | 12 | e2e tests are written as Go test. All go test techniques apply (e.g. picking what to run, timeout length). Let's say I want to run all tests in "test/e2e/": 13 | 14 | ```bash 15 | $ go test -v ./tests/e2e/ --kubeconfig "$HOME/.kube/config" --operator-image=zhihanz/datafuse-operator:latest 16 | ``` 17 | -------------------------------------------------------------------------------- /tests/e2e/clients/clickhouse-client.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: clickhouse-client 5 | spec: 6 | containers: 7 | - name: clickhouse-client 8 | image: yandex/clickhouse-client:21.5 9 | ports: 10 | - containerPort: 80 11 | command: ["/bin/sh", "-ec", "while :; do echo '.'; sleep 5 ; done"] 12 | restartPolicy: OnFailure 13 | -------------------------------------------------------------------------------- /tests/e2e/clients/mysql-client.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mysql-client 5 | spec: 6 | containers: 7 | - name: mysql-client 8 | image: arey/mysql-client:latest 9 | ports: 10 | - containerPort: 80 11 | command: ["/bin/sh", "-ec", "while :; do echo '.'; sleep 5 ; done"] 12 | restartPolicy: OnFailure 13 | 14 | -------------------------------------------------------------------------------- /tests/e2e/sqlfiles/clickhouse/test1.result: -------------------------------------------------------------------------------- 1 | 499.5 2 | -------------------------------------------------------------------------------- /tests/e2e/sqlfiles/mysql/test1.result: -------------------------------------------------------------------------------- 1 | avg(number) 2 | 499.5 3 | -------------------------------------------------------------------------------- /tests/e2e/sqlfiles/test1.sql: -------------------------------------------------------------------------------- 1 | SELECT avg(number) from numbers_mt(1000) -------------------------------------------------------------------------------- 
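Note: each `.sql` file above is paired with the per-client `.result` files by shared base name (`test1`) via `BuildSQLPair` in `tests/utils/sql/sql_utils.go` (shown later in this dump). A minimal sketch of that pairing, run from the repository root; the plain `"%s"` template here is only a stand-in for whatever client command template the e2e harness actually substitutes the query into:

```go
package main

import (
	"fmt"

	sqlutils "datafuselabs.io/datafuse-operator/tests/utils/sql"
)

func main() {
	// Pair tests/e2e/sqlfiles/*.sql with the expected MySQL output in
	// tests/e2e/sqlfiles/mysql/*.result, keyed by base name ("test1").
	// The "%s" template keeps the raw SQL unchanged in SQLPair.Query.
	pairs, err := sqlutils.BuildSQLPair("%s", "tests/e2e/sqlfiles", "tests/e2e/sqlfiles/mysql")
	if err != nil {
		panic(err)
	}
	for name, p := range pairs {
		fmt.Printf("%s: query=%q expected=%q\n", name, p.Query, p.Result)
	}
}
```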
/tests/e2e/testfiles/default_follower.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: fuse-query-follower 6 | labels: 7 | app: fuse-query-follower 8 | env: dev 9 | spec: 10 | replicas: 2 11 | selector: 12 | matchLabels: 13 | app: fuse-query-follower 14 | env: dev 15 | datafuse-computegroup: group1-default 16 | datafuse-computegrouprole: follower 17 | 18 | template: 19 | metadata: 20 | labels: 21 | app: fuse-query-follower 22 | env: dev 23 | datafuse-computegroup: group1-default 24 | datafuse-computegrouprole: follower 25 | spec: 26 | containers: 27 | - name: fuse-query 28 | image: zhihanz/fuse-query:latest 29 | imagePullPolicy: IfNotPresent 30 | resources: 31 | requests: 32 | memory: "512Mi" 33 | cpu: "1300m" 34 | limits: 35 | memory: "512Mi" 36 | cpu: "2300m" 37 | env: 38 | - name: FUSE_QUERY_HTTP_API_ADDRESS 39 | value: 0.0.0.0:8080 40 | - name: FUSE_QUERY_MYSQL_HANDLER_HOST 41 | value: 0.0.0.0 42 | - name: FUSE_QUERY_METRIC_API_ADDRESS 43 | value: 0.0.0.0:7070 44 | - name: FUSE_QUERY_FLIGHT_API_ADDRESS 45 | value: 0.0.0.0:9090 46 | - name: FUSE_QUERY_MYSQL_HANDLER_PORT 47 | value: "3307" 48 | - name: FUSE_QUERY_PRIORITY 49 | value: "1" 50 | livenessProbe: 51 | failureThreshold: 3 52 | httpGet: 53 | path: /v1/hello 54 | port: http 55 | scheme: HTTP 56 | periodSeconds: 10 57 | successThreshold: 1 58 | timeoutSeconds: 1 59 | ports: 60 | - containerPort: 3307 61 | name: mysql 62 | protocol: TCP 63 | - containerPort: 8080 64 | name: http 65 | protocol: TCP 66 | - containerPort: 9090 67 | name: grpc 68 | protocol: TCP 69 | readinessProbe: 70 | failureThreshold: 3 71 | httpGet: 72 | path: /v1/configs 73 | port: http 74 | scheme: HTTP 75 | periodSeconds: 10 76 | successThreshold: 1 77 | timeoutSeconds: 1 -------------------------------------------------------------------------------- /tests/e2e/testfiles/default_generated_deploy.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: fuse-query-driver 6 | labels: 7 | app: fuse-query-driver 8 | env: dev 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: fuse-query-driver 14 | env: dev 15 | datafuse-computegroup: group1-default 16 | datafuse-computegrouprole: leader 17 | 18 | template: 19 | metadata: 20 | labels: 21 | app: fuse-query-driver 22 | env: dev 23 | datafuse-computegroup: group1-default 24 | datafuse-computegrouprole: leader 25 | spec: 26 | containers: 27 | - name: fuse-query-driver 28 | image: zhihanz/fuse-query:latest 29 | imagePullPolicy: IfNotPresent 30 | resources: 31 | requests: 32 | memory: "512Mi" 33 | cpu: "2300m" 34 | limits: 35 | memory: "512Mi" 36 | cpu: "2300m" 37 | env: 38 | - name: FUSE_QUERY_HTTP_API_ADDRESS 39 | value: 0.0.0.0:8080 40 | - name: FUSE_QUERY_MYSQL_HANDLER_HOST 41 | value: 0.0.0.0 42 | - name: FUSE_QUERY_METRIC_API_ADDRESS 43 | value: 0.0.0.0:7070 44 | - name: FUSE_QUERY_FLIGHT_API_ADDRESS 45 | value: 0.0.0.0:9090 46 | - name: FUSE_QUERY_MYSQL_HANDLER_PORT 47 | value: "3307" 48 | - name: FUSE_QUERY_PRIORITY 49 | value: "1" 50 | livenessProbe: 51 | failureThreshold: 3 52 | httpGet: 53 | path: /v1/hello 54 | port: http 55 | scheme: HTTP 56 | periodSeconds: 10 57 | successThreshold: 1 58 | timeoutSeconds: 1 59 | ports: 60 | - containerPort: 3307 61 | name: mysql 62 | protocol: TCP 63 | - containerPort: 8080 64 | name: http 65 | protocol: TCP 66 | - containerPort: 9090 67 | name: 
grpc 68 | protocol: TCP 69 | readinessProbe: 70 | failureThreshold: 3 71 | httpGet: 72 | path: /v1/configs 73 | port: http 74 | scheme: HTTP 75 | periodSeconds: 10 76 | successThreshold: 1 77 | timeoutSeconds: 1 -------------------------------------------------------------------------------- /tests/e2e/testfiles/default_operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseOperator 3 | metadata: 4 | name: datafusecluster-default 5 | namespace: default 6 | spec: 7 | computeGroups: 8 | - leaders: 9 | replicas: 1 10 | cores: 1 11 | coreLimit: "1300m" 12 | memory: "512Mi" 13 | memorylimit: "512Mi" 14 | image: zhihanz/fuse-query:latest 15 | imagePullPolicy: Always 16 | httpPort: 8080 17 | clickhousePort: 9000 18 | mysqlPort: 3306 19 | rpcPort: 9091 20 | metricsPort: 9098 21 | priority: 1 22 | namespace: default 23 | name: group1 -------------------------------------------------------------------------------- /tests/e2e/testfiles/default_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: fuse-query-service 5 | labels: 6 | app: fuse-query-driver 7 | env: dev 8 | datafuse-computegroup: group1-default 9 | datafuse-computegrouprole: leader 10 | spec: 11 | ports: 12 | - protocol: TCP 13 | port: 8080 14 | targetPort: 8080 15 | name: http 16 | - protocol: TCP 17 | port: 3307 18 | targetPort: 3307 19 | name: mysql 20 | - protocol: TCP 21 | port: 9090 22 | targetPort: 9090 23 | name: flight 24 | selector: 25 | run: my-nginx -------------------------------------------------------------------------------- /tests/e2e/testfiles/leader_with_2workers.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: datafuse.datafuselabs.io/v1alpha1 2 | kind: DatafuseOperator 3 | metadata: 4 | name: datafusecluster-default 5 | namespace: default 6 | spec: 7 | computeGroups: 8 | - leaders: 9 | replicas: 1 10 | cores: 1 11 | coreLimit: "1300m" 12 | memory: "512Mi" 13 | memorylimit: "512Mi" 14 | image: zhihanz/fuse-query:latest 15 | imagePullPolicy: Always 16 | httpPort: 8080 17 | clickhousePort: 9000 18 | mysqlPort: 3306 19 | rpcPort: 9091 20 | metricsPort: 9098 21 | priority: 1 22 | namespace: default 23 | name: multi-worker 24 | workers: 25 | - cores: 1 26 | coreLimit: "1300m" 27 | replicas: 1 28 | memory: "512Mi" 29 | memorylimit: "512Mi" 30 | image: zhihanz/fuse-query:latest 31 | imagePullPolicy: Always 32 | httpPort: 8080 33 | clickhousePort: 9000 34 | mysqlPort: 3306 35 | rpcPort: 9091 36 | metricsPort: 9098 37 | priority: 1 38 | name: "t1" 39 | - cores: 1 40 | replicas: 2 41 | coreLimit: "1300m" 42 | memory: "512Mi" 43 | memorylimit: "512Mi" 44 | image: zhihanz/fuse-query:latest 45 | imagePullPolicy: Always 46 | httpPort: 8080 47 | clickhousePort: 9000 48 | mysqlPort: 3306 49 | rpcPort: 9091 50 | metricsPort: 9098 51 | priority: 2 52 | name: "t2" -------------------------------------------------------------------------------- /tests/e2e/validate/valid_cluster.sql: -------------------------------------------------------------------------------- 1 | select * from system.clusters -------------------------------------------------------------------------------- /tests/framework/cluster_role.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 
2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | package framework 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "io" 10 | "os" 11 | 12 | rbacv1 "k8s.io/api/rbac/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/util/yaml" 16 | "k8s.io/client-go/kubernetes" 17 | ) 18 | 19 | func CreateClusterRole(kubeClient kubernetes.Interface, relativePath string) error { 20 | clusterRole, err := parseClusterRoleYaml(relativePath) 21 | if err != nil { 22 | return err 23 | } 24 | 25 | _, err = kubeClient.RbacV1().ClusterRoles().Get(context.TODO(), clusterRole.Name, metav1.GetOptions{}) 26 | 27 | if err == nil { 28 | // ClusterRole already exists -> Update 29 | _, err = kubeClient.RbacV1().ClusterRoles().Update(context.TODO(), clusterRole, metav1.UpdateOptions{}) 30 | if err != nil { 31 | return err 32 | } 33 | 34 | } else { 35 | // ClusterRole doesn't exists -> Create 36 | _, err = kubeClient.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}) 37 | if err != nil { 38 | return err 39 | } 40 | } 41 | 42 | return nil 43 | } 44 | 45 | func DeleteClusterRole(kubeClient kubernetes.Interface, relativePath string) error { 46 | clusterRole, err := parseClusterRoleYaml(relativePath) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | if err := kubeClient.RbacV1().ClusterRoles().Delete(context.TODO(), clusterRole.Name, metav1.DeleteOptions{}); err != nil { 52 | return err 53 | } 54 | 55 | return nil 56 | } 57 | 58 | func parseClusterRoleYaml(relativePath string) (*rbacv1.ClusterRole, error) { 59 | var manifest *os.File 60 | var err error 61 | 62 | var clusterRole rbacv1.ClusterRole 63 | if manifest, err = PathToOSFile(relativePath); err != nil { 64 | return nil, err 65 | } 66 | 67 | decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) 68 | for { 69 | var out unstructured.Unstructured 70 | err = decoder.Decode(&out) 71 | if err != nil { 72 | // this would indicate it's malformed YAML. 73 | break 74 | } 75 | 76 | if out.GetKind() == "ClusterRole" { 77 | var marshaled []byte 78 | marshaled, err = out.MarshalJSON() 79 | json.Unmarshal(marshaled, &clusterRole) 80 | break 81 | } 82 | } 83 | 84 | if err != io.EOF && err != nil { 85 | return nil, err 86 | } 87 | return &clusterRole, nil 88 | } 89 | -------------------------------------------------------------------------------- /tests/framework/cluster_role_binding.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
4 | package framework 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "io" 10 | "os" 11 | 12 | rbacv1 "k8s.io/api/rbac/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/util/yaml" 16 | "k8s.io/client-go/kubernetes" 17 | ) 18 | 19 | func CreateClusterRoleBinding(kubeClient kubernetes.Interface, namespace, relativePath string) (FinalizerFn, error) { 20 | finalizerFn := func() error { 21 | return DeleteClusterRoleBinding(kubeClient, relativePath) 22 | } 23 | clusterRoleBinding, err := parseClusterRoleBindingYaml(relativePath) 24 | if err != nil { 25 | return finalizerFn, err 26 | } 27 | clusterRoleBinding.Subjects[0].Namespace = namespace 28 | _, err = kubeClient.RbacV1().ClusterRoleBindings().Get(context.TODO(), clusterRoleBinding.Name, metav1.GetOptions{}) 29 | 30 | if err == nil { 31 | // ClusterRoleBinding already exists -> Update 32 | _, err = kubeClient.RbacV1().ClusterRoleBindings().Update(context.TODO(), clusterRoleBinding, metav1.UpdateOptions{}) 33 | if err != nil { 34 | return finalizerFn, err 35 | } 36 | } else { 37 | // ClusterRoleBinding doesn't exist -> Create 38 | _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}) 39 | if err != nil { 40 | return finalizerFn, err 41 | } 42 | } 43 | 44 | return finalizerFn, err 45 | } 46 | 47 | func DeleteClusterRoleBinding(kubeClient kubernetes.Interface, relativePath string) error { 48 | clusterRoleBinding, err := parseClusterRoleBindingYaml(relativePath) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | if err := kubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBinding.Name, metav1.DeleteOptions{}); err != nil { 54 | return err 55 | } 56 | 57 | return nil 58 | } 59 | 60 | func parseClusterRoleBindingYaml(relativePath string) (*rbacv1.ClusterRoleBinding, error) { 61 | var manifest *os.File 62 | var err error 63 | 64 | var clusterRoleBinding rbacv1.ClusterRoleBinding 65 | if manifest, err = PathToOSFile(relativePath); err != nil { 66 | return nil, err 67 | } 68 | 69 | decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) 70 | for { 71 | var out unstructured.Unstructured 72 | err = decoder.Decode(&out) 73 | if err != nil { 74 | // this would indicate it's malformed YAML. 75 | break 76 | } 77 | 78 | if out.GetKind() == "ClusterRoleBinding" { 79 | var marshaled []byte 80 | marshaled, err = out.MarshalJSON() 81 | json.Unmarshal(marshaled, &clusterRoleBinding) 82 | break 83 | } 84 | } 85 | 86 | if err != io.EOF && err != nil { 87 | return nil, err 88 | } 89 | return &clusterRoleBinding, nil 90 | } 91 | -------------------------------------------------------------------------------- /tests/framework/crd.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | package framework 5 | 6 | import ( 7 | "context" 8 | "io/ioutil" 9 | "net/http" 10 | "time" 11 | 12 | "github.com/ghodss/yaml" 13 | "github.com/pkg/errors" 14 | v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" 15 | apierrors "k8s.io/apimachinery/pkg/api/errors" 16 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 17 | "k8s.io/apimachinery/pkg/runtime" 18 | "k8s.io/apimachinery/pkg/util/wait" 19 | ) 20 | 21 | // GetCRD gets a custom resource definition from the apiserver.
22 | func (f *Framework) GetCRD(name string) (*v1.CustomResourceDefinition, error) { 23 | crd, err := f.APIServerClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), name, metav1.GetOptions{}) 24 | if err != nil { 25 | return nil, errors.Wrapf(err, "unable to get CRD with name %v", name) 26 | } 27 | return crd, nil 28 | } 29 | 30 | // ListCRDs gets a list of custom resource definitions from the apiserver. 31 | func (f *Framework) ListCRDs() (*v1.CustomResourceDefinitionList, error) { 32 | crds, err := f.APIServerClient.ApiextensionsV1().CustomResourceDefinitions().List(context.TODO(), metav1.ListOptions{}) 33 | if err != nil { 34 | return nil, errors.Wrap(err, "unable to list CRDs") 35 | } 36 | return crds, nil 37 | } 38 | 39 | // CreateCRD creates a custom resource definition on the apiserver. 40 | func (f *Framework) CreateCRD(crd *v1.CustomResourceDefinition) error { 41 | _, err := f.APIServerClient.ApiextensionsV1().CustomResourceDefinitions().Get(context.TODO(), crd.Name, metav1.GetOptions{}) 42 | if err != nil && !apierrors.IsNotFound(err) { 43 | return errors.Wrapf(err, "getting CRD: %s", crd.Spec.Names.Kind) 44 | } 45 | 46 | if apierrors.IsNotFound(err) { 47 | _, err := f.APIServerClient.ApiextensionsV1().CustomResourceDefinitions().Create(context.TODO(), crd, metav1.CreateOptions{}) 48 | if err != nil { 49 | return errors.Wrapf(err, "create CRD: %s", crd.Spec.Names.Kind) 50 | } 51 | } 52 | return nil 53 | } 54 | 55 | // MakeCRD creates a CustomResourceDefinition object from yaml manifest. 56 | func (f *Framework) MakeCRD(pathToYaml string) (*v1.CustomResourceDefinition, error) { 57 | manifest, err := ioutil.ReadFile(pathToYaml) 58 | if err != nil { 59 | return nil, errors.Wrapf(err, "read CRD asset file: %s", pathToYaml) 60 | } 61 | 62 | crd := v1.CustomResourceDefinition{} 63 | err = yaml.Unmarshal(manifest, &crd) 64 | if err != nil { 65 | return nil, errors.Wrapf(err, "unmarshal CRD asset file: %s", pathToYaml) 66 | } 67 | 68 | return &crd, nil 69 | } 70 | 71 | // WaitForCRDReady waits for a Custom Resource Definition to be available for use. 72 | func WaitForCRDReady(listFunc func(opts metav1.ListOptions) (runtime.Object, error)) error { 73 | err := wait.Poll(3*time.Second, 10*time.Minute, func() (bool, error) { 74 | _, err := listFunc(metav1.ListOptions{}) 75 | if err != nil { 76 | if se, ok := err.(*apierrors.StatusError); ok { 77 | if se.Status().Code == http.StatusNotFound { 78 | return false, nil 79 | } 80 | } 81 | return false, errors.Wrap(err, "failed to list CRD") 82 | } 83 | return true, nil 84 | }) 85 | 86 | return errors.Wrap(err, "timed out waiting for Custom Resource") 87 | } 88 | 89 | // // CreateCRDAndWaitUntilReady creates a Custom Resource Definition from yaml 90 | // // manifest on the apiserver and wait until it is available for use. 91 | // func (f *Framework) CreateCRDAndWaitUntilReady(crdName string, listFunc func(opts metav1.ListOptions) (runtime.Object, error)) error { 92 | // crdName = strings.ToLower(crdName) 93 | // group := monitoring.GroupName 94 | // assetPath := "../../example/prometheus-operator-crd/" + group + "_" + crdName + ".yaml" 95 | 96 | // crd, err := f.MakeCRD(assetPath) 97 | // if err != nil { 98 | // return errors.Wrapf(err, "create CRD: %s from manifest: %s", crdName, assetPath) 99 | // } 100 | 101 | // crd.ObjectMeta.Name = crd.Spec.Names.Plural + "." 
+ group 102 | // crd.Spec.Group = group 103 | 104 | // err = f.CreateCRD(crd) 105 | // if err != nil { 106 | // return errors.Wrapf(err, "create CRD %s on the apiserver", crdName) 107 | // } 108 | 109 | // err = WaitForCRDReady(listFunc) 110 | // if err != nil { 111 | // return errors.Wrapf(err, "%s CRD not ready", crdName) 112 | // } 113 | 114 | // return nil 115 | // } 116 | -------------------------------------------------------------------------------- /tests/framework/deployment.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | package framework 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | "time" 10 | 11 | "github.com/pkg/errors" 12 | appsv1 "k8s.io/api/apps/v1" 13 | apierrors "k8s.io/apimachinery/pkg/api/errors" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/util/wait" 16 | "k8s.io/apimachinery/pkg/util/yaml" 17 | "k8s.io/client-go/kubernetes" 18 | ) 19 | 20 | func GetDeployment(kubeClient kubernetes.Interface, ns, name string) (*appsv1.Deployment, error) { 21 | return kubeClient.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{}) 22 | } 23 | 24 | func UpdateDeployment(kubeClient kubernetes.Interface, deployment *appsv1.Deployment) (*appsv1.Deployment, error) { 25 | return kubeClient.AppsV1().Deployments(deployment.Namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{}) 26 | } 27 | 28 | func MakeDeployment(pathToYaml string) (*appsv1.Deployment, error) { 29 | manifest, err := PathToOSFile(pathToYaml) 30 | if err != nil { 31 | return nil, err 32 | } 33 | deployment := appsv1.Deployment{} 34 | if err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&deployment); err != nil { 35 | return nil, errors.Wrap(err, fmt.Sprintf("failed to decode file %s", pathToYaml)) 36 | } 37 | 38 | return &deployment, nil 39 | } 40 | 41 | func CreateDeployment(kubeClient kubernetes.Interface, namespace string, d *appsv1.Deployment) error { 42 | d.Namespace = namespace 43 | _, err := kubeClient.AppsV1().Deployments(namespace).Create(context.TODO(), d, metav1.CreateOptions{}) 44 | if err != nil { 45 | return errors.Wrap(err, fmt.Sprintf("failed to create deployment %s", d.Name)) 46 | } 47 | return nil 48 | } 49 | 50 | func DeleteDeployment(kubeClient kubernetes.Interface, namespace, name string) error { 51 | d, err := kubeClient.AppsV1().Deployments(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | zero := int32(0) 57 | d.Spec.Replicas = &zero 58 | 59 | d, err = kubeClient.AppsV1().Deployments(namespace).Update(context.TODO(), d, metav1.UpdateOptions{}) 60 | if err != nil { 61 | return err 62 | } 63 | return kubeClient.AppsV1().Deployments(namespace).Delete(context.TODO(), d.Name, metav1.DeleteOptions{}) 64 | } 65 | 66 | func WaitUntilDeploymentGone(kubeClient kubernetes.Interface, namespace, name string, timeout time.Duration) error { 67 | return wait.Poll(time.Second, timeout, func() (bool, error) { 68 | _, err := kubeClient. 69 | AppsV1().Deployments(namespace).
70 | Get(context.TODO(), name, metav1.GetOptions{}) 71 | 72 | if err != nil { 73 | if apierrors.IsNotFound(err) { 74 | return true, nil 75 | } 76 | 77 | return false, err 78 | } 79 | 80 | return false, nil 81 | }) 82 | } 83 | -------------------------------------------------------------------------------- /tests/framework/helpers.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | package framework 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | "net/http" 10 | "os" 11 | "path/filepath" 12 | "time" 13 | 14 | v1 "k8s.io/api/core/v1" 15 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 16 | "k8s.io/apimachinery/pkg/util/wait" 17 | "k8s.io/client-go/kubernetes" 18 | "k8s.io/client-go/rest" 19 | 20 | "github.com/pkg/errors" 21 | ) 22 | 23 | func PathToOSFile(relativPath string) (*os.File, error) { 24 | path, err := filepath.Abs(relativPath) 25 | if err != nil { 26 | return nil, errors.Wrap(err, fmt.Sprintf("failed generate absolute file path of %s", relativPath)) 27 | } 28 | 29 | manifest, err := os.Open(path) 30 | if err != nil { 31 | return nil, errors.Wrap(err, fmt.Sprintf("failed to open file %s", path)) 32 | } 33 | 34 | return manifest, nil 35 | } 36 | 37 | // PodRunningAndReady returns whether a pod is running and each container has 38 | // passed it's ready state. 39 | func PodRunningAndReady(pod v1.Pod) (bool, error) { 40 | switch pod.Status.Phase { 41 | case v1.PodFailed, v1.PodSucceeded: 42 | return false, fmt.Errorf("pod completed") 43 | case v1.PodRunning: 44 | for _, cond := range pod.Status.Conditions { 45 | if cond.Type != v1.PodReady { 46 | continue 47 | } 48 | return cond.Status == v1.ConditionTrue, nil 49 | } 50 | return false, fmt.Errorf("pod ready condition not found") 51 | } 52 | return false, nil 53 | } 54 | 55 | // WaitForPodsReady waits for a selection of Pods to be running and each 56 | // container to pass its readiness check. 
57 | func WaitForPodsReady(kubeClient kubernetes.Interface, namespace string, timeout time.Duration, expectedReplicas int, opts metav1.ListOptions) error { 58 | return wait.Poll(time.Second, timeout, func() (bool, error) { 59 | pl, err := kubeClient.CoreV1().Pods(namespace).List(context.TODO(), opts) 60 | if err != nil { 61 | return false, err 62 | } 63 | 64 | runningAndReady := 0 65 | for _, p := range pl.Items { 66 | isRunningAndReady, err := PodRunningAndReady(p) 67 | if err != nil { 68 | return false, err 69 | } 70 | 71 | if isRunningAndReady { 72 | runningAndReady++ 73 | } 74 | } 75 | 76 | if runningAndReady == expectedReplicas { 77 | return true, nil 78 | } 79 | return false, nil 80 | }) 81 | } 82 | 83 | func WaitForPodsRunImage(kubeClient kubernetes.Interface, namespace string, expectedReplicas int, image string, opts metav1.ListOptions) error { 84 | return wait.Poll(time.Second, time.Minute*5, func() (bool, error) { 85 | pl, err := kubeClient.CoreV1().Pods(namespace).List(context.TODO(), opts) 86 | if err != nil { 87 | return false, err 88 | } 89 | 90 | runningImage := 0 91 | for _, p := range pl.Items { 92 | if podRunsImage(p, image) { 93 | runningImage++ 94 | } 95 | } 96 | 97 | if runningImage == expectedReplicas { 98 | return true, nil 99 | } 100 | return false, nil 101 | }) 102 | } 103 | 104 | func WaitForHTTPSuccessStatusCode(timeout time.Duration, url string) error { 105 | var resp *http.Response 106 | err := wait.Poll(time.Second, timeout, func() (bool, error) { 107 | var err error 108 | resp, err = http.Get(url) 109 | if err == nil && resp.StatusCode == 200 { 110 | return true, nil 111 | } 112 | return false, nil 113 | }) 114 | 115 | return errors.Wrap(err, fmt.Sprintf( 116 | "waiting for %v to return a successful status code timed out. Last response from server was: %v", 117 | url, 118 | resp, 119 | )) 120 | } 121 | 122 | func podRunsImage(p v1.Pod, image string) bool { 123 | for _, c := range p.Spec.Containers { 124 | if image == c.Image { 125 | return true 126 | } 127 | } 128 | 129 | return false 130 | } 131 | 132 | func GetLogs(kubeClient kubernetes.Interface, namespace string, podName, containerName string) (string, error) { 133 | logs, err := kubeClient.CoreV1().RESTClient().Get(). 134 | Resource("pods"). 135 | Namespace(namespace). 136 | Name(podName).SubResource("log"). 137 | Param("container", containerName). 138 | Do(context.TODO()). 139 | Raw() 140 | if err != nil { 141 | return "", err 142 | } 143 | return string(logs), err 144 | } 145 | 146 | func ProxyGetPod(kubeClient kubernetes.Interface, namespace string, podName string, port string, path string) *rest.Request { 147 | return kubeClient.CoreV1().RESTClient().Get().Prefix("proxy").Namespace(namespace).Resource("pods").Name(podName + ":" + port).Suffix(path) 148 | } 149 | -------------------------------------------------------------------------------- /tests/framework/namespace.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
4 | package framework 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "fmt" 10 | "testing" 11 | 12 | "github.com/pkg/errors" 13 | v1 "k8s.io/api/core/v1" 14 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 15 | "k8s.io/apimachinery/pkg/types" 16 | "k8s.io/client-go/kubernetes" 17 | ) 18 | 19 | func CreateNamespace(kubeClient kubernetes.Interface, name string) (*v1.Namespace, error) { 20 | namespace, err := kubeClient.CoreV1().Namespaces().Create(context.TODO(), &v1.Namespace{ 21 | ObjectMeta: metav1.ObjectMeta{ 22 | Name: name, 23 | }, 24 | }, metav1.CreateOptions{}) 25 | if err != nil { 26 | return nil, errors.Wrap(err, fmt.Sprintf("failed to create namespace with name %v", name)) 27 | } 28 | return namespace, nil 29 | } 30 | 31 | func (ctx *TestCtx) CreateNamespace(t *testing.T, kubeClient kubernetes.Interface) string { 32 | name := ctx.GetObjID() 33 | if _, err := CreateNamespace(kubeClient, name); err != nil { 34 | t.Fatal(err) 35 | } 36 | 37 | namespaceFinalizerFn := func() error { 38 | return DeleteNamespace(kubeClient, name) 39 | } 40 | 41 | ctx.AddFinalizerFn(namespaceFinalizerFn) 42 | 43 | return name 44 | } 45 | 46 | func DeleteNamespace(kubeClient kubernetes.Interface, name string) error { 47 | return kubeClient.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}) 48 | } 49 | 50 | func AddLabelsToNamespace(kubeClient kubernetes.Interface, name string, additionalLabels map[string]string) error { 51 | ns, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | if ns.Labels == nil { 57 | ns.Labels = map[string]string{} 58 | } 59 | 60 | for k, v := range additionalLabels { 61 | ns.Labels[k] = v 62 | } 63 | 64 | _, err = kubeClient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) 65 | if err != nil { 66 | return err 67 | } 68 | 69 | return nil 70 | } 71 | 72 | func RemoveLabelsFromNamespace(kubeClient kubernetes.Interface, name string, labels ...string) error { 73 | ns, err := kubeClient.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) 74 | if err != nil { 75 | return err 76 | } 77 | 78 | if len(ns.Labels) == 0 { 79 | return nil 80 | } 81 | 82 | type patch struct { 83 | Op string `json:"op"` 84 | Path string `json:"path"` 85 | } 86 | 87 | var patches []patch 88 | for _, l := range labels { 89 | patches = append(patches, patch{Op: "remove", Path: "/metadata/labels/" + l}) 90 | } 91 | b, err := json.Marshal(patches) 92 | if err != nil { 93 | return err 94 | } 95 | 96 | _, err = kubeClient.CoreV1().Namespaces().Patch(context.TODO(), name, types.JSONPatchType, b, metav1.PatchOptions{}) 97 | if err != nil { 98 | return err 99 | } 100 | 101 | return nil 102 | } 103 | -------------------------------------------------------------------------------- /tests/framework/operator.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
4 | package framework 5 | 6 | import ( 7 | "context" 8 | "fmt" 9 | 10 | "github.com/pkg/errors" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/util/yaml" 13 | 14 | "datafuselabs.io/datafuse-operator/pkg/apis/datafuse/v1alpha1" 15 | crdclientset "datafuselabs.io/datafuse-operator/pkg/client/clientset/versioned" 16 | testutils "datafuselabs.io/datafuse-operator/tests/utils" 17 | ) 18 | 19 | func MakeDatafuseOperatorFromYaml(pathToYaml string) (*v1alpha1.DatafuseOperator, error) { 20 | manifest, err := testutils.PathToOSFile(pathToYaml) 21 | if err != nil { 22 | return nil, err 23 | } 24 | operator := v1alpha1.DatafuseOperator{} 25 | if err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&operator); err != nil { 26 | return nil, errors.Wrap(err, fmt.Sprintf("failed to decode file %s", pathToYaml)) 27 | } 28 | 29 | return &operator, nil 30 | } 31 | 32 | func CreateDatafuseOperator(crdclientset crdclientset.Interface, namespace string, op *v1alpha1.DatafuseOperator) error { 33 | _, err := crdclientset.DatafuseV1alpha1().DatafuseOperators(namespace).Create(context.TODO(), op, metav1.CreateOptions{}) 34 | if err != nil { 35 | return errors.Wrap(err, fmt.Sprintf("failed to create DatafuseOperator %s in %s", op.Name, op.Namespace)) 36 | } 37 | return nil 38 | } 39 | 40 | func UpdateDatafuseOperator(crdclientset crdclientset.Interface, namespace string, op *v1alpha1.DatafuseOperator) error { 41 | _, err := crdclientset.DatafuseV1alpha1().DatafuseOperators(namespace).Update(context.TODO(), op, metav1.UpdateOptions{}) 42 | if err != nil { 43 | return errors.Wrap(err, fmt.Sprintf("failed to update DatafuseOperator %s in %s", op.Name, op.Namespace)) 44 | } 45 | return nil 46 | } 47 | 48 | func GetDatafuseOperator(crdclientset crdclientset.Interface, namespace string, name string) error { 49 | _, err := crdclientset.DatafuseV1alpha1().DatafuseOperators(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 50 | if err != nil { 51 | return errors.Wrap(err, fmt.Sprintf("failed to get DatafuseOperator %s in %s", name, namespace)) 52 | } 53 | return nil 54 | } 55 | 56 | func DeleteDatafuseOperator(crdclientset crdclientset.Interface, namespace string, name string) error { 57 | err := crdclientset.DatafuseV1alpha1().DatafuseOperators(namespace).Delete(context.TODO(), name, metav1.DeleteOptions{}) 58 | if err != nil { 59 | return errors.Wrap(err, fmt.Sprintf("failed to delete DatafuseOperator %s in %s", name, namespace)) 60 | } 61 | return nil 62 | } 63 | -------------------------------------------------------------------------------- /tests/framework/pod.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0.
4 | package framework 5 | 6 | import ( 7 | "bytes" 8 | "context" 9 | "fmt" 10 | "io" 11 | "net/url" 12 | "strings" 13 | 14 | "github.com/pkg/errors" 15 | corev1 "k8s.io/api/core/v1" 16 | v1 "k8s.io/api/core/v1" 17 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 18 | "k8s.io/apimachinery/pkg/util/yaml" 19 | "k8s.io/client-go/kubernetes" 20 | kscheme "k8s.io/client-go/kubernetes/scheme" 21 | "k8s.io/client-go/rest" 22 | "k8s.io/client-go/tools/remotecommand" 23 | ) 24 | 25 | func GetPod(kubeClient kubernetes.Interface, ns, name string) (*corev1.Pod, error) { 26 | return kubeClient.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) 27 | } 28 | 29 | func UpdatePod(kubeClient kubernetes.Interface, pod *corev1.Pod) (*corev1.Pod, error) { 30 | return kubeClient.CoreV1().Pods(pod.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{}) 31 | } 32 | 33 | func MakePod(pathToYaml string) (*corev1.Pod, error) { 34 | manifest, err := PathToOSFile(pathToYaml) 35 | if err != nil { 36 | return nil, err 37 | } 38 | pod := corev1.Pod{} 39 | if err := yaml.NewYAMLOrJSONDecoder(manifest, 100).Decode(&pod); err != nil { 40 | return nil, errors.Wrap(err, fmt.Sprintf("failed to decode file %s", pathToYaml)) 41 | } 42 | 43 | return &pod, nil 44 | } 45 | 46 | func CreatePod(kubeClient kubernetes.Interface, namespace string, p *corev1.Pod) error { 47 | p.Namespace = namespace 48 | _, err := kubeClient.CoreV1().Pods(namespace).Create(context.TODO(), p, metav1.CreateOptions{}) 49 | if err != nil { 50 | return errors.Wrap(err, fmt.Sprintf("failed to create pod %s", p.Name)) 51 | } 52 | return nil 53 | } 54 | 55 | func DeletePod(kubeClient kubernetes.Interface, namespace, name string) error { 56 | p, err := kubeClient.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) 57 | if err != nil { 58 | return err 59 | } 60 | 61 | return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), p.Name, metav1.DeleteOptions{}) 62 | } 63 | 64 | // PrintPodLogs prints the logs of a specified Pod 65 | func (f *Framework) PrintPodLogs(ns, p string) error { 66 | pod, err := f.KubeClient.CoreV1().Pods(ns).Get(context.TODO(), p, metav1.GetOptions{}) 67 | if err != nil { 68 | return errors.Wrapf(err, "failed to print logs of pod '%v': failed to get pod", p) 69 | } 70 | 71 | for _, c := range pod.Spec.Containers { 72 | req := f.KubeClient.CoreV1().Pods(ns).GetLogs(p, &corev1.PodLogOptions{Container: c.Name}) 73 | resp, err := req.DoRaw(context.TODO()) 74 | if err != nil { 75 | return errors.Wrapf(err, "failed to retrieve logs of pod '%v'", p) 76 | } 77 | 78 | fmt.Printf("=== Logs of %v/%v/%v:", ns, p, c.Name) 79 | fmt.Println(string(resp)) 80 | } 81 | 82 | return nil 83 | } 84 | 85 | // ExecOptions passed to ExecWithOptions 86 | type ExecOptions struct { 87 | Command []string 88 | Namespace string 89 | PodName string 90 | ContainerName string 91 | Stdin io.Reader 92 | CaptureStdout bool 93 | CaptureStderr bool 94 | // If false, whitespace in std{err,out} will be removed.
95 | PreserveWhitespace bool 96 | } 97 | 98 | func (f *Framework) MakeExecOptions(containerName, podName, namespace string, commands []string) ExecOptions { 99 | return ExecOptions{ 100 | Command: commands, 101 | Namespace: namespace, 102 | PodName: podName, 103 | ContainerName: containerName, 104 | CaptureStdout: true, 105 | CaptureStderr: true, 106 | PreserveWhitespace: true, 107 | } 108 | } 109 | 110 | // ExecWithOptions executes a command in the specified container, returning 111 | // stdout, stderr and error. `options` allowed for additional parameters to be 112 | // passed. Inspired by 113 | // https://github.com/kubernetes/kubernetes/blob/dde6e8e7465468c32642659cb708a5cc922add64/test/e2e/framework/exec_util.go#L36-L51 114 | func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) { 115 | const tty = false 116 | 117 | req := f.KubeClient.CoreV1().RESTClient().Post(). 118 | Resource("pods"). 119 | Name(options.PodName). 120 | Namespace(options.Namespace). 121 | SubResource("exec"). 122 | Param("container", options.ContainerName) 123 | req.VersionedParams(&v1.PodExecOptions{ 124 | Container: options.ContainerName, 125 | Command: options.Command, 126 | Stdin: options.Stdin != nil, 127 | Stdout: options.CaptureStdout, 128 | Stderr: options.CaptureStderr, 129 | TTY: tty, 130 | }, kscheme.ParameterCodec) 131 | 132 | var stdout, stderr bytes.Buffer 133 | err := execute("POST", req.URL(), f.RestConfig, options.Stdin, &stdout, &stderr, tty) 134 | 135 | if options.PreserveWhitespace { 136 | return stdout.String(), stderr.String(), err 137 | } 138 | return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err 139 | } 140 | 141 | func execute(method string, url *url.URL, config *rest.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error { 142 | exec, err := remotecommand.NewSPDYExecutor(config, method, url) 143 | if err != nil { 144 | return err 145 | } 146 | return exec.Stream(remotecommand.StreamOptions{ 147 | Stdin: stdin, 148 | Stdout: stdout, 149 | Stderr: stderr, 150 | Tty: tty, 151 | }) 152 | } 153 | -------------------------------------------------------------------------------- /tests/framework/role.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
4 | package framework 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "io" 10 | "os" 11 | 12 | rbacv1 "k8s.io/api/rbac/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/util/yaml" 16 | "k8s.io/client-go/kubernetes" 17 | ) 18 | 19 | func CreateRole(kubeClient kubernetes.Interface, ns string, relativePath string) error { 20 | role, err := parseRoleYaml(relativePath) 21 | if err != nil { 22 | return err 23 | } 24 | 25 | role.Namespace = ns 26 | _, err = kubeClient.RbacV1().Roles(ns).Get(context.TODO(), role.Name, metav1.GetOptions{}) 27 | 28 | if err == nil { 29 | // Role already exists -> Update 30 | _, err = kubeClient.RbacV1().Roles(ns).Update(context.TODO(), role, metav1.UpdateOptions{}) 31 | if err != nil { 32 | return err 33 | } 34 | 35 | } else { 36 | // Role doesn't exists -> Create 37 | _, err = kubeClient.RbacV1().Roles(ns).Create(context.TODO(), role, metav1.CreateOptions{}) 38 | if err != nil { 39 | return err 40 | } 41 | } 42 | 43 | return nil 44 | } 45 | 46 | func DeleteRole(kubeClient kubernetes.Interface, ns string, relativePath string) error { 47 | role, err := parseRoleYaml(relativePath) 48 | if err != nil { 49 | return err 50 | } 51 | 52 | if err := kubeClient.RbacV1().Roles(ns).Delete(context.TODO(), role.Name, metav1.DeleteOptions{}); err != nil { 53 | return err 54 | } 55 | 56 | return nil 57 | } 58 | 59 | func parseRoleYaml(relativePath string) (*rbacv1.Role, error) { 60 | var manifest *os.File 61 | var err error 62 | 63 | var role rbacv1.Role 64 | if manifest, err = PathToOSFile(relativePath); err != nil { 65 | return nil, err 66 | } 67 | 68 | decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) 69 | for { 70 | var out unstructured.Unstructured 71 | err = decoder.Decode(&out) 72 | if err != nil { 73 | // this would indicate it's malformed YAML. 74 | break 75 | } 76 | 77 | if out.GetKind() == "Role" { 78 | var marshaled []byte 79 | marshaled, err = out.MarshalJSON() 80 | json.Unmarshal(marshaled, &role) 81 | break 82 | } 83 | } 84 | 85 | if err != io.EOF && err != nil { 86 | return nil, err 87 | } 88 | return &role, nil 89 | } 90 | -------------------------------------------------------------------------------- /tests/framework/role_binding.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
4 | package framework 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "io" 10 | "os" 11 | 12 | rbacv1 "k8s.io/api/rbac/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/util/yaml" 16 | "k8s.io/client-go/kubernetes" 17 | ) 18 | 19 | func CreateRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) (FinalizerFn, error) { 20 | finalizerFn := func() error { 21 | return DeleteRoleBinding(kubeClient, ns, relativePath) 22 | } 23 | roleBinding, err := parseRoleBindingYaml(relativePath) 24 | if err != nil { 25 | return finalizerFn, err 26 | } 27 | 28 | roleBinding.Namespace = ns 29 | roleBinding.Subjects[0].Namespace = ns 30 | _, err = kubeClient.RbacV1().RoleBindings(ns).Get(context.TODO(), roleBinding.Name, metav1.GetOptions{}) 31 | 32 | if err == nil { 33 | // RoleBinding already exists -> Update 34 | _, err = kubeClient.RbacV1().RoleBindings(ns).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}) 35 | if err != nil { 36 | return finalizerFn, err 37 | } 38 | } else { 39 | // RoleBinding doesn't exists -> Create 40 | _, err = kubeClient.RbacV1().RoleBindings(ns).Create(context.TODO(), roleBinding, metav1.CreateOptions{}) 41 | if err != nil { 42 | return finalizerFn, err 43 | } 44 | } 45 | 46 | return finalizerFn, err 47 | } 48 | 49 | func DeleteRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) error { 50 | roleBinding, err := parseRoleBindingYaml(relativePath) 51 | if err != nil { 52 | return err 53 | } 54 | 55 | if err := kubeClient.RbacV1().RoleBindings(ns).Delete( 56 | context.TODO(), 57 | roleBinding.Name, 58 | metav1.DeleteOptions{}, 59 | ); err != nil { 60 | return err 61 | } 62 | 63 | return nil 64 | } 65 | 66 | func parseRoleBindingYaml(relativePath string) (*rbacv1.RoleBinding, error) { 67 | var manifest *os.File 68 | var err error 69 | 70 | var roleBinding rbacv1.RoleBinding 71 | if manifest, err = PathToOSFile(relativePath); err != nil { 72 | return nil, err 73 | } 74 | 75 | decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) 76 | for { 77 | var out unstructured.Unstructured 78 | err = decoder.Decode(&out) 79 | if err != nil { 80 | // this would indicate it's malformed YAML. 81 | break 82 | } 83 | 84 | if out.GetKind() == "RoleBinding" { 85 | var marshaled []byte 86 | marshaled, err = out.MarshalJSON() 87 | json.Unmarshal(marshaled, &roleBinding) 88 | break 89 | } 90 | } 91 | 92 | if err != io.EOF && err != nil { 93 | return nil, err 94 | } 95 | return &roleBinding, nil 96 | } 97 | -------------------------------------------------------------------------------- /tests/framework/service_account.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
4 | package framework 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | "io" 10 | "os" 11 | 12 | v1 "k8s.io/api/core/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 15 | "k8s.io/apimachinery/pkg/util/yaml" 16 | "k8s.io/client-go/kubernetes" 17 | ) 18 | 19 | func CreateServiceAccount(kubeClient kubernetes.Interface, namespace string, relativePath string) (FinalizerFn, error) { 20 | finalizerFn := func() error { 21 | return DeleteServiceAccount(kubeClient, namespace, relativePath) 22 | } 23 | 24 | serviceAccount, err := parseServiceAccountYaml(relativePath) 25 | if err != nil { 26 | return finalizerFn, err 27 | } 28 | serviceAccount.Namespace = namespace 29 | _, err = kubeClient.CoreV1().ServiceAccounts(namespace).Create(context.TODO(), serviceAccount, metav1.CreateOptions{}) 30 | if err != nil { 31 | return finalizerFn, err 32 | } 33 | 34 | return finalizerFn, nil 35 | } 36 | 37 | func parseServiceAccountYaml(relativePath string) (*v1.ServiceAccount, error) { 38 | var manifest *os.File 39 | var err error 40 | 41 | var serviceAccount v1.ServiceAccount 42 | if manifest, err = PathToOSFile(relativePath); err != nil { 43 | return nil, err 44 | } 45 | 46 | decoder := yaml.NewYAMLOrJSONDecoder(manifest, 100) 47 | for { 48 | var out unstructured.Unstructured 49 | err = decoder.Decode(&out) 50 | if err != nil { 51 | // this would indicate it's malformed YAML. 52 | break 53 | } 54 | 55 | if out.GetKind() == "ServiceAccount" { 56 | var marshaled []byte 57 | marshaled, err = out.MarshalJSON() 58 | json.Unmarshal(marshaled, &serviceAccount) 59 | break 60 | } 61 | } 62 | 63 | if err != io.EOF && err != nil { 64 | return nil, err 65 | } 66 | return &serviceAccount, nil 67 | } 68 | 69 | func DeleteServiceAccount(kubeClient kubernetes.Interface, namespace string, relativePath string) error { 70 | serviceAccount, err := parseServiceAccountYaml(relativePath) 71 | if err != nil { 72 | return err 73 | } 74 | 75 | return kubeClient.CoreV1().ServiceAccounts(namespace).Delete(context.TODO(), serviceAccount.Name, metav1.DeleteOptions{}) 76 | } 77 | -------------------------------------------------------------------------------- /tests/utils/convert/convert.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | package convert 5 | 6 | func Int32ToPtr(num int32) *int32 { 7 | return &num 8 | } 9 | 10 | func StringToPtr(str string) *string { 11 | return &str 12 | } 13 | -------------------------------------------------------------------------------- /tests/utils/retry/retry.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
4 | package retry 5 | 6 | import ( 7 | "fmt" 8 | "testing" 9 | "time" 10 | ) 11 | 12 | type Config struct { 13 | err error 14 | timeout time.Duration 15 | interval time.Duration 16 | coverage int32 17 | } 18 | 19 | func NewConfig() *Config { 20 | return &Config{} 21 | } 22 | 23 | func (c *Config) SetTimeOut(timeout time.Duration) *Config { 24 | c.timeout = timeout 25 | return c 26 | } 27 | 28 | func (c *Config) SetInterval(interval time.Duration) *Config { 29 | c.interval = interval 30 | return c 31 | } 32 | 33 | func (c *Config) SetCoverage(cov int32) *Config { 34 | c.coverage = cov 35 | return c 36 | } 37 | 38 | type RetriableFunc func() (result interface{}, completed bool, err error) 39 | 40 | // timeout: total time for the retry period 41 | // interval: time interval between two attempts 42 | // coverage: number of consecutive successes required for completion 43 | // err: latest error observed 44 | var defaultConfig = Config{ 45 | timeout: 30 * time.Second, 46 | interval: 3 * time.Second, 47 | coverage: 0, 48 | err: nil, 49 | } 50 | 51 | // Do retries the given func until it completes successfully or the timeout expires 52 | func Do(fn RetriableFunc, arg Config) (result interface{}, err error) { 53 | cfg := arg 54 | to := time.After(cfg.timeout) 55 | successes := 0 56 | total := 0 57 | var latesterr error = nil 58 | for { 59 | select { 60 | case <-to: 61 | return nil, fmt.Errorf("timeout while waiting after %d attempts (last error: %v)", total, latesterr) 62 | default: 63 | } 64 | result, completed, err := fn() 65 | total++ 66 | if completed { 67 | if err != nil { 68 | successes = 0 69 | } else { 70 | successes++ 71 | } 72 | if successes >= int(cfg.coverage) { 73 | return result, err 74 | } 75 | continue 76 | } else { 77 | successes = 0 78 | } 79 | 80 | if err != nil { 81 | latesterr = err 82 | } 83 | 84 | select { 85 | case <-to: 86 | convergeStr := "" 87 | if cfg.coverage > 1 { 88 | convergeStr = fmt.Sprintf(", %d/%d successes", successes, cfg.coverage) 89 | } 90 | return nil, fmt.Errorf("timeout while waiting after %d attempts%s (last error: %v)", total, convergeStr, latesterr) 91 | case <-time.After(cfg.interval): 92 | } 93 | } 94 | } 95 | 96 | func UntilSuccess(fn func() error, arg Config) error { 97 | _, e := Do(func() (interface{}, bool, error) { 98 | err := fn() 99 | if err != nil { 100 | return nil, false, err 101 | } 102 | 103 | return nil, true, nil 104 | }, arg) 105 | return e 106 | } 107 | 108 | func UntilSuccessOrFail(t *testing.T, fn func() error, arg Config) { 109 | err := UntilSuccess(fn, arg) 110 | if err != nil { 111 | t.Fatalf("retry function failed: %v", err) 112 | } 113 | } 114 | -------------------------------------------------------------------------------- /tests/utils/retry/retry_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0.
4 | package retry 5 | 6 | import ( 7 | "fmt" 8 | "testing" 9 | "time" 10 | 11 | "github.com/stretchr/testify/assert" 12 | ) 13 | 14 | func TestUntilSuccess(t *testing.T) { 15 | cfg := Config{ 16 | interval: 1 * time.Millisecond, 17 | timeout: 10 * time.Millisecond, 18 | coverage: 2, 19 | } 20 | t.Run("success", func(t *testing.T) { 21 | success := 0 22 | retryFunc := func() error { 23 | if success < 2 { 24 | success++ 25 | return fmt.Errorf("not success") 26 | } 27 | return nil 28 | } 29 | err := UntilSuccess(retryFunc, cfg) 30 | assert.Equal(t, nil, err) 31 | assert.Equal(t, 2, success) 32 | }) 33 | t.Run("failed", func(t *testing.T) { 34 | success := 0 35 | retryFunc := func() error { 36 | if success < 100 { 37 | success++ 38 | return fmt.Errorf("not success") 39 | } 40 | return nil 41 | } 42 | err := UntilSuccess(retryFunc, cfg) 43 | assert.Contains(t, err.Error(), "timeout") 44 | assert.Equal(t, true, success <= 10) 45 | }) 46 | } 47 | -------------------------------------------------------------------------------- /tests/utils/sql/sql_utils.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | package sql 5 | 6 | import ( 7 | "fmt" 8 | "io/ioutil" 9 | "path/filepath" 10 | "strings" 11 | 12 | "github.com/pkg/errors" 13 | ) 14 | 15 | type SQLPair struct { 16 | Query string 17 | Result string 18 | } 19 | 20 | func BuildSQLPair(sql_template, sqldir, resultdir string) (map[string]SQLPair, error) { 21 | sqlPath, err := filepath.Abs(sqldir) 22 | if err != nil { 23 | return nil, errors.Wrap(err, fmt.Sprintf("failed generate absolute file path of %s", sqldir)) 24 | } 25 | resultPath, err := filepath.Abs(resultdir) 26 | if err != nil { 27 | return nil, errors.Wrap(err, fmt.Sprintf("failed generate absolute file path of %s", resultdir)) 28 | } 29 | 30 | items, _ := ioutil.ReadDir(sqlPath) 31 | ans := make(map[string]SQLPair) 32 | for _, item := range items { 33 | if item.IsDir() { 34 | continue 35 | } else if strings.HasSuffix(item.Name(), ".sql") { 36 | key := strings.TrimSuffix(item.Name(), ".sql") 37 | var pair SQLPair 38 | if p, found := ans[key]; !found { 39 | pair = SQLPair{} 40 | } else { 41 | pair = p 42 | } 43 | sql, err := ioutil.ReadFile(sqlPath + "/" + item.Name()) 44 | if err != nil { 45 | return nil, err 46 | } 47 | pair.Query = fmt.Sprintf(sql_template, string(sql)) 48 | ans[key] = pair 49 | } 50 | } 51 | items, _ = ioutil.ReadDir(resultPath) 52 | for _, item := range items { 53 | if item.IsDir() { 54 | continue 55 | } else if strings.HasSuffix(item.Name(), ".result") { 56 | key := strings.TrimSuffix(item.Name(), ".result") 57 | var pair SQLPair 58 | if p, found := ans[key]; !found { 59 | pair = SQLPair{} 60 | } else { 61 | pair = p 62 | } 63 | result, err := ioutil.ReadFile(resultPath + "/" + item.Name()) 64 | if err != nil { 65 | return nil, err 66 | } 67 | pair.Result = string(result) 68 | ans[key] = pair 69 | } 70 | } 71 | return ans, nil 72 | } 73 | -------------------------------------------------------------------------------- /tests/utils/ssa/ssa.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
4 | package ssa 5 | 6 | import ( 7 | "context" 8 | "encoding/json" 9 | 10 | "k8s.io/apimachinery/pkg/api/meta" 11 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 12 | "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" 13 | "k8s.io/apimachinery/pkg/runtime/serializer/yaml" 14 | "k8s.io/apimachinery/pkg/types" 15 | "k8s.io/client-go/discovery" 16 | memory "k8s.io/client-go/discovery/cached" 17 | "k8s.io/client-go/dynamic" 18 | "k8s.io/client-go/restmapper" 19 | ctrl "sigs.k8s.io/controller-runtime" 20 | ) 21 | 22 | // ServerSideApply is easier to use than kubectl apply in CI/CD environment 23 | // Only supports k8s 1.18+ 24 | func ServerSideApply(argYaml string) error { 25 | cfg := ctrl.GetConfigOrDie() 26 | // 1. Prepare a RESTMapper to find GVR 27 | dc, err := discovery.NewDiscoveryClientForConfig(cfg) 28 | if err != nil { 29 | return err 30 | } 31 | mapper := restmapper.NewDeferredDiscoveryRESTMapper(memory.NewMemCacheClient(dc)) 32 | // 2. Prepare the dynamic client 33 | dyn, err := dynamic.NewForConfig(cfg) 34 | if err != nil { 35 | return err 36 | } 37 | // 3. Decode YAML manifest into unstructured.Unstructured 38 | obj := &unstructured.Unstructured{} 39 | decodeUnstr := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme) 40 | _, gvk, err := decodeUnstr.Decode([]byte(argYaml), nil, obj) 41 | if err != nil { 42 | return err 43 | } 44 | 45 | // 4. Find GVR 46 | mapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version) 47 | if err != nil { 48 | return err 49 | } 50 | 51 | // 5. Obtain REST interface for the GVR 52 | var dr dynamic.ResourceInterface 53 | if mapping.Scope.Name() == meta.RESTScopeNameNamespace { 54 | // namespaced resources should specify the namespace 55 | dr = dyn.Resource(mapping.Resource).Namespace(obj.GetNamespace()) 56 | } else { 57 | // for cluster-wide resources 58 | dr = dyn.Resource(mapping.Resource) 59 | } 60 | 61 | // 6. Marshal object into JSON 62 | data, err := json.Marshal(obj) 63 | if err != nil { 64 | return err 65 | } 66 | 67 | // 7. Create or Update the object with SSA 68 | // types.ApplyPatchType indicates SSA. 69 | // FieldManager specifies the field owner ID. 70 | _, err = dr.Patch(context.TODO(), obj.GetName(), types.ApplyPatchType, data, metav1.PatchOptions{ 71 | FieldManager: "datafuse-test", 72 | }) 73 | return err 74 | } 75 | -------------------------------------------------------------------------------- /tests/utils/ssa/ssa_test.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 4 | package ssa 5 | 6 | import ( 7 | "testing" 8 | 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | const testYaml = ` 13 | apiVersion: apps/v1 14 | kind: Deployment 15 | metadata: 16 | name: nginx-test 17 | namespace: default 18 | spec: 19 | selector: 20 | matchLabels: 21 | app: nginx 22 | template: 23 | metadata: 24 | labels: 25 | app: nginx 26 | spec: 27 | containers: 28 | - name: nginx 29 | image: nginx:latest 30 | ` 31 | 32 | func TestServerSideApply(t *testing.T) { 33 | err := ServerSideApply(testYaml) 34 | assert.NoError(t, err) 35 | } 36 | -------------------------------------------------------------------------------- /tests/utils/utils.go: -------------------------------------------------------------------------------- 1 | // Copyright 2020-2021 The Datafuse Authors. 2 | // 3 | // SPDX-License-Identifier: Apache-2.0. 
--------------------------------------------------------------------------------
/utils/kube.go:
--------------------------------------------------------------------------------
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
package utils

import (
	"fmt"
	"os"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// KubeConfig can be set (for example from a command-line flag) to override the
// kubeconfig location; it takes precedence over the KUBECONFIG environment variable.
var KubeConfig string

// GetKubeConfigLocation resolves the kubeconfig path: KubeConfig, then $KUBECONFIG,
// then the default ~/.kube/config.
func GetKubeConfigLocation() string {
	kubeconfig := KubeConfig
	if kubeconfig != "" {
		return kubeconfig
	}
	kubeconfig = os.Getenv("KUBECONFIG")
	if kubeconfig == "" {
		kubeconfig = clientcmd.RecommendedHomeFile
	}
	return kubeconfig
}

// GetK8sConfig builds a rest.Config from the resolved kubeconfig location.
func GetK8sConfig() (*rest.Config, error) {
	kubeconfig := GetKubeConfigLocation()
	config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, fmt.Errorf("error building config from kubeconfig located in %s: %w", kubeconfig, err)
	}
	return config, nil
}

// GetK8sClient returns a typed Kubernetes clientset built from GetK8sConfig.
func GetK8sClient() (*kubernetes.Clientset, error) {
	config, err := GetK8sConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to get kubeconfig: %w", err)
	}

	kubeCli, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, fmt.Errorf("failed to generate k8s client: %w", err)
	}

	return kubeCli, nil
}
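A brief sketch of how these helpers might be consumed (illustrative, not a file in the repository; the import path is an assumption). Kubeconfig resolution follows the precedence coded above: the KubeConfig variable, then $KUBECONFIG, then ~/.kube/config:

package main

import (
	"context"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"datafuse-operator/utils" // import path is illustrative
)

func main() {
	// Build a typed clientset from the resolved kubeconfig.
	client, err := utils.GetK8sClient()
	if err != nil {
		log.Fatal(err)
	}
	// Use the clientset like any client-go clientset, e.g. list pods in "default".
	pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found %d pods in the default namespace", len(pods.Items))
}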
--------------------------------------------------------------------------------
/utils/kube_test.go:
--------------------------------------------------------------------------------
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
package utils

import (
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"k8s.io/client-go/tools/clientcmd"
)

func TestGetKubeConfigLocation(t *testing.T) {
	t.Run("default", func(t *testing.T) {
		original := os.Getenv("KUBECONFIG")
		os.Setenv("KUBECONFIG", "")
		defer os.Setenv("KUBECONFIG", original)

		config := GetKubeConfigLocation()
		assert.Equal(t, clientcmd.RecommendedHomeFile, config)
	})

	t.Run("KUBECONFIG", func(t *testing.T) {
		expected := "./testconfig"
		original := os.Getenv("KUBECONFIG")
		os.Setenv("KUBECONFIG", expected)
		defer os.Setenv("KUBECONFIG", original)

		config := GetKubeConfigLocation()
		assert.Equal(t, expected, config)
	})

	t.Run("local", func(t *testing.T) {
		expected := "./testconfig"
		original := os.Getenv("KUBECONFIG")
		os.Setenv("KUBECONFIG", "")
		defer os.Setenv("KUBECONFIG", original)

		KubeConfig = expected
		defer func() { KubeConfig = "" }() // cleanup
		config := GetKubeConfigLocation()
		assert.Equal(t, expected, config)
	})
}
--------------------------------------------------------------------------------
/utils/signals.go:
--------------------------------------------------------------------------------
// Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
package utils

import (
	"os"
	"os/signal"
	"syscall"
)

var onlyOneSignalHandler = make(chan struct{})
var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}

// SetupSignalHandler registers handlers for SIGTERM and SIGINT. It returns a stop
// channel that is closed when the first of these signals is received. If a second
// signal is caught, the program is terminated with exit code 1.
func SetupSignalHandler() (stopCh <-chan struct{}) {
	close(onlyOneSignalHandler) // panics when called twice

	stop := make(chan struct{})
	c := make(chan os.Signal, 2)
	signal.Notify(c, shutdownSignals...)
	go func() {
		<-c
		close(stop)
		<-c
		os.Exit(1) // second signal: exit directly
	}()

	return stop
}
--------------------------------------------------------------------------------
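A short sketch of consuming the stop channel returned by SetupSignalHandler (illustrative; the import path and runControllers are placeholders, not code from this repository):

package main

import (
	"log"

	"datafuse-operator/utils" // import path is illustrative
)

func main() {
	// The returned channel is closed on the first SIGINT/SIGTERM; a second
	// signal makes the handler call os.Exit(1).
	stopCh := utils.SetupSignalHandler()

	go runControllers(stopCh) // hypothetical long-running work

	<-stopCh
	log.Println("shutdown signal received, exiting")
}

// runControllers stands in for the operator's controller loops and returns when stopCh is closed.
func runControllers(stopCh <-chan struct{}) {
	<-stopCh
}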