├── .devcontainer ├── Dockerfile ├── devcontainer.json └── scripts │ └── python_venv.sh ├── .dockerignore ├── .gitattributes ├── .gitignore ├── .golangci.yaml ├── .vscode └── launch.json ├── Dockerfile ├── LICENSE ├── Makefile ├── PROJECT ├── README.md ├── api └── v1alpha1 │ ├── dbfsblock_types.go │ ├── dbfsblock_types_test.go │ ├── dcluster_types.go │ ├── dcluster_types_extra.go │ ├── dcluster_types_test.go │ ├── djob_types.go │ ├── djob_types_extra.go │ ├── djob_types_test.go │ ├── groupversion_info.go │ ├── helpers.go │ ├── helpers_test.go │ ├── run_types.go │ ├── run_types_extra.go │ ├── run_types_test.go │ ├── secretscope_types.go │ ├── secretscope_types_extra.go │ ├── secretscope_types_test.go │ ├── suite_test.go │ ├── workspaceitem_types.go │ ├── workspaceitem_types_test.go │ └── zz_generated.deepcopy.go ├── azure-pipelines.yaml ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd │ ├── bases │ │ ├── databricks.microsoft.com_dbfsblocks.yaml │ │ ├── databricks.microsoft.com_dclusters.yaml │ │ ├── databricks.microsoft.com_djobs.yaml │ │ ├── databricks.microsoft.com_runs.yaml │ │ ├── databricks.microsoft.com_secretscopes.yaml │ │ └── databricks.microsoft.com_workspaceitems.yaml │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_dbfsblocks.yaml │ │ ├── cainjection_in_dclusters.yaml │ │ ├── cainjection_in_djobs.yaml │ │ ├── cainjection_in_runs.yaml │ │ ├── cainjection_in_secretscopes.yaml │ │ ├── cainjection_in_workspaceitems.yaml │ │ ├── webhook_in_dbfsblocks.yaml │ │ ├── webhook_in_dclusters.yaml │ │ ├── webhook_in_djobs.yaml │ │ ├── webhook_in_runs.yaml │ │ ├── webhook_in_secretscopes.yaml │ │ └── webhook_in_workspaceitems.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── manager_image_patch.yaml │ ├── manager_webhook_patch.yaml │ └── webhookcainjection_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── prometheus 
│ ├── grafana-dashboard-configmap.yaml │ ├── grafana-dashboard-load-test-configmap.yaml │ ├── grafana-dashboard-mockapi-configmap.yaml │ ├── kustomization.yaml │ └── monitor.yaml ├── rbac │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ ├── role.yaml │ └── role_binding.yaml ├── samples │ ├── databricks_v1alpha1_dbfsblock.yaml │ ├── databricks_v1alpha1_dcluster.yaml │ ├── databricks_v1alpha1_djob.yaml │ ├── databricks_v1alpha1_run_direct.yaml │ ├── databricks_v1alpha1_run_job.yaml │ ├── databricks_v1alpha1_secretscope.yaml │ └── databricks_v1alpha1_workspaceitem.yaml └── webhook │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ ├── manifests.yaml │ └── service.yaml ├── controllers ├── dbfsblock_controller.go ├── dbfsblock_controller_databricks.go ├── dbfsblock_controller_finalizer.go ├── dbfsblock_controller_test.go ├── dcluster_controller.go ├── dcluster_controller_databricks.go ├── dcluster_controller_finalizer.go ├── dcluster_controller_test.go ├── djob_controller.go ├── djob_controller_databricks.go ├── djob_controller_finalizer.go ├── djob_controller_test.go ├── doc.go ├── metrics.go ├── run_controller.go ├── run_controller_databricks.go ├── run_controller_finalizer.go ├── run_controller_test.go ├── secretscope_controller.go ├── secretscope_controller_databricks.go ├── secretscope_controller_finalizer.go ├── secretscope_controller_test.go ├── suite_test.go ├── workspaceitem_controller.go ├── workspaceitem_controller_databricks.go ├── workspaceitem_controller_finalizer.go └── workspaceitem_controller_test.go ├── docs ├── contributing.md ├── debugging.md ├── deploy.md ├── images │ ├── azure-databricks-operator-highlevel.jpg │ ├── azure-databricks-operator.jpg │ ├── copy-filepath-in-dbricks.jpg │ ├── create-cluster.jpg │ ├── databricks-job.jpg │ ├── debugging.gif │ ├── devcontainer.gif │ ├── development-flow.jpg │ ├── 
direct-run.jpg │ ├── import-notebooks-databricks.gif │ ├── run-periodic-job.jpg │ ├── sample1.gif │ ├── sample2.gif │ ├── sample3.gif │ └── secretscopes-runs.jpg ├── locust.md ├── metrics.md ├── mockapi.md ├── mockapi_samples │ ├── config_sample.http │ ├── job_sample.http │ └── run_sample.http ├── resources.md ├── roadmap.md ├── samples.md └── samples │ ├── 1_direct_run │ ├── basic1.ipynb │ └── run_basic1.yaml │ ├── 2_job_run │ ├── cluster_interactive1.yaml │ ├── run_basic1_periodic_on_existing_cluster.yaml │ ├── run_basic1_periodic_on_existing_cluster_by_name.yaml │ └── run_basic1_periodic_on_new_cluster.yaml │ └── 3_secret_scope │ ├── cluster_interactive2.yaml │ ├── eventhub_ingest.ipynb │ ├── run_twitter1.yaml │ ├── secretscope_eventhub.yaml │ ├── secretscope_twitter.yaml │ └── twitter_ingest.ipynb ├── go.mod ├── go.sum ├── hack ├── boilerplate.go.txt ├── portforwards.sh └── verify_load_tests │ └── main.go ├── locust ├── .flake8 ├── .gitignore ├── Dockerfile ├── behaviours │ ├── noop_locust.py │ ├── scenario1_run_submit_delete.py │ ├── scenario2_run_submit.py │ └── scenario5_run_submit_delete_high_wait_time.py ├── locust_files │ ├── __init__.py │ └── db_locust │ │ ├── __init__.py │ │ ├── constant.py │ │ ├── db_client.py │ │ ├── db_collector.py │ │ ├── db_decorator.py │ │ ├── db_locust.py │ │ ├── db_noop_client.py │ │ └── db_run_client.py ├── manifests │ └── deployment.yaml ├── requirements.dev.txt ├── requirements.txt ├── setup.py └── test │ └── unit │ ├── __init__.py │ └── db_run_client_test.py ├── main.go └── mockapi ├── Dockerfile ├── handler ├── cluster_handler.go ├── common_handler.go ├── config_handler.go ├── job_handler.go ├── run_handler.go └── shared.go ├── integration_tests ├── common_api_test.go ├── job_api_test.go ├── metrics_api_test.go ├── run_api_test.go ├── run_memory_usage_test.go └── test_data │ ├── job │ └── job_create.json │ └── run │ └── run_submit.json ├── main.go ├── manifests ├── deployment.yaml └── service.yaml ├── middleware └── 
middleware.go ├── model ├── jobs_list_response.go ├── jobs_runs_submit_request.go └── xml_response.go ├── repository ├── cluster_repository.go ├── job_repository.go ├── run_repository.go ├── run_repository_test.go └── shared.go └── router ├── prometheus.go └── router.go /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // If you want to run as a non-root user in the container, see .devcontainer/docker-compose.yml. 2 | { 3 | "name": "Go", 4 | "dockerFile": "./Dockerfile", 5 | "workspaceFolder": "/workspace", 6 | "context": "..", 7 | "workspaceMount": "src=${localWorkspaceFolder},dst=/workspace,type=bind", 8 | "runArgs": [ 9 | // Mount the env file 10 | "--env-file", "${localWorkspaceFolder}/.devcontainer/.env", 11 | // Mount go mod cache 12 | "-v", "dboperator-gomodcache:/go/pkg", 13 | // Cache vscode exentsions installs and homedir 14 | "-v", "dboperator-vscodecache:/root/.vscode-server", 15 | // Keep command history 16 | "-v", "dboperator-bashhistory:/root/commandhistory", 17 | // Enable security needed for docker 18 | "--cap-add=SYS_PTRACE", "--security-opt", "seccomp=unconfined", 19 | // Mount docker socket for docker builds 20 | "-v", "/var/run/docker.sock:/var/run/docker.sock", 21 | "--network", "host", 22 | "-e GO111MODULE=on", 23 | // Mount azure 24 | "-v", "${env:HOME}${env:USERPROFILE}/.azure:/home/vscode/.azure" 25 | // Optionally mount k8s auth to existing cluster 26 | // "-v", "${env:HOME}${env:USERPROFILE}/.kube:/home/vscode/.kube", 27 | ], 28 | "forwardPorts": [8085, 8089, 9090], 29 | "extensions": [ 30 | "ms-azuretools.vscode-docker", 31 | "ms-vscode.go", 32 | "ms-python.python", 33 | "humao.rest-client", 34 | "yzhang.markdown-all-in-one" 35 | ], 36 | "settings": { 37 | "go.gopath": "/go", 38 | "go.useLanguageServer": true, 39 | "[go]": { 40 | "editor.snippetSuggestions": "none", 41 | "editor.formatOnSave": true, 42 | "editor.codeActionsOnSave": { 43 | "source.organizeImports": 
true, 44 | } 45 | }, 46 | "gopls": { 47 | "usePlaceholders": true, // add parameter placeholders when completing a function 48 | // Experimental settings 49 | "completeUnimported": true, // autocomplete unimported packages 50 | "watchFileChanges": true, // watch file changes outside of the editor 51 | "deepCompletion": true // enable deep completion 52 | }, 53 | "go.toolsEnvVars": { 54 | "GO111MODULE": "on" 55 | }, 56 | "yaml.schemas": { 57 | "kubernetes": "*.yaml" 58 | }, 59 | "go.lintTool":"golangci-lint", 60 | "go.lintFlags": [ 61 | "--fast" 62 | ], 63 | "python.pythonPath": "/usr/local/bin/python", 64 | "python.linting.enabled": true, 65 | "python.linting.pylintEnabled": true, 66 | "python.testing.pytestEnabled": true, 67 | "python.linting.flake8Enabled": true, 68 | "python.linting.lintOnSave": true, 69 | "python.testing.promptToConfigure": false, 70 | "python.testing.unittestEnabled": false, 71 | "python.testing.nosetestsEnabled": false, 72 | "python.formatting.provider": "black", 73 | "python.linting.pylintPath": "/usr/local/bin/pylint", 74 | "remote.extensionKind": { 75 | "ms-azuretools.vscode-docker": "workspace" 76 | } 77 | } 78 | } -------------------------------------------------------------------------------- /.devcontainer/scripts/python_venv.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash 2 | set -e 3 | set -x 4 | 5 | mkdir /python_venv 6 | python3 -m venv /python_venv/venv 7 | source /python_venv/venv/bin/activate # You can also tell VSCode to use the interpretter in this location 8 | pip3 install -r requirements.dev.txt 9 | pip3 install -r requirements.txt 10 | echo "source /python_venv/venv/bin/activate" >> "$HOME/.bashrc" 11 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | # During build .dockercache and .gocache download locations used to speed up the build 2 | # as these are large they are ignored to avoid the build copying them into docker context 3 | .dockercache 4 | .gocache 5 | .git 6 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | *.exe 3 | *.exe~ 4 | *.dll 5 | *.so 6 | *.dylib 7 | 8 | # Test binary, build with `go test -c` 9 | *.test 10 | 11 | # Output of the go coverage tool, specifically when used with LiteIDE 12 | /cover.* 13 | /cover/ 14 | 15 | .env 16 | 17 | /bin 18 | cmd/manager/__debug_bin 19 | 20 | .env 21 | __debug_bin 22 | settings.json 23 | operatorsetup.yaml 24 | -------------------------------------------------------------------------------- /.golangci.yaml: -------------------------------------------------------------------------------- 1 | run: 2 | deadline: 5m 3 | skip-files: [] 4 | 5 | linters-settings: 6 | linters-settings.govet: 7 | check-shadowing: true 8 | 9 | linters-settings.gocyclo: 10 | min-complexity: 12.0 11 | 12 | linters-settings.maligned: 13 | suggest-new: true 14 | 15 | 
linters-settings.goconst: 16 | min-len: 3.0 17 | min-occurrences: 3.0 18 | 19 | linters-settings.misspell: 20 | locale: "US" 21 | ignore-words: 22 | - listend 23 | - analyses 24 | - cancelling 25 | 26 | linters: 27 | enable: 28 | - vet 29 | - golint 30 | - gofmt 31 | - deadcode 32 | - varcheck 33 | - structcheck 34 | - misspell 35 | - errcheck 36 | - gosimple 37 | - govet 38 | - ineffassign 39 | 40 | issues: 41 | exclude-use-default: false 42 | max-per-linter: 0 43 | max-same-issues: 0 44 | exclude: [] 45 | -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "0.2.0", 3 | "configurations": [ 4 | { 5 | "name": "Debug", 6 | "type": "go", 7 | "request": "launch", 8 | "mode": "auto", 9 | "program": "${workspaceFolder}/main.go", 10 | "env": {}, 11 | "args": [] 12 | } 13 | ] 14 | } -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.12.5-alpine3.9 as builder 3 | 4 | # Install certs, git, and mercurial 5 | RUN apk add --no-cache ca-certificates git mercurial 6 | 7 | WORKDIR /workspace 8 | # Copy the Go Modules manifests 9 | COPY go.mod go.mod 10 | COPY go.sum go.sum 11 | # cache deps before building and copying source so that we don't need to re-download as much 12 | # and so that source changes don't invalidate our downloaded layer 13 | RUN go mod download 14 | 15 | # Copy the go source 16 | COPY main.go main.go 17 | COPY api/ api/ 18 | COPY controllers/ controllers/ 19 | 20 | # Build 21 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go 22 | 23 | # Use distroless as minimal base image to package the manager binary 24 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 25 | FROM 
gcr.io/distroless/static:latest 26 | ENV DATABRICKS_HOST "" 27 | ENV DATABRICKS_TOKEN "" 28 | WORKDIR / 29 | COPY --from=builder /workspace/manager . 30 | ENTRYPOINT ["/manager"] 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Microsoft Corporation. All rights reserved. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE 22 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | version: "2" 2 | domain: microsoft.com 3 | repo: github.com/microsoft/azure-databricks-operator 4 | resources: 5 | - group: databricks 6 | version: v1alpha1 7 | kind: SecretScope 8 | - group: databricks 9 | version: v1alpha1 10 | kind: Djob 11 | - group: databricks 12 | version: v1alpha1 13 | kind: Run 14 | - group: databricks 15 | version: v1alpha1 16 | kind: Dcluster 17 | - group: databricks 18 | version: v1alpha1 19 | kind: DbfsBlock 20 | - group: databricks 21 | version: v1alpha1 22 | kind: WorkspaceItem 23 | -------------------------------------------------------------------------------- /api/v1alpha1/dcluster_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 29 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 | ) 31 | 32 | // DclusterStatus represents the status for a Dcluster 33 | type DclusterStatus struct { 34 | ClusterInfo *DclusterInfo `json:"cluster_info,omitempty"` 35 | } 36 | 37 | // +kubebuilder:object:root=true 38 | 39 | // Dcluster is the Schema for the dclusters API 40 | // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" 41 | // +kubebuilder:printcolumn:name="ClusterID",type="string",JSONPath=".status.cluster_info.cluster_id" 42 | // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.cluster_info.state" 43 | type Dcluster struct { 44 | metav1.TypeMeta `json:",inline"` 45 | metav1.ObjectMeta `json:"metadata,omitempty"` 46 | 47 | Spec *dbmodels.NewCluster `json:"spec,omitempty"` 48 | Status *DclusterStatus `json:"status,omitempty"` 49 | } 50 | 51 | // IsBeingDeleted returns true if a deletion timestamp is set 52 | func (dcluster *Dcluster) IsBeingDeleted() bool { 53 | return !dcluster.ObjectMeta.DeletionTimestamp.IsZero() 54 | } 55 | 56 | // IsSubmitted returns true if the item has been submitted to DataBricks 57 | func (dcluster *Dcluster) IsSubmitted() bool { 58 | if dcluster.Status == nil || 59 | dcluster.Status.ClusterInfo == nil || 60 | dcluster.Status.ClusterInfo.ClusterID == "" { 61 | return false 62 | } 63 | return true 64 | } 
65 | 66 | // DclusterFinalizerName is the name of the finalizer for the Dcluster operator 67 | const DclusterFinalizerName = "dcluster.finalizers.databricks.microsoft.com" 68 | 69 | // HasFinalizer returns true if the item has the specified finalizer 70 | func (dcluster *Dcluster) HasFinalizer(finalizerName string) bool { 71 | return containsString(dcluster.ObjectMeta.Finalizers, finalizerName) 72 | } 73 | 74 | // AddFinalizer adds the specified finalizer 75 | func (dcluster *Dcluster) AddFinalizer(finalizerName string) { 76 | dcluster.ObjectMeta.Finalizers = append(dcluster.ObjectMeta.Finalizers, finalizerName) 77 | } 78 | 79 | // RemoveFinalizer removes the specified finalizer 80 | func (dcluster *Dcluster) RemoveFinalizer(finalizerName string) { 81 | dcluster.ObjectMeta.Finalizers = removeString(dcluster.ObjectMeta.Finalizers, finalizerName) 82 | } 83 | 84 | // +kubebuilder:object:root=true 85 | 86 | // DclusterList contains a list of Dcluster 87 | type DclusterList struct { 88 | metav1.TypeMeta `json:",inline"` 89 | metav1.ListMeta `json:"metadata,omitempty"` 90 | Items []Dcluster `json:"items"` 91 | } 92 | 93 | func init() { 94 | SchemeBuilder.Register(&Dcluster{}, &DclusterList{}) 95 | } 96 | -------------------------------------------------------------------------------- /api/v1alpha1/dcluster_types_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this 
permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | "time" 29 | 30 | . "github.com/onsi/ginkgo" 31 | . "github.com/onsi/gomega" 32 | 33 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 34 | "golang.org/x/net/context" 35 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 36 | "k8s.io/apimachinery/pkg/types" 37 | ) 38 | 39 | // These tests are written in BDD-style using Ginkgo framework. Refer to 40 | // http://onsi.github.io/ginkgo to learn more. 41 | 42 | var _ = Describe("Dcluster", func() { 43 | var ( 44 | key types.NamespacedName 45 | created, fetched *Dcluster 46 | ) 47 | 48 | BeforeEach(func() { 49 | // Add any setup steps that needs to be executed before each test 50 | }) 51 | 52 | AfterEach(func() { 53 | // Add any teardown steps that needs to be executed after each test 54 | }) 55 | 56 | // Add Tests for OpenAPI validation (or additional CRD features) specified in 57 | // your API definition. 58 | // Avoid adding tests for vanilla CRUD operations because they would 59 | // test Kubernetes API server, which isn't the goal here. 
60 | Context("Create API", func() { 61 | 62 | It("should create an object successfully", func() { 63 | 64 | key = types.NamespacedName{ 65 | Name: "foo-" + RandomString(5), 66 | Namespace: "default", 67 | } 68 | created = &Dcluster{ 69 | ObjectMeta: metav1.ObjectMeta{ 70 | Name: key.Name, 71 | Namespace: key.Namespace, 72 | }} 73 | 74 | By("creating an API obj") 75 | Expect(k8sClient.Create(context.TODO(), created)).To(Succeed()) 76 | 77 | fetched = &Dcluster{} 78 | Expect(k8sClient.Get(context.TODO(), key, fetched)).To(Succeed()) 79 | Expect(fetched).To(Equal(created)) 80 | 81 | By("deleting the created object") 82 | Expect(k8sClient.Delete(context.TODO(), created)).To(Succeed()) 83 | Expect(k8sClient.Get(context.TODO(), key, created)).ToNot(Succeed()) 84 | }) 85 | 86 | It("should correctly handle isSubmitted", func() { 87 | dcluster := &Dcluster{ 88 | Status: &DclusterStatus{ 89 | ClusterInfo: &DclusterInfo{ 90 | ClusterID: "221-322-djaj2", 91 | }, 92 | }, 93 | } 94 | Expect(dcluster.IsSubmitted()).To(BeTrue()) 95 | 96 | dcluster2 := &Dcluster{ 97 | Status: &DclusterStatus{ 98 | ClusterInfo: nil, 99 | }, 100 | } 101 | Expect(dcluster2.IsSubmitted()).To(BeFalse()) 102 | }) 103 | 104 | It("should correctly handle finalizers", func() { 105 | dcluster := &Dcluster{ 106 | ObjectMeta: metav1.ObjectMeta{ 107 | DeletionTimestamp: &metav1.Time{ 108 | Time: time.Now(), 109 | }, 110 | }, 111 | } 112 | Expect(dcluster.IsBeingDeleted()).To(BeTrue()) 113 | 114 | dcluster.AddFinalizer(DclusterFinalizerName) 115 | Expect(len(dcluster.GetFinalizers())).To(Equal(1)) 116 | Expect(dcluster.HasFinalizer(DclusterFinalizerName)).To(BeTrue()) 117 | 118 | dcluster.RemoveFinalizer(DclusterFinalizerName) 119 | Expect(len(dcluster.GetFinalizers())).To(Equal(0)) 120 | Expect(dcluster.HasFinalizer(DclusterFinalizerName)).To(BeFalse()) 121 | }) 122 | 123 | It("should correctly handle float to string", func() { 124 | clusterInfo := &dbmodels.ClusterInfo{ 125 | ClusterCores: 20.32, 126 | } 127 | 
128 | dclusterInfo := &DclusterInfo{} 129 | dclusterInfo.FromDataBricksClusterInfo(*clusterInfo) 130 | 131 | Expect(dclusterInfo.ClusterCores).To(Equal("20.32")) 132 | }) 133 | }) 134 | 135 | }) 136 | -------------------------------------------------------------------------------- /api/v1alpha1/djob_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 29 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 | ) 31 | 32 | // DjobStatus is the status object for the Djob 33 | type DjobStatus struct { 34 | JobStatus *dbmodels.Job `json:"job_status,omitempty"` 35 | Last10Runs []dbmodels.Run `json:"last_10_runs,omitempty"` 36 | } 37 | 38 | // +kubebuilder:object:root=true 39 | 40 | // Djob is the Schema for the djobs API 41 | // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" 42 | // +kubebuilder:printcolumn:name="JobID",type="integer",JSONPath=".status.job_status.job_id" 43 | type Djob struct { 44 | metav1.TypeMeta `json:",inline"` 45 | metav1.ObjectMeta `json:"metadata,omitempty"` 46 | 47 | Spec *JobSettings `json:"spec,omitempty"` 48 | Status *DjobStatus `json:"status,omitempty"` 49 | } 50 | 51 | // IsBeingDeleted returns true if a deletion timestamp is set 52 | func (djob *Djob) IsBeingDeleted() bool { 53 | return !djob.ObjectMeta.DeletionTimestamp.IsZero() 54 | } 55 | 56 | // IsSubmitted returns true if the item has been submitted to DataBricks 57 | func (djob *Djob) IsSubmitted() bool { 58 | if djob.Status == nil || djob.Status.JobStatus == nil || djob.Status.JobStatus.JobID == 0 { 59 | return false 60 | } 61 | return djob.Status.JobStatus.JobID > 0 62 | } 63 | 64 | // DjobFinalizerName is the name of the djob finalizer 65 | const DjobFinalizerName = "djob.finalizers.databricks.microsoft.com" 66 | 67 | // HasFinalizer returns true if the item has the specified finalizer 68 | func (djob *Djob) HasFinalizer(finalizerName string) bool { 69 | return containsString(djob.ObjectMeta.Finalizers, finalizerName) 70 | } 71 | 72 | // AddFinalizer adds the specified finalizer 73 | func (djob *Djob) AddFinalizer(finalizerName string) { 74 | djob.ObjectMeta.Finalizers = append(djob.ObjectMeta.Finalizers, finalizerName) 75 | } 76 | 77 | // RemoveFinalizer removes the 
specified finalizer 78 | func (djob *Djob) RemoveFinalizer(finalizerName string) { 79 | djob.ObjectMeta.Finalizers = removeString(djob.ObjectMeta.Finalizers, finalizerName) 80 | } 81 | 82 | // +kubebuilder:object:root=true 83 | 84 | // DjobList contains a list of Djob 85 | type DjobList struct { 86 | metav1.TypeMeta `json:",inline"` 87 | metav1.ListMeta `json:"metadata,omitempty"` 88 | Items []Djob `json:"items"` 89 | } 90 | 91 | func init() { 92 | SchemeBuilder.Register(&Djob{}, &DjobList{}) 93 | } 94 | -------------------------------------------------------------------------------- /api/v1alpha1/djob_types_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | "time" 29 | 30 | . "github.com/onsi/ginkgo" 31 | . 
"github.com/onsi/gomega" 32 | 33 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 34 | "golang.org/x/net/context" 35 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 36 | "k8s.io/apimachinery/pkg/types" 37 | ) 38 | 39 | // These tests are written in BDD-style using Ginkgo framework. Refer to 40 | // http://onsi.github.io/ginkgo to learn more. 41 | 42 | var _ = Describe("Djob", func() { 43 | var ( 44 | key types.NamespacedName 45 | created, fetched *Djob 46 | ) 47 | 48 | BeforeEach(func() { 49 | // Add any setup steps that needs to be executed before each test 50 | }) 51 | 52 | AfterEach(func() { 53 | // Add any teardown steps that needs to be executed after each test 54 | }) 55 | 56 | // Add Tests for OpenAPI validation (or additional CRD features) specified in 57 | // your API definition. 58 | // Avoid adding tests for vanilla CRUD operations because they would 59 | // test Kubernetes API server, which isn't the goal here. 60 | Context("Create API", func() { 61 | 62 | It("should create an object successfully", func() { 63 | 64 | key = types.NamespacedName{ 65 | Name: "foo-" + RandomString(5), 66 | Namespace: "default", 67 | } 68 | created = &Djob{ 69 | ObjectMeta: metav1.ObjectMeta{ 70 | Name: key.Name, 71 | Namespace: key.Namespace, 72 | }} 73 | By("creating an API obj") 74 | Expect(k8sClient.Create(context.Background(), created)).To(Succeed()) 75 | 76 | fetched = &Djob{} 77 | Expect(k8sClient.Get(context.Background(), key, fetched)).To(Succeed()) 78 | Expect(fetched).To(Equal(created)) 79 | 80 | By("deleting the created object") 81 | Expect(k8sClient.Delete(context.Background(), created)).To(Succeed()) 82 | Expect(k8sClient.Get(context.Background(), key, created)).ToNot(Succeed()) 83 | }) 84 | 85 | It("should correctly handle isSubmitted", func() { 86 | djob := &Djob{ 87 | Status: &DjobStatus{ 88 | JobStatus: &dbmodels.Job{ 89 | JobID: 20, 90 | }, 91 | }, 92 | } 93 | Expect(djob.IsSubmitted()).To(BeTrue()) 94 | 95 | djob2 := &Djob{ 96 | Status: 
&DjobStatus{ 97 | JobStatus: nil, 98 | }, 99 | } 100 | Expect(djob2.IsSubmitted()).To(BeFalse()) 101 | }) 102 | 103 | It("should correctly handle finalizers", func() { 104 | djob := &Djob{ 105 | ObjectMeta: metav1.ObjectMeta{ 106 | DeletionTimestamp: &metav1.Time{ 107 | Time: time.Now(), 108 | }, 109 | }, 110 | } 111 | Expect(djob.IsBeingDeleted()).To(BeTrue()) 112 | 113 | djob.AddFinalizer(DjobFinalizerName) 114 | Expect(len(djob.GetFinalizers())).To(Equal(1)) 115 | Expect(djob.HasFinalizer(DjobFinalizerName)).To(BeTrue()) 116 | 117 | djob.RemoveFinalizer(DjobFinalizerName) 118 | Expect(len(djob.GetFinalizers())).To(Equal(0)) 119 | Expect(djob.HasFinalizer(DjobFinalizerName)).To(BeFalse()) 120 | }) 121 | }) 122 | 123 | }) 124 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | // Package v1alpha1 contains API Schema definitions for the databricks v1 API group 26 | // +kubebuilder:object:generate=true 27 | // +groupName=databricks.microsoft.com 28 | package v1alpha1 29 | 30 | import ( 31 | "k8s.io/apimachinery/pkg/runtime/schema" 32 | "sigs.k8s.io/controller-runtime/pkg/scheme" 33 | ) 34 | 35 | var ( 36 | // GroupVersion is group version used to register these objects 37 | GroupVersion = schema.GroupVersion{Group: "databricks.microsoft.com", Version: "v1alpha1"} 38 | 39 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 40 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 41 | 42 | // AddToScheme adds the types in this group-version to the given scheme. 43 | AddToScheme = SchemeBuilder.AddToScheme 44 | ) 45 | -------------------------------------------------------------------------------- /api/v1alpha1/helpers.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE.. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | "math/rand" 29 | "time" 30 | ) 31 | 32 | const charset = "abcdefghijklmnopqrstuvwxyz" 33 | 34 | func containsString(slice []string, s string) bool { 35 | for _, item := range slice { 36 | if item == s { 37 | return true 38 | } 39 | } 40 | return false 41 | } 42 | 43 | func removeString(slice []string, s string) (result []string) { 44 | for _, item := range slice { 45 | if item == s { 46 | continue 47 | } 48 | result = append(result, item) 49 | } 50 | return 51 | } 52 | 53 | func randomStringWithCharset(length int, charset string) string { 54 | var seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) 55 | b := make([]byte, length) 56 | for i := range b { 57 | b[i] = charset[seededRand.Intn(len(charset))] 58 | } 59 | return string(b) 60 | } 61 | 62 | // RandomString generates a random string from a subset of characters 63 | func RandomString(length int) string { 64 | return randomStringWithCharset(length, charset) 65 | } 66 | -------------------------------------------------------------------------------- /api/v1alpha1/helpers_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without 
limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | . "github.com/onsi/ginkgo" 29 | . "github.com/onsi/gomega" 30 | ) 31 | 32 | var _ = Describe("Helpers", func() { 33 | 34 | BeforeEach(func() { 35 | // Add any setup steps that needs to be executed before each test 36 | }) 37 | 38 | AfterEach(func() { 39 | // Add any teardown steps that needs to be executed after each test 40 | }) 41 | 42 | // Add Tests for OpenAPI validation (or additional CRD features) specified in 43 | // your API definition. 44 | // Avoid adding tests for vanilla CRUD operations because they would 45 | // test Kubernetes API server, which isn't the goal here. 
46 | Context("String Operations", func() { 47 | It("should contain string", func() { 48 | slice1 := []string{"strA", "strB", "strC"} 49 | slice2 := []string{"strD", "strE", "strF"} 50 | 51 | for _, str := range slice1 { 52 | Expect(containsString(slice1, str)).To(BeTrue()) 53 | } 54 | 55 | for _, str := range slice2 { 56 | Expect(containsString(slice1, str)).To(BeFalse()) 57 | } 58 | }) 59 | 60 | It("should remove string", func() { 61 | slice := []string{"strA", "strB", "strC"} 62 | before := len(slice) 63 | 64 | for _, str := range slice { 65 | Expect(containsString(removeString(slice, str), str)).To(BeFalse()) 66 | } 67 | 68 | for _, str := range slice { 69 | Expect(len(slice)).To(BeIdenticalTo(before)) 70 | slice = removeString(slice, str) 71 | Expect(len(slice)).To(BeIdenticalTo(before - 1)) 72 | before-- 73 | } 74 | 75 | Expect(len(slice)).To(BeIdenticalTo(0)) 76 | }) 77 | 78 | It("should create random string matches length", func() { 79 | a1 := RandomString(5) 80 | a2 := RandomString(5) 81 | b1 := RandomString(10) 82 | 83 | Expect(a1).ToNot(Equal(a2)) 84 | Expect(len(a1)).To(Equal(len(a2))) 85 | Expect(len(b1)).To(Equal(10)) 86 | }) 87 | }) 88 | }) 89 | -------------------------------------------------------------------------------- /api/v1alpha1/run_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | 
copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | dbazure "github.com/xinsnake/databricks-sdk-golang/azure" 29 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 30 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 31 | ) 32 | 33 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 34 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 35 | 36 | // RunSpec defines the desired state of Run 37 | type RunSpec struct { 38 | // dedicated for job run 39 | JobName string `json:"job_name,omitempty"` 40 | *dbmodels.RunParameters `json:",inline"` 41 | // dedicated for direct run 42 | RunName string `json:"run_name,omitempty"` 43 | ClusterSpec `json:",inline"` 44 | *dbmodels.JobTask `json:",inline"` 45 | TimeoutSeconds int32 `json:"timeout_seconds,omitempty"` 46 | } 47 | 48 | // +kubebuilder:object:root=true 49 | 50 | // Run is the Schema for the runs API 51 | // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" 52 | // +kubebuilder:printcolumn:name="RunID",type="integer",JSONPath=".status.metadata.run_id" 53 | // +kubebuilder:printcolumn:name="State",type="string",JSONPath=".status.metadata.state.life_cycle_state" 54 | type Run struct { 55 | metav1.TypeMeta `json:",inline"` 56 | metav1.ObjectMeta `json:"metadata,omitempty"` 57 | 58 | Spec *RunSpec `json:"spec,omitempty"` 59 | Status 
*dbazure.JobsRunsGetOutputResponse `json:"status,omitempty"` 60 | } 61 | 62 | // IsBeingDeleted returns true if a deletion timestamp is set 63 | func (run *Run) IsBeingDeleted() bool { 64 | return !run.ObjectMeta.DeletionTimestamp.IsZero() 65 | } 66 | 67 | // IsSubmitted returns true if the item has been submitted to DataBricks 68 | func (run *Run) IsSubmitted() bool { 69 | if run.Status == nil || run.Status.Metadata.JobID == 0 { 70 | return false 71 | } 72 | return run.Status.Metadata.JobID > 0 73 | } 74 | 75 | // IsTerminated return true if item is in terminal state 76 | func (run *Run) IsTerminated() bool { 77 | if run.Status == nil || run.Status.Metadata.State == nil || run.Status.Metadata.State.LifeCycleState == nil { 78 | return false 79 | } 80 | switch *run.Status.Metadata.State.LifeCycleState { 81 | case dbmodels.RunLifeCycleStateTerminated, dbmodels.RunLifeCycleStateSkipped, dbmodels.RunLifeCycleStateInternalError: 82 | return true 83 | } 84 | return false 85 | } 86 | 87 | // RunFinalizerName is the name of the run finalizer 88 | const RunFinalizerName = "run.finalizers.databricks.microsoft.com" 89 | 90 | // HasFinalizer returns true if the item has the specified finalizer 91 | func (run *Run) HasFinalizer(finalizerName string) bool { 92 | return containsString(run.ObjectMeta.Finalizers, finalizerName) 93 | } 94 | 95 | // AddFinalizer adds the specified finalizer 96 | func (run *Run) AddFinalizer(finalizerName string) { 97 | run.ObjectMeta.Finalizers = append(run.ObjectMeta.Finalizers, finalizerName) 98 | } 99 | 100 | // RemoveFinalizer removes the specified finalizer 101 | func (run *Run) RemoveFinalizer(finalizerName string) { 102 | run.ObjectMeta.Finalizers = removeString(run.ObjectMeta.Finalizers, finalizerName) 103 | } 104 | 105 | // +kubebuilder:object:root=true 106 | 107 | // RunList contains a list of Run 108 | type RunList struct { 109 | metav1.TypeMeta `json:",inline"` 110 | metav1.ListMeta `json:"metadata,omitempty"` 111 | Items []Run 
`json:"items"` 112 | } 113 | 114 | func init() { 115 | SchemeBuilder.Register(&Run{}, &RunList{}) 116 | } 117 | -------------------------------------------------------------------------------- /api/v1alpha1/run_types_extra.go: -------------------------------------------------------------------------------- 1 | package v1alpha1 2 | 3 | import ( 4 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 5 | ) 6 | 7 | // ClusterSpec is similar to dbmodels.ClusterSpec, the reason it 8 | // exists is because dbmodels.ClusterSpec doesn't support ExistingClusterName 9 | // ExistingClusterName allows discovering databricks clusters by it's kubernetese object name 10 | type ClusterSpec struct { 11 | ExistingClusterID string `json:"existing_cluster_id,omitempty" url:"existing_cluster_id,omitempty"` 12 | ExistingClusterName string `json:"existing_cluster_name,omitempty" url:"existing_cluster_name,omitempty"` 13 | NewCluster *dbmodels.NewCluster `json:"new_cluster,omitempty" url:"new_cluster,omitempty"` 14 | Libraries []dbmodels.Library `json:"libraries,omitempty" url:"libraries,omitempty"` 15 | } 16 | 17 | // ToK8sClusterSpec converts a databricks ClusterSpec object to k8s ClusterSpec object. 18 | // It is needed to add ExistingClusterName and follow k8s camleCase naming convention 19 | func ToK8sClusterSpec(dbjs *dbmodels.ClusterSpec) ClusterSpec { 20 | var k8sjs ClusterSpec 21 | k8sjs.ExistingClusterID = dbjs.ExistingClusterID 22 | k8sjs.NewCluster = dbjs.NewCluster 23 | k8sjs.Libraries = dbjs.Libraries 24 | return k8sjs 25 | } 26 | 27 | // ToDatabricksClusterSpec converts a k8s ClusterSpec object to a DataBricks ClusterSpec object. 
28 | // It is needed to add ExistingClusterName and follow k8s camleCase naming convention 29 | func ToDatabricksClusterSpec(k8sjs *ClusterSpec) dbmodels.ClusterSpec { 30 | 31 | var dbjs dbmodels.ClusterSpec 32 | dbjs.ExistingClusterID = k8sjs.ExistingClusterID 33 | dbjs.NewCluster = k8sjs.NewCluster 34 | dbjs.Libraries = k8sjs.Libraries 35 | return dbjs 36 | } 37 | -------------------------------------------------------------------------------- /api/v1alpha1/run_types_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | "time" 29 | 30 | . "github.com/onsi/ginkgo" 31 | . 
"github.com/onsi/gomega" 32 | dbazure "github.com/xinsnake/databricks-sdk-golang/azure" 33 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 34 | 35 | "golang.org/x/net/context" 36 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 37 | "k8s.io/apimachinery/pkg/types" 38 | ) 39 | 40 | // These tests are written in BDD-style using Ginkgo framework. Refer to 41 | // http://onsi.github.io/ginkgo to learn more. 42 | 43 | var _ = Describe("Run", func() { 44 | var ( 45 | key types.NamespacedName 46 | created, fetched *Run 47 | ) 48 | 49 | BeforeEach(func() { 50 | // Add any setup steps that needs to be executed before each test 51 | }) 52 | 53 | AfterEach(func() { 54 | // Add any teardown steps that needs to be executed after each test 55 | }) 56 | 57 | // Add Tests for OpenAPI validation (or additional CRD features) specified in 58 | // your API definition. 59 | // Avoid adding tests for vanilla CRUD operations because they would 60 | // test Kubernetes API server, which isn't the goal here. 
61 | Context("Create API", func() { 62 | 63 | It("should create an object successfully", func() { 64 | 65 | key = types.NamespacedName{ 66 | Name: "foo-" + RandomString(5), 67 | Namespace: "default", 68 | } 69 | created = &Run{ 70 | ObjectMeta: metav1.ObjectMeta{ 71 | Name: key.Name, 72 | Namespace: key.Namespace, 73 | }} 74 | 75 | By("creating an API obj") 76 | Expect(k8sClient.Create(context.Background(), created)).To(Succeed()) 77 | 78 | fetched = &Run{} 79 | Expect(k8sClient.Get(context.Background(), key, fetched)).To(Succeed()) 80 | Expect(fetched).To(Equal(created)) 81 | 82 | By("deleting the created object") 83 | Expect(k8sClient.Delete(context.Background(), created)).To(Succeed()) 84 | Expect(k8sClient.Get(context.Background(), key, created)).ToNot(Succeed()) 85 | }) 86 | 87 | }) 88 | 89 | It("should correctly handle isSubmitted", func() { 90 | run := &Run{ 91 | Status: &dbazure.JobsRunsGetOutputResponse{ 92 | Metadata: dbmodels.Run{ 93 | JobID: 23, 94 | }, 95 | }, 96 | } 97 | Expect(run.IsSubmitted()).To(BeTrue()) 98 | 99 | run2 := &Run{ 100 | Status: nil, 101 | } 102 | Expect(run2.IsSubmitted()).To(BeFalse()) 103 | }) 104 | 105 | It("should correctly handle finalizers", func() { 106 | run := &Run{ 107 | ObjectMeta: metav1.ObjectMeta{ 108 | DeletionTimestamp: &metav1.Time{ 109 | Time: time.Now(), 110 | }, 111 | }, 112 | } 113 | Expect(run.IsBeingDeleted()).To(BeTrue()) 114 | 115 | run.AddFinalizer(RunFinalizerName) 116 | Expect(len(run.GetFinalizers())).To(Equal(1)) 117 | Expect(run.HasFinalizer(RunFinalizerName)).To(BeTrue()) 118 | 119 | run.RemoveFinalizer(RunFinalizerName) 120 | Expect(len(run.GetFinalizers())).To(Equal(0)) 121 | Expect(run.HasFinalizer(RunFinalizerName)).To(BeFalse()) 122 | }) 123 | }) 124 | -------------------------------------------------------------------------------- /api/v1alpha1/secretscope_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright 
(c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 29 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 | ) 31 | 32 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 33 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
34 | 35 | // SecretScopeSpec defines the desired state of SecretScope 36 | type SecretScopeSpec struct { 37 | // INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 38 | // Important: Run "make" to regenerate code after modifying this file 39 | InitialManagePrincipal string `json:"initial_manage_permission,omitempty"` 40 | SecretScopeSecrets []SecretScopeSecret `json:"secrets,omitempty"` 41 | SecretScopeACLs []SecretScopeACL `json:"acls,omitempty"` 42 | } 43 | 44 | // SecretScopeStatus defines the observed state of SecretScope 45 | type SecretScopeStatus struct { 46 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 47 | // Important: Run "make" to regenerate code after modifying this file 48 | SecretScope *dbmodels.SecretScope `json:"secretscope,omitempty"` 49 | SecretInClusterAvailable bool `json:"secretinclusteravailable,omitempty"` 50 | } 51 | 52 | // +kubebuilder:object:root=true 53 | 54 | // SecretScope is the Schema for the secretscopes API 55 | type SecretScope struct { 56 | metav1.TypeMeta `json:",inline"` 57 | metav1.ObjectMeta `json:"metadata,omitempty"` 58 | 59 | Spec SecretScopeSpec `json:"spec,omitempty"` 60 | Status SecretScopeStatus `json:"status,omitempty"` 61 | } 62 | 63 | // IsSecretAvailable returns true if secret in cluster is available 64 | func (ss *SecretScope) IsSecretAvailable() bool { 65 | return ss.Status.SecretInClusterAvailable 66 | } 67 | 68 | // IsSubmitted returns true if the item has been submitted to DataBricks 69 | func (ss *SecretScope) IsSubmitted() bool { 70 | return ss.Status.SecretScope != nil 71 | } 72 | 73 | // IsBeingDeleted returns true if a deletion timestamp is set 74 | func (ss *SecretScope) IsBeingDeleted() bool { 75 | return !ss.ObjectMeta.DeletionTimestamp.IsZero() 76 | } 77 | 78 | // SecretScopeFinalizerName is the name of the secretscope finalizer 79 | const SecretScopeFinalizerName = "secretscope.finalizers.databricks.microsoft.com" 80 | 81 | // HasFinalizer returns true if the item has 
the specified finalizer 82 | func (ss *SecretScope) HasFinalizer(finalizerName string) bool { 83 | return containsString(ss.ObjectMeta.Finalizers, finalizerName) 84 | } 85 | 86 | // AddFinalizer adds the specified finalizer 87 | func (ss *SecretScope) AddFinalizer(finalizerName string) { 88 | ss.ObjectMeta.Finalizers = append(ss.ObjectMeta.Finalizers, finalizerName) 89 | } 90 | 91 | // RemoveFinalizer removes the specified finalizer 92 | func (ss *SecretScope) RemoveFinalizer(finalizerName string) { 93 | ss.ObjectMeta.Finalizers = removeString(ss.ObjectMeta.Finalizers, finalizerName) 94 | } 95 | 96 | // +kubebuilder:object:root=true 97 | 98 | // SecretScopeList contains a list of SecretScope 99 | type SecretScopeList struct { 100 | metav1.TypeMeta `json:",inline"` 101 | metav1.ListMeta `json:"metadata,omitempty"` 102 | Items []SecretScope `json:"items"` 103 | } 104 | 105 | func init() { 106 | SchemeBuilder.Register(&SecretScope{}, &SecretScopeList{}) 107 | } 108 | -------------------------------------------------------------------------------- /api/v1alpha1/secretscope_types_extra.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 
15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | // SecretScopeSecret represents a secret in a secret scope 28 | type SecretScopeSecret struct { 29 | Key string `json:"key,omitempty"` 30 | StringValue string `json:"string_value,omitempty"` 31 | ByteValue string `json:"byte_value,omitempty"` 32 | ValueFrom *SecretScopeValueFrom `json:"value_from,omitempty"` 33 | } 34 | 35 | // SecretScopeACL represents ACLs for a secret scope 36 | type SecretScopeACL struct { 37 | Principal string `json:"principal,omitempty"` 38 | Permission string `json:"permission,omitempty"` 39 | } 40 | 41 | // SecretScopeValueFrom references a secret scope 42 | type SecretScopeValueFrom struct { 43 | SecretKeyRef SecretScopeKeyRef `json:"secret_key_ref,omitempty"` 44 | } 45 | 46 | // SecretScopeKeyRef refers to a secret scope Key 47 | type SecretScopeKeyRef struct { 48 | Name string `json:"name,omitempty"` 49 | Key string `json:"key,omitempty"` 50 | } 51 | -------------------------------------------------------------------------------- /api/v1alpha1/secretscope_types_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, 
distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | "time" 29 | 30 | . "github.com/onsi/ginkgo" 31 | . "github.com/onsi/gomega" 32 | 33 | "golang.org/x/net/context" 34 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 35 | "k8s.io/apimachinery/pkg/types" 36 | ) 37 | 38 | // These tests are written in BDD-style using Ginkgo framework. Refer to 39 | // http://onsi.github.io/ginkgo to learn more. 40 | 41 | var _ = Describe("SecretScope", func() { 42 | var ( 43 | key types.NamespacedName 44 | created, fetched *SecretScope 45 | ) 46 | 47 | BeforeEach(func() { 48 | // Add any setup steps that needs to be executed before each test 49 | }) 50 | 51 | AfterEach(func() { 52 | // Add any teardown steps that needs to be executed after each test 53 | }) 54 | 55 | // Add Tests for OpenAPI validation (or additional CRD features) specified in 56 | // your API definition. 57 | // Avoid adding tests for vanilla CRUD operations because they would 58 | // test Kubernetes API server, which isn't the goal here. 
59 | Context("Create API", func() { 60 | 61 | It("should create an object successfully", func() { 62 | 63 | key = types.NamespacedName{ 64 | Name: "foo-" + RandomString(5), 65 | Namespace: "default", 66 | } 67 | created = &SecretScope{ 68 | ObjectMeta: metav1.ObjectMeta{ 69 | Name: key.Name, 70 | Namespace: key.Namespace, 71 | }} 72 | 73 | By("creating an API obj") 74 | Expect(k8sClient.Create(context.Background(), created)).To(Succeed()) 75 | 76 | fetched = &SecretScope{} 77 | Expect(k8sClient.Get(context.Background(), key, fetched)).To(Succeed()) 78 | Expect(fetched).To(Equal(created)) 79 | 80 | By("deleting the created object") 81 | Expect(k8sClient.Delete(context.Background(), created)).To(Succeed()) 82 | Expect(k8sClient.Get(context.Background(), key, created)).ToNot(Succeed()) 83 | }) 84 | 85 | It("should correctly handle isSubmitted", func() { 86 | secretScope := &SecretScope{} 87 | Expect(secretScope.IsSubmitted()).To(BeFalse()) 88 | }) 89 | 90 | It("should correctly handle finalizers", func() { 91 | secretScope := &SecretScope{ 92 | ObjectMeta: metav1.ObjectMeta{ 93 | DeletionTimestamp: &metav1.Time{ 94 | Time: time.Now(), 95 | }, 96 | }, 97 | } 98 | Expect(secretScope.IsBeingDeleted()).To(BeTrue()) 99 | 100 | secretScope.AddFinalizer(SecretScopeFinalizerName) 101 | Expect(len(secretScope.GetFinalizers())).To(Equal(1)) 102 | Expect(secretScope.HasFinalizer(SecretScopeFinalizerName)).To(BeTrue()) 103 | 104 | secretScope.RemoveFinalizer(SecretScopeFinalizerName) 105 | Expect(len(secretScope.GetFinalizers())).To(Equal(0)) 106 | Expect(secretScope.HasFinalizer(SecretScopeFinalizerName)).To(BeFalse()) 107 | }) 108 | }) 109 | 110 | }) 111 | -------------------------------------------------------------------------------- /api/v1alpha1/suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any 
person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package v1alpha1 26 | 27 | import ( 28 | "os" 29 | "path/filepath" 30 | "testing" 31 | 32 | . "github.com/onsi/ginkgo" 33 | . "github.com/onsi/gomega" 34 | 35 | "k8s.io/client-go/kubernetes/scheme" 36 | "k8s.io/client-go/rest" 37 | "sigs.k8s.io/controller-runtime/pkg/client" 38 | "sigs.k8s.io/controller-runtime/pkg/envtest" 39 | logf "sigs.k8s.io/controller-runtime/pkg/log" 40 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 41 | ) 42 | 43 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 44 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
45 | 46 | var cfg *rest.Config 47 | var k8sClient client.Client 48 | var testEnv *envtest.Environment 49 | 50 | func TestAPIs(t *testing.T) { 51 | RegisterFailHandler(Fail) 52 | 53 | RunSpecsWithDefaultAndCustomReporters(t, 54 | "v1 Suite", 55 | []Reporter{envtest.NewlineReporter{}}) 56 | } 57 | 58 | var _ = BeforeSuite(func(done Done) { 59 | logf.SetLogger(zap.LoggerTo(GinkgoWriter, true)) 60 | 61 | By("bootstrapping test environment") 62 | t := true 63 | if os.Getenv("TEST_USE_EXISTING_CLUSTER") == "true" { 64 | testEnv = &envtest.Environment{ 65 | UseExistingCluster: &t, 66 | } 67 | } else { 68 | testEnv = &envtest.Environment{ 69 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, 70 | } 71 | } 72 | 73 | err := SchemeBuilder.AddToScheme(scheme.Scheme) 74 | Expect(err).NotTo(HaveOccurred()) 75 | 76 | cfg, err = testEnv.Start() 77 | Expect(err).ToNot(HaveOccurred()) 78 | Expect(cfg).ToNot(BeNil()) 79 | 80 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 81 | Expect(err).ToNot(HaveOccurred()) 82 | Expect(k8sClient).ToNot(BeNil()) 83 | 84 | close(done) 85 | }, 60) 86 | 87 | var _ = AfterSuite(func() { 88 | By("tearing down the test environment") 89 | err := testEnv.Stop() 90 | Expect(err).ToNot(HaveOccurred()) 91 | }) 92 | -------------------------------------------------------------------------------- /azure-pipelines.yaml: -------------------------------------------------------------------------------- 1 | trigger: 2 | branches: 3 | include: 4 | - master 5 | 6 | pool: 7 | vmImage: 'Ubuntu 16.04' 8 | 9 | variables: 10 | IMAGE_NAME: 'candidate/k8s/azure-databricks/operator:$(Build.SourceVersion)' 11 | # ACR_NAME: - set this value in Azure Devops variables 12 | # AZURE_SUBSCRIPTION: - set this value in Azure Devops variables 13 | # DATABRICKS_HOST: - set this value in Azure Devops variables 14 | # DATABRICKS_TOKEN: - set this value in Azure Devops variables 15 | 16 | jobs: 17 | - job: Build 18 | steps: 19 | # 
Cache the go mod cache 20 | - task: Cache@2 21 | inputs: 22 | key: go-cache | go.sum 23 | path: ".gocache" 24 | restoreKeys: go-cache 25 | displayName: Cache go mod cache 26 | 27 | # Cache the docker image file 28 | - task: Cache@2 29 | inputs: 30 | key: docker-image | .devcontainer/** 31 | path: ".dockercache" 32 | restoreKeys: docker-image 33 | cacheHitVar: DOCKER_CACHE_HIT 34 | displayName: Cache docker layers 35 | 36 | - script: | 37 | docker info 38 | docker load -i ./.dockercache/devcontainer.tar 39 | condition: eq(variables.DOCKER_CACHE_HIT, 'true') 40 | displayName: Load cached devcontainer image 41 | 42 | - script: | 43 | set -e 44 | 45 | # Create dockercache directory 46 | mkdir -p ./.dockercache/ 47 | 48 | echo "-------> Building devcontainer" 49 | docker build --cache-from devcontainer:latest -t devcontainer -f ./.devcontainer/Dockerfile . 50 | displayName: Build devcontainer image 51 | 52 | - script: | 53 | echo "-------> Saving docker image" 54 | docker image save -o ./.dockercache/devcontainer.tar devcontainer 55 | condition: and(succeeded(), ne(variables.DOCKER_CACHE_HIT, 'true')) 56 | displayName: Save devcontainer image 57 | 58 | - script: | 59 | set -e 60 | 61 | # Create a directory for go mod cache 62 | mkdir -p $(System.DefaultWorkingDirectory)/.gocache 63 | 64 | echo "-------> Building code and running tests" 65 | # Commands to execute in the devcontainer 66 | COMMANDS="source ~/.bashrc && make test-local" 67 | # Run the dev container 68 | docker run -v $(System.DefaultWorkingDirectory)/.gocache:/go/pkg/ -v /var/run/docker.sock:/var/run/docker.sock -v $(System.DefaultWorkingDirectory):/src --workdir /src --entrypoint /bin/bash --network="host" devcontainer -c "$COMMANDS" 69 | 70 | sudo chown -R $USER $(System.DefaultWorkingDirectory) 71 | condition: succeeded() 72 | displayName: Unit and load tests 73 | 74 | - script: | 75 | set -e 76 | 77 | echo "-------> Run integration tests" 78 | # Run `make` to build and test the code 79 | docker run 
-v $(System.DefaultWorkingDirectory)/.gocache:/go/pkg/ -v /var/run/docker.sock:/var/run/docker.sock -v $(System.DefaultWorkingDirectory):/src --workdir /src --entrypoint /bin/bash --network="host" --env DATABRICKS_HOST=$(DATABRICKS_HOST) --env DATABRICKS_TOKEN=$(DATABRICKS_TOKEN) devcontainer -c "make test" 80 | condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master')) 81 | displayName: Integration tests 82 | 83 | - script: | 84 | docker stop `docker ps -qa` 85 | docker system prune -f --volumes -a 86 | displayName: Clean up docker 87 | 88 | - task: PublishCodeCoverageResults@1 89 | inputs: 90 | codeCoverageTool: Cobertura 91 | summaryFileLocation: $(System.DefaultWorkingDirectory)/**/cover.xml 92 | reportDirectory: $(System.DefaultWorkingDirectory)/**/cover 93 | 94 | - task: AzureCLI@1 95 | condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master')) 96 | displayName: Push to Azure Container Registry 97 | inputs: 98 | azureSubscription: $(AZURE_SUBSCRIPTION) 99 | scriptLocation: inlineScript 100 | failOnStandardError: false 101 | inlineScript: az acr build --registry $(ACR_NAME) --image $(IMAGE_NAME) . 
102 | 103 | - script: | 104 | ./bin/kustomize build config/default > $(Build.ArtifactStagingDirectory)/setup.yaml 105 | set -x 106 | echo $(IMAGE_NAME) > $(Build.ArtifactStagingDirectory)/azure-databricks-operator.txt 107 | ls $(Build.ArtifactStagingDirectory) 108 | continueOnError: 'false' 109 | displayName: 'Prepare manifests for publish' 110 | condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master')) 111 | 112 | - task: PublishBuildArtifacts@1 113 | condition: and(succeeded(), eq(variables['Build.SourceBranch'], 'refs/heads/master')) 114 | inputs: 115 | pathtoPublish: $(Build.ArtifactStagingDirectory) 116 | artifactName: drop 117 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 2 | # More documentation can be found at https://docs.cert-manager.io 3 | # WARNING: Targets CertManager 0.11; check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for breaking changes 4 | apiVersion: cert-manager.io/v1alpha2 5 | kind: Issuer 6 | metadata: 7 | name: selfsigned-issuer 8 | namespace: system 9 | spec: 10 | selfSigned: {} 11 | --- 12 | apiVersion: cert-manager.io/v1alpha2 13 | kind: Certificate 14 | metadata: 15 | name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml 16 | namespace: system 17 | spec: 18 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 19 | dnsNames: 20 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 22 | issuerRef: 23 | kind: Issuer 24 | name: selfsigned-issuer 25 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 26 | -------------------------------------------------------------------------------- 
/config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/crd/bases/databricks.microsoft.com_dbfsblocks.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.2.4 8 | creationTimestamp: null 9 | name: dbfsblocks.databricks.microsoft.com 10 | spec: 11 | additionalPrinterColumns: 12 | - JSONPath: .metadata.creationTimestamp 13 | name: Age 14 | type: date 15 | - JSONPath: .status.file_hash 16 | name: SHA1SUM 17 | type: string 18 | - JSONPath: .status.file_info.path 19 | name: Path 20 | type: string 21 | - JSONPath: .status.file_info.file_size 22 | name: Size 23 | type: integer 24 | group: databricks.microsoft.com 25 | names: 26 | kind: DbfsBlock 27 | listKind: DbfsBlockList 28 | plural: dbfsblocks 29 | singular: dbfsblock 30 | scope: Namespaced 31 | subresources: {} 32 | validation: 33 | openAPIV3Schema: 34 | description: DbfsBlock is the Schema for the dbfsblocks API 35 | properties: 36 | 
apiVersion: 37 | description: 'APIVersion defines the versioned schema of this representation 38 | of an object. Servers should convert recognized schemas to the latest 39 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 40 | type: string 41 | kind: 42 | description: 'Kind is a string value representing the REST resource this 43 | object represents. Servers may infer this from the endpoint the client 44 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 45 | type: string 46 | metadata: 47 | type: object 48 | spec: 49 | description: DbfsBlockSpec defines the desired state of DbfsBlock 50 | properties: 51 | data: 52 | type: string 53 | path: 54 | type: string 55 | type: object 56 | status: 57 | description: DbfsBlockStatus defines the observed state of DbfsBlock 58 | properties: 59 | file_hash: 60 | type: string 61 | file_info: 62 | properties: 63 | file_size: 64 | format: int64 65 | type: integer 66 | is_dir: 67 | type: boolean 68 | path: 69 | type: string 70 | type: object 71 | type: object 72 | type: object 73 | version: v1alpha1 74 | versions: 75 | - name: v1alpha1 76 | served: true 77 | storage: true 78 | status: 79 | acceptedNames: 80 | kind: "" 81 | plural: "" 82 | conditions: [] 83 | storedVersions: [] 84 | -------------------------------------------------------------------------------- /config/crd/bases/databricks.microsoft.com_secretscopes.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.2.4 8 | creationTimestamp: null 9 | name: secretscopes.databricks.microsoft.com 10 | spec: 11 | group: databricks.microsoft.com 12 | names: 13 | 
kind: SecretScope 14 | listKind: SecretScopeList 15 | plural: secretscopes 16 | singular: secretscope 17 | scope: Namespaced 18 | validation: 19 | openAPIV3Schema: 20 | description: SecretScope is the Schema for the secretscopes API 21 | properties: 22 | apiVersion: 23 | description: 'APIVersion defines the versioned schema of this representation 24 | of an object. Servers should convert recognized schemas to the latest 25 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 26 | type: string 27 | kind: 28 | description: 'Kind is a string value representing the REST resource this 29 | object represents. Servers may infer this from the endpoint the client 30 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 31 | type: string 32 | metadata: 33 | type: object 34 | spec: 35 | description: SecretScopeSpec defines the desired state of SecretScope 36 | properties: 37 | acls: 38 | items: 39 | description: SecretScopeACL represents ACLs for a secret scope 40 | properties: 41 | permission: 42 | type: string 43 | principal: 44 | type: string 45 | type: object 46 | type: array 47 | initial_manage_permission: 48 | description: 'INSERT ADDITIONAL SPEC FIELDS - desired state of cluster 49 | Important: Run "make" to regenerate code after modifying this file' 50 | type: string 51 | secrets: 52 | items: 53 | description: SecretScopeSecret represents a secret in a secret scope 54 | properties: 55 | byte_value: 56 | type: string 57 | key: 58 | type: string 59 | string_value: 60 | type: string 61 | value_from: 62 | description: SecretScopeValueFrom references a secret scope 63 | properties: 64 | secret_key_ref: 65 | description: SecretScopeKeyRef refers to a secret scope Key 66 | properties: 67 | key: 68 | type: string 69 | name: 70 | type: string 71 | type: object 72 
| type: object 73 | type: object 74 | type: array 75 | type: object 76 | status: 77 | description: SecretScopeStatus defines the observed state of SecretScope 78 | properties: 79 | secretinclusteravailable: 80 | type: boolean 81 | secretscope: 82 | description: 'INSERT ADDITIONAL STATUS FIELD - define observed state 83 | of cluster Important: Run "make" to regenerate code after modifying 84 | this file' 85 | properties: 86 | backend_type: 87 | type: string 88 | name: 89 | type: string 90 | type: object 91 | type: object 92 | type: object 93 | version: v1alpha1 94 | versions: 95 | - name: v1alpha1 96 | served: true 97 | storage: true 98 | status: 99 | acceptedNames: 100 | kind: "" 101 | plural: "" 102 | conditions: [] 103 | storedVersions: [] 104 | -------------------------------------------------------------------------------- /config/crd/bases/databricks.microsoft.com_workspaceitems.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.2.4 8 | creationTimestamp: null 9 | name: workspaceitems.databricks.microsoft.com 10 | spec: 11 | additionalPrinterColumns: 12 | - JSONPath: .metadata.creationTimestamp 13 | name: Age 14 | type: date 15 | - JSONPath: .status.object_hash 16 | name: SHA1SUM 17 | type: string 18 | - JSONPath: .status.object_info.language 19 | name: Language 20 | type: string 21 | - JSONPath: .status.object_info.object_type 22 | name: Type 23 | priority: 1 24 | type: string 25 | - JSONPath: .status.object_info.path 26 | name: Path 27 | priority: 1 28 | type: string 29 | group: databricks.microsoft.com 30 | names: 31 | kind: WorkspaceItem 32 | listKind: WorkspaceItemList 33 | plural: workspaceitems 34 | singular: workspaceitem 35 | scope: Namespaced 36 | subresources: {} 37 | validation: 38 | openAPIV3Schema: 39 | description: WorkspaceItem is the Schema 
for the workspaceitems API 40 | properties: 41 | apiVersion: 42 | description: 'APIVersion defines the versioned schema of this representation 43 | of an object. Servers should convert recognized schemas to the latest 44 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' 45 | type: string 46 | kind: 47 | description: 'Kind is a string value representing the REST resource this 48 | object represents. Servers may infer this from the endpoint the client 49 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' 50 | type: string 51 | metadata: 52 | type: object 53 | spec: 54 | description: WorkspaceItemSpec defines the desired state of WorkspaceItem 55 | properties: 56 | content: 57 | type: string 58 | format: 59 | type: string 60 | language: 61 | type: string 62 | path: 63 | type: string 64 | type: object 65 | status: 66 | description: WorkspaceItemStatus defines the observed state of WorkspaceItem 67 | properties: 68 | object_hash: 69 | type: string 70 | object_info: 71 | properties: 72 | language: 73 | type: string 74 | object_type: 75 | type: string 76 | path: 77 | type: string 78 | type: object 79 | type: object 80 | type: object 81 | version: v1alpha1 82 | versions: 83 | - name: v1alpha1 84 | served: true 85 | storage: true 86 | status: 87 | acceptedNames: 88 | kind: "" 89 | plural: "" 90 | conditions: [] 91 | storedVersions: [] 92 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 
3 | # It should be run by config/default 4 | resources: 5 | - bases/databricks.microsoft.com_secretscopes.yaml 6 | - bases/databricks.microsoft.com_djobs.yaml 7 | - bases/databricks.microsoft.com_runs.yaml 8 | - bases/databricks.microsoft.com_dclusters.yaml 9 | - bases/databricks.microsoft.com_dbfsblocks.yaml 10 | - bases/databricks.microsoft.com_workspaceitems.yaml 11 | 12 | # +kubebuilder:scaffold:crdkustomizeresource 13 | 14 | patchesStrategicMerge: 15 | # [WEBHOOK] patches here are for enabling the conversion webhook for each CRD 16 | #- patches/webhook_in_secretscopes.yaml 17 | #- patches/webhook_in_djobs.yaml 18 | #- patches/webhook_in_runs.yaml 19 | #- patches/webhook_in_dclusters.yaml 20 | #- patches/webhook_in_dbfsblocks.yaml 21 | #- patches/webhook_in_workspaceitems.yaml 22 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 23 | 24 | # [CAINJECTION] patches here are for enabling the CA injection for each CRD 25 | #- patches/cainjection_in_secretscopes.yaml 26 | #- patches/cainjection_in_djobs.yaml 27 | #- patches/cainjection_in_runs.yaml 28 | #- patches/cainjection_in_dclusters.yaml 29 | #- patches/cainjection_in_dbfsblocks.yaml 30 | #- patches/cainjection_in_workspaceitems.yaml 31 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 32 | 33 | # the following config is for teaching kustomize how to do kustomization for CRDs. 
34 | configurations: 35 | - kustomizeconfig.yaml 36 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | group: apiextensions.k8s.io 8 | path: spec/conversion/webhookClientConfig/service/name 9 | 10 | namespace: 11 | - kind: CustomResourceDefinition 12 | group: apiextensions.k8s.io 13 | path: spec/conversion/webhookClientConfig/service/namespace 14 | create: false 15 | 16 | varReference: 17 | - path: metadata/annotations 18 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_dbfsblocks.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: dbfsblocks.databricks.microsoft.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_dclusters.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: dclusters.databricks.microsoft.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_djobs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: djobs.databricks.microsoft.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_runs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: runs.databricks.microsoft.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_secretscopes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: secretscopes.databricks.microsoft.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_workspaceitems.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: workspaceitems.databricks.microsoft.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_dbfsblocks.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: dbfsblocks.databricks.microsoft.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_dclusters.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: dclusters.databricks.microsoft.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_djobs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: djobs.databricks.microsoft.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_runs.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: runs.databricks.microsoft.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_secretscopes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: secretscopes.databricks.microsoft.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_workspaceitems.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: workspaceitems.databricks.microsoft.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: azure-databricks-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 
9 | namePrefix: azure-databricks-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml 20 | #- ../webhook 21 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 22 | #- ../certmanager 23 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 24 | #- ../prometheus 25 | 26 | patchesStrategicMerge: 27 | - manager_image_patch.yaml 28 | # Protect the /metrics endpoint by putting it behind auth. 29 | # Only one of manager_auth_proxy_patch.yaml and 30 | # manager_prometheus_metrics_patch.yaml should be enabled. 31 | - manager_auth_proxy_patch.yaml 32 | # If you want your controller-manager to expose the /metrics 33 | # endpoint w/o any authn/z, uncomment the following line and 34 | # comment manager_auth_proxy_patch.yaml. 35 | # Only one of manager_auth_proxy_patch.yaml and 36 | # manager_prometheus_metrics_patch.yaml should be enabled. 37 | #- manager_prometheus_metrics_patch.yaml 38 | 39 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml 40 | #- manager_webhook_patch.yaml 41 | 42 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 43 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 44 | # 'CERTMANAGER' needs to be enabled to use ca injection 45 | #- webhookcainjection_patch.yaml 46 | 47 | # the following config is for teaching kustomize how to do var substitution 48 | vars: 49 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 
50 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 51 | # objref: 52 | # kind: Certificate 53 | # group: cert-manager.io 54 | # version: v1alpha2 55 | # name: serving-cert # this name should match the one in certificate.yaml 56 | # fieldref: 57 | # fieldpath: metadata.namespace 58 | #- name: CERTIFICATE_NAME 59 | # objref: 60 | # kind: Certificate 61 | # group: cert-manager.io 62 | # version: v1alpha2 63 | # name: serving-cert # this name should match the one in certificate.yaml 64 | #- name: SERVICE_NAMESPACE # namespace of the service 65 | # objref: 66 | # kind: Service 67 | # version: v1 68 | # name: webhook-service 69 | # fieldref: 70 | # fieldpath: metadata.namespace 71 | #- name: SERVICE_NAME 72 | # objref: 73 | # kind: Service 74 | # version: v1 75 | # name: webhook-service 76 | -------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the controller manager, 2 | # it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--v=10" 19 | ports: 20 | - containerPort: 8443 21 | name: https 22 | - name: manager 23 | args: 24 | - "--metrics-addr=127.0.0.1:8080" 25 | - "--enable-leader-election" 26 | -------------------------------------------------------------------------------- /config/default/manager_image_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | # Change the value of image field below to your controller image URL 11 | - name: manager 12 | env: 13 | - name: DATABRICKS_HOST 14 | valueFrom: 15 | secretKeyRef: 16 | name: dbrickssettings 17 | key: DatabricksHost 18 | - name: DATABRICKS_TOKEN 19 | valueFrom: 20 | secretKeyRef: 21 | name: dbrickssettings 22 | key: DatabricksToken 23 | - name: MAX_CONCURRENT_RUN_RECONCILES 24 | value: "1" 25 | -------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | ports: 12 | - containerPort: 9443 13 | name: webhook-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp/k8s-webhook-server/serving-certs 17 | name: cert 18 | readOnly: true 19 | volumes: 20 | - name: cert 21 | secret: 22 | defaultMode: 420 23 | secretName: 
webhook-server-cert 24 | -------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch add annotation to admission webhook config and 2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 3 | apiVersion: admissionregistration.k8s.io/v1beta1 4 | kind: MutatingWebhookConfiguration 5 | metadata: 6 | name: mutating-webhook-configuration 7 | annotations: 8 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 9 | --- 10 | apiVersion: admissionregistration.k8s.io/v1beta1 11 | kind: ValidatingWebhookConfiguration 12 | metadata: 13 | name: validating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 16 | -------------------------------------------------------------------------------- /config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | images: 6 | - name: controller 7 | newName: IMAGE_URL 8 | -------------------------------------------------------------------------------- /config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane: controller-manager 19 | replicas: 1 20 | template: 21 | metadata: 22 | labels: 23 | control-plane: controller-manager 24 | spec: 25 | containers: 26 | - command: 27 | - 
/manager 28 | args: 29 | - --enable-leader-election 30 | image: controller:latest 31 | name: manager 32 | resources: 33 | limits: 34 | cpu: 500m 35 | memory: 1Gi 36 | requests: 37 | cpu: 200m 38 | memory: 512Mi 39 | terminationGracePeriodSeconds: 10 40 | -------------------------------------------------------------------------------- /config/prometheus/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | scheme: https 15 | bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 16 | tlsConfig: 17 | insecureSkipVerify: true # Configure certs here if set up for auth_proxy (uses self-signed currently) 18 | selector: 19 | matchLabels: 20 | control-plane: controller-manager 21 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | # Comment the following 3 lines if you want to disable 7 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 8 | # which protects your /metrics endpoint. 9 | - auth_proxy_service.yaml 10 | - auth_proxy_role.yaml 11 | - auth_proxy_role_binding.yaml 12 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - events 31 | verbs: 32 | - create 33 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | creationTimestamp: null 7 | name: manager-role 8 | rules: 9 | - apiGroups: 10 | - apps 11 | resources: 12 | - deployments 13 | verbs: 14 | - create 15 | - delete 16 | - get 17 | - list 18 | - patch 19 | - update 20 | - watch 21 | - apiGroups: 22 | - apps 23 | resources: 24 | - deployments/status 25 | verbs: 26 | - get 27 | - patch 28 | - update 29 | - apiGroups: 30 | - "" 31 | resources: 32 | - events 33 | verbs: 34 | - create 35 | - delete 36 | - get 37 | - list 38 | - patch 39 | - update 40 | - watch 41 | - apiGroups: 42 | - "" 43 | resources: 44 | - secrets 45 | verbs: 46 | - create 47 | - delete 48 | - get 49 | - list 50 | - patch 51 | - update 52 | - watch 53 | - apiGroups: 54 | - 
databricks.microsoft.com 55 | resources: 56 | - dbfsblocks 57 | verbs: 58 | - create 59 | - delete 60 | - get 61 | - list 62 | - patch 63 | - update 64 | - watch 65 | - apiGroups: 66 | - databricks.microsoft.com 67 | resources: 68 | - dbfsblocks/status 69 | verbs: 70 | - get 71 | - patch 72 | - update 73 | - apiGroups: 74 | - databricks.microsoft.com 75 | resources: 76 | - dclusters 77 | verbs: 78 | - create 79 | - delete 80 | - get 81 | - list 82 | - patch 83 | - update 84 | - watch 85 | - apiGroups: 86 | - databricks.microsoft.com 87 | resources: 88 | - dclusters/status 89 | verbs: 90 | - get 91 | - patch 92 | - update 93 | - apiGroups: 94 | - databricks.microsoft.com 95 | resources: 96 | - djobs 97 | verbs: 98 | - create 99 | - delete 100 | - get 101 | - list 102 | - patch 103 | - update 104 | - watch 105 | - apiGroups: 106 | - databricks.microsoft.com 107 | resources: 108 | - djobs/status 109 | verbs: 110 | - get 111 | - patch 112 | - update 113 | - apiGroups: 114 | - databricks.microsoft.com 115 | resources: 116 | - events 117 | verbs: 118 | - create 119 | - delete 120 | - get 121 | - list 122 | - patch 123 | - update 124 | - watch 125 | - apiGroups: 126 | - databricks.microsoft.com 127 | resources: 128 | - runs 129 | verbs: 130 | - create 131 | - delete 132 | - get 133 | - list 134 | - patch 135 | - update 136 | - watch 137 | - apiGroups: 138 | - databricks.microsoft.com 139 | resources: 140 | - runs/status 141 | verbs: 142 | - get 143 | - patch 144 | - update 145 | - apiGroups: 146 | - databricks.microsoft.com 147 | resources: 148 | - secretscopes 149 | verbs: 150 | - create 151 | - delete 152 | - get 153 | - list 154 | - patch 155 | - update 156 | - watch 157 | - apiGroups: 158 | - databricks.microsoft.com 159 | resources: 160 | - secretscopes/status 161 | verbs: 162 | - get 163 | - patch 164 | - update 165 | - apiGroups: 166 | - databricks.microsoft.com 167 | resources: 168 | - workspaceitems 169 | verbs: 170 | - create 171 | - delete 172 | - get 173 | - 
list 174 | - patch 175 | - update 176 | - watch 177 | - apiGroups: 178 | - databricks.microsoft.com 179 | resources: 180 | - workspaceitems/status 181 | verbs: 182 | - get 183 | - patch 184 | - update 185 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/samples/databricks_v1alpha1_dbfsblock.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databricks.microsoft.com/v1alpha1 2 | kind: DbfsBlock 3 | metadata: 4 | name: dbfsblock-sample 5 | spec: 6 | path: /dbfsblock-sample 7 | data: ZGF0YWJyaWNrcwo= 8 | -------------------------------------------------------------------------------- /config/samples/databricks_v1alpha1_dcluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databricks.microsoft.com/v1alpha1 2 | kind: Dcluster 3 | metadata: 4 | name: dcluster-sample 5 | spec: 6 | spark_version: 5.3.x-scala2.11 7 | node_type_id: Standard_D3_v2 8 | autoscale: 9 | min_workers: 2 10 | max_workers: 3 11 | -------------------------------------------------------------------------------- /config/samples/databricks_v1alpha1_djob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databricks.microsoft.com/v1alpha1 2 | kind: Djob 3 | metadata: 4 | name: djob-sample 5 | spec: 6 | # This spec is directly linked to the JobSettings structure 7 | # 
https://docs.databricks.com/api/latest/jobs.html#jobsettings 8 | new_cluster: 9 | spark_version: 5.3.x-scala2.11 10 | node_type_id: Standard_D3_v2 11 | num_workers: 3 12 | libraries: 13 | - jar: 'dbfs:/my-jar.jar' 14 | - maven: 15 | coordinates: 'org.jsoup:jsoup:1.7.2' 16 | timeout_seconds: 3600 17 | max_retries: 1 18 | schedule: 19 | quartz_cron_expression: 0 15 22 ? * * 20 | timezone_id: America/Los_Angeles 21 | spark_jar_task: 22 | main_class_name: com.databricks.ComputeModels 23 | -------------------------------------------------------------------------------- /config/samples/databricks_v1alpha1_run_direct.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databricks.microsoft.com/v1alpha1 2 | kind: Run 3 | metadata: 4 | name: run-sample 5 | spec: 6 | # create a run directly without a job 7 | new_cluster: 8 | spark_version: 5.3.x-scala2.11 9 | node_type_id: Standard_D3_v2 10 | num_workers: 3 11 | libraries: 12 | - jar: 'dbfs:/my-jar.jar' 13 | - maven: 14 | coordinates: 'org.jsoup:jsoup:1.7.2' 15 | spark_jar_task: 16 | main_class_name: com.databricks.ComputeModels 17 | -------------------------------------------------------------------------------- /config/samples/databricks_v1alpha1_run_job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databricks.microsoft.com/v1alpha1 2 | kind: Run 3 | metadata: 4 | name: run-sample 5 | spec: 6 | # run an existing job with job id 7 | job_name: djob-sample 8 | jar_params: 9 | - "test" 10 | - "test_param2" 11 | -------------------------------------------------------------------------------- /config/samples/databricks_v1alpha1_secretscope.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databricks.microsoft.com/v1alpha1 2 | kind: SecretScope 3 | metadata: 4 | name: secretscope-sample 5 | spec: 6 | initial_manage_principal: users 7 | secrets: 8 | - key: string-secret 9 | 
string_value: helloworld 10 | - key: byte-secret 11 | byte_value: aGVsbG93b3JsZA== 12 | - key: secret-key2 13 | value_from: 14 | secret_key_ref: 15 | name: mysecret 16 | key: username 17 | acls: 18 | - principal: jacob.zhou@dataexchange.work 19 | permission: READ 20 | -------------------------------------------------------------------------------- /config/samples/databricks_v1alpha1_workspaceitem.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: databricks.microsoft.com/v1alpha1 2 | kind: WorkspaceItem 3 | metadata: 4 | name: workspaceitem-sample 5 | spec: 6 | content: | 7 | MSsx 8 | path: /ScalaExampleNotebook 9 | language: SCALA 10 | overwrite: true 11 | format: SOURCE 12 | -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manifests.yaml 3 | - service.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | -------------------------------------------------------------------------------- /config/webhook/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # the following config is for teaching kustomize where to look at when substituting vars. 2 | # It requires kustomize v2.1.0 or newer to work properly. 
3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: MutatingWebhookConfiguration 8 | group: admissionregistration.k8s.io 9 | path: webhooks/clientConfig/service/name 10 | - kind: ValidatingWebhookConfiguration 11 | group: admissionregistration.k8s.io 12 | path: webhooks/clientConfig/service/name 13 | 14 | namespace: 15 | - kind: MutatingWebhookConfiguration 16 | group: admissionregistration.k8s.io 17 | path: webhooks/clientConfig/service/namespace 18 | create: true 19 | - kind: ValidatingWebhookConfiguration 20 | group: admissionregistration.k8s.io 21 | path: webhooks/clientConfig/service/namespace 22 | create: true 23 | 24 | varReference: 25 | - path: metadata/annotations 26 | -------------------------------------------------------------------------------- /config/webhook/manifests.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/config/webhook/manifests.yaml -------------------------------------------------------------------------------- /config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webhook-service 6 | namespace: system 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 9443 11 | selector: 12 | control-plane: controller-manager 13 | -------------------------------------------------------------------------------- /controllers/dbfsblock_controller_databricks.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to 
use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | "encoding/base64" 30 | "fmt" 31 | "time" 32 | 33 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 34 | ) 35 | 36 | func (r *DbfsBlockReconciler) submit(instance *databricksv1alpha1.DbfsBlock) error { 37 | r.Log.Info(fmt.Sprintf("Create block %s", instance.GetName())) 38 | 39 | data, err := base64.StdEncoding.DecodeString(instance.Spec.Data) 40 | if err != nil { 41 | return err 42 | } 43 | 44 | // Open handler 45 | execution := NewExecution("dbfsblocks", "create") 46 | createResponse, err := r.APIClient.Dbfs().Create(instance.Spec.Path, true) 47 | execution.Finish(err) 48 | 49 | if err != nil { 50 | return err 51 | } 52 | 53 | // DataBricks limits the AddBlock size to be 1024KB 54 | var g = 1000 55 | for i := 0; i < len(data); i += g { 56 | execution = NewExecution("dbfsblocks", "add_block") 57 | 58 | if i+g <= len(data) { 59 | err = r.APIClient.Dbfs().AddBlock(createResponse.Handle, data[i:i+g]) 60 | } else { 61 | err = r.APIClient.Dbfs().AddBlock(createResponse.Handle, data[i:]) 62 | } 63 | 64 | execution.Finish(err) 65 
| 66 | if err != nil { 67 | return err 68 | } 69 | } 70 | 71 | // Close handler 72 | execution = NewExecution("dbfsblocks", "close") 73 | err = r.APIClient.Dbfs().Close(createResponse.Handle) 74 | execution.Finish(err) 75 | 76 | if err != nil { 77 | return err 78 | } 79 | 80 | time.Sleep(1 * time.Second) 81 | 82 | // Refresh info 83 | execution = NewExecution("dbfsblocks", "get_status") 84 | fileInfo, err := r.APIClient.Dbfs().GetStatus(instance.Spec.Path) 85 | execution.Finish(err) 86 | 87 | if err != nil { 88 | return err 89 | } 90 | 91 | instance.Status = &databricksv1alpha1.DbfsBlockStatus{ 92 | FileInfo: &fileInfo, 93 | FileHash: instance.GetHash(), 94 | } 95 | 96 | return r.Update(context.Background(), instance) 97 | } 98 | 99 | func (r *DbfsBlockReconciler) delete(instance *databricksv1alpha1.DbfsBlock) error { 100 | r.Log.Info(fmt.Sprintf("Deleting block %s", instance.GetName())) 101 | 102 | if instance.Status == nil || instance.Status.FileInfo == nil { 103 | return nil 104 | } 105 | 106 | path := instance.Status.FileInfo.Path 107 | 108 | execution := NewExecution("dbfsblocks", "delete") 109 | err := r.APIClient.Dbfs().Delete(path, true) 110 | execution.Finish(err) 111 | return err 112 | } 113 | -------------------------------------------------------------------------------- /controllers/dbfsblock_controller_finalizer.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright 
notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | 30 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 31 | ) 32 | 33 | func (r *DbfsBlockReconciler) addFinalizer(instance *databricksv1alpha1.DbfsBlock) error { 34 | instance.AddFinalizer(databricksv1alpha1.DbfsBlockFinalizerName) 35 | return r.Update(context.Background(), instance) 36 | } 37 | 38 | func (r *DbfsBlockReconciler) handleFinalizer(instance *databricksv1alpha1.DbfsBlock) error { 39 | if !instance.HasFinalizer(databricksv1alpha1.DbfsBlockFinalizerName) { 40 | return nil 41 | } 42 | 43 | if err := r.delete(instance); err != nil { 44 | return err 45 | } 46 | instance.RemoveFinalizer(databricksv1alpha1.DbfsBlockFinalizerName) 47 | return r.Update(context.Background(), instance) 48 | } 49 | -------------------------------------------------------------------------------- /controllers/dcluster_controller_databricks.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, 
publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | "fmt" 30 | "reflect" 31 | 32 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 33 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 34 | ) 35 | 36 | func (r *DclusterReconciler) submit(instance *databricksv1alpha1.Dcluster) error { 37 | r.Log.Info(fmt.Sprintf("Create cluster %s", instance.GetName())) 38 | 39 | instance.Spec.ClusterName = instance.GetName() 40 | 41 | if instance.Status != nil && instance.Status.ClusterInfo != nil && instance.Status.ClusterInfo.ClusterID != "" { 42 | err := r.APIClient.Clusters().PermanentDelete(instance.Status.ClusterInfo.ClusterID) 43 | if err != nil { 44 | return err 45 | } 46 | } 47 | 48 | clusterInfo, err := r.createCluster(instance) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | var info databricksv1alpha1.DclusterInfo 54 | instance.Status = &databricksv1alpha1.DclusterStatus{ 55 | ClusterInfo: info.FromDataBricksClusterInfo(clusterInfo), 56 | } 57 | return r.Update(context.Background(), instance) 58 | } 59 | 60 | func (r *DclusterReconciler) refresh(instance 
*databricksv1alpha1.Dcluster) error { 61 | r.Log.Info(fmt.Sprintf("Refresh cluster %s", instance.GetName())) 62 | 63 | if instance.Status == nil || instance.Status.ClusterInfo == nil { 64 | return nil 65 | } 66 | 67 | clusterInfo, err := r.getCluster(instance.Status.ClusterInfo.ClusterID) 68 | if err != nil { 69 | return err 70 | } 71 | 72 | if reflect.DeepEqual(instance.Status.ClusterInfo, &clusterInfo) { 73 | return nil 74 | } 75 | 76 | var info databricksv1alpha1.DclusterInfo 77 | instance.Status = &databricksv1alpha1.DclusterStatus{ 78 | ClusterInfo: info.FromDataBricksClusterInfo(clusterInfo), 79 | } 80 | return r.Update(context.Background(), instance) 81 | } 82 | 83 | func (r *DclusterReconciler) delete(instance *databricksv1alpha1.Dcluster) error { 84 | r.Log.Info(fmt.Sprintf("Deleting cluster %s", instance.GetName())) 85 | 86 | if instance.Status == nil || instance.Status.ClusterInfo == nil { 87 | return nil 88 | } 89 | 90 | execution := NewExecution("dclusters", "delete") 91 | err := r.APIClient.Clusters().PermanentDelete(instance.Status.ClusterInfo.ClusterID) 92 | execution.Finish(err) 93 | return err 94 | } 95 | 96 | func (r *DclusterReconciler) getCluster(clusterID string) (cluster dbmodels.ClusterInfo, err error) { 97 | execution := NewExecution("dclusters", "get") 98 | cluster, err = r.APIClient.Clusters().Get(clusterID) 99 | execution.Finish(err) 100 | return cluster, err 101 | } 102 | 103 | func (r *DclusterReconciler) createCluster(instance *databricksv1alpha1.Dcluster) (cluster dbmodels.ClusterInfo, err error) { 104 | execution := NewExecution("dclusters", "create") 105 | cluster, err = r.APIClient.Clusters().Create(*instance.Spec) 106 | execution.Finish(err) 107 | return cluster, err 108 | } 109 | -------------------------------------------------------------------------------- /controllers/dcluster_controller_finalizer.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | 
Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | 30 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 31 | ) 32 | 33 | func (r *DclusterReconciler) addFinalizer(instance *databricksv1alpha1.Dcluster) error { 34 | instance.AddFinalizer(databricksv1alpha1.DclusterFinalizerName) 35 | return r.Update(context.Background(), instance) 36 | } 37 | 38 | func (r *DclusterReconciler) handleFinalizer(instance *databricksv1alpha1.Dcluster) error { 39 | if !instance.HasFinalizer(databricksv1alpha1.DclusterFinalizerName) { 40 | return nil 41 | } 42 | 43 | if err := r.delete(instance); err != nil { 44 | return err 45 | } 46 | instance.RemoveFinalizer(databricksv1alpha1.DclusterFinalizerName) 47 | return r.Update(context.Background(), instance) 48 | } 49 | -------------------------------------------------------------------------------- /controllers/dcluster_controller_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | "time" 30 | 31 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 32 | . "github.com/onsi/ginkgo" 33 | . "github.com/onsi/gomega" 34 | dbmodels "github.com/xinsnake/databricks-sdk-golang/azure/models" 35 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 36 | "k8s.io/apimachinery/pkg/types" 37 | ) 38 | 39 | var _ = Describe("Dcluster Controller", func() { 40 | 41 | const timeout = time.Second * 30 42 | const interval = time.Second * 1 43 | 44 | BeforeEach(func() { 45 | // Add any setup steps that needs to be executed before each test 46 | }) 47 | 48 | AfterEach(func() { 49 | // Add any teardown steps that needs to be executed after each test 50 | }) 51 | 52 | // Add Tests for OpenAPI validation (or additional CRD features) specified in 53 | // your API definition. 54 | // Avoid adding tests for vanilla CRUD operations because they would 55 | // test Kubernetes API server, which isn't the goal here. 
56 | Context("Cluster with autho-scaling", func() { 57 | It("Should create successfully", func() { 58 | 59 | key := types.NamespacedName{ 60 | Name: "t-cluster" + "-" + randomStringWithCharset(10, charset), 61 | Namespace: "default", 62 | } 63 | 64 | created := &databricksv1alpha1.Dcluster{ 65 | ObjectMeta: metav1.ObjectMeta{ 66 | Name: key.Name, 67 | Namespace: key.Namespace, 68 | }, 69 | Spec: &dbmodels.NewCluster{ 70 | Autoscale: &dbmodels.AutoScale{ 71 | MinWorkers: 2, 72 | MaxWorkers: 5, 73 | }, 74 | AutoterminationMinutes: 10, 75 | NodeTypeID: "Standard_D3_v2", 76 | SparkVersion: "5.3.x-scala2.11", 77 | }, 78 | } 79 | 80 | // Create 81 | Expect(k8sClient.Create(context.Background(), created)).Should(Succeed()) 82 | 83 | By("Expecting submitted") 84 | Eventually(func() bool { 85 | f := &databricksv1alpha1.Dcluster{} 86 | _ = k8sClient.Get(context.Background(), key, f) 87 | return f.IsSubmitted() 88 | }, timeout, interval).Should(BeTrue()) 89 | 90 | // Delete 91 | By("Expecting to delete successfully") 92 | Eventually(func() error { 93 | f := &databricksv1alpha1.Dcluster{} 94 | _ = k8sClient.Get(context.Background(), key, f) 95 | return k8sClient.Delete(context.Background(), f) 96 | }, timeout, interval).Should(Succeed()) 97 | 98 | By("Expecting to delete finish") 99 | Eventually(func() error { 100 | f := &databricksv1alpha1.Dcluster{} 101 | return k8sClient.Get(context.Background(), key, f) 102 | }, timeout, interval).ShouldNot(Succeed()) 103 | }) 104 | }) 105 | }) 106 | -------------------------------------------------------------------------------- /controllers/djob_controller_finalizer.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including 
without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | 30 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 31 | ) 32 | 33 | func (r *DjobReconciler) addFinalizer(instance *databricksv1alpha1.Djob) error { 34 | instance.AddFinalizer(databricksv1alpha1.DjobFinalizerName) 35 | return r.Update(context.Background(), instance) 36 | } 37 | 38 | func (r *DjobReconciler) handleFinalizer(instance *databricksv1alpha1.Djob) error { 39 | if !instance.HasFinalizer(databricksv1alpha1.DjobFinalizerName) { 40 | return nil 41 | } 42 | 43 | if err := r.delete(instance); err != nil { 44 | return err 45 | } 46 | instance.RemoveFinalizer(databricksv1alpha1.DjobFinalizerName) 47 | return r.Update(context.Background(), instance) 48 | } 49 | -------------------------------------------------------------------------------- /controllers/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person 
obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | */ 24 | 25 | package controllers 26 | 27 | // +kubebuilder:rbac:groups=databricks.microsoft.com,resources=events,verbs=get;list;watch;create;update;patch;delete 28 | // +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete 29 | // +kubebuilder:rbac:groups=apps,resources=deployments/status,verbs=get;update;patch 30 | // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete 31 | // +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;update;patch;delete 32 | -------------------------------------------------------------------------------- /controllers/metrics.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "time" 29 | 30 | "github.com/prometheus/client_golang/prometheus" 31 | "sigs.k8s.io/controller-runtime/pkg/metrics" 32 | ) 33 | 34 | const ( 35 | successMetric = "success" 36 | failureMetric = "failure" 37 | ) 38 | 39 | var databricksRequestHistogram = prometheus.NewHistogramVec(prometheus.HistogramOpts{ 40 | Name: "databricks_request_duration_seconds", 41 | Help: "Duration of upstream calls to Databricks REST service endpoints", 42 | }, []string{"object_type", "action", "outcome"}) 43 | 44 | func init() { 45 | // Register custom metrics with the global prometheus registry 46 | metrics.Registry.MustRegister(databricksRequestHistogram) 47 | } 48 | 49 | // NewExecution creates an Execution instance and starts the timer 50 | func NewExecution(objectType string, action string) Execution { 51 | return Execution{ 52 | begin: time.Now(), 53 | labels: prometheus.Labels{"object_type": objectType, "action": action}, 54 | } 55 | } 56 | 57 | // Execution tracks state for an API execution for emitting metrics 58 | type Execution struct { 59 | begin time.Time 60 | labels prometheus.Labels 61 | } 62 | 63 | // Finish is used to log duration and success/failure 64 | func (e *Execution) Finish(err error) { 65 | if err == nil { 66 | e.labels["outcome"] = successMetric 67 | } else { 68 | e.labels["outcome"] = failureMetric 69 | } 70 | duration := time.Since(e.begin) 71 | databricksRequestHistogram.With(e.labels).Observe(duration.Seconds()) 72 | } 73 | -------------------------------------------------------------------------------- /controllers/run_controller_finalizer.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, 
including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | 30 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 31 | ) 32 | 33 | func (r *RunReconciler) addFinalizer(instance *databricksv1alpha1.Run) error { 34 | instance.AddFinalizer(databricksv1alpha1.RunFinalizerName) 35 | return r.Update(context.Background(), instance) 36 | } 37 | 38 | // handleFinalizer returns a bool and an error. 
If error is set then the attempt failed, otherwise boolean indicates whether it completed 39 | func (r *RunReconciler) handleFinalizer(instance *databricksv1alpha1.Run) (bool, error) { 40 | if !instance.HasFinalizer(databricksv1alpha1.RunFinalizerName) { 41 | return true, nil 42 | } 43 | 44 | completed, err := r.delete(instance) 45 | if err != nil { 46 | return false, err 47 | } 48 | if completed { 49 | instance.RemoveFinalizer(databricksv1alpha1.RunFinalizerName) 50 | err := r.Update(context.Background(), instance) 51 | return err != nil, err 52 | } 53 | return false, nil // no error, but indicate not completed to trigger a requeue to delete once cancelled 54 | } 55 | -------------------------------------------------------------------------------- /controllers/secretscope_controller_finalizer.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | "fmt" 30 | 31 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 32 | ) 33 | 34 | func (r *SecretScopeReconciler) addFinalizer(instance *databricksv1alpha1.SecretScope) error { 35 | instance.AddFinalizer(databricksv1alpha1.SecretScopeFinalizerName) 36 | err := r.Update(context.Background(), instance) 37 | if err != nil { 38 | return fmt.Errorf("failed to update secret scope finalizer: %v", err) 39 | } 40 | return nil 41 | } 42 | 43 | func (r *SecretScopeReconciler) handleFinalizer(instance *databricksv1alpha1.SecretScope) error { 44 | if instance.HasFinalizer(databricksv1alpha1.SecretScopeFinalizerName) { 45 | if err := r.delete(instance); err != nil { 46 | return err 47 | } 48 | 49 | instance.RemoveFinalizer(databricksv1alpha1.SecretScopeFinalizerName) 50 | if err := r.Update(context.Background(), instance); err != nil { 51 | return err 52 | } 53 | } 54 | // Our finalizer has finished, so the reconciler can do nothing. 
55 | return nil 56 | } 57 | -------------------------------------------------------------------------------- /controllers/workspaceitem_controller_databricks.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | "encoding/base64" 30 | "fmt" 31 | "time" 32 | 33 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 34 | ) 35 | 36 | func (r *WorkspaceItemReconciler) submit(instance *databricksv1alpha1.WorkspaceItem) error { 37 | r.Log.Info(fmt.Sprintf("Create item %s", instance.GetName())) 38 | 39 | if instance.Spec == nil || len(instance.Spec.Content) <= 0 { 40 | return fmt.Errorf("Workspace Content is empty") 41 | } 42 | data, err := base64.StdEncoding.DecodeString(instance.Spec.Content) 43 | if err != nil { 44 | return err 45 | } 46 | 47 | execution := NewExecution("workspaceitems", "import") 48 | err = r.APIClient.Workspace().Import(instance.Spec.Path, instance.Spec.Format, instance.Spec.Language, data, true) 49 | execution.Finish(err) 50 | if err != nil { 51 | return err 52 | } 53 | 54 | time.Sleep(1 * time.Second) 55 | 56 | // Refresh info 57 | execution = NewExecution("workspaceitems", "get_status") 58 | objectInfo, err := r.APIClient.Workspace().GetStatus(instance.Spec.Path) 59 | execution.Finish(err) 60 | if err != nil { 61 | return err 62 | } 63 | 64 | instance.Status = &databricksv1alpha1.WorkspaceItemStatus{ 65 | ObjectInfo: &objectInfo, 66 | ObjectHash: instance.GetHash(), 67 | } 68 | 69 | return r.Update(context.Background(), instance) 70 | } 71 | 72 | func (r *WorkspaceItemReconciler) delete(instance *databricksv1alpha1.WorkspaceItem) error { 73 | r.Log.Info(fmt.Sprintf("Deleting item %s", instance.GetName())) 74 | 75 | if instance.Status == nil || instance.Status.ObjectInfo == nil { 76 | return nil 77 | } 78 | 79 | path := instance.Status.ObjectInfo.Path 80 | 81 | execution := NewExecution("workspaceitems", "import") 82 | err := r.APIClient.Workspace().Delete(path, true) 83 | execution.Finish(err) 84 | return err 85 | } 86 | -------------------------------------------------------------------------------- /controllers/workspaceitem_controller_finalizer.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 22 | SOFTWARE. 
23 | */ 24 | 25 | package controllers 26 | 27 | import ( 28 | "context" 29 | 30 | databricksv1alpha1 "github.com/microsoft/azure-databricks-operator/api/v1alpha1" 31 | ) 32 | 33 | func (r *WorkspaceItemReconciler) addFinalizer(instance *databricksv1alpha1.WorkspaceItem) error { 34 | instance.AddFinalizer(databricksv1alpha1.WorkspaceItemFinalizerName) 35 | return r.Update(context.Background(), instance) 36 | } 37 | 38 | func (r *WorkspaceItemReconciler) handleFinalizer(instance *databricksv1alpha1.WorkspaceItem) error { 39 | if !instance.HasFinalizer(databricksv1alpha1.WorkspaceItemFinalizerName) { 40 | return nil 41 | } 42 | 43 | if err := r.delete(instance); err != nil { 44 | return err 45 | } 46 | instance.RemoveFinalizer(databricksv1alpha1.WorkspaceItemFinalizerName) 47 | return r.Update(context.Background(), instance) 48 | } 49 | -------------------------------------------------------------------------------- /controllers/workspaceitem_controller_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | The MIT License (MIT) 3 | 4 | Copyright (c) 2019 Microsoft 5 | 6 | Permission is hereby granted, free of charge, to any person obtaining a copy 7 | of this software and associated documentation files (the "Software"), to deal 8 | in the Software without restriction, including without limitation the rights 9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 | copies of the Software, and to permit persons to whom the Software is 11 | furnished to do so, subject to the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included in all 14 | copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
var _ = Describe("WorkspaceItem Controller", func() {

	// Polling bounds for the Eventually assertions below.
	const timeout = time.Second * 30
	const interval = time.Second * 1

	BeforeEach(func() {
		// Add any setup steps that needs to be executed before each test
	})

	AfterEach(func() {
		// Add any teardown steps that needs to be executed after each test
	})

	// Add Tests for OpenAPI validation (or additional CRD features) specified in
	// your API definition.
	// Avoid adding tests for vanilla CRUD operations because they would
	// test Kubernetes API server, which isn't the goal here.
	Context("Workspace Item", func() {
		It("Should create successfully", func() {

			// Random name suffix avoids collisions between test runs sharing a cluster.
			key := types.NamespacedName{
				Name:      "t-workspace-item" + randomStringWithCharset(10, charset),
				Namespace: "default",
			}

			created := &databricksv1alpha1.WorkspaceItem{
				ObjectMeta: metav1.ObjectMeta{
					Name:      key.Name,
					Namespace: key.Namespace,
				},
				Spec: &databricksv1alpha1.WorkspaceItemSpec{
					// "MSsx" is base64 for "1+1".
					Content:  "MSsx",
					Path:     "/test-notebook",
					Language: "SCALA",
					Format:   "SOURCE",
				},
			}

			// Create
			Expect(k8sClient.Create(context.Background(), created)).Should(Succeed())

			By("Expecting submitted")
			Eventually(func() bool {
				f := &databricksv1alpha1.WorkspaceItem{}
				_ = k8sClient.Get(context.Background(), key, f)
				return f.IsSubmitted()
			}, timeout, interval).Should(BeTrue())

			// Update
			updated := &databricksv1alpha1.WorkspaceItem{}
			Expect(k8sClient.Get(context.Background(), key, updated)).Should(Succeed())

			// "MSsy" is base64 for "1+2" — changes the content hash so the
			// controller sees the spec as modified.
			updated.Spec.Content = "MSsy"
			Expect(k8sClient.Update(context.Background(), updated)).Should(Succeed())

			// Delete
			By("Expecting to delete successfully")
			Eventually(func() error {
				f := &databricksv1alpha1.WorkspaceItem{}
				_ = k8sClient.Get(context.Background(), key, f)
				return k8sClient.Delete(context.Background(), f)
			}, timeout, interval).Should(Succeed())

			// Get must eventually fail, proving the finalizer released the object.
			By("Expecting to delete finish")
			Eventually(func() error {
				f := &databricksv1alpha1.WorkspaceItem{}
				return k8sClient.Get(context.Background(), key, f)
			}, timeout, interval).ShouldNot(Succeed())
		})
	})
})
power of DevContainers and Kind. 4 | 5 | Running the debugger in VSCode from within a DevContainer, you'll be able to interact with the operator just as you would if it was running within kubernetes using `kubectl`. 6 | 7 |  8 | 9 | ## Prerequisites 10 | 11 | * DevContainer is up and running. 12 | * The operator is deployed and running within a local k8s cluster. 13 | 14 | ## Step-by-step guide 15 | 16 | Before we start, verify your local k8s cluster is running and the operator is deployed using `kubectl get pods -n azure-databricks-operator-system`. 17 | 18 | If it is not, run `make set-kindcluster` to spin up a new local k8s cluster using Kind with the operator deployed. 19 | 20 | For this example we'll be working with: 21 | 22 | * [controllers/secretscope_controller.go](../controllers/secretscope_controller.go) 23 | * [samples/3_secret_scope/secretscope_eventhub.yaml](samples/3_secret_scope/secretscope_eventhub.yaml) 24 | 25 | If you're not familiar with how SecretScope works, spend some time reviewing `secretscope_controller.go`, more specifically the `Reconcile` func found on [this](../controllers/secretscope_controller.go#L48) line. 26 | 27 | 1. Set your breakpoints. Place your breakpoint anywhere within the `Reconcile` func. 28 | 2. From your menu bar, click `Debug`-> `Start Debugging` (or simply hit `F5`). 29 | 3. From your console panel, click the `DEBUG CONSOLE` tab and verify the debugger is running. You should see something like this: INSERT IMAGE 30 | 4. Now click on the `TERMINAL` tab and enter `kubectl apply -f docs/samples/3_secret_scope/secretscope_eventhub.yaml`. 31 | 32 | If you've done everything right you should see your breakpoint hit. 33 | 34 | Happy debugging!
35 | -------------------------------------------------------------------------------- /docs/deploy.md: -------------------------------------------------------------------------------- 1 | # Deploy the operator 2 | 3 | ## Prerequests 4 | 5 | - You have `kubectl` configured pointing to the target Kubernetes cluster. 6 | - You have access to a DataBricks cluster and able to generate PAT token. To generate a token, check 7 | [generate a DataBricks token](https://docs.databricks.com/api/latest/authentication.html#generate-a-token). 8 | 9 | ## Step-by-step guide 10 | 11 | This will deploy the operator in namespace `azure-databricks-operator-system`. If you want to customise 12 | the namespace, you can either search-replace the namespace, or use `kustomise` by following the next 13 | section. 14 | 15 | 1. Download [the latest release manifests](https://github.com/microsoft/azure-databricks-operator/releases): 16 | 17 | ```sh 18 | wget https://github.com/microsoft/azure-databricks-operator/releases/latest/download/release.zip 19 | unzip release.zip 20 | ``` 21 | 22 | > (optional) [Configure maximum number of run reconcilers](##configure-maximum-number-of-run-reconcilers) 23 | 24 | 2. Create the `azure-databricks-operator-system` namespace: 25 | 26 | ```sh 27 | kubectl create namespace azure-databricks-operator-system 28 | ``` 29 | 30 | 3. Create Kubernetes secrets with values for `DATABRICKS_HOST` and `DATABRICKS_TOKEN`: 31 | 32 | ```shell 33 | kubectl --namespace azure-databricks-operator-system \ 34 | create secret generic dbrickssettings \ 35 | --from-literal=DatabricksHost="https://xxxx.azuredatabricks.net" \ 36 | --from-literal=DatabricksToken="xxxxx" 37 | ``` 38 | 39 | 4. Apply the manifests for the Operator and CRDs in `release/config`: 40 | 41 | ```sh 42 | kubectl apply -f release/config 43 | ``` 44 | 45 | ## Configure maximum number of run reconcilers 46 | 47 | 1. 
Change the `MAX_CONCURRENT_RUN_RECONCILES` value in `config/default/manager_image_patch.yaml` under the `env` section with the desired number of reconcilers 48 | ```yaml 49 | - name: MAX_CONCURRENT_RUN_RECONCILES 50 | value: "1" 51 | ``` 52 | 53 | > By default `MAX_CONCURRENT_RUN_RECONCILES` is set to 1 54 | 55 | ## Use kustomize to customise your deployment 56 | 57 | 1. Clone the source code: 58 | 59 | ```sh 60 | git clone git@github.com:microsoft/azure-databricks-operator.git 61 | ``` 62 | 63 | 2. Edit file `config/default/kustomization.yaml` file to change your preferences 64 | 65 | 3. Use `kustomize` to generate the final manifests and deploy: 66 | 67 | ```sh 68 | kustomize build config/default | kubectl apply -f - 69 | ``` 70 | 71 | 4. Deploy the CRDs: 72 | 73 | ```sh 74 | kubectl apply -f config/crd/bases 75 | ``` 76 | 77 | ## Test your deployment 78 | 79 | 1. Deploy a sample job, this will create a job in the default namespace: 80 | 81 | ```sh 82 | curl https://raw.githubusercontent.com/microsoft/azure-databricks-operator/master/config/samples/databricks_v1alpha1_djob.yaml | kubectl apply -f - 83 | ``` 84 | 85 | 2. Check the Job in Kubernetes: 86 | 87 | ```sh 88 | kubectl get djob 89 | ``` 90 | 91 | 3. Check the job is created successfully in DataBricks. 92 | 93 | ## Troubleshooting 94 | 95 | If you encounter any issue, you can check the log of the operator by pulling it from Kubernetes: 96 | 97 | ```sh 98 | # get the pod name of your operator 99 | kubectl --namespace azure-databricks-operator-system get pods 100 | 101 | # pull the logs 102 | kubectl --namespace azure-databricks-operator-system logs -f [name_of_the_operator_pod] 103 | ``` 104 | 105 | To further aid debugging diagnostic metrics are produced by the operator. 
Please review [the metrics page](metrics.md) for further information -------------------------------------------------------------------------------- /docs/images/azure-databricks-operator-highlevel.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/azure-databricks-operator-highlevel.jpg -------------------------------------------------------------------------------- /docs/images/azure-databricks-operator.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/azure-databricks-operator.jpg -------------------------------------------------------------------------------- /docs/images/copy-filepath-in-dbricks.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/copy-filepath-in-dbricks.jpg -------------------------------------------------------------------------------- /docs/images/create-cluster.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/create-cluster.jpg -------------------------------------------------------------------------------- /docs/images/databricks-job.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/databricks-job.jpg -------------------------------------------------------------------------------- /docs/images/debugging.gif: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/debugging.gif -------------------------------------------------------------------------------- /docs/images/devcontainer.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/devcontainer.gif -------------------------------------------------------------------------------- /docs/images/development-flow.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/development-flow.jpg -------------------------------------------------------------------------------- /docs/images/direct-run.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/direct-run.jpg -------------------------------------------------------------------------------- /docs/images/import-notebooks-databricks.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/import-notebooks-databricks.gif -------------------------------------------------------------------------------- /docs/images/run-periodic-job.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/run-periodic-job.jpg -------------------------------------------------------------------------------- 
/docs/images/sample1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/sample1.gif -------------------------------------------------------------------------------- /docs/images/sample2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/sample2.gif -------------------------------------------------------------------------------- /docs/images/sample3.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/sample3.gif -------------------------------------------------------------------------------- /docs/images/secretscopes-runs.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Azure/azure-databricks-operator/265a0a8546293a5d52cdb8a98bac507ce0750cae/docs/images/secretscopes-runs.jpg -------------------------------------------------------------------------------- /docs/mockapi_samples/config_sample.http: -------------------------------------------------------------------------------- 1 | @baseURL=http://localhost:8085/ 2 | 3 | # Get config values 4 | # @name getConfig 5 | GET {{baseURL}}config 6 | 7 | ### 8 | 9 | # Set config values 10 | # @name setConfig 11 | PUT {{baseURL}}config 12 | content-type: application/json 13 | 14 | { 15 | "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MAX": 1000, 16 | "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_SLOW_REQUEST_MIN": 30000, 17 | "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MAX": 200, 18 | "DATABRICKS_MOCK_API_LATENCY_MILLISECONDS_FAST_REQUEST_MIN": 500, 19 | "DATABRICKS_MOCK_API_RATE_LIMIT": 30, 
20 | "DATABRICKS_MOCK_API_ERROR_500_PROBABILITY": 10 21 | } 22 | 23 | 24 | ### 25 | 26 | # Set config values 27 | # @name patchConfig 28 | PATCH {{baseURL}}config 29 | content-type: application/json 30 | 31 | { 32 | "DATABRICKS_MOCK_API_ERROR_500_PROBABILITY": 10 33 | } 34 | -------------------------------------------------------------------------------- /docs/mockapi_samples/job_sample.http: -------------------------------------------------------------------------------- 1 | @baseURL=http://localhost:8085/api/ 2 | # Get list of empty jobs 3 | # @name getJobList 4 | GET {{baseURL}}api/2.0/jobs/list 5 | 6 | ### 7 | 8 | # Create a job and return a job ID 9 | # @name createJob 10 | POST {{baseURL}}2.0/jobs/create 11 | 12 | < ./../api/integration_tests/test_data/job/job_create.json 13 | 14 | ### 15 | 16 | # Get created job 17 | # @name getCreatedJob 18 | @jobID={{createJob.response.body.$.job_id}} 19 | GET {{baseURL}}2.0/jobs/get?job_id={{jobID}} 20 | 21 | ### 22 | 23 | # Job shows in the list of jobs 24 | GET {{baseURL}}2.0/jobs/list 25 | 26 | ### 27 | 28 | # Delete the job 29 | # @name deleteJob 30 | @jobID={{createJob.response.body.$.job_id}} 31 | POST {{baseURL}}2.0/jobs/delete 32 | 33 | { 34 | "job_id" : {{jobID}} 35 | } 36 | 37 | ### 38 | 39 | # Gone from the list 40 | GET {{baseURL}}2.0/jobs/list -------------------------------------------------------------------------------- /docs/mockapi_samples/run_sample.http: -------------------------------------------------------------------------------- 1 | @baseURL=http://localhost:8085/api/ 2 | # Get list of empty runs 3 | # @name getRunList 4 | GET {{baseURL}}2.0/jobs/runs/list 5 | 6 | ### 7 | 8 | # Submit a run and return a run ID 9 | # @name submitRun 10 | POST {{baseURL}}2.0/jobs/runs/submit 11 | 12 | < ./../api/integration_tests/test_data/run/run_submit.json 13 | 14 | ### 15 | 16 | # Get created run by ID 17 | # @name getCreatedRun 18 | @runID={{submitRun.response.body.$.run_id}} 19 | 20 | GET 
{{baseURL}}2.0/jobs/runs/get?run_id={{runID}} 21 | 22 | ### 23 | 24 | # Cancel the run by ID 25 | # @name cancelRun 26 | @runID={{submitRun.response.body.$.run_id}} 27 | POST {{baseURL}}2.0/jobs/runs/cancel 28 | 29 | { 30 | "run_id" : {{runID}} 31 | } 32 | 33 | ### 34 | 35 | # Delete the run by ID 36 | # @name deleteRun 37 | @runID={{submitRun.response.body.$.run_id}} 38 | POST {{baseURL}}2.0/jobs/runs/delete 39 | 40 | { 41 | "run_id" : {{runID}} 42 | } 43 | 44 | ### 45 | 46 | # Get submitted run output by ID 47 | # @name getRunOutput 48 | @runID={{submitRun.response.body.$.run_id}} 49 | 50 | GET {{baseURL}}2.0/jobs/runs/get-output?run_id={{runID}} 51 | ### 52 | 53 | # Run shows in the list of runs 54 | GET {{baseURL}}2.0/jobs/runs/list 55 | -------------------------------------------------------------------------------- /docs/resources.md: -------------------------------------------------------------------------------- 1 | # Resources 2 | 3 | ## Kubernetes on WSL 4 | 5 | On Windows command line run `kubectl config view` to find the values of [windows-user-name],[minikube-ip],[port]: 6 | 7 | ```sh 8 | mkdir ~/.kube && cp /mnt/c/Users/[windows-user-name]/.kube/config ~/.kube 9 | ``` 10 | 11 | If you are using minikube you need to set the below settings 12 | ```sh 13 | # allow kubectl to trust the certificate authority of minikube 14 | kubectl config set-cluster minikube \ 15 | --server=https://[minikube-ip]:[port] \ 16 | --certificate-authority=/mnt/c/Users/[windows-user-name]/.minikube/ca.crt 17 | 18 | # configure the client certificate to use when talking to minikube 19 | kubectl config set-credentials minikube \ 20 | --client-certificate=/mnt/c/Users/[windows-user-name]/.minikube/client.crt \ 21 | --client-key=/mnt/c/Users/[windows-user-name]/.minikube/client.key 22 | 23 | # create the context minikube with cluster and user info created above 24 | kubectl config set-context minikube --cluster=minikube --user=minikube 25 | ``` 26 | 27 | More info: 28 | 29 | - 
https://devkimchi.com/2018/06/05/running-kubernetes-on-wsl/ 30 | - https://www.jamessturtevant.com/posts/Running-Kubernetes-Minikube-on-Windows-10-with-WSL/ 31 | 32 | ## Build pipelines 33 | 34 | - [Create a pipeline and add a status badge to Github](https://docs.microsoft.com/en-us/azure/devops/pipelines/create-first-pipeline?view=azure-devops&tabs=tfs-2018-2) 35 | - [Customize status badge with shields.io](https://shields.io/) 36 | 37 | ## Controller metrics and dashboards 38 | 39 | For information on how to monitor metrics published from the operator, please review [the metrics page](metrics.md). -------------------------------------------------------------------------------- /docs/roadmap.md: -------------------------------------------------------------------------------- 1 | # Currently Supported 2 | 3 | - Job (djob) 4 | - Run (run) 5 | - Secret Scope & Secret (secretscope) 6 | - Cluster (dcluster) 7 | - DBFS (dbfsblock) 8 | - Workspace (workspaceitem) 9 | 10 | # In Progress 11 | 12 | - Libraries 13 | 14 | # Future Development 15 | 16 | - Group 17 | - Token -------------------------------------------------------------------------------- /docs/samples.md: -------------------------------------------------------------------------------- 1 | # Direct Run 2 | 3 | ## 1. Create a spark cluster and Run databricks notebook 4 | 5 |  6 | 7 | [Direct run sample](samples/1_direct_run) shows how you can create a spark cluster and run a databricks notebook. 8 | 9 | 1. Upload [basic1.ipynb](samples/1_direct_run/basic1.ipynb) 10 | 11 |  12 | 13 | 2. Update `notebook_path` in `samples/1_direct_run/run_basic1.yaml` file 14 | 15 |  16 | 17 | 3. Apply `samples/1_direct_run/run_basic1.yaml` 18 | 19 |  20 | 21 | ## 2. Create an interactive spark cluster and Run a databricks job on that cluster 22 | 23 |  24 | 25 | [Databricks periodic job sample](samples/2_job_run) shows how you can create an interactive spark cluster in databricks and attach it to one or many databricks notebooks. 
26 | 27 | 1. Apply `samples/2_job_run/cluster_interactive1.yaml` file 28 | 29 | 2. Update `existing_cluster_id` in `samples/2_job_run/run_basic1_periodic_on_existing_cluster.yaml` file 30 | 31 | 3. Apply `samples/2_job_run/run_basic1_periodic_on_existing_cluster.yaml` 32 | 33 |  34 | 35 | ## 3. Create Secret scopes, installing dependencies/libraries on spark cluster 36 | 37 |  38 | 39 | [Databricks twitter ingest sample](samples/3_secret_scope) shows how you can create secret scopes in databricks, install libraries on your cluster, run a job that ingests data and acts as a message producer and sends the message to eventhub. 40 | 41 | 1. Upload [eventhub_ingest.ipynb](samples/3_secret_scope/eventhub_ingest.ipynb) 42 | 2. Upload [twitter_ingest.ipynb](samples/3_secret_scope/twitter_ingest.ipynb) 43 | 3. [Create eventhub namespace and eventhub in azure](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-create) 44 | 4. Set up your twitter developer account 45 | 5. Replace `xxxxx` with correct values and create these two secrets 46 | 47 | ``` 48 | kubectl create secret generic twitter-secret --from-literal=TwitterAPIkey=xxxxx --from-literal=TwitterAPISecret=xxxxx --from-literal=TwitterAccessToken=xxxxx --from-literal=TwitterAccessSecret=xxxxx 49 | ``` 50 | 51 | ``` 52 | kubectl create secret generic eventhubnamespace-secret --from-literal=EventhubNamespace=xxxxx --from-literal=SharedAccessKeyName=xxxxx --from-literal=SharedAccessKey=xxxxx --from-literal=ConnectionString=Endpoint=sb://xxxxx.servicebus.windows.net/;SharedAccessKeyName=xxxxx;SharedAccessKey=xxxxx 53 | ``` 54 | 55 | 6. Apply `samples/3_secret_scope/secretscope_twitter.yaml` 56 | 7. Apply `samples/3_secret_scope/secretscope_eventhub.yaml` 57 | 8. Apply `samples/3_secret_scope/cluster_interactive2.yaml` 58 | 9. 
Apply `samples/3_secret_scope/run_twitter1.yaml` 59 | -------------------------------------------------------------------------------- /docs/samples/1_direct_run/basic1.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"code","source":["1+1"],"metadata":{},"outputs":[{"metadata":{},"output_type":"display_data","data":{"text/html":["\n