├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE.txt ├── Makefile ├── PROJECT ├── README.md ├── api └── v1alpha1 │ ├── databasemigration_types.go │ ├── groupversion_info.go │ ├── manageddatabase_types.go │ └── zz_generated.deepcopy.go ├── config ├── certmanager │ ├── certificate.yaml │ ├── kustomization.yaml │ └── kustomizeconfig.yaml ├── crd │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── patches │ │ ├── cainjection_in_databasemigrations.yaml │ │ ├── cainjection_in_manageddatabases.yaml │ │ ├── webhook_in_databasemigrations.yaml │ │ └── webhook_in_manageddatabases.yaml ├── default │ ├── kustomization.yaml │ ├── manager_auth_proxy_patch.yaml │ ├── manager_image_patch.yaml │ ├── manager_prometheus_metrics_patch.yaml │ ├── manager_webhook_patch.yaml │ └── webhookcainjection_patch.yaml ├── manager │ ├── kustomization.yaml │ └── manager.yaml ├── rbac │ ├── auth_proxy_role.yaml │ ├── auth_proxy_role_binding.yaml │ ├── auth_proxy_service.yaml │ ├── kustomization.yaml │ ├── leader_election_role.yaml │ ├── leader_election_role_binding.yaml │ └── role_binding.yaml ├── samples │ ├── dbaoperator_v1alpha1_databasemigration.yaml │ └── dbaoperator_v1alpha1_manageddatabase.yaml └── webhook │ ├── kustomization.yaml │ ├── kustomizeconfig.yaml │ └── service.yaml ├── controllers ├── errors.go ├── jobs.go ├── manageddatabase_controller.go ├── manageddatabase_controller_test.go ├── metrics.go ├── secrets.go └── suite_test.go ├── deploy ├── databasemigration.yaml ├── dba-operator.yaml ├── examples │ ├── debug.yaml │ ├── migrationcontainer │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── migration.py │ │ └── requirements.txt │ ├── quayio-manageddatabase-mysql.yaml │ ├── quayiocreds-secret-mysql.yaml │ ├── v1-appdatabasemigration.yaml │ ├── v2-appdatabasemigration.yaml │ └── v3-appdatabasemigration.yaml ├── manageddatabase.yaml ├── olm │ └── dba-operator.yaml └── pushgateway.yaml ├── go.mod ├── go.sum ├── hack └── boilerplate.go.txt ├── main.go ├── main_test.go ├── pkg ├── dbadmin │ ├── alembic │ │ └── version.go │ ├── connection.go │ ├── dbadmin.go │ ├── dbadminfakes │ │ ├── fake_db_admin.go │ │ └── fake_migration_engine.go │ └── mysqladmin │ │ ├── admin.go │ │ ├── admin_test.go │ │ └── errors.go ├── hints │ ├── hints.go │ └── hints_test.go └── xerrors │ └── errors.go └── tools └── tools.go /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | 10 | # Test binary, build with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Kubernetes Generated files - skip generated files, except for vendored files 17 | 18 | !vendor/**/zz_generated.* 19 | 20 | # editor and IDE paraphernalia 21 | .idea 22 | *.swp 23 | *.swo 24 | *~ 25 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: go 2 | go: 3 | - 1.13 4 | 5 | sudo: false 6 | 7 | before_install: 8 | - curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/v1.16.1/bin/linux/amd64/kubectl && chmod +x kubectl && sudo mv kubectl $GOPATH/bin 9 | - curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $GOPATH/bin v1.20.0 10 | - curl -sL https://github.com/kubernetes-sigs/kubebuilder/releases/download/v2.0.1/kubebuilder_2.0.1_linux_amd64.tar.gz 
| tar -xz -C /tmp/ && cp /tmp/kubebuilder_2.0.1_linux_amd64/bin/* $GOPATH/bin/ 11 | - export KUBEBUILDER_ASSETS=$GOPATH/bin 12 | 13 | script: 14 | - make lint && make test 15 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build the manager binary 2 | FROM golang:1.13.4 as builder 3 | 4 | WORKDIR /workspace 5 | # Copy the Go Modules manifests 6 | COPY go.mod go.mod 7 | COPY go.sum go.sum 8 | # cache deps before building and copying source so that we don't need to re-download as much 9 | # and so that source changes don't invalidate our downloaded layer 10 | RUN go mod download 11 | 12 | # Copy the go source 13 | COPY tools/ tools/ 14 | COPY main.go main.go 15 | COPY api/ api/ 16 | COPY controllers/ controllers/ 17 | COPY pkg/ pkg/ 18 | 19 | # Build 20 | RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o manager main.go 21 | 22 | # Use distroless as minimal base image to package the manager binary 23 | # Refer to https://github.com/GoogleContainerTools/distroless for more details 24 | FROM gcr.io/distroless/static:latest 25 | WORKDIR / 26 | COPY --from=builder /workspace/manager . 27 | ENTRYPOINT ["/manager"] 28 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
203 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # Image URL to use for all building/pushing image targets 2 | IMG ?= controller:latest 3 | # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) 4 | CRD_OPTIONS ?= "crd:trivialVersions=true" 5 | 6 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 7 | ifeq (,$(shell go env GOBIN)) 8 | GOBIN=$(shell go env GOPATH)/bin 9 | else 10 | GOBIN=$(shell go env GOBIN) 11 | endif 12 | 13 | all: manager 14 | 15 | lint: vet 16 | golangci-lint -v run --enable golint --enable prealloc --enable gosec 17 | 18 | # Run tests 19 | test: generate manifests 20 | go test ./api/... ./controllers/... ./pkg/... -coverprofile cover.out 21 | 22 | # Build manager binary 23 | manager: generate 24 | go build -o bin/manager main.go 25 | 26 | installapi: 27 | kubectl apply -f deploy/databasemigration.yaml -f deploy/manageddatabase.yaml 28 | 29 | devenv: installapi 30 | kubectl apply -f deploy/pushgateway.yaml 31 | kubectl apply -f deploy/examples/ 32 | 33 | # Run against the configured Kubernetes cluster in ~/.kube/config 34 | run: generate 35 | go run ./main.go 36 | 37 | # Install CRDs into a cluster 38 | install: manifests 39 | kubectl apply -f config/crd/bases 40 | 41 | # Deploy controller in the configured Kubernetes cluster in ~/.kube/config 42 | deploy: manifests 43 | kubectl apply -f config/crd/bases 44 | kustomize build config/default | kubectl apply -f - 45 | 46 | # Generate manifests, e.g. CRD, RBAC, etc. 47 | manifests: controller-gen 48 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 49 | 50 | # Run go fmt against code 51 | fmt: 52 | go fmt ./... 53 | 54 | # Run go vet against code 55 | vet: 56 | go vet ./... 57 | 58 | # Generate code 59 | generate: controller-gen 60 | $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths=./api/... 61 | go generate ./... 62 | 63 | # Build the docker image 64 | docker-build: test 65 | docker build . -t ${IMG} 66 | @echo "updating kustomize image patch file for manager resource" 67 | sed -i'' -e 's@image: .*@image: '"${IMG}"'@' ./config/default/manager_image_patch.yaml 68 | 69 | # Push the docker image 70 | docker-push: 71 | docker push ${IMG} 72 | 73 | # find or download controller-gen 74 | # download controller-gen if necessary 75 | controller-gen: 76 | ifeq (, $(shell which controller-gen)) 77 | go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.1 78 | CONTROLLER_GEN=$(GOBIN)/controller-gen 79 | else 80 | CONTROLLER_GEN=$(shell which controller-gen) 81 | endif 82 | -------------------------------------------------------------------------------- /PROJECT: -------------------------------------------------------------------------------- 1 | version: "2" 2 | domain: app-sre.redhat.com 3 | repo: github.com/app-sre/dba-operator 4 | resources: 5 | - group: dbaoperator 6 | version: v1alpha1 7 | kind: DatabaseMigration 8 | - group: dbaoperator 9 | version: v1alpha1 10 | kind: ManagedDatabase 11 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Database Operator 2 | 3 | This project will provide an interface for automating traditionally DBA-managed aspects of using a relational database, e.g. migrations, access control, and health. 4 | 5 | ## Development Environment 6 | 7 | ### Prerequisites 8 | 9 | 1. This project is based on [kubebuilder](https://github.com/kubernetes-sigs/kubebuilder), so it and all of its dependencies must be installed using the kubebuilder [installation instructions](https://book.kubebuilder.io/quick-start.html#installation). 10 | 1. Access to a local Kubernetes cluster ([minikube](https://github.com/kubernetes/minikube), [Docker for Mac](https://docs.docker.com/docker-for-mac/install/)) with the `KUBECONFIG` env var set properly. 11 | 1. [kubefwd](https://github.com/txn2/kubefwd) to get access to services running on the cluster and to use stable service names from both the dev env and the cluster. 12 | 13 | ### Dev environment 14 | 15 | ```sh 16 | make devenv 17 | sudo kubefwd svc -n default 18 | docker build -t quay.io/quaymigrations/test deploy/examples/migrationcontainer/ 19 | make run 20 | ``` 21 | 22 | ## Design 23 | 24 | This project will use the operator pattern, along with a set of requirements for defining and packaging schema migrations, data migrations, and user access. 25 | 26 | ### Interface 27 | 28 | * CRD called DatabaseMigration 29 | * Reference to a backfill/data migration container 30 | * Define allowable parallelism for the migration container 31 | * Reference to the name of the previous DatabaseMigration that is being deprecated 32 | * A set of hints about what the migration container will do to the database schema 33 | * CRD called ManagedDatabase 34 | * Reference to a secret which contains admin credentials 35 | * Desired schema version 36 | * Config for how the migration will signal its completion in the database 37 | * e.g. alembic writes a value to a table called `alembic_version` 38 | * Descriptions of the data metrics that the operator should harvest and publish from the managed DB.
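As a sketch of how these fields fit together, here is a `ManagedDatabase` built with the Go types from `api/v1alpha1` (the names and version are hypothetical; see `deploy/examples/` for real manifests):

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	dba "github.com/app-sre/dba-operator/api/v1alpha1"
)

// Illustrative only: a ManagedDatabase that points at a DSN secret holding
// admin credentials and requests a particular schema version.
var exampleDatabase = dba.ManagedDatabase{
	ObjectMeta: metav1.ObjectMeta{Name: "quayio", Namespace: "default"},
	Spec: dba.ManagedDatabaseSpec{
		DesiredSchemaVersion: "v3",
		Connection:           dba.DatabaseConnectionInfo{DSNSecret: "quayiocreds"},
		// alembic signals completion via the alembic_version table
		MigrationEngine: "alembic",
	},
}
```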
### Flow 41 | 42 | 1. Developer creates a migration container conforming to the [migration container spec](deploy/examples/migrationcontainer/README.md) 43 | 1. Developer creates a DatabaseMigration that describes and references the migration container 44 | 1. For some applications (e.g. Quay) there is tooling to generate this from the migration itself 45 | 1. Developer generates versions of the app which use the schema and credentials at the corresponding migration version 46 | 1. Operator loads the DatabaseMigration into a cluster 47 | 1. Operator updates the desired database schema version in the ManagedDatabase CR 48 | 49 | ### Operator Control Loop 50 | 51 | 1. Read and publish metrics from the ManagedDatabase CR 52 | 1. Check if the database schema version matches the desired version 53 | 1. If the desired version is newer, run through as many migration loops as required until the current version matches the desired one 54 | 55 | #### Migration Loop 56 | 57 | 1. Verify that credentials from 2 migrations ago are unused 58 | 1. This ensures that code which is incompatible with the change we're about to make is no longer accessing the database using a published secret 59 | 1. Ensure there are no existing connections using the credentials about to be dropped 60 | 1. Drop credentials from 2 migrations ago 61 | 1. Run the database migration if required 62 | 1. Generate and add credentials for accessing the database at this migration version 63 | 1. Write a secret containing the password that was generated
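A minimal Go sketch of that loop, with a hypothetical `DbAdmin` interface standing in for the engine-specific code under `pkg/dbadmin` (illustrative only, not the operator's actual implementation):

```go
package example

import "fmt"

// DbAdmin is an assumed interface; the method names here are hypothetical.
type DbAdmin interface {
	CredentialsInUse(version string) (bool, error)
	DropCredentials(version string) error
	RunMigration(version string) error
	PublishCredentialsSecret(version string) error
}

// runMigrationLoop sketches one iteration of the migration loop: retire the
// credentials from two migrations ago, run the migration, then publish
// credentials for the new version as a Secret.
func runMigrationLoop(db DbAdmin, twoVersionsAgo, next string) error {
	inUse, err := db.CredentialsInUse(twoVersionsAgo)
	if err != nil {
		return err
	}
	if inUse {
		return fmt.Errorf("credentials for %s still have active connections", twoVersionsAgo)
	}
	if err := db.DropCredentials(twoVersionsAgo); err != nil {
		return err
	}
	if err := db.RunMigration(next); err != nil {
		return err
	}
	return db.PublishCredentialsSecret(next)
}
```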
### Hints 66 | 67 | The following are the scenarios that we aim to detect via hints 68 | combined with metadata from the running database. 69 | 70 | 1. Applying a blocking index creation to a table that is large 71 | 1. Applying a blocking index creation to a table that has heavy writes 72 | 1. Adding a column to a table that is large ✔ 73 | 1. Adding a column to a table that has heavy writes 74 | 1. Adding a unique constraint to a table which has non-unique values ✔ 75 | 1. Adding a constraint to a column on a table that is large 76 | 1. Adding a constraint to a column on a table that has heavy writes 77 | 1. Making a column non-null when the database contains existing nulls ✔ 78 | 1. Adding a non-null column without providing a server default on a table that already has data ✔
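In the CRD, hints like these are expressed with the `DatabaseMigrationSchemaHint` type from `api/v1alpha1`. For example (hypothetical table and column names), the last scenario above could be declared as:

```go
package example

import dba "github.com/app-sre/dba-operator/api/v1alpha1"

// Illustrative only: a hint saying the migration adds a non-nullable column
// with no server default to a "repository" table.
var exampleHint = dba.DatabaseMigrationSchemaHint{
	TableReference: dba.TableReference{TableName: "repository"},
	Operation:      "addColumn",
	Columns: []dba.DatabaseMigrationSchemaHintColumn{{
		Name:             "description",
		NotNullable:      true,
		HasServerDefault: false,
	}},
}
```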
### FAQs 81 | 82 | #### Why report success or failure to Prometheus when the job status has that information already? 83 | 84 | This will allow us to keep metrics on how often migrations are passing or failing. In aggregate 85 | this could allow one to manage a large number of databases statistically. 86 | 87 | #### Is it desirable to alert based on a failed migration? What does the SOP look like in that case? 88 | 89 | It may be desirable to inform an SRE immediately if certain migrations 90 | have failed. The SOP would probably look like: 91 | 92 | 1. Get paged 93 | 2. Read the status block in the `ManagedDatabase` object to find out what failed and when 94 | 3. Debug or call support 95 | 96 | #### What happens when the database is rolled back to an earlier version via a backup? 97 | 98 | The rectification loop will see that the current version is older than the desired version 99 | and start the migration process again. 100 | 101 | Open Question: should we clean up any credentials that exist for migrations past the current version 102 | shown by the backup? 103 | 104 | #### What happens when a migration container fails? Do we roll back automatically? 105 | 106 | We report the status back via Prometheus, which can then be alerted on, and write a corresponding 107 | status line in the k8s job. We will not roll back automatically. 108 | 109 | #### What happens when a migration is running and Prometheus or the Prometheus push gateway dies? 110 | 111 | We will page if Prometheus is offline. We will page if Prometheus can't scrape the push gateway. 112 | In no case should the operator modify the status of the `Job` object based on an inability to read 113 | status/metrics. 114 | 115 | #### How do we get the version-specific app credentials to the application servers? 116 | 117 | TBD 118 | 119 | #### Can we ask K8s if anyone is using the app credentials before rolling them? 120 | 121 | TBD 122 | 123 | #### What internal state does the operator keep? What happens when it crashes or is upgraded? 124 | 125 | TBD 126 | 127 | ### Notes 128 | 129 | * DSL for describing the schema migration itself 130 | * Model in peewee? 131 | * Use the [Liquibase](https://rollout.io/blog/liquibase-tutorial-manage-database-schema/) format? 132 | * Use something that can be generated from an alembic migration? 133 | * Data migrations 134 | * Python? 135 | * Container-based and language-independent? 136 | * Access control 137 | * Automatically generate 4 sets of credentials 138 | * How to communicate them to the app? 139 | * CRD status block? 140 | * Secret with a name? 141 | * Credentials must not be shared in the app code that gets distributed to customers 142 | * Shared username, with the password stored in a secret named after the username? 143 | * How to prevent the app from skipping the app-specific credentials and just using the admin credentials 144 | * Only give the DB operator the admin credentials 145 | * Migration models 146 | * Up/down containers 147 | * Multiple backends can be created easily, e.g. alembic for SQLAlchemy projects 148 | * Less possibility for feature creep 149 | * Stories that will be harder 150 | * Migration failure visibility 151 | * App-to-schema mapping, i.e. how to force provisioning/deprovisioning of credentials 152 | * DSL 153 | * Tooling around the content of the migrations as matched against the running database 154 | * Adding a column on a table with a lot of write pressure will be bad 155 | * Adding an index without background indexing will lock the table 156 | * Stories that will be harder 157 | * All of them, since there is an extra layer 158 | * Hints 159 | * Keep the migrations in a backend-specific container 160 | * Put some hints about what will be done to the tables in the migration CR 161 | * Adding an index to a table 162 | * The MySQL implementation will check the DB to see if that will block 163 | * Adding a unique index to a table 164 | * The MySQL implementation will check the data and warn if it will fail, and warn that it will block writes 165 | * Adding a column to a table 166 | * The MySQL implementation will check the table size and warn if it should use pt-online-schema-change, and that it may block 167 | 168 | ## Previous Work 169 | 170 | * [pt-online-schema-change](https://www.percona.com/doc/percona-toolkit/LATEST/pt-online-schema-change.html) 171 | * [Liquibase](https://rollout.io/blog/liquibase-tutorial-manage-database-schema/) 172 | * [MySQL Online DDL](https://www.fromdual.com/online-ddl_vs_pt-online-schema-change) 173 | * [Facebook OSC](https://github.com/facebookincubator/OnlineSchemaChange) 174 | * [Quay 4-Phase Migrations](https://github.com/coreos-inc/quay-policies-encrypted/blob/master/dbmigrations.md) 175 | -------------------------------------------------------------------------------- /api/v1alpha1/databasemigration_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package v1alpha1 17 | 18 | import ( 19 | corev1 "k8s.io/api/core/v1" 20 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 21 | ) 22 | 23 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 24 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
25 | 26 | // DatabaseMigrationSchemaHintColumn contains information about one column 27 | // being added to a table, or in a table definition 28 | type DatabaseMigrationSchemaHintColumn struct { 29 | Name string `json:"name,omitempty"` 30 | NotNullable bool `json:"notNullable,omitempty"` 31 | HasServerDefault bool `json:"hasServerDefault,omitempty"` 32 | } 33 | 34 | // DatabaseMigrationSchemaHint approximately describes what the migration is going to change 35 | type DatabaseMigrationSchemaHint struct { 36 | TableReference `json:",omitempty"` 37 | 38 | // +kubebuilder:validation:Enum=addColumn;createTable;createIndex;alterColumn 39 | Operation string `json:"operation,omitempty"` 40 | 41 | Columns []DatabaseMigrationSchemaHintColumn `json:"columns,omitempty"` 42 | 43 | // +kubebuilder:validation:Enum=index;unique;fulltext 44 | IndexType string `json:"indexType,omitempty"` 45 | } 46 | 47 | // DatabaseMigrationSpec defines the desired state of DatabaseMigration 48 | type DatabaseMigrationSpec struct { 49 | // +kubebuilder:validation:MinLength=1 50 | // +kubebuilder:validation:MaxLength=12 51 | Previous string `json:"previous,omitempty"` 52 | MigrationContainerSpec corev1.Container `json:"migrationContainerSpec,omitempty"` 53 | Scalable bool `json:"scalable,omitempty"` 54 | SchemaHints []DatabaseMigrationSchemaHint `json:"schemaHints,omitempty"` 55 | } 56 | 57 | // DatabaseMigrationStatus defines the observed state of DatabaseMigration 58 | type DatabaseMigrationStatus struct { 59 | } 60 | 61 | // +kubebuilder:object:root=true 62 | 63 | // DatabaseMigration is the Schema for the databasemigrations API 64 | // +kubebuilder:printcolumn:name="Previous",type=string,JSONPath=`.spec.previous` 65 | type DatabaseMigration struct { 66 | metav1.TypeMeta `json:",inline"` 67 | metav1.ObjectMeta `json:"metadata,omitempty"` 68 | 69 | Spec DatabaseMigrationSpec `json:"spec,omitempty"` 70 | Status DatabaseMigrationStatus `json:"status,omitempty"` 71 | } 72 | 73 | // +kubebuilder:object:root=true 74 | 75 | // DatabaseMigrationList contains a list of DatabaseMigration 76 | type DatabaseMigrationList struct { 77 | metav1.TypeMeta `json:",inline"` 78 | metav1.ListMeta `json:"metadata,omitempty"` 79 | Items []DatabaseMigration `json:"items"` 80 | } 81 | 82 | func init() { 83 | SchemeBuilder.Register(&DatabaseMigration{}, &DatabaseMigrationList{}) 84 | } 85 | -------------------------------------------------------------------------------- /api/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | // Package v1alpha1 contains API Schema definitions for the dbaoperator v1alpha1 API group 17 | // +kubebuilder:object:generate=true 18 | // +groupName=dbaoperator.app-sre.redhat.com 19 | package v1alpha1 20 | 21 | import ( 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "sigs.k8s.io/controller-runtime/pkg/scheme" 24 | ) 25 | 26 | var ( 27 | // GroupVersion is the group version used to register these objects 28 | GroupVersion = schema.GroupVersion{Group: "dbaoperator.app-sre.redhat.com", Version: "v1alpha1"} 29 | 30 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 31 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 32 | 33 | // AddToScheme adds the types in this group-version to the given scheme. 34 | AddToScheme = SchemeBuilder.AddToScheme 35 | ) 36 | -------------------------------------------------------------------------------- /api/v1alpha1/manageddatabase_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package v1alpha1 17 | 18 | import ( 19 | corev1 "k8s.io/api/core/v1" 20 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 21 | ) 22 | 23 | // EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! 24 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 25 | 26 | // ManagedDatabaseSpec defines the desired state of ManagedDatabase 27 | type ManagedDatabaseSpec struct { 28 | DesiredSchemaVersion string `json:"desiredSchemaVersion,omitempty"` 29 | Connection DatabaseConnectionInfo `json:"connection,omitempty"` 30 | MigrationEngine string `json:"migrationEngine,omitempty"` 31 | MigrationContainerConfig *MigrationContainerConfig `json:"migrationContainerConfig,omitempty"` 32 | ExportDataMetrics *DataMetrics `json:"exportDataMetrics,omitempty"` 33 | HintsEngine *HintsEngineConfig `json:"hintsEngine,omitempty"` 34 | ReadOnly bool `json:"readonly,omitempty"` 35 | } 36 | 37 | // HintsEngineConfig defines values that give the hints engine context about 38 | // how this ManagedDatabase runs, context that can't be queried from the 39 | // database directly.
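// For example (illustrative values, not defaults), a deployment might set
// Enabled to true and LargeTableRowsThreshold to 1000000 so that tables with
// more than one million estimated rows trigger the large-table hints.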
40 | type HintsEngineConfig struct { 41 | Enabled bool `json:"enabled,omitempty"` 42 | 43 | // +kubebuilder:validation:Minimum=1 44 | LargeTableRowsThreshold uint64 `json:"largetableRowsThreshold,omitempty"` 45 | } 46 | 47 | // DataMetrics declares what information the DBA operator should expose from the 48 | // database under management 49 | type DataMetrics struct { 50 | TableEstimatedSize *[]TableReference `json:"tableEstimatedSize,omitempty"` 51 | TableNextID *[]TableReference `json:"tableNextID,omitempty"` 52 | SQLQuery *[]SQLQueryMetric `json:"sqlQuery,omitempty"` 53 | } 54 | 55 | // TableReference refers to a DB table by name 56 | type TableReference struct { 57 | TableName string `json:"tableName,omitempty"` 58 | } 59 | 60 | // SQLQueryMetric describes a SQL query to run against the database and how to 61 | // expose it as a metric. It must select exactly one value in one row, and the 62 | // value must represent either a counter (uint) or a gauge (float). 63 | type SQLQueryMetric struct { 64 | // +kubebuilder:validation:Pattern="SELECT [^;]+;" 65 | Query string `json:"query,omitempty"` 66 | 67 | PrometheusMetric PrometheusMetricExporter `json:"prometheusMetric,omitempty"` 68 | } 69 | 70 | // PrometheusMetricExporter describes how a given value should be exported. 71 | type PrometheusMetricExporter struct { 72 | // +kubebuilder:validation:Enum=counter;gauge 73 | ValueType string `json:"valueType,omitempty"` 74 | 75 | // +kubebuilder:validation:Pattern=database_[a-z_]+[a-z] 76 | Name string `json:"name,omitempty"` 77 | 78 | HelpString string `json:"helpString,omitempty"` 79 | ExtraLabels map[string]string `json:"extraLabels,omitempty"` 80 | }
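// Illustrative example (not part of the original file): a SQLQueryMetric that
// exports the result of a hypothetical row-count query as a gauge whose name
// matches the pattern above.
var exampleSQLQueryMetric = SQLQueryMetric{
	Query: "SELECT COUNT(*) FROM repository;",
	PrometheusMetric: PrometheusMetricExporter{
		ValueType:  "gauge",
		Name:       "database_repository_rows",
		HelpString: "Number of rows in the repository table",
	},
}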
81 | 82 | // DatabaseConnectionInfo defines engine-specific connection parameters to establish 83 | // a connection to the database. 84 | type DatabaseConnectionInfo struct { 85 | // +kubebuilder:validation:MinLength=1 86 | DSNSecret string `json:"dsnSecret,omitempty"` 87 | } 88 | 89 | // MigrationContainerConfig defines extra configuration that a migration 90 | // container may require before it is able to run. Specify a secret name 91 | // and how to bind it into the container. 92 | type MigrationContainerConfig struct { 93 | Secret string `json:"secret,omitempty"` 94 | VolumeMount corev1.VolumeMount `json:"volumeMount,omitempty"` 95 | } 96 | 97 | // ManagedDatabaseError contains information about an error that occurred when 98 | // reconciling this ManagedDatabase, and whether the error is considered 99 | // temporary/transient. 100 | type ManagedDatabaseError struct { 101 | Message string `json:"message,omitempty"` 102 | Temporary bool `json:"temporary,omitempty"` 103 | } 104 | 105 | // ManagedDatabaseStatus defines the observed state of ManagedDatabase 106 | type ManagedDatabaseStatus struct { 107 | CurrentVersion string `json:"currentVersion,omitempty"` 108 | Errors []ManagedDatabaseError `json:"errors,omitempty"` 109 | } 110 | 111 | // ManagedDatabase is the Schema for the manageddatabases API 112 | // +kubebuilder:object:root=true 113 | // +kubebuilder:subresource:status 114 | // +kubebuilder:printcolumn:name="Current Version",type=string,JSONPath=`.status.currentVersion` 115 | // +kubebuilder:printcolumn:name="Desired Version",type=string,JSONPath=`.spec.desiredSchemaVersion` 116 | // +kubebuilder:printcolumn:name="Error",type=string,JSONPath=`.status.errors[0].message` 117 | type ManagedDatabase struct { 118 | metav1.TypeMeta `json:",inline"` 119 | metav1.ObjectMeta `json:"metadata,omitempty"` 120 | 121 | Spec ManagedDatabaseSpec `json:"spec,omitempty"` 122 | Status ManagedDatabaseStatus `json:"status,omitempty"` 123 | } 124 | 125 | // +kubebuilder:object:root=true 126 | 127 | // ManagedDatabaseList contains a list of ManagedDatabase 128 | type ManagedDatabaseList struct { 129 | metav1.TypeMeta `json:",inline"` 130 | metav1.ListMeta `json:"metadata,omitempty"` 131 | Items []ManagedDatabase `json:"items"` 132 | } 133 | 134 | func init() { 135 | SchemeBuilder.Register(&ManagedDatabase{}, &ManagedDatabaseList{}) 136 | } 137 | -------------------------------------------------------------------------------- /api/v1alpha1/zz_generated.deepcopy.go: -------------------------------------------------------------------------------- 1 | // +build !ignore_autogenerated 2 | 3 | /* 4 | 5 | Licensed under the Apache License, Version 2.0 (the "License"); 6 | you may not use this file except in compliance with the License. 7 | You may obtain a copy of the License at 8 | 9 | http://www.apache.org/licenses/LICENSE-2.0 10 | 11 | Unless required by applicable law or agreed to in writing, software 12 | distributed under the License is distributed on an "AS IS" BASIS, 13 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | See the License for the specific language governing permissions and 15 | limitations under the License. 16 | */ 17 | 18 | // Code generated by controller-gen. DO NOT EDIT. 19 | 20 | package v1alpha1 21 | 22 | import ( 23 | runtime "k8s.io/apimachinery/pkg/runtime" 24 | ) 25 | 26 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
27 | func (in *DataMetrics) DeepCopyInto(out *DataMetrics) { 28 | *out = *in 29 | if in.TableEstimatedSize != nil { 30 | in, out := &in.TableEstimatedSize, &out.TableEstimatedSize 31 | *out = new([]TableReference) 32 | if **in != nil { 33 | in, out := *in, *out 34 | *out = make([]TableReference, len(*in)) 35 | copy(*out, *in) 36 | } 37 | } 38 | if in.TableNextID != nil { 39 | in, out := &in.TableNextID, &out.TableNextID 40 | *out = new([]TableReference) 41 | if **in != nil { 42 | in, out := *in, *out 43 | *out = make([]TableReference, len(*in)) 44 | copy(*out, *in) 45 | } 46 | } 47 | if in.SQLQuery != nil { 48 | in, out := &in.SQLQuery, &out.SQLQuery 49 | *out = new([]SQLQueryMetric) 50 | if **in != nil { 51 | in, out := *in, *out 52 | *out = make([]SQLQueryMetric, len(*in)) 53 | for i := range *in { 54 | (*in)[i].DeepCopyInto(&(*out)[i]) 55 | } 56 | } 57 | } 58 | } 59 | 60 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DataMetrics. 61 | func (in *DataMetrics) DeepCopy() *DataMetrics { 62 | if in == nil { 63 | return nil 64 | } 65 | out := new(DataMetrics) 66 | in.DeepCopyInto(out) 67 | return out 68 | } 69 | 70 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 71 | func (in *DatabaseConnectionInfo) DeepCopyInto(out *DatabaseConnectionInfo) { 72 | *out = *in 73 | } 74 | 75 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseConnectionInfo. 76 | func (in *DatabaseConnectionInfo) DeepCopy() *DatabaseConnectionInfo { 77 | if in == nil { 78 | return nil 79 | } 80 | out := new(DatabaseConnectionInfo) 81 | in.DeepCopyInto(out) 82 | return out 83 | } 84 | 85 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 86 | func (in *DatabaseMigration) DeepCopyInto(out *DatabaseMigration) { 87 | *out = *in 88 | out.TypeMeta = in.TypeMeta 89 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 90 | in.Spec.DeepCopyInto(&out.Spec) 91 | out.Status = in.Status 92 | } 93 | 94 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseMigration. 95 | func (in *DatabaseMigration) DeepCopy() *DatabaseMigration { 96 | if in == nil { 97 | return nil 98 | } 99 | out := new(DatabaseMigration) 100 | in.DeepCopyInto(out) 101 | return out 102 | } 103 | 104 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 105 | func (in *DatabaseMigration) DeepCopyObject() runtime.Object { 106 | if c := in.DeepCopy(); c != nil { 107 | return c 108 | } 109 | return nil 110 | } 111 | 112 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 113 | func (in *DatabaseMigrationList) DeepCopyInto(out *DatabaseMigrationList) { 114 | *out = *in 115 | out.TypeMeta = in.TypeMeta 116 | out.ListMeta = in.ListMeta 117 | if in.Items != nil { 118 | in, out := &in.Items, &out.Items 119 | *out = make([]DatabaseMigration, len(*in)) 120 | for i := range *in { 121 | (*in)[i].DeepCopyInto(&(*out)[i]) 122 | } 123 | } 124 | } 125 | 126 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseMigrationList. 
127 | func (in *DatabaseMigrationList) DeepCopy() *DatabaseMigrationList { 128 | if in == nil { 129 | return nil 130 | } 131 | out := new(DatabaseMigrationList) 132 | in.DeepCopyInto(out) 133 | return out 134 | } 135 | 136 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 137 | func (in *DatabaseMigrationList) DeepCopyObject() runtime.Object { 138 | if c := in.DeepCopy(); c != nil { 139 | return c 140 | } 141 | return nil 142 | } 143 | 144 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 145 | func (in *DatabaseMigrationSchemaHint) DeepCopyInto(out *DatabaseMigrationSchemaHint) { 146 | *out = *in 147 | out.TableReference = in.TableReference 148 | if in.Columns != nil { 149 | in, out := &in.Columns, &out.Columns 150 | *out = make([]DatabaseMigrationSchemaHintColumn, len(*in)) 151 | copy(*out, *in) 152 | } 153 | } 154 | 155 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseMigrationSchemaHint. 156 | func (in *DatabaseMigrationSchemaHint) DeepCopy() *DatabaseMigrationSchemaHint { 157 | if in == nil { 158 | return nil 159 | } 160 | out := new(DatabaseMigrationSchemaHint) 161 | in.DeepCopyInto(out) 162 | return out 163 | } 164 | 165 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 166 | func (in *DatabaseMigrationSchemaHintColumn) DeepCopyInto(out *DatabaseMigrationSchemaHintColumn) { 167 | *out = *in 168 | } 169 | 170 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseMigrationSchemaHintColumn. 171 | func (in *DatabaseMigrationSchemaHintColumn) DeepCopy() *DatabaseMigrationSchemaHintColumn { 172 | if in == nil { 173 | return nil 174 | } 175 | out := new(DatabaseMigrationSchemaHintColumn) 176 | in.DeepCopyInto(out) 177 | return out 178 | } 179 | 180 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 181 | func (in *DatabaseMigrationSpec) DeepCopyInto(out *DatabaseMigrationSpec) { 182 | *out = *in 183 | in.MigrationContainerSpec.DeepCopyInto(&out.MigrationContainerSpec) 184 | if in.SchemaHints != nil { 185 | in, out := &in.SchemaHints, &out.SchemaHints 186 | *out = make([]DatabaseMigrationSchemaHint, len(*in)) 187 | for i := range *in { 188 | (*in)[i].DeepCopyInto(&(*out)[i]) 189 | } 190 | } 191 | } 192 | 193 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseMigrationSpec. 194 | func (in *DatabaseMigrationSpec) DeepCopy() *DatabaseMigrationSpec { 195 | if in == nil { 196 | return nil 197 | } 198 | out := new(DatabaseMigrationSpec) 199 | in.DeepCopyInto(out) 200 | return out 201 | } 202 | 203 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 204 | func (in *DatabaseMigrationStatus) DeepCopyInto(out *DatabaseMigrationStatus) { 205 | *out = *in 206 | } 207 | 208 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatabaseMigrationStatus. 209 | func (in *DatabaseMigrationStatus) DeepCopy() *DatabaseMigrationStatus { 210 | if in == nil { 211 | return nil 212 | } 213 | out := new(DatabaseMigrationStatus) 214 | in.DeepCopyInto(out) 215 | return out 216 | } 217 | 218 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
219 | func (in *HintsEngineConfig) DeepCopyInto(out *HintsEngineConfig) { 220 | *out = *in 221 | } 222 | 223 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HintsEngineConfig. 224 | func (in *HintsEngineConfig) DeepCopy() *HintsEngineConfig { 225 | if in == nil { 226 | return nil 227 | } 228 | out := new(HintsEngineConfig) 229 | in.DeepCopyInto(out) 230 | return out 231 | } 232 | 233 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 234 | func (in *ManagedDatabase) DeepCopyInto(out *ManagedDatabase) { 235 | *out = *in 236 | out.TypeMeta = in.TypeMeta 237 | in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) 238 | in.Spec.DeepCopyInto(&out.Spec) 239 | in.Status.DeepCopyInto(&out.Status) 240 | } 241 | 242 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDatabase. 243 | func (in *ManagedDatabase) DeepCopy() *ManagedDatabase { 244 | if in == nil { 245 | return nil 246 | } 247 | out := new(ManagedDatabase) 248 | in.DeepCopyInto(out) 249 | return out 250 | } 251 | 252 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 253 | func (in *ManagedDatabase) DeepCopyObject() runtime.Object { 254 | if c := in.DeepCopy(); c != nil { 255 | return c 256 | } 257 | return nil 258 | } 259 | 260 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 261 | func (in *ManagedDatabaseError) DeepCopyInto(out *ManagedDatabaseError) { 262 | *out = *in 263 | } 264 | 265 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDatabaseError. 266 | func (in *ManagedDatabaseError) DeepCopy() *ManagedDatabaseError { 267 | if in == nil { 268 | return nil 269 | } 270 | out := new(ManagedDatabaseError) 271 | in.DeepCopyInto(out) 272 | return out 273 | } 274 | 275 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 276 | func (in *ManagedDatabaseList) DeepCopyInto(out *ManagedDatabaseList) { 277 | *out = *in 278 | out.TypeMeta = in.TypeMeta 279 | out.ListMeta = in.ListMeta 280 | if in.Items != nil { 281 | in, out := &in.Items, &out.Items 282 | *out = make([]ManagedDatabase, len(*in)) 283 | for i := range *in { 284 | (*in)[i].DeepCopyInto(&(*out)[i]) 285 | } 286 | } 287 | } 288 | 289 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDatabaseList. 290 | func (in *ManagedDatabaseList) DeepCopy() *ManagedDatabaseList { 291 | if in == nil { 292 | return nil 293 | } 294 | out := new(ManagedDatabaseList) 295 | in.DeepCopyInto(out) 296 | return out 297 | } 298 | 299 | // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 300 | func (in *ManagedDatabaseList) DeepCopyObject() runtime.Object { 301 | if c := in.DeepCopy(); c != nil { 302 | return c 303 | } 304 | return nil 305 | } 306 | 307 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
308 | func (in *ManagedDatabaseSpec) DeepCopyInto(out *ManagedDatabaseSpec) { 309 | *out = *in 310 | out.Connection = in.Connection 311 | if in.MigrationContainerConfig != nil { 312 | in, out := &in.MigrationContainerConfig, &out.MigrationContainerConfig 313 | *out = new(MigrationContainerConfig) 314 | (*in).DeepCopyInto(*out) 315 | } 316 | if in.ExportDataMetrics != nil { 317 | in, out := &in.ExportDataMetrics, &out.ExportDataMetrics 318 | *out = new(DataMetrics) 319 | (*in).DeepCopyInto(*out) 320 | } 321 | if in.HintsEngine != nil { 322 | in, out := &in.HintsEngine, &out.HintsEngine 323 | *out = new(HintsEngineConfig) 324 | **out = **in 325 | } 326 | } 327 | 328 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDatabaseSpec. 329 | func (in *ManagedDatabaseSpec) DeepCopy() *ManagedDatabaseSpec { 330 | if in == nil { 331 | return nil 332 | } 333 | out := new(ManagedDatabaseSpec) 334 | in.DeepCopyInto(out) 335 | return out 336 | } 337 | 338 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 339 | func (in *ManagedDatabaseStatus) DeepCopyInto(out *ManagedDatabaseStatus) { 340 | *out = *in 341 | if in.Errors != nil { 342 | in, out := &in.Errors, &out.Errors 343 | *out = make([]ManagedDatabaseError, len(*in)) 344 | copy(*out, *in) 345 | } 346 | } 347 | 348 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedDatabaseStatus. 349 | func (in *ManagedDatabaseStatus) DeepCopy() *ManagedDatabaseStatus { 350 | if in == nil { 351 | return nil 352 | } 353 | out := new(ManagedDatabaseStatus) 354 | in.DeepCopyInto(out) 355 | return out 356 | } 357 | 358 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 359 | func (in *MigrationContainerConfig) DeepCopyInto(out *MigrationContainerConfig) { 360 | *out = *in 361 | in.VolumeMount.DeepCopyInto(&out.VolumeMount) 362 | } 363 | 364 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MigrationContainerConfig. 365 | func (in *MigrationContainerConfig) DeepCopy() *MigrationContainerConfig { 366 | if in == nil { 367 | return nil 368 | } 369 | out := new(MigrationContainerConfig) 370 | in.DeepCopyInto(out) 371 | return out 372 | } 373 | 374 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 375 | func (in *PrometheusMetricExporter) DeepCopyInto(out *PrometheusMetricExporter) { 376 | *out = *in 377 | if in.ExtraLabels != nil { 378 | in, out := &in.ExtraLabels, &out.ExtraLabels 379 | *out = make(map[string]string, len(*in)) 380 | for key, val := range *in { 381 | (*out)[key] = val 382 | } 383 | } 384 | } 385 | 386 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusMetricExporter. 387 | func (in *PrometheusMetricExporter) DeepCopy() *PrometheusMetricExporter { 388 | if in == nil { 389 | return nil 390 | } 391 | out := new(PrometheusMetricExporter) 392 | in.DeepCopyInto(out) 393 | return out 394 | } 395 | 396 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 397 | func (in *SQLQueryMetric) DeepCopyInto(out *SQLQueryMetric) { 398 | *out = *in 399 | in.PrometheusMetric.DeepCopyInto(&out.PrometheusMetric) 400 | } 401 | 402 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQLQueryMetric. 
403 | func (in *SQLQueryMetric) DeepCopy() *SQLQueryMetric { 404 | if in == nil { 405 | return nil 406 | } 407 | out := new(SQLQueryMetric) 408 | in.DeepCopyInto(out) 409 | return out 410 | } 411 | 412 | // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 413 | func (in *TableReference) DeepCopyInto(out *TableReference) { 414 | *out = *in 415 | } 416 | 417 | // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableReference. 418 | func (in *TableReference) DeepCopy() *TableReference { 419 | if in == nil { 420 | return nil 421 | } 422 | out := new(TableReference) 423 | in.DeepCopyInto(out) 424 | return out 425 | } 426 | -------------------------------------------------------------------------------- /config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 2 | # More documentation can be found at https://docs.cert-manager.io 3 | apiVersion: certmanager.k8s.io/v1alpha1 4 | kind: Issuer 5 | metadata: 6 | name: selfsigned-issuer 7 | namespace: system 8 | spec: 9 | selfSigned: {} 10 | --- 11 | apiVersion: certmanager.k8s.io/v1alpha1 12 | kind: Certificate 13 | metadata: 14 | name: serving-cert # this name should match the one that appears in kustomizeconfig.yaml 15 | namespace: system 16 | spec: 17 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 18 | commonName: $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 19 | dnsNames: 20 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 21 | issuerRef: 22 | kind: Issuer 23 | name: selfsigned-issuer 24 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 25 | -------------------------------------------------------------------------------- /config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: certmanager.k8s.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: certmanager.k8s.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: certmanager.k8s.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: certmanager.k8s.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/dbaoperator.app-sre.redhat.com_databasemigrations.yaml 6 | - bases/dbaoperator.app-sre.redhat.com_manageddatabases.yaml 7 | # +kubebuilder:scaffold:crdkustomizeresource 8 | 9 | patches: 10 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix.
11 | # patches here are for enabling the conversion webhook for each CRD 12 | #- patches/webhook_in_databasemigrations.yaml 13 | #- patches/webhook_in_manageddatabases.yaml 14 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 15 | 16 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 17 | # patches here are for enabling the CA injection for each CRD 18 | #- patches/cainjection_in_databasemigrations.yaml 19 | #- patches/cainjection_in_manageddatabases.yaml 20 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 21 | 22 | # the following config is for teaching kustomize how to do kustomization for CRDs. 23 | configurations: 24 | - kustomizeconfig.yaml 25 | -------------------------------------------------------------------------------- /config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | group: apiextensions.k8s.io 8 | path: spec/conversion/webhookClientConfig/service/name 9 | 10 | namespace: 11 | - kind: CustomResourceDefinition 12 | group: apiextensions.k8s.io 13 | path: spec/conversion/webhookClientConfig/service/namespace 14 | create: false 15 | 16 | varReference: 17 | - path: metadata/annotations 18 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_databasemigrations.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: databasemigrations.dbaoperator.app-sre.redhat.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/cainjection_in_manageddatabases.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: manageddatabases.dbaoperator.app-sre.redhat.com 9 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_databasemigrations.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 
3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: databasemigrations.dbaoperator.app-sre.redhat.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/crd/patches/webhook_in_manageddatabases.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: manageddatabases.dbaoperator.app-sre.redhat.com 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: dba-operator-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: dba-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml 20 | #- ../webhook 21 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. 22 | #- ../certmanager 23 | 24 | patches: 25 | - manager_image_patch.yaml 26 | # Protect the /metrics endpoint by putting it behind auth. 27 | # Only one of manager_auth_proxy_patch.yaml and 28 | # manager_prometheus_metrics_patch.yaml should be enabled. 29 | - manager_auth_proxy_patch.yaml 30 | # If you want your controller-manager to expose the /metrics 31 | # endpoint w/o any authn/z, uncomment the following line and 32 | # comment manager_auth_proxy_patch.yaml. 33 | # Only one of manager_auth_proxy_patch.yaml and 34 | # manager_prometheus_metrics_patch.yaml should be enabled. 35 | #- manager_prometheus_metrics_patch.yaml 36 | 37 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml 38 | #- manager_webhook_patch.yaml 39 | 40 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 41 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 
42 | # 'CERTMANAGER' needs to be enabled to use ca injection
43 | #- webhookcainjection_patch.yaml
44 | 
45 | # the following config is for teaching kustomize how to do var substitution
46 | vars:
47 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
48 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
49 | #  objref:
50 | #    kind: Certificate
51 | #    group: certmanager.k8s.io
52 | #    version: v1alpha1
53 | #    name: serving-cert # this name should match the one in certificate.yaml
54 | #  fieldref:
55 | #    fieldpath: metadata.namespace
56 | #- name: CERTIFICATE_NAME
57 | #  objref:
58 | #    kind: Certificate
59 | #    group: certmanager.k8s.io
60 | #    version: v1alpha1
61 | #    name: serving-cert # this name should match the one in certificate.yaml
62 | #- name: SERVICE_NAMESPACE # namespace of the service
63 | #  objref:
64 | #    kind: Service
65 | #    version: v1
66 | #    name: webhook-service
67 | #  fieldref:
68 | #    fieldpath: metadata.namespace
69 | #- name: SERVICE_NAME
70 | #  objref:
71 | #    kind: Service
72 | #    version: v1
73 | #    name: webhook-service
74 | 
-------------------------------------------------------------------------------- /config/default/manager_auth_proxy_patch.yaml: --------------------------------------------------------------------------------
1 | # This patch injects a sidecar container that acts as an HTTP proxy for the controller manager;
2 | # it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 |   name: controller-manager
7 |   namespace: system
8 | spec:
9 |   template:
10 |     spec:
11 |       containers:
12 |       - name: kube-rbac-proxy
13 |         image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.0
14 |         args:
15 |         - "--secure-listen-address=0.0.0.0:8443"
16 |         - "--upstream=http://127.0.0.1:8080/"
17 |         - "--logtostderr=true"
18 |         - "--v=10"
19 |         ports:
20 |         - containerPort: 8443
21 |           name: https
22 |       - name: manager
23 |         args:
24 |         - "--metrics-addr=127.0.0.1:8080"
25 |         - "--enable-leader-election"
26 | 
-------------------------------------------------------------------------------- /config/default/manager_image_patch.yaml: --------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: controller-manager
5 |   namespace: system
6 | spec:
7 |   template:
8 |     spec:
9 |       containers:
10 |       # Change the value of image field below to your controller image URL
11 |       - image: IMAGE_URL
12 |         name: manager
13 | 
-------------------------------------------------------------------------------- /config/default/manager_prometheus_metrics_patch.yaml: --------------------------------------------------------------------------------
1 | # This patch enables Prometheus scraping for the manager pod.
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 |   name: controller-manager
6 |   namespace: system
7 | spec:
8 |   template:
9 |     metadata:
10 |       annotations:
11 |         prometheus.io/scrape: 'true'
12 |     spec:
13 |       containers:
14 |       # Expose the prometheus metrics on the default port
15 |       - name: manager
16 |         ports:
17 |         - containerPort: 8080
18 |           name: metrics
19 |           protocol: TCP
20 | 
-------------------------------------------------------------------------------- /config/default/manager_webhook_patch.yaml: --------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 |   name: controller-manager
5 |   namespace: system
6 | spec:
7 |   template:
8 |     spec:
9 |       containers:
10 |       - name: manager
11 |         ports:
12 |         - containerPort: 443
13 |           name: webhook-server
14 |           protocol: TCP
15 |         volumeMounts:
16 |         - mountPath: /tmp/k8s-webhook-server/serving-certs
17 |           name: cert
18 |           readOnly: true
19 |       volumes:
20 |       - name: cert
21 |         secret:
22 |           defaultMode: 420
23 |           secretName: webhook-server-cert
24 | 
-------------------------------------------------------------------------------- /config/default/webhookcainjection_patch.yaml: --------------------------------------------------------------------------------
1 | # This patch adds annotations to the admission webhook configs;
2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize.
3 | apiVersion: admissionregistration.k8s.io/v1beta1
4 | kind: MutatingWebhookConfiguration
5 | metadata:
6 |   name: mutating-webhook-configuration
7 |   annotations:
8 |     certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
9 | ---
10 | apiVersion: admissionregistration.k8s.io/v1beta1
11 | kind: ValidatingWebhookConfiguration
12 | metadata:
13 |   name: validating-webhook-configuration
14 |   annotations:
15 |     certmanager.k8s.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME)
16 | 
-------------------------------------------------------------------------------- /config/manager/kustomization.yaml: --------------------------------------------------------------------------------
1 | resources:
2 | - manager.yaml
3 | 
-------------------------------------------------------------------------------- /config/manager/manager.yaml: --------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 |   labels:
5 |     control-plane: controller-manager
6 |   name: system
7 | ---
8 | apiVersion: apps/v1
9 | kind: Deployment
10 | metadata:
11 |   name: controller-manager
12 |   namespace: system
13 |   labels:
14 |     control-plane: controller-manager
15 | spec:
16 |   selector:
17 |     matchLabels:
18 |       control-plane: controller-manager
19 |   replicas: 1
20 |   template:
21 |     metadata:
22 |       labels:
23 |         control-plane: controller-manager
24 |     spec:
25 |       containers:
26 |       - command:
27 |         - /manager
28 |         args:
29 |         - --enable-leader-election
30 |         image: controller:latest
31 |         name: manager
32 |         resources:
33 |           limits:
34 |             cpu: 100m
35 |             memory: 30Mi
36 |           requests:
37 |             cpu: 100m
38 |             memory: 20Mi
39 |       terminationGracePeriodSeconds: 10
40 | 
-------------------------------------------------------------------------------- /config/rbac/auth_proxy_role.yaml: --------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: ClusterRole
3 | metadata:
4 |   name: proxy-role
5 | rules:
6 | - apiGroups: ["authentication.k8s.io"]
7 |   resources:
8 |   -
tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | prometheus.io/port: "8443" 6 | prometheus.io/scheme: https 7 | prometheus.io/scrape: "true" 8 | labels: 9 | control-plane: controller-manager 10 | name: controller-manager-metrics-service 11 | namespace: system 12 | spec: 13 | ports: 14 | - name: https 15 | port: 8443 16 | targetPort: https 17 | selector: 18 | control-plane: controller-manager 19 | -------------------------------------------------------------------------------- /config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | # Comment the following 3 lines if you want to disable 7 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 8 | # which protects your /metrics endpoint. 9 | - auth_proxy_service.yaml 10 | - auth_proxy_role.yaml 11 | - auth_proxy_role_binding.yaml 12 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - events 31 | verbs: 32 | - create 33 | -------------------------------------------------------------------------------- /config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /config/samples/dbaoperator_v1alpha1_databasemigration.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dbaoperator.app-sre.redhat.com/v1alpha1 2 | kind: DatabaseMigration 3 | metadata: 4 | name: databasemigration-sample 5 | spec: 6 | # Add fields here 7 | foo: bar 8 | -------------------------------------------------------------------------------- /config/samples/dbaoperator_v1alpha1_manageddatabase.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dbaoperator.app-sre.redhat.com/v1alpha1 2 | kind: ManagedDatabase 3 | metadata: 4 | name: manageddatabase-sample 5 | spec: 6 | # Add fields here 7 | foo: bar 8 | -------------------------------------------------------------------------------- /config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manifests.yaml 3 | - service.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | -------------------------------------------------------------------------------- /config/webhook/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # the following config is for teaching kustomize where to look at when substituting vars. 2 | # It requires kustomize v2.1.0 or newer to work properly. 
3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: MutatingWebhookConfiguration 8 | group: admissionregistration.k8s.io 9 | path: webhooks/clientConfig/service/name 10 | - kind: ValidatingWebhookConfiguration 11 | group: admissionregistration.k8s.io 12 | path: webhooks/clientConfig/service/name 13 | 14 | namespace: 15 | - kind: MutatingWebhookConfiguration 16 | group: admissionregistration.k8s.io 17 | path: webhooks/clientConfig/service/namespace 18 | create: true 19 | - kind: ValidatingWebhookConfiguration 20 | group: admissionregistration.k8s.io 21 | path: webhooks/clientConfig/service/namespace 22 | create: true 23 | 24 | varReference: 25 | - path: metadata/annotations 26 | -------------------------------------------------------------------------------- /config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webhook-service 6 | namespace: system 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 443 11 | selector: 12 | control-plane: controller-manager 13 | -------------------------------------------------------------------------------- /controllers/errors.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "fmt" 5 | 6 | "k8s.io/apimachinery/pkg/types" 7 | ) 8 | 9 | type migrationError interface { 10 | error 11 | 12 | migrationName() types.NamespacedName 13 | } 14 | 15 | type migrationErrorStruct struct { 16 | underlying error 17 | causingMigrationName types.NamespacedName 18 | } 19 | 20 | func newMigrationErrorf(migrationName types.NamespacedName, format string, arguments ...interface{}) migrationError { 21 | return migrationErrorStruct{underlying: fmt.Errorf(format, arguments...), causingMigrationName: migrationName} 22 | } 23 | 24 | func (me migrationErrorStruct) Error() string { 25 | return me.underlying.Error() 26 | } 27 | 28 | func (me migrationErrorStruct) Unwrap() error { 29 | return me.underlying 30 | } 31 | 32 | func (me migrationErrorStruct) migrationName() types.NamespacedName { 33 | return me.causingMigrationName 34 | } 35 | -------------------------------------------------------------------------------- /controllers/jobs.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | dba "github.com/app-sre/dba-operator/api/v1alpha1" 5 | batchv1 "k8s.io/api/batch/v1" 6 | corev1 "k8s.io/api/core/v1" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | var noRetries = int32(0) 11 | var secretNotOptional = false 12 | 13 | func constructJobForMigration(managedDatabase *dba.ManagedDatabase, migration *dba.DatabaseMigration) (*batchv1.Job, error) { 14 | name := migrationName(managedDatabase.Name, migration.Name) 15 | 16 | var containerSpec corev1.Container 17 | migration.Spec.MigrationContainerSpec.DeepCopyInto(&containerSpec) 18 | 19 | falseBool := false 20 | csSource := &corev1.EnvVarSource{SecretKeyRef: &corev1.SecretKeySelector{ 21 | LocalObjectReference: corev1.LocalObjectReference{Name: managedDatabase.Spec.Connection.DSNSecret}, 22 | Key: "dsn", 23 | Optional: &falseBool, 24 | }} 25 | containerSpec.Env = append(containerSpec.Env, corev1.EnvVar{Name: "DBA_OP_CONNECTION_STRING", ValueFrom: csSource}) 26 | containerSpec.Env = append(containerSpec.Env, corev1.EnvVar{Name: "DBA_OP_JOB_ID", Value: name}) 27 | containerSpec.Env = append(containerSpec.Env, 
corev1.EnvVar{Name: "DBA_OP_PROMETHEUS_PUSH_GATEWAY_ADDR", Value: "prom-pushgateway:9091"}) 28 | containerSpec.Env = append(containerSpec.Env, corev1.EnvVar{Name: "DBA_OP_LABEL_DATABASE", Value: managedDatabase.Name}) 29 | containerSpec.Env = append(containerSpec.Env, corev1.EnvVar{Name: "DBA_OP_LABEL_MIGRATION", Value: migration.Name}) 30 | 31 | containerSpec.ImagePullPolicy = "IfNotPresent" // TODO removeme before prod 32 | 33 | job := &batchv1.Job{ 34 | ObjectMeta: metav1.ObjectMeta{ 35 | Labels: getStandardLabels(managedDatabase, migration), 36 | Annotations: make(map[string]string), 37 | Name: name, 38 | Namespace: managedDatabase.Namespace, 39 | }, 40 | Spec: batchv1.JobSpec{ 41 | Template: corev1.PodTemplateSpec{ 42 | Spec: corev1.PodSpec{ 43 | Containers: []corev1.Container{ 44 | containerSpec, 45 | }, 46 | RestartPolicy: corev1.RestartPolicyNever, 47 | }, 48 | }, 49 | BackoffLimit: &noRetries, 50 | }, 51 | } 52 | 53 | // If the migraction container requires extra config, mount the config 54 | // secret as a volume 55 | migrationConfig := managedDatabase.Spec.MigrationContainerConfig 56 | if migrationConfig != nil { 57 | volumeName := migrationConfig.VolumeMount.Name 58 | volume := corev1.Volume{ 59 | Name: volumeName, 60 | VolumeSource: corev1.VolumeSource{ 61 | Secret: &corev1.SecretVolumeSource{ 62 | SecretName: migrationConfig.Secret, 63 | Optional: &secretNotOptional, 64 | }, 65 | }, 66 | } 67 | 68 | job.Spec.Template.Spec.Volumes = append(job.Spec.Template.Spec.Volumes, volume) 69 | 70 | var mount corev1.VolumeMount 71 | migrationConfig.VolumeMount.DeepCopyInto(&mount) 72 | job.Spec.Template.Spec.Containers[0].VolumeMounts = append( 73 | job.Spec.Template.Spec.Containers[0].VolumeMounts, 74 | mount, 75 | ) 76 | } 77 | 78 | // TODO figure out a policy for adding annotations and labels 79 | 80 | return job, nil 81 | } 82 | -------------------------------------------------------------------------------- /controllers/manageddatabase_controller_test.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "context" 5 | "encoding/hex" 6 | "fmt" 7 | "math/rand" 8 | 9 | . "github.com/onsi/ginkgo" 10 | . 
"github.com/onsi/gomega" 11 | batchv1 "k8s.io/api/batch/v1" 12 | corev1 "k8s.io/api/core/v1" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/types" 15 | "k8s.io/client-go/kubernetes/scheme" 16 | "sigs.k8s.io/controller-runtime/pkg/client" 17 | "sigs.k8s.io/controller-runtime/pkg/metrics" 18 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 19 | 20 | dba "github.com/app-sre/dba-operator/api/v1alpha1" 21 | "github.com/app-sre/dba-operator/pkg/dbadmin" 22 | "github.com/app-sre/dba-operator/pkg/dbadmin/dbadminfakes" 23 | ) 24 | 25 | func newMigration(namespace, name, previous string) dba.DatabaseMigration { 26 | return dba.DatabaseMigration{ 27 | TypeMeta: metav1.TypeMeta{ 28 | APIVersion: "dbaoperator.app-sre.redhat.com/v1alpha1", 29 | Kind: "DatabaseMigration", 30 | }, 31 | ObjectMeta: metav1.ObjectMeta{ 32 | Name: name, 33 | Namespace: namespace, 34 | }, 35 | Spec: dba.DatabaseMigrationSpec{ 36 | Previous: previous, 37 | MigrationContainerSpec: corev1.Container{ 38 | Name: fmt.Sprintf("%s-container-name", name), 39 | Image: "migrationcontainer/test", 40 | Command: []string{"migrate", name}, 41 | }, 42 | }, 43 | } 44 | } 45 | 46 | func newDSNSecret(namespace string) corev1.Secret { 47 | return corev1.Secret{ 48 | ObjectMeta: metav1.ObjectMeta{ 49 | Name: "connection-dsn", 50 | Namespace: namespace, 51 | }, 52 | StringData: map[string]string{ 53 | "dsn": "username:password@tcp(localhost:3306)/dbname", 54 | }, 55 | } 56 | } 57 | 58 | func newManagedDatabase(namespace, desiredSchemaVersion string) dba.ManagedDatabase { 59 | return dba.ManagedDatabase{ 60 | TypeMeta: metav1.TypeMeta{ 61 | APIVersion: "dbaoperator.app-sre.redhat.com/v1alpha1", 62 | Kind: "ManagedDatabase", 63 | }, 64 | ObjectMeta: metav1.ObjectMeta{ 65 | Name: "database-name", 66 | Namespace: namespace, 67 | }, 68 | Spec: dba.ManagedDatabaseSpec{ 69 | DesiredSchemaVersion: desiredSchemaVersion, 70 | Connection: dba.DatabaseConnectionInfo{ 71 | DSNSecret: "connection-dsn", 72 | }, 73 | MigrationEngine: "alembic", 74 | }, 75 | } 76 | } 77 | 78 | func randIdentifier(randomBytes int) string { 79 | identBytes := make([]byte, randomBytes) 80 | rand.Read(identBytes) // nolint:gosec 81 | 82 | // Here we prepend "var" to handle an edge case where some hex (e.g. 
1e2) 83 | // gets interpreted as scientific notation by MySQL 84 | return "var" + hex.EncodeToString(identBytes) 85 | } 86 | 87 | var _ = Describe("ManagedDatabaseController", func() { 88 | var controller *ManagedDatabaseController 89 | var mockDB *dbadminfakes.FakeDbAdmin 90 | 91 | var namespace string 92 | var v1, v2 dba.DatabaseMigration 93 | var dsnSecret corev1.Secret 94 | var db dba.ManagedDatabase 95 | var dbObjectName types.NamespacedName 96 | var secretsToSave []corev1.Secret 97 | 98 | BeforeEach(func() { 99 | namespace = randIdentifier(16) 100 | v1 = newMigration(namespace, "v1", "") 101 | v2 = newMigration(namespace, "v2", "v1") 102 | dsnSecret = newDSNSecret(namespace) 103 | db = newManagedDatabase(namespace, "v1") 104 | dbObjectName = types.NamespacedName{ 105 | Namespace: db.Namespace, 106 | Name: db.Name, 107 | } 108 | secretsToSave = nil 109 | 110 | controller = NewManagedDatabaseController(k8sClient, scheme.Scheme, testLogger, metrics.Registry) 111 | 112 | mockDB = &dbadminfakes.FakeDbAdmin{} 113 | controller.initializeAdminConnection = func(_, _ string) (dbadmin.DbAdmin, error) { 114 | return mockDB, nil 115 | } 116 | }) 117 | 118 | Describe("Running Reconcile()", func() { 119 | var result reconcile.Result 120 | var err error 121 | 122 | AssertReconciliationSuccess := func() { 123 | It("should not return an error", func() { 124 | Expect(err).NotTo(HaveOccurred()) 125 | Expect(result.Requeue).To(BeFalse()) 126 | }) 127 | It("should not write any errors to the status block", func() { 128 | var afterReconcile dba.ManagedDatabase 129 | Expect(k8sClient.Get(context.Background(), dbObjectName, &afterReconcile)).NotTo(HaveOccurred()) 130 | Expect(afterReconcile.Status.Errors).To(BeNil()) 131 | }) 132 | It("should ask the database for the current state", func() { 133 | Expect(mockDB.ListUsernamesCallCount()).To(Equal(1)) 134 | Expect(mockDB.GetSchemaVersionCallCount()).To(Equal(1)) 135 | }) 136 | } 137 | 138 | AssertReconciliationError := func(numErrors int) { 139 | It("should not return an error", func() { 140 | Expect(err).NotTo(HaveOccurred()) 141 | Expect(result.Requeue).To(BeFalse()) 142 | }) 143 | It("should write an error to the status block", func() { 144 | var afterReconcile dba.ManagedDatabase 145 | Expect(k8sClient.Get(context.Background(), dbObjectName, &afterReconcile)).NotTo(HaveOccurred()) 146 | Expect(afterReconcile.Status.Errors).To(HaveLen(numErrors)) 147 | }) 148 | It("should have errors that converge", func() { 149 | req := reconcile.Request{NamespacedName: dbObjectName} 150 | _, err := controller.ReconcileManagedDatabase(req) 151 | Expect(err).ToNot(HaveOccurred()) 152 | 153 | var afterReconcile dba.ManagedDatabase 154 | Expect(k8sClient.Get(context.Background(), dbObjectName, &afterReconcile)).NotTo(HaveOccurred()) 155 | Expect(afterReconcile.Status.Errors).To(HaveLen(numErrors)) 156 | }) 157 | } 158 | 159 | AssertJobProvisioned := func(jobContainerName string, numTotalJobs int) { 160 | It(fmt.Sprintf("provisions a job with the container named %s", jobContainerName), func() { 161 | var jobsForDatabase batchv1.JobList 162 | Expect(k8sClient.List(context.Background(), &jobsForDatabase, client.InNamespace(namespace))).NotTo(HaveOccurred()) 163 | Expect(jobsForDatabase.Items).To(HaveLen(numTotalJobs)) 164 | 165 | containerNames := make(map[string]struct{}, len(jobsForDatabase.Items)) 166 | for _, job := range jobsForDatabase.Items { 167 | containerNames[job.Spec.Template.Spec.Containers[0].Name] = struct{}{} 168 | } 169 | 
Expect(containerNames).To(HaveKey(jobContainerName)) 170 | }) 171 | } 172 | 173 | JustBeforeEach(func() { 174 | ctx := context.Background() 175 | Expect(k8sClient.Create(ctx, &v1)).NotTo(HaveOccurred()) 176 | Expect(k8sClient.Create(ctx, &v2)).NotTo(HaveOccurred()) 177 | Expect(k8sClient.Create(ctx, &dsnSecret)).NotTo(HaveOccurred()) 178 | Expect(k8sClient.Create(ctx, &db)).NotTo(HaveOccurred()) 179 | 180 | for _, secret := range secretsToSave { 181 | Expect(db.UID).NotTo(Equal("")) 182 | secret.Labels = map[string]string{ 183 | "database-uid": string(db.UID), 184 | } 185 | 186 | Expect(k8sClient.Create(ctx, &secret)).NotTo(HaveOccurred()) 187 | } 188 | 189 | req := reconcile.Request{NamespacedName: dbObjectName} 190 | result, err = controller.ReconcileManagedDatabase(req) 191 | }) 192 | 193 | Context("on an unmanaged database", func() { 194 | AssertReconciliationSuccess() 195 | 196 | var createdJob batchv1.Job 197 | Context("will create a job", func() { 198 | JustBeforeEach(func() { 199 | var jobsForDatabase batchv1.JobList 200 | Expect(k8sClient.List(context.Background(), &jobsForDatabase, client.InNamespace(namespace))).NotTo(HaveOccurred()) 201 | Expect(jobsForDatabase.Items).To(HaveLen(1)) 202 | createdJob = jobsForDatabase.Items[0] 203 | }) 204 | 205 | It("brings the database up to the first version", func() { 206 | Expect(createdJob.Spec.Template.Spec.Containers[0].Name).To(Equal("v1-container-name")) 207 | }) 208 | 209 | It("implements the migration container protocol", func() { 210 | env := createdJob.Spec.Template.Spec.Containers[0].Env 211 | envMap := make(map[string]corev1.EnvVar, len(env)) 212 | for _, envVar := range env { 213 | envMap[envVar.Name] = envVar 214 | } 215 | 216 | Expect(envMap).Should(HaveKey("DBA_OP_CONNECTION_STRING")) 217 | Expect(envMap).Should(HaveKey("DBA_OP_JOB_ID")) 218 | Expect(envMap).Should(HaveKey("DBA_OP_PROMETHEUS_PUSH_GATEWAY_ADDR")) 219 | Expect(envMap).Should(HaveKey("DBA_OP_LABEL_DATABASE")) 220 | Expect(envMap).Should(HaveKey("DBA_OP_LABEL_MIGRATION")) 221 | }) 222 | }) 223 | 224 | Context("which is readonly", func() { 225 | BeforeEach(func() { 226 | db.Spec.ReadOnly = true 227 | }) 228 | 229 | AssertReconciliationError(1) 230 | }) 231 | }) 232 | 233 | Context("on a database that is already at v1", func() { 234 | BeforeEach(func() { 235 | mockDB.GetSchemaVersionReturns("v1", nil) 236 | }) 237 | 238 | AssertReconciliationSuccess() 239 | 240 | It("should write a new user to the database for v1", func() { 241 | Expect(mockDB.WriteCredentialsCallCount()).To(Equal(1)) 242 | }) 243 | It("should provision a new secret with credentials for the new database user", func() { 244 | var userSecret corev1.Secret 245 | newSecretName := types.NamespacedName{ 246 | Namespace: namespace, 247 | Name: fmt.Sprintf("%s-%s", db.Name, v1.Name), 248 | } 249 | Expect(k8sClient.Get(context.Background(), newSecretName, &userSecret)).NotTo(HaveOccurred()) 250 | }) 251 | 252 | Context("but wants to be at v2", func() { 253 | BeforeEach(func() { 254 | db.Spec.DesiredSchemaVersion = "v2" 255 | 256 | v1Secret := corev1.Secret{ 257 | ObjectMeta: metav1.ObjectMeta{ 258 | Name: fmt.Sprintf("%s-%s", db.Name, v1.Name), 259 | Namespace: namespace, 260 | }, 261 | StringData: map[string]string{}, 262 | } 263 | secretsToSave = append(secretsToSave, v1Secret) 264 | 265 | mockDB.ListUsernamesReturns([]string{"dba_v1"}, nil) 266 | }) 267 | 268 | AssertReconciliationSuccess() 269 | 270 | It("should not yet write a new user to the database for v2 yet", func() { 271 | 
Expect(mockDB.WriteCredentialsCallCount()).To(Equal(0)) 272 | }) 273 | 274 | AssertJobProvisioned("v2-container-name", 1) 275 | }) 276 | 277 | Context("but wants to be at an invalid version", func() { 278 | BeforeEach(func() { 279 | db.Spec.DesiredSchemaVersion = "unknown" 280 | }) 281 | 282 | AssertReconciliationError(1) 283 | 284 | It("should append a label to the database naming the missing migration", func() { 285 | expectedLabelValue := fmt.Sprintf("%s.unknown", namespace) 286 | var afterReconcile dba.ManagedDatabase 287 | Expect(k8sClient.Get(context.Background(), dbObjectName, &afterReconcile)).NotTo(HaveOccurred()) 288 | Expect(afterReconcile.Labels).To(HaveKeyWithValue(BlockedByMigrationLabelKey, expectedLabelValue)) 289 | }) 290 | }) 291 | 292 | Context("which is readonly", func() { 293 | BeforeEach(func() { 294 | db.Spec.ReadOnly = true 295 | }) 296 | 297 | AssertReconciliationError(1) 298 | }) 299 | 300 | Context("which has already stabilized", func() { 301 | BeforeEach(func() { 302 | mockDB.ListUsernamesReturns([]string{"dba_v1"}, nil) 303 | v1Secret := corev1.Secret{ 304 | ObjectMeta: metav1.ObjectMeta{ 305 | Name: fmt.Sprintf("%s-%s", db.Name, v1.Name), 306 | Namespace: namespace, 307 | }, 308 | StringData: map[string]string{}, 309 | } 310 | 311 | secretsToSave = append(secretsToSave, v1Secret) 312 | }) 313 | 314 | AssertReconciliationSuccess() 315 | 316 | It("should make no mutations to the users in the database", func() { 317 | Expect(mockDB.WriteCredentialsCallCount()).To(Equal(0)) 318 | Expect(mockDB.VerifyUnusedAndDeleteCredentialsCallCount()).To(Equal(0)) 319 | }) 320 | 321 | Context("and which is readonly", func() { 322 | BeforeEach(func() { 323 | db.Spec.ReadOnly = true 324 | }) 325 | 326 | AssertReconciliationSuccess() 327 | 328 | It("should make no mutations to the users in the database", func() { 329 | Expect(mockDB.WriteCredentialsCallCount()).To(Equal(0)) 330 | Expect(mockDB.VerifyUnusedAndDeleteCredentialsCallCount()).To(Equal(0)) 331 | }) 332 | }) 333 | }) 334 | }) 335 | 336 | Context("on a database that is already at v2", func() { 337 | BeforeEach(func() { 338 | db.Spec.DesiredSchemaVersion = "v2" 339 | 340 | mockDB.GetSchemaVersionReturns("v2", nil) 341 | mockDB.ListUsernamesReturns([]string{"dba_v1"}, nil) 342 | 343 | v1Secret := corev1.Secret{ 344 | ObjectMeta: metav1.ObjectMeta{ 345 | Name: fmt.Sprintf("%s-%s", db.Name, v1.Name), 346 | Namespace: namespace, 347 | }, 348 | StringData: map[string]string{}, 349 | } 350 | 351 | secretsToSave = append(secretsToSave, v1Secret) 352 | }) 353 | 354 | AssertReconciliationSuccess() 355 | 356 | It("creates the v2 credentials", func() { 357 | Expect(mockDB.WriteCredentialsCallCount()).To(Equal(1)) 358 | 359 | v2SecretName := types.NamespacedName{ 360 | Namespace: namespace, 361 | Name: fmt.Sprintf("%s-%s", db.Name, v2.Name), 362 | } 363 | var v2Secret corev1.Secret 364 | Expect(k8sClient.Get(context.Background(), v2SecretName, &v2Secret)).NotTo(HaveOccurred()) 365 | }) 366 | 367 | Context("but that wants to be at v3", func() { 368 | BeforeEach(func() { 369 | db.Spec.DesiredSchemaVersion = "v3" 370 | mockDB.ListUsernamesReturns([]string{"dba_v1", "dba_v2"}, nil) 371 | 372 | v2Secret := corev1.Secret{ 373 | ObjectMeta: metav1.ObjectMeta{ 374 | Name: fmt.Sprintf("%s-%s", db.Name, v2.Name), 375 | Namespace: namespace, 376 | }, 377 | StringData: map[string]string{}, 378 | } 379 | secretsToSave = append(secretsToSave, v2Secret) 380 | 381 | v3 := newMigration(namespace, "v3", "v2") 382 | 
Expect(k8sClient.Create(context.Background(), &v3)).NotTo(HaveOccurred()) 383 | }) 384 | 385 | AssertReconciliationSuccess() 386 | 387 | It("revokes the v1 credentials", func() { 388 | Expect(mockDB.VerifyUnusedAndDeleteCredentialsCallCount()).To(Equal(1)) 389 | 390 | v1SecretName := types.NamespacedName{ 391 | Namespace: namespace, 392 | Name: fmt.Sprintf("%s-%s", db.Name, v1.Name), 393 | } 394 | var v1Secret corev1.Secret 395 | Expect(k8sClient.Get(context.Background(), v1SecretName, &v1Secret)).To(HaveOccurred()) 396 | }) 397 | 398 | AssertJobProvisioned("v3-container-name", 1) 399 | 400 | Context("which is readonly", func() { 401 | BeforeEach(func() { 402 | db.Spec.ReadOnly = true 403 | }) 404 | 405 | AssertReconciliationError(1) 406 | }) 407 | }) 408 | 409 | Context("but that erroneously wants to roll back to v1", func() { 410 | BeforeEach(func() { 411 | db.Spec.DesiredSchemaVersion = "v1" 412 | mockDB.ListUsernamesReturns([]string{"dba_v1", "dba_v2"}, nil) 413 | 414 | v2Secret := corev1.Secret{ 415 | ObjectMeta: metav1.ObjectMeta{ 416 | Name: fmt.Sprintf("%s-%s", db.Name, v2.Name), 417 | Namespace: namespace, 418 | }, 419 | StringData: map[string]string{}, 420 | } 421 | secretsToSave = append(secretsToSave, v2Secret) 422 | }) 423 | 424 | AssertReconciliationError(1) 425 | }) 426 | }) 427 | 428 | Context("but can't connect to the database", func() { 429 | BeforeEach(func() { 430 | controller.initializeAdminConnection = func(_, _ string) (dbadmin.DbAdmin, error) { 431 | return nil, fmt.Errorf("Can't connect to the database") 432 | } 433 | }) 434 | 435 | AssertReconciliationError(1) 436 | }) 437 | }) 438 | }) 439 | -------------------------------------------------------------------------------- /controllers/metrics.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | "io" 5 | "reflect" 6 | "time" 7 | 8 | dba "github.com/app-sre/dba-operator/api/v1alpha1" 9 | "github.com/app-sre/dba-operator/pkg/dbadmin" 10 | "github.com/go-logr/logr" 11 | "github.com/prometheus/client_golang/prometheus" 12 | ) 13 | 14 | // ManagedDatabaseControllerMetrics should contain all of the metrics exported 15 | // by the ManagedDatabaseController 16 | type ManagedDatabaseControllerMetrics struct { 17 | MigrationJobsSpawned prometheus.Counter 18 | CredentialsCreated prometheus.Counter 19 | CredentialsRevoked prometheus.Counter 20 | RegisteredMigrations prometheus.Gauge 21 | ManagedDatabases prometheus.Gauge 22 | } 23 | 24 | func getAllMetrics(metrics ManagedDatabaseControllerMetrics) []prometheus.Collector { 25 | metricsValue := reflect.ValueOf(metrics) 26 | collectors := make([]prometheus.Collector, 0, metricsValue.NumField()) 27 | for i := 0; i < metricsValue.NumField(); i++ { 28 | collectors = append(collectors, metricsValue.Field(i).Interface().(prometheus.Collector)) 29 | } 30 | return collectors 31 | } 32 | 33 | func generateManagedDatabaseControllerMetrics() ManagedDatabaseControllerMetrics { 34 | return ManagedDatabaseControllerMetrics{ 35 | MigrationJobsSpawned: prometheus.NewCounter(prometheus.CounterOpts{ 36 | Name: "dba_operator_migration_jobs_spawned_total", 37 | }), 38 | CredentialsCreated: prometheus.NewCounter(prometheus.CounterOpts{ 39 | Name: "dba_operator_credentials_created_total", 40 | }), 41 | CredentialsRevoked: prometheus.NewCounter(prometheus.CounterOpts{ 42 | Name: "dba_operator_credentials_revoked_total", 43 | }), 44 | RegisteredMigrations: prometheus.NewGauge(prometheus.GaugeOpts{ 45 | Name: 
"dba_operator_registered_migrations_total", 46 | }), 47 | ManagedDatabases: prometheus.NewGauge(prometheus.GaugeOpts{ 48 | Name: "dba_operator_managed_databases_total", 49 | }), 50 | } 51 | } 52 | 53 | type databaseMetricsCollector struct { 54 | db dbadmin.DbAdmin 55 | log logr.Logger 56 | 57 | rowEstimateMetrics map[dba.TableReference]*prometheus.Desc 58 | rowEstimateFilter []dbadmin.TableName 59 | 60 | nextIDMetrics map[dba.TableReference]*prometheus.Desc 61 | nextIDFilter []dbadmin.TableName 62 | 63 | sqlQueryMetrics []sqlQueryExporter 64 | } 65 | 66 | type sqlQueryExporter struct { 67 | spec dba.SQLQueryMetric 68 | desc *prometheus.Desc 69 | timing prometheus.Histogram 70 | } 71 | 72 | // CollectorCloser is a custom interface adding a Close method to 73 | // prometheus.Collector 74 | type CollectorCloser interface { 75 | prometheus.Collector 76 | io.Closer 77 | } 78 | 79 | // NewDatabaseMetricsCollector creates a database metrics collector for 80 | // instrumenting the data in a live running database. 81 | func NewDatabaseMetricsCollector(db dbadmin.DbAdmin, dbName string, log logr.Logger, metrics *dba.DataMetrics) CollectorCloser { 82 | rowEstimateMetrics := make(map[dba.TableReference]*prometheus.Desc) 83 | rowEstimateFilter := make([]dbadmin.TableName, 0) 84 | 85 | nextIDMetrics := make(map[dba.TableReference]*prometheus.Desc) 86 | nextIDFilter := make([]dbadmin.TableName, 0) 87 | 88 | sqlQueryMetrics := make([]sqlQueryExporter, 0) 89 | 90 | if metrics != nil { 91 | if metrics.TableEstimatedSize != nil { 92 | for _, tableRef := range *metrics.TableEstimatedSize { 93 | labels := prometheus.Labels{ 94 | "database": dbName, 95 | "table": tableRef.TableName, 96 | } 97 | 98 | rowEstimateMetrics[tableRef] = prometheus.NewDesc( 99 | "database_table_rows_estimate", 100 | "Estimate of the number of rows in the specified table", 101 | nil, 102 | labels, 103 | ) 104 | rowEstimateFilter = append(rowEstimateFilter, dbadmin.TableName(tableRef.TableName)) 105 | } 106 | } 107 | 108 | if metrics.TableNextID != nil { 109 | for _, tableRef := range *metrics.TableNextID { 110 | labels := prometheus.Labels{ 111 | "database": dbName, 112 | "table": tableRef.TableName, 113 | } 114 | 115 | nextIDMetrics[tableRef] = prometheus.NewDesc( 116 | "database_table_next_id", 117 | "The next id which will be assigned to a new row in the table", 118 | nil, 119 | labels, 120 | ) 121 | nextIDFilter = append(nextIDFilter, dbadmin.TableName(tableRef.TableName)) 122 | } 123 | } 124 | 125 | if metrics.SQLQuery != nil { 126 | for _, sqlQuery := range *metrics.SQLQuery { 127 | labels := prometheus.Labels{ 128 | "database": dbName, 129 | } 130 | for k, v := range sqlQuery.PrometheusMetric.ExtraLabels { 131 | labels[k] = v 132 | } 133 | 134 | sqlQueryDesc := prometheus.NewDesc( 135 | sqlQuery.PrometheusMetric.Name, 136 | sqlQuery.PrometheusMetric.HelpString, 137 | nil, 138 | labels, 139 | ) 140 | 141 | histogram := prometheus.NewHistogram(prometheus.HistogramOpts{ 142 | Namespace: "database", 143 | Name: "query_duration_seconds", 144 | Help: "Histogram of database query durations", 145 | ConstLabels: prometheus.Labels{ 146 | "database": dbName, 147 | "verb": "SELECT", 148 | "query_for_metric": sqlQuery.PrometheusMetric.Name, 149 | }, 150 | }) 151 | 152 | sqlQueryMetrics = append(sqlQueryMetrics, sqlQueryExporter{ 153 | spec: sqlQuery, 154 | desc: sqlQueryDesc, 155 | timing: histogram, 156 | }) 157 | } 158 | } 159 | } 160 | 161 | return &databaseMetricsCollector{ 162 | db: db, 163 | log: log, 164 | rowEstimateMetrics: 
rowEstimateMetrics,
165 | 		rowEstimateFilter:  rowEstimateFilter,
166 | 		nextIDMetrics:      nextIDMetrics,
167 | 		nextIDFilter:       nextIDFilter,
168 | 		sqlQueryMetrics:    sqlQueryMetrics,
169 | 	}
170 | }
171 | 
172 | // Describe implements prometheus.Collector
173 | func (dmc *databaseMetricsCollector) Describe(ch chan<- *prometheus.Desc) {
174 | 	for _, desc := range dmc.rowEstimateMetrics {
175 | 		ch <- desc
176 | 	}
177 | 	for _, desc := range dmc.nextIDMetrics {
178 | 		ch <- desc
179 | 	}
180 | 	for _, exporter := range dmc.sqlQueryMetrics {
181 | 		ch <- exporter.desc
182 | 		ch <- exporter.timing.Desc()
183 | 	}
184 | }
185 | 
186 | // Collect implements prometheus.Collector
187 | func (dmc *databaseMetricsCollector) Collect(ch chan<- prometheus.Metric) {
188 | 	estimates, err := dmc.db.GetTableSizeEstimates(dmc.rowEstimateFilter)
189 | 	if err != nil {
190 | 		dmc.log.Error(err, "Unable to get table size estimates for specified tables")
191 | 	} else {
192 | 		for tableRef, desc := range dmc.rowEstimateMetrics {
193 | 			estimate := estimates[dbadmin.TableName(tableRef.TableName)]
194 | 			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(estimate))
195 | 		}
196 | 	}
197 | 
198 | 	nextIDs, err := dmc.db.GetNextIDs(dmc.nextIDFilter)
199 | 	if err != nil {
200 | 		dmc.log.Error(err, "Unable to get next IDs for specified tables")
201 | 	} else {
202 | 		for tableRef, desc := range dmc.nextIDMetrics {
203 | 			nextID := nextIDs[dbadmin.TableName(tableRef.TableName)]
204 | 			ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(nextID))
205 | 		}
206 | 	}
207 | 
208 | 	for _, exporter := range dmc.sqlQueryMetrics {
209 | 		var valueType prometheus.ValueType
210 | 		if exporter.spec.PrometheusMetric.ValueType == "counter" {
211 | 			valueType = prometheus.CounterValue
212 | 		} else if exporter.spec.PrometheusMetric.ValueType == "gauge" {
213 | 			valueType = prometheus.GaugeValue
214 | 		} else {
215 | 			// Skip exporters configured with an unknown value type so that we
216 | 			// never emit a metric with an invalid prometheus.ValueType.
217 | 			dmc.log.Error(nil, "Unknown prometheus metric type", "metricType", exporter.spec.PrometheusMetric.ValueType)
218 | 			continue
219 | 		}
220 | 
221 | 		start := time.Now()
222 | 		if metricValue, err := dmc.db.SelectFloat(exporter.spec.Query); err != nil {
223 | 			dmc.log.Error(err, "Unable to load custom SQL metric from database", "metricName", exporter.spec.PrometheusMetric.Name)
224 | 		} else {
225 | 			duration := time.Since(start)
226 | 			exporter.timing.Observe(duration.Seconds())
227 | 
228 | 			ch <- prometheus.MustNewConstMetric(exporter.desc, valueType, metricValue)
229 | 			ch <- exporter.timing
230 | 		}
231 | 	}
232 | }
233 | 
234 | func (dmc *databaseMetricsCollector) Close() error {
235 | 	return dmc.db.Close()
236 | }
237 | 
-------------------------------------------------------------------------------- /controllers/secrets.go: --------------------------------------------------------------------------------
1 | package controllers
2 | 
3 | import (
4 | 	"context"
5 | 	"fmt"
6 | 
7 | 	dba "github.com/app-sre/dba-operator/api/v1alpha1"
8 | 	"github.com/go-logr/logr"
9 | 	corev1 "k8s.io/api/core/v1"
10 | 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11 | 	"k8s.io/apimachinery/pkg/runtime"
12 | 	"k8s.io/apimachinery/pkg/types"
13 | 	ctrl "sigs.k8s.io/controller-runtime"
14 | 	"sigs.k8s.io/controller-runtime/pkg/client"
15 | )
16 | 
17 | func secretUsedInPod(secretName string, pods *corev1.PodList) *corev1.Pod {
18 | 	for _, pod := range pods.Items {
19 | 		for _, volume := range pod.Spec.Volumes {
20 | 			if volume.Secret != nil && volume.Secret.SecretName == secretName {
21 | 				return &pod
22 | 			}
23 | 		}
24 | 		for _, container := range pod.Spec.Containers {
25 | 			for _,
source := range container.EnvFrom {
26 | 				if source.SecretRef != nil && source.SecretRef.Name == secretName {
27 | 					return &pod
28 | 				}
29 | 			}
30 | 			for _, envVar := range container.Env {
31 | 				if envVar.ValueFrom != nil && envVar.ValueFrom.SecretKeyRef != nil && envVar.ValueFrom.SecretKeyRef.Name == secretName {
32 | 					return &pod
33 | 				}
34 | 			}
35 | 		}
36 | 	}
37 | 	return nil
38 | }
39 | 
40 | func deleteSecretIfUnused(ctx context.Context, log logr.Logger, apiClient client.Client, namespace, secretName string) error {
41 | 	qSecretName := types.NamespacedName{Namespace: namespace, Name: secretName}
42 | 
43 | 	// Iterate pods to see if the secret is bound anywhere
44 | 	var allPods corev1.PodList
45 | 	if err := apiClient.List(ctx, &allPods, client.InNamespace(namespace)); err != nil {
46 | 		return err
47 | 	}
48 | 	if usedBy := secretUsedInPod(secretName, &allPods); usedBy != nil {
49 | 		return fmt.Errorf("Secret %s is used in pod %s", secretName, usedBy.Name)
50 | 	}
51 | 
52 | 	var secret corev1.Secret
53 | 	if err := apiClient.Get(ctx, qSecretName, &secret); err != nil {
54 | 		log.Error(err, "unable to fetch credentials secret")
55 | 		return err
56 | 	}
57 | 	return apiClient.Delete(ctx, &secret)
58 | }
59 | 
60 | func listSecretsForDatabase(ctx context.Context, apiClient client.Client, owningDb *dba.ManagedDatabase) (*corev1.SecretList, error) {
61 | 	var foundSecrets corev1.SecretList
62 | 	labelSelector := make(map[string]string)
63 | 	labelSelector["database-uid"] = string(owningDb.UID)
64 | 	if err := apiClient.List(ctx, &foundSecrets, client.InNamespace(owningDb.Namespace), client.MatchingLabels(labelSelector)); err != nil {
65 | 		return nil, err
66 | 	}
67 | 	return &foundSecrets, nil
68 | }
69 | 
70 | func writeCredentialsSecret(
71 | 	ctx context.Context,
72 | 	apiClient client.Client,
73 | 	namespace string,
74 | 	secretName string,
75 | 	username string,
76 | 	password string,
77 | 	labels map[string]string,
78 | 	owner metav1.Object,
79 | 	scheme *runtime.Scheme,
80 | ) error {
81 | 
82 | 	newSecret := corev1.Secret{
83 | 		ObjectMeta: metav1.ObjectMeta{
84 | 			Labels:      labels,
85 | 			Annotations: make(map[string]string),
86 | 			Name:        secretName,
87 | 			Namespace:   namespace,
88 | 		},
89 | 		StringData: map[string]string{
90 | 			"username": username,
91 | 			"password": password,
92 | 		},
93 | 	}
94 | 
95 | 	// TODO figure out a policy for adding annotations and labels
96 | 
97 | 	_ = ctrl.SetControllerReference(owner, &newSecret, scheme)
98 | 
99 | 	return apiClient.Create(ctx, &newSecret)
100 | }
101 | 
-------------------------------------------------------------------------------- /controllers/suite_test.go: --------------------------------------------------------------------------------
1 | /*
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | */
15 | 
16 | package controllers
17 | 
18 | import (
19 | 	"path/filepath"
20 | 	"testing"
21 | 
22 | 	"github.com/go-logr/logr"
23 | 	. "github.com/onsi/ginkgo"
24 | 	.
"github.com/onsi/gomega" 25 | 26 | dbaoperatorv1alpha1 "github.com/app-sre/dba-operator/api/v1alpha1" 27 | "k8s.io/client-go/kubernetes/scheme" 28 | "k8s.io/client-go/rest" 29 | "sigs.k8s.io/controller-runtime/pkg/client" 30 | "sigs.k8s.io/controller-runtime/pkg/envtest" 31 | logf "sigs.k8s.io/controller-runtime/pkg/log" 32 | "sigs.k8s.io/controller-runtime/pkg/log/zap" 33 | // +kubebuilder:scaffold:imports 34 | ) 35 | 36 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 37 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 38 | 39 | var cfg *rest.Config 40 | var k8sClient client.Client 41 | var testEnv *envtest.Environment 42 | var testLogger logr.Logger 43 | 44 | func TestAPIs(t *testing.T) { 45 | RegisterFailHandler(Fail) 46 | 47 | RunSpecsWithDefaultAndCustomReporters(t, 48 | "Controller Suite", 49 | []Reporter{envtest.NewlineReporter{}}) 50 | } 51 | 52 | var _ = BeforeSuite(func(done Done) { 53 | testLogger = zap.LoggerTo(GinkgoWriter, true) 54 | logf.SetLogger(testLogger) 55 | 56 | By("bootstrapping test environment") 57 | testEnv = &envtest.Environment{ 58 | CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, 59 | } 60 | 61 | var err error 62 | cfg, err = testEnv.Start() 63 | Expect(err).ToNot(HaveOccurred()) 64 | Expect(cfg).ToNot(BeNil()) 65 | 66 | err = dbaoperatorv1alpha1.AddToScheme(scheme.Scheme) 67 | Expect(err).NotTo(HaveOccurred()) 68 | 69 | // +kubebuilder:scaffold:scheme 70 | 71 | k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) 72 | Expect(err).ToNot(HaveOccurred()) 73 | Expect(k8sClient).ToNot(BeNil()) 74 | 75 | close(done) 76 | }, 60) 77 | 78 | var _ = AfterSuite(func() { 79 | By("tearing down the test environment") 80 | err := testEnv.Stop() 81 | Expect(err).ToNot(HaveOccurred()) 82 | }) 83 | -------------------------------------------------------------------------------- /deploy/dba-operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: dba-operator 5 | labels: 6 | app: dba-operator 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: dba-operator 12 | template: 13 | metadata: 14 | labels: 15 | app: dba-operator 16 | spec: 17 | containers: 18 | - name: dba-operator 19 | image: quay.io/quay/dba-operator:v0.1.0 20 | ports: 21 | - containerPort: 8080 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: dba-operator 27 | spec: 28 | selector: 29 | app: dba-operator 30 | ports: 31 | - protocol: TCP 32 | port: 8080 33 | targetPort: 8080 34 | -------------------------------------------------------------------------------- /deploy/examples/debug.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: mysql-db-init 5 | data: 6 | db-init.sql: | 7 | GRANT ALL PRIVILEGES ON *.* TO 'user'@'%' WITH GRANT OPTION; 8 | --- 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | metadata: 12 | name: mysql-debug 13 | labels: 14 | app: mysql 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: mysql 20 | template: 21 | metadata: 22 | labels: 23 | app: mysql 24 | spec: 25 | containers: 26 | - name: mysql 27 | image: mysql/mysql-server 28 | ports: 29 | - containerPort: 3306 30 | env: 31 | - name: MYSQL_ROOT_PASSWORD 32 | value: root_password 33 | - name: MYSQL_DATABASE 34 | value: quay 35 | - name: MYSQL_USER 36 | value: user 37 | - name: MYSQL_PASSWORD 38 | 
value: password
39 |         volumeMounts:
40 |         - name: initdb-vol
41 |           mountPath: /docker-entrypoint-initdb.d
42 |       volumes:
43 |       - name: initdb-vol
44 |         configMap:
45 |           name: mysql-db-init
46 | ---
47 | apiVersion: v1
48 | kind: Service
49 | metadata:
50 |   name: mysql-debug
51 | spec:
52 |   selector:
53 |     app: mysql
54 |   ports:
55 |   - protocol: TCP
56 |     port: 3306
57 |     targetPort: 3306
58 | ---
59 | apiVersion: v1
60 | kind: ConfigMap
61 | metadata:
62 |   name: prometheus-server-conf
63 |   labels:
64 |     name: prometheus-server-conf
65 | data:
66 |   prometheus.yml: |-
67 |     global:
68 |       scrape_interval: 5s
69 |       evaluation_interval: 5s
70 | 
71 |     scrape_configs:
72 |       - job_name: 'migration-pushgateway'
73 |         honor_labels: true
74 |         static_configs:
75 |         - targets: ['prom-pushgateway:9091']
76 | ---
77 | apiVersion: apps/v1
78 | kind: Deployment
79 | metadata:
80 |   name: prometheus-deployment
81 |   labels:
82 |     app: prometheus
83 | spec:
84 |   replicas: 1
85 |   selector:
86 |     matchLabels:
87 |       app: prometheus
88 |   template:
89 |     metadata:
90 |       labels:
91 |         app: prometheus
92 |     spec:
93 |       containers:
94 |       - name: prometheus
95 |         image: prom/prometheus:v2.12.0
96 |         args:
97 |         - "--config.file=/etc/prometheus/prometheus.yml"
98 |         ports:
99 |         - containerPort: 9090
100 |         volumeMounts:
101 |         - name: prometheus-config-volume
102 |           mountPath: /etc/prometheus/
103 |       volumes:
104 |       - name: prometheus-config-volume
105 |         configMap:
106 |           defaultMode: 420
107 |           name: prometheus-server-conf
108 | ---
109 | apiVersion: v1
110 | kind: Service
111 | metadata:
112 |   name: prometheus
113 |   annotations:
114 |     prometheus.io/scrape: 'true'
115 |     prometheus.io/port: '9090'
116 | spec:
117 |   selector:
118 |     app: prometheus
119 |   ports:
120 |   - protocol: TCP
121 |     port: 9090
122 |     targetPort: 9090
123 | 
-------------------------------------------------------------------------------- /deploy/examples/migrationcontainer/Dockerfile: --------------------------------------------------------------------------------
1 | FROM registry.access.redhat.com/ubi8/python-36
2 | MAINTAINER Jake Moshenko jmoshenk@redhat.com
3 | 
4 | COPY requirements.txt .
5 | 
6 | RUN pip install -r requirements.txt
7 | 
8 | COPY migration.py .
9 | 
10 | ENTRYPOINT ["python", "migration.py"]
11 | 
-------------------------------------------------------------------------------- /deploy/examples/migrationcontainer/README.md: --------------------------------------------------------------------------------
1 | # Migration Container Interface
2 | 
3 | The migration container will push statistics and completion information to
4 | the Prometheus push gateway specified in the environment. The migration
5 | container should return `0` on success or non-zero on failure.
6 | 
7 | ## Environment
8 | 
9 | ### DBA_OP_PROMETHEUS_PUSH_GATEWAY_ADDR
10 | 
11 | The address of the prometheus push gateway, in the form of:
12 | `localhost:9091`
13 | 
14 | ### DBA_OP_JOB_ID
15 | 
16 | A unique opaque string that is used by the operator to identify the specific
17 | migration being monitored.
18 | 
19 | ### DBA_OP_LABEL_*
20 | 
21 | Labels that should be added as groupings to the prometheus job. Migration
22 | containers should strip the prefix `DBA_OP_LABEL_` and lowercase the
23 | remainder before setting the grouping, as in the sketch below.
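
For illustration, here is a minimal Python sketch of that convention (a
hypothetical helper, not part of this repository; the bundled `migration.py`
defines the same `PROM_LABEL_PREFIX` constant and passes the resulting labels
to `push_to_gateway` as the grouping key):

```python
import os

PROM_LABEL_PREFIX = 'DBA_OP_LABEL_'

def grouping_from_env(environ):
    # Strip the DBA_OP_LABEL_ prefix and lowercase the remainder, so that
    # e.g. DBA_OP_LABEL_DATABASE=db-name becomes {'database': 'db-name'}.
    return {
        name[len(PROM_LABEL_PREFIX):].lower(): value
        for name, value in environ.items()
        if name.startswith(PROM_LABEL_PREFIX)
    }

labels = grouping_from_env(os.environ)
```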
24 | 
25 | ### DBA_OP_CONNECTION_STRING
26 | 
27 | An [xo/dburl](https://github.com/xo/dburl) database DSN that often contains the
28 | username, password, hostname, port, and logical database schema, e.g.:
29 | 
30 | #### MySQL Example Connection String
31 | 
32 | `mysql://username:password@hostname:port/dbname`
33 | 
34 | #### PostgreSQL Example Connection String
35 | 
36 | `postgres://username:password@hostname:port/dbname`
37 | 
38 | ## Prometheus Metrics
39 | 
40 | The container must push the following metrics to the specified prometheus push
41 | gateway, with the `job` grouping set to the value of `DBA_OP_JOB_ID`, as often as possible.
42 | 
43 | ### migration_completion_percent
44 | 
45 | | Prometheus Type | Numerical Type | Values    |
46 | |-----------------|----------------|----------:|
47 | | Gauge           | Float          | 0.0 - 1.0 |
48 | 
49 | An estimate of the percentage of the total migration work that has been completed.
50 | 
51 | ### migration_complete_total
52 | 
53 | | Prometheus Type | Numerical Type | Values |
54 | |-----------------|----------------|-------:|
55 | | Counter         | Binary Int     |   0, 1 |
56 | 
57 | A signal on whether the job has completed (`1`) or not (`0`).
58 | 
59 | ### migration_failed_total
60 | 
61 | | Prometheus Type | Numerical Type | Values |
62 | |-----------------|----------------|-------:|
63 | | Counter         | Binary Int     |   0, 1 |
64 | 
65 | A signal on whether the job has failed (`1`) or not (`0`).
66 | 
67 | ### migration_items_completed_total
68 | 
69 | | Prometheus Type | Numerical Type | Values  |
70 | |-----------------|----------------|--------:|
71 | | Counter         | Integer        | 0 - inf |
72 | 
73 | The number of items that the migration has completed so far. This is used as a
74 | hint about whether progress is being made.
75 | 
--------------------------------------------------------------------------------
/deploy/examples/migrationcontainer/migration.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import logging
4 | import time
5 | import argparse
6 | import re
7 | 
8 | import pymysql.cursors
9 | import pymysql.err
10 | 
11 | from prometheus_client import CollectorRegistry, Gauge, Counter, push_to_gateway
12 | 
13 | 
14 | FORMAT = '%(asctime)s [%(process)d] [%(levelname)s] [%(name)s] %(message)s'
15 | 
16 | TABLE_DEF = """
17 | CREATE TABLE `alembic_version` (
18 |     `version_num` varchar(255) NOT NULL
19 | ) ENGINE=InnoDB DEFAULT CHARSET=UTF8MB4 COLLATE=utf8mb4_bin;
20 | """
21 | PROM_LABEL_PREFIX = 'DBA_OP_LABEL_'
22 | 
23 | logger = logging.getLogger(__name__)
24 | 
25 | def run(db_connection_string, push_gateway_addr, job_id, labels, write_version,
26 |         run_seconds, fail_seconds):
27 | 
28 |     logger.debug('Starting migration')
29 |     registry = CollectorRegistry()
30 | 
31 |     migration_completion_percent = Gauge(
32 |         'migration_completion_percent',
33 |         'Estimate of the completion percentage of the job',
34 |         registry=registry,
35 |     )
36 |     migration_complete_total = Counter(
37 |         'migration_complete_total',
38 |         'Binary value of whether or not the job is complete',
39 |         registry=registry,
40 |     )
41 |     migration_failed_total = Counter(
42 |         'migration_failed_total',
43 |         'Binary value of whether or not the job has failed',
44 |         registry=registry,
45 |     )
46 |     migration_items_completed_total = Counter(
47 |         'migration_items_completed_total',
48 |         'Number of items this migration has completed',
49 |         registry=registry,
50 |     )
51 | 
52 |     def update_metrics():
53 |         push_to_gateway(push_gateway_addr, job=job_id, registry=registry,
54 |                         grouping_key=labels)
55 | 
56
|     for i in range(run_seconds):
57 |         if i >= fail_seconds:
58 |             migration_failed_total.inc()
59 |             update_metrics()
60 |             sys.exit(1)
61 | 
62 |         migration_items_completed_total.inc()
63 |         migration_completion_percent.set(float(i)/run_seconds)
64 |         update_metrics()
65 |         logger.debug('%s/%s items completed', i, run_seconds)
66 |         time.sleep(1)
67 | 
68 |     # Write the completion to the database
69 |     _write_database_version(db_connection_string, write_version)
70 |     migration_complete_total.inc()
71 |     migration_completion_percent.set(1.0)
72 |     update_metrics()
73 | 
74 | 
75 | def _parse_mysql_dsn(db_connection_string):
76 |     # DO NOT use this regex as authoritative for a MySQL DSN
77 |     matcher = re.match(
78 |         r'mysql:\/\/([^:]+):([^@]+)@([^:]+):([0-9]+)\/([a-zA-Z0-9]+)',
79 |         db_connection_string,
80 |     )
81 |     assert matcher is not None
82 | 
83 |     return {
84 |         "host": matcher.group(3),
85 |         "user": matcher.group(1),
86 |         "password": matcher.group(2),
87 |         "database": matcher.group(5),
88 |         "port": int(matcher.group(4)),
89 |     }
90 | 
91 | 
92 | def _write_database_version(db_connection_string, version):
93 |     connection_params = _parse_mysql_dsn(db_connection_string)
94 |     db_conn = pymysql.connect(autocommit=True, **connection_params)
95 | 
96 |     try:
97 |         with db_conn.cursor() as cursor:
98 |             sql = "UPDATE alembic_version SET version_num = %s"
99 |             cursor.execute(sql, (version,))
100 |     except pymysql.err.ProgrammingError:
101 |         # Likely the table was missing
102 |         with db_conn.cursor() as cursor:
103 |             cursor.execute(TABLE_DEF)
104 |             create = "INSERT INTO alembic_version (version_num) VALUES (%s)"
105 |             cursor.execute(create, (version,))
106 | 
107 | 
108 | def _process_label_key(label_key):
109 |     return label_key[len(PROM_LABEL_PREFIX):].lower()
110 | 
111 | 
112 | if __name__ == '__main__':
113 |     logging.basicConfig(format=FORMAT, level=logging.DEBUG)
114 | 
115 |     check_vars = [
116 |         'DBA_OP_PROMETHEUS_PUSH_GATEWAY_ADDR',
117 |         'DBA_OP_JOB_ID',
118 |         'DBA_OP_CONNECTION_STRING',
119 |     ]
120 |     for env_var_name in check_vars:
121 |         if env_var_name not in os.environ:
122 |             logger.error('Must provide the environment variable %s', env_var_name)
123 |             sys.exit(1)
124 | 
125 |     logger = logging.getLogger(os.environ['DBA_OP_JOB_ID'])
126 | 
127 |     parser = argparse.ArgumentParser(
128 |         description='Run a fake migration container.',
129 |     )
130 |     parser.add_argument(
131 |         '--seconds',
132 |         default=30,
133 |         type=int,
134 |         help='Number of seconds for which to run',
135 |     )
136 |     parser.add_argument(
137 |         '--fail_after',
138 |         default=sys.maxsize,
139 |         type=int,
140 |         help='Number of seconds after which to fail (default: succeed)',
141 |     )
142 |     parser.add_argument(
143 |         '--write_version',
144 |         required=True,
145 |         type=str,
146 |         help='Database version to set after completion',
147 |     )
148 |     args = parser.parse_args()
149 | 
150 |     # Parse the env to find labels that we need to add
151 |     labels = {_process_label_key(k): v for k, v in os.environ.items()
152 |               if k.startswith(PROM_LABEL_PREFIX)}
153 | 
154 |     run(
155 |         os.environ['DBA_OP_CONNECTION_STRING'],
156 |         os.environ['DBA_OP_PROMETHEUS_PUSH_GATEWAY_ADDR'],
157 |         os.environ['DBA_OP_JOB_ID'],
158 |         labels,
159 |         args.write_version,
160 |         args.seconds,
161 |         args.fail_after,
162 |     )
163 | 
--------------------------------------------------------------------------------
/deploy/examples/migrationcontainer/requirements.txt:
--------------------------------------------------------------------------------
1 | prometheus-client
2 | PyMySQL
3 | 
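This example container can also be exercised by hand, outside the operator, by
wrapping it in a bare Kubernetes Job that supplies the environment contract
documented in the README above. The manifest below is a hypothetical sketch:
the Job name and the `DBA_OP_LABEL_DATABASE` value are illustrative only,
while the image, command, push gateway address, and DSN come from the example
manifests elsewhere in `deploy/`.

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: example-migration-v1   # illustrative name
spec:
  template:
    spec:
      restartPolicy: Never
      containers:
      - name: migration
        image: quay.io/quaymigrations/test
        command: ["python", "migration.py", "--write_version", "v1"]
        env:
        - name: DBA_OP_PROMETHEUS_PUSH_GATEWAY_ADDR
          value: prom-pushgateway:9091
        - name: DBA_OP_JOB_ID
          value: example-migration-v1
        - name: DBA_OP_LABEL_DATABASE   # illustrative label
          value: quay-production-mysql
        - name: DBA_OP_CONNECTION_STRING
          value: mysql://user:password@mysql-debug:3306/quay
```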
-------------------------------------------------------------------------------- /deploy/examples/quayio-manageddatabase-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dbaoperator.app-sre.redhat.com/v1alpha1 2 | kind: ManagedDatabase 3 | metadata: 4 | name: quay-production-mysql 5 | spec: 6 | connection: 7 | dsnSecret: quayiocreds-mysql 8 | migrationEngine: alembic 9 | desiredSchemaVersion: v3 10 | hintsEngine: 11 | enabled: true 12 | largetableRowsThreshold: 1000000 -------------------------------------------------------------------------------- /deploy/examples/quayiocreds-secret-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: quayiocreds-mysql 5 | type: Opaque 6 | data: 7 | dsn: bXlzcWw6Ly91c2VyOnBhc3N3b3JkQG15c3FsLWRlYnVnOjMzMDYvcXVheQ== -------------------------------------------------------------------------------- /deploy/examples/v1-appdatabasemigration.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dbaoperator.app-sre.redhat.com/v1alpha1 2 | kind: DatabaseMigration 3 | metadata: 4 | name: v1 5 | spec: 6 | previous: null 7 | migrationContainerSpec: 8 | name: v1-phase1-add-new-column 9 | image: quay.io/quaymigrations/test 10 | command: ["python", "migration.py", "--write_version", "v1"] 11 | schemaHints: 12 | - operation: addColumn 13 | table: image 14 | columns: 15 | - name: anintcolumn 16 | - operation: createIndex 17 | table: image 18 | columns: 19 | - name: storage 20 | indexType: index -------------------------------------------------------------------------------- /deploy/examples/v2-appdatabasemigration.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dbaoperator.app-sre.redhat.com/v1alpha1 2 | kind: DatabaseMigration 3 | metadata: 4 | name: v2 5 | spec: 6 | previous: v1 7 | migrationContainerSpec: 8 | name: v2-phase2-backfill 9 | image: quay.io/quaymigrations/test 10 | command: ["python", "migration.py", "--write_version", "v2"] 11 | scalable: true 12 | -------------------------------------------------------------------------------- /deploy/examples/v3-appdatabasemigration.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: dbaoperator.app-sre.redhat.com/v1alpha1 2 | kind: DatabaseMigration 3 | metadata: 4 | name: v3 5 | spec: 6 | previous: v2 7 | migrationContainerSpec: 8 | name: v3-phase3-stop-writing 9 | image: quay.io/quaymigrations/test 10 | command: ["python", "migration.py", "--write_version", "v3", "--fail_after", "10"] 11 | scalable: true 12 | -------------------------------------------------------------------------------- /deploy/manageddatabase.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | creationTimestamp: null 7 | name: manageddatabases.dbaoperator.app-sre.redhat.com 8 | spec: 9 | additionalPrinterColumns: 10 | - JSONPath: .status.currentVersion 11 | name: Current Version 12 | type: string 13 | - JSONPath: .spec.desiredSchemaVersion 14 | name: Desired Version 15 | type: string 16 | - JSONPath: .status.errors[0].message 17 | name: Error 18 | type: string 19 | group: dbaoperator.app-sre.redhat.com 20 | names: 21 | kind: ManagedDatabase 22 | listKind: ManagedDatabaseList 23 | plural: 
manageddatabases 24 | singular: manageddatabase 25 | scope: "" 26 | subresources: 27 | status: {} 28 | validation: 29 | openAPIV3Schema: 30 | description: ManagedDatabase is the Schema for the manageddatabases API 31 | properties: 32 | apiVersion: 33 | description: 'APIVersion defines the versioned schema of this representation 34 | of an object. Servers should convert recognized schemas to the latest 35 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' 36 | type: string 37 | kind: 38 | description: 'Kind is a string value representing the REST resource this 39 | object represents. Servers may infer this from the endpoint the client 40 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' 41 | type: string 42 | metadata: 43 | type: object 44 | spec: 45 | description: ManagedDatabaseSpec defines the desired state of ManagedDatabase 46 | properties: 47 | connection: 48 | description: DatabaseConnectionInfo defines engine specific connection 49 | parameters to establish a connection to the database. 50 | properties: 51 | dsnSecret: 52 | minLength: 1 53 | type: string 54 | type: object 55 | desiredSchemaVersion: 56 | type: string 57 | exportDataMetrics: 58 | description: DataMetrics declares what information the DBA operator 59 | should expose from the database under management 60 | properties: 61 | sqlQuery: 62 | items: 63 | description: SQLQueryMetric describes a SQL query to run against 64 | the database and how to expose it as a metric. It must select 65 | exactly one value in one row, and the value must represent either 66 | a counter (uint) or gauge (float). 67 | properties: 68 | prometheusMetric: 69 | description: PrometheusMetricExporter describes how a given 70 | value should be exported. 71 | properties: 72 | extraLabels: 73 | additionalProperties: 74 | type: string 75 | type: object 76 | helpString: 77 | type: string 78 | name: 79 | type: string 80 | valueType: 81 | enum: 82 | - counter 83 | - gauge 84 | type: string 85 | type: object 86 | query: 87 | pattern: SELECT [^;]+; 88 | type: string 89 | type: object 90 | type: array 91 | tableEstimatedSize: 92 | items: 93 | description: TableReference refers to a DB table by name 94 | properties: 95 | tableName: 96 | type: string 97 | type: object 98 | type: array 99 | tableNextID: 100 | items: 101 | description: TableReference refers to a DB table by name 102 | properties: 103 | tableName: 104 | type: string 105 | type: object 106 | type: array 107 | type: object 108 | hintsEngine: 109 | description: HintsEngineConfig defines the values that can be passed 110 | to the hints engine to help it understand context under which this 111 | manageddatabase runs that can't be queried from the database directly. 112 | properties: 113 | enabled: 114 | type: boolean 115 | largetableRowsThreshold: 116 | format: int64 117 | minimum: 1 118 | type: integer 119 | type: object 120 | migrationContainerConfig: 121 | description: MigrationContainerConfig defines extra configuration that 122 | a migration container may require before it is able to run. Specify 123 | a secret name and how to bind that into the container. 124 | properties: 125 | secret: 126 | type: string 127 | volumeMount: 128 | description: VolumeMount describes a mounting of a Volume within 129 | a container. 
130 | properties: 131 | mountPath: 132 | description: Path within the container at which the volume should 133 | be mounted. Must not contain ':'. 134 | type: string 135 | mountPropagation: 136 | description: mountPropagation determines how mounts are propagated 137 | from the host to container and the other way around. When 138 | not set, MountPropagationNone is used. This field is beta 139 | in 1.10. 140 | type: string 141 | name: 142 | description: This must match the Name of a Volume. 143 | type: string 144 | readOnly: 145 | description: Mounted read-only if true, read-write otherwise 146 | (false or unspecified). Defaults to false. 147 | type: boolean 148 | subPath: 149 | description: Path within the volume from which the container's 150 | volume should be mounted. Defaults to "" (volume's root). 151 | type: string 152 | subPathExpr: 153 | description: Expanded path within the volume from which the 154 | container's volume should be mounted. Behaves similarly to 155 | SubPath but environment variable references $(VAR_NAME) are 156 | expanded using the container's environment. Defaults to "" 157 | (volume's root). SubPathExpr and SubPath are mutually exclusive. 158 | This field is alpha in 1.14. 159 | type: string 160 | required: 161 | - mountPath 162 | - name 163 | type: object 164 | type: object 165 | migrationEngine: 166 | type: string 167 | type: object 168 | status: 169 | description: ManagedDatabaseStatus defines the observed state of ManagedDatabase 170 | properties: 171 | currentVersion: 172 | type: string 173 | errors: 174 | items: 175 | description: ManagedDatabaseError contains information about an error 176 | that occurred when reconciling this ManagedDatabase, and whether 177 | the error is considered temporary/transient. 178 | properties: 179 | message: 180 | type: string 181 | temporary: 182 | type: boolean 183 | type: object 184 | type: array 185 | type: object 186 | type: object 187 | version: v1alpha1 188 | versions: 189 | - name: v1alpha1 190 | served: true 191 | storage: true 192 | status: 193 | acceptedNames: 194 | kind: "" 195 | plural: "" 196 | conditions: [] 197 | storedVersions: [] 198 | -------------------------------------------------------------------------------- /deploy/olm/dba-operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: operators.coreos.com/v1alpha1 2 | kind: ClusterServiceVersion 3 | metadata: 4 | annotations: 5 | name: dba-operator-v0.1.0 6 | spec: 7 | description: This operator automates some of the work that a Database Administrator (DBA) would do to keep a specific database instance healthy. 
8 | displayName: DBA Operator 9 | keywords: 10 | - database 11 | maintainers: 12 | - email: jmoshenk@redhat.com 13 | name: Jake Moshenko 14 | - email: jimi@redhat.com 15 | name: Jimmy Zelinskie 16 | maturity: alpha 17 | provider: 18 | name: Red Hat App-SRE Team 19 | url: https://github.com/app-sre 20 | version: 0.1.0 21 | 22 | installModes: 23 | - supported: true 24 | type: AllNamespaces 25 | 26 | install: 27 | strategy: deployment 28 | spec: 29 | clusterPermissions: 30 | - serviceAccountName: dba-operator 31 | rules: 32 | # DBA operator reads secrets as part of setup, and creates/deletes secrets to gate DB access 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - secrets 37 | verbs: 38 | - '*' 39 | # DBA operator iterates pods to see if a secret is in use 40 | - apiGroups: 41 | - "" 42 | resources: 43 | - pods 44 | verbs: 45 | - get 46 | - list 47 | # DBA operator manages jobs to perform the migrations 48 | - apiGroups: 49 | - batch 50 | resources: 51 | - jobs 52 | verbs: 53 | - '*' 54 | # DBA operator has its own set of CRDs that define the interactions with it 55 | - apiGroups: 56 | - dbaoperator.app-sre.redhat.com 57 | resources: 58 | - databasemigrations 59 | - manageddatabases 60 | verbs: 61 | - '*' 62 | # The DBA operator CRDs have status subresources that must be managed 63 | - apiGroups: 64 | - dbaoperator.app-sre.redhat.com 65 | resources: 66 | - manageddatabases/status 67 | verbs: 68 | - get 69 | - patch 70 | - update 71 | deployments: 72 | - name: dba-operator 73 | spec: 74 | replicas: 1 75 | selector: 76 | matchLabels: 77 | app: dba-operator 78 | template: 79 | metadata: 80 | labels: 81 | app: dba-operator 82 | spec: 83 | containers: 84 | - name: dba-operator 85 | image: quay.io/quay/dba-operator:v0.1.0 86 | ports: 87 | - containerPort: 8080 88 | 89 | customresourcedefinitions: 90 | owned: 91 | - name: databasemigrations.dbaoperator.app-sre.redhat.com 92 | version: v1alpha1 93 | kind: DatabaseMigration 94 | - name: manageddatabases.dbaoperator.app-sre.redhat.com 95 | version: v1alpha1 96 | kind: ManagedDatabase 97 | 98 | nativeAPIs: 99 | - group: "" 100 | version: v1 101 | kind: Pod 102 | - group: "" 103 | version: v1 104 | kind: Secret 105 | - group: batch 106 | version: v1 107 | kind: Job 108 | -------------------------------------------------------------------------------- /deploy/pushgateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: pushgateway-deployment 5 | labels: 6 | app: pushgateway 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: pushgateway 12 | template: 13 | metadata: 14 | labels: 15 | app: pushgateway 16 | spec: 17 | containers: 18 | - name: pushgateway 19 | image: prom/pushgateway:v0.9.1 20 | ports: 21 | - containerPort: 9091 22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: prom-pushgateway 27 | spec: 28 | selector: 29 | app: pushgateway 30 | ports: 31 | - protocol: TCP 32 | port: 9091 33 | targetPort: 9091 34 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/app-sre/dba-operator 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/DATA-DOG/go-sqlmock v1.3.3 7 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect 8 | github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 // indirect 9 | github.com/deckarep/golang-set v1.7.1 10 | 
github.com/go-logr/logr v0.1.0 11 | github.com/go-sql-driver/mysql v1.4.1 12 | github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365 13 | github.com/jmoiron/sqlx v1.2.0 14 | github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 // indirect 15 | github.com/onsi/ginkgo v1.8.0 16 | github.com/onsi/gomega v1.5.0 17 | github.com/pkg/errors v0.8.1 18 | github.com/prometheus/client_golang v0.9.0 19 | github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e 20 | github.com/sirupsen/logrus v1.4.2 // indirect 21 | github.com/stretchr/testify v1.3.0 22 | github.com/xo/dburl v0.0.0-20191116074001-eeed741d1a6d 23 | gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect 24 | k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b 25 | k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d 26 | k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible 27 | sigs.k8s.io/controller-runtime v0.2.1 28 | sigs.k8s.io/controller-tools v0.2.1 // indirect 29 | ) 30 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= 2 | cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= 3 | github.com/DATA-DOG/go-sqlmock v1.3.3 h1:CWUqKXe0s8A2z6qCgkP4Kru7wC11YoAnoupUKFDnH08= 4 | github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= 5 | github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= 6 | github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= 7 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= 8 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 9 | github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 10 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 11 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 12 | github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ= 13 | github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= 14 | github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= 15 | github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= 16 | github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= 17 | github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= 18 | github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= 19 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 20 | github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= 21 | github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= 22 | github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54= 23 | github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= 24 | github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= 25 | github.com/go-sql-driver/mysql v1.4.1 
h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= 26 | github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= 27 | github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= 28 | github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= 29 | github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= 30 | github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= 31 | github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= 32 | github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7 h1:u4bArs140e9+AfE52mFHOXVFnOSBJBRlzTHrOPLOIhE= 33 | github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 34 | github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 35 | github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 36 | github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= 37 | github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 38 | github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 39 | github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= 40 | github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= 41 | github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= 42 | github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= 43 | github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47 h1:UnszMmmmm5vLwWzDjTFVIkfhvWF1NdrmChl8L2NUDCw= 44 | github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= 45 | github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= 46 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 47 | github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365 h1:ECW73yc9MY7935nNYXUkK7Dz17YuSUI9yqRqYS8aBww= 48 | github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= 49 | github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= 50 | github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= 51 | github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 52 | github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 53 | github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= 54 | github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= 55 | github.com/joefitzgerald/rainbow-reporter v0.1.0 h1:AuMG652zjdzI0YCCnXAqATtRBpGXMcAnrajcaTrSeuo= 56 | github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= 57 | github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 58 | github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= 59 | github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 60 | github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= 61 | github.com/kisielk/gotool 
v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 62 | github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 63 | github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= 64 | github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 65 | github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= 66 | github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= 67 | github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= 68 | github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= 69 | github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= 70 | github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= 71 | github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= 72 | github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4= 73 | github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= 74 | github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 75 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 76 | github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE= 77 | github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= 78 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= 79 | github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= 80 | github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 81 | github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= 82 | github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= 83 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 84 | github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= 85 | github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 86 | github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 87 | github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= 88 | github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 89 | github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs= 90 | github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= 91 | github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 92 | github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 93 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 94 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 95 | github.com/prometheus/client_golang v0.9.0 h1:tXuTFVHC03mW0D+Ua1Q2d1EAVqLTuggX50V0VLICCzY= 96 | github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 97 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 
h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= 98 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 99 | github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= 100 | github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 101 | github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= 102 | github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 103 | github.com/sclevine/spec v1.2.0 h1:1Jwdf9jSfDl9NVmt8ndHqbTZ7XCCPbh1jI3hkDBHVYA= 104 | github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= 105 | github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= 106 | github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= 107 | github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= 108 | github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= 109 | github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 110 | github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= 111 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 112 | github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 113 | github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= 114 | github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= 115 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 116 | github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= 117 | github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 118 | go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= 119 | go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 120 | go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= 121 | go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= 122 | go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= 123 | go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= 124 | golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= 125 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= 126 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 127 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 128 | golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 129 | golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= 130 | golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 131 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 132 | golang.org/x/net v0.0.0-20190628185345-da137c7871d7 h1:rTIdg5QFRR7XCaK4LCjBiPbx8j4DQRpdYMnGn/bJUEU= 133 | golang.org/x/net 
v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 134 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be h1:vEDujvNQGv4jgYKudGeI/+DAX4Jffq6hpD55MmoEvKs= 135 | golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= 136 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 137 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 138 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 139 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 140 | golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 141 | golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 142 | golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872 h1:cGjJzUd8RgBw428LXP65YXni0aiGNA4Bl+ls8SmLOm8= 143 | golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 144 | golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= 145 | golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 146 | golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 147 | golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 148 | golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= 149 | golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= 150 | golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 151 | golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 152 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 153 | golang.org/x/tools v0.0.0-20190501045030-23463209683d h1:D7DVZUZEUgsSIDTivnUtVeGfN5AvhDIKtdIZAqx0ieE= 154 | golang.org/x/tools v0.0.0-20190501045030-23463209683d/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= 155 | golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db h1:9hRk1xeL9LTT3yX/941DqeBz87XgHAQuj+TbimYJuiw= 156 | golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= 157 | gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= 158 | gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= 159 | google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 160 | gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 161 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 162 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 163 | gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= 164 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 165 | gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= 166 | gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 167 | gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 168 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 169 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 170 | gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 171 | gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 172 | gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22 h1:0efs3hwEZhFKsCoP8l6dDB1AZWMgnEl3yWXWRZTOaEA= 173 | gopkg.in/yaml.v3 v3.0.0-20190709130402-674ba3eaed22/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= 174 | k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b h1:aBGgKJUM9Hk/3AE8WaZIApnTxG35kbuQba2w+SXqezo= 175 | k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= 176 | k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8 h1:q1Qvjzs/iEdXF6A1a8H3AKVFDzJNcJn3nXMs6R6qFtA= 177 | k8s.io/apiextensions-apiserver v0.0.0-20190409022649-727a075fdec8/go.mod h1:IxkesAMoaCRoLrPJdZNZUQp9NfZnzqaVzLhb2VEQzXE= 178 | k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d h1:Jmdtdt1ZnoGfWWIIik61Z7nKYgO3J+swQJtPYsP9wHA= 179 | k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= 180 | k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible h1:U5Bt+dab9K8qaUmXINrkXO135kA11/i5Kg1RUydgaMQ= 181 | k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= 182 | k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= 183 | k8s.io/klog v0.3.0 h1:0VPpR+sizsiivjIfIAQH/rl8tan6jvWkS7lU+0di3lE= 184 | k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= 185 | k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c h1:3KSCztE7gPitlZmWbNwue/2U0YruD65DqX3INopDAQM= 186 | k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= 187 | k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5 h1:VBM/0P5TWxwk+Nw6Z+lAw3DKgO76g90ETOiA6rfLV1Y= 188 | k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= 189 | sigs.k8s.io/controller-runtime v0.2.1 h1:XwUV7gwU/2Uerl9Vb5TpoA3wMQgOxI/LdLq8UhkSSRA= 190 | sigs.k8s.io/controller-runtime v0.2.1/go.mod h1:9dyohw3ZtoXQuV1e766PHUn+cmrRCIcBh6XIMFNMZ+I= 191 | sigs.k8s.io/controller-tools v0.2.1 h1:HoCik83vXOpPi7KSJWdPRmiGntyOzK0v0BTV4U+pl8o= 192 | sigs.k8s.io/controller-tools v0.2.1/go.mod h1:cenyhL7t2e7izk/Zy7ZxDqQ9YEj0niU5VDL1PWMgZ5s= 193 | sigs.k8s.io/testing_frameworks v0.1.1 h1:cP2l8fkA3O9vekpy5Ks8mmA0NW/F7yBdXf8brkWhVrs= 194 | sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= 195 | sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= 196 | sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= 197 | -------------------------------------------------------------------------------- /hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | */
--------------------------------------------------------------------------------
/main.go:
--------------------------------------------------------------------------------
1 | /*
2 | 
3 | Licensed under the Apache License, Version 2.0 (the "License");
4 | you may not use this file except in compliance with the License.
5 | You may obtain a copy of the License at
6 | 
7 |     http://www.apache.org/licenses/LICENSE-2.0
8 | 
9 | Unless required by applicable law or agreed to in writing, software
10 | distributed under the License is distributed on an "AS IS" BASIS,
11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | See the License for the specific language governing permissions and
13 | limitations under the License.
14 | */
15 | 
16 | package main
17 | 
18 | import (
19 | 	"flag"
20 | 	"os"
21 | 
22 | 	"k8s.io/apimachinery/pkg/runtime"
23 | 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
24 | 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
25 | 	ctrl "sigs.k8s.io/controller-runtime"
26 | 	"sigs.k8s.io/controller-runtime/pkg/log/zap"
27 | 	"sigs.k8s.io/controller-runtime/pkg/metrics"
28 | 
29 | 	// +kubebuilder:scaffold:imports
30 | 
31 | 	_ "github.com/app-sre/dba-operator/pkg/dbadmin/mysqladmin"
32 | 
33 | 	dbaoperatorv1alpha1 "github.com/app-sre/dba-operator/api/v1alpha1"
34 | 	"github.com/app-sre/dba-operator/controllers"
35 | )
36 | 
37 | var (
38 | 	scheme   = runtime.NewScheme()
39 | 	setupLog = ctrl.Log.WithName("setup")
40 | )
41 | 
42 | func init() {
43 | 	_ = clientgoscheme.AddToScheme(scheme)
44 | 
45 | 	_ = dbaoperatorv1alpha1.AddToScheme(scheme)
46 | 	// +kubebuilder:scaffold:scheme
47 | }
48 | 
49 | func main() {
50 | 	var metricsAddr string
51 | 	var enableLeaderElection bool
52 | 	flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
53 | 	flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
54 | 		"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
55 | 	flag.Parse()
56 | 
57 | 	ctrl.SetLogger(zap.Logger(true))
58 | 
59 | 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
60 | 		Scheme:             scheme,
61 | 		MetricsBindAddress: metricsAddr,
62 | 		LeaderElection:     enableLeaderElection,
63 | 	})
64 | 	if err != nil {
65 | 		setupLog.Error(err, "unable to start manager")
66 | 		os.Exit(1)
67 | 	}
68 | 
69 | 	controller := controllers.NewManagedDatabaseController(
70 | 		mgr.GetClient(),
71 | 		mgr.GetScheme(),
72 | 		ctrl.Log.WithName("controllers").WithName("ManagedDatabase"),
73 | 		metrics.Registry,
74 | 	)
75 | 	if err = controller.SetupWithManager(mgr); err != nil {
76 | 		setupLog.Error(err, "unable to create controller", "controller", "ManagedDatabase")
77 | 		os.Exit(1)
78 | 	}
79 | 	// +kubebuilder:scaffold:builder
80 | 
81 | 	setupLog.Info("starting manager")
82 | 	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
83 | 		setupLog.Error(err, "problem running manager")
84 | 		os.Exit(1)
85 | 	}
86 | }
87 | 
--------------------------------------------------------------------------------
/main_test.go:
--------------------------------------------------------------------------------
1 | package main
2 | 
3 | import (
4 | 	"os/exec"
5 | 	"testing"
6 | )
7 | 
8 | func TestGoFmt(t *testing.T) {
9 | 	out, err := exec.Command("gofmt", "-l", ".").Output()
10 | 	if err != nil {
11 | 		t.Fatal(err)
12 | 	}
13 | 
14 | 	if len(out) > 0 {
15 | 		t.Fatalf("gofmt returned results: %s", out)
16 | 	}
17 | }
18 | 
--------------------------------------------------------------------------------
/pkg/dbadmin/alembic/version.go:
--------------------------------------------------------------------------------
1 | package alembic
2 | 
3 | import (
4 | 	"github.com/app-sre/dba-operator/pkg/dbadmin"
5 | )
6 | 
7 | // MigrationEngine is a type which implements the MigrationEngine
8 | // interface for Alembic migrations
9 | type MigrationEngine struct{}
10 | 
11 | // CreateMigrationEngine instantiates a MigrationEngine
12 | func CreateMigrationEngine() dbadmin.MigrationEngine {
13 | 	return &MigrationEngine{}
14 | }
15 | 
16 | // GetVersionQuery implements MigrationEngine
17 | func (amm *MigrationEngine) GetVersionQuery() string {
18 | 	return "SELECT version_num FROM alembic_version LIMIT 1"
19 | }
20 | 
--------------------------------------------------------------------------------
/pkg/dbadmin/connection.go:
--------------------------------------------------------------------------------
1 | package dbadmin
2 | 
3 | import (
4 | 	"database/sql"
5 | 	"errors"
6 | 	"fmt"
7 | 
8 | 	"github.com/xo/dburl"
9 | )
10 | 
11 | type engineInit func(*sql.DB, string, MigrationEngine) (DbAdmin, error)
12 | 
13 | var engines = make(map[string]engineInit)
14 | 
15 | // Register is used to notify that there is a new DbAdmin compatible engine.
16 | func Register(driverName string, initFunc engineInit) {
17 | 	engines[driverName] = initFunc
18 | }
19 | 
20 | // Open will use the specified DSN to create a new engine-specific DbAdmin
21 | // database connection.
22 | func Open(dsn string, migrationEngine MigrationEngine) (DbAdmin, error) {
23 | 	parsed, err := dburl.Parse(dsn)
24 | 	if err != nil {
25 | 		return nil, fmt.Errorf("Unable to parse connection dsn: %w", err)
26 | 	}
27 | 
28 | 	if parsed.User.Username() == "" {
29 | 		return nil, errors.New("Must provide username in the connection DSN")
30 | 	}
31 | 	_, passwordSet := parsed.User.Password()
32 | 	if !passwordSet {
33 | 		return nil, errors.New("Must provide password in the connection DSN")
34 | 	}
35 | 
36 | 	if len(parsed.Path) <= 1 {
37 | 		return nil, errors.New("Must provide specific database name in the connection DSN")
38 | 	}
39 | 	dbName := parsed.Path[1:]
40 | 
41 | 	handle, err := dburl.Open(dsn)
42 | 	if err != nil {
43 | 		return nil, fmt.Errorf("Unable to instantiate engine: %w", err)
44 | 	}
45 | 
46 | 	initFunc, ok := engines[parsed.Driver]
47 | 	if !ok {
48 | 		return nil, fmt.Errorf("Driver (%s) is not registered, please import conforming driver", parsed.Driver)
49 | 	}
50 | 
51 | 	return initFunc(handle, dbName, migrationEngine)
52 | }
53 | 
--------------------------------------------------------------------------------
/pkg/dbadmin/dbadmin.go:
--------------------------------------------------------------------------------
1 | package dbadmin
2 | 
3 | import "io"
4 | 
5 | // TableName is an alias for string to make function signatures more expressive
6 | type TableName string
7 | 
8 | //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . DbAdmin
9 | 
10 | // DbAdmin contains the methods that are used to introspect runtime state
11 | // and control access to a database
12 | type DbAdmin interface {
13 | 	io.Closer
14 | 
15 | 	// WriteCredentials will add a username to the database with the given password
16 | 	WriteCredentials(username, password string) error
17 | 
18 | 	// ListUsernames will return a list of all usernames in the database with
19 | 	// the given prefix.
20 | 	ListUsernames(usernamePrefix string) ([]string, error)
21 | 
22 | 	// VerifyUnusedAndDeleteCredentials will ensure that there are no current
23 | 	// connections using the specified username, and then delete the user.
24 | 	// If there is an active connection using the credentials an error will be
25 | 	// returned.
26 | 	VerifyUnusedAndDeleteCredentials(username string) error
27 | 
28 | 	// GetSchemaVersion will return the current version of the database, usually
29 | 	// as decoded by a MigrationEngine instance.
30 | 	GetSchemaVersion() (string, error)
31 | 
32 | 	// GetTableSizeEstimates will return the estimated number of rows in the
33 | 	// specified tables.
34 | 	//
35 | 	// If a table does not exist, the map will contain the value zero for that
36 | 	// table.
37 | 	GetTableSizeEstimates(tableNames []TableName) (map[TableName]uint64, error)
38 | 
39 | 	// GetNextIDs will return the next ID that will be assigned to a new row in
40 | 	// the specified tables.
41 | 	GetNextIDs(tableNames []TableName) (map[TableName]uint64, error)
42 | 
43 | 	// SelectFloat will select a single value out of a single row and return
44 | 	// only that value as a float.
45 | 	SelectFloat(selectQuery string) (float64, error)
46 | 
47 | 	// IsBlockingIndexCreation will determine whether adding an index to the
48 | 	// specified table will cause writes to block.
49 | 	IsBlockingIndexCreation(tableName TableName, indexType IndexType, columns ...string) (bool, error)
50 | 
51 | 	// ConstraintWillFail returns true if adding the constraint will fail due
52 | 	// to the data itself.
53 | 	ConstraintWillFail(tableName TableName, constraintType ConstraintType, columns ...string) (bool, error)
54 | }
55 | 
56 | // IndexType defines the various types of indexes we will evaluate for being
57 | // applied to the database
58 | type IndexType int
59 | 
60 | const (
61 | 	// Index is a regular single column or compound index.
62 | 	Index IndexType = iota
63 | 
64 | 	// PrimaryKeyIndex is an index that is unique and implicitly applied on
65 | 	// primary key fields.
66 | 	PrimaryKeyIndex
67 | 
68 | 	// UniqueIndex indexes enforce that rows contain unique values for the
69 | 	// indexed column(s). Null values are considered as non-matching and can be
70 | 	// present and repeated.
71 | 	UniqueIndex
72 | 
73 | 	// FulltextIndex indexes aid in the process of full-text searching.
74 | 	FulltextIndex
75 | 
76 | 	// SpatialIndex indexes represent multi-dimensional location based data.
77 | 	SpatialIndex
78 | )
79 | 
80 | // ConstraintType defines the various types of constraints we can evaluate for
81 | // being applied to a database.
82 | type ConstraintType int
83 | 
84 | const (
85 | 	// NotNullConstraint guarantees that a column will not contain any `NULL`
86 | 	// values
87 | 	NotNullConstraint ConstraintType = iota
88 | 
89 | 	// UniqueConstraint requires that all rows in a column have unique values
90 | 	// over the columns specified.
91 | 	UniqueConstraint
92 | )
93 | 
94 | //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 . MigrationEngine
95 | 
96 | // MigrationEngine is an interface for deciphering the bookkeeping information
97 | // stored by a particular migration framework from within a database
98 | type MigrationEngine interface {
99 | 	// GetVersionQuery will return the SQL query that should be run against a
100 | 	// database to return a single string, which represents the current version
101 | 	// of the database.
102 | 	GetVersionQuery() string
103 | }
104 | 
--------------------------------------------------------------------------------
/pkg/dbadmin/dbadminfakes/fake_migration_engine.go:
--------------------------------------------------------------------------------
1 | // Code generated by counterfeiter. DO NOT EDIT.
2 | package dbadminfakes 3 | 4 | import ( 5 | "sync" 6 | 7 | "github.com/app-sre/dba-operator/pkg/dbadmin" 8 | ) 9 | 10 | type FakeMigrationEngine struct { 11 | GetVersionQueryStub func() string 12 | getVersionQueryMutex sync.RWMutex 13 | getVersionQueryArgsForCall []struct { 14 | } 15 | getVersionQueryReturns struct { 16 | result1 string 17 | } 18 | getVersionQueryReturnsOnCall map[int]struct { 19 | result1 string 20 | } 21 | invocations map[string][][]interface{} 22 | invocationsMutex sync.RWMutex 23 | } 24 | 25 | func (fake *FakeMigrationEngine) GetVersionQuery() string { 26 | fake.getVersionQueryMutex.Lock() 27 | ret, specificReturn := fake.getVersionQueryReturnsOnCall[len(fake.getVersionQueryArgsForCall)] 28 | fake.getVersionQueryArgsForCall = append(fake.getVersionQueryArgsForCall, struct { 29 | }{}) 30 | fake.recordInvocation("GetVersionQuery", []interface{}{}) 31 | fake.getVersionQueryMutex.Unlock() 32 | if fake.GetVersionQueryStub != nil { 33 | return fake.GetVersionQueryStub() 34 | } 35 | if specificReturn { 36 | return ret.result1 37 | } 38 | fakeReturns := fake.getVersionQueryReturns 39 | return fakeReturns.result1 40 | } 41 | 42 | func (fake *FakeMigrationEngine) GetVersionQueryCallCount() int { 43 | fake.getVersionQueryMutex.RLock() 44 | defer fake.getVersionQueryMutex.RUnlock() 45 | return len(fake.getVersionQueryArgsForCall) 46 | } 47 | 48 | func (fake *FakeMigrationEngine) GetVersionQueryCalls(stub func() string) { 49 | fake.getVersionQueryMutex.Lock() 50 | defer fake.getVersionQueryMutex.Unlock() 51 | fake.GetVersionQueryStub = stub 52 | } 53 | 54 | func (fake *FakeMigrationEngine) GetVersionQueryReturns(result1 string) { 55 | fake.getVersionQueryMutex.Lock() 56 | defer fake.getVersionQueryMutex.Unlock() 57 | fake.GetVersionQueryStub = nil 58 | fake.getVersionQueryReturns = struct { 59 | result1 string 60 | }{result1} 61 | } 62 | 63 | func (fake *FakeMigrationEngine) GetVersionQueryReturnsOnCall(i int, result1 string) { 64 | fake.getVersionQueryMutex.Lock() 65 | defer fake.getVersionQueryMutex.Unlock() 66 | fake.GetVersionQueryStub = nil 67 | if fake.getVersionQueryReturnsOnCall == nil { 68 | fake.getVersionQueryReturnsOnCall = make(map[int]struct { 69 | result1 string 70 | }) 71 | } 72 | fake.getVersionQueryReturnsOnCall[i] = struct { 73 | result1 string 74 | }{result1} 75 | } 76 | 77 | func (fake *FakeMigrationEngine) Invocations() map[string][][]interface{} { 78 | fake.invocationsMutex.RLock() 79 | defer fake.invocationsMutex.RUnlock() 80 | fake.getVersionQueryMutex.RLock() 81 | defer fake.getVersionQueryMutex.RUnlock() 82 | copiedInvocations := map[string][][]interface{}{} 83 | for key, value := range fake.invocations { 84 | copiedInvocations[key] = value 85 | } 86 | return copiedInvocations 87 | } 88 | 89 | func (fake *FakeMigrationEngine) recordInvocation(key string, args []interface{}) { 90 | fake.invocationsMutex.Lock() 91 | defer fake.invocationsMutex.Unlock() 92 | if fake.invocations == nil { 93 | fake.invocations = map[string][][]interface{}{} 94 | } 95 | if fake.invocations[key] == nil { 96 | fake.invocations[key] = [][]interface{}{} 97 | } 98 | fake.invocations[key] = append(fake.invocations[key], args) 99 | } 100 | 101 | var _ dbadmin.MigrationEngine = new(FakeMigrationEngine) 102 | -------------------------------------------------------------------------------- /pkg/dbadmin/mysqladmin/admin.go: -------------------------------------------------------------------------------- 1 | package mysqladmin 2 | 3 | import ( 4 | "database/sql" 5 | "encoding/hex" 6 | 
"fmt" 7 | "math/rand" 8 | 9 | mapset "github.com/deckarep/golang-set" 10 | "github.com/go-sql-driver/mysql" 11 | "github.com/jmoiron/sqlx" 12 | 13 | "github.com/app-sre/dba-operator/pkg/dbadmin" 14 | "github.com/app-sre/dba-operator/pkg/xerrors" 15 | ) 16 | 17 | const driverName = "mysql" 18 | 19 | func init() { 20 | dbadmin.Register(driverName, CreateMySQLAdmin) 21 | } 22 | 23 | // MySQLDbAdmin is a type which implements DbAdmin for MySQL databases 24 | type MySQLDbAdmin struct { 25 | handle *sqlx.DB 26 | database string 27 | engine dbadmin.MigrationEngine 28 | } 29 | 30 | type sqlValue struct { 31 | value *string 32 | quoted bool 33 | } 34 | 35 | func quoted(needsToBeQuoted string) sqlValue { 36 | return sqlValue{value: &needsToBeQuoted, quoted: true} 37 | } 38 | 39 | func noquote(cantBeQuoted string) sqlValue { 40 | return sqlValue{value: &cantBeQuoted, quoted: false} 41 | } 42 | 43 | // CreateMySQLAdmin will instantiate a MySQLDbAdmin object with the specified 44 | // connection and MigrationEngine. 45 | func CreateMySQLAdmin(db *sql.DB, dbName string, engine dbadmin.MigrationEngine) (dbadmin.DbAdmin, error) { 46 | return &MySQLDbAdmin{sqlx.NewDb(db, driverName), dbName, engine}, nil 47 | } 48 | 49 | func randIdentifier(randomBytes int) string { 50 | identBytes := make([]byte, randomBytes) 51 | rand.Read(identBytes) // nolint:gosec 52 | 53 | // Here we prepend "var" to handle an edge case where some hex (e.g. 1e2) 54 | // gets interpreted as scientific notation by MySQL 55 | return "var" + hex.EncodeToString(identBytes) 56 | } 57 | 58 | // This method attempts to prevent sql injection on MySQL DBMS control commands 59 | // such as CREATE USER and GRANT which don't support variables in prepared statements. 60 | // The design of this operator shouldn't require preventing injection as these values 61 | // are developer supplied and not end-user supplied, but it may help prevent errors 62 | // and should be considered a best practice. 63 | func (mdba *MySQLDbAdmin) indirectSubstitute(format string, args ...sqlValue) xerrors.EnhancedError { 64 | tx, err := mdba.handle.Begin() 65 | if err != nil { 66 | return wrap(err) 67 | } 68 | defer tx.Rollback() // nolint: errcheck 69 | 70 | finalArgs := make([]interface{}, 0, len(args)) 71 | for _, arg := range args { 72 | newIdent := randIdentifier(16) 73 | 74 | if arg.quoted { 75 | finalArgs = append(finalArgs, fmt.Sprintf(`", QUOTE(@%s), "`, newIdent)) 76 | } else { 77 | finalArgs = append(finalArgs, fmt.Sprintf(`", @%s, "`, newIdent)) 78 | } 79 | 80 | _, err = tx.Exec(fmt.Sprintf("SET @%s := ?", newIdent), arg.value) 81 | if err != nil { 82 | return wrap(err) 83 | } 84 | } 85 | 86 | rawSQLStmt := fmt.Sprintf(format, finalArgs...) 
87 | stmtStringName := randIdentifier(16) 88 | createStmt := fmt.Sprintf(`SET @%s := CONCAT("%s")`, stmtStringName, rawSQLStmt) 89 | _, err = tx.Exec(createStmt) 90 | if err != nil { 91 | return wrap(err) 92 | } 93 | 94 | stmtName := randIdentifier(16) 95 | 96 | // nolint:gosec 97 | _, err = tx.Exec(fmt.Sprintf("PREPARE %s FROM @%s", stmtName, stmtStringName)) 98 | if err != nil { 99 | return wrap(err) 100 | } 101 | 102 | _, err = tx.Exec(fmt.Sprintf("EXECUTE %s", stmtName)) 103 | if err != nil { 104 | return wrap(err) 105 | } 106 | 107 | if err := tx.Commit(); err != nil { 108 | return wrap(err) 109 | } 110 | 111 | return nil 112 | } 113 | 114 | // WriteCredentials implements DbAdmin 115 | func (mdba *MySQLDbAdmin) WriteCredentials(username, password string) error { 116 | 117 | err := mdba.indirectSubstitute( 118 | "CREATE USER %s@'%%' IDENTIFIED BY %s", 119 | quoted(username), 120 | quoted(password), 121 | ) 122 | if err != nil { 123 | return fmt.Errorf("Unable to create new user %s: %w", username, err) 124 | } 125 | 126 | err = mdba.indirectSubstitute( 127 | "GRANT SELECT, INSERT, UPDATE, DELETE ON %s.* TO %s", 128 | noquote(mdba.database), 129 | quoted(username), 130 | ) 131 | if err != nil { 132 | return fmt.Errorf("Unable to grant permission to new user %s: %w", username, err) 133 | } 134 | 135 | return nil 136 | } 137 | 138 | // ListUsernames implements DbAdmin 139 | func (mdba *MySQLDbAdmin) ListUsernames(usernamePrefix string) ([]string, error) { 140 | var usernames []string 141 | if err := mdba.handle.Select( 142 | &usernames, 143 | "SELECT user FROM mysql.user WHERE user LIKE ?", 144 | usernamePrefix+"%", 145 | ); err != nil { 146 | return []string{}, fmt.Errorf("Unable to list existing usernames: %w", wrap(err)) 147 | } 148 | 149 | return usernames, nil 150 | } 151 | 152 | // VerifyUnusedAndDeleteCredentials implements DbAdmin 153 | func (mdba *MySQLDbAdmin) VerifyUnusedAndDeleteCredentials(username string) error { 154 | var sessionCount int 155 | if err := mdba.handle.Get( 156 | &sessionCount, 157 | "SELECT COUNT(*) FROM information_schema.processlist WHERE user = ?", 158 | username, 159 | ); err != nil { 160 | return fmt.Errorf("Unable to query or parse session count for user %s: %w", username, wrap(err)) 161 | } 162 | 163 | if sessionCount > 0 { 164 | return xerrors.NewTempErrorf("Unable to remove user %s, %d active sessions remaining", username, sessionCount) 165 | } 166 | 167 | if err := mdba.indirectSubstitute( 168 | "DROP USER %s", 169 | quoted(username), 170 | ); err != nil { 171 | return fmt.Errorf("Unable to remove user %s from the database: %w", username, err) 172 | } 173 | 174 | return nil 175 | } 176 | 177 | // GetSchemaVersion implements DbAdmin 178 | func (mdba *MySQLDbAdmin) GetSchemaVersion() (string, error) { 179 | var version string 180 | if err := mdba.handle.Get(&version, mdba.engine.GetVersionQuery()); err != nil { 181 | mysqlErr, ok := err.(*mysql.MySQLError) 182 | if ok && mysqlErr.Number == 1146 { 183 | // No migration engine metadata, likely an empty database 184 | return "", nil 185 | } 186 | return "", wrap(err) 187 | } 188 | 189 | return version, nil 190 | } 191 | 192 | // GetTableSizeEstimates implements DbAdmin 193 | func (mdba *MySQLDbAdmin) GetTableSizeEstimates(tableNames []dbadmin.TableName) (map[dbadmin.TableName]uint64, error) { 194 | estimates := make(map[dbadmin.TableName]uint64) 195 | 196 | // Pre-allocate the map with zeroes for each table 197 | for _, tableName := range tableNames { 198 | estimates[tableName] = 0 199 | } 200 |
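// Tables requested here but absent from INFORMATION_SCHEMA keep their zero entry, so callers still get a value for every requested table (see TestGetTableSizeEstimatesMissingTables).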
201 | if len(tableNames) == 0 { 202 | return estimates, nil 203 | } 204 | 205 | query, args, err := sqlx.In("SELECT TABLE_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN (?);", tableNames) 206 | if err != nil { 207 | return estimates, fmt.Errorf("Unable to prepare query to load table size estimates: %w", err) 208 | } 209 | query = mdba.handle.Rebind(query) 210 | 211 | var results []struct { 212 | TableName string `db:"TABLE_NAME"` 213 | TableRows uint64 `db:"TABLE_ROWS"` 214 | } 215 | if err := mdba.handle.Select(&results, query, args...); err != nil { 216 | return estimates, wrap(err) 217 | } 218 | 219 | for _, result := range results { 220 | estimates[dbadmin.TableName(result.TableName)] = result.TableRows 221 | } 222 | 223 | return estimates, nil 224 | } 225 | 226 | // GetNextIDs implements DbAdmin 227 | func (mdba *MySQLDbAdmin) GetNextIDs(tableNames []dbadmin.TableName) (map[dbadmin.TableName]uint64, error) { 228 | nextIDs := make(map[dbadmin.TableName]uint64) 229 | 230 | if len(tableNames) == 0 { 231 | return nextIDs, nil 232 | } 233 | 234 | query, args, err := sqlx.In("SELECT TABLE_NAME, AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN (?);", tableNames) 235 | if err != nil { 236 | return nextIDs, fmt.Errorf("Unable to prepare query to load table next IDs: %w", err) 237 | } 238 | query = mdba.handle.Rebind(query) 239 | 240 | var results []struct { 241 | TableName string `db:"TABLE_NAME"` 242 | AutoIncrement uint64 `db:"AUTO_INCREMENT"` 243 | } 244 | if err := mdba.handle.Select(&results, query, args...); err != nil { 245 | return nextIDs, wrap(err) 246 | } 247 | 248 | if len(tableNames) != len(results) { 249 | return nextIDs, fmt.Errorf("Unable to load table nextIDs for all tables, expected %d, got %d", len(tableNames), len(results)) 250 | } 251 | 252 | for _, result := range results { 253 | nextIDs[dbadmin.TableName(result.TableName)] = result.AutoIncrement 254 | } 255 | 256 | return nextIDs, nil 257 | } 258 | 259 | // SelectFloat implements DbAdmin 260 | func (mdba *MySQLDbAdmin) SelectFloat(selectQuery string) (result float64, _ error) { 261 | if err := mdba.handle.Get(&result, selectQuery); err != nil { 262 | return result, wrap(err) 263 | } 264 | 265 | return result, nil 266 | } 267 | 268 | // IsBlockingIndexCreation implements DbAdmin 269 | func (mdba *MySQLDbAdmin) IsBlockingIndexCreation(tableName dbadmin.TableName, indexType dbadmin.IndexType, columns ...string) (bool, error) { 270 | return false, fmt.Errorf("Not implemented") 271 | } 272 | 273 | // ConstraintWillFail implements DbAdmin 274 | func (mdba *MySQLDbAdmin) ConstraintWillFail(tableName dbadmin.TableName, constraintType dbadmin.ConstraintType, columns ...string) (bool, error) { 275 | switch constraintType { 276 | case dbadmin.NotNullConstraint: 277 | // Find out if the table already has a not null constraint on all of the named columns. Identifiers can't be bound as parameters in a prepared statement, so the table name is inlined directly. 278 | query, args, err := sqlx.In(fmt.Sprintf("SHOW COLUMNS FROM `%s` WHERE Field IN (?);", tableName), columns) 279 | if err != nil { 280 | return true, fmt.Errorf("Unable to prepare query to read table definition: %w", err) 281 | } 282 | query = mdba.handle.Rebind(query) 283 | 284 | var fieldDefinitions []struct { 285 | Field string 286 | Type string 287 | Null string 288 | } 289 | if err := mdba.handle.Select(&fieldDefinitions, query, args...); err != nil { 290 | return true, wrap(err) 291 | } 292 | 293 | fieldNullableMap := make(map[string]string, len(fieldDefinitions)) 294 | for _, fieldDefinition := range fieldDefinitions { 295 | fieldNullableMap[fieldDefinition.Field] = fieldDefinition.Null 296 | } 297 |
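// Note: SHOW COLUMNS reports Null = "YES" for columns that currently allow NULL values, which is what the nullability check below keys on.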
298 | for _, columnToCheck := range columns { 299 | definition, ok := fieldNullableMap[columnToCheck] 300 | if !ok { 301 | // Will definitely fail because the column doesn't exist 302 | return true, nil 303 | } 304 | 305 | if definition == "YES" { 306 | // The table currently allows for nulls in the column, and we're 307 | // trying to make it not nullable. Check if there are any nulls 308 | // in that column. 309 | query := fmt.Sprintf("SELECT * FROM `%s` WHERE `%s` IS NULL LIMIT 1;", tableName, columnToCheck) 310 | _, err := mdba.handle.QueryRowx(query).SliceScan() 311 | if err != sql.ErrNoRows { 312 | if err != nil { 313 | return true, wrap(err) 314 | } 315 | 316 | // A value was returned, meaning there are null rows in this column 317 | return true, nil 318 | } 319 | } 320 | } 321 | return false, nil 322 | case dbadmin.UniqueConstraint: 323 | listUniqueIndexQuery := fmt.Sprintf("SHOW INDEX FROM `%s` WHERE Non_unique = 0;", tableName) 324 | 325 | var uniqueIndexRows []struct { 326 | KeyName string `db:"Key_name"` 327 | ColumnName string `db:"Column_name"` 328 | } 329 | if err := mdba.handle.Select(&uniqueIndexRows, listUniqueIndexQuery); err != nil { 330 | return true, wrap(err) 331 | } 332 | 333 | columnsByIndex := make(map[string]mapset.Set) 334 | for _, uniqueIndexRow := range uniqueIndexRows { 335 | indexColumnSet, ok := columnsByIndex[uniqueIndexRow.KeyName] 336 | if !ok { 337 | indexColumnSet = mapset.NewSet() 338 | columnsByIndex[uniqueIndexRow.KeyName] = indexColumnSet 339 | } 340 | 341 | indexColumnSet.Add(uniqueIndexRow.ColumnName) 342 | } 343 | 344 | newConstraintColumnSet := mapset.NewSet() 345 | for _, columnName := range columns { 346 | newConstraintColumnSet.Add(columnName) 347 | } 348 | for _, existingIndexColumnSet := range columnsByIndex { 349 | if existingIndexColumnSet.Equal(newConstraintColumnSet) { 350 | // We can add this constraint because there is already a unique 351 | // index over the same columns 352 | return false, nil 353 | } 354 | } 355 | 356 | // There is no existing index, we must check if there is some value in 357 | // the database which violates the constraint. Identifiers can't be bound 358 | // as parameters, so the table and column names are inlined directly.
359 | groupBy := "" 360 | for i, columnName := range columns { 361 | if i > 0 { 362 | groupBy += ", " 363 | } 364 | groupBy += "`" + columnName + "`" 365 | } 366 | 367 | query := fmt.Sprintf("SELECT COUNT(*) FROM `%s` GROUP BY %s HAVING COUNT(*) > 1 LIMIT 1;", tableName, groupBy) 368 | _, err := mdba.handle.QueryRowx(query).SliceScan() 369 | if err != sql.ErrNoRows { 370 | if err != nil { 371 | return true, wrap(err) 372 | } 373 | 374 | // A value was returned, meaning there are duplicate rows 375 | return true, nil 376 | } 377 | 378 | return false, nil 379 | default: 380 | return false, fmt.Errorf("Unknown constraint type: %d", constraintType) 381 | } 382 | } 383 | 384 | // Close implements DbAdmin 385 | func (mdba *MySQLDbAdmin) Close() error { 386 | return mdba.handle.Close() 387 | } 388 | -------------------------------------------------------------------------------- /pkg/dbadmin/mysqladmin/admin_test.go: -------------------------------------------------------------------------------- 1 | package mysqladmin 2 | 3 | import ( 4 | "errors" 5 | "testing" 6 | 7 | "github.com/DATA-DOG/go-sqlmock" 8 | "github.com/go-sql-driver/mysql" 9 | "github.com/jmoiron/sqlx" 10 | "github.com/stretchr/testify/assert" 11 | 12 | "github.com/app-sre/dba-operator/pkg/dbadmin" 13 | "github.com/app-sre/dba-operator/pkg/dbadmin/dbadminfakes" 14 | "github.com/app-sre/dba-operator/pkg/xerrors" 15 | ) 16 | 17 | const ( 18 | fakeDBName = "fakedb" 19 | fakeVersion = "123" 20 | fakeUsername = "fakeUser" 21 | fakePassword = "fakePassword" 22 | versionQuery = "SELECT version_num FROM version_table LIMIT 1" 23 | ) 24 | 25 | var emptyResult = sqlmock.NewResult(0, 0) 26 | 27 | func createMockDB(t *testing.T) (dbadmin.DbAdmin, sqlmock.Sqlmock) { 28 | mockDB, mock, err := sqlmock.New() 29 | assert.NoError(t, err, "Sqlmock should not raise an error") 30 | handle := sqlx.NewDb(mockDB, "mysql") 31 | 32 | migrationEngine := dbadminfakes.FakeMigrationEngine{} 33 | migrationEngine.GetVersionQueryReturns(versionQuery) 34 | return &MySQLDbAdmin{handle, fakeDBName, &migrationEngine}, mock 35 | } 36 | 37 | func TestGetSchemaVersion(t *testing.T) { 38 | assert := assert.New(t) 39 | db, mock := createMockDB(t) 40 | mock.ExpectQuery(versionQuery).WillReturnRows(mock.NewRows([]string{"version_num"}).AddRow(fakeVersion)) 41 | 42 | currentVersion, err := db.GetSchemaVersion() 43 | assert.NoError(err) 44 | assert.Equal(fakeVersion, currentVersion) 45 | } 46 | 47 | func TestGetSchemaVersionEmptyDB(t *testing.T) { 48 | assert := assert.New(t) 49 | db, mock := createMockDB(t) 50 | mock.ExpectQuery(versionQuery).WillReturnError(&mysql.MySQLError{Number: 1146, Message: "Unknown table"}) 51 | 52 | currentVersion, err := db.GetSchemaVersion() 53 | assert.NoError(err) 54 | assert.Equal("", currentVersion) 55 | } 56 | 57 | var retryableErrorTests = []struct { 58 | err error 59 | retryable bool 60 | }{ 61 | // Positive cases 62 | {&mysql.MySQLError{Number: 1020, Message: "0xdeadbeef"}, true}, 64 | {&mysql.MySQLError{Number: 1036, Message: "0xdeadbeef"}, true}, 65 | {&mysql.MySQLError{Number: 1040, Message: "0xdeadbeef"}, true}, 66 | {&mysql.MySQLError{Number: 1043, Message: "0xdeadbeef"}, true}, 67 | {&mysql.MySQLError{Number: 1053, Message: "0xdeadbeef"}, true}, 68 | {&mysql.MySQLError{Number: 1105, Message: "0xdeadbeef"}, true}, 69 | {&mysql.MySQLError{Number: 1129, Message: "0xdeadbeef"}, true}, 70 | {&mysql.MySQLError{Number: 1180, Message: "0xdeadbeef"}, true}, 71 | {&mysql.MySQLError{Number: 1181, Message: "0xdeadbeef"},
true}, 72 | {&mysql.MySQLError{Number: 1182, Message: "0xdeadbeef"}, true}, 73 | {&mysql.MySQLError{Number: 1188, Message: "0xdeadbeef"}, true}, 74 | {&mysql.MySQLError{Number: 1202, Message: "0xdeadbeef"}, true}, 75 | {&mysql.MySQLError{Number: 1203, Message: "0xdeadbeef"}, true}, 76 | {&mysql.MySQLError{Number: 1205, Message: "0xdeadbeef"}, true}, 77 | {&mysql.MySQLError{Number: 1206, Message: "0xdeadbeef"}, true}, 78 | {&mysql.MySQLError{Number: 1218, Message: "0xdeadbeef"}, true}, 79 | {&mysql.MySQLError{Number: 1220, Message: "0xdeadbeef"}, true}, 80 | {&mysql.MySQLError{Number: 1290, Message: "0xdeadbeef"}, true}, 81 | {&mysql.MySQLError{Number: 1297, Message: "0xdeadbeef"}, true}, 82 | {&mysql.MySQLError{Number: 1317, Message: "0xdeadbeef"}, true}, 83 | {&mysql.MySQLError{Number: 1637, Message: "0xdeadbeef"}, true}, 84 | {&mysql.MySQLError{Number: 1836, Message: "0xdeadbeef"}, true}, 85 | {&mysql.MySQLError{Number: 1874, Message: "0xdeadbeef"}, true}, 86 | {&mysql.MySQLError{Number: 3019, Message: "0xdeadbeef"}, true}, 87 | {&mysql.MySQLError{Number: 3032, Message: "0xdeadbeef"}, true}, 88 | {&mysql.MySQLError{Number: 3168, Message: "0xdeadbeef"}, true}, 89 | {&mysql.MySQLError{Number: 3169, Message: "0xdeadbeef"}, true}, 90 | {&mysql.MySQLError{Number: 3186, Message: "0xdeadbeef"}, true}, 91 | {&mysql.MySQLError{Number: 3572, Message: "0xdeadbeef"}, true}, 92 | {mysql.ErrInvalidConn, true}, 93 | {mysql.ErrMalformPkt, true}, 94 | {mysql.ErrPktSync, true}, 95 | {mysql.ErrPktSyncMul, true}, 96 | {mysql.ErrBusyBuffer, true}, 97 | 98 | // Negative cases 99 | {mysql.ErrUnknownPlugin, false}, 100 | {&mysql.MySQLError{Number: 1000, Message: "0xdeadbeef"}, false}, 101 | {&mysql.MySQLError{Number: 0, Message: "0xdeadbeef"}, false}, 102 | } 103 | 104 | func TestRetryableError(t *testing.T) { 105 | assert := assert.New(t) 106 | db, mock := createMockDB(t) 107 | 108 | for _, errorTest := range retryableErrorTests { 109 | mock.ExpectQuery(versionQuery).WillReturnError(errorTest.err) 110 | _, err := db.GetSchemaVersion() 111 | assert.Error(err) 112 | 113 | var maybeTemporary xerrors.EnhancedError 114 | errors.As(err, &maybeTemporary) 115 | 116 | isTemporary := false 117 | if maybeTemporary != nil { 118 | isTemporary = maybeTemporary.Temporary() 119 | } 120 | assert.Equal(errorTest.retryable, isTemporary) 121 | } 122 | } 123 | 124 | func TestWriteCredentials(t *testing.T) { 125 | assert := assert.New(t) 126 | db, mock := createMockDB(t) 127 | 128 | mock.ExpectBegin() 129 | mock.ExpectExec(`^SET @(\S+) := ?`).WithArgs(fakeUsername).WillReturnResult(emptyResult) 130 | mock.ExpectExec(`^SET @(\S+) := ?`).WithArgs(fakePassword).WillReturnResult(emptyResult) 131 | mock.ExpectExec(`^SET @(\S+) := CONCAT\("CREATE USER ", QUOTE\([^\)]+\), "@'%' IDENTIFIED BY ", QUOTE\([^\)]+\), ""\)`).WillReturnResult(emptyResult) 132 | mock.ExpectExec(`^PREPARE (\S+) FROM (\S+)`).WillReturnResult(emptyResult) 133 | mock.ExpectExec(`EXECUTE (\S+)`).WillReturnResult(emptyResult) 134 | mock.ExpectCommit() 135 | 136 | mock.ExpectBegin() 137 | mock.ExpectExec(`^SET @(\S+) := ?`).WithArgs(fakeDBName).WillReturnResult(emptyResult) 138 | mock.ExpectExec(`^SET @(\S+) := ?`).WithArgs(fakeUsername).WillReturnResult(emptyResult) 139 | mock.ExpectExec(`^SET @(\S+) := CONCAT\("GRANT SELECT, INSERT, UPDATE, DELETE ON ", @(\S+), "\.\* TO ", QUOTE\(@(\S+)\), ""\)`).WillReturnResult(emptyResult) 140 | mock.ExpectExec(`^PREPARE (\S+) FROM (\S+)`).WillReturnResult(emptyResult) 141 | mock.ExpectExec(`EXECUTE 
(\S+)`).WillReturnResult(emptyResult) 142 | mock.ExpectCommit() 143 | 144 | assert.NoError(db.WriteCredentials(fakeUsername, fakePassword)) 145 | } 146 | 147 | func TestWriteCredentialsConflict(t *testing.T) { 148 | assert := assert.New(t) 149 | db, mock := createMockDB(t) 150 | 151 | mock.ExpectBegin() 152 | mock.ExpectExec(`^SET @(\S+) := ?`).WithArgs(fakeUsername).WillReturnResult(emptyResult) 153 | mock.ExpectExec(`^SET @(\S+) := ?`).WithArgs(fakePassword).WillReturnResult(emptyResult) 154 | mock.ExpectExec(`^SET @(\S+) := CONCAT\("CREATE USER ", QUOTE\([^\)]+\), "@'%' IDENTIFIED BY ", QUOTE\([^\)]+\), ""\)`).WillReturnResult(emptyResult) 155 | mock.ExpectExec(`^PREPARE (\S+) FROM (\S+)`).WillReturnResult(emptyResult) 156 | expectedError := errors.New("username already exists") 157 | mock.ExpectExec(`EXECUTE (\S+)`).WillReturnError(expectedError) 158 | mock.ExpectRollback() 159 | 160 | err := db.WriteCredentials(fakeUsername, fakePassword) 161 | assert.Truef(errors.Is(err, expectedError), "Only the expected error should be returned, actual error: %v", err) 162 | } 163 | 164 | func TestListUsernames(t *testing.T) { 165 | assert := assert.New(t) 166 | db, mock := createMockDB(t) 167 | 168 | rows := mock.NewRows([]string{"user"}).AddRow("dba123").AddRow("dba456") 169 | mock.ExpectQuery("^SELECT user FROM mysql.user WHERE user LIKE ?").WithArgs("dba%").WillReturnRows(rows) 170 | 171 | usernames, err := db.ListUsernames("dba") 172 | assert.NoError(err) 173 | assert.Equal(2, len(usernames)) 174 | } 175 | 176 | func TestDeleteCredentialsUnused(t *testing.T) { 177 | assert := assert.New(t) 178 | db, mock := createMockDB(t) 179 | 180 | mock.ExpectQuery(`SELECT COUNT\(\*\) FROM information_schema.processlist WHERE user = ?`). 181 | WithArgs(fakeUsername). 182 | WillReturnRows(mock.NewRows([]string{"count"}).AddRow(0)) 183 | 184 | mock.ExpectBegin() 185 | mock.ExpectExec(`^SET @(\S+) := ?`).WithArgs(fakeUsername).WillReturnResult(emptyResult) 186 | mock.ExpectExec(`^SET @(\S+) := CONCAT\("DROP USER ", QUOTE\([^\)]+\), ""\)`).WillReturnResult(emptyResult) 187 | mock.ExpectExec(`^PREPARE (\S+) FROM (\S+)`).WillReturnResult(emptyResult) 188 | mock.ExpectExec(`EXECUTE (\S+)`).WillReturnResult(emptyResult) 189 | mock.ExpectCommit() 190 | 191 | err := db.VerifyUnusedAndDeleteCredentials(fakeUsername) 192 | assert.NoError(err) 193 | } 194 | 195 | func TestDeleteCredentialsStillUsed(t *testing.T) { 196 | assert := assert.New(t) 197 | db, mock := createMockDB(t) 198 | 199 | mock.ExpectQuery(`SELECT COUNT\(\*\) FROM information_schema.processlist WHERE user = ?`). 200 | WithArgs(fakeUsername). 201 | WillReturnRows(mock.NewRows([]string{"count"}).AddRow(1)) 202 | 203 | err := db.VerifyUnusedAndDeleteCredentials(fakeUsername) 204 | 205 | var maybeTemporary xerrors.EnhancedError 206 | assert.True(errors.As(err, &maybeTemporary)) 207 | assert.True(maybeTemporary.Temporary()) 208 | } 209 | 210 | func TestGetTableSizeEstimates(t *testing.T) { 211 | assert := assert.New(t) 212 | db, mock := createMockDB(t) 213 | 214 | table1 := dbadmin.TableName("fakeTable1") 215 | mock.ExpectQuery(`SELECT TABLE_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN \(\?\);`). 216 | WithArgs(table1). 
217 | WillReturnRows(mock.NewRows([]string{"TABLE_NAME", "TABLE_ROWS"}).AddRow(table1, 50)) 218 | 219 | estimates, err := db.GetTableSizeEstimates([]dbadmin.TableName{table1}) 220 | assert.NoError(err) 221 | 222 | rowCount, ok := estimates[table1] 223 | assert.True(ok) 224 | assert.Equal(uint64(50), rowCount) 225 | 226 | _, notOk := estimates[dbadmin.TableName("notARealTable")] 227 | assert.False(notOk) 228 | } 229 | 230 | func TestGetTableSizeEstimatesMultiple(t *testing.T) { 231 | assert := assert.New(t) 232 | db, mock := createMockDB(t) 233 | 234 | table1 := dbadmin.TableName("fakeTable1") 235 | table2 := dbadmin.TableName("fakeTable2") 236 | mock.ExpectQuery(`SELECT TABLE_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN \(\?, \?\);`). 237 | WithArgs(table1, table2). 238 | WillReturnRows(mock.NewRows([]string{"TABLE_NAME", "TABLE_ROWS"}).AddRow(table1, 50).AddRow(table2, 100)) 239 | 240 | estimates, err := db.GetTableSizeEstimates([]dbadmin.TableName{table1, table2}) 241 | assert.NoError(err) 242 | 243 | rowCount, ok := estimates[table1] 244 | assert.True(ok) 245 | assert.Equal(uint64(50), rowCount) 246 | 247 | rowCount, ok = estimates[table2] 248 | assert.True(ok) 249 | assert.Equal(uint64(100), rowCount) 250 | } 251 | 252 | func TestGetTableSizeEstimatesMissingTables(t *testing.T) { 253 | assert := assert.New(t) 254 | db, mock := createMockDB(t) 255 | 256 | table1 := dbadmin.TableName("fakeTable1") 257 | table2 := dbadmin.TableName("fakeTable2") 258 | mock.ExpectQuery(`SELECT TABLE_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN \(\?, \?\);`). 259 | WithArgs(table1, table2). 260 | WillReturnRows(mock.NewRows([]string{"TABLE_NAME", "TABLE_ROWS"}).AddRow(table1, 50)) 261 | 262 | estimates, err := db.GetTableSizeEstimates([]dbadmin.TableName{table1, table2}) 263 | assert.NoError(err) 264 | assert.Equal(uint64(50), estimates[table1]) 265 | assert.Equal(uint64(0), estimates[table2]) 266 | } 267 | 268 | func TestGetNextIDs(t *testing.T) { 269 | assert := assert.New(t) 270 | db, mock := createMockDB(t) 271 | 272 | table1 := dbadmin.TableName("fakeTable1") 273 | mock.ExpectQuery(`SELECT TABLE_NAME, AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN \(\?\);`). 274 | WithArgs(table1). 275 | WillReturnRows(mock.NewRows([]string{"TABLE_NAME", "AUTO_INCREMENT"}).AddRow(table1, 50)) 276 | 277 | estimates, err := db.GetNextIDs([]dbadmin.TableName{table1}) 278 | assert.NoError(err) 279 | 280 | rowCount, ok := estimates[table1] 281 | assert.True(ok) 282 | assert.Equal(uint64(50), rowCount) 283 | 284 | _, notOk := estimates[dbadmin.TableName("notARealTable")] 285 | assert.False(notOk) 286 | } 287 | 288 | func TestGetNextIDsMultiple(t *testing.T) { 289 | assert := assert.New(t) 290 | db, mock := createMockDB(t) 291 | 292 | table1 := dbadmin.TableName("fakeTable1") 293 | table2 := dbadmin.TableName("fakeTable2") 294 | mock.ExpectQuery(`SELECT TABLE_NAME, AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN \(\?, \?\);`). 295 | WithArgs(table1, table2). 
296 | WillReturnRows(mock.NewRows([]string{"TABLE_NAME", "AUTO_INCREMENT"}).AddRow(table1, 50).AddRow(table2, 100)) 297 | 298 | estimates, err := db.GetNextIDs([]dbadmin.TableName{table1, table2}) 299 | assert.NoError(err) 300 | 301 | rowCount, ok := estimates[table1] 302 | assert.True(ok) 303 | assert.Equal(uint64(50), rowCount) 304 | 305 | rowCount, ok = estimates[table2] 306 | assert.True(ok) 307 | assert.Equal(uint64(100), rowCount) 308 | } 309 | 310 | func TestGetNextIDsMissingTables(t *testing.T) { 311 | assert := assert.New(t) 312 | db, mock := createMockDB(t) 313 | 314 | table1 := dbadmin.TableName("fakeTable1") 315 | table2 := dbadmin.TableName("fakeTable2") 316 | mock.ExpectQuery(`SELECT TABLE_NAME, AUTO_INCREMENT FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME IN \(\?, \?\);`). 317 | WithArgs(table1, table2). 318 | WillReturnRows(mock.NewRows([]string{"TABLE_NAME", "AUTO_INCREMENT"}).AddRow(table1, 50)) 319 | 320 | _, err := db.GetNextIDs([]dbadmin.TableName{table1, table2}) 321 | assert.Error(err) 322 | } 323 | 324 | func TestSelectFloat(t *testing.T) { 325 | assert := assert.New(t) 326 | db, mock := createMockDB(t) 327 | 328 | mock.ExpectQuery(`SELECT COUNT\(\*\) FROM tableName`). 329 | WillReturnRows(mock.NewRows([]string{"count"}).AddRow(1000)) 330 | 331 | floatVal, err := db.SelectFloat(`SELECT COUNT(*) FROM tableName`) 332 | assert.NoError(err) 333 | assert.Equal(1000.0, floatVal) 334 | } 335 | 336 | func TestClose(t *testing.T) { 337 | assert := assert.New(t) 338 | db, mock := createMockDB(t) 339 | 340 | mock.ExpectClose() 341 | 342 | assert.NoError(db.Close()) 343 | } 344 | -------------------------------------------------------------------------------- /pkg/dbadmin/mysqladmin/errors.go: -------------------------------------------------------------------------------- 1 | package mysqladmin 2 | 3 | import ( 4 | "errors" 5 | 6 | "github.com/go-sql-driver/mysql" 7 | 8 | "github.com/app-sre/dba-operator/pkg/xerrors" 9 | ) 10 | 11 | type wrappedMySQLError struct { 12 | error 13 | } 14 | 15 | var retryableErrors = map[uint16]interface{}{ 16 | 17 | 1020: nil, // ER_CHECKREAD 18 | 1036: nil, // ER_OPEN_AS_READONLY 19 | 1040: nil, // ER_CON_COUNT_ERROR 20 | 1043: nil, // ER_HANDSHAKE_ERROR 21 | 1053: nil, // ER_SERVER_SHUTDOWN 22 | 1105: nil, // ER_UNKNOWN_ERROR 23 | 1129: nil, // ER_HOST_IS_BLOCKED 24 | 1180: nil, // ER_ERROR_DURING_COMMIT 25 | 1181: nil, // ER_ERROR_DURING_ROLLBACK 26 | 1182: nil, // ER_ERROR_DURING_FLUSH_LOGS 27 | 1188: nil, // ER_MASTER 28 | 1202: nil, // ER_SLAVE_THREAD 29 | 1203: nil, // ER_TOO_MANY_USER_CONNECTIONS 30 | 1205: nil, // ER_LOCK_WAIT_TIMEOUT 31 | 1206: nil, // ER_LOCK_TABLE_FULL 32 | 1218: nil, // ER_CONNECT_TO_MASTER 33 | 1220: nil, // ER_ERROR_WHEN_EXECUTING_COMMAND 34 | 1290: nil, // ER_OPTION_PREVENTS_STATEMENT 35 | 1297: nil, // ER_GET_TEMPORARY_ERRMSG 36 | 1317: nil, // ER_QUERY_INTERRUPTED 37 | 1637: nil, // ER_TOO_MANY_CONCURRENT_TRXS 38 | 1836: nil, // ER_READ_ONLY_MODE 39 | 1874: nil, // ER_INNODB_READ_ONLY 40 | 3019: nil, // ER_INNODB_UNDO_LOG_FULL 41 | 3032: nil, // ER_SERVER_OFFLINE_MODE 42 | 3168: nil, // ER_SERVER_ISNT_AVAILABLE 43 | 3169: nil, // ER_SESSION_WAS_KILLED 44 | 3186: nil, // ER_CAPACITY_EXCEEDED_IN_PARSER 45 | 3572: nil, // ER_LOCK_NOWAIT 46 | 47 | } 48 | 49 | func wrap(err error) xerrors.EnhancedError { 50 | if err != nil { 51 | return wrappedMySQLError{error: err} 52 | } 53 | return nil 54 | } 55 | 56 | func (err wrappedMySQLError) Unwrap() error { 57 | return err.error 58 | } 59 | 60 | // Temporary implements the 
EnhancedError interface 61 | func (err wrappedMySQLError) Temporary() bool { 62 | switch err.error { 63 | case mysql.ErrInvalidConn, mysql.ErrMalformPkt, mysql.ErrPktSync, mysql.ErrPktSyncMul, mysql.ErrBusyBuffer: 64 | return true 65 | } 66 | 67 | var mysqle *mysql.MySQLError 68 | if errors.As(err.error, &mysqle) { 69 | if _, ok := retryableErrors[mysqle.Number]; ok { 70 | return true 71 | } 72 | } 73 | 74 | return false 75 | } 76 | -------------------------------------------------------------------------------- /pkg/hints/hints.go: -------------------------------------------------------------------------------- 1 | package hints 2 | 3 | import ( 4 | "fmt" 5 | "reflect" 6 | 7 | dba "github.com/app-sre/dba-operator/api/v1alpha1" 8 | "github.com/app-sre/dba-operator/pkg/dbadmin" 9 | "github.com/iancoleman/strcase" 10 | ) 11 | 12 | const ( 13 | addColumnTableTooLarge = "Attempting to add a column to a large table (%s)" 14 | addBlockingIndexTableTooLarge = "Attempting to create an index (type=%s) on a large table (%s)" 15 | addNotNullableColumnNoDefault = "Attempting to add a NOT NULL column (%s) to an existing table (%s) without specifying a server default" 16 | addNotNullConstraintWillFail = "Attempting to convert a column (%s) to NOT NULL on a table (%s) when null data is present" 17 | addUniqueIndexWillFail = "Attempting to add a unique index to a table (%s) which contains non-unique data over the specified columns (%v)" 18 | ) 19 | 20 | // Engine exposes the interface used to ask the engine for errors related to 21 | // hints. 22 | type Engine interface { 23 | ProcessHints([]dba.DatabaseMigrationSchemaHint) ([]dba.ManagedDatabaseError, error) 24 | } 25 | 26 | // NewHintsEngine creates an instance of the Engine bound to a specific database. 27 | func NewHintsEngine(handle dbadmin.DbAdmin, largeTableThreshold uint64) Engine { 28 | he := &hintsEngine{ 29 | handle: handle, 30 | operationToProcessor: make(map[string]processorFunc), 31 | largeTableThreshold: largeTableThreshold, 32 | } 33 | 34 | // Reflect over the methods on this type to find ones that are of the type 35 | // processorFunc, and store them in a map to make it easy to dispatch them 36 | // by name as we iterate over hints supplied by the user. Exported methods 37 | // will be renamed from CamelCase to lowerCamelCase as they are specified in 38 | // the hints schema.
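// For example, the exported method AddColumn below handles hints whose Operation field is "addColumn", and AlterColumn handles "alterColumn".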
39 | engineInstance := reflect.ValueOf(he) 40 | engineType := reflect.TypeOf(he) 41 | for i := 0; i < engineType.NumMethod(); i++ { 42 | method := engineType.Method(i) 43 | boundMethod := engineInstance.Method(i) 44 | if processor, ok := boundMethod.Interface().(func(dba.DatabaseMigrationSchemaHint, bool) ([]dba.ManagedDatabaseError, error)); ok { 45 | he.operationToProcessor[strcase.ToLowerCamel(method.Name)] = processorFunc(processor) 46 | } 47 | } 48 | 49 | return he 50 | } 51 | 52 | type processorFunc func(dba.DatabaseMigrationSchemaHint, bool) ([]dba.ManagedDatabaseError, error) 53 | 54 | type hintsEngine struct { 55 | handle dbadmin.DbAdmin 56 | largeTableThreshold uint64 57 | 58 | operationToProcessor map[string]processorFunc 59 | } 60 | 61 | func (he *hintsEngine) ProcessHints(hints []dba.DatabaseMigrationSchemaHint) ([]dba.ManagedDatabaseError, error) { 62 | tablesReferenced := make(map[dbadmin.TableName]struct{}) 63 | 64 | // In one pass, just collect the table names 65 | for _, hint := range hints { 66 | tablesReferenced[dbadmin.TableName(hint.TableName)] = struct{}{} 67 | } 68 | 69 | // Load the table sizes for all of the referenced tables 70 | tableNamesList := make([]dbadmin.TableName, 0, len(tablesReferenced)) 71 | for oneTableName := range tablesReferenced { 72 | tableNamesList = append(tableNamesList, oneTableName) 73 | } 74 | 75 | var migrationErrors []dba.ManagedDatabaseError 76 | estimates, err := he.handle.GetTableSizeEstimates(tableNamesList) 77 | if err != nil { 78 | return migrationErrors, fmt.Errorf("Unable to load table size estimates: %w", err) 79 | } 80 | 81 | for _, hint := range hints { 82 | if processor, ok := he.operationToProcessor[hint.Operation]; !ok { 83 | unknownOp := newMigrationErrorF("Unknown hint operation: %s", hint.Operation) 84 | migrationErrors = append(migrationErrors, unknownOp) 85 | } else { 86 | tableSize := estimates[dbadmin.TableName(hint.TableName)] 87 | processorErrors, err := processor(hint, tableSize > he.largeTableThreshold) 88 | if err != nil { 89 | return migrationErrors, err 90 | } 91 | migrationErrors = append(migrationErrors, processorErrors...) 92 | } 93 | } 94 | 95 | return migrationErrors, nil 96 | } 97 | 98 | func (he *hintsEngine) AddColumn(hint dba.DatabaseMigrationSchemaHint, largeTable bool) (migrationErrors []dba.ManagedDatabaseError, err error) { 99 | tableName := dbadmin.TableName(hint.TableName) 100 | 101 | if largeTable { 102 | migrationErrors = append(migrationErrors, newMigrationErrorF(addColumnTableTooLarge, tableName)) 103 | } 104 | 105 | for _, column := range hint.Columns { 106 | if column.NotNullable && !column.HasServerDefault { 107 | migrationErrors = append(migrationErrors, newMigrationErrorF(addNotNullableColumnNoDefault, column.Name, tableName)) 108 | } 109 | } 110 | 111 | return 112 | } 113 | 114 | func (he *hintsEngine) CreateIndex(hint dba.DatabaseMigrationSchemaHint, largeTable bool) (migrationErrors []dba.ManagedDatabaseError, err error) { 115 | tableName := dbadmin.TableName(hint.TableName) 116 | 117 | switch hint.IndexType { 118 | case "unique": 119 | columnNames := make([]string, 0, len(hint.Columns)) 120 | for _, columnReference := range hint.Columns { 121 | columnNames = append(columnNames, columnReference.Name) 122 | } 123 | 124 | var willFail bool 125 | willFail, err = he.handle.ConstraintWillFail(tableName, dbadmin.UniqueConstraint, columnNames...)
126 | if err != nil { 127 | return 128 | } 129 | if willFail { 130 | migrationErrors = append(migrationErrors, newMigrationErrorF(addUniqueIndexWillFail, tableName, columnNames)) 131 | } 132 | case "index": 133 | // Regular index creation always allows concurrent DML and will not block 134 | return migrationErrors, nil 135 | case "fulltext": 136 | if largeTable { 137 | migrationErrors = append(migrationErrors, newMigrationErrorF(addBlockingIndexTableTooLarge, hint.IndexType, tableName)) 138 | } 139 | default: 140 | return migrationErrors, fmt.Errorf("Unknown index type: %s", hint.IndexType) 141 | } 142 | 143 | return 144 | } 145 | 146 | func (he *hintsEngine) CreateTable(hint dba.DatabaseMigrationSchemaHint, largeTable bool) (migrationErrors []dba.ManagedDatabaseError, err error) { 147 | // Create table is always fast and easy 148 | return 149 | } 150 | 151 | func (he *hintsEngine) AlterColumn(hint dba.DatabaseMigrationSchemaHint, largeTable bool) (migrationErrors []dba.ManagedDatabaseError, err error) { 152 | tableName := dbadmin.TableName(hint.TableName) 153 | 154 | for _, column := range hint.Columns { 155 | if column.NotNullable { 156 | var willFail bool 157 | willFail, err = he.handle.ConstraintWillFail(tableName, dbadmin.NotNullConstraint, column.Name) 158 | if err != nil { 159 | return 160 | } 161 | 162 | if willFail { 163 | migrationErrors = append(migrationErrors, newMigrationErrorF(addNotNullConstraintWillFail, column.Name, tableName)) 164 | } 165 | } 166 | } 167 | 168 | return 169 | } 170 | 171 | func newMigrationErrorF(message string, arguments ...interface{}) dba.ManagedDatabaseError { 172 | return dba.ManagedDatabaseError{ 173 | Message: fmt.Sprintf(message, arguments...), 174 | Temporary: false, 175 | } 176 | } 177 | -------------------------------------------------------------------------------- /pkg/hints/hints_test.go: -------------------------------------------------------------------------------- 1 | package hints 2 | 3 | import ( 4 | "testing" 5 | 6 | dba "github.com/app-sre/dba-operator/api/v1alpha1" 7 | "github.com/app-sre/dba-operator/pkg/dbadmin" 8 | "github.com/app-sre/dba-operator/pkg/dbadmin/dbadminfakes" 9 | "github.com/stretchr/testify/assert" 10 | ) 11 | 12 | const ( 13 | fakeTableName = dbadmin.TableName("fakeTable") 14 | fakeColumn = "fakeColumn" 15 | largeTableDefinition = 1_000_000 16 | ) 17 | 18 | var addColumnTests = []struct { 19 | hint dba.DatabaseMigrationSchemaHintColumn 20 | tableSize uint64 21 | numExpectedErrors int 22 | }{ 23 | { 24 | dba.DatabaseMigrationSchemaHintColumn{ 25 | NotNullable: false, 26 | HasServerDefault: false, 27 | }, 0, 0, 28 | }, 29 | { 30 | dba.DatabaseMigrationSchemaHintColumn{ 31 | NotNullable: true, 32 | HasServerDefault: false, 33 | }, 0, 1, 34 | }, 35 | { 36 | dba.DatabaseMigrationSchemaHintColumn{ 37 | NotNullable: false, 38 | HasServerDefault: true, 39 | }, 0, 0, 40 | }, 41 | { 42 | dba.DatabaseMigrationSchemaHintColumn{ 43 | NotNullable: true, 44 | HasServerDefault: true, 45 | }, 0, 0, 46 | }, 47 | { 48 | dba.DatabaseMigrationSchemaHintColumn{ 49 | NotNullable: false, 50 | HasServerDefault: false, 51 | }, 1, 0, 52 | }, 53 | { 54 | dba.DatabaseMigrationSchemaHintColumn{ 55 | NotNullable: true, 56 | HasServerDefault: false, 57 | }, 1, 1, 58 | }, 59 | { 60 | dba.DatabaseMigrationSchemaHintColumn{ 61 | NotNullable: false, 62 | HasServerDefault: true, 63 | }, 1, 0, 64 | }, 65 | { 66 | dba.DatabaseMigrationSchemaHintColumn{ 67 | NotNullable: true, 68 | HasServerDefault: true, 69 | }, 1, 0, 70 | }, 71 | { 72 |
dba.DatabaseMigrationSchemaHintColumn{ 73 | NotNullable: false, 74 | HasServerDefault: false, 75 | }, 2_000_000, 1, 76 | }, 77 | { 78 | dba.DatabaseMigrationSchemaHintColumn{ 79 | NotNullable: true, 80 | HasServerDefault: false, 81 | }, 2_000_000, 2, 82 | }, 83 | { 84 | dba.DatabaseMigrationSchemaHintColumn{ 85 | NotNullable: false, 86 | HasServerDefault: true, 87 | }, 2_000_000, 1, 88 | }, 89 | { 90 | dba.DatabaseMigrationSchemaHintColumn{ 91 | NotNullable: true, 92 | HasServerDefault: true, 93 | }, 2_000_000, 1, 94 | }, 95 | } 96 | 97 | func TestAddColumnsToTable(t *testing.T) { 98 | assert := assert.New(t) 99 | 100 | for _, addColumnTest := range addColumnTests { 101 | addColumnTest.hint.Name = fakeColumn 102 | hints := []dba.DatabaseMigrationSchemaHint{ 103 | { 104 | TableReference: dba.TableReference{ 105 | TableName: string(fakeTableName), 106 | }, 107 | Operation: "addColumn", 108 | Columns: []dba.DatabaseMigrationSchemaHintColumn{ 109 | addColumnTest.hint, 110 | }, 111 | }, 112 | } 113 | 114 | fakeDB := &dbadminfakes.FakeDbAdmin{} 115 | fakeDB.GetTableSizeEstimatesReturns(map[dbadmin.TableName]uint64{ 116 | fakeTableName: addColumnTest.tableSize, 117 | }, nil) 118 | 119 | hintsEngine := NewHintsEngine(fakeDB, largeTableDefinition) 120 | 121 | schemaErrors, _ := hintsEngine.ProcessHints(hints) 122 | assert.Equalf( 123 | addColumnTest.numExpectedErrors, 124 | len(schemaErrors), 125 | "Unexpected number of errors with %v, errors %v", 126 | addColumnTest, 127 | schemaErrors, 128 | ) 129 | 130 | assert.Equal(1, fakeDB.GetTableSizeEstimatesCallCount()) 131 | } 132 | } 133 | 134 | var addIndexTests = []struct { 135 | indexType string 136 | addConstraintWillFail bool 137 | numExpectedErrors int 138 | }{ 139 | {"unique", true, 1}, 140 | {"unique", false, 0}, 141 | } 142 | 143 | func TestAddIndexToTable(t *testing.T) { 144 | assert := assert.New(t) 145 | 146 | for _, addIndexTest := range addIndexTests { 147 | hints := []dba.DatabaseMigrationSchemaHint{ 148 | { 149 | TableReference: dba.TableReference{ 150 | TableName: string(fakeTableName), 151 | }, 152 | Operation: "createIndex", 153 | Columns: []dba.DatabaseMigrationSchemaHintColumn{ 154 | dba.DatabaseMigrationSchemaHintColumn{ 155 | Name: fakeColumn, 156 | }, 157 | }, 158 | IndexType: addIndexTest.indexType, 159 | }, 160 | } 161 | 162 | fakeDB := &dbadminfakes.FakeDbAdmin{} 163 | fakeDB.ConstraintWillFailReturns(addIndexTest.addConstraintWillFail, nil) 164 | 165 | hintsEngine := NewHintsEngine(fakeDB, largeTableDefinition) 166 | 167 | schemaErrors, _ := hintsEngine.ProcessHints(hints) 168 | assert.Equalf( 169 | addIndexTest.numExpectedErrors, 170 | len(schemaErrors), 171 | "Unexpected number of errors with %v, errors %v", 172 | addIndexTest, 173 | schemaErrors, 174 | ) 175 | 176 | assert.Equal(1, fakeDB.ConstraintWillFailCallCount()) 177 | } 178 | } 179 | 180 | var alterColumnTests = []struct { 181 | columnHint dba.DatabaseMigrationSchemaHintColumn 182 | addConstraintWillFail bool 183 | numExpectedErrors int 184 | }{ 185 | { 186 | dba.DatabaseMigrationSchemaHintColumn{ 187 | NotNullable: false, 188 | }, false, 0, 189 | }, 190 | { 191 | dba.DatabaseMigrationSchemaHintColumn{ 192 | NotNullable: true, 193 | }, false, 0, 194 | }, 195 | { 196 | dba.DatabaseMigrationSchemaHintColumn{ 197 | NotNullable: true, 198 | }, true, 1, 199 | }, 200 | } 201 | 202 | func TestAlterColumn(t *testing.T) { 203 | assert := assert.New(t) 204 | 205 | for _, alterColumnTest := range alterColumnTests { 206 | alterColumnTest.columnHint.Name = fakeColumn 207 | hints 
:= []dba.DatabaseMigrationSchemaHint{ 208 | { 209 | TableReference: dba.TableReference{ 210 | TableName: string(fakeTableName), 211 | }, 212 | Operation: "alterColumn", 213 | Columns: []dba.DatabaseMigrationSchemaHintColumn{ 214 | alterColumnTest.columnHint, 215 | }, 216 | }, 217 | } 218 | 219 | fakeDB := &dbadminfakes.FakeDbAdmin{} 220 | 221 | expectedConstraintWillFailCallCount := 0 222 | if alterColumnTest.columnHint.NotNullable { 223 | fakeDB.ConstraintWillFailReturns(alterColumnTest.addConstraintWillFail, nil) 224 | expectedConstraintWillFailCallCount++ 225 | } 226 | 227 | hintsEngine := NewHintsEngine(fakeDB, largeTableDefinition) 228 | 229 | schemaErrors, _ := hintsEngine.ProcessHints(hints) 230 | assert.Equalf( 231 | alterColumnTest.numExpectedErrors, 232 | len(schemaErrors), 233 | "Unexpected number of errors with %v, errors %v", 234 | alterColumnTest, 235 | schemaErrors, 236 | ) 237 | 238 | assert.Equal(expectedConstraintWillFailCallCount, fakeDB.ConstraintWillFailCallCount()) 239 | } 240 | } 241 | -------------------------------------------------------------------------------- /pkg/xerrors/errors.go: -------------------------------------------------------------------------------- 1 | package xerrors 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | // EnhancedError contains additional information about whether or not the error 8 | // should be considered temporary and the original operation therefore a candidate 9 | // for retrying. 10 | type EnhancedError interface { 11 | error 12 | 13 | // Returns true if the error should be considered temporary or transient 14 | Temporary() bool 15 | } 16 | 17 | type temporaryError struct { 18 | message string 19 | } 20 | 21 | // NewTempErrorf will create a new base error that is always considered 22 | // temporary and follows the calling convention of Sprintf. 23 | func NewTempErrorf(format string, arguments ...interface{}) error { 24 | return temporaryError{message: fmt.Sprintf(format, arguments...)} 25 | } 26 | 27 | func (te temporaryError) Error() string { 28 | return te.message 29 | } 30 | 31 | func (te temporaryError) Temporary() bool { 32 | return true 33 | } 34 | -------------------------------------------------------------------------------- /tools/tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | package tools 4 | 5 | import ( 6 | _ "github.com/maxbrunsfeld/counterfeiter/v6" 7 | ) 8 | 9 | // This file imports packages that are used when running go generate, or used 10 | // during the development process but not otherwise depended on by built code. 11 | --------------------------------------------------------------------------------