├── .dockerignore ├── .gitignore ├── Gopkg.lock ├── Gopkg.toml ├── LICENSE ├── README.md ├── build └── Dockerfile ├── cmd └── manager │ └── main.go ├── deploy ├── crds │ ├── vitess_v1alpha2_vitesscell_crd.yaml │ ├── vitess_v1alpha2_vitesscluster_crd.yaml │ ├── vitess_v1alpha2_vitesskeyspace_crd.yaml │ ├── vitess_v1alpha2_vitesslockserver_crd.yaml │ ├── vitess_v1alpha2_vitessshard_crd.yaml │ ├── vitess_v1alpha2_vitesstablet_crd.yaml │ └── vitess_v1alpha2_vitesstablet_crd.yaml.bak ├── operator.yaml ├── role.yaml ├── role_binding.yaml └── service_account.yaml ├── examples ├── all-in-one.yaml ├── distributed.yaml ├── etcd-clusters-minimal.yaml └── etcd-clusters.yaml ├── my-vitess.yaml ├── pkg ├── apis │ ├── addtoscheme_vitess_v1alpha2.go │ ├── apis.go │ └── vitess │ │ └── v1alpha2 │ │ ├── doc.go │ │ ├── interfaces.go │ │ ├── register.go │ │ ├── samples.yaml │ │ ├── shared_helpers.go │ │ ├── shared_types.go │ │ ├── vitesscell_helpers.go │ │ ├── vitesscell_types.go │ │ ├── vitesscluster_helpers.go │ │ ├── vitesscluster_types.go │ │ ├── vitesskeyspace_helpers.go │ │ ├── vitesskeyspace_types.go │ │ ├── vitesslockserver_types.go │ │ ├── vitessshard_helpers.go │ │ ├── vitessshard_types.go │ │ ├── vitesstablet_helpers.go │ │ ├── vitesstablet_types.go │ │ └── zz_generated.deepcopy.go ├── controller │ ├── add_vitesscluster.go │ ├── add_vitesslockserver.go │ ├── controller.go │ ├── vitesscluster │ │ ├── reconcile_cell.go │ │ ├── reconcile_cell_test.go │ │ ├── reconcile_cluster.go │ │ ├── reconcile_keyspace.go │ │ ├── reconcile_shard.go │ │ ├── reconcile_tablet.go │ │ ├── utils.go │ │ ├── vitesscluster_controller.go │ │ └── vitesscluster_controller_test.go │ └── vitesslockserver │ │ └── vitesslockserver_controller.go ├── normalizer │ ├── errors.go │ ├── normalizer.go │ ├── normalizer_test.go │ ├── sanity.go │ └── validation.go └── util │ └── scripts │ ├── init-mysql-creds.go │ ├── init_replica_master.go │ ├── main.go │ ├── mysql.go │ ├── tablet.go │ ├── vtcltd.go │ └── vtgate.go └── version └── version.go /.dockerignore: -------------------------------------------------------------------------------- 1 | # Ignore everything 2 | ** 3 | 4 | 5 | # Allow build artifact 6 | !build/_output/bin/vitess-operator 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Temporary Build Files 2 | vendor/ 3 | build/_output 4 | build/_test 5 | # Created by https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 6 | ### Emacs ### 7 | # -*- mode: gitignore; -*- 8 | *~ 9 | \#*\# 10 | /.emacs.desktop 11 | /.emacs.desktop.lock 12 | *.elc 13 | auto-save-list 14 | tramp 15 | .\#* 16 | # Org-mode 17 | .org-id-locations 18 | *_archive 19 | # flymake-mode 20 | *_flymake.* 21 | # eshell files 22 | /eshell/history 23 | /eshell/lastdir 24 | # elpa packages 25 | /elpa/ 26 | # reftex files 27 | *.rel 28 | # AUCTeX auto folder 29 | /auto/ 30 | # cask packages 31 | .cask/ 32 | dist/ 33 | # Flycheck 34 | flycheck_*.el 35 | # server auth directory 36 | /server/ 37 | # projectiles files 38 | .projectile 39 | projectile-bookmarks.eld 40 | # directory configuration 41 | .dir-locals.el 42 | # saveplace 43 | places 44 | # url cache 45 | url/cache/ 46 | # cedet 47 | ede-projects.el 48 | # smex 49 | smex-items 50 | # company-statistics 51 | company-statistics-cache.el 52 | # anaconda-mode 53 | anaconda-mode/ 54 | ### Go ### 55 | # Binaries for programs and plugins 56 | *.exe 57 | *.exe~ 58 | *.dll 59 | *.so 60 | 
*.dylib 61 | # Test binary, build with 'go test -c' 62 | *.test 63 | # Output of the go coverage tool, specifically when used with LiteIDE 64 | *.out 65 | ### Vim ### 66 | # swap 67 | .sw[a-p] 68 | .*.sw[a-p] 69 | # session 70 | Session.vim 71 | # temporary 72 | .netrwhist 73 | # auto-generated tag files 74 | tags 75 | ### VisualStudioCode ### 76 | .vscode/* 77 | .history 78 | # End of https://www.gitignore.io/api/go,vim,emacs,visualstudiocode 79 | -------------------------------------------------------------------------------- /Gopkg.lock: -------------------------------------------------------------------------------- 1 | # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 2 | 3 | 4 | [[projects]] 5 | digest = "1:fd1a7ca82682444a45424f6af37b1e0373f632e5a303441b111558ae8656a9b7" 6 | name = "cloud.google.com/go" 7 | packages = ["compute/metadata"] 8 | pruneopts = "NT" 9 | revision = "0ebda48a7f143b1cce9eb37a8c1106ac762a3430" 10 | version = "v0.34.0" 11 | 12 | [[projects]] 13 | digest = "1:d8ebbd207f3d3266d4423ce4860c9f3794956306ded6c7ba312ecc69cdfbf04c" 14 | name = "github.com/PuerkitoBio/purell" 15 | packages = ["."] 16 | pruneopts = "NT" 17 | revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" 18 | version = "v1.1.0" 19 | 20 | [[projects]] 21 | branch = "master" 22 | digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727" 23 | name = "github.com/PuerkitoBio/urlesc" 24 | packages = ["."] 25 | pruneopts = "NT" 26 | revision = "de5bf2ad457846296e2031421a34e2568e304e35" 27 | 28 | [[projects]] 29 | branch = "master" 30 | digest = "1:c819830f4f5ef85874a90ac3cbcc96cd322c715f5c96fbe4722eacd3dafbaa07" 31 | name = "github.com/beorn7/perks" 32 | packages = ["quantile"] 33 | pruneopts = "NT" 34 | revision = "3a771d992973f24aa725d07868b467d1ddfceafb" 35 | 36 | [[projects]] 37 | digest = "1:4b8b5811da6970495e04d1f4e98bb89518cc3cfc3b3f456bdb876ed7b6c74049" 38 | name = "github.com/davecgh/go-spew" 39 | packages = ["spew"] 40 | pruneopts = "NT" 41 | revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" 42 | version = "v1.1.1" 43 | 44 | [[projects]] 45 | digest = "1:e6f888d4be8ec0f05c50e2aba83da4948b58045dee54d03be81fa74ea673302c" 46 | name = "github.com/emicklei/go-restful" 47 | packages = [ 48 | ".", 49 | "log", 50 | ] 51 | pruneopts = "NT" 52 | revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0" 53 | version = "v2.8.0" 54 | 55 | [[projects]] 56 | digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" 57 | name = "github.com/ghodss/yaml" 58 | packages = ["."] 59 | pruneopts = "NT" 60 | revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" 61 | version = "v1.0.0" 62 | 63 | [[projects]] 64 | branch = "master" 65 | digest = "1:d421af4c4fe51d399667d573982d663fe1fa67020a88d3ae43466ebfe8e2b5c9" 66 | name = "github.com/go-logr/logr" 67 | packages = ["."] 68 | pruneopts = "NT" 69 | revision = "9fb12b3b21c5415d16ac18dc5cd42c1cfdd40c4e" 70 | 71 | [[projects]] 72 | digest = "1:340497a512995aa69c0add901d79a2096b3449d35a44a6f1f1115091a9f8c687" 73 | name = "github.com/go-logr/zapr" 74 | packages = ["."] 75 | pruneopts = "NT" 76 | revision = "7536572e8d55209135cd5e7ccf7fce43dca217ab" 77 | version = "v0.1.0" 78 | 79 | [[projects]] 80 | digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441" 81 | name = "github.com/go-openapi/jsonpointer" 82 | packages = ["."] 83 | pruneopts = "NT" 84 | revision = "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004" 85 | version = "v0.18.0" 86 | 87 | [[projects]] 88 | digest = 
"1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546" 89 | name = "github.com/go-openapi/jsonreference" 90 | packages = ["."] 91 | pruneopts = "NT" 92 | revision = "8483a886a90412cd6858df4ea3483dce9c8e35a3" 93 | version = "v0.18.0" 94 | 95 | [[projects]] 96 | digest = "1:4da4ea0a664ba528965683d350f602d0f11464e6bb2e17aad0914723bc25d163" 97 | name = "github.com/go-openapi/spec" 98 | packages = ["."] 99 | pruneopts = "NT" 100 | revision = "5b6cdde3200976e3ecceb2868706ee39b6aff3e4" 101 | version = "v0.18.0" 102 | 103 | [[projects]] 104 | digest = "1:dc0f590770e5a6c70ea086232324f7b7dc4857c60eca63ab8ff78e0a5cfcdbf3" 105 | name = "github.com/go-openapi/swag" 106 | packages = ["."] 107 | pruneopts = "NT" 108 | revision = "1d29f06aebd59ccdf11ae04aa0334ded96e2d909" 109 | version = "v0.18.0" 110 | 111 | [[projects]] 112 | digest = "1:932970e69f16e127aa0653b8263ae588cd127fa53273e19ba44332902c9826f2" 113 | name = "github.com/gogo/protobuf" 114 | packages = [ 115 | "proto", 116 | "sortkeys", 117 | ] 118 | pruneopts = "NT" 119 | revision = "4cbf7e384e768b4e01799441fdf2a706a5635ae7" 120 | version = "v1.2.0" 121 | 122 | [[projects]] 123 | branch = "master" 124 | digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" 125 | name = "github.com/golang/glog" 126 | packages = ["."] 127 | pruneopts = "NT" 128 | revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" 129 | 130 | [[projects]] 131 | branch = "master" 132 | digest = "1:aaedc94233e56ed57cdb04e3abfacc85c90c14082b62e3cdbe8ea72fc06ee035" 133 | name = "github.com/golang/groupcache" 134 | packages = ["lru"] 135 | pruneopts = "NT" 136 | revision = "c65c006176ff7ff98bb916961c7abbc6b0afc0aa" 137 | 138 | [[projects]] 139 | digest = "1:d7cb4458ea8782e6efacd8f4940796ec559c90833509c436f40c4085b98156dd" 140 | name = "github.com/golang/protobuf" 141 | packages = [ 142 | "proto", 143 | "ptypes", 144 | "ptypes/any", 145 | "ptypes/duration", 146 | "ptypes/timestamp", 147 | ] 148 | pruneopts = "NT" 149 | revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" 150 | version = "v1.2.0" 151 | 152 | [[projects]] 153 | branch = "master" 154 | digest = "1:05f95ffdfcf651bdb0f05b40b69e7f5663047f8da75c72d58728acb59b5cc107" 155 | name = "github.com/google/btree" 156 | packages = ["."] 157 | pruneopts = "NT" 158 | revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306" 159 | 160 | [[projects]] 161 | branch = "master" 162 | digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc" 163 | name = "github.com/google/gofuzz" 164 | packages = ["."] 165 | pruneopts = "NT" 166 | revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1" 167 | 168 | [[projects]] 169 | digest = "1:56a1f3949ebb7fa22fa6b4e4ac0fe0f77cc4faee5b57413e6fa9199a8458faf1" 170 | name = "github.com/google/uuid" 171 | packages = ["."] 172 | pruneopts = "NT" 173 | revision = "9b3b1e0f5f99ae461456d768e7d301a7acdaa2d8" 174 | version = "v1.1.0" 175 | 176 | [[projects]] 177 | digest = "1:289332c13b80edfefc88397cce5266c16845dcf204fa2f6ac7e464ee4c7f6e96" 178 | name = "github.com/googleapis/gnostic" 179 | packages = [ 180 | "OpenAPIv2", 181 | "compiler", 182 | "extensions", 183 | ] 184 | pruneopts = "NT" 185 | revision = "7c663266750e7d82587642f65e60bc4083f1f84e" 186 | version = "v0.2.0" 187 | 188 | [[projects]] 189 | branch = "master" 190 | digest = "1:97972f03fbf34ec4247ddc78ddb681389c468c020492aa32b109744a54fc0c14" 191 | name = "github.com/gregjones/httpcache" 192 | packages = [ 193 | ".", 194 | "diskcache", 195 | ] 196 | pruneopts = "NT" 197 | revision = 
"c63ab54fda8f77302f8d414e19933f2b6026a089" 198 | 199 | [[projects]] 200 | digest = "1:b42cde0e1f3c816dd57f57f7bbcf05ca40263ad96f168714c130c611fc0856a6" 201 | name = "github.com/hashicorp/golang-lru" 202 | packages = [ 203 | ".", 204 | "simplelru", 205 | ] 206 | pruneopts = "NT" 207 | revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768" 208 | version = "v0.5.0" 209 | 210 | [[projects]] 211 | digest = "1:9a52adf44086cead3b384e5d0dbf7a1c1cce65e67552ee3383a8561c42a18cd3" 212 | name = "github.com/imdario/mergo" 213 | packages = ["."] 214 | pruneopts = "NT" 215 | revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4" 216 | version = "v0.3.6" 217 | 218 | [[projects]] 219 | digest = "1:1d39c063244ad17c4b18e8da1551163b6ffb52bd1640a49a8ec5c3b7bf4dbd5d" 220 | name = "github.com/json-iterator/go" 221 | packages = ["."] 222 | pruneopts = "NT" 223 | revision = "1624edc4454b8682399def8740d46db5e4362ba4" 224 | version = "v1.1.5" 225 | 226 | [[projects]] 227 | branch = "master" 228 | digest = "1:7d9fcac7f1228470c4ea0ee31cdfb662a758c44df691e39b3e76c11d3e12ba8f" 229 | name = "github.com/mailru/easyjson" 230 | packages = [ 231 | "buffer", 232 | "jlexer", 233 | "jwriter", 234 | ] 235 | pruneopts = "NT" 236 | revision = "60711f1a8329503b04e1c88535f419d0bb440bff" 237 | 238 | [[projects]] 239 | branch = "master" 240 | digest = "1:0e9bfc47ab9941ecc3344e580baca5deb4091177e84dd9773b48b38ec26b93d5" 241 | name = "github.com/mattbaird/jsonpatch" 242 | packages = ["."] 243 | pruneopts = "NT" 244 | revision = "81af80346b1a01caae0cbc27fd3c1ba5b11e189f" 245 | 246 | [[projects]] 247 | digest = "1:ea1db000388d88b31db7531c83016bef0d6db0d908a07794bfc36aca16fbf935" 248 | name = "github.com/matttproud/golang_protobuf_extensions" 249 | packages = ["pbutil"] 250 | pruneopts = "NT" 251 | revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" 252 | version = "v1.0.1" 253 | 254 | [[projects]] 255 | digest = "1:2f42fa12d6911c7b7659738758631bec870b7e9b4c6be5444f963cdcfccc191f" 256 | name = "github.com/modern-go/concurrent" 257 | packages = ["."] 258 | pruneopts = "NT" 259 | revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94" 260 | version = "1.0.3" 261 | 262 | [[projects]] 263 | digest = "1:c6aca19413b13dc59c220ad7430329e2ec454cc310bc6d8de2c7e2b93c18a0f6" 264 | name = "github.com/modern-go/reflect2" 265 | packages = ["."] 266 | pruneopts = "NT" 267 | revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" 268 | version = "1.0.1" 269 | 270 | [[projects]] 271 | branch = "master" 272 | digest = "1:8bd17c22f9f99d81aff5370c855aaec086bf015ae6fa1fae3dac3cba2f117777" 273 | name = "github.com/operator-framework/operator-sdk" 274 | packages = [ 275 | "pkg/k8sutil", 276 | "pkg/leader", 277 | "pkg/ready", 278 | "version", 279 | ] 280 | pruneopts = "NT" 281 | revision = "94d42526a13ea44d5721cdcfdd773b233f93cee3" 282 | 283 | [[projects]] 284 | digest = "1:93b1d84c5fa6d1ea52f4114c37714cddd84d5b78f151b62bb101128dd51399bf" 285 | name = "github.com/pborman/uuid" 286 | packages = ["."] 287 | pruneopts = "NT" 288 | revision = "adf5a7427709b9deb95d29d3fa8a2bf9cfd388f1" 289 | version = "v1.2" 290 | 291 | [[projects]] 292 | branch = "master" 293 | digest = "1:bf2ac97824a7221eb16b096aecc1c390d4c8a4e49524386aaa2e2dd215cbfb31" 294 | name = "github.com/petar/GoLLRB" 295 | packages = ["llrb"] 296 | pruneopts = "NT" 297 | revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4" 298 | 299 | [[projects]] 300 | digest = "1:e4e9e026b8e4c5630205cd0208efb491b40ad40552e57f7a646bb8a46896077b" 301 | name = "github.com/peterbourgon/diskv" 302 | packages = ["."] 303 | pruneopts 
= "NT" 304 | revision = "5f041e8faa004a95c88a202771f4cc3e991971e6" 305 | version = "v2.0.1" 306 | 307 | [[projects]] 308 | digest = "1:ec2a29e3bd141038ae5c3d3a4f57db0c341fcc1d98055a607aedd683aed124ee" 309 | name = "github.com/prometheus/client_golang" 310 | packages = [ 311 | "prometheus", 312 | "prometheus/internal", 313 | "prometheus/promhttp", 314 | ] 315 | pruneopts = "NT" 316 | revision = "505eaef017263e299324067d40ca2c48f6a2cf50" 317 | version = "v0.9.2" 318 | 319 | [[projects]] 320 | branch = "master" 321 | digest = "1:c2cc5049e927e2749c0d5163c9f8d924880d83e84befa732b9aad0b6be227bed" 322 | name = "github.com/prometheus/client_model" 323 | packages = ["go"] 324 | pruneopts = "NT" 325 | revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" 326 | 327 | [[projects]] 328 | branch = "master" 329 | digest = "1:aff0fac3bf0847ca241ebba899b74f614dee3c74d376f2be6ade2b1b22dd8e7c" 330 | name = "github.com/prometheus/common" 331 | packages = [ 332 | "expfmt", 333 | "internal/bitbucket.org/ww/goautoneg", 334 | "model", 335 | ] 336 | pruneopts = "NT" 337 | revision = "67670fe90761d7ff18ec1d640135e53b9198328f" 338 | 339 | [[projects]] 340 | branch = "master" 341 | digest = "1:523adcc0953fdf00dab08f45cad651f74682fb489bd2d672aa9f96e568e2f11f" 342 | name = "github.com/prometheus/procfs" 343 | packages = [ 344 | ".", 345 | "internal/util", 346 | "nfs", 347 | "xfs", 348 | ] 349 | pruneopts = "NT" 350 | revision = "1dc9a6cbc91aacc3e8b2d63db4d2e957a5394ac4" 351 | 352 | [[projects]] 353 | digest = "1:9d8420bbf131d1618bde6530af37c3799340d3762cc47210c1d9532a4c3a2779" 354 | name = "github.com/spf13/pflag" 355 | packages = ["."] 356 | pruneopts = "NT" 357 | revision = "298182f68c66c05229eb03ac171abe6e309ee79a" 358 | version = "v1.0.3" 359 | 360 | [[projects]] 361 | digest = "1:22f696cee54865fb8e9ff91df7b633f6b8f22037a8015253c6b6a71ca82219c7" 362 | name = "go.uber.org/atomic" 363 | packages = ["."] 364 | pruneopts = "NT" 365 | revision = "1ea20fb1cbb1cc08cbd0d913a96dead89aa18289" 366 | version = "v1.3.2" 367 | 368 | [[projects]] 369 | digest = "1:58ca93bdf81bac106ded02226b5395a0595d5346cdc4caa8d9c1f3a5f8f9976e" 370 | name = "go.uber.org/multierr" 371 | packages = ["."] 372 | pruneopts = "NT" 373 | revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" 374 | version = "v1.1.0" 375 | 376 | [[projects]] 377 | digest = "1:572fa4496563920f3e3107a2294cf2621d6cc4ffd03403fb6397b1bab9fa082a" 378 | name = "go.uber.org/zap" 379 | packages = [ 380 | ".", 381 | "buffer", 382 | "internal/bufferpool", 383 | "internal/color", 384 | "internal/exit", 385 | "zapcore", 386 | ] 387 | pruneopts = "NT" 388 | revision = "ff33455a0e382e8a81d14dd7c922020b6b5e7982" 389 | version = "v1.9.1" 390 | 391 | [[projects]] 392 | branch = "master" 393 | digest = "1:d6d3b59b8c4ceb6a7db2f20169719e57a8dcfa2c055b4418feb3fcc7bbd1a936" 394 | name = "golang.org/x/crypto" 395 | packages = ["ssh/terminal"] 396 | pruneopts = "NT" 397 | revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447" 398 | 399 | [[projects]] 400 | branch = "master" 401 | digest = "1:9300b9f62f41c3dded875b33117e09e9269790eb4372f5e96621f1e375ed5307" 402 | name = "golang.org/x/net" 403 | packages = [ 404 | "context", 405 | "context/ctxhttp", 406 | "http/httpguts", 407 | "http2", 408 | "http2/hpack", 409 | "idna", 410 | ] 411 | pruneopts = "NT" 412 | revision = "927f97764cc334a6575f4b7a1584a147864d5723" 413 | 414 | [[projects]] 415 | branch = "master" 416 | digest = "1:bdb664c89389d18d2aa69fb3b61fe5e2effc09e55b333a56e3cb071026418e33" 417 | name = "golang.org/x/oauth2" 418 | packages = [ 419 
| ".", 420 | "google", 421 | "internal", 422 | "jws", 423 | "jwt", 424 | ] 425 | pruneopts = "NT" 426 | revision = "d668ce993890a79bda886613ee587a69dd5da7a6" 427 | 428 | [[projects]] 429 | branch = "master" 430 | digest = "1:ba9eac3f65b198913110d60538b661e03e799f72a6ed777d2b34106ce2f364b4" 431 | name = "golang.org/x/sys" 432 | packages = [ 433 | "unix", 434 | "windows", 435 | ] 436 | pruneopts = "NT" 437 | revision = "b4a75ba826a64a70990f11a225237acd6ef35c9f" 438 | 439 | [[projects]] 440 | digest = "1:8c74f97396ed63cc2ef04ebb5fc37bb032871b8fd890a25991ed40974b00cd2a" 441 | name = "golang.org/x/text" 442 | packages = [ 443 | "collate", 444 | "collate/build", 445 | "internal/colltab", 446 | "internal/gen", 447 | "internal/tag", 448 | "internal/triegen", 449 | "internal/ucd", 450 | "language", 451 | "secure/bidirule", 452 | "transform", 453 | "unicode/bidi", 454 | "unicode/cldr", 455 | "unicode/norm", 456 | "unicode/rangetable", 457 | "width", 458 | ] 459 | pruneopts = "NT" 460 | revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" 461 | version = "v0.3.0" 462 | 463 | [[projects]] 464 | branch = "master" 465 | digest = "1:9fdc2b55e8e0fafe4b41884091e51e77344f7dc511c5acedcfd98200003bff90" 466 | name = "golang.org/x/time" 467 | packages = ["rate"] 468 | pruneopts = "NT" 469 | revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd" 470 | 471 | [[projects]] 472 | branch = "master" 473 | digest = "1:28a8b8275930d678cd5a2d4084f7ae8427a1e40c48085b6ae1340a4c00f79103" 474 | name = "golang.org/x/tools" 475 | packages = [ 476 | "go/ast/astutil", 477 | "go/gcexportdata", 478 | "go/internal/cgo", 479 | "go/internal/gcimporter", 480 | "go/internal/packagesdriver", 481 | "go/packages", 482 | "go/types/typeutil", 483 | "imports", 484 | "internal/fastwalk", 485 | "internal/gopathwalk", 486 | "internal/semver", 487 | ] 488 | pruneopts = "NT" 489 | revision = "d00ac6d27372a4273825635281f2dc360d4be563" 490 | 491 | [[projects]] 492 | digest = "1:902ffa11f1d8c19c12b05cabffe69e1a16608ad03a8899ebcb9c6bde295660ae" 493 | name = "google.golang.org/appengine" 494 | packages = [ 495 | ".", 496 | "internal", 497 | "internal/app_identity", 498 | "internal/base", 499 | "internal/datastore", 500 | "internal/log", 501 | "internal/modules", 502 | "internal/remote_api", 503 | "internal/urlfetch", 504 | "urlfetch", 505 | ] 506 | pruneopts = "NT" 507 | revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1" 508 | version = "v1.4.0" 509 | 510 | [[projects]] 511 | digest = "1:2d1fbdc6777e5408cabeb02bf336305e724b925ff4546ded0fa8715a7267922a" 512 | name = "gopkg.in/inf.v0" 513 | packages = ["."] 514 | pruneopts = "NT" 515 | revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" 516 | version = "v0.9.1" 517 | 518 | [[projects]] 519 | digest = "1:18108594151654e9e696b27b181b953f9a90b16bf14d253dd1b397b025a1487f" 520 | name = "gopkg.in/yaml.v2" 521 | packages = ["."] 522 | pruneopts = "NT" 523 | revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" 524 | version = "v2.2.2" 525 | 526 | [[projects]] 527 | digest = "1:b3f8152a68d73095a40fdcf329a93fc42e8eadb3305171df23fdb6b4e41a6417" 528 | name = "k8s.io/api" 529 | packages = [ 530 | "admission/v1beta1", 531 | "admissionregistration/v1alpha1", 532 | "admissionregistration/v1beta1", 533 | "apps/v1", 534 | "apps/v1beta1", 535 | "apps/v1beta2", 536 | "authentication/v1", 537 | "authentication/v1beta1", 538 | "authorization/v1", 539 | "authorization/v1beta1", 540 | "autoscaling/v1", 541 | "autoscaling/v2beta1", 542 | "autoscaling/v2beta2", 543 | "batch/v1", 544 | "batch/v1beta1", 545 | 
"batch/v2alpha1", 546 | "certificates/v1beta1", 547 | "coordination/v1beta1", 548 | "core/v1", 549 | "events/v1beta1", 550 | "extensions/v1beta1", 551 | "networking/v1", 552 | "policy/v1beta1", 553 | "rbac/v1", 554 | "rbac/v1alpha1", 555 | "rbac/v1beta1", 556 | "scheduling/v1alpha1", 557 | "scheduling/v1beta1", 558 | "settings/v1alpha1", 559 | "storage/v1", 560 | "storage/v1alpha1", 561 | "storage/v1beta1", 562 | ] 563 | pruneopts = "NT" 564 | revision = "b503174bad5991eb66f18247f52e41c3258f6348" 565 | 566 | [[projects]] 567 | digest = "1:868de7cbaa0ecde6dc231c1529a10ae01bb05916095c0c992186e2a5cac57e79" 568 | name = "k8s.io/apimachinery" 569 | packages = [ 570 | "pkg/api/errors", 571 | "pkg/api/meta", 572 | "pkg/api/resource", 573 | "pkg/apis/meta/internalversion", 574 | "pkg/apis/meta/v1", 575 | "pkg/apis/meta/v1/unstructured", 576 | "pkg/apis/meta/v1beta1", 577 | "pkg/conversion", 578 | "pkg/conversion/queryparams", 579 | "pkg/fields", 580 | "pkg/labels", 581 | "pkg/runtime", 582 | "pkg/runtime/schema", 583 | "pkg/runtime/serializer", 584 | "pkg/runtime/serializer/json", 585 | "pkg/runtime/serializer/protobuf", 586 | "pkg/runtime/serializer/recognizer", 587 | "pkg/runtime/serializer/streaming", 588 | "pkg/runtime/serializer/versioning", 589 | "pkg/selection", 590 | "pkg/types", 591 | "pkg/util/cache", 592 | "pkg/util/clock", 593 | "pkg/util/diff", 594 | "pkg/util/errors", 595 | "pkg/util/framer", 596 | "pkg/util/intstr", 597 | "pkg/util/json", 598 | "pkg/util/mergepatch", 599 | "pkg/util/naming", 600 | "pkg/util/net", 601 | "pkg/util/runtime", 602 | "pkg/util/sets", 603 | "pkg/util/strategicpatch", 604 | "pkg/util/uuid", 605 | "pkg/util/validation", 606 | "pkg/util/validation/field", 607 | "pkg/util/wait", 608 | "pkg/util/yaml", 609 | "pkg/version", 610 | "pkg/watch", 611 | "third_party/forked/golang/json", 612 | "third_party/forked/golang/reflect", 613 | ] 614 | pruneopts = "NT" 615 | revision = "eddba98df674a16931d2d4ba75edc3a389bf633a" 616 | 617 | [[projects]] 618 | digest = "1:00089f60de414edb1a51e63efde2480ce87c95d2cb3536ea240afe483905d736" 619 | name = "k8s.io/client-go" 620 | packages = [ 621 | "discovery", 622 | "dynamic", 623 | "kubernetes", 624 | "kubernetes/scheme", 625 | "kubernetes/typed/admissionregistration/v1alpha1", 626 | "kubernetes/typed/admissionregistration/v1beta1", 627 | "kubernetes/typed/apps/v1", 628 | "kubernetes/typed/apps/v1beta1", 629 | "kubernetes/typed/apps/v1beta2", 630 | "kubernetes/typed/authentication/v1", 631 | "kubernetes/typed/authentication/v1beta1", 632 | "kubernetes/typed/authorization/v1", 633 | "kubernetes/typed/authorization/v1beta1", 634 | "kubernetes/typed/autoscaling/v1", 635 | "kubernetes/typed/autoscaling/v2beta1", 636 | "kubernetes/typed/autoscaling/v2beta2", 637 | "kubernetes/typed/batch/v1", 638 | "kubernetes/typed/batch/v1beta1", 639 | "kubernetes/typed/batch/v2alpha1", 640 | "kubernetes/typed/certificates/v1beta1", 641 | "kubernetes/typed/coordination/v1beta1", 642 | "kubernetes/typed/core/v1", 643 | "kubernetes/typed/events/v1beta1", 644 | "kubernetes/typed/extensions/v1beta1", 645 | "kubernetes/typed/networking/v1", 646 | "kubernetes/typed/policy/v1beta1", 647 | "kubernetes/typed/rbac/v1", 648 | "kubernetes/typed/rbac/v1alpha1", 649 | "kubernetes/typed/rbac/v1beta1", 650 | "kubernetes/typed/scheduling/v1alpha1", 651 | "kubernetes/typed/scheduling/v1beta1", 652 | "kubernetes/typed/settings/v1alpha1", 653 | "kubernetes/typed/storage/v1", 654 | "kubernetes/typed/storage/v1alpha1", 655 | "kubernetes/typed/storage/v1beta1", 656 | 
"pkg/apis/clientauthentication", 657 | "pkg/apis/clientauthentication/v1alpha1", 658 | "pkg/apis/clientauthentication/v1beta1", 659 | "pkg/version", 660 | "plugin/pkg/client/auth/exec", 661 | "plugin/pkg/client/auth/gcp", 662 | "rest", 663 | "rest/watch", 664 | "restmapper", 665 | "testing", 666 | "third_party/forked/golang/template", 667 | "tools/auth", 668 | "tools/cache", 669 | "tools/clientcmd", 670 | "tools/clientcmd/api", 671 | "tools/clientcmd/api/latest", 672 | "tools/clientcmd/api/v1", 673 | "tools/leaderelection", 674 | "tools/leaderelection/resourcelock", 675 | "tools/metrics", 676 | "tools/pager", 677 | "tools/record", 678 | "tools/reference", 679 | "transport", 680 | "util/buffer", 681 | "util/cert", 682 | "util/connrotation", 683 | "util/flowcontrol", 684 | "util/homedir", 685 | "util/integer", 686 | "util/jsonpath", 687 | "util/retry", 688 | "util/workqueue", 689 | ] 690 | pruneopts = "NT" 691 | revision = "d082d5923d3cc0bfbb066ee5fbdea3d0ca79acf8" 692 | 693 | [[projects]] 694 | digest = "1:4e2addcdbe0330f43800c1fcb905fc7a21b86415dfcca619e5c606c87257af1b" 695 | name = "k8s.io/code-generator" 696 | packages = [ 697 | "cmd/client-gen", 698 | "cmd/client-gen/args", 699 | "cmd/client-gen/generators", 700 | "cmd/client-gen/generators/fake", 701 | "cmd/client-gen/generators/scheme", 702 | "cmd/client-gen/generators/util", 703 | "cmd/client-gen/path", 704 | "cmd/client-gen/types", 705 | "cmd/conversion-gen", 706 | "cmd/conversion-gen/args", 707 | "cmd/conversion-gen/generators", 708 | "cmd/deepcopy-gen", 709 | "cmd/deepcopy-gen/args", 710 | "cmd/defaulter-gen", 711 | "cmd/defaulter-gen/args", 712 | "cmd/informer-gen", 713 | "cmd/informer-gen/args", 714 | "cmd/informer-gen/generators", 715 | "cmd/lister-gen", 716 | "cmd/lister-gen/args", 717 | "cmd/lister-gen/generators", 718 | "cmd/openapi-gen", 719 | "cmd/openapi-gen/args", 720 | "pkg/util", 721 | ] 722 | pruneopts = "T" 723 | revision = "3dcf91f64f638563e5106f21f50c31fa361c918d" 724 | 725 | [[projects]] 726 | branch = "master" 727 | digest = "1:5edbd655d7ee65178fd5750bda9a3d3cd7fb96291937926f4969e6b2dfbc5743" 728 | name = "k8s.io/gengo" 729 | packages = [ 730 | "args", 731 | "examples/deepcopy-gen/generators", 732 | "examples/defaulter-gen/generators", 733 | "examples/set-gen/sets", 734 | "generator", 735 | "namer", 736 | "parser", 737 | "types", 738 | ] 739 | pruneopts = "NT" 740 | revision = "fd15ee9cc2f77baa4f31e59e6acbf21146455073" 741 | 742 | [[projects]] 743 | digest = "1:f3b42f307c7f49a1a7276c48d4b910db76e003220e88797f7acd41e3a9277ddf" 744 | name = "k8s.io/klog" 745 | packages = ["."] 746 | pruneopts = "NT" 747 | revision = "a5bc97fbc634d635061f3146511332c7e313a55a" 748 | version = "v0.1.0" 749 | 750 | [[projects]] 751 | branch = "master" 752 | digest = "1:9ac2fdede4a8304e3b00ea3b36526536339f306d0306e320fc74f6cefeead18e" 753 | name = "k8s.io/kube-openapi" 754 | packages = [ 755 | "cmd/openapi-gen/args", 756 | "pkg/common", 757 | "pkg/generators", 758 | "pkg/generators/rules", 759 | "pkg/util/proto", 760 | "pkg/util/sets", 761 | ] 762 | pruneopts = "NT" 763 | revision = "0317810137be915b9cf888946c6e115c1bfac693" 764 | 765 | [[projects]] 766 | digest = "1:e03ddaf9f31bccbbb8c33eabad2c85025a95ca98905649fd744e0a54c630a064" 767 | name = "sigs.k8s.io/controller-runtime" 768 | packages = [ 769 | "pkg/cache", 770 | "pkg/cache/internal", 771 | "pkg/client", 772 | "pkg/client/apiutil", 773 | "pkg/client/config", 774 | "pkg/client/fake", 775 | "pkg/controller", 776 | "pkg/event", 777 | "pkg/handler", 778 | "pkg/internal/controller", 
779 | "pkg/internal/controller/metrics", 780 | "pkg/internal/recorder", 781 | "pkg/leaderelection", 782 | "pkg/manager", 783 | "pkg/metrics", 784 | "pkg/patch", 785 | "pkg/predicate", 786 | "pkg/reconcile", 787 | "pkg/recorder", 788 | "pkg/runtime/inject", 789 | "pkg/runtime/log", 790 | "pkg/runtime/scheme", 791 | "pkg/runtime/signals", 792 | "pkg/source", 793 | "pkg/source/internal", 794 | "pkg/webhook/admission", 795 | "pkg/webhook/admission/types", 796 | "pkg/webhook/types", 797 | ] 798 | pruneopts = "NT" 799 | revision = "c63ebda0bf4be5f0a8abd4003e4ea546032545ba" 800 | version = "v0.1.8" 801 | 802 | [solve-meta] 803 | analyzer-name = "dep" 804 | analyzer-version = 1 805 | input-imports = [ 806 | "github.com/go-logr/logr", 807 | "github.com/operator-framework/operator-sdk/pkg/k8sutil", 808 | "github.com/operator-framework/operator-sdk/pkg/leader", 809 | "github.com/operator-framework/operator-sdk/pkg/ready", 810 | "github.com/operator-framework/operator-sdk/version", 811 | "k8s.io/api/core/v1", 812 | "k8s.io/apimachinery/pkg/api/errors", 813 | "k8s.io/apimachinery/pkg/apis/meta/v1", 814 | "k8s.io/apimachinery/pkg/runtime", 815 | "k8s.io/apimachinery/pkg/runtime/schema", 816 | "k8s.io/apimachinery/pkg/types", 817 | "k8s.io/client-go/kubernetes/scheme", 818 | "k8s.io/client-go/plugin/pkg/client/auth/gcp", 819 | "k8s.io/code-generator/cmd/client-gen", 820 | "k8s.io/code-generator/cmd/conversion-gen", 821 | "k8s.io/code-generator/cmd/deepcopy-gen", 822 | "k8s.io/code-generator/cmd/defaulter-gen", 823 | "k8s.io/code-generator/cmd/informer-gen", 824 | "k8s.io/code-generator/cmd/lister-gen", 825 | "k8s.io/code-generator/cmd/openapi-gen", 826 | "k8s.io/gengo/args", 827 | "sigs.k8s.io/controller-runtime/pkg/client", 828 | "sigs.k8s.io/controller-runtime/pkg/client/config", 829 | "sigs.k8s.io/controller-runtime/pkg/client/fake", 830 | "sigs.k8s.io/controller-runtime/pkg/controller", 831 | "sigs.k8s.io/controller-runtime/pkg/handler", 832 | "sigs.k8s.io/controller-runtime/pkg/manager", 833 | "sigs.k8s.io/controller-runtime/pkg/reconcile", 834 | "sigs.k8s.io/controller-runtime/pkg/runtime/log", 835 | "sigs.k8s.io/controller-runtime/pkg/runtime/scheme", 836 | "sigs.k8s.io/controller-runtime/pkg/runtime/signals", 837 | "sigs.k8s.io/controller-runtime/pkg/source", 838 | ] 839 | solver-name = "gps-cdcl" 840 | solver-version = 1 841 | -------------------------------------------------------------------------------- /Gopkg.toml: -------------------------------------------------------------------------------- 1 | # Force dep to vendor the code generators, which aren't imported just used at dev time. 
2 | required = [ 3 | "k8s.io/code-generator/cmd/defaulter-gen", 4 | "k8s.io/code-generator/cmd/deepcopy-gen", 5 | "k8s.io/code-generator/cmd/conversion-gen", 6 | "k8s.io/code-generator/cmd/client-gen", 7 | "k8s.io/code-generator/cmd/lister-gen", 8 | "k8s.io/code-generator/cmd/informer-gen", 9 | "k8s.io/code-generator/cmd/openapi-gen", 10 | "k8s.io/gengo/args", 11 | ] 12 | 13 | [[override]] 14 | name = "k8s.io/code-generator" 15 | # revision for tag "kubernetes-1.12.3" 16 | revision = "3dcf91f64f638563e5106f21f50c31fa361c918d" 17 | 18 | [[override]] 19 | name = "k8s.io/api" 20 | # revision for tag "kubernetes-1.12.3" 21 | revision = "b503174bad5991eb66f18247f52e41c3258f6348" 22 | 23 | [[override]] 24 | name = "k8s.io/apiextensions-apiserver" 25 | # revision for tag "kubernetes-1.12.3" 26 | revision = "0cd23ebeb6882bd1cdc2cb15fc7b2d72e8a86a5b" 27 | 28 | [[override]] 29 | name = "k8s.io/apimachinery" 30 | # revision for tag "kubernetes-1.12.3" 31 | revision = "eddba98df674a16931d2d4ba75edc3a389bf633a" 32 | 33 | [[override]] 34 | name = "k8s.io/client-go" 35 | # revision for tag "kubernetes-1.12.3" 36 | revision = "d082d5923d3cc0bfbb066ee5fbdea3d0ca79acf8" 37 | 38 | [[override]] 39 | name = "github.com/coreos/prometheus-operator" 40 | version = "=v0.26.0" 41 | 42 | [[override]] 43 | name = "sigs.k8s.io/controller-runtime" 44 | version = "=v0.1.8" 45 | 46 | [[constraint]] 47 | name = "github.com/operator-framework/operator-sdk" 48 | # The version rule is used for a specific release and the master branch for in between releases. 49 | branch = "master" #osdk_branch_annotation 50 | # version = "=v0.3.0" #osdk_version_annotation 51 | 52 | [prune] 53 | go-tests = true 54 | non-go = true 55 | 56 | [[prune.project]] 57 | name = "k8s.io/code-generator" 58 | non-go = false 59 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 
34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Deprecation notice 2 | 3 | This repository is deprecated, and will soon be archived. 4 | Instructions for the new operator can be found [here](https://vitess.io/docs/get-started/operator/). 5 | 6 | # Vitess Operator 7 | 8 | The Vitess Operator provides automation that simplifies the administration 9 | of [Vitess](https://vitess.io) clusters on Kubernetes. 
10 | 11 | The Operator installs a custom resource definition (CRD) for objects of the custom type 12 | VitessCluster. 13 | This custom resource allows you to configure the high-level aspects of 14 | your Vitess deployment, while the details of how to run Vitess on Kubernetes 15 | are abstracted and automated. 16 | 17 | ## Vitess Components 18 | 19 | A typical VitessCluster object might expand to the following tree once it's 20 | fully deployed. 21 | Objects in **bold** are custom resource kinds defined by this Operator. 22 | 23 | * **VitessCluster**: The top-level specification for a Vitess cluster. 24 | This is the only object the user creates. 25 | * **VitessCell**: Each Vitess [cell](https://vitess.io/overview/concepts/#cell-data-center) 26 | represents an independent failure domain (e.g. a Zone or Availability Zone). 27 | * Lockserver ([etcd-operator](https://github.com/coreos/etcd-operator)): 28 | Vitess needs its own etcd cluster to coordinate its built-in load-balancing 29 | and automatic shard routing. Vitess supports multiple lockserver implementations, but the operator 30 | currently supports only etcd. 31 | * Deployment ([orchestrator](https://github.com/github/orchestrator)): 32 | An optional automated failover tool that works with Vitess. 33 | * Deployment ([vtctld](https://vitess.io/overview/#vtctld)): 34 | A pool of stateless Vitess admin servers, which serve a dashboard UI and act 35 | as an endpoint for the Vitess CLI tool (vtctlclient). 36 | * Deployment ([vtgate](https://vitess.io/overview/#vtgate)): 37 | A pool of stateless Vitess query routers. 38 | The client application can use any one of these vtgate Pods as the entry 39 | point into Vitess, through a MySQL-compatible interface. 40 | * **VitessKeyspace** (db1): Each Vitess [keyspace](https://vitess.io/overview/concepts/#keyspace) 41 | is a logical database that may be composed of many MySQL databases (shards). 42 | * **VitessShard** (db1/0): Each Vitess [shard](https://vitess.io/overview/concepts/#shard) 43 | is a single-master tree of replicating MySQL instances. 44 | * StatefulSet(s) ([vttablet](https://vitess.io/overview/#vttablet)): Within a shard, there may be many Vitess [tablets](https://vitess.io/overview/concepts/#tablet) 45 | (individual MySQL instances). 46 | * PersistentVolumeClaim(s) 47 | * **VitessShard** (db1/1) 48 | * StatefulSet(s) (vttablet) 49 | * PersistentVolumeClaim(s) 50 | * **VitessKeyspace** (db2) 51 | * **VitessShard** (db2/0) 52 | * StatefulSet(s) (vttablet) 53 | * PersistentVolumeClaim(s) 54 | 55 | ## Prerequisites 56 | 57 | * Kubernetes 1.8+ is required for its improved CRD support, especially garbage 58 | collection. 59 | * This config currently requires a dynamic PersistentVolume provisioner and a 60 | default StorageClass. 61 | * [etcd-operator](https://github.com/coreos/etcd-operator) 62 | 63 | ## Deploy the Operator 64 | 65 | Once the Operator is installed, you can create VitessCluster 66 | objects in any namespace, as long as the etcd operator is running 67 | in that namespace or is running in [clusterwide](https://github.com/coreos/etcd-operator/blob/master/doc/user/clusterwide.md) mode.
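Before applying the manifests, you can confirm that the etcd operator is available. This is a minimal sketch, assuming the etcd operator was installed with its default Deployment name (`etcd-operator`); adjust the namespace and name to match your installation:

```sh
# Assumed default name for the etcd-operator Deployment; change to match your install.
kubectl -n default get deployment etcd-operator
```

Then install the CRDs, RBAC rules, and the operator Deployment: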
68 | 69 | ```sh 70 | kubectl apply -R -f deploy 71 | ``` 72 | 73 | ### Create a VitessCluster 74 | 75 | ```sh 76 | kubectl apply -f my-vitess.yaml 77 | ``` 78 | 79 | ### View the Vitess Dashboards 80 | 81 | Wait until the cluster is ready: 82 | 83 | ```sh 84 | kubectl get vitessclusters -o 'custom-columns=NAME:.metadata.name,READY:.status.phase' 85 | ``` 86 | 87 | You should see: 88 | 89 | ```console 90 | NAME READY 91 | vitess Ready 92 | ``` 93 | 94 | Start a kubectl proxy: 95 | 96 | ```sh 97 | kubectl proxy --port=8001 98 | ``` 99 | 100 | Then visit: 101 | 102 | ``` 103 | http://localhost:8001/api/v1/namespaces/default/services/vt-zone1-vtctld:web/proxy/app/ 104 | ``` 105 | 106 | ### Clean Up 107 | 108 | ```sh 109 | # Delete the VitessCluster and etcd objects 110 | kubectl delete -f my-vitess.yaml 111 | # Uninstall the Vitess Operator 112 | kubectl delete -R -f deploy 113 | ``` 114 | 115 | ## TODO 116 | 117 | - [x] Create a StatefulSet for each VitessTablet in a VitessCluster 118 | - [x] Create a Job to elect the initial master in each VitessShard 119 | - [x] Fix parenting and normalization 120 | - [x] Create vtctld Deployment and Service 121 | - [x] Create vttablet service 122 | - [x] Create vtgate Deployment and Service 123 | - [ ] Create PodDisruptionBudgets 124 | - [ ] Reconcile all the things! 125 | - [ ] Label pods when they become shard masters 126 | - [ ] Add the ability to automatically merge/split a shard 127 | - [ ] Add the ability to automatically export/import resources from embedded objects to separate objects and back 128 | - [ ] Move shard master election into the operator 129 | 130 | ## Dev 131 | 132 | - Install the [operator sdk](https://github.com/operator-framework/operator-sdk) 133 | - Configure local kubectl access to a test Kubernetes cluster 134 | - Create the CRDs in your Kubernetes cluster 135 | - `kubectl apply -f deploy/crds` 136 | - Run the operator locally 137 | - `operator-sdk up local` 138 | - Create the sample cluster 139 | - `kubectl create -f my-vitess.yaml` 140 | -------------------------------------------------------------------------------- /build/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.8 2 | 3 | RUN apk upgrade --update --no-cache 4 | 5 | USER nobody 6 | 7 | ADD build/_output/bin/vitess-operator /usr/local/bin/vitess-operator 8 | -------------------------------------------------------------------------------- /cmd/manager/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "flag" 6 | "fmt" 7 | "os" 8 | "runtime" 9 | 10 | "github.com/operator-framework/operator-sdk/pkg/k8sutil" 11 | "github.com/operator-framework/operator-sdk/pkg/leader" 12 | "github.com/operator-framework/operator-sdk/pkg/ready" 13 | sdkVersion "github.com/operator-framework/operator-sdk/version" 14 | "vitess.io/vitess-operator/pkg/apis" 15 | "vitess.io/vitess-operator/pkg/controller" 16 | _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" 17 | "sigs.k8s.io/controller-runtime/pkg/client/config" 18 | "sigs.k8s.io/controller-runtime/pkg/manager" 19 | logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" 20 | "sigs.k8s.io/controller-runtime/pkg/runtime/signals" 21 | ) 22 | 23 | var log = logf.Log.WithName("cmd") 24 | 25 | func printVersion() { 26 | log.Info(fmt.Sprintf("Go Version: %s", runtime.Version())) 27 | log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) 28 | log.Info(fmt.Sprintf("operator-sdk Version: 
%v", sdkVersion.Version)) 29 | } 30 | 31 | func main() { 32 | flag.Parse() 33 | 34 | // The logger instantiated here can be changed to any logger 35 | // implementing the logr.Logger interface. This logger will 36 | // be propagated through the whole operator, generating 37 | // uniform and structured logs. 38 | logf.SetLogger(logf.ZapLogger(false)) 39 | 40 | printVersion() 41 | 42 | namespace, err := k8sutil.GetWatchNamespace() 43 | if err != nil { 44 | log.Error(err, "failed to get watch namespace") 45 | os.Exit(1) 46 | } 47 | 48 | // Get a config to talk to the apiserver 49 | cfg, err := config.GetConfig() 50 | if err != nil { 51 | log.Error(err, "") 52 | os.Exit(1) 53 | } 54 | 55 | // Become the leader before proceeding 56 | leader.Become(context.TODO(), "vitess-operator-lock") 57 | 58 | r := ready.NewFileReady() 59 | err = r.Set() 60 | if err != nil { 61 | log.Error(err, "") 62 | os.Exit(1) 63 | } 64 | defer r.Unset() 65 | 66 | // Create a new Cmd to provide shared dependencies and start components 67 | mgr, err := manager.New(cfg, manager.Options{Namespace: namespace}) 68 | if err != nil { 69 | log.Error(err, "") 70 | os.Exit(1) 71 | } 72 | 73 | log.Info("Registering Components.") 74 | 75 | // Setup Scheme for all resources 76 | if err := apis.AddToScheme(mgr.GetScheme()); err != nil { 77 | log.Error(err, "") 78 | os.Exit(1) 79 | } 80 | 81 | // Setup all Controllers 82 | if err := controller.AddToManager(mgr); err != nil { 83 | log.Error(err, "") 84 | os.Exit(1) 85 | } 86 | 87 | log.Info("Starting the Cmd.") 88 | 89 | // Start the Cmd 90 | if err := mgr.Start(signals.SetupSignalHandler()); err != nil { 91 | log.Error(err, "manager exited non-zero") 92 | os.Exit(1) 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /deploy/crds/vitess_v1alpha2_vitesscell_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: vitesscells.vitess.io 5 | spec: 6 | group: vitess.io 7 | names: 8 | kind: VitessCell 9 | listKind: VitessCellList 10 | plural: vitesscells 11 | singular: vitesscell 12 | scope: Namespaced 13 | version: v1alpha2 14 | subresources: 15 | status: {} 16 | -------------------------------------------------------------------------------- /deploy/crds/vitess_v1alpha2_vitesscluster_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: vitessclusters.vitess.io 5 | spec: 6 | group: vitess.io 7 | names: 8 | kind: VitessCluster 9 | listKind: VitessClusterList 10 | plural: vitessclusters 11 | singular: vitesscluster 12 | scope: Namespaced 13 | version: v1alpha2 14 | subresources: 15 | status: {} 16 | -------------------------------------------------------------------------------- /deploy/crds/vitess_v1alpha2_vitesskeyspace_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: vitesskeyspaces.vitess.io 5 | spec: 6 | group: vitess.io 7 | names: 8 | kind: VitessKeyspace 9 | listKind: VitessKeyspaceList 10 | plural: vitesskeyspaces 11 | singular: vitesskeyspace 12 | scope: Namespaced 13 | version: v1alpha2 14 | subresources: 15 | status: {} 16 | -------------------------------------------------------------------------------- 
/deploy/crds/vitess_v1alpha2_vitesslockserver_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: vitesslockservers.vitess.io 5 | spec: 6 | group: vitess.io 7 | names: 8 | kind: VitessLockserver 9 | listKind: VitessLockserverList 10 | plural: vitesslockservers 11 | singular: vitesslockserver 12 | scope: Namespaced 13 | version: v1alpha2 14 | subresources: 15 | status: {} 16 | -------------------------------------------------------------------------------- /deploy/crds/vitess_v1alpha2_vitessshard_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: vitessshards.vitess.io 5 | spec: 6 | group: vitess.io 7 | names: 8 | kind: VitessShard 9 | listKind: VitessShardList 10 | plural: vitessshards 11 | singular: vitessshard 12 | scope: Namespaced 13 | version: v1alpha2 14 | subresources: 15 | status: {} 16 | -------------------------------------------------------------------------------- /deploy/crds/vitess_v1alpha2_vitesstablet_crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: vitesstablets.vitess.io 5 | spec: 6 | group: vitess.io 7 | names: 8 | kind: VitessTablet 9 | listKind: VitessTabletList 10 | plural: vitesstablets 11 | singular: vitesstablet 12 | scope: Namespaced 13 | version: v1alpha2 14 | subresources: 15 | status: {} 16 | -------------------------------------------------------------------------------- /deploy/crds/vitess_v1alpha2_vitesstablet_crd.yaml.bak: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: vitesstablets.vitess.io 5 | spec: 6 | group: vitess.io 7 | names: 8 | kind: VitessTablet 9 | listKind: VitessTabletList 10 | plural: vitesstablets 11 | singular: vitesstablet 12 | scope: Namespaced 13 | versions: 14 | - name: v1alpha2 15 | served: true 16 | storage: true 17 | subresources: 18 | status: {} 19 | validation: 20 | openAPIV3Schema: 21 | properties: 22 | spec: 23 | properties: 24 | tabletID: 25 | type: integer 26 | format: int64 27 | replicas: 28 | type: integer 29 | format: int32 30 | CellID: 31 | type: string 32 | pattern: '^[a-z0-9]([-a-z0-9]*[a-z0-9])?$' 33 | type: 34 | type: string 35 | enum: 36 | - master 37 | - replica 38 | - readonly 39 | - backup 40 | - restore 41 | - drained 42 | datastore: 43 | type: string 44 | enum: 45 | - "" 46 | - local 47 | containers: 48 | type: object 49 | properties: 50 | dbflavor: 51 | type: string 52 | enum: 53 | - "" 54 | - mysql 55 | mysql: 56 | type: object 57 | properties: 58 | image: 59 | type: string 60 | resources: 61 | type: object 62 | properties: 63 | limits: 64 | type: object 65 | additionalProperties: 66 | type: string 67 | requests: 68 | type: object 69 | additionalProperties: 70 | type: string 71 | dbflavor: 72 | type: string 73 | vttablet: 74 | type: object 75 | properties: 76 | image: 77 | type: string 78 | resources: 79 | type: object 80 | properties: 81 | limits: 82 | type: object 83 | additionalProperties: 84 | type: string 85 | requests: 86 | type: object 87 | additionalProperties: 88 | type: string 89 | dbflavor: 90 | type: string 91 | enum: 92 | - "" 93 | - mysql 
94 | -------------------------------------------------------------------------------- /deploy/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: vitess-operator 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | name: vitess-operator 10 | template: 11 | metadata: 12 | labels: 13 | name: vitess-operator 14 | spec: 15 | serviceAccountName: vitess-operator 16 | containers: 17 | - name: vitess-operator 18 | image: vitess/operator:v0.0.2 19 | ports: 20 | - containerPort: 60000 21 | name: metrics 22 | command: 23 | - vitess-operator 24 | imagePullPolicy: Always 25 | readinessProbe: 26 | exec: 27 | command: 28 | - stat 29 | - /tmp/operator-sdk-ready 30 | initialDelaySeconds: 4 31 | periodSeconds: 10 32 | failureThreshold: 1 33 | env: 34 | - name: WATCH_NAMESPACE 35 | valueFrom: 36 | fieldRef: 37 | fieldPath: metadata.namespace 38 | - name: POD_NAME 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.name 42 | - name: OPERATOR_NAME 43 | value: "vitess-operator" 44 | -------------------------------------------------------------------------------- /deploy/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | creationTimestamp: null 5 | name: vitess-operator 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | - services 12 | - endpoints 13 | - persistentvolumeclaims 14 | - events 15 | - configmaps 16 | - secrets 17 | verbs: 18 | - '*' 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - namespaces 23 | verbs: 24 | - get 25 | - apiGroups: 26 | - batch 27 | resources: 28 | - jobs 29 | verbs: 30 | - '*' 31 | - apiGroups: 32 | - apps 33 | resources: 34 | - deployments 35 | - daemonsets 36 | - replicasets 37 | - statefulsets 38 | verbs: 39 | - '*' 40 | - apiGroups: 41 | - monitoring.coreos.com 42 | resources: 43 | - servicemonitors 44 | verbs: 45 | - get 46 | - create 47 | - apiGroups: 48 | - vitess.io 49 | resources: 50 | - '*' 51 | - vitesscells 52 | - vitesskeyspaces 53 | - vitessshards 54 | - vitesstablets 55 | - vitesslockservers 56 | verbs: 57 | - '*' 58 | -------------------------------------------------------------------------------- /deploy/role_binding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: vitess-operator 5 | subjects: 6 | - kind: ServiceAccount 7 | name: vitess-operator 8 | roleRef: 9 | kind: Role 10 | name: vitess-operator 11 | apiGroup: rbac.authorization.k8s.io 12 | -------------------------------------------------------------------------------- /deploy/service_account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: vitess-operator 5 | -------------------------------------------------------------------------------- /examples/all-in-one.yaml: -------------------------------------------------------------------------------- 1 | ## Sample VitessCluster all-in-one resource 2 | apiVersion: vitess.io/v1alpha2 3 | kind: VitessCluster 4 | metadata: 5 | name: aio 6 | labels: 7 | app: vitess 8 | spec: 9 | lockserver: 10 | metadata: 11 | name: global 12 | spec: 13 | type: etcd2 14 | etcd2: 15 | address: etcd-global-client:2379 16 | pathPrefix: /vitess/global 17 | cells: 18 | - metadata: 19 | name: zone1 20 
| spec: 21 | lockserver: 22 | metadata: 23 | name: zone1 24 | spec: 25 | type: etcd2 26 | etcd2: 27 | address: etcd-zone1-client:2379 28 | pathPrefix: /vitess/zone1 29 | defaults: 30 | replicas: 1 31 | image: vitess/vttablet:helm-1.0.4 32 | keyspaces: 33 | - metadata: 34 | name: unsharded-dbname 35 | spec: 36 | shards: 37 | - metadata: 38 | name: "0" 39 | spec: 40 | defaults: 41 | replicas: 2 42 | containers: 43 | mysql: 44 | image: percona:5.7.23 45 | vttablet: 46 | image: vitess/vttablet:helm-1.0.4 47 | tablets: 48 | - metadata: 49 | name: zone1 50 | spec: 51 | cellID: zone1 52 | tabletID: 101 53 | type: replica 54 | - metadata: 55 | name: sharded-dbname 56 | spec: 57 | shards: 58 | - metadata: 59 | name: "x-80" 60 | spec: 61 | keyRange: { to: "80" } 62 | defaults: 63 | replicas: 2 64 | containers: 65 | mysql: 66 | image: percona:5.7.23 67 | vttablet: 68 | image: vitess/vttablet:helm-1.0.4 69 | tablets: 70 | - metadata: 71 | name: zone1 72 | spec: 73 | cellID: zone1 74 | tabletID: 102 75 | type: replica 76 | - metadata: 77 | name: "80-x" 78 | spec: 79 | keyRange: { from: "80" } 80 | defaults: 81 | replicas: 2 82 | containers: 83 | mysql: 84 | image: percona:5.7.23 85 | vttablet: 86 | image: vitess/vttablet:helm-1.0.4 87 | tablets: 88 | - metadata: 89 | name: zone1 90 | spec: 91 | cellID: zone1 92 | tabletID: 103 93 | type: replica 94 | -------------------------------------------------------------------------------- /examples/distributed.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Sample VitessTablet 3 | apiVersion: vitess.io/v1alpha2 4 | kind: VitessTablet 5 | metadata: 6 | name: replica 7 | labels: 8 | cluster: complex 9 | spec: 10 | cellID: zone1 11 | tabletID: 101 12 | type: replica 13 | --- 14 | ## Sample VitessShard selecting tablets 15 | apiVersion: vitess.io/v1alpha2 16 | kind: VitessShard 17 | metadata: 18 | name: zero 19 | labels: 20 | cluster: complex 21 | spec: 22 | defaults: 23 | replicas: 2 24 | containers: 25 | mysql: 26 | image: percona:5.7.23 27 | vttablet: 28 | image: vitess/vttablet:helm-1.0.4 29 | tabletSelector: 30 | - key: "cluster" 31 | operator: "In" 32 | values: 33 | - "complex" 34 | --- 35 | ## Sample VitessKeyspace selecting shards 36 | apiVersion: vitess.io/v1alpha2 37 | kind: VitessKeyspace 38 | metadata: 39 | name: unsharded-dbname 40 | labels: 41 | cluster: complex 42 | spec: 43 | shardSelector: 44 | - key: "cluster" 45 | operator: "In" 46 | values: 47 | - "complex" 48 | --- 49 | ## Sample VitessCell 50 | apiVersion: vitess.io/v1alpha2 51 | kind: VitessCell 52 | metadata: 53 | name: zone1 54 | labels: 55 | cluster: complex 56 | spec: 57 | lockserver: 58 | metadata: 59 | name: zone1 60 | spec: 61 | type: etcd2 62 | etcd2: 63 | address: etcd-zone1-client:2379 64 | pathPrefix: /vitess/zone1 65 | defaults: 66 | replicas: 1 67 | image: vitess/vttablet:helm-1.0.4 68 | --- 69 | ## Sample VitessCluster selecting cells and keyspaces 70 | apiVersion: vitess.io/v1alpha2 71 | kind: VitessCluster 72 | metadata: 73 | name: dist 74 | labels: 75 | app: vitess 76 | spec: 77 | lockserver: 78 | metadata: 79 | name: global 80 | spec: 81 | etcd2: 82 | address: etcd-global-client:2379 83 | pathPrefix: /vitess/global 84 | cellSelector: 85 | - key: "cluster" 86 | operator: "In" 87 | values: 88 | - "complex" 89 | keyspaceSelector: 90 | - key: "cluster" 91 | operator: "In" 92 | values: 93 | - "complex" 94 | -------------------------------------------------------------------------------- /examples/etcd-clusters-minimal.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: etcd.database.coreos.com/v1beta2 4 | kind: EtcdCluster 5 | metadata: 6 | name: etcd-zone1 7 | spec: 8 | pod: 9 | affinity: 10 | podAntiAffinity: 11 | preferredDuringSchedulingIgnoredDuringExecution: 12 | - podAffinityTerm: 13 | labelSelector: 14 | matchLabels: 15 | etcd_cluster: etcd-zone1 16 | topologyKey: kubernetes.io/hostname 17 | weight: 100 18 | resources: 19 | requests: 20 | cpu: 200m 21 | memory: 100Mi 22 | repository: quay.io/coreos/etcd 23 | size: 1 24 | version: 3.3.10 25 | kind: List 26 | metadata: 27 | resourceVersion: "" 28 | selfLink: "" 29 | -------------------------------------------------------------------------------- /examples/etcd-clusters.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: etcd.database.coreos.com/v1beta2 4 | kind: EtcdCluster 5 | metadata: 6 | name: etcd-global 7 | spec: 8 | pod: 9 | affinity: 10 | podAntiAffinity: 11 | preferredDuringSchedulingIgnoredDuringExecution: 12 | - podAffinityTerm: 13 | labelSelector: 14 | matchLabels: 15 | etcd_cluster: etcd-global 16 | topologyKey: kubernetes.io/hostname 17 | weight: 100 18 | resources: 19 | requests: 20 | cpu: 200m 21 | memory: 100Mi 22 | repository: quay.io/coreos/etcd 23 | size: 3 24 | version: 3.3.10 25 | - apiVersion: etcd.database.coreos.com/v1beta2 26 | kind: EtcdCluster 27 | metadata: 28 | name: etcd-zone1 29 | spec: 30 | pod: 31 | affinity: 32 | podAntiAffinity: 33 | preferredDuringSchedulingIgnoredDuringExecution: 34 | - podAffinityTerm: 35 | labelSelector: 36 | matchLabels: 37 | etcd_cluster: etcd-zone1 38 | topologyKey: kubernetes.io/hostname 39 | weight: 100 40 | resources: 41 | requests: 42 | cpu: 200m 43 | memory: 100Mi 44 | repository: quay.io/coreos/etcd 45 | size: 3 46 | version: 3.3.10 47 | kind: List 48 | metadata: 49 | resourceVersion: "" 50 | selfLink: "" 51 | -------------------------------------------------------------------------------- /my-vitess.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # To keep the example workload to a minimum, a single 3 | # topology server is used for the local and global scope 4 | # This is fine in small installations but may not be 5 | # ideal for large, multi-zone installations 6 | apiVersion: etcd.database.coreos.com/v1beta2 7 | kind: EtcdCluster 8 | metadata: 9 | name: etcd-zone1 10 | spec: 11 | pod: 12 | affinity: 13 | podAntiAffinity: 14 | preferredDuringSchedulingIgnoredDuringExecution: 15 | - podAffinityTerm: 16 | labelSelector: 17 | matchLabels: 18 | etcd_cluster: etcd-vitess 19 | topologyKey: kubernetes.io/hostname 20 | weight: 100 21 | resources: 22 | requests: 23 | cpu: 200m 24 | memory: 100Mi 25 | repository: quay.io/coreos/etcd 26 | size: 1 27 | version: 3.3.10 28 | --- 29 | # Sample VitessCluster with all resources embedded within 30 | # the VitessCluster object. It is also possible to split 31 | # all or some of the resources into their own objects for easier 32 | # management. 
See the examples directory for more information 33 | apiVersion: vitess.io/v1alpha2 34 | kind: VitessCluster 35 | metadata: 36 | name: vt 37 | labels: 38 | app: vitess 39 | spec: 40 | lockserver: 41 | metadata: 42 | name: global 43 | spec: 44 | type: etcd2 45 | etcd2: 46 | address: etcd-global-client:2379 47 | pathPrefix: /vitess/global 48 | cells: 49 | - metadata: 50 | name: zone1 51 | spec: 52 | lockserver: 53 | metadata: 54 | name: zone1 55 | spec: 56 | type: etcd2 57 | etcd2: 58 | address: etcd-zone1-client:2379 59 | pathPrefix: /vitess/zone1 60 | defaults: 61 | replicas: 1 62 | image: vitess/vttablet:helm-1.0.4 63 | keyspaces: 64 | - metadata: 65 | name: unsharded-dbname 66 | spec: 67 | shards: 68 | - metadata: 69 | name: "0" 70 | spec: 71 | defaults: 72 | replicas: 2 73 | containers: 74 | mysql: 75 | image: percona:5.7.23 76 | vttablet: 77 | image: vitess/vttablet:helm-1.0.4 78 | tablets: 79 | - metadata: 80 | name: zone1 81 | spec: 82 | cellID: zone1 83 | tabletID: 101 84 | type: replica 85 | - metadata: 86 | name: sharded-dbname 87 | spec: 88 | shards: 89 | - metadata: 90 | name: "x-80" 91 | spec: 92 | keyRange: { to: "80" } 93 | defaults: 94 | replicas: 2 95 | containers: 96 | mysql: 97 | image: percona:5.7.23 98 | vttablet: 99 | image: vitess/vttablet:helm-1.0.4 100 | tablets: 101 | - metadata: 102 | name: zone1 103 | spec: 104 | cellID: zone1 105 | tabletID: 102 106 | type: replica 107 | - metadata: 108 | name: "80-x" 109 | spec: 110 | keyRange: { from: "80" } 111 | defaults: 112 | replicas: 2 113 | containers: 114 | mysql: 115 | image: percona:5.7.23 116 | vttablet: 117 | image: vitess/vttablet:helm-1.0.4 118 | tablets: 119 | - metadata: 120 | name: zone1 121 | spec: 122 | cellID: zone1 123 | tabletID: 103 124 | type: replica 125 | -------------------------------------------------------------------------------- /pkg/apis/addtoscheme_vitess_v1alpha2.go: -------------------------------------------------------------------------------- 1 | package apis 2 | 3 | import ( 4 | "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 5 | ) 6 | 7 | func init() { 8 | // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back 9 | AddToSchemes = append(AddToSchemes, v1alpha2.SchemeBuilder.AddToScheme) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/apis/apis.go: -------------------------------------------------------------------------------- 1 | package apis 2 | 3 | import ( 4 | "k8s.io/apimachinery/pkg/runtime" 5 | ) 6 | 7 | // AddToSchemes may be used to add all resources defined in the project to a Scheme 8 | var AddToSchemes runtime.SchemeBuilder 9 | 10 | // AddToScheme adds all Resources to the Scheme 11 | func AddToScheme(s *runtime.Scheme) error { 12 | return AddToSchemes.AddToScheme(s) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/doc.go: -------------------------------------------------------------------------------- 1 | // Package v1alpha2 contains API Schema definitions for the vitess v1alpha2 API group 2 | // +k8s:deepcopy-gen=package,register 3 | // +groupName=vitess.io 4 | package v1alpha2 5 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/interfaces.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | type ConfigProvider interface { 4 | GetTabletContainers() *TabletContainers 5 | } 6 | 
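ConfigProvider is the hook that lets tablet container configuration be inherited from enclosing resources: VitessTablet, VitessShard, and VitessKeyspace each implement GetTabletContainers, and the tablet helpers walk them from most to least specific. The following is a minimal sketch of that lookup in isolation; resolveMySQLContainer is a hypothetical helper written for illustration, not a function in this package:

// resolveMySQLContainer walks ConfigProviders from most to least specific
// and returns the first MySQL container definition it finds, mirroring the
// inheritance order used by VitessTablet.GetMySQLContainer.
func resolveMySQLContainer(providers ...ConfigProvider) *MySQLContainer {
	for _, p := range providers {
		if c := p.GetTabletContainers(); c != nil && c.MySQL != nil {
			return c.MySQL
		}
	}
	return nil
}

// Typical call order, matching vitesstablet_helpers.go:
//   mysql := resolveMySQLContainer(tablet, tablet.Shard(), tablet.Keyspace())
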
-------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/register.go: -------------------------------------------------------------------------------- 1 | // NOTE: Boilerplate only. Ignore this file. 2 | 3 | // Package v1alpha2 contains API Schema definitions for the vitess v1alpha2 API group 4 | // +k8s:deepcopy-gen=package,register 5 | // +groupName=vitess.io 6 | package v1alpha2 7 | 8 | import ( 9 | "k8s.io/apimachinery/pkg/runtime/schema" 10 | "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" 11 | ) 12 | 13 | var ( 14 | // SchemeGroupVersion is group version used to register these objects 15 | SchemeGroupVersion = schema.GroupVersion{Group: "vitess.io", Version: "v1alpha2"} 16 | 17 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 18 | SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} 19 | ) 20 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/samples.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: vitess.io/v1alpha2 3 | kind: VitessCluster 4 | metadata: 5 | name: superawesomecluster 6 | labels: 7 | app: vitess 8 | spec: 9 | lockserver: 10 | type: etcd3 11 | address: etcd-cluster-client:2379 12 | path: /vitess/global 13 | cells: 14 | cellSelector: 15 | matchLabels: 16 | matchExpression: 17 | keyspaces: 18 | keyspaceSelector: 19 | matchLabels: 20 | matchExpression: 21 | --- 22 | apiVersion: vitess.io/v1alpha2 23 | kind: VitessCell 24 | metadata: 25 | name: uswest 26 | labels: 27 | app: vitess 28 | spec: 29 | lockserver: 30 | type: etcd3 31 | address: etcd-cluster-client:2379 32 | path: /vitess/uswest 33 | vtgate: 34 | - count: 35 | containers: 36 | vtgate: 37 | image: 38 | resources: 39 | ... 40 | affinity: 41 | ... 42 | credentials: 43 | secret: 44 | name: 45 | key: 46 | cells: 47 | - uswest 48 | - useast 49 | cellSelector: 50 | matchLabels: 51 | matchExpression: 52 | vtworker: 53 | - count: 54 | containers: 55 | vtworker: 56 | image: 57 | resources: 58 | ... 59 | affinity: 60 | ... 61 | vtctld: 62 | - count: 63 | containers: 64 | vtctld: 65 | image: 66 | resources: 67 | ... 68 | afinity: 69 | ... 70 | --- 71 | apiVersion: vitess.io/v1alpha2 72 | kind: VitessKeyspace 73 | metadata: 74 | name: messagedb 75 | labels: 76 | app: vitess 77 | cluster: superawesomecluster 78 | spec: 79 | defaults: 80 | shards: 81 | count: 82 | replicas: 83 | count: 84 | batch: 85 | count: 86 | containers: 87 | vttablet: 88 | mysql: 89 | ... 90 | cells: 91 | ... 92 | cellSelector: 93 | ... 94 | shards: 95 | ... 96 | shardSelector: 97 | ... 98 | --- 99 | apiVersion: vitess.io/v1alpha2 100 | kind: VitessShard 101 | metadata: 102 | name: "-80" 103 | labels: 104 | keyspace: messagedb 105 | cluster: superawesomecluster 106 | app: vitess 107 | spec: 108 | defaults: 109 | replicas: 110 | batch: 111 | containers: 112 | vttablet: 113 | mysql: 114 | ... 115 | volumeClaim: 116 | ... 
117 | keyrange: 118 | from: 119 | to: 120 | tablets: 121 | tabletSelector: 122 | --- 123 | apiVersion: vitess.io/v1alpha2 124 | kind: VitessTablet 125 | metadata: 126 | name: "" 127 | labels: 128 | shard: "-80" 129 | keyspace: messagedb 130 | cluster: superawesomecluster 131 | cell: uswest 132 | spec: 133 | tabletId: 101 134 | cell: uswest 135 | keyrange: 136 | from: 137 | to: 138 | type: "replica|rdonly" 139 | datastore: 140 | type: local 141 | containers: 142 | vttablet: 143 | image: vitess/base 144 | resources: 145 | limit: {cpu: "100m", memory: "128mi"} 146 | mysql: 147 | image: 148 | resources: 149 | volumeClaim: 150 | ... 151 | credentials: 152 | secret: 153 | name: 154 | key: 155 | --- 156 | apiVersion: vitess.io/v1alpha2 157 | kind: VitessCluster 158 | metadata: 159 | name: superawesomecluster 160 | labels: 161 | app: vitess 162 | spec: 163 | lockserver: 164 | provision: true 165 | etcd3: 166 | address: etcd-cluster-client:2379 167 | path: /vitess/global 168 | # lockserverRef: 169 | # name: etcd 170 | cells: 171 | - metadata: 172 | name: uswest 173 | spec: 174 | lockserver: 175 | etcd3: 176 | address: etc-cluster-client:2379 177 | path: /vitess/uswest 178 | vtgate: 179 | - count: 2 180 | vtworker: 181 | - count: 2 182 | vtctld: 183 | - count: 1 184 | keyspaces: 185 | - metadata: 186 | name: messagedb 187 | spec: 188 | shards: 189 | - metadata: 190 | name: "-80" 191 | spec: 192 | keyrange: { to: "80" } 193 | tablets: 194 | - metadata: 195 | name: "uswest-101" 196 | spec: 197 | tabletId: 101 198 | cell: uswest 199 | type: "replica" 200 | keyrange: { to: "80" } 201 | - metadata: 202 | name: "80-" 203 | spec: 204 | keyrange: { from: "80" } 205 | tablets: 206 | - metadata: 207 | name: "uswest-201" 208 | spec: 209 | tabletId: 201 210 | cell: uswest 211 | type: "replica" 212 | keyrange: { from: "80" } 213 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/shared_helpers.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | func (kr *KeyRange) String() string { 4 | if kr.From != "" || kr.To != "" { 5 | return kr.From + "-" + kr.To 6 | } 7 | 8 | // If no From or To is set, then default to the Vitess convention of 0 as they Keyrange string 9 | return "0" 10 | } 11 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/shared_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | ) 6 | 7 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 8 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 9 | 10 | type ResourceSelector struct { 11 | // The label key that the selector applies to. 12 | Key string `json:"key"` 13 | // Represents a key's relationship to a set of values. 14 | // Valid operators are In, NotIn, Exists, DoesNotExist 15 | Operator ResourceSelectorOperator `json:"operator"` 16 | // An array of string values. If the operator is In or NotIn, 17 | // the values array must be non-empty. If the operator is Exists or DoesNotExist, 18 | // This array is replaced during a strategic merge patch. 
19 | // +optional 20 | Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` 21 | } 22 | 23 | type ResourceSelectorOperator string 24 | 25 | const ( 26 | ResourceSelectorOpIn ResourceSelectorOperator = "In" 27 | ResourceSelectorOpNotIn ResourceSelectorOperator = "NotIn" 28 | ResourceSelectorOpExists ResourceSelectorOperator = "Exists" 29 | ResourceSelectorOpDoesNotExist ResourceSelectorOperator = "DoesNotExist" 30 | ) 31 | 32 | type TabletContainers struct { 33 | DBFlavor string `json:"dbFlavor,omitempty"` 34 | 35 | MySQL *MySQLContainer `json:"mysql"` 36 | 37 | VTTablet *VTTabletContainer `json:"vttablet"` 38 | } 39 | 40 | type MySQLContainer struct { 41 | Image string `json:"image"` 42 | 43 | Resources corev1.ResourceRequirements `json:"resources,omitempty"` 44 | 45 | DBFlavor string `json:"dbFlavor,omitempty"` 46 | } 47 | 48 | type VTTabletContainer struct { 49 | Image string `json:"image"` 50 | 51 | Resources corev1.ResourceRequirements `json:"resources,omitempty"` 52 | 53 | DBFlavor string `json:"dbFlavor,omitempty"` 54 | } 55 | 56 | type KeyRange struct { 57 | From string `json:"from,omitempty"` 58 | 59 | To string `json:"to,omitempty"` 60 | } 61 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesscell_helpers.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | func (cell *VitessCell) SetParentCluster(cluster *VitessCluster) { 8 | cell.Spec.parent.Cluster = cluster 9 | } 10 | 11 | func (cell *VitessCell) Cluster() *VitessCluster { 12 | return cell.Spec.parent.Cluster 13 | } 14 | 15 | func (cell *VitessCell) Lockserver() *VitessLockserver { 16 | return cell.Spec.Lockserver 17 | } 18 | 19 | func (cell *VitessCell) GetScopedName(extra ...string) string { 20 | return strings.Join(append( 21 | []string{ 22 | cell.Cluster().GetName(), 23 | cell.GetName(), 24 | }, 25 | extra...), "-") 26 | } 27 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesscell_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 9 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 10 | 11 | // VitessCellSpec defines the desired state of VitessCell 12 | type VitessCellSpec struct { 13 | Lockserver *VitessLockserver `json:"lockserver"` 14 | 15 | LockserverRef *corev1.LocalObjectReference `json:"lockserverRef,omitempty"` 16 | 17 | Defaults *VitessCellDefaults `json:"defaults"` 18 | 19 | MySQLProtocol *VitessCellMySQLProtocol `json:"mysqlProtocol"` 20 | 21 | VTGate []VTComponent `json:"vtgate"` 22 | 23 | VTWorker []VTComponent `json:"vtworker"` 24 | 25 | VTCtld []VTComponent `json:"vtctld"` 26 | 27 | Orchestrator []VTComponent `json:"orchestrator"` 28 | 29 | // parent is unexported on purpose. 
30 | // It should only be used during processing and never stored 31 | parent VitessCellParents 32 | } 33 | 34 | type VitessCellParents struct { 35 | Cluster *VitessCluster 36 | } 37 | 38 | type VitessCellDefaults struct { 39 | Replicas *int32 `json:"replicas"` 40 | 41 | Image string `json:"image"` 42 | } 43 | 44 | type VitessCellMySQLProtocol struct { 45 | AuthType VitessMySQLAuthType `json:"authType,omitempty"` 46 | 47 | Username string `json:"image,omitempty"` 48 | 49 | // Password string `json:"password"` 50 | 51 | PasswordSecretRef *corev1.SecretKeySelector `json:"passwordSecretRef,omitempty"` 52 | } 53 | 54 | type VitessMySQLAuthType string 55 | 56 | const ( 57 | VitessMySQLAuthTypeNone VitessMySQLAuthType = "none" 58 | ) 59 | 60 | type VTGate struct { 61 | // Inline common component struct members 62 | VTComponent `json:",inline"` 63 | 64 | Credentials VTGateCredentials `json:"credentials,omitempty"` 65 | 66 | Cells []string `json:"cells:` 67 | 68 | CellSelector *CellSelector `json:"cellSelector,omitempty"` 69 | } 70 | 71 | type VTGateCredentials struct { 72 | // SecretRef points a Secret resource which contains the credentials 73 | // +optional 74 | SecretRef *corev1.SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` 75 | } 76 | 77 | type CellSelector struct { 78 | MatchLabels map[string]string `json:"matchLabels,omitempty"` 79 | 80 | MatchExpressions []ResourceSelector `json:"matchExpressions,omitempty"` 81 | } 82 | 83 | type VTComponent struct { 84 | Replicas int64 `json:"replicas,omitempty"` 85 | 86 | ContainerSpec []*corev1.Container `json:"containerSpec,omitempty"` 87 | } 88 | 89 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 90 | 91 | // VitessCell is the Schema for the vitesscells API 92 | // +k8s:openapi-gen=true 93 | type VitessCell struct { 94 | metav1.TypeMeta `json:",inline"` 95 | metav1.ObjectMeta `json:"metadata,omitempty"` 96 | 97 | Spec VitessCellSpec `json:"spec,omitempty"` 98 | } 99 | 100 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 101 | 102 | // VitessCellList contains a list of VitessCell 103 | type VitessCellList struct { 104 | metav1.TypeMeta `json:",inline"` 105 | metav1.ListMeta `json:"metadata,omitempty"` 106 | Items []VitessCell `json:"items"` 107 | } 108 | 109 | func init() { 110 | SchemeBuilder.Register(&VitessCell{}, &VitessCellList{}) 111 | } 112 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesscluster_helpers.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | func (cluster *VitessCluster) Cells() []*VitessCell { 8 | return cluster.Spec.Cells 9 | } 10 | 11 | func (cluster *VitessCluster) EmbedCellCopy(cell *VitessCell) { 12 | cluster.Spec.Cells = append(cluster.Spec.Cells, cell.DeepCopy()) 13 | } 14 | 15 | func (cluster *VitessCluster) Keyspaces() []*VitessKeyspace { 16 | return cluster.Spec.Keyspaces 17 | } 18 | 19 | func (cluster *VitessCluster) EmbedKeyspaceCopy(keyspace *VitessKeyspace) { 20 | cluster.Spec.Keyspaces = append(cluster.Spec.Keyspaces, keyspace.DeepCopy()) 21 | } 22 | 23 | func (cluster *VitessCluster) Shards() []*VitessShard { 24 | var shards []*VitessShard 25 | for _, keyspace := range cluster.Keyspaces() { 26 | shards = append(shards, keyspace.Shards()...) 
27 | } 28 | return shards 29 | } 30 | 31 | func (cluster *VitessCluster) Tablets() []*VitessTablet { 32 | var tablets []*VitessTablet 33 | for _, shard := range cluster.Shards() { 34 | tablets = append(tablets, shard.Tablets()...) 35 | } 36 | return tablets 37 | } 38 | 39 | func (cluster *VitessCluster) Lockserver() *VitessLockserver { 40 | return cluster.Spec.Lockserver 41 | } 42 | 43 | func (cluster *VitessCluster) GetCellByID(cellID string) *VitessCell { 44 | for _, cell := range cluster.Cells() { 45 | if cell.GetName() == cellID { 46 | return cell 47 | } 48 | } 49 | 50 | return nil 51 | } 52 | 53 | func (cluster *VitessCluster) GetScopedName(extra ...string) string { 54 | return strings.Join(append( 55 | []string{ 56 | cluster.GetName(), 57 | }, 58 | extra...), "-") 59 | } 60 | 61 | func (cluster *VitessCluster) GetTabletServiceName() string { 62 | return cluster.GetScopedName("tab") 63 | } 64 | 65 | func (cluster *VitessCluster) Phase() ClusterPhase { 66 | return cluster.Status.Phase 67 | } 68 | 69 | func (cluster *VitessCluster) SetPhase(p ClusterPhase) { 70 | cluster.Status.Phase = p 71 | } 72 | 73 | func (cluster *VitessCluster) InPhase(p ClusterPhase) bool { 74 | return cluster.Status.Phase == p 75 | } 76 | 77 | func (cluster *VitessCluster) AllTabletsReady() bool { 78 | for _, tablet := range cluster.Tablets() { 79 | if !tablet.InPhase(TabletPhaseReady) { 80 | return false 81 | } 82 | } 83 | return true 84 | } 85 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesscluster_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 9 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 10 | 11 | // VitessClusterSpec defines the desired state of VitessCluster 12 | type VitessClusterSpec struct { 13 | Lockserver *VitessLockserver `json:"lockserver,omitempty"` 14 | 15 | LockserverRef *corev1.LocalObjectReference `json:"lockserverRef,omitempty"` 16 | 17 | Cells []*VitessCell `json:"cells,omitempty"` 18 | 19 | CellSelector []ResourceSelector `json:"cellSelector,omitempty"` 20 | 21 | Keyspaces []*VitessKeyspace `json:"keyspaces,omitempty"` 22 | 23 | KeyspaceSelector []ResourceSelector `json:"keyspaceSelector,omitempty"` 24 | } 25 | 26 | // VitessClusterStatus defines the observed state of VitessCluster 27 | type VitessClusterStatus struct { 28 | Phase ClusterPhase `json:"phase,omitempty"` 29 | 30 | Reason string `json:"reason,omitempty"` 31 | 32 | Message string `json:"reason,omitempty"` 33 | 34 | Conditions []VitessClusterCondition `json:"conditions,omitempty"` 35 | 36 | Lockserver *VitessLockserverStatus `json:"lockserver,omitempty"` 37 | } 38 | 39 | type ClusterPhase string 40 | 41 | const ( 42 | ClusterPhaseNone ClusterPhase = "" 43 | ClusterPhaseCreating ClusterPhase = "Creating" 44 | ClusterPhaseReady ClusterPhase = "Ready" 45 | ) 46 | 47 | type VitessClusterCondition struct { 48 | // Type of cluster condition. 49 | Type ClusterConditionType `json:"type"` 50 | 51 | // Status of the condition, one of True, False, Unknown. 52 | Status corev1.ConditionStatus `json:"status"` 53 | 54 | // The last time this condition was updated. 
55 | LastUpdateTime string `json:"lastUpdateTime,omitempty"` 56 | 57 | // Last time the condition transitioned from one status to another. 58 | LastTransitionTime string `json:"lastTransitionTime,omitempty"` 59 | 60 | // The reason for the condition's last transition. 61 | Reason string `json:"reason,omitempty"` 62 | 63 | // A human readable message indicating details about the transition. 64 | Message string `json:"message,omitempty"` 65 | } 66 | 67 | type ClusterConditionType string 68 | 69 | const ( 70 | VitessClusterConditionAvailable ClusterConditionType = "Available" 71 | VitessClusterConditionRecovering ClusterConditionType = "Recovering" 72 | VitessClusterConditionScaling ClusterConditionType = "Scaling" 73 | VitessClusterConditionUpgrading ClusterConditionType = "Upgrading" 74 | ) 75 | 76 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 77 | 78 | // VitessCluster is the Schema for the vitessclusters API 79 | // +k8s:openapi-gen=true 80 | type VitessCluster struct { 81 | metav1.TypeMeta `json:",inline"` 82 | metav1.ObjectMeta `json:"metadata,omitempty"` 83 | 84 | Spec VitessClusterSpec `json:"spec,omitempty"` 85 | Status VitessClusterStatus `json:"status,omitempty"` 86 | } 87 | 88 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 89 | 90 | // VitessClusterList contains a list of VitessCluster 91 | type VitessClusterList struct { 92 | metav1.TypeMeta `json:",inline"` 93 | metav1.ListMeta `json:"metadata,omitempty"` 94 | Items []VitessCluster `json:"items"` 95 | } 96 | 97 | func init() { 98 | SchemeBuilder.Register(&VitessCluster{}, &VitessClusterList{}) 99 | } 100 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesskeyspace_helpers.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | func (keyspace *VitessKeyspace) SetParentCluster(cluster *VitessCluster) { 8 | keyspace.Spec.parent.Cluster = cluster 9 | } 10 | 11 | func (keyspace *VitessKeyspace) Cluster() *VitessCluster { 12 | return keyspace.Spec.parent.Cluster 13 | } 14 | 15 | // GetTabletContainers satisfies ConfigProvider 16 | func (keyspace *VitessKeyspace) GetTabletContainers() *TabletContainers { 17 | if keyspace.Spec.Defaults != nil { 18 | return keyspace.Spec.Defaults.Containers 19 | } 20 | return nil 21 | } 22 | 23 | func (keyspace *VitessKeyspace) Shards() []*VitessShard { 24 | return keyspace.Spec.Shards 25 | } 26 | 27 | func (keyspace *VitessKeyspace) EmbedShardCopy(shard *VitessShard) { 28 | keyspace.Spec.Shards = append(keyspace.Spec.Shards, shard.DeepCopy()) 29 | } 30 | 31 | func (keyspace *VitessKeyspace) GetScopedName(extra ...string) string { 32 | return strings.Join(append( 33 | []string{ 34 | keyspace.Cluster().GetScopedName(), 35 | }, 36 | extra...), "-") 37 | } 38 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesskeyspace_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
8 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 9 | 10 | // VitessKeyspaceSpec defines the desired state of VitessKeyspace 11 | type VitessKeyspaceSpec struct { 12 | Defaults *VitessShardOptions `json:"defaults"` 13 | 14 | Shards []*VitessShard `json:"shards"` 15 | 16 | ShardSelector []ResourceSelector `json:"shardSelector,omitempty"` 17 | 18 | // parent is unexported on purpose. 19 | // It should only be used during processing and never stored 20 | parent VitessKeyspaceParents 21 | } 22 | 23 | type VitessKeyspaceParents struct { 24 | Cluster *VitessCluster 25 | } 26 | 27 | type VitessBatchOptions struct { 28 | Count int64 `json:"count"` 29 | } 30 | 31 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 32 | 33 | // VitessKeyspace is the Schema for the vitesskeyspaces API 34 | // +k8s:openapi-gen=true 35 | type VitessKeyspace struct { 36 | metav1.TypeMeta `json:",inline"` 37 | metav1.ObjectMeta `json:"metadata,omitempty"` 38 | 39 | Spec VitessKeyspaceSpec `json:"spec,omitempty"` 40 | } 41 | 42 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 43 | 44 | // VitessKeyspaceList contains a list of VitessKeyspace 45 | type VitessKeyspaceList struct { 46 | metav1.TypeMeta `json:",inline"` 47 | metav1.ListMeta `json:"metadata,omitempty"` 48 | Items []VitessKeyspace `json:"items"` 49 | } 50 | 51 | func init() { 52 | SchemeBuilder.Register(&VitessKeyspace{}, &VitessKeyspaceList{}) 53 | } 54 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesslockserver_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
8 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 9 | 10 | // VitessLockserverSpec defines the desired state of VitessLockserver 11 | type VitessLockserverSpec struct { 12 | Provision bool `json:"provision,omitempty"` 13 | 14 | Type LockserverType `json:"type"` 15 | 16 | Etcd2 *Etcd2Lockserver `json:"etcd2,omitempty"` 17 | } 18 | 19 | type LockserverType string 20 | 21 | const ( 22 | LockserverTypeEtcd2 LockserverType = "etcd2" 23 | ) 24 | 25 | const LockserverTypeDefault LockserverType = LockserverTypeEtcd2 26 | 27 | type Etcd2Lockserver struct { 28 | Address string `json:"address"` 29 | Path string `json:"path"` 30 | } 31 | 32 | // VitessLockserverStatus defines the observed state of VitessLockserver 33 | type VitessLockserverStatus struct { 34 | State string `json:"state,omitempty"` 35 | } 36 | 37 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 38 | 39 | // VitessLockserver is the Schema for the vitesslockservers API 40 | // +k8s:openapi-gen=true 41 | type VitessLockserver struct { 42 | metav1.TypeMeta `json:",inline"` 43 | metav1.ObjectMeta `json:"metadata,omitempty"` 44 | 45 | Spec VitessLockserverSpec `json:"spec,omitempty"` 46 | Status VitessLockserverStatus `json:"status,omitempty"` 47 | } 48 | 49 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 50 | 51 | // VitessLockserverList contains a list of VitessLockserver 52 | type VitessLockserverList struct { 53 | metav1.TypeMeta `json:",inline"` 54 | metav1.ListMeta `json:"metadata,omitempty"` 55 | Items []VitessLockserver `json:"items"` 56 | } 57 | 58 | func init() { 59 | SchemeBuilder.Register(&VitessLockserver{}, &VitessLockserverList{}) 60 | } 61 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitessshard_helpers.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | "strings" 5 | ) 6 | 7 | func (shard *VitessShard) Cluster() *VitessCluster { 8 | return shard.Spec.parent.Cluster 9 | } 10 | 11 | func (shard *VitessShard) SetParentCluster(cluster *VitessCluster) { 12 | shard.Spec.parent.Cluster = cluster 13 | } 14 | 15 | func (shard *VitessShard) Keyspace() *VitessKeyspace { 16 | return shard.Spec.parent.Keyspace 17 | } 18 | 19 | func (shard *VitessShard) SetParentKeyspace(keyspace *VitessKeyspace) { 20 | shard.Spec.parent.Keyspace = keyspace 21 | } 22 | 23 | func (shard *VitessShard) Tablets() []*VitessTablet { 24 | return shard.Spec.Tablets 25 | } 26 | 27 | func (shard *VitessShard) EmbedTabletCopy(tablet *VitessTablet) { 28 | shard.Spec.Tablets = append(shard.Spec.Tablets, tablet.DeepCopy()) 29 | } 30 | 31 | // GetTabletContainers satisfies ConfigProvider 32 | func (shard *VitessShard) GetTabletContainers() *TabletContainers { 33 | if shard.Spec.Defaults != nil { 34 | return shard.Spec.Defaults.Containers 35 | } 36 | return nil 37 | } 38 | 39 | func (shard *VitessShard) GetScopedName(extra ...string) string { 40 | return strings.Join(append( 41 | []string{ 42 | shard.Keyspace().GetScopedName(), 43 | }, 44 | extra...), "-") 45 | } 46 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitessshard_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | // NOTE: json tags are required. 
Any new fields you add must have json tags for the fields to be serialized. 8 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 9 | 10 | // VitessShardSpec defines the desired state of VitessShard 11 | type VitessShardSpec struct { 12 | Defaults *VitessShardOptions `json:"defaults"` 13 | 14 | KeyRange KeyRange `json:"keyRange,omitempty"` 15 | 16 | Tablets []*VitessTablet `json:"tablets"` 17 | 18 | TabletSelector []ResourceSelector `json:"tabletSelector,omitempty"` 19 | 20 | // parent is unexported on purpose. 21 | // It should only be used during processing and never stored 22 | parent VitessShardParents 23 | } 24 | 25 | type VitessShardParents struct { 26 | Cluster *VitessCluster 27 | Keyspace *VitessKeyspace 28 | } 29 | 30 | type VitessShardOptions struct { 31 | Replicas *int32 `json:"replicas"` 32 | 33 | Batch VitessBatchOptions `json:""batch` 34 | 35 | Containers *TabletContainers `json:"containers"` 36 | 37 | Cells []string `json:"cells"` 38 | 39 | CellSelector []ResourceSelector `json:"cellSelector,omitempty"` 40 | } 41 | 42 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 43 | 44 | // VitessShard is the Schema for the vitessshards API 45 | // +k8s:openapi-gen=true 46 | type VitessShard struct { 47 | metav1.TypeMeta `json:",inline"` 48 | metav1.ObjectMeta `json:"metadata,omitempty"` 49 | 50 | Spec VitessShardSpec `json:"spec,omitempty"` 51 | } 52 | 53 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 54 | 55 | // VitessShardList contains a list of VitessShard 56 | type VitessShardList struct { 57 | metav1.TypeMeta `json:",inline"` 58 | metav1.ListMeta `json:"metadata,omitempty"` 59 | Items []VitessShard `json:"items"` 60 | } 61 | 62 | func init() { 63 | SchemeBuilder.Register(&VitessShard{}, &VitessShardList{}) 64 | } 65 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesstablet_helpers.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | // "fmt" 5 | "strconv" 6 | "strings" 7 | ) 8 | 9 | // GetTabletContainers satisfies ConfigProvider 10 | func (tablet *VitessTablet) GetTabletContainers() *TabletContainers { 11 | return tablet.Spec.Containers 12 | } 13 | 14 | func (tablet *VitessTablet) SetParentCluster(cluster *VitessCluster) { 15 | tablet.Spec.parent.Cluster = cluster 16 | } 17 | 18 | func (tablet *VitessTablet) SetParentCell(cell *VitessCell) { 19 | tablet.Spec.parent.Cell = cell 20 | } 21 | 22 | func (tablet *VitessTablet) SetParentKeyspace(keyspace *VitessKeyspace) { 23 | tablet.Spec.parent.Keyspace = keyspace 24 | } 25 | 26 | func (tablet *VitessTablet) SetParentShard(shard *VitessShard) { 27 | tablet.Spec.parent.Shard = shard 28 | } 29 | 30 | func (tablet *VitessTablet) Lockserver() *VitessLockserver { 31 | return tablet.Cell().Lockserver() 32 | } 33 | 34 | func (tablet *VitessTablet) Cluster() *VitessCluster { 35 | return tablet.Spec.parent.Cluster 36 | } 37 | 38 | func (tablet *VitessTablet) Cell() *VitessCell { 39 | return tablet.Spec.parent.Cell 40 | } 41 | 42 | func (tablet *VitessTablet) Keyspace() *VitessKeyspace { 43 | return tablet.Spec.parent.Keyspace 44 | } 45 | 46 | func (tablet *VitessTablet) Shard() *VitessShard { 47 | return tablet.Spec.parent.Shard 48 | } 49 | 50 | func (tablet *VitessTablet) GetStatefulSetName() string { 51 | return tablet.GetScopedName(string(tablet.Spec.Type)) 52 | } 53 | 54 | func (tablet *VitessTablet) 
GetScopedName(extra ...string) string { 55 | return strings.Join(append( 56 | []string{ 57 | tablet.Cluster().GetName(), 58 | tablet.Cell().GetName(), 59 | tablet.Keyspace().GetName(), 60 | tablet.Shard().GetName(), 61 | }, 62 | extra...), "-") 63 | } 64 | 65 | func (tablet *VitessTablet) GetReplicas() *int32 { 66 | if tablet.Spec.Replicas != nil { 67 | return tablet.Spec.Replicas 68 | } 69 | 70 | if tablet.Shard().Spec.Defaults != nil && tablet.Shard().Spec.Defaults.Replicas != nil { 71 | return tablet.Shard().Spec.Defaults.Replicas 72 | } 73 | 74 | var def int32 75 | return &def 76 | } 77 | 78 | func (tablet *VitessTablet) GetMySQLContainer() *MySQLContainer { 79 | // Inheritance order, with most specific first 80 | providers := []ConfigProvider{ 81 | tablet, 82 | tablet.Spec.parent.Shard, 83 | tablet.Spec.parent.Keyspace, 84 | } 85 | 86 | for _, p := range providers { 87 | if containers := p.GetTabletContainers(); containers != nil && containers.MySQL != nil { 88 | // TODO get defaults from full range of providers 89 | if containers.MySQL.DBFlavor == "" && containers.DBFlavor != "" { 90 | containers.MySQL.DBFlavor = containers.DBFlavor 91 | } 92 | if containers.MySQL.DBFlavor == "" { 93 | containers.MySQL.DBFlavor = "mysql56" 94 | } 95 | return containers.MySQL 96 | } 97 | } 98 | return nil 99 | } 100 | 101 | func (tablet *VitessTablet) GetVTTabletContainer() *VTTabletContainer { 102 | // Inheritance order, with most specific first 103 | providers := []ConfigProvider{ 104 | tablet, 105 | tablet.Shard(), 106 | tablet.Keyspace(), 107 | } 108 | 109 | for _, p := range providers { 110 | if containers := p.GetTabletContainers(); containers != nil && containers.VTTablet != nil { 111 | // TODO get defaults from full range of providers 112 | if containers.VTTablet.DBFlavor == "" && containers.DBFlavor != "" { 113 | containers.VTTablet.DBFlavor = containers.DBFlavor 114 | } 115 | if containers.VTTablet.DBFlavor == "" { 116 | containers.VTTablet.DBFlavor = "mysql56" 117 | } 118 | return containers.VTTablet 119 | } 120 | } 121 | return nil 122 | } 123 | 124 | func (tablet *VitessTablet) GetTabletID() string { 125 | return strconv.FormatInt(tablet.Spec.TabletID, 10) 126 | } 127 | 128 | func (tablet *VitessTablet) Phase() TabletPhase { 129 | return tablet.status.Phase 130 | } 131 | 132 | func (tablet *VitessTablet) SetPhase(p TabletPhase) { 133 | tablet.status.Phase = p 134 | } 135 | 136 | func (tablet *VitessTablet) InPhase(p TabletPhase) bool { 137 | return tablet.status.Phase == p 138 | } 139 | -------------------------------------------------------------------------------- /pkg/apis/vitess/v1alpha2/vitesstablet_types.go: -------------------------------------------------------------------------------- 1 | package v1alpha2 2 | 3 | import ( 4 | corev1 "k8s.io/api/core/v1" 5 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 6 | ) 7 | 8 | // NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. 
9 | // Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file 10 | 11 | // VitessTabletSpec defines the desired state of VitessTablet 12 | type VitessTabletSpec struct { 13 | TabletID int64 `json:"tabletID"` 14 | 15 | Replicas *int32 `json:"replicas"` 16 | 17 | CellID string `json:"cellID"` 18 | 19 | Type TabletType `json:"type"` 20 | 21 | Datastore TabletDatastore `json:"datastore"` 22 | 23 | Containers *TabletContainers `json:"containers"` 24 | 25 | VolumeClaim *corev1.PersistentVolumeClaimVolumeSource `json:"volumeclaim, omitempty"` 26 | 27 | Credentials *TabletCredentials `json:"credentials,omitempty"` 28 | 29 | // parent is unexported on purpose. 30 | // It should only be used during processing and never stored 31 | parent VitessTabletParents 32 | } 33 | 34 | type VitessTabletParents struct { 35 | Cluster *VitessCluster 36 | Cell *VitessCell 37 | Keyspace *VitessKeyspace 38 | Shard *VitessShard 39 | } 40 | 41 | type TabletType string 42 | 43 | const ( 44 | TabletTypeMaster TabletType = "master" 45 | TabletTypeReplica TabletType = "replica" 46 | TabletTypeReadOnly TabletType = "readonly" 47 | TabletTypeBackup TabletType = "backup" 48 | TabletTypeRestore TabletType = "restore" 49 | TabletTypeDrained TabletType = "drained" 50 | ) 51 | 52 | const TabletTypeDefault TabletType = TabletTypeReplica 53 | 54 | type TabletDatastore struct { 55 | Type TabletDatastoreType `json:"type"` 56 | } 57 | 58 | type TabletDatastoreType string 59 | 60 | const ( 61 | TabletDatastoreTypeLocal TabletDatastoreType = "local" 62 | ) 63 | 64 | const TabletDatastoreTypeDefault TabletDatastoreType = TabletDatastoreTypeLocal 65 | 66 | type TabletCredentials struct { 67 | // SecretRef points a Secret resource which contains the credentials 68 | // +optional 69 | SecretRef *corev1.SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` 70 | } 71 | 72 | // status is for internal use only. If it was exported then it would dirty-up the 73 | // tablet objects embedded in other resources and would result in mixed status and spec data 74 | // it is here for use by the VitessCluster object and its controller 75 | type VitessTabletStatus struct { 76 | Phase TabletPhase `json:"-"` 77 | } 78 | 79 | type TabletPhase string 80 | 81 | const ( 82 | TabletPhaseNone TabletPhase = "" 83 | TabletPhaseReady TabletPhase = "Ready" 84 | ) 85 | 86 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 87 | 88 | // VitessTablet is the Schema for the vitesstablets API 89 | // +k8s:openapi-gen=true 90 | type VitessTablet struct { 91 | metav1.TypeMeta `json:",inline"` 92 | metav1.ObjectMeta `json:"metadata,omitempty"` 93 | 94 | Spec VitessTabletSpec `json:"spec,omitempty"` 95 | 96 | // internal use only. 
See struct def for details 97 | status VitessTabletStatus `json:"-"` 98 | } 99 | 100 | // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object 101 | 102 | // VitessTabletList contains a list of VitessTablet 103 | type VitessTabletList struct { 104 | metav1.TypeMeta `json:",inline"` 105 | metav1.ListMeta `json:"metadata,omitempty"` 106 | Items []VitessTablet `json:"items"` 107 | } 108 | 109 | func init() { 110 | SchemeBuilder.Register(&VitessTablet{}, &VitessTabletList{}) 111 | } 112 | -------------------------------------------------------------------------------- /pkg/controller/add_vitesscluster.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "vitess.io/vitess-operator/pkg/controller/vitesscluster" 5 | ) 6 | 7 | func init() { 8 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 9 | AddToManagerFuncs = append(AddToManagerFuncs, vitesscluster.Add) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/controller/add_vitesslockserver.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "vitess.io/vitess-operator/pkg/controller/vitesslockserver" 5 | ) 6 | 7 | func init() { 8 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 9 | AddToManagerFuncs = append(AddToManagerFuncs, vitesslockserver.Add) 10 | } 11 | -------------------------------------------------------------------------------- /pkg/controller/controller.go: -------------------------------------------------------------------------------- 1 | package controller 2 | 3 | import ( 4 | "sigs.k8s.io/controller-runtime/pkg/manager" 5 | ) 6 | 7 | // AddToManagerFuncs is a list of functions to add all Controllers to the Manager 8 | var AddToManagerFuncs []func(manager.Manager) error 9 | 10 | // AddToManager adds all Controllers to the Manager 11 | func AddToManager(m manager.Manager) error { 12 | for _, f := range AddToManagerFuncs { 13 | if err := f(m); err != nil { 14 | return err 15 | } 16 | } 17 | return nil 18 | } 19 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/reconcile_cell.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | import ( 4 | "context" 5 | 6 | appsv1 "k8s.io/api/apps/v1" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/types" 11 | "k8s.io/apimachinery/pkg/util/intstr" 12 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 13 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 14 | 15 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 16 | "vitess.io/vitess-operator/pkg/util/scripts" 17 | ) 18 | 19 | func (r *ReconcileVitessCluster) ReconcileCell(cell *vitessv1alpha2.VitessCell) (reconcile.Result, error) { 20 | log.Info("Reconciling Cell", "Namespace", cell.GetNamespace(), "VitessCluster.Name", cell.Cluster().GetName(), "Cell.Name", cell.GetName()) 21 | 22 | if r, err := r.ReconcileCellVTctld(cell); err != nil { 23 | log.Error(err, "Failed to reconcile vtctl", "Namespace", cell.GetName(), "VitessCluster.Name", cell.Cluster().GetName(), "Cell.Name", cell.GetName()) 24 | return r, err 25 | } else if r.Requeue { 26 | return r, err 27 | } 28 | 29 | if r, err := 
r.ReconcileCellVTGate(cell); err != nil { 30 | log.Error(err, "Failed to reconcile vtgate", "Namespace", cell.GetName(), "VitessCluster.Name", cell.Cluster().GetName(), "Cell.Name", cell.GetName()) 31 | return r, err 32 | } else if r.Requeue { 33 | return r, err 34 | } 35 | 36 | return reconcile.Result{}, nil 37 | } 38 | 39 | func (r *ReconcileVitessCluster) ReconcileCellVTctld(cell *vitessv1alpha2.VitessCell) (reconcile.Result, error) { 40 | deploy, service, deployErr := GetCellVTctldResources(cell) 41 | if deployErr != nil { 42 | log.Error(deployErr, "failed to generate Vtctld Deployment for VitessCell", "VitessCell.Namespace", cell.GetNamespace(), "VitessCell.Name", cell.GetNamespace()) 43 | return reconcile.Result{}, deployErr 44 | } 45 | 46 | foundDeployment := &appsv1.Deployment{} 47 | err := r.client.Get(context.TODO(), types.NamespacedName{Name: deploy.GetName(), Namespace: deploy.GetNamespace()}, foundDeployment) 48 | if err != nil && errors.IsNotFound(err) { 49 | controllerutil.SetControllerReference(cell.Cluster(), deploy, r.scheme) 50 | err = r.client.Create(context.TODO(), deploy) 51 | if err != nil { 52 | return reconcile.Result{}, err 53 | } 54 | } else if err != nil { 55 | log.Error(err, "failed to get Deployment") 56 | return reconcile.Result{}, err 57 | } 58 | 59 | foundService := &corev1.Service{} 60 | err = r.client.Get(context.TODO(), types.NamespacedName{Name: service.GetName(), Namespace: service.GetNamespace()}, foundService) 61 | if err != nil && errors.IsNotFound(err) { 62 | controllerutil.SetControllerReference(cell.Cluster(), service, r.scheme) 63 | err = r.client.Create(context.TODO(), service) 64 | if err != nil { 65 | return reconcile.Result{}, err 66 | } 67 | } else if err != nil { 68 | log.Error(err, "failed to get Service") 69 | return reconcile.Result{}, err 70 | } 71 | 72 | return reconcile.Result{}, nil 73 | 74 | } 75 | 76 | func GetCellVTctldResources(cell *vitessv1alpha2.VitessCell) (*appsv1.Deployment, *corev1.Service, error) { 77 | name := cell.GetScopedName("vtctld") 78 | 79 | scripts := scripts.NewContainerScriptGenerator("vtctld", cell) 80 | if err := scripts.Generate(); err != nil { 81 | return nil, nil, err 82 | } 83 | 84 | labels := map[string]string{ 85 | "app": "vitess", 86 | "cluster": cell.Cluster().GetName(), 87 | "cell": cell.GetName(), 88 | "component": "vtctld", 89 | } 90 | 91 | deployment := &appsv1.Deployment{ 92 | ObjectMeta: metav1.ObjectMeta{ 93 | Name: name, 94 | Namespace: cell.Cluster().GetNamespace(), 95 | Labels: labels, 96 | }, 97 | Spec: appsv1.DeploymentSpec{ 98 | ProgressDeadlineSeconds: getInt32Ptr(1), 99 | Replicas: getInt32Ptr(1), 100 | Selector: &metav1.LabelSelector{ 101 | MatchLabels: labels, 102 | }, 103 | Template: corev1.PodTemplateSpec{ 104 | ObjectMeta: metav1.ObjectMeta{ 105 | Labels: labels, 106 | }, 107 | Spec: corev1.PodSpec{ 108 | Containers: []corev1.Container{ 109 | { 110 | Name: "vtctld", 111 | Image: "vitess/vtctld:helm-1.0.3", // TODO use CRD w/default 112 | Command: []string{ 113 | "bash", 114 | }, 115 | Args: []string{ 116 | "-c", 117 | scripts.Start, 118 | }, 119 | LivenessProbe: &corev1.Probe{ 120 | Handler: corev1.Handler{ 121 | HTTPGet: &corev1.HTTPGetAction{ 122 | Path: "/debug/status", 123 | Port: intstr.FromInt(15000), 124 | Scheme: corev1.URISchemeHTTP, 125 | }, 126 | }, 127 | InitialDelaySeconds: 30, 128 | TimeoutSeconds: 5, 129 | PeriodSeconds: 10, 130 | SuccessThreshold: 1, 131 | FailureThreshold: 3, 132 | }, 133 | ReadinessProbe: &corev1.Probe{ 134 | Handler: corev1.Handler{ 135 | 
HTTPGet: &corev1.HTTPGetAction{ 136 | Path: "/debug/health", 137 | Port: intstr.FromInt(15000), 138 | Scheme: corev1.URISchemeHTTP, 139 | }, 140 | }, 141 | InitialDelaySeconds: 30, 142 | TimeoutSeconds: 5, 143 | PeriodSeconds: 10, 144 | SuccessThreshold: 1, 145 | FailureThreshold: 3, 146 | }, 147 | }, 148 | }, 149 | SecurityContext: &corev1.PodSecurityContext{ 150 | FSGroup: getInt64Ptr(2000), 151 | RunAsUser: getInt64Ptr(1000), 152 | }, 153 | }, 154 | }, 155 | }, 156 | } 157 | 158 | service := &corev1.Service{ 159 | ObjectMeta: metav1.ObjectMeta{ 160 | Name: name, 161 | Namespace: cell.Cluster().GetNamespace(), 162 | Labels: labels, 163 | }, 164 | Spec: corev1.ServiceSpec{ 165 | Selector: labels, 166 | Type: corev1.ServiceTypeClusterIP, 167 | Ports: []corev1.ServicePort{ 168 | { 169 | Name: "web", 170 | Port: 15000, 171 | }, 172 | { 173 | Name: "grpc", 174 | Port: 15999, 175 | }, 176 | }, 177 | }, 178 | } 179 | 180 | return deployment, service, nil 181 | } 182 | 183 | func (r *ReconcileVitessCluster) ReconcileCellVTGate(cell *vitessv1alpha2.VitessCell) (reconcile.Result, error) { 184 | deploy, service, deployErr := GetCellVTGateResources(cell) 185 | if deployErr != nil { 186 | log.Error(deployErr, "failed to generate VTGate Deployment for VitessCell", "VitessCell.Namespace", cell.GetNamespace(), "VitessCell.Name", cell.GetNamespace()) 187 | return reconcile.Result{}, deployErr 188 | } 189 | 190 | foundDeployment := &appsv1.Deployment{} 191 | err := r.client.Get(context.TODO(), types.NamespacedName{Name: deploy.GetName(), Namespace: deploy.GetNamespace()}, foundDeployment) 192 | if err != nil && errors.IsNotFound(err) { 193 | controllerutil.SetControllerReference(cell.Cluster(), deploy, r.scheme) 194 | err = r.client.Create(context.TODO(), deploy) 195 | if err != nil { 196 | return reconcile.Result{}, err 197 | } 198 | } else if err != nil { 199 | log.Error(err, "failed to get Deployment") 200 | return reconcile.Result{}, err 201 | } 202 | 203 | foundService := &corev1.Service{} 204 | err = r.client.Get(context.TODO(), types.NamespacedName{Name: service.GetName(), Namespace: service.GetNamespace()}, foundService) 205 | if err != nil && errors.IsNotFound(err) { 206 | controllerutil.SetControllerReference(cell.Cluster(), service, r.scheme) 207 | err = r.client.Create(context.TODO(), service) 208 | if err != nil { 209 | return reconcile.Result{}, err 210 | } 211 | } else if err != nil { 212 | log.Error(err, "failed to get Service") 213 | return reconcile.Result{}, err 214 | } 215 | 216 | return reconcile.Result{}, nil 217 | 218 | } 219 | 220 | func GetCellVTGateResources(cell *vitessv1alpha2.VitessCell) (*appsv1.Deployment, *corev1.Service, error) { 221 | name := cell.GetScopedName("vtgate") 222 | 223 | scriptGen := scripts.NewContainerScriptGenerator("vtgate", cell) 224 | if err := scriptGen.Generate(); err != nil { 225 | return nil, nil, err 226 | } 227 | 228 | vtgateLabels := map[string]string{ 229 | "app": "vitess", 230 | "cluster": cell.Cluster().GetName(), 231 | "cell": cell.GetName(), 232 | "component": "vtgate", 233 | } 234 | 235 | vttabletLabels := map[string]string{ 236 | "app": "vitess", 237 | "cluster": cell.Cluster().GetName(), 238 | "cell": cell.GetName(), 239 | "component": "vttabletLabels", 240 | } 241 | 242 | // Build affinity 243 | affinity := &corev1.Affinity{ 244 | PodAffinity: &corev1.PodAffinity{ 245 | // Prefer to run on the same host as a vtgate pod 246 | PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ 247 | { 248 | Weight: 10, 249 | 
PodAffinityTerm: corev1.PodAffinityTerm{ 250 | LabelSelector: &metav1.LabelSelector{ 251 | MatchLabels: vttabletLabels, 252 | }, 253 | TopologyKey: "kubernetes.io/hostname", 254 | }, 255 | }, 256 | }, 257 | }, 258 | PodAntiAffinity: &corev1.PodAntiAffinity{ 259 | PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ 260 | { 261 | Weight: 100, 262 | PodAffinityTerm: corev1.PodAffinityTerm{ 263 | LabelSelector: &metav1.LabelSelector{ 264 | MatchLabels: vtgateLabels, 265 | }, 266 | TopologyKey: "kubernetes.io/hostname", 267 | }, 268 | }, 269 | }, 270 | }, 271 | } 272 | 273 | deployment := &appsv1.Deployment{ 274 | ObjectMeta: metav1.ObjectMeta{ 275 | Name: name, 276 | Namespace: cell.Cluster().GetNamespace(), 277 | Labels: vtgateLabels, 278 | }, 279 | Spec: appsv1.DeploymentSpec{ 280 | ProgressDeadlineSeconds: getInt32Ptr(600), 281 | Replicas: getInt32Ptr(2), 282 | Selector: &metav1.LabelSelector{ 283 | MatchLabels: vtgateLabels, 284 | }, 285 | Template: corev1.PodTemplateSpec{ 286 | ObjectMeta: metav1.ObjectMeta{ 287 | Labels: vtgateLabels, 288 | }, 289 | Spec: corev1.PodSpec{ 290 | Affinity: affinity, 291 | Containers: []corev1.Container{ 292 | { 293 | Name: "vtgate", 294 | Image: "vitess/vtgate:helm-1.0.3", // TODO use CRD w/default 295 | Command: []string{ 296 | "bash", 297 | }, 298 | Args: []string{ 299 | "-c", 300 | scriptGen.Start, 301 | }, 302 | LivenessProbe: &corev1.Probe{ 303 | Handler: corev1.Handler{ 304 | HTTPGet: &corev1.HTTPGetAction{ 305 | Path: "/debug/status", 306 | Port: intstr.FromInt(15001), 307 | Scheme: corev1.URISchemeHTTP, 308 | }, 309 | }, 310 | InitialDelaySeconds: 30, 311 | TimeoutSeconds: 5, 312 | PeriodSeconds: 10, 313 | SuccessThreshold: 1, 314 | FailureThreshold: 3, 315 | }, 316 | ReadinessProbe: &corev1.Probe{ 317 | Handler: corev1.Handler{ 318 | HTTPGet: &corev1.HTTPGetAction{ 319 | Path: "/debug/health", 320 | Port: intstr.FromInt(15001), 321 | Scheme: corev1.URISchemeHTTP, 322 | }, 323 | }, 324 | InitialDelaySeconds: 30, 325 | TimeoutSeconds: 5, 326 | PeriodSeconds: 10, 327 | SuccessThreshold: 1, 328 | FailureThreshold: 3, 329 | }, 330 | VolumeMounts: []corev1.VolumeMount{ 331 | { 332 | MountPath: "/mysqlcreds", 333 | Name: "creds", 334 | }, 335 | }, 336 | }, 337 | }, 338 | SecurityContext: &corev1.PodSecurityContext{ 339 | FSGroup: getInt64Ptr(2000), 340 | RunAsUser: getInt64Ptr(1000), 341 | }, 342 | Volumes: []corev1.Volume{ 343 | { 344 | Name: "creds", 345 | VolumeSource: corev1.VolumeSource{ 346 | EmptyDir: &corev1.EmptyDirVolumeSource{}, 347 | }, 348 | }, 349 | }, 350 | }, 351 | }, 352 | }, 353 | } 354 | 355 | service := &corev1.Service{ 356 | ObjectMeta: metav1.ObjectMeta{ 357 | Name: name, 358 | Namespace: cell.Cluster().GetNamespace(), 359 | Labels: vtgateLabels, 360 | }, 361 | Spec: corev1.ServiceSpec{ 362 | Selector: vtgateLabels, 363 | Type: corev1.ServiceTypeClusterIP, 364 | Ports: []corev1.ServicePort{ 365 | { 366 | Name: "web", 367 | Port: 15001, 368 | }, 369 | { 370 | Name: "grpc", 371 | Port: 15991, 372 | }, 373 | }, 374 | }, 375 | } 376 | 377 | if cell.Spec.MySQLProtocol != nil { 378 | // Add Service Port 379 | service.Spec.Ports = append(service.Spec.Ports, corev1.ServicePort{ 380 | Name: "mysql", 381 | Port: 3306, 382 | }) 383 | 384 | // Setup credential init container 385 | if cell.Spec.MySQLProtocol.PasswordSecretRef != nil { 386 | scriptGen := scripts.NewContainerScriptGenerator("init-mysql-creds", cell) 387 | if err := scriptGen.Generate(); err != nil { 388 | return nil, nil, err 389 | } 390 | 391 | // 
Add deployment initContainer to bootstrap creds 392 | deployment.Spec.Template.Spec.InitContainers = append(deployment.Spec.Template.Spec.InitContainers, corev1.Container{ 393 | Name: "init-mysql-creds", 394 | Image: "vitess/vtgate:helm-1.0.3", // TODO use CRD w/default 395 | Env: []corev1.EnvVar{ 396 | { 397 | Name: "MYSQL_PASSWORD", 398 | ValueFrom: &corev1.EnvVarSource{ 399 | SecretKeyRef: cell.Spec.MySQLProtocol.PasswordSecretRef, 400 | }, 401 | }, 402 | }, 403 | Command: []string{ 404 | "bash", 405 | }, 406 | Args: []string{ 407 | "-c", 408 | scriptGen.Start, 409 | }, 410 | VolumeMounts: []corev1.VolumeMount{ 411 | { 412 | MountPath: "/mysqlcreds", 413 | Name: "creds", 414 | }, 415 | }, 416 | }) 417 | } 418 | } 419 | 420 | return deployment, service, nil 421 | } 422 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/reconcile_cell_test.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | appsv1 "k8s.io/api/apps/v1" 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | 11 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 12 | // "vitess.io/vitess-operator/pkg/normalizer" 13 | ) 14 | 15 | func TestGetCellVTGateResources(t *testing.T) { 16 | 17 | // Define a minimal cluster 18 | cluster := &vitessv1alpha2.VitessCluster{ 19 | ObjectMeta: metav1.ObjectMeta{ 20 | Name: "testcluster", 21 | Namespace: "vitess", 22 | }, 23 | Spec: vitessv1alpha2.VitessClusterSpec{ 24 | Lockserver: &vitessv1alpha2.VitessLockserver{ 25 | Spec: vitessv1alpha2.VitessLockserverSpec{ 26 | Type: vitessv1alpha2.LockserverTypeEtcd2, 27 | Etcd2: &vitessv1alpha2.Etcd2Lockserver{ 28 | Address: "global-lockserver:8080", 29 | Path: "/global", 30 | }, 31 | }, 32 | }, 33 | }, 34 | } 35 | 36 | // Define a basic cell 37 | cell := &vitessv1alpha2.VitessCell{ 38 | ObjectMeta: metav1.ObjectMeta{ 39 | Name: "zone0", 40 | Namespace: "vitess", 41 | }, 42 | Spec: vitessv1alpha2.VitessCellSpec{ 43 | Lockserver: &vitessv1alpha2.VitessLockserver{ 44 | ObjectMeta: metav1.ObjectMeta{ 45 | Name: "cell-lockserver", 46 | }, 47 | Spec: vitessv1alpha2.VitessLockserverSpec{}, 48 | }, 49 | }, 50 | } 51 | 52 | cell.SetParentCluster(cluster) 53 | 54 | // Get the resources 55 | deployment, service, err := GetCellVTGateResources(cell) 56 | 57 | if err != nil { 58 | t.Errorf("Got error generating vtgate resources for cell: %s", err) 59 | } 60 | 61 | // Validate basic returns 62 | if deployment == nil { 63 | t.Error("Got nil vtgate deployment for cell") 64 | } 65 | 66 | if service == nil { 67 | t.Error("Got nil vtgate service for cell") 68 | } 69 | 70 | // Test no mysql protocol 71 | 72 | if vtGateServiceHasMySQLPort(service) { 73 | t.Error("vtgate service had mysql port set and shouldn't have") 74 | } 75 | 76 | if vtGateDeploymentHasMySQLOpts(deployment, "-mysql_auth") { 77 | t.Error("vtgate deployment had mysql auth flags set and shouldn't have") 78 | } 79 | 80 | // Test mysql protocol with explict auth disable 81 | cell.Spec.MySQLProtocol = &vitessv1alpha2.VitessCellMySQLProtocol{ 82 | AuthType: vitessv1alpha2.VitessMySQLAuthTypeNone, 83 | } 84 | 85 | deployment, service, err = GetCellVTGateResources(cell) 86 | 87 | if err != nil { 88 | t.Errorf("Got error generating vtgate resources for cell with mysql and no auth: %s", err) 89 | } 90 | 91 | if !vtGateServiceHasMySQLPort(service) { 92 | t.Error("vtgate service did not have 
mysql port set") 93 | } 94 | 95 | if !vtGateDeploymentHasMySQLOpts(deployment, "-mysql_auth_server_impl=\"none\"") { 96 | t.Error("vtgate deployment did not have mysql no auth flag") 97 | } 98 | 99 | // Test mysql protocol with static auth 100 | cell.Spec.MySQLProtocol = &vitessv1alpha2.VitessCellMySQLProtocol{ 101 | Username: "test", 102 | PasswordSecretRef: &corev1.SecretKeySelector{}, 103 | } 104 | 105 | deployment, service, err = GetCellVTGateResources(cell) 106 | 107 | if err != nil { 108 | t.Errorf("Got error generating vtgate resources for cell with mysql and basic auth: %s", err) 109 | } 110 | 111 | if !vtGateServiceHasMySQLPort(service) { 112 | t.Error("vtgate service did not have mysql port set") 113 | } 114 | 115 | if !vtGateDeploymentHasMySQLOpts(deployment, "-mysql_auth_server_impl=\"static\"") { 116 | t.Error("vtgate deployment did not have mysql static auth flag") 117 | } 118 | } 119 | 120 | func vtGateServiceHasMySQLPort(service *corev1.Service) bool { 121 | for _, port := range service.Spec.Ports { 122 | if port.Name == "mysql" { 123 | return true 124 | } 125 | } 126 | return false 127 | } 128 | 129 | func vtGateDeploymentHasMySQLOpts(deployment *appsv1.Deployment, optstr string) bool { 130 | if strings.Contains(deployment.Spec.Template.Spec.Containers[0].Args[1], optstr) { 131 | return true 132 | } 133 | 134 | return false 135 | } 136 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/reconcile_cluster.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | import ( 4 | "context" 5 | 6 | corev1 "k8s.io/api/core/v1" 7 | "k8s.io/apimachinery/pkg/api/errors" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/types" 10 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 11 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 12 | 13 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 14 | lockserver_controller "vitess.io/vitess-operator/pkg/controller/vitesslockserver" 15 | ) 16 | 17 | // ReconcileClusterResources should only be called against a fully-populated and verified VitessCluster object 18 | func (r *ReconcileVitessCluster) ReconcileClusterResources(cluster *vitessv1alpha2.VitessCluster) (reconcile.Result, error) { 19 | if r, err := r.ReconcileClusterLockserver(cluster); err != nil || r.Requeue { 20 | return r, err 21 | } 22 | 23 | if r, err := r.ReconcileClusterTabletService(cluster); err != nil || r.Requeue { 24 | return r, err 25 | } 26 | 27 | for _, cell := range cluster.Cells() { 28 | if r, err := r.ReconcileCell(cell); err != nil || r.Requeue { 29 | return r, err 30 | } 31 | } 32 | 33 | for _, keyspace := range cluster.Keyspaces() { 34 | if r, err := r.ReconcileKeyspace(keyspace); err != nil || r.Requeue { 35 | return r, err 36 | } 37 | } 38 | 39 | return reconcile.Result{}, nil 40 | } 41 | 42 | func (r *ReconcileVitessCluster) ReconcileClusterLockserver(cluster *vitessv1alpha2.VitessCluster) (reconcile.Result, error) { 43 | log.Info("Reconciling Embedded Lockserver") 44 | 45 | // Build a complete VitessLockserver 46 | lockserver := cluster.Spec.Lockserver.DeepCopy() 47 | 48 | if cluster.Status.Lockserver != nil { 49 | // If status is not empty, deepcopy it into the tmp object 50 | cluster.Status.Lockserver.DeepCopyInto(&lockserver.Status) 51 | } 52 | 53 | // Run it through the controller's reconcile func 54 | recResult, recErr := lockserver_controller.ReconcileObject(lockserver, log) 
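// Reconciling the embedded lockserver is delegated to the standalone
// VitessLockserver controller's exported ReconcileObject, so embedded and
// referenced lockservers share one code path. The work happens on a deep copy
// (seeded with any previously recorded status), and the resulting spec and
// status are copied back onto the parent cluster below.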
55 | 56 | // Split and store the spec and status in the parent VitessCluster 57 | cluster.Spec.Lockserver = lockserver.DeepCopy() 58 | cluster.Status.Lockserver = lockserver.Status.DeepCopy() 59 | 60 | // Using the split client here breaks the cluster normalization 61 | // TODO Fix and re-enable 62 | 63 | // if err := r.client.Status().Update(context.TODO(), cluster); err != nil { 64 | // log.Error(err, "Failed to update VitessCluster status after lockserver change.") 65 | // return reconcile.Result{}, err 66 | // } 67 | 68 | return recResult, recErr 69 | } 70 | 71 | func (r *ReconcileVitessCluster) ReconcileClusterTabletService(cluster *vitessv1alpha2.VitessCluster) (reconcile.Result, error) { 72 | service, serviceErr := getServiceForClusterTablets(cluster) 73 | if serviceErr != nil { 74 | log.Error(serviceErr, "failed to generate service for VitessCluster tablets", "VitessCluster.Namespace", cluster.GetNamespace(), "VitessCluster.Name", cluster.GetNamespace()) 75 | return reconcile.Result{}, serviceErr 76 | } 77 | foundService := &corev1.Service{} 78 | err := r.client.Get(context.TODO(), types.NamespacedName{Name: service.GetName(), Namespace: service.GetNamespace()}, foundService) 79 | if err != nil && errors.IsNotFound(err) { 80 | controllerutil.SetControllerReference(cluster, service, r.scheme) 81 | err = r.client.Create(context.TODO(), service) 82 | if err != nil { 83 | return reconcile.Result{}, err 84 | } 85 | } else if err != nil { 86 | log.Error(err, "failed to get Service") 87 | return reconcile.Result{}, err 88 | } 89 | 90 | return reconcile.Result{}, nil 91 | } 92 | 93 | // getServiceForClusterTablets takes a vitess cluster and returns a headless service that will point to all of the cluster's tablets 94 | func getServiceForClusterTablets(cluster *vitessv1alpha2.VitessCluster) (*corev1.Service, error) { 95 | labels := map[string]string{ 96 | "app": "vitess", 97 | "cluster": cluster.GetName(), 98 | "component": "vttablet", 99 | } 100 | 101 | service := &corev1.Service{ 102 | ObjectMeta: metav1.ObjectMeta{ 103 | Name: cluster.GetTabletServiceName(), 104 | Namespace: cluster.GetNamespace(), 105 | Labels: labels, 106 | Annotations: map[string]string{ 107 | "service.alpha.kubernetes.io/tolerate-unready-endpoints": "true", 108 | }, 109 | }, 110 | Spec: corev1.ServiceSpec{ 111 | ClusterIP: corev1.ClusterIPNone, 112 | Selector: labels, 113 | Type: corev1.ServiceTypeClusterIP, 114 | PublishNotReadyAddresses: true, 115 | Ports: []corev1.ServicePort{ 116 | { 117 | Name: "web", 118 | Port: 15002, 119 | }, 120 | { 121 | Name: "grpc", 122 | Port: 16002, 123 | }, 124 | // TODO: Configure ports below only if if ppm is enabled 125 | { 126 | Name: "query-data", 127 | Port: 42001, 128 | }, 129 | { 130 | Name: "mysql-metrics", 131 | Port: 42002, 132 | }, 133 | }, 134 | }, 135 | } 136 | 137 | // The error return is always nil right now, but it still returns one just 138 | // in case there are error states in the future 139 | return service, nil 140 | } 141 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/reconcile_keyspace.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | import ( 4 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 5 | 6 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 7 | ) 8 | 9 | func (r *ReconcileVitessCluster) ReconcileKeyspace(keyspace *vitessv1alpha2.VitessKeyspace) (reconcile.Result, error) { 10 | log.Info("Reconciling 
Keyspace", "Namespace", keyspace.GetNamespace(), "VitessCluster.Name", keyspace.Cluster().GetName(), "Keyspace.Name", keyspace.GetName()) 11 | 12 | // Reconcile all shards 13 | for _, shard := range keyspace.Shards() { 14 | if result, err := r.ReconcileShard(shard); err != nil { 15 | return result, err 16 | } 17 | } 18 | 19 | return reconcile.Result{}, nil 20 | } 21 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/reconcile_shard.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | import ( 4 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 5 | 6 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 7 | ) 8 | 9 | func (r *ReconcileVitessCluster) ReconcileShard(shard *vitessv1alpha2.VitessShard) (reconcile.Result, error) { 10 | log.Info("Reconciling Shard", "Namespace", shard.GetNamespace(), "VitessCluster.Name", shard.Cluster().GetName(), "Shard.Name", shard.GetName()) 11 | 12 | // Reconcile all shard tablets 13 | for _, tablet := range shard.Tablets() { 14 | if result, err := r.ReconcileTablet(tablet); err != nil { 15 | return result, err 16 | } 17 | } 18 | 19 | return reconcile.Result{}, nil 20 | } 21 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/reconcile_tablet.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "reflect" 7 | 8 | appsv1 "k8s.io/api/apps/v1" 9 | batchv1 "k8s.io/api/batch/v1" 10 | corev1 "k8s.io/api/core/v1" 11 | "k8s.io/apimachinery/pkg/api/errors" 12 | "k8s.io/apimachinery/pkg/api/resource" 13 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 14 | "k8s.io/apimachinery/pkg/types" 15 | "k8s.io/apimachinery/pkg/util/intstr" 16 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 17 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 18 | 19 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 20 | "vitess.io/vitess-operator/pkg/util/scripts" 21 | ) 22 | 23 | func (r *ReconcileVitessCluster) ReconcileTablet(tablet *vitessv1alpha2.VitessTablet) (reconcile.Result, error) { 24 | log.Info("Reconciling Tablet", "Namespace", tablet.GetNamespace(), "VitessCluster.Name", tablet.Cluster().GetName(), "Tablet.Name", tablet.GetName()) 25 | 26 | if r, err := r.ReconcileTabletResources(tablet); err != nil { 27 | log.Error(err, "Failed to reconcile tablet statefulset", "Namespace", tablet.GetName(), "VitessCluster.Name", tablet.Cluster().GetName(), "Tablet.Name", tablet.GetName()) 28 | return r, err 29 | } else if r.Requeue { 30 | return r, err 31 | } 32 | 33 | // Create an init job for tablets of type replica 34 | // TODO replace this with direct election via the operator 35 | if tablet.Spec.Type == vitessv1alpha2.TabletTypeReplica { 36 | if r, err := r.ReconcileReplicaTabletInitJob(tablet); err != nil { 37 | log.Error(err, "Failed to reconcile replica tablet master init job", "Namespace", tablet.GetName(), "VitessCluster.Name", tablet.Cluster().GetName(), "Tablet.Name", tablet.GetName()) 38 | return r, err 39 | } else if r.Requeue { 40 | return r, err 41 | } 42 | } 43 | 44 | return reconcile.Result{}, nil 45 | } 46 | 47 | func (r *ReconcileVitessCluster) ReconcileTabletResources(tablet *vitessv1alpha2.VitessTablet) (reconcile.Result, error) { 48 | statefulSet, statefulSetErr := getStatefulSetForTablet(tablet) 49 | if statefulSetErr != nil { 50 | 
log.Error(statefulSetErr, "failed to generate StatefulSet for VitessTablet", "VitessTablet.Namespace", tablet.GetNamespace(), "VitessTablet.Name", tablet.GetNamespace()) 51 | return reconcile.Result{}, statefulSetErr 52 | } 53 | 54 | foundStatefulSet := &appsv1.StatefulSet{} 55 | err := r.client.Get(context.TODO(), types.NamespacedName{Name: statefulSet.GetName(), Namespace: tablet.Cluster().GetNamespace()}, foundStatefulSet) 56 | if err != nil && errors.IsNotFound(err) { 57 | controllerutil.SetControllerReference(tablet.Cluster(), statefulSet, r.scheme) 58 | 59 | err = r.client.Create(context.TODO(), statefulSet) 60 | if err != nil { 61 | return reconcile.Result{}, err 62 | } 63 | } else if err != nil { 64 | log.Error(err, "failed to get StatefulSet") 65 | return reconcile.Result{}, err 66 | } else { 67 | // This is a cheap way to detect changes and it works for now. However it is not perfect 68 | // it will always detect changes because of the defaulting values that get placed 69 | // on a statefulset when it is created in the cluster. Those values are not set in the 70 | // generated statefulset so it is always different. The extra updates are harmless and don't actually 71 | // trigger statefulset upgrades. 72 | // TODO more exact diff detection 73 | if !reflect.DeepEqual(foundStatefulSet.Spec.Template, statefulSet.Spec.Template) || 74 | !reflect.DeepEqual(foundStatefulSet.Spec.Replicas, statefulSet.Spec.Replicas) || 75 | !reflect.DeepEqual(foundStatefulSet.Spec.UpdateStrategy, statefulSet.Spec.UpdateStrategy) { 76 | log.Info("Updating statefulSet for tablet", "Namespace", tablet.GetNamespace(), "VitessCluster.Name", tablet.Cluster().GetName(), "Tablet.Name", tablet.GetName()) 77 | 78 | // Update foundStatefulSet with changable fields from the generated StatefulSet 79 | 80 | // Only Template, replicas and updateStrategy may be updated on existing StatefulSet spec 81 | statefulSet.Spec.Template.DeepCopyInto(&foundStatefulSet.Spec.Template) 82 | statefulSet.Spec.Replicas = foundStatefulSet.Spec.Replicas 83 | statefulSet.Spec.UpdateStrategy.DeepCopyInto(&foundStatefulSet.Spec.UpdateStrategy) 84 | 85 | err = r.client.Update(context.TODO(), foundStatefulSet) 86 | if err != nil { 87 | return reconcile.Result{}, err 88 | } 89 | } 90 | 91 | // Set the tablet status based on the StatefulSet status 92 | // this is for use by the VitessCluster controller later 93 | if foundStatefulSet.Status.Replicas == foundStatefulSet.Status.ReadyReplicas { 94 | tablet.SetPhase(vitessv1alpha2.TabletPhaseReady) 95 | } 96 | } 97 | 98 | return reconcile.Result{}, nil 99 | } 100 | 101 | func getStatefulSetForTablet(tablet *vitessv1alpha2.VitessTablet) (*appsv1.StatefulSet, error) { 102 | selfLabels := map[string]string{ 103 | "tabletname": tablet.GetName(), 104 | "app": "vitess", 105 | "cluster": tablet.Cluster().GetName(), 106 | "cell": tablet.Cell().GetName(), 107 | "keyspace": tablet.Keyspace().GetName(), 108 | "shard": tablet.Shard().GetName(), 109 | "component": "vttablet", 110 | "type": string(tablet.Spec.Type), 111 | } 112 | 113 | vtgateLabels := map[string]string{ 114 | "app": "vitess", 115 | "cluster": tablet.Cluster().GetName(), 116 | "cell": tablet.Cell().GetName(), 117 | "component": "vtgate", 118 | } 119 | 120 | sameClusterTabletLabels := map[string]string{ 121 | "app": "vitess", 122 | "cluster": tablet.Cluster().GetName(), 123 | "component": "vttablet", 124 | } 125 | 126 | sameShardTabletLabels := map[string]string{ 127 | "app": "vitess", 128 | "cluster": tablet.Cluster().GetName(), 129 | "cell": 
tablet.Cell().GetName(), 130 | "keyspace": tablet.Keyspace().GetName(), 131 | "shard": tablet.Shard().GetName(), 132 | "component": "vttablet", 133 | } 134 | 135 | // Build affinity 136 | affinity := &corev1.Affinity{ 137 | PodAffinity: &corev1.PodAffinity{ 138 | // Prefer to run on the same host as a vtgate pod 139 | PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ 140 | { 141 | Weight: 10, 142 | PodAffinityTerm: corev1.PodAffinityTerm{ 143 | LabelSelector: &metav1.LabelSelector{ 144 | MatchLabels: vtgateLabels, 145 | }, 146 | TopologyKey: "kubernetes.io/hostname", 147 | }, 148 | }, 149 | }, 150 | }, 151 | PodAntiAffinity: &corev1.PodAntiAffinity{ 152 | PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{ 153 | // Hard preference to avoid running on the same host as another tablet in the same shard/keyspace 154 | { 155 | Weight: 100, 156 | PodAffinityTerm: corev1.PodAffinityTerm{ 157 | LabelSelector: &metav1.LabelSelector{ 158 | MatchLabels: sameShardTabletLabels, 159 | }, 160 | TopologyKey: "kubernetes.io/hostname", 161 | }, 162 | }, 163 | // Soft preference to avoid running on the same host as another tablet in the same cluster 164 | { 165 | Weight: 10, 166 | PodAffinityTerm: corev1.PodAffinityTerm{ 167 | LabelSelector: &metav1.LabelSelector{ 168 | MatchLabels: sameClusterTabletLabels, 169 | }, 170 | TopologyKey: "kubernetes.io/hostname", 171 | }, 172 | }, 173 | }, 174 | }, 175 | } 176 | 177 | dbContainers, dbInitContainers, err := GetTabletMysqlContainers(tablet) 178 | if err != nil { 179 | return nil, err 180 | } 181 | 182 | vttabletContainers, vttabletInitContainers, err := GetTabletVTTabletContainers(tablet) 183 | if err != nil { 184 | return nil, err 185 | } 186 | 187 | // build containers 188 | containers := []corev1.Container{} 189 | containers = append(containers, dbContainers...) 190 | containers = append(containers, vttabletContainers...) 191 | 192 | // build initcontainers 193 | initContainers := []corev1.Container{} 194 | initContainers = append(initContainers, dbInitContainers...) 195 | initContainers = append(initContainers, vttabletInitContainers...) 
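// Every tablet pod runs the database containers and the vttablet containers
// built above side by side, together with their init containers; they all
// share the per-replica vtdataroot volume claimed further down.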
196 | 197 | // setup volume requests 198 | volumeRequests := make(corev1.ResourceList) 199 | volumeRequests[corev1.ResourceStorage] = resource.MustParse("10Gi") 200 | 201 | return &appsv1.StatefulSet{ 202 | ObjectMeta: metav1.ObjectMeta{ 203 | Name: tablet.GetStatefulSetName(), 204 | Namespace: tablet.Cluster().GetNamespace(), 205 | Labels: selfLabels, 206 | }, 207 | Spec: appsv1.StatefulSetSpec{ 208 | //PodManagementPolicy: appsv1.PodManagementPolicyParallel{}, 209 | PodManagementPolicy: appsv1.ParallelPodManagement, 210 | Replicas: tablet.GetReplicas(), 211 | Selector: &metav1.LabelSelector{ 212 | MatchLabels: selfLabels, 213 | }, 214 | UpdateStrategy: appsv1.StatefulSetUpdateStrategy{ 215 | Type: appsv1.RollingUpdateStatefulSetStrategyType, 216 | }, 217 | ServiceName: tablet.Cluster().GetTabletServiceName(), 218 | Template: corev1.PodTemplateSpec{ 219 | ObjectMeta: metav1.ObjectMeta{ 220 | Labels: selfLabels, 221 | }, 222 | Spec: corev1.PodSpec{ 223 | Affinity: affinity, 224 | Containers: containers, 225 | InitContainers: initContainers, 226 | Volumes: []corev1.Volume{ 227 | { 228 | Name: "vt", 229 | VolumeSource: corev1.VolumeSource{ 230 | EmptyDir: &corev1.EmptyDirVolumeSource{}, 231 | }, 232 | }, 233 | }, 234 | SecurityContext: &corev1.PodSecurityContext{ 235 | FSGroup: getInt64Ptr(2000), 236 | RunAsUser: getInt64Ptr(1000), 237 | }, 238 | TerminationGracePeriodSeconds: getInt64Ptr(60000000), 239 | }, 240 | }, 241 | VolumeClaimTemplates: []corev1.PersistentVolumeClaim{ 242 | { 243 | ObjectMeta: metav1.ObjectMeta{ 244 | Name: "vtdataroot", 245 | }, 246 | Spec: corev1.PersistentVolumeClaimSpec{ 247 | AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, 248 | Resources: corev1.ResourceRequirements{ 249 | Requests: volumeRequests, 250 | }, 251 | }, 252 | }, 253 | }, 254 | }, 255 | }, nil 256 | } 257 | 258 | func GetTabletMysqlContainers(tablet *vitessv1alpha2.VitessTablet) (containers []corev1.Container, initContainers []corev1.Container, err error) { 259 | mysql := tablet.GetMySQLContainer() 260 | if mysql == nil { 261 | return containers, initContainers, fmt.Errorf("No database container configuration found") 262 | } 263 | 264 | dbScripts := scripts.NewContainerScriptGenerator("mysql", tablet) 265 | if err := dbScripts.Generate(); err != nil { 266 | return containers, initContainers, fmt.Errorf("Error generating DB container scripts: %s", err) 267 | } 268 | 269 | initContainers = append(initContainers, 270 | corev1.Container{ 271 | Name: "init-mysql", 272 | Image: "vitess/mysqlctld:helm-1.0.3", // TODO get this from a crd w/default 273 | ImagePullPolicy: corev1.PullIfNotPresent, 274 | Command: []string{"bash"}, 275 | Args: []string{ 276 | "-c", 277 | dbScripts.Init, 278 | }, 279 | VolumeMounts: []corev1.VolumeMount{ 280 | { 281 | Name: "vtdataroot", 282 | MountPath: "/vtdataroot", 283 | }, 284 | { 285 | Name: "vt", 286 | MountPath: "/vttmp", 287 | }, 288 | }, 289 | }) 290 | 291 | containers = append(containers, corev1.Container{ 292 | Name: "mysql", 293 | Image: mysql.Image, 294 | ImagePullPolicy: corev1.PullIfNotPresent, 295 | Command: []string{"bash"}, 296 | Args: []string{ 297 | "-c", 298 | dbScripts.Start, 299 | }, 300 | Lifecycle: &corev1.Lifecycle{ 301 | PreStop: &corev1.Handler{ 302 | Exec: &corev1.ExecAction{ 303 | Command: []string{ 304 | "bash", 305 | "-c", 306 | dbScripts.PreStop, 307 | }, 308 | }, 309 | }, 310 | }, 311 | ReadinessProbe: &corev1.Probe{ 312 | Handler: corev1.Handler{ 313 | Exec: &corev1.ExecAction{ 314 | Command: []string{ 315 | 
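// Readiness is a plain mysqladmin ping against the tablet's MySQL socket;
// the long initial delay below gives the database time to come up before
// probing begins.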
"mysqladmin", 316 | "ping", 317 | "-uroot", 318 | "--socket=/vtdataroot/tabletdata/mysql.sock", 319 | }, 320 | }, 321 | }, 322 | InitialDelaySeconds: 60, 323 | TimeoutSeconds: 10, 324 | PeriodSeconds: 10, 325 | SuccessThreshold: 1, 326 | FailureThreshold: 3, 327 | }, 328 | Resources: mysql.Resources, 329 | VolumeMounts: []corev1.VolumeMount{ 330 | { 331 | Name: "vtdataroot", 332 | MountPath: "/vtdataroot", 333 | }, 334 | { 335 | Name: "vt", 336 | MountPath: "/vt", 337 | }, 338 | }, 339 | Env: []corev1.EnvVar{ 340 | { 341 | Name: "VTROOT", 342 | Value: "/vt", 343 | }, 344 | { 345 | Name: "VTDATAROOT", 346 | Value: "/vtdataroot", 347 | }, 348 | { 349 | Name: "GOBIN", 350 | Value: "/vt/bin", 351 | }, 352 | { 353 | Name: "VT_MYSQL_ROOT", 354 | Value: "/usr", 355 | }, 356 | { 357 | Name: "PKG_CONFIG_PATH", 358 | Value: "/vt/lib", 359 | }, 360 | { 361 | Name: "VT_DB_FLAVOR", 362 | Value: mysql.DBFlavor, 363 | }, 364 | }, 365 | }) 366 | 367 | return 368 | } 369 | 370 | func GetTabletVTTabletContainers(tablet *vitessv1alpha2.VitessTablet) (containers []corev1.Container, initContainers []corev1.Container, err error) { 371 | vttablet := tablet.GetVTTabletContainer() 372 | if vttablet == nil { 373 | err = fmt.Errorf("No database container configuration found") 374 | return 375 | } 376 | 377 | vtScripts := scripts.NewContainerScriptGenerator("vttablet", tablet) 378 | if err = vtScripts.Generate(); err != nil { 379 | err = fmt.Errorf("Error generating DB container scripts: %s", err) 380 | return 381 | } 382 | 383 | initContainers = append(initContainers, 384 | corev1.Container{ 385 | Name: "init-vttablet", 386 | Image: "vitess/vtctl:helm-1.0.3", // TODO get this from a crd w/default 387 | ImagePullPolicy: corev1.PullIfNotPresent, 388 | Command: []string{"bash"}, 389 | Args: []string{ 390 | "-c", 391 | vtScripts.Init, 392 | }, 393 | VolumeMounts: []corev1.VolumeMount{ 394 | { 395 | Name: "vtdataroot", 396 | MountPath: "/vtdataroot", 397 | }, 398 | }, 399 | }) 400 | 401 | containers = append(containers, 402 | corev1.Container{ 403 | Name: "vttablet", 404 | Image: vttablet.Image, 405 | ImagePullPolicy: corev1.PullIfNotPresent, 406 | Command: []string{"bash"}, 407 | Args: []string{ 408 | "-c", 409 | vtScripts.Start, 410 | }, 411 | Lifecycle: &corev1.Lifecycle{ 412 | PreStop: &corev1.Handler{ 413 | Exec: &corev1.ExecAction{ 414 | Command: []string{ 415 | "bash", 416 | "-c", 417 | vtScripts.PreStop, 418 | }, 419 | }, 420 | }, 421 | }, 422 | ReadinessProbe: &corev1.Probe{ 423 | Handler: corev1.Handler{ 424 | HTTPGet: &corev1.HTTPGetAction{ 425 | Path: "/debug/health", 426 | Port: intstr.FromInt(15002), 427 | Scheme: corev1.URISchemeHTTP, 428 | }, 429 | }, 430 | InitialDelaySeconds: 60, 431 | TimeoutSeconds: 10, 432 | PeriodSeconds: 10, 433 | SuccessThreshold: 1, 434 | FailureThreshold: 3, 435 | }, 436 | LivenessProbe: &corev1.Probe{ 437 | Handler: corev1.Handler{ 438 | HTTPGet: &corev1.HTTPGetAction{ 439 | Path: "/debug/status", 440 | Port: intstr.FromInt(15002), 441 | Scheme: corev1.URISchemeHTTP, 442 | }, 443 | }, 444 | InitialDelaySeconds: 60, 445 | TimeoutSeconds: 10, 446 | PeriodSeconds: 10, 447 | SuccessThreshold: 1, 448 | FailureThreshold: 3, 449 | }, 450 | Ports: []corev1.ContainerPort{ 451 | { 452 | ContainerPort: 15002, 453 | Name: "web", 454 | Protocol: corev1.ProtocolTCP, 455 | }, 456 | { 457 | ContainerPort: 16002, 458 | Name: "grpc", 459 | Protocol: corev1.ProtocolTCP, 460 | }, 461 | }, 462 | Resources: corev1.ResourceRequirements{ 463 | // Limits: corev1.ResourceList{}, 464 | // Requests: 
corev1.ResourceList{}, 465 | }, 466 | VolumeMounts: []corev1.VolumeMount{ 467 | { 468 | Name: "vtdataroot", 469 | MountPath: "/vtdataroot", 470 | }, 471 | }, 472 | Env: []corev1.EnvVar{ 473 | { 474 | Name: "VTROOT", 475 | Value: "/vt", 476 | }, 477 | { 478 | Name: "VTDATAROOT", 479 | Value: "/vtdataroot", 480 | }, 481 | { 482 | Name: "GOBIN", 483 | Value: "/vt/bin", 484 | }, 485 | { 486 | Name: "VT_MYSQL_ROOT", 487 | Value: "/usr", 488 | }, 489 | { 490 | Name: "PKG_CONFIG_PATH", 491 | Value: "/vt/lib", 492 | }, 493 | { 494 | Name: "VT_DB_FLAVOR", 495 | Value: vttablet.DBFlavor, 496 | }, 497 | }, 498 | }, 499 | corev1.Container{ 500 | Name: "logrotate", 501 | Image: "vitess/logrotate:helm-1.0.4", // TODO get this from a crd w/default 502 | ImagePullPolicy: corev1.PullIfNotPresent, 503 | VolumeMounts: []corev1.VolumeMount{ 504 | { 505 | Name: "vtdataroot", 506 | MountPath: "/vtdataroot", 507 | }, 508 | }, 509 | }) 510 | 511 | // add log containers with a slice of filename + containername slices 512 | for _, logtype := range [][]string{ 513 | {"general", "general"}, 514 | {"error", "error"}, 515 | {"slow-query", "slow"}, 516 | } { 517 | containers = append(containers, corev1.Container{ 518 | Name: logtype[1] + "-log", 519 | Image: "vitess/logtail:helm-1.0.4", // TODO get this from a crd w/default 520 | ImagePullPolicy: corev1.PullIfNotPresent, 521 | Env: []corev1.EnvVar{ 522 | { 523 | Name: "TAIL_FILEPATH", 524 | Value: fmt.Sprintf("/vtdataroot/tabletdata/%s.log", logtype[0]), 525 | }, 526 | }, 527 | VolumeMounts: []corev1.VolumeMount{ 528 | { 529 | Name: "vtdataroot", 530 | MountPath: "/vtdataroot", 531 | }, 532 | }, 533 | }) 534 | } 535 | 536 | return 537 | } 538 | 539 | func (r *ReconcileVitessCluster) ReconcileReplicaTabletInitJob(tablet *vitessv1alpha2.VitessTablet) (reconcile.Result, error) { 540 | job, jobErr := GetReplicaTabletInitMasterJob(tablet) 541 | if jobErr != nil { 542 | log.Error(jobErr, "failed to generate master elect job for replica VitessTablet", "VitessTablet.Namespace", tablet.GetNamespace(), "VitessTablet.Name", tablet.GetNamespace()) 543 | return reconcile.Result{}, jobErr 544 | } 545 | 546 | found := &batchv1.Job{} 547 | err := r.client.Get(context.TODO(), types.NamespacedName{Name: job.GetName(), Namespace: job.GetNamespace()}, found) 548 | if err != nil && errors.IsNotFound(err) { 549 | controllerutil.SetControllerReference(tablet.Cluster(), job, r.scheme) 550 | err = r.client.Create(context.TODO(), job) 551 | if err != nil { 552 | return reconcile.Result{}, err 553 | } 554 | // Job created successfully - return and requeue 555 | return reconcile.Result{Requeue: true}, nil 556 | } else if err != nil { 557 | log.Error(err, "failed to get Job") 558 | return reconcile.Result{}, err 559 | } 560 | 561 | return reconcile.Result{}, nil 562 | } 563 | 564 | func GetReplicaTabletInitMasterJob(tablet *vitessv1alpha2.VitessTablet) (*batchv1.Job, error) { 565 | jobName := tablet.GetScopedName("init-replica-master") 566 | 567 | scripts := scripts.NewContainerScriptGenerator("init_replica_master", tablet) 568 | if err := scripts.Generate(); err != nil { 569 | return nil, err 570 | } 571 | 572 | jobLabels := map[string]string{ 573 | "app": "vitess", 574 | "cluster": tablet.Cluster().GetName(), 575 | "keyspace": tablet.Keyspace().GetName(), 576 | "shard": tablet.Shard().GetName(), 577 | "component": "vttablet-replica-elector", 578 | "initShardMasterJob": "true", 579 | "job-name": jobName, 580 | } 581 | 582 | return &batchv1.Job{ 583 | ObjectMeta: metav1.ObjectMeta{ 584 | Name: 
jobName, 585 | Namespace: tablet.Cluster().GetNamespace(), 586 | Labels: jobLabels, 587 | }, 588 | Spec: batchv1.JobSpec{ 589 | BackoffLimit: getInt32Ptr(1), 590 | Completions: getInt32Ptr(1), 591 | Parallelism: getInt32Ptr(1), 592 | Template: corev1.PodTemplateSpec{ 593 | ObjectMeta: metav1.ObjectMeta{ 594 | Labels: jobLabels, 595 | }, 596 | Spec: corev1.PodSpec{ 597 | Containers: []corev1.Container{ 598 | { 599 | Name: "init-master", 600 | Image: "vitess/vtctlclient:helm-1.0.3", // TODO use CRD w/default 601 | Command: []string{ 602 | "bash", 603 | }, 604 | Args: []string{ 605 | "-c", 606 | scripts.Start, 607 | }, 608 | }, 609 | }, 610 | RestartPolicy: corev1.RestartPolicyOnFailure, 611 | }, 612 | }, 613 | }, 614 | }, nil 615 | } 616 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/utils.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | func getInt32Ptr(id int32) *int32 { 4 | return &id 5 | } 6 | 7 | func getInt64Ptr(id int64) *int64 { 8 | return &id 9 | } 10 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/vitesscluster_controller.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "time" 7 | 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "k8s.io/apimachinery/pkg/types" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | "sigs.k8s.io/controller-runtime/pkg/controller" 13 | "sigs.k8s.io/controller-runtime/pkg/handler" 14 | "sigs.k8s.io/controller-runtime/pkg/manager" 15 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 16 | logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" 17 | "sigs.k8s.io/controller-runtime/pkg/source" 18 | 19 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 20 | "vitess.io/vitess-operator/pkg/normalizer" 21 | ) 22 | 23 | var log = logf.Log.WithName("controller_vitesscluster") 24 | 25 | // Add creates a new VitessCluster Controller and adds it to the Manager. The Manager will set fields on the Controller 26 | // and Start it when the Manager is Started. 
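// A minimal sketch of how Add is typically wired up at startup (illustrative,
// assuming the usual operator-sdk AddToManagerFuncs pattern rather than code
// taken verbatim from this repository):
//
//	func init() {
//		// The manager entrypoint iterates AddToManagerFuncs and calls each one.
//		AddToManagerFuncs = append(AddToManagerFuncs, vitesscluster.Add)
//	}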
27 | func Add(mgr manager.Manager) error { 28 | return add(mgr, newReconciler(mgr)) 29 | } 30 | 31 | // newReconciler returns a new reconcile.Reconciler 32 | func newReconciler(mgr manager.Manager) reconcile.Reconciler { 33 | return &ReconcileVitessCluster{client: mgr.GetClient(), scheme: mgr.GetScheme()} 34 | } 35 | 36 | // add adds a new Controller to mgr with r as the reconcile.Reconciler 37 | func add(mgr manager.Manager, r reconcile.Reconciler) error { 38 | // Create a new controller 39 | c, err := controller.New("vitesscluster-controller", mgr, controller.Options{Reconciler: r}) 40 | if err != nil { 41 | return err 42 | } 43 | 44 | // Watch for changes to primary resource VitessCluster 45 | err = c.Watch(&source.Kind{Type: &vitessv1alpha2.VitessCluster{}}, &handler.EnqueueRequestForObject{}) 46 | if err != nil { 47 | return err 48 | } 49 | 50 | for _, childType := range []runtime.Object{ 51 | &vitessv1alpha2.VitessLockserver{}, 52 | &vitessv1alpha2.VitessCell{}, 53 | &vitessv1alpha2.VitessKeyspace{}, 54 | &vitessv1alpha2.VitessShard{}, 55 | &vitessv1alpha2.VitessTablet{}, 56 | } { 57 | // Watch for changes to child type and requeue the owner VitessCluster 58 | err = c.Watch(&source.Kind{Type: childType}, &handler.EnqueueRequestForOwner{ 59 | IsController: true, 60 | OwnerType: &vitessv1alpha2.VitessCluster{}, 61 | }) 62 | if err != nil { 63 | return err 64 | } 65 | } 66 | 67 | return nil 68 | } 69 | 70 | var _ reconcile.Reconciler = &ReconcileVitessCluster{} 71 | 72 | // ReconcileVitessCluster reconciles a VitessCluster object 73 | type ReconcileVitessCluster struct { 74 | // This client, initialized using mgr.Client() above, is a split client 75 | // that reads objects from the cache and writes to the apiserver 76 | client client.Client 77 | scheme *runtime.Scheme 78 | } 79 | 80 | // Reconcile reads that state of the cluster for a VitessCluster object and makes changes based on the state read 81 | // and what is in the VitessCluster.Spec 82 | // The Controller will requeue the Request to be processed again if the returned error is non-nil or 83 | // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 84 | func (r *ReconcileVitessCluster) Reconcile(request reconcile.Request) (reconcile.Result, error) { 85 | reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) 86 | reqLogger.Info("Reconciling VitessCluster") 87 | 88 | // Fetch the VitessCluster instance 89 | cluster := &vitessv1alpha2.VitessCluster{} 90 | err := r.client.Get(context.TODO(), request.NamespacedName, cluster) 91 | if err != nil { 92 | if errors.IsNotFound(err) { 93 | // Request object not found, could have been deleted after reconcile request. 94 | // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 95 | // Return and don't requeue 96 | return reconcile.Result{}, nil 97 | } 98 | // Error reading the object - requeue the request. 
99 | return reconcile.Result{}, err 100 | } 101 | 102 | n := normalizer.New(r.client) 103 | 104 | // Check 105 | if err := n.TestClusterSanity(cluster); err != nil { 106 | reqLogger.Error(err, "Cluster failed sanity test") 107 | return reconcile.Result{Requeue: false}, err 108 | } 109 | 110 | // Normalize 111 | if err := n.NormalizeCluster(cluster); err != nil { 112 | return reconcile.Result{Requeue: false}, err 113 | } 114 | 115 | // Validate 116 | if err := n.ValidateCluster(cluster); err != nil { 117 | reqLogger.Error(err, "Cluster failed validation") 118 | return reconcile.Result{Requeue: false}, err 119 | } 120 | 121 | // Reconcile 122 | if result, err := r.ReconcileClusterResources(cluster); err != nil { 123 | reqLogger.Info("Error reconciling cluster member resources") 124 | return result, err 125 | } else if result.Requeue { 126 | reqLogger.Info("Requeue after reconciling cluster member resources") 127 | return result, nil 128 | } 129 | 130 | // Status updates 131 | 132 | switch cluster.Phase() { 133 | // Set cluster status to Created if it's a new cluster 134 | case vitessv1alpha2.ClusterPhaseNone: 135 | r.SetClusterPhase(cluster, vitessv1alpha2.ClusterPhaseCreating) 136 | // Set a creating cluster status to Ready if all tablet sets are ready 137 | case vitessv1alpha2.ClusterPhaseCreating: 138 | if cluster.AllTabletsReady() { 139 | r.SetClusterPhase(cluster, vitessv1alpha2.ClusterPhaseReady) 140 | } else { 141 | // Requeue to re-check for readiness later 142 | reqLogger.Info("Cluster created but not ready. Will try again later.") 143 | return reconcile.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil 144 | } 145 | } 146 | 147 | // Nothing to do - don't reqeue 148 | reqLogger.Info("Skip reconcile: all managed services in sync") 149 | return reconcile.Result{}, nil 150 | } 151 | 152 | func (r *ReconcileVitessCluster) SetClusterPhase(cluster *vitessv1alpha2.VitessCluster, p vitessv1alpha2.ClusterPhase) error { 153 | log.Info(fmt.Sprintf("Setting VitessCluster to %s phase", p)) 154 | 155 | // Get latest cluster 156 | foundCluster := &vitessv1alpha2.VitessCluster{} 157 | if err := r.client.Get(context.TODO(), types.NamespacedName{Name: cluster.GetName(), Namespace: cluster.GetNamespace()}, foundCluster); err != nil { 158 | return err 159 | } 160 | 161 | // set phase 162 | foundCluster.SetPhase(p) 163 | 164 | // update 165 | if err := r.client.Status().Update(context.TODO(), foundCluster); err != nil { 166 | log.Error(err, "Failed to update VitessCluster phase") 167 | return err 168 | } 169 | 170 | return nil 171 | } 172 | -------------------------------------------------------------------------------- /pkg/controller/vitesscluster/vitesscluster_controller_test.go: -------------------------------------------------------------------------------- 1 | package vitesscluster 2 | 3 | import ( 4 | "strings" 5 | "testing" 6 | 7 | corev1 "k8s.io/api/core/v1" 8 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "k8s.io/apimachinery/pkg/types" 11 | "k8s.io/client-go/kubernetes/scheme" 12 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 13 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 14 | // logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" 15 | 16 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 17 | "vitess.io/vitess-operator/pkg/normalizer" 18 | ) 19 | 20 | // TestLockserverLockserverRefMutuallyExclusive makes sure that lockserver and lockserverRef are mutually exclusive 21 | func 
TestLockserverLockserverRefMutuallyExclusive(t *testing.T) { 22 | // Set the logger to development mode for verbose logs. 23 | // logf.SetLogger(logf.ZapLogger(true)) 24 | 25 | var ( 26 | namespace = "vitess" 27 | clusterName = "vitess-operator" 28 | ) 29 | 30 | // Define a minimal cluster with both a lockserver and lockserverRef given 31 | cluster := &vitessv1alpha2.VitessCluster{ 32 | ObjectMeta: metav1.ObjectMeta{ 33 | Name: clusterName, 34 | Namespace: namespace, 35 | }, 36 | Spec: vitessv1alpha2.VitessClusterSpec{ 37 | Lockserver: &vitessv1alpha2.VitessLockserver{}, 38 | LockserverRef: &corev1.LocalObjectReference{ 39 | Name: "exists", 40 | }, 41 | }, 42 | } 43 | 44 | // Objects to track in the fake client. 45 | objs := []runtime.Object{ 46 | cluster, 47 | } 48 | 49 | // Register operator types with the runtime scheme. 50 | s := scheme.Scheme 51 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, cluster) 52 | // Create a fake client to mock API calls. 53 | cl := fake.NewFakeClient(objs...) 54 | // Create a ReconcileVitessCluster object with the scheme and fake client. 55 | r := &ReconcileVitessCluster{client: cl, scheme: s} 56 | 57 | // Mock request to simulate Reconcile() being called on an event for a 58 | // watched resource . 59 | req := reconcile.Request{ 60 | NamespacedName: types.NamespacedName{ 61 | Name: clusterName, 62 | Namespace: namespace, 63 | }, 64 | } 65 | res, err := r.Reconcile(req) 66 | if err == nil { 67 | t.Error("Sanity check failure not caught") 68 | } 69 | 70 | // Check the result of reconciliation to make sure it has the desired state. 71 | if res.Requeue { 72 | t.Error("reconcile requeued request and should not have") 73 | } 74 | } 75 | 76 | // TestTabletTemplates ensures that tablet templates are generated properly 77 | func TestTabletTemplates(t *testing.T) { 78 | // Set the logger to development mode for verbose logs. 
79 | // logf.SetLogger(logf.ZapLogger(true)) 80 | 81 | var ( 82 | namespace = "vitess" 83 | clusterName = "vitess-operator" 84 | etcd2Address = "etcd2.test.address:12345" 85 | etcd2Path = "etcd2/test/path" 86 | ) 87 | 88 | // Define a minimal cluster which matches one of the cells above 89 | cluster := &vitessv1alpha2.VitessCluster{ 90 | ObjectMeta: metav1.ObjectMeta{ 91 | Name: clusterName, 92 | Namespace: namespace, 93 | }, 94 | Spec: vitessv1alpha2.VitessClusterSpec{ 95 | Lockserver: &vitessv1alpha2.VitessLockserver{ 96 | Spec: vitessv1alpha2.VitessLockserverSpec{ 97 | Type: vitessv1alpha2.LockserverTypeEtcd2, 98 | Etcd2: &vitessv1alpha2.Etcd2Lockserver{ 99 | Address: etcd2Address, 100 | Path: etcd2Path, 101 | }, 102 | }, 103 | }, 104 | Cells: []*vitessv1alpha2.VitessCell{ 105 | { 106 | ObjectMeta: metav1.ObjectMeta{ 107 | Name: "default", 108 | }, 109 | Spec: vitessv1alpha2.VitessCellSpec{ 110 | Lockserver: &vitessv1alpha2.VitessLockserver{ 111 | Spec: vitessv1alpha2.VitessLockserverSpec{ 112 | Type: vitessv1alpha2.LockserverTypeEtcd2, 113 | Etcd2: &vitessv1alpha2.Etcd2Lockserver{ 114 | Address: etcd2Address, 115 | Path: etcd2Path, 116 | }, 117 | }, 118 | }, 119 | }, 120 | }, 121 | }, 122 | Keyspaces: []*vitessv1alpha2.VitessKeyspace{ 123 | { 124 | Spec: vitessv1alpha2.VitessKeyspaceSpec{ 125 | Shards: []*vitessv1alpha2.VitessShard{ 126 | { 127 | Spec: vitessv1alpha2.VitessShardSpec{ 128 | Defaults: &vitessv1alpha2.VitessShardOptions{ 129 | Containers: &vitessv1alpha2.TabletContainers{ 130 | VTTablet: &vitessv1alpha2.VTTabletContainer{ 131 | Image: "test", 132 | }, 133 | MySQL: &vitessv1alpha2.MySQLContainer{ 134 | Image: "test", 135 | }, 136 | }, 137 | }, 138 | Tablets: []*vitessv1alpha2.VitessTablet{ 139 | { 140 | Spec: vitessv1alpha2.VitessTabletSpec{ 141 | TabletID: 101, 142 | CellID: "default", 143 | }, 144 | }, 145 | }, 146 | }, 147 | }, 148 | }, 149 | }, 150 | }, 151 | }, 152 | }, 153 | } 154 | 155 | // Populate the client with initial data 156 | objs := []runtime.Object{ 157 | cluster, 158 | } 159 | 160 | // Register operator types with the runtime scheme. 161 | s := scheme.Scheme 162 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCluster{}) 163 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessClusterList{}) 164 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCell{}) 165 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCellList{}) 166 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessTablet{}) 167 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessTabletList{}) 168 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessShard{}) 169 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessShardList{}) 170 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessKeyspace{}) 171 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessKeyspaceList{}) 172 | 173 | // Create a fake client to mock API calls. 174 | cl := fake.NewFakeClient(objs...) 
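// The fake client serves the objects registered above entirely from memory,
// so the normalization and validation below exercise the same code paths as
// the controller without needing a real API server.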
175 | 176 | norm := normalizer.New(cl) 177 | 178 | // Call the normalize function for the cluster 179 | if err := norm.NormalizeCluster(cluster); err != nil { 180 | t.Fatalf("Error normalizing cluster: %s", err) 181 | } 182 | 183 | // Call the validate function for the cluster 184 | if err := norm.ValidateCluster(cluster); err != nil { 185 | t.Fatalf("Error validating cluster: %s", err) 186 | } 187 | 188 | for _, tablet := range cluster.Tablets() { 189 | vttabletContainers, vttabletInitContainers, err := GetTabletVTTabletContainers(tablet) 190 | if err != nil { 191 | t.Fatalf("Error generating vttablet container for tablet: %s", err) 192 | } 193 | 194 | for _, container := range vttabletContainers { 195 | // make sure that the etcdpath and etcdaddress end up in the generated scripts for the vttablet container 196 | if container.Name == "vttablet" { 197 | if !strings.Contains(container.Args[len(container.Args)-1], etcd2Address) { 198 | t.Fatalf("Generated start script for vttablet container does not contain the etcd address: %s", container.Args[len(container.Args)-1]) 199 | } 200 | 201 | // make sure that the etcdpath and etcdaddress end up in the generated scripts for the vttablet container 202 | if !strings.Contains(container.Args[len(container.Args)-1], etcd2Path) { 203 | t.Fatalf("Generated start script for vttablet container does not contain the etcd path") 204 | } 205 | } 206 | } 207 | 208 | for _, container := range vttabletInitContainers { 209 | // make sure that the etcdpath and etcdaddress end up in the generated scripts for the vttablet container 210 | if container.Name == "init-vttablet" { 211 | if !strings.Contains(container.Args[len(container.Args)-1], etcd2Address) { 212 | t.Fatalf("Generated start script for init-vttablet container does not contain the etcd address") 213 | } 214 | 215 | // make sure that the etcdpath and etcdaddress end up in the generated scripts for the vttablet container 216 | if !strings.Contains(container.Args[len(container.Args)-1], etcd2Path) { 217 | t.Fatalf("Generated start script for init-vttablet container does not contain the etcd path") 218 | } 219 | } 220 | } 221 | } 222 | } 223 | -------------------------------------------------------------------------------- /pkg/controller/vitesslockserver/vitesslockserver_controller.go: -------------------------------------------------------------------------------- 1 | package vitesslockserver 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/go-logr/logr" 7 | corev1 "k8s.io/api/core/v1" 8 | "k8s.io/apimachinery/pkg/api/errors" 9 | "k8s.io/apimachinery/pkg/runtime" 10 | "sigs.k8s.io/controller-runtime/pkg/client" 11 | "sigs.k8s.io/controller-runtime/pkg/controller" 12 | "sigs.k8s.io/controller-runtime/pkg/handler" 13 | "sigs.k8s.io/controller-runtime/pkg/manager" 14 | "sigs.k8s.io/controller-runtime/pkg/reconcile" 15 | logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" 16 | "sigs.k8s.io/controller-runtime/pkg/source" 17 | 18 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 19 | ) 20 | 21 | var log = logf.Log.WithName("controller_vitesslockserver") 22 | 23 | // Add creates a new VitessLockserver Controller and adds it to the Manager. The Manager will set fields on the Controller 24 | // and Start it when the Manager is Started. 
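// Most of the actual work lives in the exported ReconcileObject helper below,
// which lets the VitessCluster controller reconcile embedded lockservers
// through the same logic without going through this controller's work queue.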
25 | func Add(mgr manager.Manager) error { 26 | return add(mgr, newReconciler(mgr)) 27 | } 28 | 29 | // newReconciler returns a new reconcile.Reconciler 30 | func newReconciler(mgr manager.Manager) reconcile.Reconciler { 31 | return &ReconcileVitessLockserver{client: mgr.GetClient(), scheme: mgr.GetScheme()} 32 | } 33 | 34 | // add adds a new Controller to mgr with r as the reconcile.Reconciler 35 | func add(mgr manager.Manager, r reconcile.Reconciler) error { 36 | // Create a new controller 37 | c, err := controller.New("vitesslockserver-controller", mgr, controller.Options{Reconciler: r}) 38 | if err != nil { 39 | return err 40 | } 41 | 42 | // Watch for changes to primary resource VitessLockserver 43 | err = c.Watch(&source.Kind{Type: &vitessv1alpha2.VitessLockserver{}}, &handler.EnqueueRequestForObject{}) 44 | if err != nil { 45 | return err 46 | } 47 | 48 | // TODO(user): Modify this to be the types you create that are owned by the primary resource 49 | // Watch for changes to secondary resource Pods and requeue the owner VitessLockserver 50 | err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &handler.EnqueueRequestForOwner{ 51 | IsController: true, 52 | OwnerType: &vitessv1alpha2.VitessLockserver{}, 53 | }) 54 | if err != nil { 55 | return err 56 | } 57 | 58 | return nil 59 | } 60 | 61 | var _ reconcile.Reconciler = &ReconcileVitessLockserver{} 62 | 63 | // ReconcileVitessLockserver reconciles a VitessLockserver object 64 | type ReconcileVitessLockserver struct { 65 | // This client, initialized using mgr.Client() above, is a split client 66 | // that reads objects from the cache and writes to the apiserver 67 | client client.Client 68 | scheme *runtime.Scheme 69 | } 70 | 71 | // Reconcile reads that state of the cluster for a VitessLockserver object and makes changes based on the state read 72 | // and what is in the VitessLockserver.Spec 73 | // The Controller will requeue the Request to be processed again if the returned error is non-nil or 74 | // Result.Requeue is true, otherwise upon completion it will remove the work from the queue. 75 | func (r *ReconcileVitessLockserver) Reconcile(request reconcile.Request) (reconcile.Result, error) { 76 | reqLogger := log.WithValues("Request.Namespace", request.Namespace, "Request.Name", request.Name) 77 | // reqLogger.Info("Reconciling VitessLockserver") 78 | 79 | // Fetch the VitessLockserver instance 80 | instance := &vitessv1alpha2.VitessLockserver{} 81 | err := r.client.Get(context.TODO(), request.NamespacedName, instance) 82 | if err != nil { 83 | if errors.IsNotFound(err) { 84 | // Request object not found, could have been deleted after reconcile request. 85 | // Owned objects are automatically garbage collected. For additional cleanup logic use finalizers. 86 | // Return and don't requeue 87 | return reconcile.Result{}, nil 88 | } 89 | // Error reading the object - requeue the request. 
90 | return reconcile.Result{}, err 91 | } 92 | 93 | rr, err := ReconcileObject(instance, reqLogger) 94 | 95 | return rr, err 96 | } 97 | 98 | // ReconcileObject does all the actual reconcile work 99 | func ReconcileObject(instance *vitessv1alpha2.VitessLockserver, upstreamLog logr.Logger) (reconcile.Result, error) { 100 | reqLogger := upstreamLog.WithValues() 101 | reqLogger.Info("Reconciling VitessLockserver") 102 | 103 | // TODO actual reconcile 104 | // if instance.Status.State != "Ready" { 105 | // instance.Status.State = "Ready" 106 | // return reconcile.Result{Requeue: true}, nil 107 | // } 108 | 109 | return reconcile.Result{}, nil 110 | } 111 | -------------------------------------------------------------------------------- /pkg/normalizer/errors.go: -------------------------------------------------------------------------------- 1 | package normalizer 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | ) 7 | 8 | type ValidationError error 9 | 10 | var ( 11 | ValidationErrorNoLockserverForCluster ValidationError = errors.New("No Lockserver in Cluster") 12 | ValidationErrorNoLockserverForCell ValidationError = errors.New("No Lockserver in Cell") 13 | 14 | ValidationErrorNoCells ValidationError = errors.New("No Cells in Cluster") 15 | ValidationErrorNoShards ValidationError = errors.New("No Shards in Cluster") 16 | ValidationErrorNoTablets ValidationError = errors.New("No Tablets in Cluster") 17 | ValidationErrorNoKeyspaces ValidationError = errors.New("No Keyspaces in Cluster") 18 | 19 | ValidationErrorOverlappingKeyrange ValidationError = errors.New("Multiple shards provided with the same keyrange") 20 | 21 | ValidationErrorNoCellForTablet ValidationError = errors.New("No Cell for Tablet") 22 | ValidationErrorTabletNameTooLong ValidationError = errors.New("Tablet name is too long and would break mysql replication") 23 | ) 24 | 25 | var ClientError = errors.New("Client Error") 26 | 27 | func NewClientError(err error) error { 28 | return fmt.Errorf("Client Error: %s", err) 29 | } 30 | -------------------------------------------------------------------------------- /pkg/normalizer/normalizer.go: -------------------------------------------------------------------------------- 1 | package normalizer 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | 7 | "k8s.io/apimachinery/pkg/labels" 8 | "k8s.io/apimachinery/pkg/runtime" 9 | "k8s.io/apimachinery/pkg/selection" 10 | "k8s.io/apimachinery/pkg/types" 11 | "sigs.k8s.io/controller-runtime/pkg/client" 12 | 13 | logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" 14 | 15 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 16 | ) 17 | 18 | var log = logf.Log.WithName("normalizer") 19 | 20 | type Normalizer struct { 21 | client client.Client 22 | } 23 | 24 | func New(client client.Client) *Normalizer { 25 | return &Normalizer{ 26 | client: client, 27 | } 28 | } 29 | 30 | func (n *Normalizer) NormalizeCluster(cluster *vitessv1alpha2.VitessCluster) error { 31 | if err := n.NormalizeClusterLockserver(cluster); err != nil { 32 | return err 33 | } 34 | 35 | if err := n.NormalizeClusterCells(cluster); err != nil { 36 | return err 37 | } 38 | 39 | if err := n.NormalizeClusterKeyspaces(cluster); err != nil { 40 | return err 41 | } 42 | 43 | // if err := n.NormalizeClusterTabletParentage(cluster); err != nil { 44 | // return err 45 | // } 46 | 47 | return nil 48 | } 49 | 50 | func (n *Normalizer) NormalizeClusterLockserver(cluster *vitessv1alpha2.VitessCluster) error { 51 | // Populate the embedded lockserver spec from Ref if given 52 | if 
cluster.Spec.LockserverRef != nil { 53 | ls := &vitessv1alpha2.VitessLockserver{} 54 | err := n.client.Get(context.TODO(), types.NamespacedName{Name: cluster.Spec.LockserverRef.Name, Namespace: cluster.GetNamespace()}, ls) 55 | if err != nil { 56 | return NewClientError(err) 57 | } 58 | 59 | // Since Lockserver and Lockserver Ref are mutually-exclusive, it should be safe 60 | // to simply populate the Lockserver struct member with a pointer to the fetched lockserver 61 | cluster.Spec.Lockserver = ls 62 | } 63 | 64 | return nil 65 | } 66 | 67 | func (n *Normalizer) NormalizeClusterCells(cluster *vitessv1alpha2.VitessCluster) error { 68 | if len(cluster.Spec.CellSelector) != 0 { 69 | cellList := &vitessv1alpha2.VitessCellList{} 70 | if err := n.ListFromSelectors(context.TODO(), cluster.Spec.CellSelector, cellList); err != nil { 71 | return fmt.Errorf("Error getting cells for cluster %s", err) 72 | } 73 | 74 | log.Info(fmt.Sprintf("VitessCluster's cellSelector matched %d cells", len(cellList.Items))) 75 | for _, cell := range cellList.Items { 76 | cluster.EmbedCellCopy(&cell) 77 | } 78 | } 79 | 80 | for _, cell := range cluster.Cells() { 81 | cell.SetParentCluster(cluster) 82 | 83 | if err := n.NormalizeCellLockserver(cell); err != nil { return err } 84 | } 85 | 86 | return nil 87 | } 88 | 89 | func (n *Normalizer) NormalizeCellLockserver(cell *vitessv1alpha2.VitessCell) error { 90 | // Populate the embedded lockserver spec from Ref if given 91 | if cell.Spec.LockserverRef != nil { 92 | ls := &vitessv1alpha2.VitessLockserver{} 93 | err := n.client.Get(context.TODO(), types.NamespacedName{Name: cell.Spec.LockserverRef.Name, Namespace: cell.Cluster().GetNamespace()}, ls) 94 | if err != nil { 95 | return NewClientError(err) 96 | } 97 | 98 | // Since Lockserver and Lockserver Ref are mutually-exclusive, it should be safe 99 | // to simply populate the Lockserver struct member with a pointer to the fetched lockserver 100 | cell.Spec.Lockserver = ls 101 | } 102 | 103 | return nil 104 | } 105 | 106 | func (n *Normalizer) NormalizeClusterKeyspaces(cluster *vitessv1alpha2.VitessCluster) error { 107 | if len(cluster.Spec.KeyspaceSelector) != 0 { 108 | keyspaceList := &vitessv1alpha2.VitessKeyspaceList{} 109 | if err := n.ListFromSelectors(context.TODO(), cluster.Spec.KeyspaceSelector, keyspaceList); err != nil { 110 | return fmt.Errorf("Error getting keyspaces for cluster %s", err) 111 | } 112 | 113 | log.Info(fmt.Sprintf("VitessCluster's keyspaceSelector matched %d keyspaces", len(keyspaceList.Items))) 114 | for _, keyspace := range keyspaceList.Items { 115 | cluster.EmbedKeyspaceCopy(&keyspace) 116 | } 117 | } 118 | 119 | for _, keyspace := range cluster.Keyspaces() { 120 | keyspace.SetParentCluster(cluster) 121 | 122 | if err := n.NormalizeClusterKeyspaceShards(cluster, keyspace); err != nil { 123 | return err 124 | } 125 | } 126 | 127 | return nil 128 | } 129 | 130 | func (n *Normalizer) NormalizeClusterKeyspaceShards(cluster *vitessv1alpha2.VitessCluster, keyspace *vitessv1alpha2.VitessKeyspace) error { 131 | shardList := &vitessv1alpha2.VitessShardList{} 132 | err := n.ListFromSelectors(context.TODO(), keyspace.Spec.ShardSelector, shardList) 133 | if err != nil { 134 | return fmt.Errorf("Error getting shards for keyspace %s", err) 135 | } 136 | 137 | log.Info(fmt.Sprintf("VitessKeyspace's shardSelector matched %d shards", len(shardList.Items))) 138 | for _, shard := range shardList.Items { 139 | keyspace.EmbedShardCopy(&shard) 140 | } 141 | 142 | for _, shard := range keyspace.Shards() {
shard.SetParentCluster(keyspace.Cluster()) 144 | shard.SetParentKeyspace(keyspace) 145 | 146 | if err := n.NormalizeClusterShardTablets(cluster, shard); err != nil { 147 | return err 148 | } 149 | } 150 | 151 | return nil 152 | } 153 | 154 | func (n *Normalizer) NormalizeClusterShardTablets(cluster *vitessv1alpha2.VitessCluster, shard *vitessv1alpha2.VitessShard) error { 155 | tabletList := &vitessv1alpha2.VitessTabletList{} 156 | err := n.ListFromSelectors(context.TODO(), shard.Spec.TabletSelector, tabletList) 157 | if err != nil { 158 | return fmt.Errorf("Error getting tablets for shard %s", err) 159 | } 160 | 161 | log.Info(fmt.Sprintf("VitessShard's tabletSelector matched %d tablets", len(tabletList.Items))) 162 | for _, tablet := range tabletList.Items { 163 | shard.EmbedTabletCopy(&tablet) 164 | } 165 | 166 | for _, tablet := range shard.Tablets() { 167 | tablet.SetParentCluster(cluster) 168 | tablet.SetParentCell(cluster.GetCellByID(tablet.Spec.CellID)) 169 | tablet.SetParentKeyspace(shard.Keyspace()) 170 | tablet.SetParentShard(shard) 171 | } 172 | 173 | return nil 174 | } 175 | 176 | func (n *Normalizer) ListFromSelectors(ctx context.Context, rSels []vitessv1alpha2.ResourceSelector, retList runtime.Object) error { 177 | labelSelector, err := ResourceSelectorsAsLabelSelector(rSels) 178 | if err == nil { 179 | err := n.client.List(ctx, &client.ListOptions{LabelSelector: labelSelector}, retList) 180 | if err != nil { 181 | return err 182 | } 183 | return nil 184 | } 185 | return err 186 | } 187 | 188 | // ResourceSelectorsAsLabelSelector converts the []ResourceSelector api type into a struct that implements 189 | // labels.Selector. 190 | func ResourceSelectorsAsLabelSelector(rSels []vitessv1alpha2.ResourceSelector) (labels.Selector, error) { 191 | if len(rSels) == 0 { 192 | return labels.Nothing(), nil 193 | } 194 | 195 | selector := labels.NewSelector() 196 | for _, expr := range rSels { 197 | var op selection.Operator 198 | switch expr.Operator { 199 | case vitessv1alpha2.ResourceSelectorOpIn: 200 | op = selection.In 201 | case vitessv1alpha2.ResourceSelectorOpNotIn: 202 | op = selection.NotIn 203 | case vitessv1alpha2.ResourceSelectorOpExists: 204 | op = selection.Exists 205 | case vitessv1alpha2.ResourceSelectorOpDoesNotExist: 206 | op = selection.DoesNotExist 207 | default: 208 | return nil, fmt.Errorf("%q is not a valid resource selector operator", expr.Operator) 209 | } 210 | r, err := labels.NewRequirement(expr.Key, op, expr.Values) 211 | if err != nil { 212 | return nil, err 213 | } 214 | selector = selector.Add(*r) 215 | } 216 | return selector, nil 217 | } 218 | -------------------------------------------------------------------------------- /pkg/normalizer/normalizer_test.go: -------------------------------------------------------------------------------- 1 | package normalizer 2 | 3 | import ( 4 | "context" 5 | "strings" 6 | "testing" 7 | 8 | corev1 "k8s.io/api/core/v1" 9 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 10 | "k8s.io/apimachinery/pkg/runtime" 11 | // "k8s.io/apimachinery/pkg/types" 12 | "k8s.io/client-go/kubernetes/scheme" 13 | "sigs.k8s.io/controller-runtime/pkg/client/fake" 14 | // "sigs.k8s.io/controller-runtime/pkg/reconcile" 15 | // logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" 16 | 17 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 18 | ) 19 | 20 | var ( 21 | testNamespace = "vitess" 22 | testClusterName = "vitess-operator" 23 | 24 | // simple labels for all resources 25 | testLabels = map[string]string{ 26 | "app": 
"yes", 27 | } 28 | 29 | // simple selector for all resources 30 | testSel = []vitessv1alpha2.ResourceSelector{ 31 | { 32 | Key: "app", 33 | Operator: vitessv1alpha2.ResourceSelectorOpIn, 34 | Values: []string{"yes"}, 35 | }, 36 | } 37 | ) 38 | 39 | func TestSanity(t *testing.T) { 40 | // Set the logger to development mode for verbose logs. 41 | // logf.SetLogger(logf.ZapLogger(true)) 42 | 43 | // Define a minimal cluster which matches one of the cells above 44 | cluster := &vitessv1alpha2.VitessCluster{ 45 | ObjectMeta: metav1.ObjectMeta{ 46 | Name: testClusterName, 47 | Namespace: testNamespace, 48 | }, 49 | Spec: vitessv1alpha2.VitessClusterSpec{ 50 | LockserverRef: &corev1.LocalObjectReference{ 51 | Name: "lockserver", 52 | }, 53 | }, 54 | } 55 | 56 | // Populate the client with initial data 57 | objs := []runtime.Object{ 58 | cluster, 59 | &vitessv1alpha2.VitessLockserver{ 60 | ObjectMeta: metav1.ObjectMeta{ 61 | Name: "lockserver", 62 | Namespace: testNamespace, 63 | }, 64 | }, 65 | } 66 | 67 | // Register operator types with the runtime scheme. 68 | s := scheme.Scheme 69 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCluster{}) 70 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessClusterList{}) 71 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCell{}) 72 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCellList{}) 73 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessTablet{}) 74 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessTabletList{}) 75 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessShard{}) 76 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessShardList{}) 77 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessKeyspace{}) 78 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessKeyspaceList{}) 79 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessLockserver{}) 80 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessLockserverList{}) 81 | 82 | // Create a fake client to mock API calls. 83 | client := fake.NewFakeClient(objs...) 84 | 85 | n := New(client) 86 | 87 | // Call the normalize function for the cluster 88 | if err := n.NormalizeCluster(cluster); err != nil { 89 | t.Fatalf("Error normalizing cluster: %s", err) 90 | } 91 | 92 | // Ensure that all matched objects were embedded properly 93 | if err := n.TestClusterSanity(cluster); err == nil { 94 | t.Fatalf("Cluster passed sanity test and shouldn't have") 95 | } 96 | } 97 | 98 | func TestValidation(t *testing.T) { 99 | // Set the logger to development mode for verbose logs. 
100 | // logf.SetLogger(logf.ZapLogger(true)) 101 | 102 | tests := []struct { 103 | obj runtime.Object 104 | missingErr ValidationError 105 | }{ 106 | { 107 | &vitessv1alpha2.VitessLockserver{ 108 | ObjectMeta: metav1.ObjectMeta{ 109 | Name: "cluster-lockserver", 110 | Namespace: testNamespace, 111 | Labels: testLabels, 112 | }, 113 | }, 114 | ValidationErrorNoLockserverForCluster, 115 | }, 116 | { 117 | &vitessv1alpha2.VitessCell{ 118 | ObjectMeta: metav1.ObjectMeta{ 119 | Name: "cell", 120 | Namespace: testNamespace, 121 | Labels: testLabels, 122 | }, 123 | Spec: vitessv1alpha2.VitessCellSpec{ 124 | LockserverRef: &corev1.LocalObjectReference{ 125 | Name: "cell-lockserver", 126 | }, 127 | }, 128 | }, 129 | ValidationErrorNoCells, 130 | }, 131 | { 132 | &vitessv1alpha2.VitessLockserver{ 133 | ObjectMeta: metav1.ObjectMeta{ 134 | Name: "cell-lockserver", 135 | Namespace: testNamespace, 136 | Labels: testLabels, 137 | }, 138 | }, 139 | ValidationErrorNoLockserverForCell, 140 | }, 141 | { 142 | &vitessv1alpha2.VitessKeyspace{ 143 | ObjectMeta: metav1.ObjectMeta{ 144 | Name: "keyspace", 145 | Namespace: testNamespace, 146 | Labels: testLabels, 147 | }, 148 | Spec: vitessv1alpha2.VitessKeyspaceSpec{ 149 | ShardSelector: testSel, 150 | }, 151 | }, 152 | ValidationErrorNoKeyspaces, 153 | }, 154 | { 155 | &vitessv1alpha2.VitessShard{ 156 | ObjectMeta: metav1.ObjectMeta{ 157 | Name: "shard", 158 | Namespace: testNamespace, 159 | Labels: testLabels, 160 | }, 161 | Spec: vitessv1alpha2.VitessShardSpec{ 162 | TabletSelector: testSel, 163 | }, 164 | }, 165 | ValidationErrorNoShards, 166 | }, 167 | { 168 | &vitessv1alpha2.VitessTablet{ 169 | ObjectMeta: metav1.ObjectMeta{ 170 | Name: "tablet", 171 | Namespace: testNamespace, 172 | Labels: testLabels, 173 | }, 174 | Spec: vitessv1alpha2.VitessTabletSpec{ 175 | TabletID: 101, 176 | CellID: "cell", 177 | }, 178 | }, 179 | ValidationErrorNoTablets, 180 | }, 181 | } 182 | 183 | // Register operator types with the runtime scheme. 184 | s := scheme.Scheme 185 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCluster{}) 186 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessClusterList{}) 187 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCell{}) 188 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCellList{}) 189 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessTablet{}) 190 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessTabletList{}) 191 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessShard{}) 192 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessShardList{}) 193 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessKeyspace{}) 194 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessKeyspaceList{}) 195 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessLockserver{}) 196 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessLockserverList{}) 197 | 198 | // Create a fake client to mock API calls. 
199 | client := fake.NewFakeClient() 200 | 201 | n := New(client) 202 | 203 | // loop through and add objs one at a time 204 | // Cluster should not be valid until all objs have been added 205 | for _, test := range tests { 206 | cluster := &vitessv1alpha2.VitessCluster{ 207 | ObjectMeta: metav1.ObjectMeta{ 208 | Name: testClusterName, 209 | Namespace: testNamespace, 210 | }, 211 | Spec: vitessv1alpha2.VitessClusterSpec{ 212 | CellSelector: testSel, 213 | KeyspaceSelector: testSel, 214 | }, 215 | } 216 | 217 | // handle special case for cluster lockserverRef 218 | if test.missingErr != ValidationErrorNoLockserverForCluster { 219 | cluster.Spec.LockserverRef = &corev1.LocalObjectReference{ 220 | Name: "cluster-lockserver", 221 | } 222 | } 223 | 224 | // check for expected error when obj is missing 225 | if err := n.NormalizeCluster(cluster); err != nil { 226 | t.Fatalf("Error normalizing cluster: %s", err) 227 | } 228 | 229 | if err := n.ValidateCluster(cluster); err != test.missingErr { 230 | t.Fatalf("Wrong error for missing resource, got: '%s'; expected: '%s'", err, test.missingErr) 231 | } 232 | 233 | // add obj 234 | // t.Logf("Creating obj of kind: %s", test.obj.(metav1.Object).GetName()) 235 | if err := client.Create(context.Background(), test.obj); err != nil { 236 | t.Fatalf("Error creating object: %s", err) 237 | } 238 | 239 | // redeclare empty cluster 240 | cluster = &vitessv1alpha2.VitessCluster{ 241 | ObjectMeta: metav1.ObjectMeta{ 242 | Name: testClusterName, 243 | Namespace: testNamespace, 244 | }, 245 | Spec: vitessv1alpha2.VitessClusterSpec{ 246 | LockserverRef: &corev1.LocalObjectReference{ 247 | Name: "cluster-lockserver", 248 | }, 249 | CellSelector: testSel, 250 | KeyspaceSelector: testSel, 251 | }, 252 | } 253 | 254 | // Make sure there is a different error 255 | if err := n.NormalizeCluster(cluster); err != nil { 256 | t.Fatalf("Error normalizing cluster: %s", err) 257 | } 258 | 259 | if err := n.ValidateCluster(cluster); err == test.missingErr { 260 | t.Fatalf("Wrong error for missing resource, got: '%s' again; expected new error", err) 261 | } 262 | } 263 | 264 | } 265 | 266 | func TestValidateTabletHostnameSizeLimit(t *testing.T) { 267 | cluster := &vitessv1alpha2.VitessCluster{} 268 | cell := &vitessv1alpha2.VitessCell{} 269 | keyspace := &vitessv1alpha2.VitessKeyspace{} 270 | shard := &vitessv1alpha2.VitessShard{} 271 | tablet := &vitessv1alpha2.VitessTablet{} 272 | 273 | tablet.SetParentCluster(cluster) 274 | tablet.SetParentCell(cell) 275 | tablet.SetParentKeyspace(keyspace) 276 | tablet.SetParentShard(shard) 277 | 278 | baseLen := getMaxExpectedTabletHostLength(tablet) 279 | 280 | tests := []struct { 281 | numChars int 282 | expected ValidationError 283 | }{ 284 | { 285 | (MaxTabletHostnameLength - baseLen) - 1, // one under max 286 | nil, 287 | }, 288 | { 289 | MaxTabletHostnameLength - baseLen, // exactly max 290 | ValidationErrorTabletNameTooLong, 291 | }, 292 | } 293 | 294 | n := New(fake.NewFakeClient()) 295 | 296 | for _, tc := range tests { 297 | // increase the final hostname by the test size 298 | tablet.Keyspace().Name = strings.Repeat("x", tc.numChars) 299 | t.Logf("%s", tablet.GetStatefulSetName()) 300 | err := n.ValidateTablet(tablet) 301 | if err != tc.expected { 302 | t.Errorf("Unexpected error: Got: %s; Expected: %s", err, tc.expected) 303 | } 304 | } 305 | } 306 | 307 | // TestSaneNormalAndValidCluster makes sure that a perfect cluster works as expected 308 | func TestSaneNormalAndValidCluster(t *testing.T) { 309 | // Set the logger to 
development mode for verbose logs. 310 | // logf.SetLogger(logf.ZapLogger(true)) 311 | 312 | // Define a minimal cluster which matches one of the cells above 313 | cluster := &vitessv1alpha2.VitessCluster{ 314 | ObjectMeta: metav1.ObjectMeta{ 315 | Name: testClusterName, 316 | Namespace: testNamespace, 317 | }, 318 | Spec: vitessv1alpha2.VitessClusterSpec{ 319 | LockserverRef: &corev1.LocalObjectReference{ 320 | Name: "cluster-lockserver", 321 | }, 322 | CellSelector: testSel, 323 | KeyspaceSelector: testSel, 324 | }, 325 | } 326 | 327 | // Populate the client with initial data 328 | objs := []runtime.Object{ 329 | &vitessv1alpha2.VitessLockserver{ 330 | ObjectMeta: metav1.ObjectMeta{ 331 | Name: "cluster-lockserver", 332 | Namespace: testNamespace, 333 | Labels: testLabels, 334 | }, 335 | }, 336 | &vitessv1alpha2.VitessLockserver{ 337 | ObjectMeta: metav1.ObjectMeta{ 338 | Name: "cell-lockserver", 339 | Namespace: testNamespace, 340 | Labels: testLabels, 341 | }, 342 | }, 343 | &vitessv1alpha2.VitessCell{ 344 | ObjectMeta: metav1.ObjectMeta{ 345 | Name: "cell", 346 | Namespace: testNamespace, 347 | Labels: testLabels, 348 | }, 349 | Spec: vitessv1alpha2.VitessCellSpec{ 350 | LockserverRef: &corev1.LocalObjectReference{ 351 | Name: "cell-lockserver", 352 | }, 353 | }, 354 | }, 355 | &vitessv1alpha2.VitessKeyspace{ 356 | ObjectMeta: metav1.ObjectMeta{ 357 | Name: "keyspace", 358 | Namespace: testNamespace, 359 | Labels: testLabels, 360 | }, 361 | Spec: vitessv1alpha2.VitessKeyspaceSpec{ 362 | ShardSelector: testSel, 363 | }, 364 | }, 365 | &vitessv1alpha2.VitessShard{ 366 | ObjectMeta: metav1.ObjectMeta{ 367 | Name: "shard", 368 | Namespace: testNamespace, 369 | Labels: testLabels, 370 | }, 371 | Spec: vitessv1alpha2.VitessShardSpec{ 372 | TabletSelector: testSel, 373 | }, 374 | }, 375 | &vitessv1alpha2.VitessTablet{ 376 | ObjectMeta: metav1.ObjectMeta{ 377 | Name: "tablet", 378 | Namespace: testNamespace, 379 | Labels: testLabels, 380 | }, 381 | Spec: vitessv1alpha2.VitessTabletSpec{ 382 | TabletID: 101, 383 | CellID: "cell", 384 | }, 385 | }, 386 | cluster, 387 | } 388 | 389 | // Register operator types with the runtime scheme. 390 | s := scheme.Scheme 391 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCluster{}) 392 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessClusterList{}) 393 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCell{}) 394 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessCellList{}) 395 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessTablet{}) 396 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessTabletList{}) 397 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessShard{}) 398 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessShardList{}) 399 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessKeyspace{}) 400 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessKeyspaceList{}) 401 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessLockserver{}) 402 | s.AddKnownTypes(vitessv1alpha2.SchemeGroupVersion, &vitessv1alpha2.VitessLockserverList{}) 403 | 404 | // Create a fake client to mock API calls. 405 | client := fake.NewFakeClient(objs...) 
406 | 407 | n := New(client) 408 | 409 | // Check Sanity 410 | if err := n.TestClusterSanity(cluster); err != nil { 411 | t.Fatalf("Cluster Sanity Test failed: %s", err) 412 | } 413 | 414 | // Call the normalize function for the cluster 415 | if err := n.NormalizeCluster(cluster); err != nil { 416 | t.Fatalf("Error normalizing cluster: %s", err) 417 | } 418 | 419 | // Ensure that all matched objects were embedded properly 420 | if err := n.ValidateCluster(cluster); err != nil { 421 | t.Fatalf("Cluster Validation failed: %s", err) 422 | } 423 | 424 | // Test Parenting 425 | for _, keyspace := range cluster.Keyspaces() { 426 | shards := keyspace.Shards() 427 | if len(shards) == 0 { 428 | t.Fatalf("No embedded shards from keyspace after normalization") 429 | } 430 | 431 | for _, shard := range shards { 432 | tablets := shard.Tablets() 433 | if len(tablets) == 0 { 434 | t.Fatalf("No embedded tablets from shard after normalization") 435 | } 436 | } 437 | } 438 | 439 | // Child tests from the top down 440 | lockserver := cluster.Lockserver() 441 | if lockserver == nil { 442 | t.Errorf("No embedded lockserver from cluster after normalization") 443 | } 444 | 445 | cells := cluster.Cells() 446 | if len(cells) == 0 { 447 | t.Errorf("No embedded cells from cluster after normalization") 448 | } 449 | 450 | shards := cluster.Shards() 451 | if len(shards) == 0 { 452 | t.Errorf("No embedded shards from cluster after normalization") 453 | } 454 | 455 | tablets := cluster.Tablets() 456 | if len(tablets) == 0 { 457 | t.Errorf("No embedded tablets from cluster after normalization") 458 | } 459 | 460 | keyspaces := cluster.Keyspaces() 461 | if len(keyspaces) == 0 { 462 | t.Errorf("No embedded keyspaces from cluster after normalization") 463 | } 464 | 465 | for _, keyspace := range keyspaces { 466 | shards := keyspace.Shards() 467 | if len(shards) == 0 { 468 | t.Errorf("No embedded shards from keyspace after normalization") 469 | } 470 | 471 | for _, shard := range shards { 472 | tablets := shard.Tablets() 473 | if len(tablets) == 0 { 474 | t.Errorf("No embedded tablets from shard after normalization") 475 | } 476 | } 477 | } 478 | 479 | // Parent tests from the bottom up 480 | 481 | // every tablet should have a parent cell, cluster, keyspace, and shard 482 | for _, tablet := range tablets { 483 | if tablet.Cell() == nil { 484 | t.Errorf("No parent cell in tablet after normalization") 485 | } 486 | if tablet.Cluster() == nil { 487 | t.Errorf("No parent cluster in tablet after normalization") 488 | } 489 | if tablet.Keyspace() == nil { 490 | t.Errorf("No parent keyspace in tablet after normalization") 491 | } 492 | if tablet.Shard() == nil { 493 | t.Errorf("No parent shard in tablet after normalization") 494 | } 495 | if tablet.Lockserver() == nil { 496 | t.Errorf("No lockserver in tablet after normalization") 497 | } else if tablet.Lockserver().GetName() != "cell-lockserver" { 498 | t.Errorf("Wrong lockserver in tablet after normalization.
Should be 'cell-lockserver', not %s", tablet.Lockserver().GetName()) 499 | } 500 | } 501 | 502 | // every shard should have a parent keyspace and cluster 503 | for _, shard := range shards { 504 | if shard.Keyspace() == nil { 505 | t.Errorf("No parent keyspace in shard after normalization") 506 | } 507 | 508 | if shard.Cluster() == nil { 509 | t.Errorf("No parent cluster in shard after normalization") 510 | } 511 | } 512 | 513 | // every keyspace should have a parent cluster 514 | for _, keyspace := range keyspaces { 515 | if keyspace.Cluster() == nil { 516 | t.Errorf("No parent cluster in keyspace after normalization") 517 | } 518 | } 519 | 520 | // every cell should have a parent cluster 521 | for _, cell := range cells { 522 | if cell.Cluster() == nil { 523 | t.Errorf("No parent cluster in cell after normalization") 524 | } 525 | } 526 | } 527 | -------------------------------------------------------------------------------- /pkg/normalizer/sanity.go: -------------------------------------------------------------------------------- 1 | package normalizer 2 | 3 | import ( 4 | "fmt" 5 | 6 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 7 | ) 8 | 9 | func (n *Normalizer) TestClusterSanity(cluster *vitessv1alpha2.VitessCluster) error { 10 | // Lockserver and LockserverRef are mutually exclusive 11 | if cluster.Spec.Lockserver != nil && cluster.Spec.LockserverRef != nil { 12 | return fmt.Errorf("Cannot specify both a lockserver and lockserverRef") 13 | } 14 | 15 | return nil 16 | } 17 | -------------------------------------------------------------------------------- /pkg/normalizer/validation.go: -------------------------------------------------------------------------------- 1 | package normalizer 2 | 3 | import ( 4 | "strings" 5 | 6 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 7 | ) 8 | 9 | const ( 10 | // If a tablet's hostname ever goes over 60 chars then it will not be able 11 | // to bootstrap properly because it will truncate the master hostname and replication will fail. 12 | // See the chart at the bottom of https://dev.mysql.com/doc/refman/8.0/en/change-master-to.html 13 | MaxTabletHostnameLength = 60 14 | 15 | // allow up to 99 replicas.
StatefulSets can go higher, but it's not likely for this use case 16 | MaxTabletOrdinalLength = 2 17 | ) 18 | 19 | func (n *Normalizer) ValidateCluster(cluster *vitessv1alpha2.VitessCluster) error { 20 | if cluster.Lockserver() == nil { 21 | return ValidationErrorNoLockserverForCluster 22 | } 23 | 24 | if len(cluster.Cells()) == 0 { 25 | return ValidationErrorNoCells 26 | } 27 | 28 | for _, cell := range cluster.Cells() { 29 | if cell.Lockserver() == nil { 30 | return ValidationErrorNoLockserverForCell 31 | } 32 | } 33 | 34 | if len(cluster.Keyspaces()) == 0 { 35 | return ValidationErrorNoKeyspaces 36 | } 37 | 38 | if len(cluster.Shards()) == 0 { 39 | return ValidationErrorNoShards 40 | } 41 | 42 | // check for overlapping keyranges 43 | // store matched keyranges across all shards so duplicates can be detected 44 | keyranges := make(map[string]struct{}) 45 | for _, shard := range cluster.Shards() { 46 | 47 | // if keyrange string is already in the map then it is a duplicate 48 | if _, ok := keyranges[shard.Spec.KeyRange.String()]; ok { 49 | return ValidationErrorOverlappingKeyrange 50 | } 51 | 52 | // set keyrange string as existing 53 | keyranges[shard.Spec.KeyRange.String()] = struct{}{} 54 | } 55 | 56 | if len(cluster.Tablets()) == 0 { 57 | return ValidationErrorNoTablets 58 | } 59 | 60 | for _, tablet := range cluster.Tablets() { 61 | if tablet.Cell() == nil { 62 | return ValidationErrorNoCellForTablet 63 | } 64 | } 65 | 66 | return nil 67 | } 68 | 69 | func (n *Normalizer) ValidateTablet(tablet *vitessv1alpha2.VitessTablet) error { 70 | if getMaxExpectedTabletHostLength(tablet) >= MaxTabletHostnameLength { 71 | return ValidationErrorTabletNameTooLong 72 | } 73 | 74 | return nil 75 | } 76 | 77 | // getMaxExpectedTabletHostLength returns the maximum possible hostname of 78 | // this tablet given the max ordinal length allowed 79 | func getMaxExpectedTabletHostLength(tablet *vitessv1alpha2.VitessTablet) int { 80 | return len(strings.Join([]string{ 81 | tablet.GetStatefulSetName(), 82 | "-", 83 | strings.Repeat("9", MaxTabletOrdinalLength), 84 | ".", 85 | tablet.Cluster().GetTabletServiceName(), 86 | }, "")) 87 | } 88 | -------------------------------------------------------------------------------- /pkg/util/scripts/init-mysql-creds.go: -------------------------------------------------------------------------------- 1 | package scripts 2 | 3 | var ( 4 | InitMySQLCreds = ` 5 | set -ex 6 | creds=$(cat < /mysqlcreds/creds.json 19 | ` 20 | ) 21 | -------------------------------------------------------------------------------- /pkg/util/scripts/init_replica_master.go: -------------------------------------------------------------------------------- 1 | package scripts 2 | 3 | var ( 4 | InitReplicaMaster = ` 5 | set -ex 6 | 7 | VTCTLD_SVC={{ .Cluster.Name }}-{{ .Cell.Name }}-vtctld.{{ .Cluster.Namespace }}:15999 8 | SECONDS=0 9 | TIMEOUT_SECONDS=600 10 | VTCTL_EXTRA_FLAGS=() 11 | 12 | # poll every 5 seconds to see if vtctld is ready 13 | until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ .Cell.Name }} > /dev/null 2>&1; do 14 | if (( $SECONDS > $TIMEOUT_SECONDS )); then 15 | echo "timed out waiting for vtctlclient to be ready" 16 | exit 1 17 | fi 18 | sleep 5 19 | done 20 | 21 | until [ $TABLETS_READY ]; do 22 | # get all the tablets in the current cell 23 | cellTablets="$(vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC ListAllTablets {{ .Cell.Name }})" 24 | 25 | # filter to only the tablets in our current shard 26 | shardTablets=$( echo "$cellTablets" | grep -w '{{ .Cluster.Name }}-{{ .Cell.Name }}-{{
.Keyspace.Name }}-{{ .Shard.Name }}' || : ) 27 | 28 | # check for a master tablet from the ListAllTablets call 29 | masterTablet=$( echo "$shardTablets" | awk '$4 == "master" {print $1}') 30 | if [ $masterTablet ]; then 31 | echo "'$masterTablet' is already the master tablet, exiting without running InitShardMaster" 32 | exit 33 | fi 34 | 35 | # check for a master tablet from the GetShard call 36 | master_alias=$(vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC GetShard {{ .Keyspace.Name }}/{{ .Shard.Spec.KeyRange }} | jq '.master_alias.uid') 37 | if [ "$master_alias" != "null" -a "$master_alias" != "" ]; then 38 | echo "'$master_alias' is already the master tablet, exiting without running InitShardMaster" 39 | exit 40 | fi 41 | 42 | # count the number of newlines for the given shard to get the tablet count 43 | tabletCount=$( echo "$shardTablets" | wc | awk '{print $1}') 44 | 45 | # check to see if the tablet count equals the expected tablet count 46 | if [ $tabletCount == 2 ]; then 47 | TABLETS_READY=true 48 | else 49 | if (( $SECONDS > $TIMEOUT_SECONDS )); then 50 | echo "timed out waiting for tablets to be ready" 51 | exit 1 52 | fi 53 | 54 | # wait 5 seconds for vttablets to continue getting ready 55 | sleep 5 56 | fi 57 | 58 | done 59 | 60 | # find the tablet id for the "-replica-0" stateful set for a given cell, keyspace and shard 61 | tablet_id=$( echo "$shardTablets" | grep -w '{{ .ScopedName }}-replica-0' | awk '{print $1}') 62 | 63 | # initialize the shard master 64 | until vtctlclient ${VTCTL_EXTRA_FLAGS[@]} -server $VTCTLD_SVC InitShardMaster -force {{ .Keyspace.Name }}/{{ .Shard.Spec.KeyRange }} $tablet_id; do 65 | if (( $SECONDS > $TIMEOUT_SECONDS )); then 66 | echo "timed out waiting for InitShardMaster to succeed" 67 | exit 1 68 | fi 69 | sleep 5 70 | done 71 | ` 72 | ) 73 | -------------------------------------------------------------------------------- /pkg/util/scripts/main.go: -------------------------------------------------------------------------------- 1 | package scripts 2 | 3 | import ( 4 | "bytes" 5 | "fmt" 6 | "text/template" 7 | 8 | "k8s.io/apimachinery/pkg/runtime" 9 | 10 | vitessv1alpha2 "vitess.io/vitess-operator/pkg/apis/vitess/v1alpha2" 11 | ) 12 | 13 | type ContainerScriptGenerator struct { 14 | ContainerType string 15 | Object runtime.Object 16 | Init string 17 | Start string 18 | PreStop string 19 | } 20 | 21 | func NewContainerScriptGenerator(containerType string, obj runtime.Object) *ContainerScriptGenerator { 22 | return &ContainerScriptGenerator{ 23 | ContainerType: containerType, 24 | Object: obj, 25 | } 26 | } 27 | 28 | func (csg *ContainerScriptGenerator) Generate() error { 29 | var err error 30 | switch csg.ContainerType { 31 | case "vttablet": 32 | csg.Init, err = csg.getTemplatedScript("vttabletinit", VTTabletInitTemplate) 33 | if err != nil { 34 | return err 35 | } 36 | csg.Start, err = csg.getTemplatedScript("vttabletstart", VTTabletStartTemplate) 37 | if err != nil { 38 | return err 39 | } 40 | csg.PreStop, err = csg.getTemplatedScript("vttabletPreStop", VTTabletPreStopTemplate) 41 | if err != nil { 42 | return err 43 | } 44 | case "mysql": 45 | csg.Init, err = csg.getTemplatedScript("mysqlinit", MySQLInitTemplate) 46 | if err != nil { 47 | return err 48 | } 49 | csg.Start, err = csg.getTemplatedScript("mysqlstart", MySQLStartTemplate) 50 | if err != nil { 51 | return err 52 | } 53 | csg.PreStop, err = csg.getTemplatedScript("mysqlPreStop", MySQLPreStopTemplate) 54 | if err != nil { 55 | return err 56 | } 57 | case
"init_replica_master": 58 | csg.Start, err = csg.getTemplatedScript("init_replica_master", InitReplicaMaster) 59 | if err != nil { 60 | return err 61 | } 62 | case "vtctld": 63 | csg.Start, err = csg.getTemplatedScript("vtctld", VtCtldStart) 64 | if err != nil { 65 | return err 66 | } 67 | case "vtgate": 68 | csg.Start, err = csg.getTemplatedScript("vtgate", VTGateStart) 69 | if err != nil { 70 | return err 71 | } 72 | case "init-mysql-creds": 73 | csg.Start, err = csg.getTemplatedScript("init-mysql-creds", InitMySQLCreds) 74 | if err != nil { 75 | return err 76 | } 77 | default: 78 | return fmt.Errorf("Unsupported container type: %s", csg.ContainerType) 79 | } 80 | 81 | return nil 82 | } 83 | 84 | func (csg *ContainerScriptGenerator) getTemplatedScript(name string, templateStr string) (string, error) { 85 | tmpl, err := template.New(name).Parse(templateStr) 86 | if err != nil { 87 | return "", err 88 | } 89 | 90 | // if tablet, ok := csg.Object.(*vitessv1alpha2.VitessTablet); ok { 91 | // return getTemplatedScriptForTablet(name, templateStr) 92 | // } 93 | 94 | // if cell, ok := csg.Object.(*vitessv1alpha2.VitessTablet); ok { 95 | // return getTemplatedScriptForTablet(name, templateStr) 96 | // } 97 | // } 98 | 99 | // func (csg *ContainerScriptGenerator) getTemplatedScriptForTablet(name string, templateStr string) (string, error) { 100 | // Params are different depending on the resource type 101 | 102 | // For simplicity, the tablet and all parent objects are passed to the template. 103 | // This is safe while the templates are hard-coded. But if templates are ever made 104 | // end-user configurable could would potentially expose too much data and would need to be sanitized 105 | var params map[string]interface{} 106 | 107 | // Configure tablet params 108 | if tablet, ok := csg.Object.(*vitessv1alpha2.VitessTablet); ok { 109 | params = map[string]interface{}{ 110 | "LocalLockserver": tablet.Lockserver(), 111 | "GlobalLockserver": tablet.Cluster().Lockserver(), 112 | "Cluster": tablet.Cluster(), 113 | "Cell": tablet.Cell(), 114 | "Keyspace": tablet.Keyspace(), 115 | "Shard": tablet.Shard(), 116 | "Tablet": tablet, 117 | "ScopedName": tablet.GetScopedName(), 118 | } 119 | } 120 | 121 | // Configure shard params 122 | if cell, ok := csg.Object.(*vitessv1alpha2.VitessCell); ok { 123 | params = map[string]interface{}{ 124 | "LocalLockserver": cell.Lockserver(), 125 | "GlobalLockserver": cell.Cluster().Lockserver(), 126 | "Cluster": cell.Cluster(), 127 | "Cell": cell, 128 | "ScopedName": cell.GetScopedName(), 129 | } 130 | } 131 | 132 | var out bytes.Buffer 133 | err = tmpl.Execute(&out, params) 134 | if err != nil { 135 | return "", err 136 | } 137 | 138 | return out.String(), nil 139 | } 140 | -------------------------------------------------------------------------------- /pkg/util/scripts/mysql.go: -------------------------------------------------------------------------------- 1 | package scripts 2 | 3 | var ( 4 | MySQLInitTemplate = ` 5 | set -ex 6 | # set up the directories vitess needs 7 | mkdir -p /vttmp/bin 8 | mkdir -p /vtdataroot/tabletdata 9 | 10 | # copy necessary assets to the volumeMounts 11 | cp /vt/bin/mysqlctld /vttmp/bin/ 12 | cp /bin/busybox /vttmp/bin/ 13 | cp -R /vt/config /vttmp/ 14 | 15 | # make sure the log files exist 16 | touch /vtdataroot/tabletdata/error.log 17 | touch /vtdataroot/tabletdata/slow-query.log 18 | touch /vtdataroot/tabletdata/general.log 19 | 20 | # remove the old socket file if it is still around 21 | rm -f /vtdataroot/tabletdata/mysql.sock 22 | ` 
23 | 24 | MySQLStartTemplate = ` 25 | set -ex 26 | if [ "$VT_DB_FLAVOR" = "percona" ]; then 27 | MYSQL_FLAVOR=Percona 28 | 29 | elif [ "$VT_DB_FLAVOR" = "mysql" ]; then 30 | MYSQL_FLAVOR=MySQL56 31 | 32 | elif [ "$VT_DB_FLAVOR" = "mysql56" ]; then 33 | MYSQL_FLAVOR=MySQL56 34 | 35 | elif [ "$VT_DB_FLAVOR" = "maria" ]; then 36 | MYSQL_FLAVOR=MariaDB 37 | 38 | elif [ "$VT_DB_FLAVOR" = "mariadb" ]; then 39 | MYSQL_FLAVOR=MariaDB 40 | 41 | elif [ "$VT_DB_FLAVOR" = "mariadb103" ]; then 42 | MYSQL_FLAVOR=MariaDB103 43 | 44 | fi 45 | 46 | export MYSQL_FLAVOR 47 | export EXTRA_MY_CNF="/vtdataroot/tabletdata/report-host.cnf:/vt/config/mycnf/rbr.cnf" 48 | 49 | 50 | 51 | eval exec /vt/bin/mysqlctld $(cat < /vtdataroot/tabletdata/tablet-uid 24 | 25 | # Tell MySQL what hostname to report in SHOW SLAVE HOSTS. 26 | echo report-host=$hostname.{{ .Cluster.Name }}-tab > /vtdataroot/tabletdata/report-host.cnf 27 | 28 | # Orchestrator looks there, so it should match -tablet_hostname above. 29 | 30 | # make sure that etcd is initialized 31 | eval exec /vt/bin/vtctl $(cat <