├── .circleci └── config.yml ├── .gitignore ├── Build.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── OWNERS ├── README.md ├── build-community-repo.sh ├── build-operator.sh ├── clean-build.sh ├── docs ├── Developing.md ├── Publishing.md └── README.md ├── kudo-test.yaml.tmpl ├── kuttl-test.yaml ├── repository ├── cassandra │ └── 3.11 │ │ ├── README.md │ │ ├── docs │ │ ├── README.md │ │ ├── accessing.md │ │ ├── architecture.md │ │ ├── backup.md │ │ ├── decommission.md │ │ ├── evicting-nodes.md │ │ ├── images │ │ │ ├── architecture.png │ │ │ ├── branch.png │ │ │ ├── cassandra-dashboard.png │ │ │ ├── multi-dc-arch.png │ │ │ ├── run-on-tag.png │ │ │ ├── run-with-param.png │ │ │ ├── tag.png │ │ │ └── upload.png │ │ ├── installing.md │ │ ├── managing.md │ │ ├── monitoring.md │ │ ├── multidatacenter.md │ │ ├── parameters.md │ │ ├── production.md │ │ ├── repair.md │ │ ├── resources.md │ │ ├── security.md │ │ └── upgrading.md │ │ └── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ ├── backup-job.yaml │ │ ├── cassandra-env-sh.yaml │ │ ├── cassandra-exporter-config-yml.yaml │ │ ├── cassandra-role-sa.yaml │ │ ├── cassandra-topology.yaml │ │ ├── external-service.yaml │ │ ├── generate-cassandra-yaml.yaml │ │ ├── generate-cqlshrc-sh.yaml │ │ ├── generate-nodetool-ssl-properties.yaml │ │ ├── generate-tls-artifacts-sh.yaml │ │ ├── jvm-options.yaml │ │ ├── medusa-config-ini.yaml │ │ ├── node-rbac.yaml │ │ ├── node-resolver-rbac.yaml │ │ ├── node-scripts.yaml │ │ ├── pdb.yaml │ │ ├── recovery-controller-rbac.yaml │ │ ├── recovery-controller.yaml │ │ ├── repair-job.yaml │ │ ├── service-monitor.yaml │ │ ├── service.yaml │ │ ├── stateful-set.yaml │ │ └── tls-store-credentials.yaml ├── confluent-rest-proxy │ ├── README.md │ ├── docs │ │ └── v1.0 │ │ │ ├── runbook_custom_configuration.md │ │ │ └── security.md │ ├── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ │ ├── bootstrap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── enable-tls.yaml │ 
│ │ └── service.yaml │ └── tests │ │ └── rest-proxy-upgrade-test │ │ ├── 00-assert.yaml │ │ └── 00-install.yaml ├── confluent-schema-registry │ ├── README.md │ ├── docs │ │ └── v1.0 │ │ │ ├── runbook_custom_configuration.md │ │ │ └── security.md │ ├── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ │ ├── bootstrap.yaml │ │ │ ├── deployment.yaml │ │ │ ├── enable-tls.yaml │ │ │ └── service.yaml │ └── tests │ │ └── schema-registry-upgrade-test │ │ ├── 00-assert.yaml │ │ └── 00-install.yaml ├── cowsay │ ├── README.md │ └── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ ├── deployment.yaml │ │ └── pipe-pod.yaml ├── elastic │ ├── docs │ │ ├── README.md │ │ ├── install.md │ │ ├── udpate.md │ │ └── use.md │ └── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ ├── coordinator-service.yaml │ │ ├── coordinator.yaml │ │ ├── data-service.yaml │ │ ├── data.yaml │ │ ├── ingest-service.yaml │ │ ├── ingest.yaml │ │ ├── master-service.yaml │ │ └── master.yaml ├── first-operator │ ├── docs │ │ └── README.md │ └── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ └── deployment.yaml ├── flink │ ├── docs │ │ ├── README.md │ │ └── demo │ │ │ ├── financial-fraud │ │ │ ├── README.md │ │ │ └── demo-operator │ │ │ │ ├── operator.yaml │ │ │ │ ├── params.yaml │ │ │ │ └── templates │ │ │ │ ├── actor.yaml │ │ │ │ ├── flink-params.yaml │ │ │ │ ├── generator.yaml │ │ │ │ ├── kafka-params.yaml │ │ │ │ ├── uploader.yaml │ │ │ │ └── zookeeper-params.yaml │ │ │ └── modifications │ │ │ ├── README.md │ │ │ ├── flinkapplication-framework.yaml │ │ │ ├── flinkapplication-frameworkversion.yaml │ │ │ ├── flinkapplication-instance.yaml │ │ │ ├── flinkcluster-framework.yaml │ │ │ ├── flinkcluster-frameworkversion.yaml │ │ │ ├── flinkcluster-instance.yaml │ │ │ ├── scratch │ │ │ ├── restart.yaml │ │ │ ├── stop.yaml │ │ │ └── submit.yaml │ │ │ └── submitter │ │ │ ├── Dockerfile │ │ │ ├── shutdown.sh │ │ │ └── 
submit.sh │ └── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ ├── jobmanager-pdb.yaml │ │ ├── jobmanager-statefulset.yaml │ │ ├── services.yaml │ │ ├── storage.yaml │ │ └── taskmanager-deployment.yaml ├── kafka │ ├── README.md │ ├── docs │ │ ├── latest │ │ │ ├── README.md │ │ │ ├── concepts.md │ │ │ ├── configuration.md │ │ │ ├── cruise-control.md │ │ │ ├── custom.md │ │ │ ├── debug-kafka.md │ │ │ ├── external-access-runbook.md │ │ │ ├── external-access.md │ │ │ ├── install.md │ │ │ ├── kafka-connect.md │ │ │ ├── kudo-kafka-runbook.md │ │ │ ├── limitations.md │ │ │ ├── mirrormaker.md │ │ │ ├── monitoring.md │ │ │ ├── production.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── grafana-capture.png │ │ │ │ ├── grafana-dashboard.json │ │ │ │ ├── images │ │ │ │ │ ├── external-access-loadbalancer.png │ │ │ │ │ ├── external-access-nodeports.png │ │ │ │ │ ├── grafana-user-workload.png │ │ │ │ │ ├── internal-access.png │ │ │ │ │ ├── kafka-cluster.png │ │ │ │ │ ├── kafka-producer-consumer.png │ │ │ │ │ ├── kudo-controller-kafka.png │ │ │ │ │ ├── kudo-installs-kafka.png │ │ │ │ │ ├── kudo-kafka.png │ │ │ │ │ ├── kudo-update-kafka.png │ │ │ │ │ ├── operator-upgrade-1.png │ │ │ │ │ └── operator-upgrade-2.png │ │ │ │ └── service-monitor.yaml │ │ │ ├── security.md │ │ │ ├── update.md │ │ │ ├── upgrade-kafka.md │ │ │ ├── upgrade.md │ │ │ └── versions.md │ │ ├── v1.0 │ │ │ ├── README.md │ │ │ ├── concepts.md │ │ │ ├── configuration.md │ │ │ ├── cruise-control.md │ │ │ ├── custom.md │ │ │ ├── external-access.md │ │ │ ├── install.md │ │ │ ├── kudo-kafka-runbook.md │ │ │ ├── limitations.md │ │ │ ├── mirrormaker.md │ │ │ ├── monitoring.md │ │ │ ├── production.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── grafana-capture.png │ │ │ │ ├── grafana-dashboard.json │ │ │ │ ├── images │ │ │ │ │ ├── external-access-loadbalancer.png │ │ │ │ │ ├── external-access-nodeports.png │ │ │ │ │ ├── grafana-user-workload.png │ │ │ │ │ ├── 
internal-access.png │ │ │ │ │ ├── kafka-cluster.png │ │ │ │ │ ├── kafka-producer-consumer.png │ │ │ │ │ ├── kudo-controller-kafka.png │ │ │ │ │ ├── kudo-installs-kafka.png │ │ │ │ │ ├── kudo-kafka.png │ │ │ │ │ ├── kudo-update-kafka.png │ │ │ │ │ ├── operator-upgrade-1.png │ │ │ │ │ └── operator-upgrade-2.png │ │ │ │ └── service-monitor.yaml │ │ │ ├── security.md │ │ │ ├── update.md │ │ │ ├── upgrade.md │ │ │ └── versions.md │ │ ├── v1.1 │ │ │ ├── README.md │ │ │ ├── concepts.md │ │ │ ├── configuration.md │ │ │ ├── custom.md │ │ │ ├── external-access.md │ │ │ ├── install.md │ │ │ ├── kudo-kafka-runbook.md │ │ │ ├── limitations.md │ │ │ ├── mirrormaker.md │ │ │ ├── monitoring.md │ │ │ ├── production.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── grafana-capture.png │ │ │ │ ├── grafana-dashboard.json │ │ │ │ ├── images │ │ │ │ │ ├── external-access-loadbalancer.png │ │ │ │ │ ├── external-access-nodeports.png │ │ │ │ │ ├── grafana-user-workload.png │ │ │ │ │ ├── internal-access.png │ │ │ │ │ ├── kafka-cluster.png │ │ │ │ │ ├── kafka-producer-consumer.png │ │ │ │ │ ├── kudo-controller-kafka.png │ │ │ │ │ ├── kudo-installs-kafka.png │ │ │ │ │ ├── kudo-kafka.png │ │ │ │ │ ├── kudo-update-kafka.png │ │ │ │ │ ├── operator-upgrade-1.png │ │ │ │ │ └── operator-upgrade-2.png │ │ │ │ └── service-monitor.yaml │ │ │ ├── security.md │ │ │ ├── update.md │ │ │ ├── upgrade.md │ │ │ └── versions.md │ │ ├── v1.2 │ │ │ ├── README.md │ │ │ ├── concepts.md │ │ │ ├── configuration.md │ │ │ ├── cruise-control.md │ │ │ ├── custom.md │ │ │ ├── debug-kafka.md │ │ │ ├── external-access-runbook.md │ │ │ ├── external-access.md │ │ │ ├── install.md │ │ │ ├── kafka-connect.md │ │ │ ├── kudo-kafka-runbook.md │ │ │ ├── limitations.md │ │ │ ├── mirrormaker.md │ │ │ ├── monitoring.md │ │ │ ├── production.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── grafana-capture.png │ │ │ │ ├── grafana-dashboard.json │ │ │ │ ├── images │ │ │ │ │ ├── external-access-loadbalancer.png │ 
│ │ │ │ ├── external-access-nodeports.png │ │ │ │ │ ├── grafana-user-workload.png │ │ │ │ │ ├── internal-access.png │ │ │ │ │ ├── kafka-cluster.png │ │ │ │ │ ├── kafka-producer-consumer.png │ │ │ │ │ ├── kudo-controller-kafka.png │ │ │ │ │ ├── kudo-installs-kafka.png │ │ │ │ │ ├── kudo-kafka.png │ │ │ │ │ ├── kudo-update-kafka.png │ │ │ │ │ ├── operator-upgrade-1.png │ │ │ │ │ └── operator-upgrade-2.png │ │ │ │ └── service-monitor.yaml │ │ │ ├── security.md │ │ │ ├── update.md │ │ │ ├── upgrade-kafka.md │ │ │ ├── upgrade.md │ │ │ └── versions.md │ │ └── v1.3 │ │ │ ├── README.md │ │ │ ├── concepts.md │ │ │ ├── configuration.md │ │ │ ├── cruise-control.md │ │ │ ├── custom.md │ │ │ ├── debug-kafka.md │ │ │ ├── external-access-runbook.md │ │ │ ├── external-access.md │ │ │ ├── install.md │ │ │ ├── kafka-connect.md │ │ │ ├── kudo-kafka-runbook.md │ │ │ ├── limitations.md │ │ │ ├── mirrormaker.md │ │ │ ├── monitoring.md │ │ │ ├── production.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ ├── grafana-capture.png │ │ │ ├── grafana-dashboard.json │ │ │ ├── images │ │ │ │ ├── external-access-loadbalancer.png │ │ │ │ ├── external-access-nodeports.png │ │ │ │ ├── grafana-user-workload.png │ │ │ │ ├── internal-access.png │ │ │ │ ├── kafka-cluster.png │ │ │ │ ├── kafka-producer-consumer.png │ │ │ │ ├── kudo-controller-kafka.png │ │ │ │ ├── kudo-installs-kafka.png │ │ │ │ ├── kudo-kafka.png │ │ │ │ ├── kudo-update-kafka.png │ │ │ │ ├── operator-upgrade-1.png │ │ │ │ └── operator-upgrade-2.png │ │ │ └── service-monitor.yaml │ │ │ ├── security.md │ │ │ ├── update.md │ │ │ ├── upgrade-kafka.md │ │ │ ├── upgrade.md │ │ │ └── versions.md │ ├── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ │ ├── bootstrap.yaml │ │ │ ├── cert-generator.yaml │ │ │ ├── clusterrole.yaml │ │ │ ├── clusterrolebinding.yaml │ │ │ ├── cruise-control-config.yaml │ │ │ ├── cruise-control-service.yaml │ │ │ ├── cruise-control.yaml │ │ │ ├── enable-tls.yaml │ │ │ ├── 
external-service.yaml │ │ │ ├── health-check.yaml │ │ │ ├── jaas-config.yaml │ │ │ ├── kafka-connect-config.yaml │ │ │ ├── kafka-connect-service.yaml │ │ │ ├── kafka-connect-setup.yaml │ │ │ ├── kafka-connect.yaml │ │ │ ├── krb5-config.yaml │ │ │ ├── metrics-config.yaml │ │ │ ├── mirror-maker-config.yaml │ │ │ ├── mirror-maker.yaml │ │ │ ├── pdb.yaml │ │ │ ├── role.yaml │ │ │ ├── rolebinding.yaml │ │ │ ├── server.properties.yaml │ │ │ ├── service-monitor.yaml │ │ │ ├── service.yaml │ │ │ ├── serviceaccount.yaml │ │ │ ├── statefulset.yaml │ │ │ └── user-workload.yaml │ └── tests │ │ └── kafka-upgrade-test │ │ ├── 00-assert.yaml │ │ ├── 00-install.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-install.yaml │ │ ├── 02-assert.yaml │ │ └── 02-resize.yaml ├── mysql │ └── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ ├── backup-pv.yaml │ │ ├── backup.yaml │ │ ├── init.yaml │ │ ├── mysql.yaml │ │ └── restore.yaml ├── rabbitmq │ ├── docs │ │ └── README.md │ └── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ ├── configmap.yaml │ │ ├── role.yaml │ │ ├── rolebinding.yaml │ │ ├── service.yaml │ │ ├── serviceaccount.yaml │ │ └── statefulset.yaml ├── redis │ ├── docs │ │ └── README.md │ └── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ ├── configmap.yaml │ │ ├── init.yaml │ │ ├── pdb.yaml │ │ ├── service.yaml │ │ └── statefulset.yaml ├── spark │ ├── README.md │ ├── docs │ │ ├── 2.4.4-0.2.0 │ │ │ ├── README.md │ │ │ ├── advanced-configuration.md │ │ │ ├── configuration.md │ │ │ ├── history-server.md │ │ │ ├── installation.md │ │ │ ├── limitations.md │ │ │ ├── monitoring.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── dashboards │ │ │ │ │ ├── grafana_spark_applications.json │ │ │ │ │ └── grafana_spark_operator.json │ │ │ │ ├── img │ │ │ │ │ ├── ha.png │ │ │ │ │ ├── spark-ui-1.png │ │ │ │ │ └── spark-ui-2.png │ │ │ │ ├── monitoring │ │ │ │ │ └── spark-application-with-metrics.yaml │ │ │ │ ├── 
spark-pi-service.yaml │ │ │ │ └── spark-pi.yaml │ │ │ ├── submission.md │ │ │ └── versions.md │ │ ├── 2.4.5-1.0.0 │ │ │ ├── README.md │ │ │ ├── advanced-configuration.md │ │ │ ├── configuration.md │ │ │ ├── history-server.md │ │ │ ├── installation.md │ │ │ ├── kerberos.md │ │ │ ├── limitations.md │ │ │ ├── monitoring.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── dashboards │ │ │ │ │ ├── grafana_spark_applications.json │ │ │ │ │ └── grafana_spark_operator.json │ │ │ │ ├── img │ │ │ │ │ ├── ha.png │ │ │ │ │ ├── spark-ui-1.png │ │ │ │ │ └── spark-ui-2.png │ │ │ │ ├── monitoring │ │ │ │ │ └── spark-application-with-metrics.yaml │ │ │ │ ├── spark-pi-service.yaml │ │ │ │ └── spark-pi.yaml │ │ │ ├── security.md │ │ │ ├── submission.md │ │ │ └── versions.md │ │ ├── 2.4.5-1.0.1 │ │ │ ├── README.md │ │ │ ├── advanced-configuration.md │ │ │ ├── configuration.md │ │ │ ├── history-server.md │ │ │ ├── installation.md │ │ │ ├── kerberos.md │ │ │ ├── limitations.md │ │ │ ├── monitoring.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── dashboards │ │ │ │ │ ├── grafana_spark_applications.json │ │ │ │ │ └── grafana_spark_operator.json │ │ │ │ ├── img │ │ │ │ │ ├── ha.png │ │ │ │ │ ├── spark-ui-1.png │ │ │ │ │ └── spark-ui-2.png │ │ │ │ ├── monitoring │ │ │ │ │ └── spark-application-with-metrics.yaml │ │ │ │ ├── spark-pi-service.yaml │ │ │ │ └── spark-pi.yaml │ │ │ ├── security.md │ │ │ ├── submission.md │ │ │ └── versions.md │ │ ├── 3.0.0-1.1.0 │ │ │ ├── README.md │ │ │ ├── advanced-configuration.md │ │ │ ├── configuration.md │ │ │ ├── history-server.md │ │ │ ├── installation.md │ │ │ ├── kerberos.md │ │ │ ├── limitations.md │ │ │ ├── monitoring.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── dashboards │ │ │ │ │ ├── grafana_spark_applications.json │ │ │ │ │ └── grafana_spark_operator.json │ │ │ │ ├── img │ │ │ │ │ ├── ha.png │ │ │ │ │ ├── spark-ui-1.png │ │ │ │ │ └── spark-ui-2.png │ │ │ │ ├── monitoring │ │ │ │ │ └── 
spark-application-with-metrics.yaml │ │ │ │ ├── spark-pi-service.yaml │ │ │ │ └── spark-pi.yaml │ │ │ ├── security.md │ │ │ ├── submission.md │ │ │ └── versions.md │ │ ├── beta1 │ │ │ ├── README.md │ │ │ ├── configuration.md │ │ │ ├── history-server.md │ │ │ ├── installation.md │ │ │ ├── limitations.md │ │ │ ├── monitoring.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ │ ├── dashboards │ │ │ │ │ ├── grafana_spark_applications.json │ │ │ │ │ └── grafana_spark_operator.json │ │ │ │ ├── img │ │ │ │ │ ├── spark-ui-1.png │ │ │ │ │ └── spark-ui-2.png │ │ │ │ ├── monitoring │ │ │ │ │ ├── spark-application-metrics-service.yaml │ │ │ │ │ ├── spark-application-with-metrics.yaml │ │ │ │ │ ├── spark-operator-metrics-service.yaml │ │ │ │ │ └── spark-service-monitor.yaml │ │ │ │ ├── spark-pi-service.yaml │ │ │ │ └── spark-pi.yaml │ │ │ ├── submission.md │ │ │ └── versions.md │ │ └── latest │ │ │ ├── README.md │ │ │ ├── advanced-configuration.md │ │ │ ├── configuration.md │ │ │ ├── history-server.md │ │ │ ├── installation.md │ │ │ ├── kerberos.md │ │ │ ├── limitations.md │ │ │ ├── monitoring.md │ │ │ ├── release-notes.md │ │ │ ├── resources │ │ │ ├── dashboards │ │ │ │ ├── grafana_spark_applications.json │ │ │ │ └── grafana_spark_operator.json │ │ │ ├── img │ │ │ │ ├── ha.png │ │ │ │ ├── spark-ui-1.png │ │ │ │ └── spark-ui-2.png │ │ │ ├── monitoring │ │ │ │ └── spark-application-with-metrics.yaml │ │ │ ├── spark-pi-service.yaml │ │ │ └── spark-pi.yaml │ │ │ ├── security.md │ │ │ ├── submission.md │ │ │ └── versions.md │ ├── operator │ │ ├── operator.yaml │ │ ├── params.yaml │ │ └── templates │ │ │ ├── spark-history-server-deployment.yaml │ │ │ ├── spark-history-server-service.yaml │ │ │ ├── spark-monitoring.yaml │ │ │ ├── spark-operator-crds.yaml │ │ │ ├── spark-operator-deployment.yaml │ │ │ ├── spark-operator-rbac.yaml │ │ │ ├── spark-operator-serviceaccount.yaml │ │ │ ├── spark-rbac.yaml │ │ │ ├── spark-serviceaccount.yaml │ │ │ ├── webhook-init-job.yaml │ │ │ └── 
webhook-service.yaml │ └── tests │ │ └── spark-job │ │ ├── 00-assert.yaml │ │ ├── 01-assert.yaml │ │ ├── 01-create-ns.yaml │ │ ├── 02-assert.yaml │ │ ├── 02-install-spark.yaml │ │ ├── 03-assert.yaml │ │ ├── 03-submit-spark-job.yaml │ │ ├── 04-delete-ns.yaml │ │ ├── 04-errors.yaml │ │ └── spark-pi.yaml └── zookeeper │ ├── README.md │ ├── docs │ └── latest │ │ ├── README.md │ │ ├── configuration.md │ │ ├── install.md │ │ └── limitations.md │ ├── operator │ ├── operator.yaml │ ├── params.yaml │ └── templates │ │ ├── bootstrap.sh.yaml │ │ ├── healthcheck.sh.yaml │ │ ├── pdb.yaml │ │ ├── services.yaml │ │ ├── statefulset.yaml │ │ └── validation.yaml │ └── tests │ └── zookeeper-upgrade-test │ ├── 00-assert.yaml │ ├── 00-install.yaml │ ├── 01-assert.yaml │ └── 01-resize.yaml └── test └── kind ├── kubernetes-1.16.9.yaml └── kubernetes-1.17.5.yaml /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | jobs: 4 | test: 5 | machine: 6 | enabled: true 7 | parameters: 8 | kubernetes_version: 9 | type: string 10 | description: "Run KUDO Operators tests against a Kubernetes cluster." 11 | steps: 12 | - checkout 13 | - run: make KUBERNETES_VERSION=<< parameters.kubernetes_version >> test 14 | 15 | workflows: 16 | test-1.17: 17 | jobs: 18 | - test: 19 | kubernetes_version: 1.17.5 20 | 21 | test-1.16: 22 | jobs: 23 | - test: 24 | kubernetes_version: 1.16.9 25 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Binaries for programs and plugins 2 | /.idea 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | build/* 9 | .kudo/ 10 | 11 | # Test binary, build with `go test -c` 12 | *.test 13 | 14 | # Output of the go coverage tool, specifically when used with LiteIDE 15 | *.out 16 | 17 | # binaries kind and kubectl from make are in bin 18 | bin 19 | # from make create-cluster. 
We don't store these. 20 | go.mod 21 | go.sum 22 | kubeconfig 23 | .kube 24 | artifacts/ 25 | 26 | # OSX files 27 | .DS_Store 28 | 29 | # tarballs 30 | *.tgz 31 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md 2 | 3 | approvers: 4 | - alenkacz 5 | - fabianbaier 6 | - gerred 7 | - kensipe 8 | - jbarrick-mesosphere 9 | - zmalik 10 | 11 | reviewers: 12 | - alenkacz 13 | - fabianbaier 14 | - gerred 15 | - guenter 16 | - jbarrick-mesosphere 17 | - joerg84 18 | - kensipe 19 | - runyontr 20 | - zmalik 21 | -------------------------------------------------------------------------------- /build-community-repo.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # script to make "community" operator repository index file 4 | # using the build/repo as a source and merging with an operator repository 5 | 6 | 7 | set -o errexit 8 | set -o nounset 9 | set -o pipefail 10 | 11 | CMD=kubectl-kudo 12 | REPO_DIR=build/repo 13 | 14 | 15 | # script requires kudo `kubectl-kudo` to be installed and in the path 16 | command -v $CMD >/dev/null 2>&1 || { echo >&2 "$CMD is required in the path. Aborting."; exit 1; } 17 | 18 | 19 | # output of kudo version for human or ci logs 20 | version=$($CMD version) 21 | echo "Using $version" 22 | 23 | # the build dir must be created 24 | # we don't "clean" it because a user may want to build multiple operators to it at a time 25 | 26 | if [[ ! -d "${REPO_DIR}" ]]; then 27 | # this script doesn't make a repo build dir... 
fails if it doesn't exist 28 | echo 1>&2 "$REPO_DIR does not exist" 29 | exit 1 30 | fi 31 | 32 | 33 | # test to confirm community repo is configured 34 | # this command will fail non-zero unless it exists 35 | $CMD repo context community 36 | 37 | $CMD repo index $REPO_DIR --merge-repo community --url-repo community 38 | -------------------------------------------------------------------------------- /build-operator.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # script to make kudo operators 4 | # takes form of `./build-operator.sh repository/kafka/operator/` 5 | # or `./build-operator kafka` which assumes the above common layout 6 | # or `./build-operator cassandra 3.11` 7 | 8 | set -o errexit 9 | set -o nounset 10 | set -o pipefail 11 | 12 | CMD=kubectl-kudo 13 | REPO_DIR=build/repo 14 | 15 | 16 | # script requires kudo `kubectl-kudo` to be installed and in the path 17 | command -v $CMD >/dev/null 2>&1 || { echo >&2 "$CMD is required in the path. Aborting."; exit 1; } 18 | 19 | # which operator to build must be passed 20 | if [ $# -lt 1 ]; then 21 | echo 1>&2 "Usage: $0 path_to_operator [optional_version]" 22 | exit 1 23 | fi 24 | 25 | # convenience assumption which allow for operator name only 26 | # for pass in. `./build-operator kafka` 27 | # or `./build-operator cassandra 3.11` 28 | if [ $# -eq 2 ]; then 29 | OP_DIR="repository/${1}/${2}/operator/" 30 | else 31 | OP_DIR="repository/${1}/operator/" 32 | fi 33 | 34 | # the passed in operator must be the operator folder 35 | if [[ ! -d "${OP_DIR}" ]]; then 36 | OP_DIR="${1}" 37 | fi 38 | 39 | # the passed in operator must be the operator folder 40 | if [[ ! 
-d "${OP_DIR}" ]]; then 41 | echo 1>&2 "Usage: $0 path_to_operator [optional_version]" 42 | echo 1>&2 "$1 is not a directory" 43 | exit 1 44 | fi 45 | 46 | # output of kudo version for human or ci logs 47 | version=$($CMD version) 48 | echo "Using $version" 49 | 50 | # the build dir must be created 51 | # we don't "clean" it because a user may want to build multiple operators to it at a time 52 | 53 | if [[ ! -d "${REPO_DIR}" ]]; then 54 | # if repo dir doesn't exist create it 55 | mkdir -p $REPO_DIR 56 | fi 57 | 58 | $CMD package create "$OP_DIR" --destination $REPO_DIR 59 | -------------------------------------------------------------------------------- /clean-build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # script to remove kudo operator build directory 4 | 5 | set -o errexit 6 | set -o nounset 7 | set -o pipefail 8 | 9 | REPO_DIR=build/repo 10 | BIN_DIR=bin 11 | 12 | if [[ -d "${REPO_DIR}" ]]; then 13 | rm -rf $REPO_DIR 14 | fi 15 | 16 | if [[ -d "${BIN_DIR}" ]]; then 17 | rm -rf $BIN_DIR 18 | fi 19 | -------------------------------------------------------------------------------- /docs/Developing.md: -------------------------------------------------------------------------------- 1 | # Developing an Operator with KUDO 2 | 3 | ## Running tests 4 | 5 | If you do not have the kudo CLI installed already, the Makefile will download it: 6 | 7 | ``` 8 | make test 9 | ``` 10 | 11 | Otherwise, just run: 12 | 13 | ``` 14 | kubectl kudo test 15 | ``` 16 | 17 | By default, the tests will run inside of a kind cluster. If you want to test on a different cluster, just disable kind: 18 | 19 | ``` 20 | kubectl kudo test --start-kind=false 21 | ``` 22 | 23 | Please see the [testing documentation](https://kudo.dev/docs/testing/) or reach out to us on the KUDO slack channel if you have any questions. 
24 | -------------------------------------------------------------------------------- /docs/Publishing.md: -------------------------------------------------------------------------------- 1 | # Publishing to official hosted Bucket -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Documentation 2 | 3 | - [Publishing to official Bucket](Publishing.md) 4 | - [Developing an Operator with KUDO](Developing.md) -------------------------------------------------------------------------------- /kudo-test.yaml.tmpl: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1alpha1 2 | kind: TestSuite 3 | commands: 4 | - command: ./bin/kubectl-kudo init --unsafe-self-signed-webhook-ca --kudo-image kudobuilder/controller:%version% --kudo-image-pull-policy Never --wait 5 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/cassandra/3.11/operator/ 6 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/confluent-rest-proxy/operator/ 7 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/confluent-schema-registry/operator/ 8 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/cowsay/operator/ 9 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/elastic/operator/ 10 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/first-operator/operator/ 11 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/flink/docs/demo/financial-fraud/demo-operator/ 12 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/flink/operator/ 13 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/kafka/operator/ 14 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/mysql/operator/ 15 | - command: ./bin/kubectl-kudo install --skip-instance 
./repository/rabbitmq/operator/ 16 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/redis/operator/ 17 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/spark/operator/ 18 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/zookeeper/operator/ 19 | testDirs: 20 | - ./repository/zookeeper/tests 21 | - ./repository/kafka/tests 22 | startKIND: true 23 | timeout: 300 24 | kindContainers: 25 | - kudobuilder/controller:%version% 26 | -------------------------------------------------------------------------------- /kuttl-test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1alpha1 2 | kind: TestSuite 3 | commands: 4 | - command: ./bin/kubectl-kudo init --unsafe-self-signed-webhook-ca --wait 5 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/cassandra/3.11/operator/ 6 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/confluent-rest-proxy/operator/ 7 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/confluent-schema-registry/operator/ 8 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/cowsay/operator/ 9 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/elastic/operator/ 10 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/first-operator/operator/ 11 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/flink/docs/demo/financial-fraud/demo-operator/ 12 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/flink/operator/ 13 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/kafka/operator/ 14 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/mysql/operator/ 15 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/rabbitmq/operator/ 16 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/redis/operator/ 17 | - command: ./bin/kubectl-kudo install 
--skip-instance ./repository/spark/operator/ 18 | - command: ./bin/kubectl-kudo install --skip-instance ./repository/zookeeper/operator/ 19 | testDirs: 20 | - ./repository/zookeeper/tests 21 | - ./repository/kafka/tests 22 | startKIND: true 23 | timeout: 300 24 | 25 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/README.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/architecture.md: -------------------------------------------------------------------------------- 1 | # KUDO Cassandra Architecture 2 | 3 | Apache Cassandra is a stateful workload. KUDO Cassandra uses kubernetes 4 | statefulset as the basic piece of the KUDO Cassandra Architecture 5 | 6 | As a StatefulSet maintains sticky identities for each of its Pods, this helps 7 | KUDO Cassandra to automate all necessary operations with Apache Cassandra nodes. 8 | 9 | To help with updates and upgrades, KUDO Cassandra comes with a custom config 10 | maps thats helps for rolling updates for KUDO Cassandra. Apache Cassandra 11 | maintenance jobs like `repair` and `backup/restore` are configured as kubernetes 12 | jobs and are only deployed on-demand when configuring their respective 13 | parameters. 14 | 15 | ![](images/architecture.png) 16 | 17 | ## Multi-Datacenter Architecture 18 | 19 | KUDO Cassandra can span a ring across multiple kubernetes clusters, to 20 | facilitate the deployment across various regions and zones. Read more about 21 | multidataceneter configuration options in the 22 | [multi-dataceneter](./multidataceneter.md) docs. 
23 | 24 | ![](images/multi-dc-arch.png) 25 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/decommission.md: -------------------------------------------------------------------------------- 1 | # Decommission KUDO Cassandra nodes 2 | 3 | KUDO Cassandra does not provide an automated way to scale down the Cassandra 4 | cluster, as this is a critical operation that should not be repeated frequently, 5 | and to discourage anti-patterns when managing an Apache Cassandra cluster. 6 | 7 | ## Manually decommissioning KUDO Cassandra nodes 8 | 9 | KUDO Cassandra only supports decommissioning the node with the highest pod 10 | ordinal index. e.g. when having a cluster with following pods: 11 | 12 | ``` 13 | NAME READY STATUS RESTARTS AGE 14 | analytics-cassandra-node-0 2/2 Running 0 124m 15 | analytics-cassandra-node-1 2/2 Running 0 123m 16 | analytics-cassandra-node-2 2/2 Running 0 120m 17 | analytics-cassandra-node-3 2/2 Running 0 118m 18 | analytics-cassandra-node-4 2/2 Running 0 117m 19 | ``` 20 | 21 | we can only decommission `analytics-cassandra-node-4` as it has the highest pod 22 | ordinal index `4`. 23 | 24 | ### Decomission the node 25 | 26 | ```bash 27 | kubectl exec -it pod/analytics-cassandra-node-4 \ 28 | -n dev \ 29 | -c cassandra \ 30 | -- \ 31 | nodetool decommission 32 | ``` 33 | 34 | Once the operation is completed, we can update the KUDO Cassandra Instance 35 | 36 | ``` 37 | kubectl kudo update -p NODE_COUNT=4 --instance analytics-cassandra -n dev 38 | ``` 39 | 40 | Once the update plan is complete, we can delete the PVC that was attached to the 41 | KUDO Cassandra `pod/analytics-cassandra-node-4`. Not deleting or cleaning the 42 | PVC will result in issues when scaling the cluster up next time. 
43 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/images/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/cassandra/3.11/docs/images/architecture.png -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/images/branch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/cassandra/3.11/docs/images/branch.png -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/images/cassandra-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/cassandra/3.11/docs/images/cassandra-dashboard.png -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/images/multi-dc-arch.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/cassandra/3.11/docs/images/multi-dc-arch.png -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/images/run-on-tag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/cassandra/3.11/docs/images/run-on-tag.png -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/images/run-with-param.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/cassandra/3.11/docs/images/run-with-param.png -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/images/tag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/cassandra/3.11/docs/images/tag.png -------------------------------------------------------------------------------- /repository/cassandra/3.11/docs/images/upload.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/cassandra/3.11/docs/images/upload.png -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/backup-job.yaml: -------------------------------------------------------------------------------- 1 | {{ range $i, $v := until (int .Params.NODE_COUNT) }} 2 | --- 3 | apiVersion: batch/v1 4 | kind: Job 5 | metadata: 6 | name: backup-node-{{ $v }} 7 | spec: 8 | backoffLimit: 0 9 | template: 10 | spec: 11 | serviceAccountName: {{ $.Name }}-sa 12 | containers: 13 | - name: backup 14 | image: bitnami/kubectl:{{ $.Params.KUBECTL_VERSION }} 15 | command: 16 | - bash 17 | - -c 18 | args: 19 | - kubectl -n {{ $.Namespace }} exec {{ $.Name }}-node-{{ $v }} --container medusa-backup -- python3 /usr/local/bin/medusa backup --backup-name {{ $.Params.BACKUP_NAME }} 20 | restartPolicy: Never 21 | {{ end }} -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/cassandra-role-sa.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: {{ .Name }}-binding 5 | namespace: {{ .Namespace }} 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: {{ .Name }}-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: {{ .Name }}-sa 13 | namespace: {{ .Namespace }} 14 | --- 15 | apiVersion: v1 16 | kind: ServiceAccount 17 | metadata: 18 | name: {{ .Name }}-sa 19 | namespace: {{ .Namespace }} 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: Role 23 | metadata: 24 | namespace: {{ .Namespace }} 25 | name: {{ .Name }}-role 26 | rules: 27 | - apiGroups: [""] 28 | resources: ["configmaps"] 29 | verbs: ["update", "get", "list"] 30 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/cassandra-topology.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Name }}-topology-lock 5 | namespace: {{ .Namespace }} 6 | data: -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/external-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }}-svc-external 5 | namespace: {{ .Namespace }} 6 | spec: 7 | type: LoadBalancer 8 | externalTrafficPolicy: Local 9 | selector: 10 | app: {{ .Name }} 11 | kudo.dev/instance: {{ .Name }} 12 | ports: 13 | {{ if eq .Params.EXTERNAL_NATIVE_TRANSPORT "true" }} 14 | - protocol: TCP 15 | name: native-transport 16 | port: {{ .Params.EXTERNAL_NATIVE_TRANSPORT_PORT }} 17 | targetPort: {{ .Params.NATIVE_TRANSPORT_PORT }} 18 | {{ end }} 19 | {{ if and (eq .Params.EXTERNAL_RPC "true") (eq .Params.START_RPC "true") }} 20 | - protocol: 
TCP 21 | name: rpc 22 | port: {{ .Params.EXTERNAL_RPC_PORT }} 23 | targetPort: {{ .Params.RPC_PORT }} 24 | {{ end }} -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/generate-cqlshrc-sh.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Name }}-generate-cqlshrc-sh 5 | namespace: {{ .Namespace }} 6 | data: 7 | generate-cqlshrc.sh: | 8 | cat <> ~/.cassandra/cqlshrc 9 | [connection] 10 | factory = cqlshlib.ssl.ssl_transport_factory 11 | hostname = ${POD_NAME}.{{ .Name }}-svc.{{ .Namespace }}.svc.cluster.local 12 | port = {{ .Params.NATIVE_TRANSPORT_PORT }} 13 | {{ if eq .Params.TRANSPORT_ENCRYPTION_CLIENT_ENABLED "true" }} 14 | ssl = true 15 | {{ end }} 16 | 17 | {{ if eq .Params.TRANSPORT_ENCRYPTION_CLIENT_ENABLED "true" }} 18 | [ssl] 19 | certfile = /etc/tls/certs/tls.crt 20 | {{ if eq .Params.TRANSPORT_ENCRYPTION_CLIENT_REQUIRE_CLIENT_AUTH "true" }} 21 | userkey = /etc/tls/certs/tls.key 22 | usercert = /etc/tls/certs/tls.crt 23 | {{ end }} 24 | {{ end }} 25 | 26 | {{ if .Params.AUTHENTICATION_SECRET_NAME }} 27 | [authentication] 28 | username = $(cat /etc/cassandra/authentication/username) 29 | password = $(cat /etc/cassandra/authentication/password) 30 | {{ end }} 31 | EOT 32 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/generate-nodetool-ssl-properties.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Name }}-generate-nodetool-ssl-properties 5 | namespace: {{ .Namespace }} 6 | data: 7 | generate-nodetool-ssl-properties.sh: | 8 | #!/usr/bin/env bash 9 | 10 | set -euxo pipefail 11 | 12 | set +x 13 | readonly truststore_password=$(cat /etc/cassandra/truststore/truststore_password) 14 | readonly 
keystore_password=$(cat /etc/cassandra/truststore/keystore_password) 15 | 16 | cat < /etc/cassandra/nodetool-ssl.properties 17 | {{ if ne .Params.JMX_LOCAL_ONLY "true" }} 18 | -Dcom.sun.management.jmxremote.ssl=true 19 | -Dcom.sun.management.jmxremote.ssl.need.client.auth=true 20 | -Dcom.sun.management.jmxremote.registry.ssl=true 21 | -Djavax.net.ssl.keyStore=/etc/cassandra/tls/cassandra.server.keystore.jks 22 | -Djavax.net.ssl.keyStorePassword=${keystore_password} 23 | -Djavax.net.ssl.trustStore=/etc/cassandra/tls/cassandra.server.truststore.jks 24 | -Djavax.net.ssl.trustStorePassword=${truststore_password} 25 | {{ end }} 26 | EOF 27 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/node-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | name: {{ .Name }}-node-role 5 | namespace: {{ .Namespace }} 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods"] 9 | verbs: ["get"] 10 | - apiGroups: [""] 11 | resources: ["pods/exec"] 12 | verbs: ["create"] 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | kind: RoleBinding 16 | metadata: 17 | name: {{ .Name }}-node-{{ .Namespace }}-binding 18 | subjects: 19 | - kind: ServiceAccount 20 | name: {{ .Name }}-sa 21 | namespace: {{ .Namespace }} 22 | roleRef: 23 | apiGroup: rbac.authorization.k8s.io 24 | kind: Role 25 | name: {{ .Name }}-node-role 26 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/node-resolver-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: {{ .Name }}-{{ .Namespace }}-node-role 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["nodes"] 8 | verbs: ["get", "watch", "list"] 9 | --- 10 | apiVersion: 
rbac.authorization.k8s.io/v1 11 | kind: ClusterRoleBinding 12 | metadata: 13 | name: {{ .Name }}-{{ .Namespace }}-node-role-binding 14 | subjects: 15 | - kind: ServiceAccount 16 | name: {{ .Name }}-sa 17 | namespace: {{ .Namespace }} 18 | roleRef: 19 | apiGroup: rbac.authorization.k8s.io 20 | kind: ClusterRole 21 | name: {{ .Name }}-{{ .Namespace }}-node-role -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | {{ $topology := list 1 }} 2 | {{ $nodeCount := $.Params.NODE_COUNT}} 3 | {{ if $.Params.NODE_TOPOLOGY }} 4 | {{ $topology = $.Params.NODE_TOPOLOGY }} 5 | {{ end }} 6 | {{ range $datacenter := $topology }} 7 | {{ if $.Params.NODE_TOPOLOGY }} 8 | {{ $nodeCount := $datacenter.nodes }} 9 | {{ end }} 10 | {{ $minAvailable := sub $nodeCount 1}} 11 | --- 12 | apiVersion: policy/v1beta1 13 | kind: PodDisruptionBudget 14 | metadata: 15 | {{ if $.Params.NODE_TOPOLOGY }} 16 | name: {{ $.Name }}-{{ $datacenter.datacenter }}-pdb 17 | {{ else }} 18 | name: {{ $.Name }}-pdb 19 | {{ end }} 20 | namespace: {{ $.Namespace }} 21 | spec: 22 | selector: 23 | matchLabels: 24 | app: {{ $.Name }} 25 | cassandra: {{ $.OperatorName }} 26 | {{ if $.Params.NODE_TOPOLOGY }} 27 | cassandra-dc: {{ $.OperatorName }}-{{ $datacenter.datacenter }} 28 | {{ end }} 29 | kudo.dev/instance: {{ $.Name }} 30 | minAvailable: {{ $minAvailable }} 31 | {{ end }} -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/recovery-controller-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: {{ .Name }}-recovery-role 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["nodes"] 8 | verbs: ["get", "list", "watch"] 9 | - apiGroups: [""] 10 | 
resources: ["pods"] 11 | verbs: ["get", "list", "watch", "delete"] 12 | - apiGroups: [""] 13 | resources: ["persistentvolumeclaims"] 14 | verbs: ["get", "list", "watch", "delete"] 15 | - apiGroups: [""] 16 | resources: ["persistentvolumes"] 17 | verbs: ["get", "list", "watch", "update", "delete"] 18 | --- 19 | apiVersion: v1 20 | kind: ServiceAccount 21 | metadata: 22 | name: {{ .Name }}-recovery-controller 23 | namespace: {{ .Namespace }} 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | name: {{ .Name }}-recovery-rolebinding 29 | subjects: 30 | - kind: ServiceAccount 31 | name: {{ .Name }}-recovery-controller 32 | namespace: {{ .Namespace }} 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: ClusterRole 36 | name: {{ .Name }}-recovery-role -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/recovery-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: {{ $.Name }}-recovery-controller 5 | namespace: {{ $.Namespace }} 6 | labels: 7 | app: {{ $.Name }}-recovery-controller 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: {{ $.Name }}-recovery-controller 12 | serviceName: {{ $.Name }}-svc 13 | replicas: 1 14 | template: 15 | metadata: 16 | labels: 17 | app: {{ $.Name }}-recovery-controller 18 | kudo.dev/instance: {{ $.Name }} 19 | spec: 20 | serviceAccount: {{ $.Name }}-recovery-controller 21 | containers: 22 | - name: recovery-controller 23 | image: {{ $.Params.RECOVERY_CONTROLLER_DOCKER_IMAGE }} 24 | imagePullPolicy: {{ $.Params.RECOVERY_CONTROLLER_DOCKER_IMAGE_PULL_POLICY }} 25 | env: 26 | - name: NAMESPACE 27 | valueFrom: 28 | fieldRef: 29 | fieldPath: metadata.namespace 30 | - name: INSTANCE_NAME 31 | value: {{ $.Name }} 32 | - name: EVICTION_LABEL 33 | value: "kudo-cassandra/evict" 34 | resources: 35 | 
requests: 36 | memory: "{{ $.Params.RECOVERY_CONTROLLER_MEM_MIB }}Mi" 37 | cpu: "{{ $.Params.RECOVERY_CONTROLLER_CPU_MC }}m" 38 | limits: 39 | memory: "{{ $.Params.RECOVERY_CONTROLLER_MEM_LIMIT_MIB }}Mi" 40 | cpu: "{{ $.Params.RECOVERY_CONTROLLER_CPU_LIMIT_MC }}m" 41 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/repair-job.yaml: -------------------------------------------------------------------------------- 1 | {{ $auth_params := "" }} 2 | {{ if .Params.AUTHENTICATION_SECRET_NAME }} 3 | {{ $auth_params = "-u \\\\$(cat /etc/cassandra/authentication/username) -pwf <(paste -d ' ' /etc/cassandra/authentication/username /etc/cassandra/authentication/password)" }} 4 | {{ end }} 5 | --- 6 | apiVersion: batch/v1 7 | kind: Job 8 | metadata: 9 | name: {{ $.Name }}-node-repair-job 10 | namespace: {{ $.Namespace }} 11 | labels: 12 | cassandra: {{ $.OperatorName }} 13 | app: {{ $.Name }} 14 | spec: 15 | backoffLimit: 0 16 | template: 17 | spec: 18 | containers: 19 | - name: repair-job 20 | image: bitnami/kubectl:{{ $.Params.KUBECTL_VERSION }} 21 | command: ["/bin/bash"] 22 | args: [ "-c", "kubectl exec {{ $.Params.REPAIR_POD }} -- /bin/bash -c \"nodetool {{ $auth_params }} repair\""] 23 | restartPolicy: Never 24 | serviceAccountName: {{ .Name }}-sa 25 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: {{ .Name }}-monitor 5 | namespace: {{ .Namespace }} 6 | labels: 7 | app: prometheus-operator 8 | release: prometheus-kubeaddons 9 | spec: 10 | endpoints: 11 | - interval: 30s 12 | port: prometheus-exporter-port 13 | namespaceSelector: 14 | matchNames: 15 | - {{ .Namespace }} 16 | selector: 17 | matchLabels: 18 | kudo.dev/instance: 
{{ .Name }} 19 | kudo.dev/servicemonitor: "true" 20 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }}-svc 5 | namespace: {{ .Namespace }} 6 | {{ if eq .Params.PROMETHEUS_EXPORTER_ENABLED "true" }} 7 | labels: 8 | kudo.dev/servicemonitor: "true" 9 | {{ end }} 10 | spec: 11 | ports: 12 | - port: {{ .Params.STORAGE_PORT }} 13 | name: storage 14 | - port: {{ .Params.SSL_STORAGE_PORT }} 15 | name: ssl-storage 16 | - port: {{ .Params.NATIVE_TRANSPORT_PORT }} 17 | name: native-transport 18 | {{ if eq .Params.START_RPC "true" }} 19 | - port: {{ .Params.RPC_PORT }} 20 | name: rpc 21 | {{ end }} 22 | {{ if ne .Params.JMX_LOCAL_ONLY "true" }} 23 | - port: {{ .Params.JMX_PORT }} 24 | name: jmx 25 | - port: {{ .Params.RMI_PORT }} 26 | name: rmi 27 | {{ end }} 28 | {{ if eq .Params.PROMETHEUS_EXPORTER_ENABLED "true" }} 29 | - port: {{ .Params.PROMETHEUS_EXPORTER_PORT }} 30 | name: prometheus-exporter-port 31 | {{ end }} 32 | selector: 33 | app: {{ .Name }} 34 | kudo.dev/instance: {{ .Name }} 35 | -------------------------------------------------------------------------------- /repository/cassandra/3.11/operator/templates/tls-store-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: {{ $.Name}}-tls-store-credentials 5 | type: Opaque 6 | data: 7 | keystore_password: Y2Fzc2FuZHJh 8 | truststore_password: Y2Fzc2FuZHJh 9 | -------------------------------------------------------------------------------- /repository/confluent-rest-proxy/README.md: -------------------------------------------------------------------------------- 1 | # Confluent REST Proxy 2 | 3 | The Confluent REST Proxy provides a RESTful interface to a Kafka cluster, 
making it easy to produce and consume messages, view the state of the cluster, and perform administrative actions without using the native Kafka protocol or clients. 4 | 5 | Source: [Confluent at GitHub](https://github.com/confluentinc/kafka-rest) 6 | DockerFile: [Rest Proxy 5.3.2](https://github.com/confluentinc/cp-docker-images/blob/v5.3.2/debian/kafka-rest/Dockerfile) 7 | 8 | ## Documentation 9 | 10 | - [Custom Configuration](./docs/v1.0/runbook_custom_configuration.md) 11 | - [Security](./docs/v1.0/security.md) 12 | -------------------------------------------------------------------------------- /repository/confluent-rest-proxy/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | name: "confluent-rest-proxy" 3 | operatorVersion: "0.3.0" 4 | kudoVersion: 0.10.0 5 | kubernetesVersion: 1.15.0 6 | appVersion: 5.3.2 7 | maintainers: 8 | - name: Vibhu Jain 9 | email: vjain.c@d2iq.com 10 | - name: Shubhanil Bag 11 | email: sbag.c@d2iq.com 12 | url: https://www.confluent.io/ 13 | tasks: 14 | - name: deploy 15 | kind: Apply 16 | spec: 17 | resources: 18 | - service.yaml 19 | - bootstrap.yaml 20 | - deployment.yaml 21 | - enable-tls.yaml 22 | plans: 23 | deploy: 24 | strategy: serial 25 | phases: 26 | - name: deploy 27 | strategy: serial 28 | steps: 29 | - name: deploy 30 | tasks: 31 | - deploy 32 | -------------------------------------------------------------------------------- /repository/confluent-rest-proxy/operator/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }} 5 | namespace: {{ .Namespace }} 6 | spec: 7 | ports: 8 | - port: {{ .Params.REST_PROXY_PORT }} 9 | name: http 10 | {{ if eq .Params.TRANSPORT_ENCRYPTION_CLIENT_ENABLED "true" }} 11 | - port: {{ .Params.REST_PROXY_PORT_TLS }} 12 | name: https 13 | {{ end }} 14 | selector: 15 | app: 
confluent-rest-proxy 16 | clusterIP: None 17 | -------------------------------------------------------------------------------- /repository/confluent-rest-proxy/tests/rest-proxy-upgrade-test/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: confluent-rest-proxy 5 | status: 6 | planStatus: 7 | deploy: 8 | status: COMPLETE 9 | --- 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | metadata: 13 | name: confluent-rest-proxy 14 | spec: 15 | template: 16 | spec: 17 | containers: 18 | - name: confluent-rest-proxy 19 | resources: 20 | requests: 21 | memory: "256Mi" 22 | cpu: "250m" 23 | status: 24 | readyReplicas: 1 25 | -------------------------------------------------------------------------------- /repository/confluent-rest-proxy/tests/rest-proxy-upgrade-test/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: confluent-rest-proxy 5 | spec: 6 | operatorVersion: 7 | name: confluent-rest-proxy-0.1.0 8 | namespace: default 9 | kind: OperatorVersion 10 | name: "confluent-rest-proxy" 11 | parameters: 12 | MEMORY: "256Mi" 13 | CPUS: "0.25" 14 | -------------------------------------------------------------------------------- /repository/confluent-schema-registry/README.md: -------------------------------------------------------------------------------- 1 | # Confluent Schema Registry 2 | 3 | Confluent Schema Registry provides a serving layer for your metadata. It provides a RESTful interface for storing and retrieving Apache Avro® schemas. It stores a versioned history of all schemas based on a specified subject name strategy, provides multiple compatibility settings and allows evolution of schemas according to the configured compatibility settings and expanded Avro support. 
It provides serializers that plug into Apache Kafka® clients that handle schema storage and retrieval for Kafka messages that are sent in the Avro format. 4 | 5 | Source: [Confluent at GitHub](https://github.com/confluentinc/schema-registry) 6 | DockerFile: [Schema Registry 5.3.2](https://github.com/confluentinc/cp-docker-images/blob/v5.3.2/debian/schema-registry/Dockerfile) 7 | 8 | ## Documentation 9 | 10 | - [Custom Configuration](./docs/v1.0/runbook_custom_configuration.md) 11 | - [Security](./docs/v1.0/security.md) -------------------------------------------------------------------------------- /repository/confluent-schema-registry/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | name: "confluent-schema-registry" 3 | operatorVersion: "0.3.0" 4 | kudoVersion: 0.10.0 5 | kubernetesVersion: 1.15.0 6 | appVersion: 5.3.2 7 | maintainers: 8 | - name: Vibhu Jain 9 | email: vjain.c@d2iq.com 10 | - name: Shubhanil Bag 11 | email: sbag.c@d2iq.com 12 | url: https://www.confluent.io/ 13 | tasks: 14 | - name: deploy 15 | kind: Apply 16 | spec: 17 | resources: 18 | - service.yaml 19 | - enable-tls.yaml 20 | - bootstrap.yaml 21 | - deployment.yaml 22 | plans: 23 | deploy: 24 | strategy: serial 25 | phases: 26 | - name: deploy 27 | strategy: serial 28 | steps: 29 | - name: deploy 30 | tasks: 31 | - deploy 32 | -------------------------------------------------------------------------------- /repository/confluent-schema-registry/operator/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }} 5 | namespace: {{ .Namespace }} 6 | spec: 7 | ports: 8 | - port: {{ .Params.SCHEMA_REGISTRY_PORT }} 9 | selector: 10 | app: confluent-schema-registry 11 | clusterIP: None 12 | -------------------------------------------------------------------------------- 
/repository/confluent-schema-registry/tests/schema-registry-upgrade-test/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: confluent-schema-registry 5 | status: 6 | planStatus: 7 | deploy: 8 | status: COMPLETE 9 | --- 10 | apiVersion: apps/v1 11 | kind: Deployment 12 | metadata: 13 | name: confluent-schema-registry 14 | spec: 15 | template: 16 | spec: 17 | containers: 18 | - name: confluent-schema-registry 19 | resources: 20 | requests: 21 | memory: "256Mi" 22 | cpu: "250m" 23 | status: 24 | readyReplicas: 1 25 | -------------------------------------------------------------------------------- /repository/confluent-schema-registry/tests/schema-registry-upgrade-test/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: confluent-schema-registry 5 | spec: 6 | operatorVersion: 7 | name: confluent-schema-registry-0.1.0 8 | namespace: default 9 | kind: OperatorVersion 10 | name: "confluent-schema-registry" 11 | parameters: 12 | MEMORY: "256Mi" 13 | CPUS: "0.25" 14 | -------------------------------------------------------------------------------- /repository/cowsay/README.md: -------------------------------------------------------------------------------- 1 | ![kudo](https://kudo.dev/images/kudo_horizontal_color@2x.png) 2 | 3 | # KUDO Cowsay operator 4 | 5 | The KUDO cowsay operator is a small demo for the KUDO [Pipe-Tasks](https://github.com/kudobuilder/kudo/blob/master/keps/0017-pipe-tasks.md). 
6 | 7 | ### Overview 8 | 9 | KUDO Cowsay operator: 10 | 11 | - Uses KUDO pipe-tasks and [cowsay.morecode.org](http://cowsay.morecode.org) to generate a customized `index.html` 12 | ```yaml 13 | - name: genwww 14 | kind: Pipe 15 | spec: 16 | pod: pipe-pod.yaml 17 | pipe: 18 | - file: /tmp/index.html 19 | kind: ConfigMap 20 | key: indexHtml 21 | ``` 22 | - Launches an nginx webserver with it 23 | 24 | 25 | ``` 26 | ______________________________________ 27 | / Good things come when you least expect \ 28 | \ them / 29 | -------------------------------------- 30 | \ ^__^ 31 | \ (oo)\_______ 32 | (__)\ )\/\ 33 | ||----w | 34 | || || 35 | ``` 36 | -------------------------------------------------------------------------------- /repository/cowsay/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | name: "cowsay" 3 | operatorVersion: "0.2.0" 4 | kubernetesVersion: 1.13.0 5 | maintainers: 6 | - name: zen-dog 7 | email: 8 | url: https://kudo.dev 9 | tasks: 10 | - name: app 11 | kind: Apply 12 | spec: 13 | resources: 14 | - deployment.yaml 15 | - name: genwww 16 | kind: Pipe 17 | spec: 18 | pod: pipe-pod.yaml 19 | pipe: 20 | - file: /tmp/index.html 21 | kind: ConfigMap 22 | key: indexHtml 23 | plans: 24 | deploy: 25 | strategy: serial 26 | phases: 27 | - name: main 28 | strategy: serial 29 | steps: 30 | - name: genfiles 31 | tasks: 32 | - genwww 33 | - name: app 34 | tasks: 35 | - app 36 | -------------------------------------------------------------------------------- /repository/cowsay/operator/params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | parameters: 3 | - name: replicas 4 | description: Number of replicas that should be run as part of the deployment 5 | default: 1 6 | -------------------------------------------------------------------------------- 
/repository/cowsay/operator/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ .Name }}-deployment 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx 9 | replicas: {{ .Params.replicas }} 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.7.9 18 | ports: 19 | - containerPort: 80 20 | volumeMounts: 21 | - name: www 22 | mountPath: /usr/share/nginx/html/ 23 | volumes: 24 | - name: www 25 | configMap: 26 | name: {{ .Pipes.indexHtml }} 27 | -------------------------------------------------------------------------------- /repository/cowsay/operator/templates/pipe-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | spec: 4 | volumes: 5 | - name: shared-data 6 | emptyDir: {} 7 | initContainers: 8 | - name: init 9 | image: busybox 10 | command: [ "/bin/sh", "-c" ] 11 | args: 12 | - wget -O /tmp/index.html 'http://cowsay.morecode.org/say?message=Good+things+come+when+you+least+expect+them&format=html' 13 | volumeMounts: 14 | - name: shared-data 15 | mountPath: /tmp 16 | -------------------------------------------------------------------------------- /repository/elastic/docs/README.md: -------------------------------------------------------------------------------- 1 | # Elastic 2 | 3 | Elasticsearch is a distributed, RESTful search and analytics engine. It is based on Apache Lucene. 4 | 5 | This Framework is deploying an Elasticsearch Cluster. 6 | 7 | ## Prerequisites 8 | 9 | You need a `Kubernetes cluster` up and running and `Persistent Storage` available with a default `Storage Class` defined. 10 | 11 | If you use `minikube` then launch it with the following resource options. 
12 | 13 | ```sh 14 | minikube start --vm-driver=hyperkit --cpus=3 --memory=9216 --disk-size=10g 15 | ``` 16 | 17 | ## Runbooks 18 | 19 | - [Installing an instance](install.md) 20 | - [Using an instance](use.md) 21 | - [Updating an instance](udpate.md) 22 | -------------------------------------------------------------------------------- /repository/elastic/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | name: "elastic" 3 | operatorVersion: "0.2.2" 4 | kudoVersion: 0.10.0 5 | kubernetesVersion: 1.15.0 6 | appVersion: 7.0.0 7 | maintainers: 8 | - name: Michael Beisiegel 9 | email: michael.beisiegel@gmail.com 10 | url: https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html 11 | tasks: 12 | - name: deploy-master 13 | kind: Apply 14 | spec: 15 | resources: 16 | - master-service.yaml 17 | - master.yaml 18 | - name: deploy-data 19 | kind: Apply 20 | spec: 21 | resources: 22 | - data-service.yaml 23 | - data.yaml 24 | - name: deploy-coordinator 25 | kind: Apply 26 | spec: 27 | resources: 28 | - coordinator-service.yaml 29 | - coordinator.yaml 30 | - name: deploy-ingest 31 | kind: Apply 32 | spec: 33 | resources: 34 | - ingest-service.yaml 35 | - ingest.yaml 36 | plans: 37 | deploy: 38 | strategy: serial 39 | phases: 40 | - name: deploy-master 41 | strategy: parallel 42 | steps: 43 | - name: deploy-master 44 | tasks: 45 | - deploy-master 46 | - name: deploy-data 47 | strategy: parallel 48 | steps: 49 | - name: deploy-data 50 | tasks: 51 | - deploy-data 52 | - name: deploy-coordinator 53 | strategy: parallel 54 | steps: 55 | - name: deploy-coordinator 56 | tasks: 57 | - deploy-coordinator 58 | - name: deploy-ingest 59 | strategy: parallel 60 | steps: 61 | - name: deploy-ingest 62 | tasks: 63 | - deploy-ingest 64 | -------------------------------------------------------------------------------- /repository/elastic/operator/params.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | parameters: 3 | - name: DATA_NODE_COUNT 4 | default: "2" 5 | - name: COORDINATOR_NODE_COUNT 6 | default: "1" 7 | - name: INGEST_NODE_COUNT 8 | default: "0" 9 | - name: MASTER_CPU 10 | default: "0.25" 11 | - name: MASTER_MEM 12 | default: "1Gi" 13 | - name: MASTER_DISK 14 | default: "2Gi" 15 | - name: DATA_CPU 16 | default: "0.25" 17 | - name: DATA_MEM 18 | default: "2Gi" 19 | - name: DATA_DISK 20 | default: "4Gi" 21 | - name: COORDINATOR_CPU 22 | default: "0.1" 23 | - name: COORDINATOR_MEM 24 | default: "1Gi" 25 | - name: COORDINATOR_DISK 26 | default: "2Gi" 27 | - name: INGEST_CPU 28 | default: "0.1" 29 | - name: INGEST_MEM 30 | default: "1Gi" 31 | - name: INGEST_DISK 32 | default: "2Gi" 33 | - name: STORAGE_CLASS 34 | description: "The storage class to be used in volumeClaimTemplates. By default its not required and the default storage class is used." 35 | required: false 36 | -------------------------------------------------------------------------------- /repository/elastic/operator/templates/coordinator-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: {{ .Name }}-coordinator-hs 5 | namespace: {{ .Namespace }} 6 | spec: 7 | selector: 8 | app: coordinator 9 | ports: 10 | - protocol: TCP 11 | port: 9200 12 | clusterIP: None 13 | -------------------------------------------------------------------------------- /repository/elastic/operator/templates/data-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: {{ .Name }}-data-hs 5 | namespace: {{ .Namespace }} 6 | spec: 7 | selector: 8 | app: data 9 | ports: 10 | - protocol: TCP 11 | port: 9200 12 | clusterIP: None 13 | 
-------------------------------------------------------------------------------- /repository/elastic/operator/templates/ingest-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: {{ .Name }}-ingest-hs 5 | namespace: {{ .Namespace }} 6 | spec: 7 | selector: 8 | app: ingest 9 | ports: 10 | - protocol: TCP 11 | port: 9200 12 | clusterIP: None 13 | -------------------------------------------------------------------------------- /repository/elastic/operator/templates/master-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: {{ .Name }}-master-hs 5 | namespace: {{ .Namespace }} 6 | spec: 7 | selector: 8 | app: master 9 | ports: 10 | - protocol: TCP 11 | port: 9200 12 | clusterIP: None 13 | -------------------------------------------------------------------------------- /repository/first-operator/docs/README.md: -------------------------------------------------------------------------------- 1 | # First Operator 2 | 3 | This is an example operator as described in the [KUDO documentation](https://kudo.dev/docs/developing-operators.html). 
4 | -------------------------------------------------------------------------------- /repository/first-operator/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | name: "first-operator" 3 | operatorVersion: "0.2.0" 4 | kubernetesVersion: 1.13.0 5 | maintainers: 6 | - name: Your name 7 | email: 8 | url: https://kudo.dev 9 | tasks: 10 | - name: app 11 | kind: Apply 12 | spec: 13 | resources: 14 | - deployment.yaml 15 | plans: 16 | deploy: 17 | strategy: serial 18 | phases: 19 | - name: main 20 | strategy: parallel 21 | steps: 22 | - name: everything 23 | tasks: 24 | - app 25 | -------------------------------------------------------------------------------- /repository/first-operator/operator/params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | parameters: 3 | - name: replicas 4 | description: Number of replicas that should be run as part of the deployment 5 | default: 2 6 | -------------------------------------------------------------------------------- /repository/first-operator/operator/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx 9 | replicas: {{ .Params.replicas }} 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.7.9 18 | ports: 19 | - containerPort: 80 20 | -------------------------------------------------------------------------------- /repository/flink/docs/README.md: -------------------------------------------------------------------------------- 1 | # Flink Documentation 2 | 3 | This document links to further documentation around Flink and its demos. 
4 | 5 | ## Flink Demos 6 | 7 | - [Financial Fraud Demo](demo/financial-fraud) -------------------------------------------------------------------------------- /repository/flink/docs/demo/financial-fraud/demo-operator/params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | parameters: 3 | - name: download_url 4 | description: URL To download the application from 5 | default: "https://downloads.mesosphere.com/dcos-demo/flink/flink-job-1.0.jar" 6 | -------------------------------------------------------------------------------- /repository/flink/docs/demo/financial-fraud/demo-operator/templates/actor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: actor 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | actor: {{ .Name }} 10 | template: 11 | metadata: 12 | name: flink-demo-actor 13 | labels: 14 | actor: {{ .Name }} 15 | spec: 16 | containers: 17 | - name: actor 18 | image: dcoslabs/flink-demo-actor:0.3 19 | command: ["/fraudDisplay-linux"] 20 | imagePullPolicy: Always 21 | args: ["--broker", "kafka-kafka-0.kafka-svc:9093"] 22 | -------------------------------------------------------------------------------- /repository/flink/docs/demo/financial-fraud/demo-operator/templates/flink-params.yaml: -------------------------------------------------------------------------------- 1 | high_availability: ZOOKEEPER 2 | zookeeper_url: "zk-zookeeper-0.zk-hs:2181,zk-zookeeper-1.zk-hs:2181,zk-zookeeper-2.zk-hs:2181" 3 | zookeeper_path: "/flink-demo-flink" -------------------------------------------------------------------------------- /repository/flink/docs/demo/financial-fraud/demo-operator/templates/generator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: generator 5 | spec: 6 | 
replicas: 1 7 | selector: 8 | matchLabels: 9 | generator: {{ .Name }} 10 | template: 11 | metadata: 12 | name: flink-demo-generator 13 | labels: 14 | generator: {{ .Name }} 15 | spec: 16 | containers: 17 | - name: flink-demo-generator 18 | image: mesosphere/flink-generator:0.1 19 | command: ["/generator-linux"] 20 | imagePullPolicy: Always 21 | args: ["--broker", "kafka-kafka-0.kafka-svc:9093"] 22 | -------------------------------------------------------------------------------- /repository/flink/docs/demo/financial-fraud/demo-operator/templates/kafka-params.yaml: -------------------------------------------------------------------------------- 1 | ZOOKEEPER_URI: "zk-zookeeper-0.zk-hs:2181,zk-zookeeper-1.zk-hs:2181,zk-zookeeper-2.zk-hs:2181" 2 | ZOOKEEPER_PATH: "/flink-demo-kafka" 3 | BROKER_COUNT: "3" 4 | AUTO_CREATE_TOPICS_ENABLE: "true" 5 | BROKER_MEM: "1024Mi" -------------------------------------------------------------------------------- /repository/flink/docs/demo/financial-fraud/demo-operator/templates/zookeeper-params.yaml: -------------------------------------------------------------------------------- 1 | CPUS: "0.3" 2 | MEMORY: "256Mi" -------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/flinkapplication-framework.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Operator 3 | metadata: 4 | labels: 5 | controller-tools.k8s.io: "1.0" 6 | name: flinkapplication -------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/flinkapplication-instance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | labels: 5 | controller-tools.k8s.io: "1.0" 6 | framework: flink 7 | name: application # this is the instance label which will lead the pod name 8 | 
spec: 9 | frameworkVersion: 10 | name: flink-1.0 11 | namespace: default 12 | type: FrameworkVersion 13 | parameters: 14 | DEPLOY_OWN_CLUSTER: "yes" 15 | JAR_URL: "https://downloads.mesosphere.com/dcos-demo/flink/flink-job-1.0.jar" 16 | JAR_PATH: "/ha/artifacts/flink-job-1.0.jar" 17 | JOB_ARGUMENTS: "--kafka_host small-kafka-0.small-svc.default.svc.cluster.local:9093" 18 | CLASSNAME: "io.dcos.FinancialTransactionJob" -------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/flinkcluster-framework.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Framework 3 | metadata: 4 | labels: 5 | controller-tools.k8s.io: "1.0" 6 | name: flink-cluster # this will be the "app" label but not the pod name (thats what instance is for) -------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/flinkcluster-instance.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | labels: 5 | controller-tools.k8s.io: "1.0" 6 | framework: flinkcluster 7 | name: flinkcluster # this is the instance label which will lead the pod name 8 | spec: 9 | frameworkVersion: 10 | name: flinkcluster-1.7 11 | namespace: default 12 | type: FrameworkVersion 13 | parameters: 14 | HIGH_AVAILABILITY: ZOOKEEPER -------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/scratch/restart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: PlanExecution 3 | metadata: 4 | name: restart 5 | namespace: default 6 | spec: 7 | instance: 8 | kind: Instance 9 | name: application 10 | namespace: default 11 | planName: restart 
-------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/scratch/stop.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: PlanExecution 3 | metadata: 4 | name: stop 5 | namespace: default 6 | spec: 7 | instance: 8 | kind: Instance 9 | name: application 10 | namespace: default 11 | planName: stop -------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/scratch/submit.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: PlanExecution 3 | metadata: 4 | name: submit 5 | namespace: default 6 | spec: 7 | instance: 8 | kind: Instance 9 | name: application 10 | namespace: default 11 | planName: submit -------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/submitter/Dockerfile: -------------------------------------------------------------------------------- 1 | #kudobuilder/flink-submission:1.7 2 | FROM flink:1.7 3 | COPY --from=bitnami/kubectl:1.13 /opt/bitnami/kubectl/bin/kubectl /usr/local/bin/kubectl 4 | 5 | RUN apt-get update && apt-get install -y jq 6 | 7 | ADD submit.sh . 8 | ADD shutdown.sh . 9 | 10 | ENTRYPOINT [ "./submit.sh" ] 11 | -------------------------------------------------------------------------------- /repository/flink/docs/demo/modifications/submitter/submit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -x 5 | #Requires the following environment variables 6 | # CONFIGMAP - name fo the configmap that will get patched with the jobid 7 | 8 | # Variables 9 | PARALLELISM=${PARALLELISM:-1} 10 | 11 | ls -la ${JAR_PATH} 12 | cp ${JAR_PATH} . 
13 | JAR=$(basename ${JAR_PATH}) 14 | echo "Local jar is $JAR" 15 | ls -la ${JAR} 16 | 17 | # Assume local JAR for upload 18 | filename=`curl -X POST -H "Expect:" -F "jarfile=@${JAR_PATH}" $JOBMANAGER:8081/jars/upload` 19 | 20 | echo "Filename: $filename" 21 | raw=`echo $filename | jq -r .filename` 22 | echo "Raw: $raw" 23 | # Jar ID is just the last part of the filename 24 | jar_id=`basename $raw` 25 | echo "JarID: $jar_id" 26 | # Start the job 27 | 28 | job_response=`curl -s -XPOST \ 29 | -d '{"entryClass":"'"${CLASSNAME}"'","programArgs":"'"${PROGRAM_ARGS}"'","parallelism":"'"${PARALLELISM}"'","savepointPath":"'"${SAVEPOINT_PATH}"'"}' \ 30 | $JOBMANAGER:8081/jars/$jar_id/run` 31 | 32 | echo "Submitting Job... Response: $job_response" 33 | 34 | job_id=`echo $job_response | jq -r .jobid` 35 | 36 | echo "JobID: $job_id" 37 | kubectl patch configmap $CONFIGMAP -p '{"data": {"jobid": "'$job_id'"}}' 38 | 39 | 40 | -------------------------------------------------------------------------------- /repository/flink/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | name: "flink" 3 | operatorVersion: "0.2.1" 4 | kudoVersion: 0.10.0 5 | kubernetesVersion: 1.15.0 6 | appVersion: 1.7.2 7 | maintainers: 8 | - name: Fabian Baier 9 | email: fabian@mesosphere.io 10 | - name: Tom Runyon 11 | email: runyontr@gmail.com 12 | url: https://flink.apache.org/ 13 | tasks: 14 | - name: storage 15 | kind: Apply 16 | spec: 17 | resources: 18 | - storage.yaml 19 | - name: jobmanager 20 | kind: Apply 21 | spec: 22 | resources: 23 | - jobmanager-pdb.yaml 24 | - jobmanager-statefulset.yaml 25 | - name: jobmanager-service 26 | kind: Apply 27 | spec: 28 | resources: 29 | - services.yaml 30 | - name: taskmanager 31 | kind: Apply 32 | spec: 33 | resources: 34 | - taskmanager-deployment.yaml 35 | plans: 36 | deploy: 37 | strategy: serial 38 | phases: 39 | - name: flink 40 | strategy: serial 41 | steps: 42 | 
- name: jobmanager 43 | tasks: 44 | - storage 45 | - jobmanager 46 | - jobmanager-service 47 | - taskmanager 48 | -------------------------------------------------------------------------------- /repository/flink/operator/params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | parameters: 3 | - name: flink_storage_accessmodes 4 | description: "Defines the access modes for Persistent Volume Claims e.g., can be mounted once read/write or many times read-only" 5 | default: "ReadWriteOnce" 6 | - name: flink_taskmanager_replicas 7 | description: "Number of task managers to run" 8 | default: "2" 9 | - name: flink_jobmanager_replicas 10 | description: "Number of job managers to run" 11 | default: "1" 12 | - name: zookeeper_url 13 | description: "Connection information for Zookeeper" 14 | default: "zk-zk-0.zk-hs:2181,zk-zk-1.zk-hs:2181,zk-zk-2.zk-hs:2181" 15 | - name: zookeeper_path 16 | description: Path to store Flink data in Zookeeper 17 | default: "/flink" 18 | - name: high_availability 19 | description: "Defines high-availability mode used for the cluster execution. To enable high-availability, set this mode to \"ZOOKEEPER\" or specify FQN of factory class." 
20 | default: NONE -------------------------------------------------------------------------------- /repository/flink/operator/templates/jobmanager-pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: {{ .Name }}-pdb 5 | namespace: {{ .Namespace }} 6 | labels: 7 | app: flink 8 | component: {{ .Name }}-jobmanager 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: flink 13 | zookeeper: {{ .Name }} -------------------------------------------------------------------------------- /repository/flink/operator/templates/services.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }}-jobmanager 5 | namespace: {{ .Namespace }} 6 | spec: 7 | ports: 8 | - name: rpc 9 | port: 6123 10 | - name: blob 11 | port: 6124 12 | - name: query 13 | port: 6125 14 | - name: ui 15 | port: 8081 16 | - name: ha 17 | port: 8082 18 | - name: metrics 19 | port: 8083 20 | selector: 21 | app: {{ .Name }} 22 | component: {{ .Name }}-jobmanager 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: {{ .Name }}-hs 28 | namespace: {{ .Namespace }} 29 | labels: 30 | app: flink 31 | component: {{ .Name }}-jobmanager 32 | spec: 33 | ports: 34 | - name: rpc 35 | port: 6123 36 | - name: blob 37 | port: 6124 38 | - name: query 39 | port: 6125 40 | - name: ui 41 | port: 8081 42 | - name: ha 43 | port: 8082 44 | - name: metrics 45 | port: 8083 46 | selector: 47 | app: {{ .Name }} 48 | component: {{ .Name }}-jobmanager 49 | clusterIP: None -------------------------------------------------------------------------------- /repository/flink/operator/templates/storage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: {{ .Name }}-snapshots 5 | namespace: {{ 
.Namespace }} 6 | spec: 7 | accessModes: 8 | - {{ .Params.flink_storage_accessmodes }} 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | --- 13 | apiVersion: v1 14 | kind: PersistentVolumeClaim 15 | metadata: 16 | name: {{ .Name }}-ha 17 | namespace: {{ .Namespace }} 18 | spec: 19 | accessModes: 20 | - {{ .Params.flink_storage_accessmodes }} 21 | resources: 22 | requests: 23 | storage: 1Gi 24 | -------------------------------------------------------------------------------- /repository/kafka/README.md: -------------------------------------------------------------------------------- 1 | ![kudo-kafka](./docs/latest/resources/images/kudo-kafka.png) 2 | 3 | # KUDO Kafka Operator 4 | 5 | The KUDO Kafka operator creates, configures and manages [Apache Kafka](https://kafka.apache.org/) clusters running on Kubernetes. 6 | 7 | ### Overview 8 | 9 | KUDO Kafka is a Kubernetes operator built on [KUDO](https://kudo.dev) to manage Apache Kafka in a scalable, repeatable, and standardized way over Kubernetes. Currently KUDO Kafka supports: 10 | 11 | - Securing the cluster in various ways: TLS encryption, Kerberos authentication, Kafka AuthZ 12 | - Prometheus metrics right out of the box with example Grafana dashboards 13 | - Kerberos support 14 | - Graceful rolling updates for any cluster configuration changes 15 | - Graceful rolling upgrades when upgrading the operator version 16 | - External access through LB/Nodeports 17 | - Mirror-maker integration 18 | - Cruise Control integration 19 | - Connect integration 20 | 21 | To get more information around KUDO Kafka architecture, please take a look at the [KUDO Kafka concepts](./docs/latest/concepts.md) document. 22 | 23 | ## Getting started 24 | 25 | The latest stable version of the Kafka operator is `1.3.1`. 26 | For more details, please see the [v1.3 docs](./docs/v1.3) folder.
27 | 28 | 29 | ## Releases 30 | 31 | | KUDO Kafka | Apache Kafka | Minimum KUDO Version | 32 | | ---------- | ------------ | -------------------- | 33 | | 1.2.1 | 2.4.1 | 0.11.0 | 34 | | 1.3.0 | 2.5.0 | 0.11.0 | 35 | | **1.3.1** | **2.5.0** | **0.13.0** | 36 | 37 | ## Unreleased version 38 | 39 | | KUDO Kafka | Apache Kafka | Minimum KUDO Version | 40 | | ---------- | ------------ | -------------------- | 41 | | 1.3.2 | 2.5.1 | 0.14.0 | 42 | -------------------------------------------------------------------------------- /repository/kafka/docs/latest/README.md: -------------------------------------------------------------------------------- 1 | ![kudo-kafka](./resources/images/kudo-kafka.png) 2 | 3 | ## KUDO Kafka latest 4 | 5 | - [Installation](./install.md) 6 | - [Concepts](./concepts.md) 7 | - [Configuration](./configuration.md) 8 | - [Custom Configuration](./custom.md) 9 | - [Monitoring](./monitoring.md) 10 | - [Mirror Maker](./mirrormaker.md) 11 | - [Kafka Connect](./kafka-connect.md) 12 | - [Cruise Control](./cruise-control.md) 13 | - [Upgrading the Kafka operator](./upgrade.md) 14 | - [Scaling & Updating Cluster](./update.md) 15 | - [Running in Production](./production.md) 16 | - [Release notes](./release-notes.md) 17 | - [Limitations](./limitations.md) 18 | - [Versions](./versions.md) 19 | -------------------------------------------------------------------------------- /repository/kafka/docs/latest/concepts.md: -------------------------------------------------------------------------------- 1 | # KUDO Kafka Concepts 2 | 3 | KUDO Kafka is a Kubernetes operator built on top of [KUDO](http://kudo.dev) and requires KUDO 4 | 5 | #### KUDO Kafka CRDs 6 | 7 | There are three CRDs that are installed when deploying KUDO Kafka: 8 | 9 | - Operator: the definition that describes the Kudo Kafka operator. 10 | - OperatorVersion: the definition that describes the Kudo Kafka operator for a specific version. 
11 | - Instance: the instantiation of a KUDO Kafka cluster based on the OperatorVersion. 12 | 13 | #### KUDO Controller Reconcile Cycle 14 | 15 | The KUDO controller continually watches the Operator, OperatorVersion and Instance CRDs via the Kubernetes API. 16 | 17 | ![kudo-kafka](./resources/images/kudo-controller-kafka.png) 18 | 19 | When a user installs KUDO Kafka using the `kudo-cli`, the controller creates the KUDO Kafka CRDs for Operator, OperatorVersion and Instance. More information can be read in [KUDO Architecture](https://kudo.dev/docs/architecture.html#architecture-diagram) 20 | 21 | ![kudo-kafka](./resources/images/kudo-installs-kafka.png) 22 | 23 | When the KUDO Controller detects a new `Instance`, it creates all the resources required to reach the desired state of the configuration. 24 | 25 | ![kudo-kafka](./resources/images/kafka-cluster.png) 26 | 27 | The same process is followed for any updates or deletions. Everything is handled by the KUDO Controller. 28 | 29 | ![kudo-kafka](./resources/images/kudo-update-kafka.png) 30 | -------------------------------------------------------------------------------- /repository/kafka/docs/latest/cruise-control.md: -------------------------------------------------------------------------------- 1 | # Cruise Control 2 | 3 | ## Overview 4 | 5 | KUDO Kafka operator comes with built-in integration of [Cruise Control](https://github.com/linkedin/cruise-control). 6 | 7 | Cruise Control is a tool to fully automate the dynamic workload rebalance and self-healing of a Kafka cluster. It provides great value to Kafka users by simplifying the operation of Kafka clusters. 8 | 9 | Cruise Control integration is disabled by default. 10 | 11 | ## Setup Cruise Control 12 | 13 | ### Start Cruise Control 14 | 15 | Update the instance with `ENABLE_CRUISE_CONTROL` set to `true` to start Cruise Control alongside the KUDO Kafka instance.
16 | 17 | ```bash 18 | $ kubectl kudo update --instance=kafka \ 19 | -p ENABLE_CRUISE_CONTROL=true 20 | ``` 21 | ## Advanced Options 22 | 23 | |Parameter|Description|Example| 24 | |--|--|--| 25 | | CRUISE_CONTROL_PORT | Port for the Cruise Control server to listen to |
  • 9090 (default)
| 26 | | CRUISE_CONTROL_WEBSERVER_API_URLPREFIX | Cruise Control REST API default prefix |
  • "/kafkacruisecontrol/*" (default)
| 27 | | CRUISE_CONTROL_WEBSERVER_UI_URLPREFIX | Cruise Control REST Web UI default path prefix |
  • "/*" (default)
| 28 | 29 | ## Disable Cruise Control 30 | 31 | To disable Cruise Control, update the 32 | instance with `ENABLE_CRUISE_CONTROL` set to `false`, using the following command: 33 | 34 | ```bash 35 | $ kubectl kudo update --instance=kafka \ 36 | -p ENABLE_CRUISE_CONTROL=false 37 | ``` 38 | 39 | ## Limitations 40 | 41 | Currently Cruise Control works with Kafka protocol `PLAINTEXT` only. It will not work if Kerberos and or TLS is 42 | enabled in the Kafka instance. Future releases of KUDO Kafka will 43 | address this limitation through a Cruise Control operator. 44 | -------------------------------------------------------------------------------- /repository/kafka/docs/latest/limitations.md: -------------------------------------------------------------------------------- 1 | # Limitations 2 | 3 | Below is a list of parameters that can only be configured during bootstrap time. 4 | 5 | |Immutable Parameters| 6 | | ------------------ | 7 | | DISK_SIZE | 8 | | STORAGE_CLASS | 9 | | PERSISTENT_STORAGE | 10 | 11 | These storage-related parameters cannot be changed after initial deployment. Repeat: using parameters to resize disk, change storage class, or switch between persistent/ephemeral storage is not supported. 12 | 13 | Changing the above parameters will trigger a `not-allowed` plan. Which basically skips updating any resources. 14 | This is to avoid any update done by mistake or human error. 15 | 16 | ### Resizing the PVC 17 | 18 | Resizing the disk depends on the storage class of your Kubernetes cluster. 19 | To resize the disk being used by a broker, users can edit the `pvc` and expand the disk. 20 | You can read more about it in [resizing the PVC](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) post. 
21 | -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/grafana-capture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/grafana-capture.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/external-access-loadbalancer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/external-access-loadbalancer.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/external-access-nodeports.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/external-access-nodeports.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/grafana-user-workload.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/grafana-user-workload.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/internal-access.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/internal-access.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/kafka-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/kafka-cluster.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/kafka-producer-consumer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/kafka-producer-consumer.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/kudo-controller-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/kudo-controller-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/kudo-installs-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/kudo-installs-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/kudo-kafka.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/kudo-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/kudo-update-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/kudo-update-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/operator-upgrade-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/operator-upgrade-1.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/images/operator-upgrade-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/latest/resources/images/operator-upgrade-2.png -------------------------------------------------------------------------------- /repository/kafka/docs/latest/resources/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app: prometheus-operator 6 | release: prometheus-kubeaddons 7 | name: kafka-cluster-monitor 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: metrics 12 | namespaceSelector: 13 | matchNames: 14 | - default 15 | selector: 16 | matchLabels: 17 | kudo.dev/servicemonitor: "true" 18 | 
-------------------------------------------------------------------------------- /repository/kafka/docs/latest/versions.md: -------------------------------------------------------------------------------- 1 | ### Versions 2 | 3 | - Apache Kafka 2.12-2.5.1 4 | - OpenJDK 8 5 | - JMX Exporter 0.1.0 6 | - Prometheus Node Exporter v0.18.1 7 | 8 | 9 | ### KUDO Version Requirement 10 | 11 | KUDO version 0.11.0 or later 12 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/README.md: -------------------------------------------------------------------------------- 1 | ## KUDO Kafka latest 2 | 3 | - [Installation](./install.md) 4 | - [Concepts](./concepts.md) 5 | - [Configuration](./configuration.md) 6 | - [Custom Configuration](./custom.md) 7 | - [Monitoring](./monitoring.md) 8 | - [Mirror Maker](./mirrormaker.md) 9 | - [Upgrading the Kafka operator](./upgrade.md) 10 | - [Scaling & Updating Cluster](./update.md) 11 | - [Running in Production](./production.md) 12 | - [Release notes](./release-notes.md) 13 | - [Limitations](./limitations.md) 14 | - [Versions](./versions.md) 15 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/concepts.md: -------------------------------------------------------------------------------- 1 | ![kudo-kafka](./resources/images/kudo-kafka.png) 2 | 3 | # KUDO Kafka Concepts 4 | 5 | KUDO Kafka is a Kubernetes operator built on top of [KUDO](kudo.dev) and requires KUDO 6 | 7 | #### KUDO Kafka CRDs 8 | 9 | There are three CRDs that are installed when deploying KUDO Kafka: 10 | 11 | - Operator: the definition that describes the Kudo Kafka operator. 12 | - OperatorVersion: the definition that describes the Kudo Kafka operator for a specific version. 13 | - Instance: the instantiation of a KUDO Kafka cluster based on the OperatorVersion. 
14 | 15 | #### KUDO Controller Reconcile Cycle 16 | 17 | The KUDO controller continually watches the Operator, OperatorVersion and Instance CRDs via the Kubernetes API. 18 | 19 | ![kudo-kafka](./resources/images/kudo-controller-kafka.png) 20 | 21 | When a user installs KUDO Kafka using the `kudo-cli`, the controller creates the KUDO Kafka CRDs for Operator, OperatorVersion and Instance. More information can be read in [KUDO Architecture](https://kudo.dev/docs/architecture.html#architecture-diagram) 22 | 23 | ![kudo-kafka](./resources/images/kudo-installs-kafka.png) 24 | 25 | When the KUDO Controller detects a new `Instance`, it creates all the resources required to reach the desired state of the configuration. 26 | 27 | ![kudo-kafka](./resources/images/kafka-cluster.png) 28 | 29 | The same process is followed for any updates or deletions. Everything is handled by the KUDO Controller. 30 | 31 | ![kudo-kafka](./resources/images/kudo-update-kafka.png) 32 | 33 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/cruise-control.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/cruise-control.md -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/install.md: -------------------------------------------------------------------------------- 1 | # Installing the KUDO Kafka Operator 2 | 3 | Requirements: 4 | 5 | - Install the [KUDO controller](https://kudo.dev/docs/getting-started/) 6 | - Install the [KUDO CLI](https://kudo.dev/docs/cli/) 7 | 8 | This guide explains the basic installation for both KUDO Kafka and KUDO Zookeeper. 
9 | To run a production-grade KUDO Kafka cluster, please read [KUDO Kafka in production](./production.md) 10 | 11 | ## Installing the Operator 12 | 13 | #### Install Zookeeper 14 | ``` 15 | kubectl kudo install zookeeper --instance=zk 16 | ``` 17 | 18 | #### Install Kafka 19 | 20 | Please read the [limitations](./limitations.md) docs before creating the KUDO Kafka cluster. 21 | 22 | ``` 23 | kubectl kudo install kafka --instance=kafka 24 | ``` 25 | 26 | Verify that the deploy plan for `--instance=kafka` is complete. 27 | ``` 28 | kubectl kudo plan status --instance=kafka 29 | Plan(s) for "kafka" in namespace "default": 30 | . 31 | └── kafka (Operator-Version: "kafka-0.1.2" Active-Plan: "kafka-deploy-177524647") 32 | └── Plan deploy (serial strategy) [COMPLETE] 33 | └── Phase deploy-kafka (serial strategy) [COMPLETE] 34 | └── Step deploy (COMPLETE) 35 | ``` 36 | 37 | You can view all configuration options [here](./configuration.md) 38 | 39 | #### Installing multiple Kafka Clusters 40 | 41 | ``` 42 | kubectl kudo install kafka --instance=kafka-1 43 | kubectl kudo install kafka --instance=kafka-2 44 | kubectl kudo install kafka --instance=kafka-3 45 | ``` 46 | 47 | The above commands will install three kafka clusters using the same zookeeper. 48 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/limitations.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Limitations 4 | 5 | Below is a list of parameters that can only be configured during bootstrap time. 6 | 7 | |Immutable Parameters| 8 | | ------------------ | 9 | | DISK_SIZE | 10 | | STORAGE_CLASS | 11 | | PERSISTENT_STORAGE | 12 | 13 | These storage-related parameters cannot be changed after initial deployment. Repeat: using parameters to resize disk, change storage class, or switch between persistent/ephemeral storage is not supported. 
14 | 15 | Changing the above parameters will trigger a `not-allowed` plan, which skips updating any resources. 16 | This is to avoid any update done by mistake or human error. 17 | 18 | ### Resizing the PVC 19 | 20 | Resizing the disk depends on the storage class of your Kubernetes cluster. 21 | To resize the disk being used by a broker, users can edit the `pvc` and expand the disk. 22 | You can read more about it in [resizing the PVC](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) post. 23 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/release-notes.md: -------------------------------------------------------------------------------- 1 | # Release notes 2 | 3 | 4 | ## v1.0.2 5 | 6 | - Add parameter `KERBEROS_USE_TCP` to use TCP protocol for KDC connection 7 | - Add parameter `ADD_SERVICE_MONITOR` to create a service monitor for KUDO Kafka cluster. 8 | - Enabled [external KUDO Kafka access](./external-access.md). 9 | - Add parameter `RUN_USER_WORKLOAD` to run some [testing workload over Kafka service](./kudo-kafka-runbook.md). 10 | - Add disk usage [metrics](./monitoring.md) 11 | - Add built-in [mirror-maker integration](./mirrormaker.md) 12 | 13 | ## v1.0.1 14 | 15 | - Apache Kafka upgraded to 2.3.1 16 | 17 | ## v1.0.0 18 | 19 | - Exposed configuration for livenessProbe and readinessProbe 20 | - User can enable advanced service health checks. 
Option to choose between a simple port-based check and an advanced producer-consumer check based on a custom heartbeat topic 21 | - Support for TLS encryption with custom certificates 22 | - Support for Kerberos authentication 23 | - Support for Kafka AuthZ 24 | 25 | ## v0.2.0 26 | July 30th, 2019 27 | 28 | - Apache Kafka upgraded to 2.3.0 29 | - livenessProbe and readinessProbes made less aggressive 30 | - Added CPU/Memory limits for the stateful pods 31 | - Updated the default value of Zookeeper URI 32 | - not-allowed plan added to prevent updating storage parameters -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/grafana-capture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/grafana-capture.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/external-access-loadbalancer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/external-access-loadbalancer.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/external-access-nodeports.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/external-access-nodeports.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/grafana-user-workload.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/grafana-user-workload.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/internal-access.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/internal-access.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/kafka-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/kafka-cluster.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/kafka-producer-consumer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/kafka-producer-consumer.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/kudo-controller-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/kudo-controller-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/kudo-installs-kafka.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/kudo-installs-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/kudo-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/kudo-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/kudo-update-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/kudo-update-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/operator-upgrade-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/operator-upgrade-1.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/images/operator-upgrade-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.0/resources/images/operator-upgrade-2.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/resources/service-monitor.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app: prometheus-operator 6 | release: prometheus-kubeaddons 7 | name: kafka-cluster-monitor 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: metrics 12 | namespaceSelector: 13 | matchNames: 14 | - default 15 | selector: 16 | matchLabels: 17 | kudo.dev/servicemonitor: "true" 18 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.0/versions.md: -------------------------------------------------------------------------------- 1 | ### Versions 2 | 3 | - Apache Kafka 2.12-2.3.1 4 | - OpenJDK 8 5 | - JMX Exporter 0.1.0 6 | 7 | 8 | ### KUDO Version Requirement 9 | 10 | KUDO version 0.8.0 or later 11 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/README.md: -------------------------------------------------------------------------------- 1 | ## KUDO Kafka latest 2 | 3 | - [Installation](./install.md) 4 | - [Concepts](./concepts.md) 5 | - [Configuration](./configuration.md) 6 | - [Custom Configuration](./custom.md) 7 | - [Monitoring](./monitoring.md) 8 | - [Mirror Maker](./mirrormaker.md) 9 | - [Upgrading the Kafka operator](./upgrade.md) 10 | - [Scaling & Updating Cluster](./update.md) 11 | - [Running in Production](./production.md) 12 | - [Release notes](./release-notes.md) 13 | - [Limitations](./limitations.md) 14 | - [Versions](./versions.md) 15 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/concepts.md: -------------------------------------------------------------------------------- 1 | ![kudo-kafka](./resources/images/kudo-kafka.png) 2 | 3 | # KUDO Kafka Concepts 4 | 5 | KUDO Kafka is a Kubernetes operator built on top of [KUDO](kudo.dev) and requires KUDO 6 | 7 | #### KUDO Kafka CRDs 8 | 9 | There are 
three CRDs that are installed when deploying KUDO Kafka: 10 | 11 | - Operator: the definition that describes the Kudo Kafka operator. 12 | - OperatorVersion: the definition that describes the Kudo Kafka operator for a specific version. 13 | - Instance: the instantiation of a KUDO Kafka cluster based on the OperatorVersion. 14 | 15 | #### KUDO Controller Reconcile Cycle 16 | 17 | The KUDO controller continually watches the Operator, OperatorVersion and Instance CRDs via the Kubernetes API. 18 | 19 | ![kudo-kafka](./resources/images/kudo-controller-kafka.png) 20 | 21 | When a user installs KUDO Kafka using the `kudo-cli`, the controller creates the KUDO Kafka CRDs for Operator, OperatorVersion and Instance. More information can be read in [KUDO Architecture](https://kudo.dev/docs/architecture.html#architecture-diagram) 22 | 23 | ![kudo-kafka](./resources/images/kudo-installs-kafka.png) 24 | 25 | When the KUDO Controller detects a new `Instance`, it creates all the resources required to reach the desired state of the configuration. 26 | 27 | ![kudo-kafka](./resources/images/kafka-cluster.png) 28 | 29 | The same process is followed for any updates or deletions. Everything is handled by the KUDO Controller. 30 | 31 | ![kudo-kafka](./resources/images/kudo-update-kafka.png) 32 | 33 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/install.md: -------------------------------------------------------------------------------- 1 | # Installing the KUDO Kafka Operator 2 | 3 | Requirements: 4 | 5 | - Install the [KUDO controller](https://kudo.dev/docs/getting-started/) 6 | - Install the [KUDO CLI](https://kudo.dev/docs/cli/) 7 | 8 | This guide explains the basic installation for both KUDO Kafka and KUDO Zookeeper. 
9 | To run a production-grade KUDO Kafka cluster, please read [KUDO Kafka in production](./production.md) 10 | 11 | ## Installing the Operator 12 | 13 | #### Install Zookeeper 14 | ``` 15 | kubectl kudo install zookeeper --instance=zk 16 | ``` 17 | 18 | #### Install Kafka 19 | 20 | Please read the [limitations](./limitations.md) docs before creating the KUDO Kafka cluster. 21 | 22 | ``` 23 | kubectl kudo install kafka 24 | ``` 25 | 26 | Verify that the deploy plan for `--instance=kafka` is complete. 27 | ``` 28 | kubectl kudo plan status --instance=kafka 29 | Plan(s) for "kafka" in namespace "default": 30 | . 31 | └── kafka (Operator-Version: "kafka-1.1.0" Active-Plan: "kafka-deploy-177524647") 32 | └── Plan deploy (serial strategy) [COMPLETE] 33 | └── Phase deploy-kafka (serial strategy) [COMPLETE] 34 | └── Step deploy (COMPLETE) 35 | ``` 36 | 37 | You can view all configuration options [here](./configuration.md) 38 | 39 | #### Installing multiple Kafka Clusters 40 | 41 | ``` 42 | kubectl kudo install kafka --instance=kafka-1 43 | kubectl kudo install kafka --instance=kafka-2 44 | kubectl kudo install kafka --instance=kafka-3 45 | ``` 46 | 47 | The above commands will install three kafka clusters using the same zookeeper. 48 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/limitations.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Limitations 4 | 5 | Below is a list of parameters that can only be configured during bootstrap time. 6 | 7 | |Immutable Parameters| 8 | | ------------------ | 9 | | DISK_SIZE | 10 | | STORAGE_CLASS | 11 | | PERSISTENT_STORAGE | 12 | 13 | These storage-related parameters cannot be changed after initial deployment. Repeat: using parameters to resize disk, change storage class, or switch between persistent/ephemeral storage is not supported. 14 | 15 | Changing the above parameters will trigger a `not-allowed` plan. 
Which basically skips updating any resources. 16 | This is to avoid any update done by mistake or human error. 17 | 18 | ### Resizing the PVC 19 | 20 | Resizing the disk depends on the storage class of your Kubernetes cluster. 21 | To resize the disk being used by a broker, users can edit the `pvc` and expand the disk. 22 | You can read more about it in [resizing the PVC](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) post. 23 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/release-notes.md: -------------------------------------------------------------------------------- 1 | # Release notes 2 | 3 | 4 | ## v1.1.0 5 | 6 | - Apache Kafka upgraded to 2.4.0 7 | 8 | ## v1.0.2 9 | 10 | - Add parameter `KERBEROS_USE_TCP` to use TCP protocol for KDC connection 11 | - Add parameter `ADD_SERVICE_MONITOR` to create a service monitor for KUDO Kafka cluster. 12 | - Enabled [external KUDO Kafka access](./external-access.md). 13 | - Add parameter `RUN_USER_WORKLOAD` to run some [testing workload over Kafka service](./kudo-kafka-runbook.md). 14 | - Add disk usage [metrics](./monitoring.md) 15 | - Add built-in [mirror-maker integration](./mirrormaker.md) 16 | 17 | ## v1.0.1 18 | 19 | - Apache Kafka upgraded to 2.3.1 20 | 21 | ## v1.0.0 22 | 23 | - Exposed configuration for livenessProbe and readinessProbe 24 | - User can enable advanced service health checks. 
Option to choose between a simple port-based check and an advanced producer-consumer check based on a custom heartbeat topic 25 | - Support for TLS encryption with custom certificates 26 | - Support for Kerberos authentication 27 | - Support for Kafka AuthZ 28 | 29 | ## v0.2.0 30 | July 30th, 2019 31 | 32 | - Apache Kafka upgraded to 2.3.0 33 | - livenessProbe and readinessProbes made less aggressive 34 | - Added CPU/Memory limits for the stateful pods 35 | - Updated the default value of Zookeeper URI 36 | - not-allowed plan added to prevent updating storage parameters -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/grafana-capture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/grafana-capture.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/external-access-loadbalancer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/external-access-loadbalancer.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/external-access-nodeports.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/external-access-nodeports.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/grafana-user-workload.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/grafana-user-workload.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/internal-access.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/internal-access.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/kafka-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/kafka-cluster.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/kafka-producer-consumer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/kafka-producer-consumer.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/kudo-controller-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/kudo-controller-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/kudo-installs-kafka.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/kudo-installs-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/kudo-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/kudo-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/kudo-update-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/kudo-update-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/operator-upgrade-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/operator-upgrade-1.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/images/operator-upgrade-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.1/resources/images/operator-upgrade-2.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/resources/service-monitor.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app: prometheus-operator 6 | release: prometheus-kubeaddons 7 | name: kafka-cluster-monitor 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: metrics 12 | namespaceSelector: 13 | matchNames: 14 | - default 15 | selector: 16 | matchLabels: 17 | kudo.dev/servicemonitor: "true" 18 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.1/versions.md: -------------------------------------------------------------------------------- 1 | ### Versions 2 | 3 | - Apache Kafka 2.12-2.4.0 4 | - OpenJDK 8 5 | - JMX Exporter 0.1.0 6 | 7 | 8 | ### KUDO Version Requirement 9 | 10 | KUDO version 0.8.0 or later 11 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/README.md: -------------------------------------------------------------------------------- 1 | ![kudo-kafka](./resources/images/kudo-kafka.png) 2 | 3 | ## KUDO Kafka latest 4 | 5 | - [Installation](./install.md) 6 | - [Concepts](./concepts.md) 7 | - [Configuration](./configuration.md) 8 | - [Custom Configuration](./custom.md) 9 | - [Monitoring](./monitoring.md) 10 | - [Mirror Maker](./mirrormaker.md) 11 | - [Kafka Connect](./kafka-connect.md) 12 | - [Cruise Control](./cruise-control.md) 13 | - [Upgrading the Kafka operator](./upgrade.md) 14 | - [Scaling & Updating Cluster](./update.md) 15 | - [Running in Production](./production.md) 16 | - [Release notes](./release-notes.md) 17 | - [Limitations](./limitations.md) 18 | - [Versions](./versions.md) 19 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/concepts.md: -------------------------------------------------------------------------------- 1 | # KUDO Kafka Concepts 2 | 3 | KUDO Kafka is a Kubernetes operator built on top 
of [KUDO](http://kudo.dev) and requires KUDO 4 | 5 | #### KUDO Kafka CRDs 6 | 7 | There are three CRDs that are installed when deploying KUDO Kafka: 8 | 9 | - Operator: the definition that describes the Kudo Kafka operator. 10 | - OperatorVersion: the definition that describes the Kudo Kafka operator for a specific version. 11 | - Instance: the instantiation of a KUDO Kafka cluster based on the OperatorVersion. 12 | 13 | #### KUDO Controller Reconcile Cycle 14 | 15 | The KUDO controller continually watches the Operator, OperatorVersion and Instance CRDs via the Kubernetes API. 16 | 17 | ![kudo-kafka](./resources/images/kudo-controller-kafka.png) 18 | 19 | When a user installs KUDO Kafka using the `kudo-cli`, the controller creates the KUDO Kafka CRDs for Operator, OperatorVersion and Instance. More information can be read in [KUDO Architecture](https://kudo.dev/docs/architecture.html#architecture-diagram) 20 | 21 | ![kudo-kafka](./resources/images/kudo-installs-kafka.png) 22 | 23 | When the KUDO Controller detects a new `Instance`, it creates all the resources required to reach the desired state of the configuration. 24 | 25 | ![kudo-kafka](./resources/images/kafka-cluster.png) 26 | 27 | The same process is followed for any updates or deletions. Everything is handled by the KUDO Controller. 28 | 29 | ![kudo-kafka](./resources/images/kudo-update-kafka.png) 30 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/cruise-control.md: -------------------------------------------------------------------------------- 1 | # Cruise Control 2 | 3 | ## Overview 4 | 5 | KUDO Kafka operator comes with builtin integration of [Cruise Control](https://github.com/linkedin/cruise-control). 6 | 7 | Cruise Control is a tool to fully automate the dynamic workload rebalance and self-healing of a kafka cluster. It provides great value to Kafka users by simplifying the operation of Kafka clusters. 
 8 | 9 | Cruise Control integration is disabled by default. 10 | 11 | ## Setup Cruise Control 12 | 13 | ### Start Cruise Control 14 | 15 | Update the instance with `ENABLE_CRUISE_CONTROL` set to start Cruise Control alongside the KUDO Kafka instance. 16 | 17 | ```bash 18 | $ kubectl kudo update --instance=kafka \ 19 | -p ENABLE_CRUISE_CONTROL=true 20 | ``` 21 | ## Advanced Options 22 | 23 | |Parameter|Description|Example| 24 | |--|--|--| 25 | | CRUISE_CONTROL_PORT | Port for the Cruise Control server to listen to | • 9090 (default) | 26 | | CRUISE_CONTROL_WEBSERVER_API_URLPREFIX | Cruise Control REST API default prefix | • "/kafkacruisecontrol/*" (default) | 27 | | CRUISE_CONTROL_WEBSERVER_UI_URLPREFIX | Cruise Control REST Web UI default path prefix | • "/*" (default) | 28 | 29 | ## Disable Cruise Control 30 | 31 | To disable Cruise Control, update the 32 | instance with `ENABLE_CRUISE_CONTROL` set to `false`, using the following command: 33 | 34 | ```bash 35 | $ kubectl kudo update --instance=kafka \ 36 | -p ENABLE_CRUISE_CONTROL=false 37 | ``` 38 | 39 | ## Limitations 40 | 41 | Currently Cruise Control works with Kafka protocol `PLAINTEXT` only. It will not work if Kerberos and/or TLS is 42 | enabled in the Kafka instance. Future releases of KUDO Kafka will 43 | address this limitation through a Cruise Control operator. 44 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/limitations.md: -------------------------------------------------------------------------------- 1 | # Limitations 2 | 3 | Below is a list of parameters that can only be configured during bootstrap time. 4 | 5 | |Immutable Parameters| 6 | | ------------------ | 7 | | DISK_SIZE | 8 | | STORAGE_CLASS | 9 | | PERSISTENT_STORAGE | 10 | 11 | These storage-related parameters cannot be changed after initial deployment. Repeat: using parameters to resize disk, change storage class, or switch between persistent/ephemeral storage is not supported. 12 | 13 | Changing the above parameters will trigger a `not-allowed` plan, which skips updating any resources. 14 | This is to avoid any update done by mistake or human error. 15 | 16 | ### Resizing the PVC 17 | 18 | Resizing the disk depends on the storage class of your Kubernetes cluster. 19 | To resize the disk being used by a broker, users can edit the `pvc` and expand the disk. 20 | You can read more about it in [resizing the PVC](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) post. 
21 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/grafana-capture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/grafana-capture.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/external-access-loadbalancer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/external-access-loadbalancer.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/external-access-nodeports.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/external-access-nodeports.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/grafana-user-workload.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/grafana-user-workload.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/internal-access.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/internal-access.png 
-------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/kafka-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/kafka-cluster.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/kafka-producer-consumer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/kafka-producer-consumer.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/kudo-controller-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/kudo-controller-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/kudo-installs-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/kudo-installs-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/kudo-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/kudo-kafka.png 
-------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/kudo-update-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/kudo-update-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/operator-upgrade-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/operator-upgrade-1.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/images/operator-upgrade-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.2/resources/images/operator-upgrade-2.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/resources/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app: prometheus-operator 6 | release: prometheus-kubeaddons 7 | name: kafka-cluster-monitor 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: metrics 12 | namespaceSelector: 13 | matchNames: 14 | - default 15 | selector: 16 | matchLabels: 17 | kudo.dev/servicemonitor: "true" 18 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.2/versions.md: -------------------------------------------------------------------------------- 1 | ### 
Versions 2 | 3 | - Apache Kafka 2.12-2.4.1 4 | - OpenJDK 8 5 | - JMX Exporter 0.1.0 6 | - Prometheus Node Exporter v0.18.1 7 | 8 | 9 | ### KUDO Version Requirement 10 | 11 | KUDO version 0.11.0 or later 12 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/README.md: -------------------------------------------------------------------------------- 1 | ![kudo-kafka](./resources/images/kudo-kafka.png) 2 | 3 | ## KUDO Kafka latest 4 | 5 | - [Installation](./install.md) 6 | - [Concepts](./concepts.md) 7 | - [Configuration](./configuration.md) 8 | - [Custom Configuration](./custom.md) 9 | - [Monitoring](./monitoring.md) 10 | - [Mirror Maker](./mirrormaker.md) 11 | - [Kafka Connect](./kafka-connect.md) 12 | - [Cruise Control](./cruise-control.md) 13 | - [Upgrading the Kafka operator](./upgrade.md) 14 | - [Scaling & Updating Cluster](./update.md) 15 | - [Running in Production](./production.md) 16 | - [Release notes](./release-notes.md) 17 | - [Limitations](./limitations.md) 18 | - [Versions](./versions.md) 19 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/concepts.md: -------------------------------------------------------------------------------- 1 | # KUDO Kafka Concepts 2 | 3 | KUDO Kafka is a Kubernetes operator built on top of [KUDO](http://kudo.dev) and requires KUDO 4 | 5 | #### KUDO Kafka CRDs 6 | 7 | There are three CRDs that are installed when deploying KUDO Kafka: 8 | 9 | - Operator: the definition that describes the Kudo Kafka operator. 10 | - OperatorVersion: the definition that describes the Kudo Kafka operator for a specific version. 11 | - Instance: the instantiation of a KUDO Kafka cluster based on the OperatorVersion. 12 | 13 | #### KUDO Controller Reconcile Cycle 14 | 15 | The KUDO controller continually watches the Operator, OperatorVersion and Instance CRDs via the Kubernetes API. 
16 | 17 | ![kudo-kafka](./resources/images/kudo-controller-kafka.png) 18 | 19 | When a user installs KUDO Kafka using the `kudo-cli`, the controller creates the KUDO Kafka CRDs for Operator, OperatorVersion and Instance. More information can be read in [KUDO Architecture](https://kudo.dev/docs/architecture.html#architecture-diagram) 20 | 21 | ![kudo-kafka](./resources/images/kudo-installs-kafka.png) 22 | 23 | When the KUDO Controller detects a new `Instance`, it creates all the resources required to reach the desired state of the configuration. 24 | 25 | ![kudo-kafka](./resources/images/kafka-cluster.png) 26 | 27 | The same process is followed for any updates or deletions. Everything is handled by the KUDO Controller. 28 | 29 | ![kudo-kafka](./resources/images/kudo-update-kafka.png) 30 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/cruise-control.md: -------------------------------------------------------------------------------- 1 | # Cruise Control 2 | 3 | ## Overview 4 | 5 | KUDO Kafka operator comes with builtin integration of [Cruise Control](https://github.com/linkedin/cruise-control). 6 | 7 | Cruise Control is a tool to fully automate the dynamic workload rebalance and self-healing of a kafka cluster. It provides great value to Kafka users by simplifying the operation of Kafka clusters. 8 | 9 | Cruise Control integration is disabled by default. 10 | 11 | ## Setup Cruise Control 12 | 13 | ### Start Cruise Control 14 | 15 | Update the instance with `ENABLE_CRUISE_CONTROL` set to start Cruise Control alongside the KUDO Kafka instance. 16 | 17 | ```bash 18 | $ kubectl kudo update --instance=kafka \ 19 | -p ENABLE_CRUISE_CONTROL=true 20 | ``` 21 | ## Advanced Options 22 | 23 | |Parameter|Description|Example| 24 | |--|--|--| 25 | | CRUISE_CONTROL_PORT | Port for the Cruise Control server to listen to |
  • 9090 (default)
| 26 | | CRUISE_CONTROL_WEBSERVER_API_URLPREFIX | Cruise Control REST API default prefix |
  • "/kafkacruisecontrol/*" (default)
| 27 | | CRUISE_CONTROL_WEBSERVER_UI_URLPREFIX | Cruise Control REST Web UI default path prefix |
  • "/*" (default)
| 28 | 29 | ## Disable Cruise Control 30 | 31 | To disable Cruise Control, update the 32 | instance with `ENABLE_CRUISE_CONTROL` set to `false`, using the following command: 33 | 34 | ```bash 35 | $ kubectl kudo update --instance=kafka \ 36 | -p ENABLE_CRUISE_CONTROL=false 37 | ``` 38 | 39 | ## Limitations 40 | 41 | Currently Cruise Control works with Kafka protocol `PLAINTEXT` only. It will not work if Kerberos and/or TLS are 42 | enabled in the Kafka instance. Future releases of KUDO Kafka will 43 | address this limitation through a Cruise Control operator. 44 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/limitations.md: -------------------------------------------------------------------------------- 1 | # Limitations 2 | 3 | Below is a list of parameters that can only be configured at bootstrap time. 4 | 5 | |Immutable Parameters| 6 | | ------------------ | 7 | | DISK_SIZE | 8 | | STORAGE_CLASS | 9 | | PERSISTENT_STORAGE | 10 | 11 | These storage-related parameters cannot be changed after initial deployment. In other words, using parameters to resize the disk, change the storage class, or switch between persistent and ephemeral storage is not supported. 12 | 13 | Changing the above parameters will trigger a `not-allowed` plan, which skips updating any resources. 14 | This is to avoid any update done by mistake or human error. 15 | 16 | ### Resizing the PVC 17 | 18 | Resizing the disk depends on the storage class of your Kubernetes cluster. 19 | To resize the disk being used by a broker, users can edit the `pvc` and expand the disk. 20 | You can read more about it in the [resizing the PVC](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) post. 
21 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/grafana-capture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/grafana-capture.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/external-access-loadbalancer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/external-access-loadbalancer.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/external-access-nodeports.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/external-access-nodeports.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/grafana-user-workload.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/grafana-user-workload.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/internal-access.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/internal-access.png 
-------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/kafka-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/kafka-cluster.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/kafka-producer-consumer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/kafka-producer-consumer.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/kudo-controller-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/kudo-controller-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/kudo-installs-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/kudo-installs-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/kudo-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/kudo-kafka.png 
-------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/kudo-update-kafka.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/kudo-update-kafka.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/operator-upgrade-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/operator-upgrade-1.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/images/operator-upgrade-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/kafka/docs/v1.3/resources/images/operator-upgrade-2.png -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/resources/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app: prometheus-operator 6 | release: prometheus-kubeaddons 7 | name: kafka-cluster-monitor 8 | spec: 9 | endpoints: 10 | - interval: 30s 11 | port: metrics 12 | namespaceSelector: 13 | matchNames: 14 | - default 15 | selector: 16 | matchLabels: 17 | kudo.dev/servicemonitor: "true" 18 | -------------------------------------------------------------------------------- /repository/kafka/docs/v1.3/versions.md: -------------------------------------------------------------------------------- 1 | ### 
Versions 2 | 3 | - Apache Kafka 2.12-2.5.1 4 | - OpenJDK 8 5 | - JMX Exporter 0.1.0 6 | - Prometheus Node Exporter v0.18.1 7 | 8 | 9 | ### KUDO Version Requirement 10 | 11 | KUDO version 0.11.0 or later 12 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/cert-generator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | spec: 4 | volumes: 5 | - name: cert-out 6 | emptyDir: {} 7 | initContainers: 8 | - name: init 9 | {{ if eq .Params.USE_AUTO_TLS_CERTIFICATE "true" }} 10 | image: mesosphere/kafka:1.1.0-2.4.0 11 | {{ else }} 12 | image: busybox:1.31.1 13 | {{ end }} 14 | command: [ "/bin/sh", "-c" ] 15 | args: 16 | {{ if eq .Params.USE_AUTO_TLS_CERTIFICATE "true" }} 17 | - openssl req -x509 -newkey rsa:4096 -sha256 -nodes -keyout /tmp/tls.key -out /tmp/tls.crt -subj "/CN=KudoKafkaCA" -days 365 18 | {{ else }} 19 | - touch /tmp/tls.key && touch /tmp/tls.crt 20 | {{ end }} 21 | volumeMounts: 22 | - name: cert-out 23 | mountPath: /tmp 24 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{ if eq .Params.EXTERNAL_ADVERTISED_LISTENER_TYPE "NodePort" }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ .Name }}-clusterrole 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["nodes"] 9 | verbs: ["get", "list", "watch"] 10 | {{ end }} 11 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{ if eq .Params.EXTERNAL_ADVERTISED_LISTENER_TYPE "NodePort" }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: 
{{ .Name }}-clusterscope-binding 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: {{ .Name }}-clusterrole 10 | subjects: 11 | - kind: ServiceAccount 12 | name: {{ .Name }} 13 | namespace: {{ .Namespace }} 14 | {{ end }} 15 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/cruise-control-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }}-cruise-control-svc 5 | namespace: {{ .Namespace }} 6 | {{ if eq .Params.METRICS_ENABLED "true" }} 7 | labels: 8 | "kudo.dev/servicemonitor": "true" 9 | {{ end }} 10 | spec: 11 | ports: 12 | - port: {{ .Params.CRUISE_CONTROL_PORT }} 13 | name: http 14 | clusterIP: None 15 | selector: 16 | app: cruise-control 17 | kudo.dev/instance: {{ .Name }} 18 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/cruise-control.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: {{ .Name }}-cruise-control 5 | namespace: {{ .Namespace }} 6 | labels: 7 | app: cruise-control 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: cruise-control 12 | replicas: 1 13 | template: 14 | metadata: 15 | labels: 16 | app: cruise-control 17 | spec: 18 | terminationGracePeriodSeconds: 30 19 | containers: 20 | - name: cruise-control 21 | image: mesosphere/cruise-control:2.0.77-1.3.0 22 | imagePullPolicy: Always 23 | ports: 24 | - name: http 25 | containerPort: {{ .Params.CRUISE_CONTROL_PORT }} 26 | command: 27 | - bash 28 | - -c 29 | args: 30 | - /opt/cruise-control/start.sh 31 | env: 32 | - name: CRUISE_CONTROL_NAMESPACE 33 | valueFrom: 34 | fieldRef: 35 | fieldPath: metadata.namespace 36 | - name: CRUISE_CONTROL_INSTANCE_NAME 37 | value: "{{ .Name }}" 38 | resources: 39 | 
requests: 40 | memory: {{ .Params.CRUISE_CONTROL_MEM }} 41 | cpu: {{ .Params.CRUISE_CONTROL_CPUS }} 42 | limits: 43 | memory: {{ .Params.CRUISE_CONTROL_MEM_LIMIT }} 44 | cpu: {{ .Params.CRUISE_CONTROL_CPUS_LIMIT }} 45 | readinessProbe: 46 | tcpSocket: 47 | port: {{ .Params.CRUISE_CONTROL_PORT }} 48 | timeoutSeconds: 1 49 | volumeMounts: 50 | - name: cc-props 51 | mountPath: /etc/cruise-control/config 52 | volumes: 53 | - name: cc-props 54 | configMap: 55 | name: {{ .Name }}-cruisecontrol-props 56 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/external-service.yaml: -------------------------------------------------------------------------------- 1 | {{ range $i, $v := until (int .Params.BROKER_COUNT) }} 2 | --- 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: {{ $.Name }}-kafka-{{ $v }}-external 7 | namespace: {{ $.Namespace }} 8 | spec: 9 | type: {{ $.Params.EXTERNAL_ADVERTISED_LISTENER_TYPE }} 10 | externalTrafficPolicy: Local 11 | selector: 12 | statefulset.kubernetes.io/pod-name: {{ $.Name }}-kafka-{{ $v }} 13 | ports: 14 | - protocol: TCP 15 | {{ if eq $.Params.EXTERNAL_ADVERTISED_LISTENER_TYPE "LoadBalancer" }} 16 | port: {{ $.Params.EXTERNAL_ADVERTISED_LISTENER_PORT }} 17 | targetPort: {{ $.Params.EXTERNAL_ADVERTISED_LISTENER_PORT }} 18 | {{ end }} 19 | {{ if eq $.Params.EXTERNAL_ADVERTISED_LISTENER_TYPE "NodePort" }} 20 | port: {{ add (int $.Params.EXTERNAL_NODE_PORT) $v }} 21 | targetPort: {{ add (int $.Params.EXTERNAL_NODE_PORT) $v }} 22 | nodePort: {{ add (int $.Params.EXTERNAL_NODE_PORT) $v }} 23 | {{ end }} 24 | {{ end }} 25 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/jaas-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Name }}-jaas-config 5 | namespace: {{ .Namespace }} 6 | data: 7 | 
kafka_server_jaas.conf: | 8 | {{ if eq .Params.KERBEROS_ENABLED "true" }} 9 | KafkaServer { 10 | com.sun.security.auth.module.Krb5LoginModule required 11 | useKeyTab=true 12 | storeKey=true 13 | useTicketCache=false 14 | keyTab="kafka.keytab" 15 | principal="{{ .Params.KERBEROS_PRIMARY }}/@{{ .Params.KERBEROS_REALM }}"; 16 | }; 17 | 18 | {{ if eq .Params.KERBEROS_ENABLED_FOR_ZOOKEEPER "true" }} 19 | // Zookeeper client authentication 20 | Client { 21 | com.sun.security.auth.module.Krb5LoginModule required 22 | useKeyTab=true 23 | storeKey=true 24 | useTicketCache=false 25 | keyTab="kafka.keytab" 26 | principal="{{ .Params.KERBEROS_PRIMARY }}/@{{ .Params.KERBEROS_REALM }}"; 27 | }; 28 | {{ end }} 29 | {{ end }} 30 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/kafka-connect-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }}-connect-svc 5 | namespace: {{ .Namespace }} 6 | {{ if eq .Params.METRICS_ENABLED "true" }} 7 | labels: 8 | "kudo.dev/servicemonitor": "true" 9 | {{ end }} 10 | spec: 11 | ports: 12 | - port: {{ .Params.KAFKA_CONNECT_REST_PORT }} 13 | name: server 14 | clusterIP: None 15 | selector: 16 | app: kafka-connect 17 | kudo.dev/instance: {{ .Name }} -------------------------------------------------------------------------------- /repository/kafka/operator/templates/krb5-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Name }}-krb5-config 5 | namespace: {{ .Namespace }} 6 | data: 7 | krb5.conf: | 8 | [libdefaults] 9 | default_realm = {{ .Params.KERBEROS_REALM }} 10 | {{ if eq .Params.KERBEROS_USE_TCP "true" }} 11 | udp_preference_limit = 1 12 | {{ end }} 13 | 14 | [realms] 15 | {{ .Params.KERBEROS_REALM }} = { 16 | kdc = {{ .Params.KERBEROS_KDC_HOSTNAME }}:{{ 
.Params.KERBEROS_KDC_PORT }} 17 | } 18 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/mirror-maker-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ .Name }}-mirror-maker-config 5 | namespace: {{ .Namespace }} 6 | data: 7 | external.properties: | 8 | bootstrap.servers={{ .Params.MIRROR_MAKER_EXTERNAL_BOOTSTRAP_SERVERS }} 9 | {{ if eq .Params.MIRROR_MAKER_EXTERNAL_CLUSTER_TYPE "SOURCE" }} 10 | # Used when MIRROR_MAKER_EXTERNAL_CLUSTER_TYPE is SOURCE 11 | exclude.internal.topics=true 12 | group.id={{ .Name }}_mirror_maker_consumer 13 | auto.offset.reset=earliest 14 | {{ else }} 15 | # Used when MIRROR_MAKER_EXTERNAL_CLUSTER_TYPE is DESTINATION 16 | acks=1 17 | batch.size=100 18 | client.id={{ .Name }}_mirror_maker_producer 19 | {{ end }} 20 | self.properties: | 21 | bootstrap.servers={{ .Name }}-svc.{{ .Namespace }}.svc.cluster.local:{{ .Params.BROKER_PORT }} 22 | {{ if eq .Params.MIRROR_MAKER_EXTERNAL_CLUSTER_TYPE "SOURCE" }} 23 | # Used when MIRROR_MAKER_EXTERNAL_CLUSTER_TYPE is SOURCE 24 | acks=1 25 | batch.size=100 26 | client.id={{ .Name }}_mirror_maker_producer 27 | {{ else }} 28 | # Used when MIRROR_MAKER_EXTERNAL_CLUSTER_TYPE is DESTINATION 29 | exclude.internal.topics=true 30 | group.id={{ .Name }}_mirror_maker_consumer 31 | auto.offset.reset=earliest 32 | {{ end }} 33 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: {{ .Name }}-pdb 5 | namespace: {{ .Namespace }} 6 | spec: 7 | selector: 8 | matchLabels: 9 | app: kafka 10 | kudo.dev/instance: {{ .Name }} 11 | maxUnavailable: 1 
-------------------------------------------------------------------------------- /repository/kafka/operator/templates/role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | namespace: {{ .Namespace }} 5 | name: {{ .Name }}-role 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["pods"] 9 | verbs: ["get", "list", "watch"] 10 | - apiGroups: [""] 11 | resources: ["services"] 12 | verbs: ["get", "list", "watch"] 13 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: {{ .Name }}-binding 5 | namespace: {{ .Namespace }} 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: {{ .Name }}-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: {{ .Name }} 13 | namespace: {{ .Namespace }} 14 | -------------------------------------------------------------------------------- /repository/kafka/operator/templates/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | {{ if eq .Params.ADD_SERVICE_MONITOR "true" }} 2 | # service-monitor cannot use toggle task 3 | # as KUDO Kafka cannot guarantee the third-party CRDs to be present 4 | apiVersion: monitoring.coreos.com/v1 5 | kind: ServiceMonitor 6 | metadata: 7 | labels: 8 | app: prometheus-operator 9 | release: prometheus-kubeaddons 10 | name: {{ .Name }}-monitor 11 | namespace: {{ .Namespace }} 12 | spec: 13 | endpoints: 14 | - interval: 30s 15 | port: metrics 16 | - interval: 30s 17 | port: ne-metrics 18 | namespaceSelector: 19 | matchNames: 20 | - {{ .Namespace }} 21 | selector: 22 | matchLabels: 23 | kudo.dev/instance: {{ .Name }} 24 | kudo.dev/servicemonitor: "true" 25 | {{ end }} 26 | 
-------------------------------------------------------------------------------- /repository/kafka/operator/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }}-svc 5 | namespace: {{ .Namespace }} 6 | {{ if eq .Params.METRICS_ENABLED "true" }} 7 | labels: 8 | "kudo.dev/servicemonitor": "true" 9 | {{ end }} 10 | spec: 11 | ports: 12 | - port: {{ .Params.BROKER_PORT }} 13 | name: server 14 | {{ if eq .Params.TRANSPORT_ENCRYPTION_ENABLED "true" }} 15 | - port: {{ .Params.BROKER_PORT_TLS }} 16 | name: server-tls 17 | {{ end }} 18 | - port: {{ .Params.CLIENT_PORT }} 19 | name: client 20 | {{ if eq .Params.METRICS_ENABLED "true" }} 21 | - port: {{ .Params.METRICS_PORT }} 22 | name: metrics 23 | - port: {{ .Params.KAFKA_NODE_EXPORTER_PORT }} 24 | name: ne-metrics 25 | {{ end }} 26 | clusterIP: None 27 | selector: 28 | app: kafka 29 | kudo.dev/instance: {{ .Name }} -------------------------------------------------------------------------------- /repository/kafka/operator/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ .Name }} 5 | namespace: {{ .Namespace }} 6 | -------------------------------------------------------------------------------- /repository/kafka/tests/kafka-upgrade-test/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: zk 5 | status: 6 | planStatus: 7 | deploy: 8 | status: COMPLETE 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: zk-zookeeper 14 | spec: 15 | template: 16 | spec: 17 | containers: 18 | - name: kubernetes-zookeeper 19 | resources: 20 | requests: 21 | memory: "256Mi" 22 | cpu: "300m" 23 | status: 24 | readyReplicas: 1 25 | 
-------------------------------------------------------------------------------- /repository/kafka/tests/kafka-upgrade-test/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: zk 5 | spec: 6 | operatorVersion: 7 | name: zookeeper-3.4.14-0.3.1 8 | namespace: default 9 | kind: OperatorVersion 10 | name: "zk" 11 | parameters: 12 | NODE_COUNT: "1" 13 | MEMORY: "256Mi" 14 | CPUS: "0.3" 15 | NE_MEM: "10Mi" 16 | NE_CPUS: "50m" 17 | -------------------------------------------------------------------------------- /repository/kafka/tests/kafka-upgrade-test/01-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: kafka 5 | spec: 6 | operatorVersion: 7 | name: kafka-2.5.1-1.3.2 8 | namespace: default 9 | kind: OperatorVersion 10 | name: "kafka" 11 | parameters: 12 | BROKER_COUNT: "1" 13 | BROKER_MEM: "300Mi" 14 | BROKER_CPUS: "300m" 15 | ZOOKEEPER_URI: "zk-zookeeper-0.zk-hs:2181" 16 | NE_MEM: "10Mi" 17 | NE_CPUS: "50m" 18 | -------------------------------------------------------------------------------- /repository/kafka/tests/kafka-upgrade-test/02-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: kafka 5 | status: 6 | planStatus: 7 | deploy: 8 | status: COMPLETE 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: kafka-kafka 14 | spec: 15 | template: 16 | spec: 17 | containers: 18 | - name: kafka-node-exporter 19 | resources: 20 | requests: 21 | memory: "10Mi" 22 | cpu: "50m" 23 | - name: k8skafka 24 | resources: 25 | requests: 26 | memory: "400Mi" 27 | cpu: "300m" 28 | status: 29 | readyReplicas: 1 30 | -------------------------------------------------------------------------------- 
/repository/kafka/tests/kafka-upgrade-test/02-resize.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: kafka 5 | spec: 6 | operatorVersion: 7 | name: kafka-2.5.1-1.3.2 8 | namespace: default 9 | kind: OperatorVersion 10 | name: "kafka" 11 | parameters: 12 | BROKER_MEM: "400Mi" 13 | BROKER_CPUS: "300m" 14 | NE_MEM: "10Mi" 15 | NE_CPUS: "50m" 16 | -------------------------------------------------------------------------------- /repository/mysql/operator/params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | parameters: 3 | - name: BACKUP_FILE 4 | description: "Filename to save the backups to" 5 | default: "backup.sql" 6 | displayName: "BackupFile" 7 | trigger: backup 8 | - name: PASSWORD 9 | default: "password" 10 | - name: RESTORE_FILE 11 | default: "backup.sql" 12 | description: "Filename to restore the db from" 13 | trigger: restore 14 | - name: STORAGE 15 | default: 1Gi 16 | description: "Size of volume to store MySQL data" 17 | -------------------------------------------------------------------------------- /repository/mysql/operator/templates/backup-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: {{ .Name }}-backup-pv 5 | namespace: {{ .Namespace }} 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /repository/mysql/operator/templates/backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: {{ .PlanName }}-job 5 | namespace: {{ .Namespace }} 6 | spec: 7 | template: 8 | metadata: 9 | name: {{ .PlanName }}-job 10 | spec: 11 | 
restartPolicy: OnFailure 12 | containers: 13 | - name: {{ .PlanName }} 14 | image: mysql:5.7 15 | imagePullPolicy: IfNotPresent 16 | command: 17 | - /bin/sh 18 | - -c 19 | - "mysqldump -u root -h {{ .Name }}-svc -p{{ .Params.PASSWORD }} kudo > /backups/{{ .Params.BACKUP_FILE }}" 20 | volumeMounts: 21 | - name: backup-pv 22 | mountPath: /backups 23 | volumes: 24 | - name: backup-pv 25 | persistentVolumeClaim: 26 | claimName: {{ .Name }}-backup-pv 27 | -------------------------------------------------------------------------------- /repository/mysql/operator/templates/init.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: {{ .PlanName }}-job 5 | namespace: {{ .Namespace }} 6 | spec: 7 | template: 8 | metadata: 9 | name: {{ .PlanName }}-job 10 | spec: 11 | restartPolicy: OnFailure 12 | containers: 13 | - name: {{ .PlanName }} 14 | image: mysql:5.7 15 | imagePullPolicy: IfNotPresent 16 | command: 17 | - /bin/sh 18 | - -c 19 | - "mysql -u root -h {{ .Name }}-svc -p{{ .Params.PASSWORD }} -e \"CREATE TABLE example ( id smallint unsigned not null auto_increment, name varchar(20) not null, constraint pk_example primary key (id) );\" kudo " 20 | -------------------------------------------------------------------------------- /repository/mysql/operator/templates/restore.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: {{ .PlanName }}-job 5 | namespace: {{ .Namespace }} 6 | spec: 7 | template: 8 | metadata: 9 | name: {{ .PlanName }}-job 10 | spec: 11 | restartPolicy: OnFailure 12 | containers: 13 | - name: {{ .PlanName }} 14 | image: mysql:5.7 15 | imagePullPolicy: IfNotPresent 16 | command: 17 | - /bin/sh 18 | - -c 19 | - "mysql -u root -h {{ .Name }}-svc -p{{ .Params.PASSWORD }} --database=kudo < /backups/{{ .Params.RESTORE_FILE }}" 20 | volumeMounts: 21 | - name: backup-pv 22 
| mountPath: /backups 23 | volumes: 24 | - name: backup-pv 25 | persistentVolumeClaim: 26 | claimName: {{ .Name }}-backup-pv 27 | -------------------------------------------------------------------------------- /repository/rabbitmq/docs/README.md: -------------------------------------------------------------------------------- 1 | # RabbitMQ 2 | 3 | RabbitMQ is the most widely deployed open source message broker. The KUDO RabbitMQ Operator makes it easy to deploy and manage 4 | [RabbitMQ](https://www.rabbitmq.com/) on Kubernetes. 5 | -------------------------------------------------------------------------------- /repository/rabbitmq/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | name: "rabbitmq" 3 | operatorVersion: "0.1.1" 4 | kudoVersion: 0.10.0 5 | kubernetesVersion: 1.13.0 6 | appVersion: 3.8.0 7 | maintainers: 8 | - name: Harry Ge 9 | email: 10 | url: https://www.rabbitmq.com 11 | tasks: 12 | - name: node 13 | kind: Apply 14 | spec: 15 | resources: 16 | - serviceaccount.yaml 17 | - role.yaml 18 | - rolebinding.yaml 19 | - configmap.yaml 20 | - service.yaml 21 | - statefulset.yaml 22 | - name: not-allowed 23 | kind: Dummy 24 | spec: 25 | resources: 26 | plans: 27 | deploy: 28 | strategy: serial 29 | phases: 30 | - name: main 31 | strategy: parallel 32 | steps: 33 | - name: everything 34 | tasks: 35 | - node 36 | not-allowed: 37 | strategy: serial 38 | phases: 39 | - name: not-allowed 40 | strategy: serial 41 | steps: 42 | - name: not-allowed 43 | tasks: 44 | - not-allowed -------------------------------------------------------------------------------- /repository/rabbitmq/operator/params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | parameters: 3 | - name: NODE_COUNT 4 | description: "Number of rabbitmq servers." 
5 | default: "3" 6 | displayName: "Node Count" 7 | - name: AMQP_PORT 8 | description: "AMQP port" 9 | default: "5672" 10 | - name: MANAGEMENT_PORT 11 | description: "Management port" 12 | default: "15672" 13 | - name: MEMORY 14 | description: Amount of memory to provide to a RabbitMQ pod 15 | default: "512Mi" 16 | - name: MEM_LIMIT 17 | description: "Memory (limit) for the RabbitMQ pod. spec.containers[].resources.limits.memory" 18 | default: "1024Mi" 19 | - name: CPUS 20 | description: Amount of cpu to provide to a RabbitMQ pod 21 | default: "250m" 22 | - name: CPUS_LIMIT 23 | description: "CPUs (limit) for the RabbitMQ pod. spec.containers[].resources.limits.cpu" 24 | default: "1000m" 25 | - name: DISK_SIZE 26 | description: "Disk size for the RabbitMQ servers" 27 | default: "1Gi" 28 | trigger: "not-allowed" 29 | -------------------------------------------------------------------------------- /repository/rabbitmq/operator/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: rabbitmq-config 5 | namespace: {{ .Namespace }} 6 | data: 7 | enabled_plugins: | 8 | [rabbitmq_management,rabbitmq_peer_discovery_k8s]. 9 | rabbitmq.conf: | 10 | ## Cluster formation. See http://www.rabbitmq.com/cluster-formation.html to learn more. 11 | cluster_formation.peer_discovery_backend = rabbit_peer_discovery_k8s 12 | cluster_formation.k8s.host = kubernetes.default.svc.cluster.local 13 | ## Should RabbitMQ node name be computed from the pod's hostname or IP address? 14 | ## IP addresses are not stable, so using [stable] hostnames is recommended when possible. 15 | ## Set to "hostname" to use pod hostnames. 16 | ## When this value is changed, so should the variable used to set the RABBITMQ_NODENAME 17 | ## environment variable. 18 | cluster_formation.k8s.address_type = hostname 19 | ## How often should node cleanup checks run? 
20 | cluster_formation.node_cleanup.interval = 30 21 | ## Set to false if automatic removal of unknown/absent nodes 22 | ## is desired. This can be dangerous, see 23 | ## * http://www.rabbitmq.com/cluster-formation.html#node-health-checks-and-cleanup 24 | ## * https://groups.google.com/forum/#!msg/rabbitmq-users/wuOfzEywHXo/k8z_HWIkBgAJ 25 | cluster_formation.node_cleanup.only_log_warning = true 26 | cluster_partition_handling = autoheal 27 | ## See http://www.rabbitmq.com/ha.html#master-migration-data-locality 28 | queue_master_locator=min-masters 29 | ## enable guest user 30 | loopback_users.guest = false -------------------------------------------------------------------------------- /repository/rabbitmq/operator/templates/role.yaml: -------------------------------------------------------------------------------- 1 | kind: Role 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: rabbitmq-peer-discovery-rbac 5 | namespace: {{ .Namespace }} 6 | rules: 7 | - apiGroups: [""] 8 | resources: ["endpoints"] 9 | verbs: ["get"] -------------------------------------------------------------------------------- /repository/rabbitmq/operator/templates/rolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: RoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | metadata: 4 | name: rabbitmq-peer-discovery-rbac 5 | namespace: {{ .Namespace }} 6 | subjects: 7 | - kind: ServiceAccount 8 | name: {{ .Name }}-{{ .OperatorName }} 9 | roleRef: 10 | apiGroup: rbac.authorization.k8s.io 11 | kind: Role 12 | name: rabbitmq-peer-discovery-rbac -------------------------------------------------------------------------------- /repository/rabbitmq/operator/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }}-svc 5 | namespace: {{ .Namespace }} 6 | spec: 7 | type: ClusterIP 8 | ports: 9 | 
- port: {{ .Params.AMQP_PORT }} 10 | name: amqp 11 | - port: {{ .Params.MANAGEMENT_PORT }} 12 | name: management 13 | selector: 14 | app: rabbitmq 15 | kudo.dev/instance: {{ .Name }} -------------------------------------------------------------------------------- /repository/rabbitmq/operator/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ .Name }}-{{ .OperatorName }} 5 | namespace: {{ .Namespace }} -------------------------------------------------------------------------------- /repository/redis/docs/README.md: -------------------------------------------------------------------------------- 1 | # Redis 2 | 3 | Redis is an open source (BSD licensed), in-memory data structure store, used as a database, cache and message broker. 4 | 5 | Redis Cluster provides a way to run a Redis installation where data is automatically sharded across multiple Redis nodes. 6 | 7 | This Operator is deploying a Redis Cluster. 8 | 9 | ## Prerequisites 10 | 11 | You need a Kubernetes cluster up and running and Persistent Storage available with a default `Storage Class` defined. 12 | 13 | ## Getting Started 14 | 15 | Deploy the `Operator` using the following command: 16 | 17 | `kubectl kudo install redis` 18 | 19 | It deploys a Redis Cluster composed of 6 instances. There are 3 masters and 1 replica per master. 
20 | -------------------------------------------------------------------------------- /repository/redis/operator/operator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | name: redis 3 | operatorVersion: 0.2.0 4 | kudoVersion: 0.10.0 5 | kubernetesVersion: 1.15.0 6 | appVersion: 5.0.1 7 | maintainers: 8 | - name: Denis Jannot 9 | email: djannot@mesosphere.io 10 | - name: Fabian Baier 11 | email: fabian@mesosphere.io 12 | url: https://redis.io/ 13 | tasks: 14 | - name: deploy 15 | kind: Apply 16 | spec: 17 | resources: 18 | - service.yaml 19 | - pdb.yaml 20 | - configmap.yaml 21 | - statefulset.yaml 22 | - name: init 23 | kind: Apply 24 | spec: 25 | resources: 26 | - init.yaml 27 | plans: 28 | deploy: 29 | strategy: serial 30 | phases: 31 | - name: deploy-redis 32 | strategy: serial 33 | steps: 34 | - name: deploy 35 | tasks: 36 | - deploy 37 | - name: init 38 | tasks: 39 | - init 40 | -------------------------------------------------------------------------------- /repository/redis/operator/params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | parameters: 3 | - name: masters 4 | description: "Number of Redis masters" 5 | default: "3" 6 | displayName: "Redis masters" 7 | - name: client_port 8 | description: "Client port" 9 | default: "6379" 10 | - name: gossip_port 11 | description: "Gossip port" 12 | default: "16379" -------------------------------------------------------------------------------- /repository/redis/operator/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: {{ .Namespace }} 5 | name: {{ .Name }}-{{ .OperatorName }} 6 | labels: 7 | app: redis 8 | redis: {{ .OperatorName }} 9 | instance: {{ .Name }} 10 | data: 11 | update-node.sh: | 12 | #!/bin/sh 13 | 
REDIS_NODES="/data/nodes.conf" 14 | sed -i -e "/myself/ s/[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}/${POD_IP}/" ${REDIS_NODES} 15 | exec "$@" 16 | redis.conf: |+ 17 | cluster-enabled yes 18 | cluster-require-full-coverage no 19 | cluster-node-timeout 15000 20 | cluster-config-file /data/nodes.conf 21 | cluster-migration-barrier 1 22 | appendonly yes 23 | protected-mode no 24 | -------------------------------------------------------------------------------- /repository/redis/operator/templates/init.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | namespace: {{ .Namespace }} 5 | name: {{ .PlanName }}-{{ .Name }}-{{ .OperatorName }}-job 6 | labels: 7 | app: redis 8 | redis: {{ .OperatorName }} 9 | instance: {{ .Name }} 10 | plan: {{ .PlanName }} 11 | spec: 12 | template: 13 | metadata: 14 | name: {{ .PlanName }}-{{ .Name }}-{{ .OperatorName }}-job 15 | spec: 16 | restartPolicy: OnFailure 17 | containers: 18 | - name: {{ .PlanName }} 19 | image: redis:5.0.1-alpine 20 | imagePullPolicy: IfNotPresent 21 | command: 22 | - /bin/sh 23 | - -c 24 | - 'echo yes | redis-cli -x -h {{ .Name }}-{{ .OperatorName }}-svc --cluster create --cluster-replicas 1 $(i=0; while [ $i -lt {{ .Params.masters | mul 2 }} ]; do printf $(nslookup {{ .Name }}-{{ .OperatorName }}-${i}.{{ .Name }}-{{ .OperatorName }}-svc | grep "Address" | awk "{ print \$3 }")":{{ .Params.client_port }} "; i=$((i+1)); done)' 25 | -------------------------------------------------------------------------------- /repository/redis/operator/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | namespace: {{ .Namespace }} 5 | name: {{ .Name }}-{{ .OperatorName }}-pdb 6 | labels: 7 | app: redis 8 | redis: {{ .OperatorName }} 9 | instance: {{ .Name }} 10 | spec: 11 | selector: 12 | matchLabels: 
13 | app: redis 14 | redis: {{ .OperatorName }} 15 | instance: {{ .Name }} 16 | minAvailable: 2 17 | -------------------------------------------------------------------------------- /repository/redis/operator/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | namespace: {{ .Namespace }} 5 | name: {{ .Name }}-{{ .OperatorName }}-svc 6 | labels: 7 | app: redis 8 | redis: {{ .OperatorName }} 9 | instance: {{ .Name }} 10 | spec: 11 | type: ClusterIP 12 | ports: 13 | - port: {{ .Params.client_port }} 14 | targetPort: {{ .Params.client_port }} 15 | name: client 16 | - port: {{ .Params.gossip_port }} 17 | targetPort: {{ .Params.gossip_port }} 18 | name: gossip 19 | selector: 20 | app: redis 21 | redis: {{ .OperatorName }} 22 | instance: {{ .Name }} 23 | -------------------------------------------------------------------------------- /repository/redis/operator/templates/statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | namespace: {{ .Namespace }} 5 | name: {{ .Name }}-{{ .OperatorName }} 6 | labels: 7 | app: redis 8 | redis: {{ .OperatorName }} 9 | instance: {{ .Name }} 10 | spec: 11 | serviceName: {{ .Name }}-{{ .OperatorName }}-svc 12 | replicas: {{ .Params.masters | mul 2 }} 13 | selector: 14 | matchLabels: 15 | app: redis 16 | redis: {{ .OperatorName }} 17 | instance: {{ .Name }} 18 | template: 19 | metadata: 20 | labels: 21 | app: redis 22 | redis: {{ .OperatorName }} 23 | instance: {{ .Name }} 24 | spec: 25 | containers: 26 | - name: redis 27 | image: redis:5.0.1-alpine 28 | ports: 29 | - containerPort: {{ .Params.client_port }} 30 | name: client 31 | - containerPort: {{ .Params.gossip_port }} 32 | name: gossip 33 | command: ["/conf/update-node.sh", "redis-server", "/conf/redis.conf"] 34 | env: 35 | - name: POD_IP 36 | valueFrom: 37 | fieldRef: 38 | 
fieldPath: status.podIP 39 | volumeMounts: 40 | - name: conf 41 | mountPath: /conf 42 | readOnly: false 43 | - name: data 44 | mountPath: /data 45 | readOnly: false 46 | volumes: 47 | - name: conf 48 | configMap: 49 | name: {{ .Name }}-{{ .OperatorName }} 50 | defaultMode: 0755 51 | volumeClaimTemplates: 52 | - metadata: 53 | name: data 54 | spec: 55 | accessModes: [ "ReadWriteOnce" ] 56 | resources: 57 | requests: 58 | storage: 1Gi 59 | -------------------------------------------------------------------------------- /repository/spark/README.md: -------------------------------------------------------------------------------- 1 | # Spark Operator 2 | 3 | The KUDO Spark Operator creates, configures, and manages instances of [Spark Operator](https://github.com/mesosphere/spark-on-k8s-operator) running on Kubernetes. 4 | 5 | ## Getting started 6 | The latest stable version of the operator is `3.0.0-1.1.0`. 7 | The documentation is available in [docs/3.0.0-1.1.0](./docs/3.0.0-1.1.0) folder. 8 | 9 | ## Version Chart 10 | 11 | | KUDO Spark version | Apache Spark version | Operator version | Minimum KUDO Version | Status | 12 | | ------------------ | -------------------- | ----------------------- | -------------------- | ------ | 13 | | latest | 3.0.0 (Hadoop 2.9.2) | v1beta2-1.2.2 | 0.15.0 | dev | 14 | | **3.0.0-1.1.0** | **3.0.0 (Hadoop 2.9.2)** | **v1beta2-1.2.2** | **0.15.0** | GA | 15 | | 2.4.5-1.0.1 | 2.4.5 (Hadoop 2.9.2) | v1beta2-1.1.1 | 0.13.0 | GA | 16 | | 2.4.5-1.0.0 | 2.4.5 (Hadoop 2.9.2) | v1beta2-1.1.0 | 0.10.1 | GA | 17 | | 2.4.4-0.2.0 | 2.4.4 (Hadoop 2.9.2) | v1beta2-1.0.1 | 0.10.1 | beta | 18 | | beta1 | 2.4.3 (Hadoop 2.9.2) | v1beta2-1.0.1 | 0.8.0 | beta | 19 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/README.md: -------------------------------------------------------------------------------- 1 | KUDO Spark 2.4.4-0.2.0 2 | --- 3 | 4 | * [Installation](installation.md) 5 | * 
[Configuration](configuration.md) 6 | * [Submitting Spark applications](submission.md) 7 | * [Monitoring](monitoring.md) 8 | * [Spark History Server](history-server.md) 9 | * [Advanced configuration options](advanced-configuration.md) 10 | * [Release notes](release-notes.md) 11 | * [Limitations](limitations.md) 12 | * [Versions](versions.md) 13 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/limitations.md: -------------------------------------------------------------------------------- 1 | Limitations 2 | --- 3 | 4 | ## Multi-instance installation 5 | * Currently, multi-instance (multi-tenant) operator installation supports only a single instance per namespace to 6 | allow Spark applications be launched in the namespace they've been submitted to. Multiple operator instances 7 | installed in the same namespace run job submissions in parallel which can potentially lead to race conditions 8 | and inconsistent application state. 9 | * Operator instances must have unique names to avoid clashes when `createRBAC` property is set to `true`. 10 | KUDO Controller will reject new instance installation because it will try to create a `ClusterRole` with the same name. 
11 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/release-notes.md: -------------------------------------------------------------------------------- 1 | Release Notes 2 | --- 3 | 4 | ## 2.4.4-0.2.0 (latest) 5 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.4 with Hadoop 2.9.2 support 6 | * Added Python and R support to Spark Operator image 7 | * Added support for automatic installation of monitoring resources 8 | * Added configuration parameters and documentation for HA installation 9 | * Added documentation describing integration with Volcano batch scheduler 10 | 11 | ## beta1 12 | * Spark Operator Docker image based on [mesosphere/spark](https://github.com/mesosphere/spark/) 2.4.3 with Hadoop 2.9.2 support 13 | * Spark Operator based on version `v1beta2-1.0.1` 14 | * Added Spark History Server support 15 | * Added `ServiceMonitors` for integration with the Prometheus Operator 16 | * Prometheus Java agent updated to version `0.11.0` 17 | * Kubernetes Java client library updated to version `4.4.2` 18 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/resources/img/ha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.4-0.2.0/resources/img/ha.png -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/resources/img/spark-ui-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.4-0.2.0/resources/img/spark-ui-1.png -------------------------------------------------------------------------------- 
/repository/spark/docs/2.4.4-0.2.0/resources/img/spark-ui-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.4-0.2.0/resources/img/spark-ui-2.png -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/resources/monitoring/spark-application-with-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: mock-task-runner 5 | spec: 6 | type: Scala 7 | mode: cluster 8 | image: mesosphere/spark:spark-2.4.4-hadoop-2.9-k8s 9 | imagePullPolicy: Always 10 | mainClass: MockTaskRunner 11 | mainApplicationFile: "https://infinity-artifacts.s3.amazonaws.com/scale-tests/dcos-spark-scala-tests-assembly-2.4.0-20190325.jar" 12 | arguments: 13 | - "1" 14 | - "120" 15 | sparkConf: 16 | "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s" 17 | "spark.scheduler.minRegisteredResourcesRatio": "1.0" 18 | sparkVersion: "2.4.4" 19 | restartPolicy: 20 | type: Never 21 | driver: 22 | cores: 1 23 | memory: "512m" 24 | labels: 25 | version: 2.4.3 26 | metrics-exposed: "true" 27 | serviceAccount: spark-driver 28 | executor: 29 | cores: 1 30 | instances: 1 31 | memory: "512m" 32 | labels: 33 | version: 2.4.4 34 | metrics-exposed: "true" 35 | monitoring: 36 | exposeDriverMetrics: true 37 | exposeExecutorMetrics: true 38 | prometheus: 39 | jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar" 40 | port: 8090 41 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/resources/spark-pi-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: spark-pi-loadbalancer 
6 | name: spark-pi-loadbalancer 7 | spec: 8 | type: LoadBalancer 9 | selector: 10 | spark-role: driver 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 4041 15 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/resources/spark-pi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: spark-pi 5 | namespace: spark 6 | spec: 7 | type: Scala 8 | mode: cluster 9 | image: "mesosphere/spark:spark-2.4.4-hadoop-2.9-k8s" 10 | imagePullPolicy: Always 11 | mainClass: org.apache.spark.examples.SparkPi 12 | mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.4.jar" 13 | arguments: 14 | - "150000" 15 | sparkConf: 16 | "spark.ui.port": "4041" 17 | sparkVersion: "2.4.3" 18 | restartPolicy: 19 | type: Never 20 | driver: 21 | cores: 1 22 | memory: "512m" 23 | labels: 24 | version: 2.4.4 25 | serviceAccount: spark-driver 26 | executor: 27 | cores: 1 28 | instances: 4 29 | memory: "512m" 30 | labels: 31 | version: 2.4.4 32 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.4-0.2.0/versions.md: -------------------------------------------------------------------------------- 1 | Versions 2 | --- 3 | 4 | ## Component Versions 5 | * Apache Spark 2.4.4 built with Hadoop 2.9.2 and Scala 2.11 6 | * Spark Operator v1beta2-1.0.1 7 | * OpenJDK 8 8 | * Prometheus Java agent 0.11.0 9 | 10 | ## KUDO Version Requirement 11 | * KUDO version 0.10.1 or later 12 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/README.md: -------------------------------------------------------------------------------- 1 | # KUDO Spark 2.4.5-1.0.0 (GA) 2 | 3 | * [Installation](installation.md) 4 | * [Configuration](configuration.md) 5 | * [Submitting Spark 
applications](submission.md) 6 | * [Monitoring](monitoring.md) 7 | * [Spark History Server](history-server.md) 8 | * [Security](security.md) 9 | * [Kerberos](kerberos.md) 10 | * [Advanced configuration options](advanced-configuration.md) 11 | * [Release notes](release-notes.md) 12 | * [Limitations](limitations.md) 13 | * [Versions](versions.md) 14 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/limitations.md: -------------------------------------------------------------------------------- 1 | # Limitations 2 | 3 | ## Multi-instance installation 4 | * Currently, multi-instance (multi-tenant) operator installation supports only a single instance per namespace to 5 | allow Spark applications be launched in the namespace they've been submitted to. Multiple operator instances 6 | installed in the same namespace run job submissions in parallel which can potentially lead to race conditions 7 | and inconsistent application state. 8 | * Operator instances must have unique names to avoid clashes when `createRBAC` property is set to `true`. 9 | KUDO Controller will reject new instance installation because it will try to create a `ClusterRole` with the same name. 
10 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/release-notes.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | 3 | ## 2.4.5-1.0.0 (latest) 4 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.5 with Hadoop 2.9.2 support 5 | * Spark Operator based on version `v1beta2-1.1.0` 6 | * Security features: RPC Auth with Encryption, TLS support, Kerberos 7 | * Additional features for Spark and Spark History Server integration with popular data stores, such as Amazon S3 and HDFS 8 | 9 | ## 2.4.4-0.2.0 10 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.4 with Hadoop 2.9.2 support 11 | * Added Python and R support to Spark Operator image 12 | * Added support for automatic installation of monitoring resources 13 | * Added configuration parameters and documentation for HA installation 14 | * Added documentation describing integration with Volcano batch scheduler 15 | 16 | ## beta1 17 | * Spark Operator Docker image based on [mesosphere/spark](https://github.com/mesosphere/spark/) 2.4.3 with Hadoop 2.9.2 support 18 | * Spark Operator based on version `v1beta2-1.0.1` 19 | * Added Spark History Server support 20 | * Added `ServiceMonitors` for integration with the Prometheus Operator 21 | * Prometheus Java agent updated to version `0.11.0` 22 | * Kubernetes Java client library updated to version `4.4.2` 23 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/resources/img/ha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.5-1.0.0/resources/img/ha.png -------------------------------------------------------------------------------- 
/repository/spark/docs/2.4.5-1.0.0/resources/img/spark-ui-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.5-1.0.0/resources/img/spark-ui-1.png -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/resources/img/spark-ui-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.5-1.0.0/resources/img/spark-ui-2.png -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/resources/monitoring/spark-application-with-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: mock-task-runner 5 | spec: 6 | type: Scala 7 | mode: cluster 8 | image: mesosphere/spark:spark-2.4.5-hadoop-2.9-k8s 9 | imagePullPolicy: Always 10 | mainClass: MockTaskRunner 11 | mainApplicationFile: "https://infinity-artifacts.s3.amazonaws.com/scale-tests/dcos-spark-scala-tests-assembly-2.4.0-20190325.jar" 12 | arguments: 13 | - "1" 14 | - "120" 15 | sparkConf: 16 | "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s" 17 | "spark.scheduler.minRegisteredResourcesRatio": "1.0" 18 | sparkVersion: "2.4.5" 19 | restartPolicy: 20 | type: Never 21 | driver: 22 | cores: 1 23 | memory: "512m" 24 | labels: 25 | version: 2.4.3 26 | metrics-exposed: "true" 27 | serviceAccount: spark-driver 28 | executor: 29 | cores: 1 30 | instances: 1 31 | memory: "512m" 32 | labels: 33 | version: 2.4.5 34 | metrics-exposed: "true" 35 | monitoring: 36 | exposeDriverMetrics: true 37 | exposeExecutorMetrics: true 38 | prometheus: 39 | jmxExporterJar: 
"/prometheus/jmx_prometheus_javaagent-0.11.0.jar" 40 | port: 8090 41 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/resources/spark-pi-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: spark-pi-loadbalancer 6 | name: spark-pi-loadbalancer 7 | spec: 8 | type: LoadBalancer 9 | selector: 10 | spark-role: driver 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 4041 15 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/resources/spark-pi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: spark-pi 5 | namespace: spark 6 | spec: 7 | type: Scala 8 | mode: cluster 9 | image: "mesosphere/spark:spark-2.4.5-hadoop-2.9-k8s" 10 | imagePullPolicy: Always 11 | mainClass: org.apache.spark.examples.SparkPi 12 | mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.5.jar" 13 | arguments: 14 | - "150000" 15 | sparkConf: 16 | "spark.ui.port": "4041" 17 | sparkVersion: "2.4.3" 18 | restartPolicy: 19 | type: Never 20 | driver: 21 | cores: 1 22 | memory: "512m" 23 | labels: 24 | version: 2.4.5 25 | serviceAccount: spark-driver 26 | executor: 27 | cores: 1 28 | instances: 4 29 | memory: "512m" 30 | labels: 31 | version: 2.4.5 32 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.0/versions.md: -------------------------------------------------------------------------------- 1 | # Versions 2 | 3 | ## Component Versions 4 | * Apache Spark 2.4.5 built with Hadoop 2.9.2 and Scala 2.11 5 | * Spark Operator v1beta2-1.1.0 6 | * OpenJDK 8 7 | * Prometheus Java agent 0.11.0 8 | 9 | ## KUDO Version Requirement 10 | 
* KUDO version 0.10.1 or later 11 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/README.md: -------------------------------------------------------------------------------- 1 | # KUDO Spark latest 2 | 3 | * [Installation](installation.md) 4 | * [Configuration](configuration.md) 5 | * [Submitting Spark applications](submission.md) 6 | * [Monitoring](monitoring.md) 7 | * [Spark History Server](history-server.md) 8 | * [Security](security.md) 9 | * [Kerberos](kerberos.md) 10 | * [Advanced configuration options](advanced-configuration.md) 11 | * [Release notes](release-notes.md) 12 | * [Limitations](limitations.md) 13 | * [Versions](versions.md) 14 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/limitations.md: -------------------------------------------------------------------------------- 1 | # Limitations 2 | 3 | ## Multi-instance installation 4 | * Currently, multi-instance (multi-tenant) operator installation supports only a single instance per namespace to 5 | allow Spark applications be launched in the namespace they've been submitted to. Multiple operator instances 6 | installed in the same namespace run job submissions in parallel which can potentially lead to race conditions 7 | and inconsistent application state. 8 | * Operator instances must have unique names to avoid clashes when `createRBAC` property is set to `true`. 9 | KUDO Controller will reject new instance installation because it will try to create a `ClusterRole` with the same name. 
10 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/release-notes.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | 3 | ## 2.4.5-1.0.1 (latest) 4 | * Fix for `sparkJobNamespace` parameter, which allows the operator to manage Spark jobs only from selected namespace 5 | * Spark Operator based on version `v1beta2-1.1.1` 6 | * KUDO version updated to `0.13.0` 7 | 8 | ## 2.4.5-1.0.0 9 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.5 with Hadoop 2.9.2 support 10 | * Spark Operator based on version `v1beta2-1.1.0` 11 | * Security features: RPC Auth with Encryption, TLS support, Kerberos 12 | * Additional features for Spark and Spark History Server integration with popular data stores, such as Amazon S3 and HDFS 13 | 14 | ## 2.4.4-0.2.0 15 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.4 with Hadoop 2.9.2 support 16 | * Added Python and R support to Spark Operator image 17 | * Added support for automatic installation of monitoring resources 18 | * Added configuration parameters and documentation for HA installation 19 | * Added documentation describing integration with Volcano batch scheduler 20 | 21 | ## beta1 22 | * Spark Operator Docker image based on [mesosphere/spark](https://github.com/mesosphere/spark/) 2.4.3 with Hadoop 2.9.2 support 23 | * Spark Operator based on version `v1beta2-1.0.1` 24 | * Added Spark History Server support 25 | * Added `ServiceMonitors` for integration with the Prometheus Operator 26 | * Prometheus Java agent updated to version `0.11.0` 27 | * Kubernetes Java client library updated to version `4.4.2` 28 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/resources/img/ha.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.5-1.0.1/resources/img/ha.png -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/resources/img/spark-ui-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.5-1.0.1/resources/img/spark-ui-1.png -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/resources/img/spark-ui-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/2.4.5-1.0.1/resources/img/spark-ui-2.png -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/resources/monitoring/spark-application-with-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: mock-task-runner 5 | spec: 6 | type: Scala 7 | mode: cluster 8 | image: mesosphere/spark:spark-2.4.5-hadoop-2.9-k8s 9 | imagePullPolicy: Always 10 | mainClass: MockTaskRunner 11 | mainApplicationFile: "https://infinity-artifacts.s3.amazonaws.com/scale-tests/dcos-spark-scala-tests-assembly-2.4.0-20190325.jar" 12 | arguments: 13 | - "1" 14 | - "120" 15 | sparkConf: 16 | "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s" 17 | "spark.scheduler.minRegisteredResourcesRatio": "1.0" 18 | sparkVersion: "2.4.5" 19 | restartPolicy: 20 | type: Never 21 | driver: 22 | cores: 1 23 | memory: "512m" 24 | labels: 25 | version: 2.4.5 26 | metrics-exposed: "true" 27 | serviceAccount: spark-driver 28 | executor: 29 | 
cores: 1 30 | instances: 1 31 | memory: "512m" 32 | labels: 33 | version: 2.4.5 34 | metrics-exposed: "true" 35 | monitoring: 36 | exposeDriverMetrics: true 37 | exposeExecutorMetrics: true 38 | prometheus: 39 | jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar" 40 | port: 8090 41 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/resources/spark-pi-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: spark-pi-loadbalancer 6 | name: spark-pi-loadbalancer 7 | spec: 8 | type: LoadBalancer 9 | selector: 10 | spark-role: driver 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 4041 15 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/resources/spark-pi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: spark-pi 5 | namespace: spark 6 | spec: 7 | type: Scala 8 | mode: cluster 9 | image: "mesosphere/spark:spark-2.4.5-hadoop-2.9-k8s" 10 | imagePullPolicy: Always 11 | mainClass: org.apache.spark.examples.SparkPi 12 | mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.5.jar" 13 | arguments: 14 | - "150000" 15 | sparkConf: 16 | "spark.ui.port": "4041" 17 | sparkVersion: "2.4.5" 18 | restartPolicy: 19 | type: Never 20 | driver: 21 | cores: 1 22 | memory: "512m" 23 | labels: 24 | version: 2.4.5 25 | serviceAccount: spark-driver 26 | executor: 27 | cores: 1 28 | instances: 4 29 | memory: "512m" 30 | labels: 31 | version: 2.4.5 32 | -------------------------------------------------------------------------------- /repository/spark/docs/2.4.5-1.0.1/versions.md: -------------------------------------------------------------------------------- 1 | # 
Versions 2 | 3 | ## Component Versions 4 | * Apache Spark 2.4.5 built with Hadoop 2.9.2 and Scala 2.11 5 | * Spark Operator v1beta2-1.1.1 6 | * OpenJDK 8 7 | * Prometheus Java agent 0.11.0 8 | 9 | ## KUDO Version Requirement 10 | * KUDO version 0.13.0 or later 11 | -------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/README.md: -------------------------------------------------------------------------------- 1 | # KUDO Spark latest 2 | 3 | * [Installation](installation.md) 4 | * [Configuration](configuration.md) 5 | * [Submitting Spark applications](submission.md) 6 | * [Monitoring](monitoring.md) 7 | * [Spark History Server](history-server.md) 8 | * [Security](security.md) 9 | * [Kerberos](kerberos.md) 10 | * [Advanced configuration options](advanced-configuration.md) 11 | * [Release notes](release-notes.md) 12 | * [Limitations](limitations.md) 13 | * [Versions](versions.md) 14 | -------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/limitations.md: -------------------------------------------------------------------------------- 1 | # Limitations 2 | 3 | ## Multi-instance installation 4 | * Currently, multi-instance (multi-tenant) operator installation supports only a single instance per namespace to allow Spark applications be launched in the namespace they've been submitted to. Multiple operator instances installed in the same namespace run job submissions in parallel which can potentially lead to race conditions and inconsistent application state. 5 | * Operator instances must have unique names to avoid clashes when `createRBAC` property is set to `true`. KUDO Controller will reject new instance installation because it will try to create a `ClusterRole` with the same name. 
6 | -------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/release-notes.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | 3 | ## 3.0.0-1.1.0 (latest) 4 | * Upgraded to Spark 3.0.0 with Scala 2.12 and Hadoop 2.9.2 support 5 | * Spark Operator based on version `v1beta2-1.2.2` 6 | * KUDO version updated to `0.15.0` 7 | 8 | ## 2.4.5-1.0.1 9 | * Changed `sparkJobNamespace` parameter propagation logic, making the operator manage Spark jobs across all namespaces by default 10 | * Spark Operator based on version `v1beta2-1.1.1` 11 | * KUDO version updated to `0.13.0` 12 | 13 | ## 2.4.5-1.0.0 14 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.5 with Hadoop 2.9.2 support 15 | * Spark Operator based on version `v1beta2-1.1.0` 16 | * Security features: RPC Auth with Encryption, TLS support, Kerberos 17 | * Additional features for Spark and Spark History Server integration with popular data stores, such as Amazon S3 and HDFS 18 | 19 | ## 2.4.4-0.2.0 20 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.4 with Hadoop 2.9.2 support 21 | * Added Python and R support to Spark Operator image 22 | * Added support for automatic installation of monitoring resources 23 | * Added configuration parameters and documentation for HA installation 24 | * Added documentation describing integration with Volcano batch scheduler 25 | 26 | ## beta1 27 | * Spark Operator Docker image based on [mesosphere/spark](https://github.com/mesosphere/spark/) 2.4.3 with Hadoop 2.9.2 support 28 | * Spark Operator based on version `v1beta2-1.0.1` 29 | * Added Spark History Server support 30 | * Added `ServiceMonitors` for integration with the Prometheus Operator 31 | * Prometheus Java agent updated to version `0.11.0` 32 | * Kubernetes Java client library updated to version `4.4.2` 33 | 
-------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/resources/img/ha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/3.0.0-1.1.0/resources/img/ha.png -------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/resources/img/spark-ui-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/3.0.0-1.1.0/resources/img/spark-ui-1.png -------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/resources/img/spark-ui-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/3.0.0-1.1.0/resources/img/spark-ui-2.png -------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/resources/monitoring/spark-application-with-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: mock-task-runner 5 | spec: 6 | type: Scala 7 | mode: cluster 8 | image: mesosphere/spark:spark-3.0.0-hadoop-2.9-k8s 9 | imagePullPolicy: Always 10 | mainClass: MockTaskRunner 11 | mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar" 12 | arguments: 13 | - "1" 14 | - "120" 15 | sparkConf: 16 | "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s" 17 | "spark.scheduler.minRegisteredResourcesRatio": "1.0" 18 | sparkVersion: "3.0.0" 19 | 
restartPolicy: 20 | type: Never 21 | driver: 22 | cores: 1 23 | memory: "512m" 24 | labels: 25 | version: 3.0.0 26 | metrics-exposed: "true" 27 | serviceAccount: spark-driver 28 | executor: 29 | cores: 1 30 | instances: 1 31 | memory: "512m" 32 | labels: 33 | version: 3.0.0 34 | metrics-exposed: "true" 35 | monitoring: 36 | exposeDriverMetrics: true 37 | exposeExecutorMetrics: true 38 | prometheus: 39 | jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar" 40 | port: 8090 41 | -------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/resources/spark-pi-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: spark-pi-loadbalancer 6 | name: spark-pi-loadbalancer 7 | spec: 8 | type: LoadBalancer 9 | selector: 10 | spark-role: driver 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 4041 15 | -------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/resources/spark-pi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: spark-pi 5 | namespace: spark 6 | spec: 7 | type: Scala 8 | mode: cluster 9 | image: "mesosphere/spark:spark-3.0.0-hadoop-2.9-k8s" 10 | imagePullPolicy: Always 11 | mainClass: org.apache.spark.examples.SparkPi 12 | mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar" 13 | arguments: 14 | - "150000" 15 | sparkConf: 16 | "spark.ui.port": "4041" 17 | sparkVersion: "3.0.0" 18 | restartPolicy: 19 | type: Never 20 | driver: 21 | cores: 1 22 | memory: "512m" 23 | labels: 24 | version: 3.0.0 25 | serviceAccount: spark-driver 26 | executor: 27 | cores: 1 28 | instances: 4 29 | memory: "512m" 30 | labels: 31 | version: 3.0.0 32 | 
-------------------------------------------------------------------------------- /repository/spark/docs/3.0.0-1.1.0/versions.md: -------------------------------------------------------------------------------- 1 | # Versions 2 | 3 | ## Component Versions 4 | * Apache Spark 3.0.0 built with Hadoop 2.9.2 and Scala 2.12 5 | * Spark Operator v1beta2-1.2.2 6 | * OpenJDK 8 7 | * Prometheus Java agent 0.11.0 8 | 9 | ## KUDO Version Requirement 10 | * KUDO version 0.15.0 or later 11 | -------------------------------------------------------------------------------- /repository/spark/docs/beta1/README.md: -------------------------------------------------------------------------------- 1 | KUDO Spark beta1 2 | --- 3 | 4 | * [Installation](installation.md) 5 | * [Configuration](configuration.md) 6 | * [Submitting Spark applications](submission.md) 7 | * [Monitoring](monitoring.md) 8 | * [Spark History Server](history-server.md) 9 | * [Release notes](release-notes.md) 10 | * [Limitations](limitations.md) 11 | * [Versions](versions.md) 12 | -------------------------------------------------------------------------------- /repository/spark/docs/beta1/limitations.md: -------------------------------------------------------------------------------- 1 | Limitations 2 | --- 3 | 4 | ## Multi-instance installation 5 | * Currently, multi-instance (multi-tenant) operator installation supports only a single instance per namespace to 6 | allow Spark applications be launched in the namespace they've been submitted to. Multiple operator instances 7 | installed in the same namespace run job submissions in parallel which can potentially lead to race conditions 8 | and inconsistent application state. 9 | * Operator instances must have unique names to avoid clashes when `createRBAC` property is set to `true`. 10 | KUDO Controller will reject new instance installation because it will try to create a `ClusterRole` with the same name. 
11 | -------------------------------------------------------------------------------- /repository/spark/docs/beta1/release-notes.md: -------------------------------------------------------------------------------- 1 | Release Notes 2 | --- 3 | 4 | ## beta1 5 | * Spark Operator Docker image based on [mesosphere/spark](https://github.com/mesosphere/spark/) 2.4.3 with Hadoop 2.9.2 support 6 | * Spark Operator based on version `v1beta2-1.0.1` 7 | * Added Spark History Server support 8 | * Added `ServiceMonitors` for integration with the Prometheus Operator 9 | * Prometheus Java agent updated to version `0.11.0` 10 | * Kubernetes Java client library updated to version `4.4.2` 11 | -------------------------------------------------------------------------------- /repository/spark/docs/beta1/resources/img/spark-ui-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/beta1/resources/img/spark-ui-1.png -------------------------------------------------------------------------------- /repository/spark/docs/beta1/resources/img/spark-ui-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/beta1/resources/img/spark-ui-2.png -------------------------------------------------------------------------------- /repository/spark/docs/beta1/resources/monitoring/spark-application-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: spark-application-metrics 5 | labels: 6 | "spark/servicemonitor": "true" 7 | spec: 8 | ports: 9 | - port: 8090 10 | name: metrics 11 | clusterIP: None 12 | selector: 13 | "metrics-exposed": "true" 14 | 
-------------------------------------------------------------------------------- /repository/spark/docs/beta1/resources/monitoring/spark-application-with-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: mock-task-runner 5 | spec: 6 | type: Scala 7 | mode: cluster 8 | image: mesosphere/spark:spark-2.4.3-hadoop-2.9-k8s 9 | imagePullPolicy: Always 10 | mainClass: MockTaskRunner 11 | mainApplicationFile: "https://infinity-artifacts.s3.amazonaws.com/scale-tests/dcos-spark-scala-tests-assembly-2.4.0-20190325.jar" 12 | arguments: 13 | - "1" 14 | - "120" 15 | sparkConf: 16 | "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s" 17 | "spark.scheduler.minRegisteredResourcesRatio": "1.0" 18 | sparkVersion: "2.4.3" 19 | restartPolicy: 20 | type: Never 21 | driver: 22 | cores: 1 23 | memory: "512m" 24 | labels: 25 | version: 2.4.3 26 | metrics-exposed: "true" 27 | serviceAccount: spark-driver 28 | executor: 29 | cores: 1 30 | instances: 1 31 | memory: "512m" 32 | labels: 33 | version: 2.4.3 34 | metrics-exposed: "true" 35 | monitoring: 36 | exposeDriverMetrics: true 37 | exposeExecutorMetrics: true 38 | prometheus: 39 | jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar" 40 | port: 8090 41 | -------------------------------------------------------------------------------- /repository/spark/docs/beta1/resources/monitoring/spark-operator-metrics-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: spark-operator-metrics 5 | labels: 6 | "spark/servicemonitor": "true" 7 | spec: 8 | ports: 9 | - port: 10254 10 | name: metrics 11 | clusterIP: None 12 | selector: 13 | "app.kubernetes.io/name": spark 14 | -------------------------------------------------------------------------------- 
/repository/spark/docs/beta1/resources/monitoring/spark-service-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | labels: 5 | app: prometheus-operator 6 | release: prometheus-kubeaddons 7 | name: spark-cluster-monitor 8 | spec: 9 | endpoints: 10 | - interval: 5s 11 | port: metrics 12 | selector: 13 | matchLabels: 14 | spark/servicemonitor: "true" 15 | -------------------------------------------------------------------------------- /repository/spark/docs/beta1/resources/spark-pi-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: spark-pi-loadbalancer 6 | name: spark-pi-loadbalancer 7 | spec: 8 | type: LoadBalancer 9 | selector: 10 | spark-role: driver 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 4041 15 | -------------------------------------------------------------------------------- /repository/spark/docs/beta1/resources/spark-pi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: spark-pi 5 | namespace: spark 6 | spec: 7 | type: Scala 8 | mode: cluster 9 | image: "mesosphere/spark:spark-2.4.3-hadoop-2.9-k8s" 10 | imagePullPolicy: Always 11 | mainClass: org.apache.spark.examples.SparkPi 12 | mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.11-2.4.3.jar" 13 | arguments: 14 | - "150000" 15 | sparkConf: 16 | "spark.ui.port": "4041" 17 | sparkVersion: "2.4.3" 18 | restartPolicy: 19 | type: Never 20 | driver: 21 | cores: 1 22 | memory: "512m" 23 | labels: 24 | version: 2.4.3 25 | serviceAccount: spark-driver 26 | executor: 27 | cores: 1 28 | instances: 4 29 | memory: "512m" 30 | labels: 31 | version: 2.4.3 32 | 
-------------------------------------------------------------------------------- /repository/spark/docs/beta1/versions.md: -------------------------------------------------------------------------------- 1 | Versions 2 | --- 3 | 4 | ## Component Versions 5 | * Apache Spark 2.4.3 built with Hadoop 2.9.2 and Scala 2.11 6 | * Spark Operator v1beta2-1.0.1 7 | * OpenJDK 8 8 | * Prometheus Java agent 0.11.0 9 | 10 | ## KUDO Version Requirement 11 | * KUDO version 0.8.0 or later 12 | -------------------------------------------------------------------------------- /repository/spark/docs/latest/README.md: -------------------------------------------------------------------------------- 1 | # KUDO Spark latest 2 | 3 | * [Installation](installation.md) 4 | * [Configuration](configuration.md) 5 | * [Submitting Spark applications](submission.md) 6 | * [Monitoring](monitoring.md) 7 | * [Spark History Server](history-server.md) 8 | * [Security](security.md) 9 | * [Kerberos](kerberos.md) 10 | * [Advanced configuration options](advanced-configuration.md) 11 | * [Release notes](release-notes.md) 12 | * [Limitations](limitations.md) 13 | * [Versions](versions.md) 14 | -------------------------------------------------------------------------------- /repository/spark/docs/latest/limitations.md: -------------------------------------------------------------------------------- 1 | # Limitations 2 | 3 | ## Multi-instance installation 4 | * Currently, multi-instance (multi-tenant) operator installation supports only a single instance per namespace to allow Spark applications be launched in the namespace they've been submitted to. Multiple operator instances installed in the same namespace run job submissions in parallel which can potentially lead to race conditions and inconsistent application state. 5 | * Operator instances must have unique names to avoid clashes when `createRBAC` property is set to `true`. 
KUDO Controller will reject new instance installation because it will try to create a `ClusterRole` with the same name. 6 | -------------------------------------------------------------------------------- /repository/spark/docs/latest/release-notes.md: -------------------------------------------------------------------------------- 1 | # Release Notes 2 | 3 | ## 3.0.0-1.1.0 (latest) 4 | * Upgraded to Spark 3.0.0 with Scala 2.12 and Hadoop 2.9.2 support 5 | * Spark Operator based on version `v1beta2-1.2.2` 6 | * KUDO version updated to `0.15.0` 7 | 8 | ## 2.4.5-1.0.1 9 | * Changed `sparkJobNamespace` parameter propagation logic, making the operator manage Spark jobs across all namespaces by default 10 | * Spark Operator based on version `v1beta2-1.1.1` 11 | * KUDO version updated to `0.13.0` 12 | 13 | ## 2.4.5-1.0.0 14 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.5 with Hadoop 2.9.2 support 15 | * Spark Operator based on version `v1beta2-1.1.0` 16 | * Security features: RPC Auth with Encryption, TLS support, Kerberos 17 | * Additional features for Spark and Spark History Server integration with popular data stores, such as Amazon S3 and HDFS 18 | 19 | ## 2.4.4-0.2.0 20 | * Spark Operator Docker image based on [apache/spark](https://github.com/apache/spark/) 2.4.4 with Hadoop 2.9.2 support 21 | * Added Python and R support to Spark Operator image 22 | * Added support for automatic installation of monitoring resources 23 | * Added configuration parameters and documentation for HA installation 24 | * Added documentation describing integration with Volcano batch scheduler 25 | 26 | ## beta1 27 | * Spark Operator Docker image based on [mesosphere/spark](https://github.com/mesosphere/spark/) 2.4.3 with Hadoop 2.9.2 support 28 | * Spark Operator based on version `v1beta2-1.0.1` 29 | * Added Spark History Server support 30 | * Added `ServiceMonitors` for integration with the Prometheus Operator 31 | * Prometheus Java agent 
updated to version `0.11.0` 32 | * Kubernetes Java client library updated to version `4.4.2` 33 | -------------------------------------------------------------------------------- /repository/spark/docs/latest/resources/img/ha.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/latest/resources/img/ha.png -------------------------------------------------------------------------------- /repository/spark/docs/latest/resources/img/spark-ui-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/latest/resources/img/spark-ui-1.png -------------------------------------------------------------------------------- /repository/spark/docs/latest/resources/img/spark-ui-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kudobuilder/operators/6f7fbecaf6a79e22328149062ea4d3e6dd4082b9/repository/spark/docs/latest/resources/img/spark-ui-2.png -------------------------------------------------------------------------------- /repository/spark/docs/latest/resources/monitoring/spark-application-with-metrics.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: mock-task-runner 5 | spec: 6 | type: Scala 7 | mode: cluster 8 | image: mesosphere/spark:spark-3.0.0-hadoop-2.9-k8s 9 | imagePullPolicy: Always 10 | mainClass: MockTaskRunner 11 | mainApplicationFile: "https://kudo-spark.s3-us-west-2.amazonaws.com/spark-scala-tests-3.0.0-20200819.jar" 12 | arguments: 13 | - "1" 14 | - "120" 15 | sparkConf: 16 | "spark.scheduler.maxRegisteredResourcesWaitingTime": "2400s" 17 | 
"spark.scheduler.minRegisteredResourcesRatio": "1.0" 18 | sparkVersion: "3.0.0" 19 | restartPolicy: 20 | type: Never 21 | driver: 22 | cores: 1 23 | memory: "512m" 24 | labels: 25 | version: 3.0.0 26 | metrics-exposed: "true" 27 | serviceAccount: spark-driver 28 | executor: 29 | cores: 1 30 | instances: 1 31 | memory: "512m" 32 | labels: 33 | version: 3.0.0 34 | metrics-exposed: "true" 35 | monitoring: 36 | exposeDriverMetrics: true 37 | exposeExecutorMetrics: true 38 | prometheus: 39 | jmxExporterJar: "/prometheus/jmx_prometheus_javaagent-0.11.0.jar" 40 | port: 8090 41 | -------------------------------------------------------------------------------- /repository/spark/docs/latest/resources/spark-pi-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: spark-pi-loadbalancer 6 | name: spark-pi-loadbalancer 7 | spec: 8 | type: LoadBalancer 9 | selector: 10 | spark-role: driver 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 4041 15 | -------------------------------------------------------------------------------- /repository/spark/docs/latest/resources/spark-pi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: spark-pi 5 | namespace: spark 6 | spec: 7 | type: Scala 8 | mode: cluster 9 | image: "mesosphere/spark:spark-3.0.0-hadoop-2.9-k8s" 10 | imagePullPolicy: Always 11 | mainClass: org.apache.spark.examples.SparkPi 12 | mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar" 13 | arguments: 14 | - "150000" 15 | sparkConf: 16 | "spark.ui.port": "4041" 17 | sparkVersion: "3.0.0" 18 | restartPolicy: 19 | type: Never 20 | driver: 21 | cores: 1 22 | memory: "512m" 23 | labels: 24 | version: 3.0.0 25 | serviceAccount: spark-driver 26 | executor: 27 | cores: 1 28 | instances: 4 29 | 
memory: "512m" 30 | labels: 31 | version: 3.0.0 32 | -------------------------------------------------------------------------------- /repository/spark/docs/latest/versions.md: -------------------------------------------------------------------------------- 1 | # Versions 2 | 3 | ## Component Versions 4 | * Apache Spark 3.0.0 built with Hadoop 2.9.2 and Scala 2.12 5 | * Spark Operator v1beta2-1.2.2 6 | * OpenJDK 8 7 | * Prometheus Java agent 0.11.0 8 | 9 | ## KUDO Version Requirement 10 | * KUDO version 0.15.0 or later 11 | -------------------------------------------------------------------------------- /repository/spark/operator/templates/spark-history-server-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Params.enableHistoryServer "true" }} 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: {{ .Name }}-history-server 6 | namespace: {{ .Namespace }} 7 | labels: 8 | app.kubernetes.io/name: {{ .OperatorName }}-history-server 9 | app.kubernetes.io/instance: {{ .Name }} 10 | app.kubernetes.io/version: {{ .Params.operatorVersion }} 11 | spec: 12 | ports: 13 | - port: 18080 14 | name: history-server 15 | selector: 16 | app.kubernetes.io/name: {{ .OperatorName }}-history-server 17 | app.kubernetes.io/instance: {{ .Name }} 18 | app.kubernetes.io/version: {{ .Params.operatorVersion }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /repository/spark/operator/templates/spark-monitoring.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Params.enableMetrics "true" }} 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: spark-operator-metrics 6 | namespace: {{ .Namespace }} 7 | labels: 8 | spark/servicemonitor: "true" 9 | spec: 10 | ports: 11 | - port: {{ .Params.operatorMetricsPort }} 12 | name: metrics 13 | clusterIP: None 14 | selector: 15 | app.kubernetes.io/name: spark 16 | --- 17 | 
apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: spark-application-metrics 21 | namespace: {{ .Namespace }} 22 | labels: 23 | spark/servicemonitor: "true" 24 | spec: 25 | ports: 26 | - port: {{ .Params.appMetricsPort }} 27 | name: metrics 28 | clusterIP: None 29 | selector: 30 | metrics-exposed: "true" 31 | --- 32 | apiVersion: monitoring.coreos.com/v1 33 | kind: ServiceMonitor 34 | metadata: 35 | labels: 36 | app: prometheus-operator 37 | release: prometheus-kubeaddons 38 | name: spark-monitor 39 | namespace: {{ .Namespace }} 40 | spec: 41 | endpoints: 42 | - interval: {{ .Params.metricsPollingInterval }} 43 | port: metrics 44 | selector: 45 | matchLabels: 46 | spark/servicemonitor: "true" 47 | {{- end }} 48 | -------------------------------------------------------------------------------- /repository/spark/operator/templates/spark-operator-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Params.createOperatorServiceAccount "true" }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ .Name }}-{{ .Params.operatorServiceAccountName }} 6 | namespace: {{ .Namespace }} 7 | labels: 8 | app.kubernetes.io/name: {{ .Name }}-{{ .Params.operatorServiceAccountName }} 9 | app.kubernetes.io/instance: {{ .Name }} 10 | {{- end }} 11 | -------------------------------------------------------------------------------- /repository/spark/operator/templates/spark-rbac.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Params.createRBAC "true" }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | {{- if (ne .Params.sparkJobNamespace "") }} 6 | namespace: {{ .Params.sparkJobNamespace }} 7 | {{- else }} 8 | namespace: {{ .Namespace }} 9 | {{- end }} 10 | name: {{ .Name }}-spark-role 11 | labels: 12 | app.kubernetes.io/name: {{ .Name }}-spark-role 13 | app.kubernetes.io/instance: {{ .Name }} 14 | rules: 15 | - 
apiGroups: [""] 16 | resources: ["pods"] 17 | verbs: ["*"] 18 | - apiGroups: [""] 19 | resources: ["services"] 20 | verbs: ["*"] 21 | --- 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: RoleBinding 24 | metadata: 25 | name: {{ .Name }}-spark-rb 26 | {{- if (ne .Params.sparkJobNamespace "") }} 27 | namespace: {{ .Params.sparkJobNamespace }} 28 | {{- else }} 29 | namespace: {{ .Namespace }} 30 | {{- end }} 31 | labels: 32 | app.kubernetes.io/name: {{ .Name }}-spark-rb 33 | app.kubernetes.io/instance: {{ .Name }} 34 | subjects: 35 | - kind: ServiceAccount 36 | {{- if eq .Params.createSparkServiceAccount "true" }} 37 | name: {{ .Name }}-{{ .Params.sparkServiceAccountName }} 38 | {{- else }} 39 | name: {{ .Params.sparkServiceAccountName }} 40 | {{- end }} 41 | {{- if (ne .Params.sparkJobNamespace "") }} 42 | namespace: {{ .Params.sparkJobNamespace }} 43 | {{- else }} 44 | namespace: {{ .Namespace }} 45 | {{- end }} 46 | roleRef: 47 | kind: Role 48 | name: {{ .Name }}-spark-role 49 | apiGroup: rbac.authorization.k8s.io 50 | {{- end }} 51 | -------------------------------------------------------------------------------- /repository/spark/operator/templates/spark-serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Params.createSparkServiceAccount "true" }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ .Name }}-{{ .Params.sparkServiceAccountName }} 6 | {{- if (ne .Params.sparkJobNamespace "") }} 7 | namespace: {{ .Params.sparkJobNamespace }} 8 | {{- else }} 9 | namespace: {{ .Namespace }} 10 | {{- end }} 11 | labels: 12 | app.kubernetes.io/name: {{ .Name }}-{{ .Params.sparkServiceAccountName }} 13 | app.kubernetes.io/instance: {{ .Name }} 14 | {{- end }} 15 | -------------------------------------------------------------------------------- /repository/spark/operator/templates/webhook-init-job.yaml: -------------------------------------------------------------------------------- 
1 | {{- if eq .Params.enableWebhook "true" }} 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: {{ .Name }}-init 6 | namespace: {{ .Namespace }} 7 | labels: 8 | app.kubernetes.io/name: {{ .Name }}-init 9 | app.kubernetes.io/instance: {{ .Name }} 10 | spec: 11 | template: 12 | spec: 13 | {{- if eq .Params.createOperatorServiceAccount "true" }} 14 | serviceAccountName: {{ .Name }}-{{ .Params.operatorServiceAccountName }} 15 | {{- else }} 16 | serviceAccountName: {{ .Params.operatorServiceAccountName }} 17 | {{- end }} 18 | restartPolicy: OnFailure 19 | containers: 20 | - name: main 21 | image: {{ .Params.operatorImageName }}:{{ .Params.operatorVersion }} 22 | imagePullPolicy: {{ .Params.imagePullPolicy }} 23 | command: ["/usr/bin/gencerts.sh", "-n", "{{ .Namespace }}", "-s", "{{ .Name }}-webhook", "-p"] 24 | {{- end }} 25 | -------------------------------------------------------------------------------- /repository/spark/operator/templates/webhook-service.yaml: -------------------------------------------------------------------------------- 1 | {{- if eq .Params.enableWebhook "true" }} 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: {{ .Name }}-webhook 6 | namespace: {{ .Namespace }} 7 | labels: 8 | app.kubernetes.io/name: {{ .OperatorName }} 9 | app.kubernetes.io/instance: {{ .Name }} 10 | spec: 11 | ports: 12 | - port: 443 13 | targetPort: {{ .Params.webhookPort }} 14 | name: webhook 15 | selector: 16 | app.kubernetes.io/name: {{ .OperatorName }} 17 | app.kubernetes.io/instance: {{ .Name }} 18 | app.kubernetes.io/version: {{ .Params.operatorVersion }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: kudo-controller-manager-0 5 | namespace: kudo-system 6 | status: 7 | phase: Running 8 | 9 | # confirms 
that kudo is running 10 | -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/01-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: spark 5 | -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/01-create-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: spark 5 | 6 | # create 'spark' namespace -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/02-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kuttl.dev/v1beta1 2 | kind: TestAssert 3 | timeout: 600 4 | --- 5 | apiVersion: kudo.dev/v1beta1 6 | kind: Instance 7 | metadata: 8 | name: spark 9 | namespace: spark 10 | status: 11 | planStatus: 12 | deploy: 13 | status: COMPLETE 14 | 15 | # this can take time, 3+ mins locally is common -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/02-install-spark.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kuttl.dev/v1beta1 2 | kind: TestStep 3 | commands: 4 | - command: kubectl kudo install spark --namespace spark --instance=spark -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/03-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kuttl.dev/v1beta1 2 | kind: TestAssert 3 | timeout: 600 4 | collectors: 5 | - pod: spark-pi-driver 6 | namespace: spark 7 | --- 8 | apiVersion: sparkoperator.k8s.io/v1beta2 9 | kind: SparkApplication 10 | metadata: 11 | name: spark-pi 12 | namespace: spark 13 
| status: 14 | applicationState: 15 | state: COMPLETED 16 | --- 17 | apiVersion: v1 18 | kind: Event 19 | reason: SparkApplicationCompleted 20 | metadata: 21 | namespace: spark 22 | source: 23 | component: spark-operator 24 | involvedObject: 25 | apiVersion: sparkoperator.k8s.io/v1beta2 26 | kind: SparkApplication 27 | name: spark-pi 28 | namespace: spark -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/03-submit-spark-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kuttl.dev/v1beta1 2 | kind: TestStep 3 | apply: 4 | - spark-pi.yaml -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/04-delete-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kuttl.dev/v1beta1 2 | kind: TestStep 3 | commands: 4 | - command: kubectl delete ns spark -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/04-errors.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: spark -------------------------------------------------------------------------------- /repository/spark/tests/spark-job/spark-pi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "sparkoperator.k8s.io/v1beta2" 2 | kind: SparkApplication 3 | metadata: 4 | name: spark-pi 5 | namespace: spark 6 | spec: 7 | type: Scala 8 | mode: cluster 9 | image: "mesosphere/spark:spark-3.0.0-hadoop-2.9-k8s" 10 | imagePullPolicy: Always 11 | mainClass: org.apache.spark.examples.SparkPi 12 | mainApplicationFile: "local:///opt/spark/examples/jars/spark-examples_2.12-3.0.0.jar" 13 | arguments: 14 | - "1000" 15 | sparkVersion: "3.0.0" 16 | restartPolicy: 17 | type: Never 18 | driver: 19 | cores: 1 20 
| memory: "512m" 21 | labels: 22 | version: 3.0.0 23 | serviceAccount: spark-spark-service-account 24 | executor: 25 | cores: 1 26 | instances: 2 27 | memory: "512m" 28 | labels: 29 | version: 3.0.0 -------------------------------------------------------------------------------- /repository/zookeeper/README.md: -------------------------------------------------------------------------------- 1 | # Zookeeper Operator 2 | 3 | The KUDO Zookeeper operator creates, configures and manages [Apache Zookeeper](https://zookeeper.apache.org/) clusters running on Kubernetes. 4 | 5 | ## Getting started 6 | 7 | The latest stable version of Zookeeper operator is `0.3.3` 8 | 9 | ## Version Chart 10 | 11 | | KUDO Zookeeper Version | Apache Zookeeper Version | 12 | | ---------------------- | ------------------------ | 13 | | 0.3.3 | 3.6.2 | 14 | | 0.3.2 | 3.4.14 | 15 | | 0.3.1 | 3.4.14 | 16 | | 0.3.0 | 3.4.14 | 17 | | latest | 3.6.2 | 18 | -------------------------------------------------------------------------------- /repository/zookeeper/docs/latest/README.md: -------------------------------------------------------------------------------- 1 | ## KUDO Zookeeper latest 2 | 3 | - [Installation](./install.md) 4 | - [Configuration](./configuration.md) 5 | - [Limitations](./limitations.md) 6 | -------------------------------------------------------------------------------- /repository/zookeeper/docs/latest/configuration.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | ### Resources 4 | 5 | By default, KUDO Zookeeper resource configuration is set to the minimum recommended values for production usage. 6 | But users can and should tune the configurations based on the workload requirements of whatever service makes use of Zookeeper. 
7 | 8 | ##### Tuning the resources for the Zookeeper Cluster 9 | 10 | ``` 11 | kubectl kudo install zookeeper --instance=my-zookeeper-name \ 12 | -p CPUS=1 \ 13 | -p NODE_COUNT=5 \ 14 | -p MEMORY=1Gi \ 15 | -p DISK_SIZE=5Gi 16 | ``` 17 | 18 | ##### Ports 19 | 20 | The parameter `CLIENT_PORT`(default: 2181) sets the port for listening to client requests. 21 | Similarly, `SERVER_PORT`(default: 2888) is used to set the port which zookeeper will listen on for requests from other servers in the ensemble and `ELECTION_PORT`(default: 3888) can be used to set the port on which the Zookeeper process will perform leader election. 22 | 23 | 24 | ##### Storage 25 | 26 | By default, the Zookeeper operator will use the default storage class of the Kubernetes cluster. 27 | 28 | To deploy Zookeeper using a different storage class, you can use the parameter `STORAGE_CLASS`. 29 | 30 | ``` 31 | kubectl kudo install zookeeper --instance=my-zookeeper-name -p STORAGE_CLASS=<storage-class-name> 32 | ``` 33 | 34 | ##### Docker image 35 | 36 | The Dockerfile used to build the KUDO Zookeeper operator is hosted [here](https://github.com/31z4/zookeeper-docker/blob/5a82d0b90d055f39d50e0a64ae2e00da15f9b8b1/3.4.14/Dockerfile). For more details, please check [Zookeeper's Dockerhub Repository](https://hub.docker.com/_/zookeeper). 37 | -------------------------------------------------------------------------------- /repository/zookeeper/docs/latest/install.md: -------------------------------------------------------------------------------- 1 | # Installing the KUDO Zookeeper Operator 2 | 3 | Requirements: 4 | 5 | - Install the [KUDO controller](https://kudo.dev/docs/getting-started/) 6 | - Install the [KUDO CLI](https://kudo.dev/docs/cli/) 7 | 8 | 9 | ## Installing the Operator 10 | 11 | #### Install Zookeeper 12 | 13 | Please read the [limitations](./limitations.md) docs before creating the KUDO Zookeeper cluster. 
14 | 15 | ``` 16 | kubectl kudo install zookeeper 17 | ``` 18 | 19 | Verify that the deploy plan for `--instance=zookeeper-instance`, the default instance name, is complete. 20 | ``` 21 | kubectl kudo plan status --instance=zookeeper-instance 22 | Plan(s) for "zookeeper-instance" in namespace "default": 23 | . 24 | └── zookeeper-instance (Operator-Version: "zookeeper-0.3.0" Active-Plan: "deploy") 25 | ├── Plan deploy (serial strategy) [COMPLETE] 26 | │ ├── Phase zookeeper [COMPLETE] 27 | │ │ └── Step deploy (COMPLETE) 28 | │ └── Phase validation [COMPLETE] 29 | │ └── Step validation (COMPLETE) 30 | └── Plan validation (serial strategy) [NOT ACTIVE] 31 | └── Phase connection (parallel strategy) [NOT ACTIVE] 32 | └── Step connection (parallel strategy) [NOT ACTIVE] 33 | └── connection [NOT ACTIVE] 34 | ``` 35 | 36 | You can view all configuration options [here](./configuration.md) 37 | 38 | #### Installing multiple Zookeeper Clusters 39 | 40 | ``` 41 | kubectl kudo install zookeeper --instance=zk-1 42 | kubectl kudo install zookeeper --instance=zk-2 43 | kubectl kudo install zookeeper --instance=zk-3 44 | ``` 45 | 46 | The above commands will install three zookeeper clusters. 47 | -------------------------------------------------------------------------------- /repository/zookeeper/docs/latest/limitations.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Limitations 4 | 5 | Below is a list of parameters that can only be configured during bootstrap time. 6 | 7 | |Immutable Parameters| 8 | | ------------------ | 9 | | DISK_SIZE | 10 | | STORAGE_CLASS | 11 | 12 | These storage-related parameters cannot be changed after initial deployment. Repeat: using parameters to resize disk, change storage class, or switch between persistent/ephemeral storage is not supported. 13 | 14 | Changing the above parameters will trigger a `not-allowed` plan, which skips updating any resources. 
15 | This is to avoid any update done by mistake or human error. 16 | 17 | ### Resizing the PVC 18 | 19 | Resizing the disk depends on the storage class of your Kubernetes cluster. 20 | To resize the disk being used by zookeeper, users can edit the `pvc` and expand the disk. 21 | You can read more about it in the [resizing the PVC](https://kubernetes.io/blog/2018/07/12/resizing-persistent-volumes-using-kubernetes/) post. 22 | -------------------------------------------------------------------------------- /repository/zookeeper/operator/templates/healthcheck.sh.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | data: 3 | healthcheck.sh: | 4 | #!/usr/bin/env bash 5 | # Copyright 2017 The Kubernetes Authors. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | 19 | # zkOk.sh uses the ruok ZooKeeper four letter word to determine if the instance 20 | # is healthy. The $? variable will be set to 0 if the server responds that it is 21 | # healthy, or 1 if the server fails to respond. 
22 | 23 | OK=$(echo ruok | nc 127.0.0.1 $1) 24 | if [ "$OK" == "imok" ]; then 25 | exit 0 26 | else 27 | exit 1 28 | fi 29 | 30 | kind: ConfigMap 31 | metadata: 32 | name: {{ .Name }}-healthcheck -------------------------------------------------------------------------------- /repository/zookeeper/operator/templates/pdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: policy/v1beta1 2 | kind: PodDisruptionBudget 3 | metadata: 4 | name: {{ .Name }}-pdb 5 | namespace: {{ .Namespace }} 6 | labels: 7 | app: zookeeper 8 | zookeeper: {{ .Name }} 9 | spec: 10 | selector: 11 | matchLabels: 12 | app: zookeeper 13 | kudo.dev/instance: {{ .Name }} 14 | maxUnavailable: 1 15 | -------------------------------------------------------------------------------- /repository/zookeeper/operator/templates/services.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Name }}-hs 5 | namespace: {{ .Namespace }} 6 | labels: 7 | app: zookeeper 8 | zookeeper: {{ .Name }} 9 | spec: 10 | ports: 11 | - port: {{ .Params.SERVER_PORT }} 12 | name: server 13 | - port: {{ .Params.ELECTION_PORT }} 14 | name: leader-election 15 | clusterIP: None 16 | selector: 17 | app: zookeeper 18 | instance: {{ .Name }} 19 | --- 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | name: {{ .Name }}-cs 24 | namespace: {{ .Namespace }} 25 | labels: 26 | app: zookeeper 27 | zookeeper: {{ .Name }} 28 | spec: 29 | ports: 30 | - port: {{ .Params.CLIENT_PORT }} 31 | name: client 32 | selector: 33 | app: zookeeper 34 | instance: {{ .Name }} -------------------------------------------------------------------------------- /repository/zookeeper/operator/templates/validation.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: {{ .Name }}-validation 5 | spec: 6 | template: 7 | 
metadata: 8 | name: "validation" 9 | spec: 10 | restartPolicy: Never 11 | containers: 12 | - name: kubernetes-zookeeper 13 | imagePullPolicy: {{ .Params.IMAGE_PULL_POLICY }} 14 | image: "zookeeper:3.6.2" 15 | env: 16 | - name: CONN 17 | value: {{ if gt (int .Params.NODE_COUNT) 0 }} {{ .Name }}-zookeeper-0.{{ .Name }}-hs:{{ .Params.CLIENT_PORT }}{{- $root := . -}}{{ range $i, $v := untilStep 1 (int .Params.NODE_COUNT) 1 }},{{ $root.Name }}-zookeeper-{{ $v }}.{{ $root.Name }}-hs:{{ $root.Params.CLIENT_PORT }}{{ end }}{{ end }} 18 | resources: 19 | requests: 20 | memory: "64Mi" 21 | cpu: "0.1" 22 | command: 23 | - bash 24 | - -c 25 | - "until bin/zkCli.sh -server $CONN ls /; do sleep 5; done" 26 | -------------------------------------------------------------------------------- /repository/zookeeper/tests/zookeeper-upgrade-test/00-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: zk 5 | status: 6 | planStatus: 7 | deploy: 8 | status: COMPLETE 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: zk-zookeeper 14 | spec: 15 | template: 16 | spec: 17 | containers: 18 | - name: kubernetes-zookeeper 19 | resources: 20 | requests: 21 | memory: "256Mi" 22 | cpu: "300m" 23 | status: 24 | readyReplicas: 1 25 | -------------------------------------------------------------------------------- /repository/zookeeper/tests/zookeeper-upgrade-test/00-install.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: zk 5 | spec: 6 | operatorVersion: 7 | name: zookeeper-3.6.2-0.3.3 8 | namespace: default 9 | kind: OperatorVersion 10 | name: "zk" 11 | parameters: 12 | NODE_COUNT: "1" 13 | MEMORY: "256Mi" 14 | CPUS: "0.3" 15 | -------------------------------------------------------------------------------- 
/repository/zookeeper/tests/zookeeper-upgrade-test/01-assert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: zk 5 | status: 6 | planStatus: 7 | deploy: 8 | status: COMPLETE 9 | --- 10 | apiVersion: apps/v1 11 | kind: StatefulSet 12 | metadata: 13 | name: zk-zookeeper 14 | spec: 15 | template: 16 | spec: 17 | containers: 18 | - name: kubernetes-zookeeper 19 | resources: 20 | requests: 21 | memory: "256Mi" 22 | cpu: "200m" 23 | status: 24 | readyReplicas: 1 25 | -------------------------------------------------------------------------------- /repository/zookeeper/tests/zookeeper-upgrade-test/01-resize.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kudo.dev/v1beta1 2 | kind: Instance 3 | metadata: 4 | name: zk 5 | spec: 6 | operatorVersion: 7 | name: zookeeper-3.6.2-0.3.3 8 | namespace: default 9 | kind: OperatorVersion 10 | name: "zk" 11 | parameters: 12 | MEMORY: "256Mi" 13 | CPUS: "0.2" 14 | -------------------------------------------------------------------------------- /test/kind/kubernetes-1.16.9.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.sigs.k8s.io/v1alpha3 3 | nodes: 4 | - role: control-plane 5 | image: kindest/node:v1.16.9 6 | -------------------------------------------------------------------------------- /test/kind/kubernetes-1.17.5.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.sigs.k8s.io/v1alpha3 3 | nodes: 4 | - role: control-plane 5 | image: kindest/node:v1.17.5 6 | --------------------------------------------------------------------------------