├── .gitignore
├── tools
└── multicluster
│ ├── .gitignore
│ ├── main.go
│ ├── Dockerfile
│ ├── cmd
│ └── multicluster.go
│ └── pkg
│ ├── common
│ └── utils.go
│ └── debug
│ ├── anonymize.go
│ └── anonymize_test.go
├── .github
├── CODEOWNERS
├── PULL_REQUEST_TEMPLATE.md
└── ISSUE_TEMPLATE
│ └── config.yml
├── architectures
├── ops-manager-multi-cluster
│ ├── code_snippets
│ │ ├── 0615_stop_forwarding_om_api.sh
│ │ ├── 9200_delete_om.sh
│ │ ├── 0605_start_forwarding_om_api.sh
│ │ ├── 9100_delete_backup_namespaces.sh
│ │ ├── 0311_ops_manager_wait_for_pending_state.sh
│ │ ├── 0300_ops_manager_create_admin_credentials.sh
│ │ ├── 0321_ops_manager_wait_for_pending_state.sh
│ │ ├── 0500_ops_manager_prepare_s3_backup_secrets.sh
│ │ ├── 0400_install_minio_s3.sh
│ │ ├── 0250_generate_certs.sh
│ │ ├── 0312_ops_manager_wait_for_running_state.sh
│ │ ├── 0322_ops_manager_wait_for_running_state.sh
│ │ └── 0310_ops_manager_deploy_on_single_member_cluster.sh
│ ├── output
│ │ ├── 0311_ops_manager_wait_for_pending_state.out
│ │ ├── 0321_ops_manager_wait_for_pending_state.out
│ │ └── 0312_ops_manager_wait_for_running_state.out
│ ├── teardown.sh
│ ├── env_variables.sh
│ └── test.sh
├── setup-multi-cluster
│ ├── setup-cert-manager
│ │ ├── output
│ │ │ ├── 0215_helm_configure_repo.out
│ │ │ ├── 0221_verify_issuer.out
│ │ │ └── 0216_helm_install_cert_manager.out
│ │ ├── code_snippets
│ │ │ ├── 0215_helm_configure_repo.sh
│ │ │ ├── 0216_helm_install_cert_manager.sh
│ │ │ ├── 0221_verify_issuer.sh
│ │ │ └── 0225_create_ca_configmap.sh
│ │ └── test.sh
│ ├── setup-gke
│ │ ├── code_snippets
│ │ │ ├── 0005_gcloud_set_current_project.sh
│ │ │ ├── 0020_get_gke_credentials.sh
│ │ │ ├── 0010_create_gke_cluster_0.sh
│ │ │ ├── 0010_create_gke_cluster_1.sh
│ │ │ ├── 0010_create_gke_cluster_2.sh
│ │ │ ├── 9010_delete_gke_clusters.sh
│ │ │ └── 0030_verify_access_to_clusters.sh
│ │ ├── teardown.sh
│ │ └── test.sh
│ ├── setup-externaldns
│ │ ├── code_snippets
│ │ │ ├── 0100_create_gke_sa.sh
│ │ │ ├── 0120_add_role_to_sa.sh
│ │ │ ├── 0130_create_sa_key.sh
│ │ │ ├── 9000_delete_sa.sh
│ │ │ ├── 0140_create_namespaces.sh
│ │ │ ├── 9050_delete_namespace.sh
│ │ │ ├── 0200_install_externaldns.sh
│ │ │ ├── 9100_delete_dns_zone.sh
│ │ │ ├── 0300_setup_dns_zone.sh
│ │ │ └── 0150_create_sa_secrets.sh
│ │ ├── teardown.sh
│ │ ├── test.sh
│ │ └── env_variables.sh
│ ├── setup-operator
│ │ ├── code_snippets
│ │ │ ├── 0205_helm_configure_repo.sh
│ │ │ ├── 0211_check_operator_deployment.sh
│ │ │ ├── 9000_delete_namespaces.sh
│ │ │ ├── 0045_create_namespaces.sh
│ │ │ ├── 0210_helm_install_operator.sh
│ │ │ └── 0200_kubectl_mongodb_configure_multi_cluster.sh
│ │ ├── teardown.sh
│ │ ├── output
│ │ │ ├── 0205_helm_configure_repo.out
│ │ │ └── 0211_check_operator_deployment.out
│ │ ├── test.sh
│ │ └── env_variables.sh
│ ├── setup-istio
│ │ ├── code_snippets
│ │ │ └── 0040_install_istio.sh
│ │ └── test.sh
│ └── verify-connectivity
│ │ ├── output
│ │ ├── 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out
│ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out
│ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out
│ │ └── 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out
│ │ └── code_snippets
│ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_0.sh
│ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_1.sh
│ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_2.sh
│ │ ├── 0070_check_cluster_connectivity_create_pod_service_0.sh
│ │ ├── 0070_check_cluster_connectivity_create_pod_service_1.sh
│ │ ├── 0070_check_cluster_connectivity_create_pod_service_2.sh
│ │ ├── 0060_check_cluster_connectivity_wait_for_sts.sh
│ │ ├── 0050_check_cluster_connectivity_create_sts_0.sh
│ │ ├── 0050_check_cluster_connectivity_create_sts_1.sh
│ │ ├── 0050_check_cluster_connectivity_create_sts_2.sh
│ │ ├── 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.sh
│ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh
│ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh
│ │ ├── 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh
│ │ └── 0045_create_connectivity_test_namespaces.sh
├── ops-manager-mc-no-mesh
│ ├── code_snippets
│ │ ├── 9200_delete_om.sh
│ │ ├── 9100_delete_backup_namespaces.sh
│ │ ├── 0160_add_dns_record.sh
│ │ ├── 0300_ops_manager_create_admin_credentials.sh
│ │ ├── 0325_set_up_lb_services.sh
│ │ ├── 0326_set_up_lb_services.sh
│ │ ├── 0321_ops_manager_wait_for_pending_state.sh
│ │ ├── 0110_add_cert_to_gcp.sh
│ │ ├── 9000_cleanup_gke_lb.sh
│ │ ├── 0500_ops_manager_prepare_s3_backup_secrets.sh
│ │ ├── 0400_install_minio_s3.sh
│ │ ├── 0150_om_load_balancer.sh
│ │ ├── 0330_ops_manager_wait_for_running_state.sh
│ │ └── 0100_generate_certs.sh
│ ├── output
│ │ ├── 0321_ops_manager_wait_for_pending_state.out
│ │ └── 0150_om_load_balancer.out
│ ├── teardown.sh
│ └── test.sh
├── mongodb-sharded-mc-no-mesh
│ ├── code_snippets
│ │ ├── 9000_delete_resources.sh
│ │ ├── 2210_verify_mongosh_connection.sh
│ │ ├── 2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh
│ │ └── 2200_create_mongodb_user.sh
│ ├── teardown.sh
│ ├── output
│ │ └── 2210_verify_mongosh_connection.out
│ ├── test.sh
│ └── env_variables.sh
├── mongodb-sharded-multi-cluster
│ ├── code_snippets
│ │ ├── 9000_delete_resources.sh
│ │ ├── 2210_verify_mongosh_connection.sh
│ │ ├── 2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh
│ │ └── 2200_create_mongodb_user.sh
│ ├── teardown.sh
│ ├── env_variables.sh
│ ├── output
│ │ └── 2210_verify_mongosh_connection.out
│ └── test.sh
├── mongodb-replicaset-mc-no-mesh
│ ├── code_snippets
│ │ ├── 9000_delete_resources.sh
│ │ ├── 1210_verify_mongosh_connection.sh
│ │ ├── 1050_generate_certs.sh
│ │ ├── 1110_mongodb_replicaset_multi_cluster_wait_for_running_state.sh
│ │ └── 1200_create_mongodb_user.sh
│ ├── teardown.sh
│ ├── output
│ │ └── 1210_verify_mongosh_connection.out
│ ├── test.sh
│ └── env_variables.sh
└── mongodb-replicaset-multi-cluster
│ ├── code_snippets
│ ├── 9000_delete_resources.sh
│ ├── 1050_generate_certs.sh
│ ├── 1210_verify_mongosh_connection.sh
│ ├── 1110_mongodb_replicaset_multi_cluster_wait_for_running_state.sh
│ ├── 1200_create_mongodb_user.sh
│ └── 1100_mongodb_replicaset_multi_cluster.sh
│ ├── teardown.sh
│ ├── env_variables.sh
│ ├── output
│ └── 1210_verify_mongosh_connection.out
│ └── test.sh
├── docs
└── assets
│ ├── image--000.png
│ ├── image--002.png
│ ├── image--004.png
│ ├── image--008.png
│ ├── image--014.png
│ ├── image--030.png
│ ├── image--032.png
│ └── image--034.png
├── samples
├── ops-manager-multi-cluster
│ ├── code_snippets
│ │ ├── 0011_gcloud_set_current_project.sh
│ │ ├── 0205_helm_configure_repo.sh
│ │ ├── 0040_install_istio.sh
│ │ ├── 0311_ops_manager_wait_for_pending_state.sh
│ │ ├── 0321_ops_manager_wait_for_pending_state.sh
│ │ ├── 0010_create_gke_cluster_0.sh
│ │ ├── 0010_create_gke_cluster_1.sh
│ │ ├── 0010_create_gke_cluster_2.sh
│ │ ├── 0020_get_gke_credentials.sh
│ │ ├── 9010_delete_gke_clusters.sh
│ │ ├── 0300_ops_manager_create_admin_credentials.sh
│ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_0.sh
│ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_1.sh
│ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_2.sh
│ │ ├── 0030_verify_access_to_clusters.sh
│ │ ├── 0070_check_cluster_connectivity_create_pod_service_0.sh
│ │ ├── 0070_check_cluster_connectivity_create_pod_service_1.sh
│ │ ├── 0070_check_cluster_connectivity_create_pod_service_2.sh
│ │ ├── 0211_check_operator_deployment.sh
│ │ ├── 0200_kubectl_mongodb_configure_multi_cluster.sh
│ │ ├── 9000_delete_namespaces.sh
│ │ ├── 0060_check_cluster_connectivity_wait_for_sts.sh
│ │ ├── 0500_ops_manager_prepare_s3_backup_secrets.sh
│ │ ├── 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.sh
│ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh
│ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh
│ │ ├── 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh
│ │ ├── 0255_create_cert_secrets.sh
│ │ ├── 0045_create_ops_manager_namespace.sh
│ │ ├── 0050_check_cluster_connectivity_create_sts_0.sh
│ │ ├── 0050_check_cluster_connectivity_create_sts_1.sh
│ │ ├── 0050_check_cluster_connectivity_create_sts_2.sh
│ │ ├── 0400_install_minio_s3.sh
│ │ ├── 0045_create_operator_namespace.sh
│ │ ├── 0210_helm_install_operator.sh
│ │ ├── 0310_ops_manager_deploy_on_single_member_cluster.sh
│ │ ├── 0322_ops_manager_wait_for_running_state.sh
│ │ ├── 0100_check_cluster_connectivity_cleanup.sh
│ │ ├── 0046_create_image_pull_secrets.sh
│ │ └── 0320_ops_manager_add_second_cluster.sh
│ ├── output
│ │ ├── 0311_ops_manager_wait_for_pending_state.out
│ │ ├── 0321_ops_manager_wait_for_pending_state.out
│ │ ├── 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out
│ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out
│ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out
│ │ ├── 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out
│ │ ├── 0205_helm_configure_repo.out
│ │ ├── 0211_check_operator_deployment.out
│ │ └── 0200_kubectl_mongodb_configure_multi_cluster.out
│ └── test_cleanup.sh
├── mongodb
│ ├── authentication
│ │ ├── scram
│ │ │ ├── standalone
│ │ │ │ ├── standalone-scram-password.yaml
│ │ │ │ ├── standalone-scram-user.yaml
│ │ │ │ └── standalone-scram-sha.yaml
│ │ │ ├── replica-set
│ │ │ │ ├── replica-set-scram-password.yaml
│ │ │ │ ├── replica-set-scram-user.yaml
│ │ │ │ └── replica-set-scram-sha.yaml
│ │ │ └── sharded-cluster
│ │ │ │ ├── sharded-cluster-scram-password.yaml
│ │ │ │ └── sharded-cluster-scram-user.yaml
│ │ ├── x509
│ │ │ ├── replica-set
│ │ │ │ └── user.yaml
│ │ │ └── sharded-cluster
│ │ │ │ └── user.yaml
│ │ └── ldap
│ │ │ ├── replica-set
│ │ │ └── replica-set-ldap-user.yaml
│ │ │ └── sharded-cluster
│ │ │ └── sharded-cluster-ldap-user.yaml
│ ├── backup
│ │ ├── replica-set-backup.yaml
│ │ └── replica-set-backup-disabled.yaml
│ ├── project.yaml
│ ├── pod-template
│ │ ├── standalone-pod-template.yaml
│ │ └── initcontainer-sysctl_config.yaml
│ ├── mongodb-options
│ │ ├── replica-set-mongod-options.yaml
│ │ └── sharded-cluster-mongod-options.yaml
│ ├── agent-startup-options
│ │ ├── replica-set-agent-startup-options.yaml
│ │ └── standalone-agent-startup-options.yaml
│ ├── tls
│ │ └── standalone
│ │ │ └── standalone-tls.yaml
│ └── minimal
│ │ └── replica-set.yaml
├── ops-manager
│ ├── ops-manager-disable-appdb-process.yaml
│ ├── ops-manager-non-root.yaml
│ ├── ops-manager-scram.yaml
│ ├── ops-manager-appdb-custom-images.yaml
│ └── ops-manager-ignore-ui-setup.yaml
├── multi-cluster-cli-gitops
│ ├── argocd
│ │ ├── project.yaml
│ │ └── application.yaml
│ └── resources
│ │ └── replica-set.yaml
├── mongodb_multicluster
│ └── replica-set.yaml
├── mongodb_multi
│ └── replica-set.yaml
└── single-sharded-overrides.yaml
├── LICENSE
├── vault_policies
├── appdb-policy.hcl
├── database-policy.hcl
├── opsmanager-policy.hcl
└── operator-policy.hcl
├── opa_examples
├── mongodb_strict_tls
│ └── constraints.yaml
├── mongodb_allow_replicaset
│ ├── constraints.yaml
│ └── mongodb_allow_replicaset.yaml
├── mongodb_allowed_versions
│ ├── constraints.yaml
│ └── mongodb_allowed_versions.yaml
├── ops_manager_wizardless
│ ├── constraints.yaml
│ └── ops_manager_wizardless_template.yaml
├── ops_manager_replica_members
│ └── constraints.yaml
├── ops_manager_allowed_versions
│ ├── constraints.yaml
│ └── ops_manager_allowed_versions.yaml
└── debugging
│ ├── constraints.yaml
│ └── constraint_template.yaml
└── dockerfiles
├── mongodb-enterprise-init-ops-manager
├── 1.0.3
│ ├── ubuntu
│ │ └── Dockerfile
│ └── ubi
│ │ └── Dockerfile
├── 1.0.4
│ ├── ubuntu
│ │ └── Dockerfile
│ └── ubi
│ │ └── Dockerfile
├── 1.0.5
│ ├── ubuntu
│ │ └── Dockerfile
│ └── ubi
│ │ └── Dockerfile
├── 1.0.6
│ ├── ubuntu
│ │ └── Dockerfile
│ └── ubi
│ │ └── Dockerfile
├── 1.0.7
│ ├── ubuntu
│ │ └── Dockerfile
│ └── ubi
│ │ └── Dockerfile
├── 1.0.8
│ ├── ubuntu
│ │ └── Dockerfile
│ └── ubi
│ │ └── Dockerfile
├── 1.0.9
│ ├── ubuntu
│ │ └── Dockerfile
│ └── ubi
│ │ └── Dockerfile
├── 1.0.10
│ ├── ubuntu
│ │ └── Dockerfile
│ └── ubi
│ │ └── Dockerfile
├── 1.0.11
│ └── ubi
│ │ └── Dockerfile
├── 1.0.12
│ └── ubi
│ │ └── Dockerfile
├── 1.23.0
│ └── ubi
│ │ └── Dockerfile
├── 1.24.0
│ └── ubi
│ │ └── Dockerfile
├── 1.25.0
│ └── ubi
│ │ └── Dockerfile
├── 1.26.0
│ └── ubi
│ │ └── Dockerfile
├── 1.27.0
│ └── ubi
│ │ └── Dockerfile
├── 1.28.0
│ └── ubi
│ │ └── Dockerfile
├── 1.29.0
│ └── ubi
│ │ └── Dockerfile
├── 1.30.0
│ └── ubi
│ │ └── Dockerfile
├── 1.31.0
│ └── ubi
│ │ └── Dockerfile
├── 1.32.0
│ └── ubi
│ │ └── Dockerfile
└── 1.33.0
│ └── ubi
│ └── Dockerfile
├── mongodb-enterprise-operator
├── 1.9.0
│ └── ubi
│ │ └── Dockerfile
├── 1.11.0
│ └── ubuntu
│ │ └── Dockerfile
├── 1.12.0
│ └── ubuntu
│ │ └── Dockerfile
├── 1.13.0
│ └── ubuntu
│ │ └── Dockerfile
├── 1.14.0
│ └── ubuntu
│ │ └── Dockerfile
├── 1.15.0
│ └── ubuntu
│ │ └── Dockerfile
├── 1.15.1
│ └── ubuntu
│ │ └── Dockerfile
├── 1.15.2
│ └── ubuntu
│ │ └── Dockerfile
├── 1.16.0
│ └── ubuntu
│ │ └── Dockerfile
└── 1.16.1
│ └── ubuntu
│ └── Dockerfile
└── mongodb-enterprise-init-database
├── 1.0.10
└── ubuntu
│ └── Dockerfile
├── 1.0.11
└── ubuntu
│ └── Dockerfile
├── 1.0.12
└── ubuntu
│ └── Dockerfile
├── 1.0.13
└── ubuntu
│ └── Dockerfile
├── 1.0.14
└── ubuntu
│ └── Dockerfile
├── 1.0.15
└── ubuntu
│ └── Dockerfile
├── 1.0.2
└── ubuntu
│ └── Dockerfile
├── 1.0.3
└── ubuntu
│ └── Dockerfile
├── 1.0.4
└── ubuntu
│ └── Dockerfile
├── 1.0.5
└── ubuntu
│ └── Dockerfile
├── 1.0.6
└── ubuntu
│ └── Dockerfile
├── 1.0.7
└── ubuntu
│ └── Dockerfile
├── 1.0.8
└── ubuntu
│ └── Dockerfile
└── 1.0.9
└── ubuntu
└── Dockerfile

/.gitignore:
--------------------------------------------------------------------------------
.idea
*.iml
.DS_Store
tools/multicluster/linux_amd64/*
--------------------------------------------------------------------------------
/tools/multicluster/.gitignore:
--------------------------------------------------------------------------------
.DS_Store
.idea
.vscode
*.log
tmp/
dist/
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
* @mircea-cosbuc @lsierant @nammn @Julien-Ben @MaciejKaras @lucian-tosa @fealebenpae @m1kola
--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/code_snippets/0615_stop_forwarding_om_api.sh:
--------------------------------------------------------------------------------
pkill -f "kubectl port-forward"
--------------------------------------------------------------------------------
/docs/assets/image--000.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/HEAD/docs/assets/image--000.png
--------------------------------------------------------------------------------
/docs/assets/image--002.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/HEAD/docs/assets/image--002.png
--------------------------------------------------------------------------------
/docs/assets/image--004.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/HEAD/docs/assets/image--004.png
--------------------------------------------------------------------------------
/docs/assets/image--008.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/HEAD/docs/assets/image--008.png
--------------------------------------------------------------------------------
/docs/assets/image--014.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/HEAD/docs/assets/image--014.png
--------------------------------------------------------------------------------
/docs/assets/image--030.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/HEAD/docs/assets/image--030.png
--------------------------------------------------------------------------------
/docs/assets/image--032.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/HEAD/docs/assets/image--032.png
--------------------------------------------------------------------------------
/docs/assets/image--034.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/HEAD/docs/assets/image--034.png
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0011_gcloud_set_current_project.sh:
--------------------------------------------------------------------------------
gcloud config set project "${MDB_GKE_PROJECT}"
--------------------------------------------------------------------------------
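The gcloud_set_current_project snippets above and below only switch the active gcloud project. Before creating any clusters it is worth confirming the setting took effect; a minimal check, assuming the same MDB_GKE_PROJECT variable, might be:

  gcloud config get-value project
  test "$(gcloud config get-value project)" = "${MDB_GKE_PROJECT}" && echo "project OK"
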
/architectures/setup-multi-cluster/setup-cert-manager/output/0215_helm_configure_repo.out:
--------------------------------------------------------------------------------
"jetstack" has been added to your repositories
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-gke/code_snippets/0005_gcloud_set_current_project.sh:
--------------------------------------------------------------------------------
gcloud config set project "${MDB_GKE_PROJECT}"
--------------------------------------------------------------------------------
/architectures/ops-manager-mc-no-mesh/code_snippets/9200_delete_om.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" delete om/om
--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/code_snippets/9200_delete_om.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" delete om/om
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-cert-manager/code_snippets/0215_helm_configure_repo.sh:
--------------------------------------------------------------------------------
helm repo add jetstack https://charts.jetstack.io --force-update
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
Usage of the MongoDB Enterprise Operator for Kubernetes indicates agreement with the MongoDB Customer Agreement.

https://www.mongodb.com/customer-agreement/
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-externaldns/code_snippets/0100_create_gke_sa.sh:
--------------------------------------------------------------------------------
gcloud iam service-accounts create "${DNS_SA_NAME}" --display-name "${DNS_SA_NAME}"
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/output/0311_ops_manager_wait_for_pending_state.out:
--------------------------------------------------------------------------------
Waiting for Application Database to reach Pending phase...
mongodbopsmanager.mongodb.com/om condition met
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/output/0321_ops_manager_wait_for_pending_state.out:
--------------------------------------------------------------------------------
Waiting for Application Database to reach Pending phase...
mongodbopsmanager.mongodb.com/om condition met
--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/code_snippets/0605_start_forwarding_om_api.sh:
--------------------------------------------------------------------------------
kubectl port-forward --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" svc/om-svc 8443:8443 &
sleep 3
--------------------------------------------------------------------------------
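While the port-forward started in 0605_start_forwarding_om_api.sh is running, the Ops Manager API is reachable on localhost:8443. A quick reachability probe might look like the sketch below; -k is needed because the certificates in these guides are signed by a custom CA, and the exact HTTP status returned for / is an assumption:

  curl -k -s -o /dev/null -w '%{http_code}\n' https://localhost:8443/
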
/architectures/ops-manager-multi-cluster/output/0311_ops_manager_wait_for_pending_state.out:
--------------------------------------------------------------------------------
Waiting for Application Database to reach Pending phase...
mongodbopsmanager.mongodb.com/om condition met
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-externaldns/code_snippets/0120_add_role_to_sa.sh:
--------------------------------------------------------------------------------
gcloud projects add-iam-policy-binding "${MDB_GKE_PROJECT}" --member serviceAccount:"${DNS_SA_EMAIL}" --role roles/dns.admin
--------------------------------------------------------------------------------
/vault_policies/appdb-policy.hcl:
--------------------------------------------------------------------------------
path "secret/data/mongodbenterprise/appdb/*" {
  capabilities = ["read", "list"]
}
path "secret/metadata/mongodbenterprise/appdb/*" {
  capabilities = ["list"]
}
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-externaldns/code_snippets/0130_create_sa_key.sh:
--------------------------------------------------------------------------------
mkdir -p secrets

gcloud iam service-accounts keys create secrets/external-dns-sa-key.json --iam-account="${DNS_SA_EMAIL}"
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0205_helm_configure_repo.sh:
--------------------------------------------------------------------------------
helm repo add mongodb https://mongodb.github.io/helm-charts
helm repo update mongodb
helm search repo "${OFFICIAL_OPERATOR_HELM_CHART}"
--------------------------------------------------------------------------------
/vault_policies/database-policy.hcl:
--------------------------------------------------------------------------------
path "secret/data/mongodbenterprise/database/*" {
  capabilities = ["read", "list"]
}
path "secret/metadata/mongodbenterprise/database/*" {
  capabilities = ["list"]
}
--------------------------------------------------------------------------------
/vault_policies/opsmanager-policy.hcl:
--------------------------------------------------------------------------------
path "secret/data/mongodbenterprise/opsmanager/*" {
  capabilities = ["read", "list"]
}
path "secret/metadata/mongodbenterprise/opsmanager/*" {
  capabilities = ["list"]
}
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-operator/code_snippets/0205_helm_configure_repo.sh:
--------------------------------------------------------------------------------
helm repo add mongodb https://mongodb.github.io/helm-charts
helm repo update mongodb
helm search repo "${OFFICIAL_OPERATOR_HELM_CHART}"
--------------------------------------------------------------------------------
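After registering the mongodb Helm repository as above, it can help to list which operator chart versions are actually available before installing. A sketch, assuming OFFICIAL_OPERATOR_HELM_CHART is set by the accompanying env_variables.sh:

  helm search repo "${OFFICIAL_OPERATOR_HELM_CHART}" --versions | head -n 5
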
/samples/mongodb/authentication/scram/standalone/standalone-scram-password.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Secret
metadata:
  name: my-scram-secret
type: Opaque
stringData:
  password: my-standalone-password
--------------------------------------------------------------------------------
/samples/mongodb/authentication/scram/replica-set/replica-set-scram-password.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Secret
metadata:
  name: my-scram-secret
type: Opaque
stringData:
  password: my-replica-set-password
--------------------------------------------------------------------------------
/tools/multicluster/main.go:
--------------------------------------------------------------------------------
package main

import (
	"context"

	"github.com/10gen/ops-manager-kubernetes/multi/cmd"
)

func main() {
	ctx := context.Background()
	cmd.Execute(ctx)
}
--------------------------------------------------------------------------------
/vault_policies/operator-policy.hcl:
--------------------------------------------------------------------------------
path "secret/data/mongodbenterprise/*" {
  capabilities = ["create", "read", "update", "delete", "list"]
}
path "secret/metadata/mongodbenterprise/*" {
  capabilities = ["list", "read"]
}
--------------------------------------------------------------------------------
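The four HCL files under vault_policies/ give the operator read/write access to the whole mongodbenterprise secret subtree and each component read-only access to its own. Loading them into a Vault server with the Vault CLI might look like this sketch; the policy names are assumptions, not something this repo prescribes:

  vault policy write mongodbenterprise-operator vault_policies/operator-policy.hcl
  vault policy write mongodbenterprise-appdb vault_policies/appdb-policy.hcl
  vault policy write mongodbenterprise-database vault_policies/database-policy.hcl
  vault policy write mongodbenterprise-opsmanager vault_policies/opsmanager-policy.hcl
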
/samples/mongodb/authentication/scram/sharded-cluster/sharded-cluster-scram-password.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Secret
metadata:
  name: my-scram-secret
type: Opaque
stringData:
  password: my-sharded-cluster-password
--------------------------------------------------------------------------------
/architectures/ops-manager-mc-no-mesh/code_snippets/9100_delete_backup_namespaces.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "minio-operator" &
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "tenant-tiny" &
wait
--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/code_snippets/9100_delete_backup_namespaces.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "minio-operator" &
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "tenant-tiny" &
wait
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-cert-manager/output/0221_verify_issuer.out:
--------------------------------------------------------------------------------
certificate.cert-manager.io/test-selfsigned-cert created
certificate.cert-manager.io/test-selfsigned-cert condition met
certificate.cert-manager.io "test-selfsigned-cert" deleted
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out:
--------------------------------------------------------------------------------
Checking cross-cluster DNS resolution and connectivity from echoserver1-0 in gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 to echoserver0-0
SUCCESS
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out:
--------------------------------------------------------------------------------
Checking cross-cluster DNS resolution and connectivity from echoserver0-0 in gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 to echoserver1-0
SUCCESS
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out:
--------------------------------------------------------------------------------
Checking cross-cluster DNS resolution and connectivity from echoserver2-0 in gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2 to echoserver1-0
SUCCESS
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/output/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out:
--------------------------------------------------------------------------------
Checking cross-cluster DNS resolution and connectivity from echoserver0-0 in gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 to echoserver2-0
SUCCESS
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-externaldns/code_snippets/9000_delete_sa.sh:
--------------------------------------------------------------------------------
gcloud projects remove-iam-policy-binding "${MDB_GKE_PROJECT}" --member serviceAccount:"${DNS_SA_EMAIL}" --role roles/dns.admin

gcloud iam service-accounts delete "${DNS_SA_EMAIL}" -q
--------------------------------------------------------------------------------
/architectures/mongodb-sharded-mc-no-mesh/code_snippets/9000_delete_resources.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete mdbu/sc-user

kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete "mdb/${SC_RESOURCE_NAME}"
--------------------------------------------------------------------------------
/architectures/mongodb-sharded-multi-cluster/code_snippets/9000_delete_resources.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete mdbu/sc-user

kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete "mdb/${SC_RESOURCE_NAME}"
--------------------------------------------------------------------------------
/opa_examples/mongodb_strict_tls/constraints.yaml:
--------------------------------------------------------------------------------
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: MongoDBStrictTLS
metadata:
  name: mongodb-strict-tls-only
spec:
  match:
    kinds:
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDB"]
--------------------------------------------------------------------------------
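Each constraints.yaml under opa_examples/ is a Gatekeeper constraint that only takes effect once its matching ConstraintTemplate is installed. With Gatekeeper already running, applying one example might look like this sketch (the mongodb_allow_replicaset pair is used for illustration):

  kubectl apply -f opa_examples/mongodb_allow_replicaset/mongodb_allow_replicaset.yaml
  kubectl apply -f opa_examples/mongodb_allow_replicaset/constraints.yaml
  kubectl get constrainttemplates
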
/architectures/mongodb-replicaset-mc-no-mesh/code_snippets/9000_delete_resources.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete mdbu/rs-user

kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete "mdbmc/${RS_RESOURCE_NAME}"
--------------------------------------------------------------------------------
/architectures/mongodb-replicaset-multi-cluster/code_snippets/9000_delete_resources.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete mdbu/rs-user

kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete "mdbmc/${RS_RESOURCE_NAME}"
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-istio/code_snippets/0040_install_istio.sh:
--------------------------------------------------------------------------------
CTX_CLUSTER1=${K8S_CLUSTER_0_CONTEXT_NAME} \
CTX_CLUSTER2=${K8S_CLUSTER_1_CONTEXT_NAME} \
CTX_CLUSTER3=${K8S_CLUSTER_2_CONTEXT_NAME} \
ISTIO_VERSION="1.20.2" \
./install_istio_separate_network.sh
--------------------------------------------------------------------------------
/opa_examples/mongodb_allow_replicaset/constraints.yaml:
--------------------------------------------------------------------------------
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: MongoDBAllowReplicaset
metadata:
  name: mongodb-allow-replicaset-only
spec:
  match:
    kinds:
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDB"]
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0040_install_istio.sh:
--------------------------------------------------------------------------------
CTX_CLUSTER1=${K8S_CLUSTER_0_CONTEXT_NAME} \
CTX_CLUSTER2=${K8S_CLUSTER_1_CONTEXT_NAME} \
CTX_CLUSTER3=${K8S_CLUSTER_2_CONTEXT_NAME} \
ISTIO_VERSION="1.20.2" \
../multi-cluster/install_istio_separate_network.sh
--------------------------------------------------------------------------------
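install_istio_separate_network.sh appears to follow Istio's multi-primary, multi-network installation, run once with all three cluster contexts. A coarse post-install check is simply that the istio-system workloads came up in every cluster; a sketch:

  for ctx in "${K8S_CLUSTER_0_CONTEXT_NAME}" "${K8S_CLUSTER_1_CONTEXT_NAME}" "${K8S_CLUSTER_2_CONTEXT_NAME}"; do
    kubectl --context "${ctx}" -n istio-system get pods
  done
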
/architectures/ops-manager-mc-no-mesh/output/0321_ops_manager_wait_for_pending_state.out:
--------------------------------------------------------------------------------
Waiting for Application Database to reach Pending phase...
mongodbopsmanager.mongodb.com/om condition met
Waiting for Ops Manager to reach Pending phase...
mongodbopsmanager.mongodb.com/om condition met
--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/output/0321_ops_manager_wait_for_pending_state.out:
--------------------------------------------------------------------------------
Waiting for Application Database to reach Pending phase...
mongodbopsmanager.mongodb.com/om condition met
Waiting for Ops Manager to reach Pending phase...
mongodbopsmanager.mongodb.com/om condition met
--------------------------------------------------------------------------------
/opa_examples/mongodb_allowed_versions/constraints.yaml:
--------------------------------------------------------------------------------
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: MongoDBAllowedVersions
metadata:
  name: mongodb-allowed-versions-only
spec:
  match:
    kinds:
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDB"]
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-cert-manager/code_snippets/0216_helm_install_cert_manager.sh:
--------------------------------------------------------------------------------
helm upgrade --install \
  cert-manager jetstack/cert-manager \
  --kube-context "${K8S_CLUSTER_0_CONTEXT_NAME}" \
  --namespace cert-manager \
  --create-namespace \
  --set crds.enabled=true
--------------------------------------------------------------------------------
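helm upgrade --install returns before the cert-manager webhook is necessarily serving, so creating an Issuer immediately afterwards can race it. Waiting for the rollouts avoids that; the deployment names below are the chart defaults:

  kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n cert-manager rollout status deployment/cert-manager --timeout=120s
  kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n cert-manager rollout status deployment/cert-manager-webhook --timeout=120s
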
/architectures/setup-multi-cluster/verify-connectivity/output/0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out:
--------------------------------------------------------------------------------
Checking cross-cluster DNS resolution and connectivity from echoserver1-0 in gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1-67d0389d75b70a0007e5894a to echoserver0-0
SUCCESS
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/verify-connectivity/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out:
--------------------------------------------------------------------------------
Checking cross-cluster DNS resolution and connectivity from echoserver0-0 in gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0-67d0389d75b70a0007e5894a to echoserver1-0
SUCCESS
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/verify-connectivity/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out:
--------------------------------------------------------------------------------
Checking cross-cluster DNS resolution and connectivity from echoserver2-0 in gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2-67d0389d75b70a0007e5894a to echoserver1-0
SUCCESS
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/verify-connectivity/output/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out:
--------------------------------------------------------------------------------
Checking cross-cluster DNS resolution and connectivity from echoserver0-0 in gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0-67d0389d75b70a0007e5894a to echoserver2-0
SUCCESS
--------------------------------------------------------------------------------
/opa_examples/ops_manager_wizardless/constraints.yaml:
--------------------------------------------------------------------------------
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: OpsManagerWizardless
metadata:
  name: ops-manager-wizardless-only
spec:
  match:
    kinds:
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDBOpsManager"]
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-externaldns/code_snippets/0140_create_namespaces.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create ns external-dns
kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create ns external-dns
kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create ns external-dns
--------------------------------------------------------------------------------
/architectures/ops-manager-mc-no-mesh/code_snippets/0160_add_dns_record.sh:
--------------------------------------------------------------------------------
ip_address=$(gcloud compute forwarding-rules describe om-forwarding-rule --global --format="get(IPAddress)")

gcloud dns record-sets create "${OPS_MANAGER_EXTERNAL_DOMAIN}" --zone="${DNS_ZONE}" --type="A" --ttl="300" --rrdatas="${ip_address}"
--------------------------------------------------------------------------------
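Once 0160_add_dns_record.sh has created the A record, it can be read back straight from the managed zone instead of waiting for resolver caches; a sketch using the same variables:

  gcloud dns record-sets list --zone="${DNS_ZONE}" --name="${OPS_MANAGER_EXTERNAL_DOMAIN}" --type=A
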
/opa_examples/ops_manager_replica_members/constraints.yaml:
--------------------------------------------------------------------------------
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: OpsManagerReplicaMembers
metadata:
  name: ops-manager-replicamembers-only
spec:
  match:
    kinds:
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDBOpsManager"]
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0311_ops_manager_wait_for_pending_state.sh:
--------------------------------------------------------------------------------
echo "Waiting for Application Database to reach Pending phase..."
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" wait --for=jsonpath='{.status.applicationDatabase.phase}'=Pending opsmanager/om --timeout=30s
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0321_ops_manager_wait_for_pending_state.sh:
--------------------------------------------------------------------------------
echo "Waiting for Application Database to reach Pending phase..."
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" wait --for=jsonpath='{.status.applicationDatabase.phase}'=Pending opsmanager/om --timeout=30s
--------------------------------------------------------------------------------
/opa_examples/ops_manager_allowed_versions/constraints.yaml:
--------------------------------------------------------------------------------
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: OpsManagerAllowedVersions
metadata:
  name: ops-manager-allowed-versions-only
spec:
  match:
    kinds:
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDBOpsManager"]
--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/code_snippets/0311_ops_manager_wait_for_pending_state.sh:
--------------------------------------------------------------------------------
echo "Waiting for Application Database to reach Pending phase..."
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" wait --for=jsonpath='{.status.applicationDatabase.phase}'=Pending opsmanager/om --timeout=30s
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_0.sh:
--------------------------------------------------------------------------------
gcloud container clusters create "${K8S_CLUSTER_0}" \
  --zone="${K8S_CLUSTER_0_ZONE}" \
  --num-nodes="${K8S_CLUSTER_0_NUMBER_OF_NODES}" \
  --machine-type "${K8S_CLUSTER_0_MACHINE_TYPE}" \
  ${GKE_SPOT_INSTANCES_SWITCH:-""}
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_1.sh:
--------------------------------------------------------------------------------
gcloud container clusters create "${K8S_CLUSTER_1}" \
  --zone="${K8S_CLUSTER_1_ZONE}" \
  --num-nodes="${K8S_CLUSTER_1_NUMBER_OF_NODES}" \
  --machine-type "${K8S_CLUSTER_1_MACHINE_TYPE}" \
  ${GKE_SPOT_INSTANCES_SWITCH:-""}
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_2.sh:
--------------------------------------------------------------------------------
gcloud container clusters create "${K8S_CLUSTER_2}" \
  --zone="${K8S_CLUSTER_2_ZONE}" \
  --num-nodes="${K8S_CLUSTER_2_NUMBER_OF_NODES}" \
  --machine-type "${K8S_CLUSTER_2_MACHINE_TYPE}" \
  ${GKE_SPOT_INSTANCES_SWITCH:-""}
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-externaldns/code_snippets/9050_delete_namespace.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "external-dns" &
kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" delete ns "external-dns" &
kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" delete ns "external-dns" &
wait
--------------------------------------------------------------------------------
/tools/multicluster/Dockerfile:
--------------------------------------------------------------------------------
FROM golang:1.24 as builder
WORKDIR /go/src
ADD . .

RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /go/bin/mongodb-multicluster

FROM scratch
COPY --from=builder /go/bin/mongodb-multicluster /go/bin/mongodb-multicluster

ENTRYPOINT [ "/go/bin/mongodb-multicluster" ]
--------------------------------------------------------------------------------
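The two-stage Dockerfile above compiles the multicluster CLI and copies the static binary into a scratch image, so the image contains nothing but the entrypoint. Building and invoking it locally might look like this sketch (the image tag is arbitrary, and --help relies on the cobra-style CLI under cmd/):

  docker build -t mongodb-multicluster tools/multicluster
  docker run --rm mongodb-multicluster --help
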
/opa_examples/debugging/constraints.yaml:
--------------------------------------------------------------------------------
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sDenyAll
metadata:
  name: deny-all-namespaces
spec:
  match:
    kinds:
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDB"]
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDBOpsManager"]
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0020_get_gke_credentials.sh:
--------------------------------------------------------------------------------
gcloud container clusters get-credentials "${K8S_CLUSTER_0}" --zone="${K8S_CLUSTER_0_ZONE}"
gcloud container clusters get-credentials "${K8S_CLUSTER_1}" --zone="${K8S_CLUSTER_1_ZONE}"
gcloud container clusters get-credentials "${K8S_CLUSTER_2}" --zone="${K8S_CLUSTER_2_ZONE}"
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-gke/code_snippets/0020_get_gke_credentials.sh:
--------------------------------------------------------------------------------
gcloud container clusters get-credentials "${K8S_CLUSTER_0}" --zone="${K8S_CLUSTER_0_ZONE}"
gcloud container clusters get-credentials "${K8S_CLUSTER_1}" --zone="${K8S_CLUSTER_1_ZONE}"
gcloud container clusters get-credentials "${K8S_CLUSTER_2}" --zone="${K8S_CLUSTER_2_ZONE}"
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/9010_delete_gke_clusters.sh:
--------------------------------------------------------------------------------
yes | gcloud container clusters delete "${K8S_CLUSTER_0}" --zone="${K8S_CLUSTER_0_ZONE}" &
yes | gcloud container clusters delete "${K8S_CLUSTER_1}" --zone="${K8S_CLUSTER_1_ZONE}" &
yes | gcloud container clusters delete "${K8S_CLUSTER_2}" --zone="${K8S_CLUSTER_2_ZONE}" &
wait
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-gke/code_snippets/0010_create_gke_cluster_0.sh:
--------------------------------------------------------------------------------
gcloud container clusters create "${K8S_CLUSTER_0}" \
  --zone="${K8S_CLUSTER_0_ZONE}" \
  --num-nodes="${K8S_CLUSTER_0_NUMBER_OF_NODES}" \
  --machine-type "${K8S_CLUSTER_0_MACHINE_TYPE}" \
  --tags=mongodb \
  "${GKE_SPOT_INSTANCES_SWITCH:-""}"
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-gke/code_snippets/0010_create_gke_cluster_1.sh:
--------------------------------------------------------------------------------
gcloud container clusters create "${K8S_CLUSTER_1}" \
  --zone="${K8S_CLUSTER_1_ZONE}" \
  --num-nodes="${K8S_CLUSTER_1_NUMBER_OF_NODES}" \
  --machine-type "${K8S_CLUSTER_1_MACHINE_TYPE}" \
  --tags=mongodb \
  "${GKE_SPOT_INSTANCES_SWITCH:-""}"
--------------------------------------------------------------------------------
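Each get-credentials call above merges a context for that cluster into the local kubeconfig; the repo's 0030_verify_access_to_clusters.sh (content not shown here) then verifies access. A minimal stand-in check might be:

  kubectl config get-contexts -o name
  kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" get nodes
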
/architectures/setup-multi-cluster/setup-gke/code_snippets/0010_create_gke_cluster_2.sh:
--------------------------------------------------------------------------------
gcloud container clusters create "${K8S_CLUSTER_2}" \
  --zone="${K8S_CLUSTER_2_ZONE}" \
  --num-nodes="${K8S_CLUSTER_2_NUMBER_OF_NODES}" \
  --machine-type "${K8S_CLUSTER_2_MACHINE_TYPE}" \
  --tags=mongodb \
  "${GKE_SPOT_INSTANCES_SWITCH:-""}"
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-gke/code_snippets/9010_delete_gke_clusters.sh:
--------------------------------------------------------------------------------
yes | gcloud container clusters delete "${K8S_CLUSTER_0}" --zone="${K8S_CLUSTER_0_ZONE}" &
yes | gcloud container clusters delete "${K8S_CLUSTER_1}" --zone="${K8S_CLUSTER_1_ZONE}" &
yes | gcloud container clusters delete "${K8S_CLUSTER_2}" --zone="${K8S_CLUSTER_2_ZONE}" &
wait
--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0300_ops_manager_create_admin_credentials.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" --namespace "${NAMESPACE}" create secret generic om-admin-user-credentials \
  --from-literal=Username="admin" \
  --from-literal=Password="Passw0rd@" \
  --from-literal=FirstName="Jane" \
  --from-literal=LastName="Doe"
--------------------------------------------------------------------------------
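The om-admin-user-credentials secret seeds the first Ops Manager admin user, so it has to exist before the MongoDBOpsManager resource is applied. Confirming the four keys landed correctly might look like this sketch (jq is assumed to be installed):

  kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" --namespace "${NAMESPACE}" get secret om-admin-user-credentials -o json | jq -r '.data | keys[]'
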
/architectures/mongodb-sharded-mc-no-mesh/teardown.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -eou pipefail

script_name=$(readlink -f "${BASH_SOURCE[0]}")
script_dir=$(dirname "${script_name}")

source scripts/code_snippets/sample_test_runner.sh

pushd "${script_dir}"

prepare_snippets

run 9000_delete_resources.sh

popd
--------------------------------------------------------------------------------
/samples/mongodb/backup/replica-set-backup.yaml:
--------------------------------------------------------------------------------
---
apiVersion: mongodb.com/v1
kind: MongoDB
metadata:
  name: my-replica-set-backup
spec:
  members: 3
  version: 4.4.0-ent
  type: ReplicaSet

  opsManager:
    configMapRef:
      name: my-project
  credentials: my-credentials
  backup:
    mode: enabled
--------------------------------------------------------------------------------
/architectures/mongodb-replicaset-mc-no-mesh/teardown.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -eou pipefail

script_name=$(readlink -f "${BASH_SOURCE[0]}")
script_dir=$(dirname "${script_name}")

source scripts/code_snippets/sample_test_runner.sh

pushd "${script_dir}"

prepare_snippets

run 9000_delete_resources.sh

popd
--------------------------------------------------------------------------------
/architectures/mongodb-sharded-multi-cluster/teardown.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -eou pipefail

script_name=$(readlink -f "${BASH_SOURCE[0]}")
script_dir=$(dirname "${script_name}")

source scripts/code_snippets/sample_test_runner.sh

pushd "${script_dir}"

prepare_snippets

run 9000_delete_resources.sh

popd
--------------------------------------------------------------------------------
/architectures/ops-manager-mc-no-mesh/code_snippets/0300_ops_manager_create_admin_credentials.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" --namespace "${OM_NAMESPACE}" create secret generic om-admin-user-credentials \
  --from-literal=Username="admin" \
  --from-literal=Password="Passw0rd@" \
  --from-literal=FirstName="Jane" \
  --from-literal=LastName="Doe"
--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/code_snippets/0300_ops_manager_create_admin_credentials.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" --namespace "${OM_NAMESPACE}" create secret generic om-admin-user-credentials \
  --from-literal=Username="admin" \
  --from-literal=Password="Passw0rd@" \
  --from-literal=FirstName="Jane" \
  --from-literal=LastName="Doe"
--------------------------------------------------------------------------------
/samples/mongodb/project.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-project
data:
  projectName: My Ops/Cloud Manager Project
  baseUrl: http://my-ops-cloud-manager-url

  # Optional parameters

  # If orgId is omitted a new organization will be created, with the same name as the Project.
  orgId: my-org-id
--------------------------------------------------------------------------------
/architectures/mongodb-replicaset-multi-cluster/teardown.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -eou pipefail

script_name=$(readlink -f "${BASH_SOURCE[0]}")
script_dir=$(dirname "${script_name}")

source scripts/code_snippets/sample_test_runner.sh

pushd "${script_dir}"

prepare_snippets

run 9000_delete_resources.sh

popd
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-gke/teardown.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -eou pipefail

script_name=$(readlink -f "${BASH_SOURCE[0]}")
script_dir=$(dirname "${script_name}")

source scripts/code_snippets/sample_test_runner.sh

pushd "${script_dir}"

prepare_snippets

run 9010_delete_gke_clusters.sh

popd
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-operator/teardown.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -eou pipefail

script_name=$(readlink -f "${BASH_SOURCE[0]}")
script_dir=$(dirname "${script_name}")

source scripts/code_snippets/sample_test_runner.sh

pushd "${script_dir}"

prepare_snippets

run 9000_delete_namespaces.sh

popd
--------------------------------------------------------------------------------
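All of the teardown.sh scripts above follow the same shape: resolve their own directory, source the shared snippet runner, then run the numbered cleanup snippet. Note that scripts/code_snippets/sample_test_runner.sh is sourced relative to the working directory, so they are meant to be launched from the repository root, e.g.:

  ./architectures/setup-multi-cluster/setup-operator/teardown.sh
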
/samples/ops-manager-multi-cluster/test_cleanup.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

# This script only cleans up the local directory to prepare for a fresh run. It does not clean up any deployed resources/clusters.

set -eou pipefail

source env_variables.sh
source ../../scripts/sample_test_runner.sh

run_cleanup "test.sh"
rm -rf istio*
rm -rf certs
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-externaldns/code_snippets/0200_install_externaldns.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n external-dns apply -f yamls/externaldns.yaml
kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n external-dns apply -f yamls/externaldns.yaml
kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n external-dns apply -f yamls/externaldns.yaml
--------------------------------------------------------------------------------
/samples/mongodb/backup/replica-set-backup-disabled.yaml:
--------------------------------------------------------------------------------
---
apiVersion: mongodb.com/v1
kind: MongoDB
metadata:
  name: my-replica-set-backup-disabled
spec:
  members: 3
  version: 4.4.0-ent
  type: ReplicaSet

  opsManager:
    configMapRef:
      name: my-project
  credentials: my-credentials
  backup:
    mode: disabled
--------------------------------------------------------------------------------
/samples/mongodb/authentication/x509/replica-set/user.yaml:
--------------------------------------------------------------------------------
---
apiVersion: mongodb.com/v1
kind: MongoDBUser
metadata:
  name: my-replica-set-x509-user
spec:
  username: CN=my-replica-set-x509-user,OU=cloud,O=MongoDB,L=New York,ST=New York,C=US
  db: $external
  mongodbResourceRef:
    name: my-replica-set
  roles:
    - db: admin
      name: dbOwner
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-istio/test.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash

set -eou pipefail

script_name=$(readlink -f "${BASH_SOURCE[0]}")
script_dir=$(dirname "${script_name}")

source scripts/code_snippets/sample_test_runner.sh

pushd "${script_dir}"

prepare_snippets

run 0040_install_istio.sh
run 0050_label_namespaces.sh

popd
--------------------------------------------------------------------------------
/samples/mongodb/authentication/x509/sharded-cluster/user.yaml:
--------------------------------------------------------------------------------
---
apiVersion: mongodb.com/v1
kind: MongoDBUser
metadata:
  name: my-sharded-cluster-x509-user
spec:
  username: CN=my-sharded-cluster-x509-user,OU=cloud,O=MongoDB,L=New York,ST=New York,C=US
  db: $external
  mongodbResourceRef:
    name: my-replica-set
  roles:
    - db: admin
      name: dbOwner
--------------------------------------------------------------------------------
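Both x509 user resources authenticate against the $external database, so the username must be the full distinguished name presented by the client certificate. Applying one of them follows the usual pattern; the namespace variable is an assumption borrowed from the surrounding scripts:

  kubectl -n "${MDB_NAMESPACE}" apply -f samples/mongodb/authentication/x509/replica-set/user.yaml
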
-n "${OM_NAMESPACE}" get secret cert-prefix-om-cert -o jsonpath="{.data['tls\.key']}" | base64 --decode > certs/tls.key 5 | 6 | gcloud compute ssl-certificates create om-certificate --certificate=certs/tls.crt --private-key=certs/tls.key 7 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/output/0150_om_load_balancer.out: -------------------------------------------------------------------------------- 1 | NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED 2 | fw-ops-manager-hc default INGRESS 1000 tcp:8443 False 3 | NAME PROTOCOL 4 | om-healthcheck HTTPS 5 | NAME BACKENDS PROTOCOL 6 | om-backend-service HTTPS 7 | NAME DEFAULT_SERVICE 8 | om-url-map backendServices/om-backend-service 9 | NAME SSL_CERTIFICATES URL_MAP REGION CERTIFICATE_MAP 10 | om-lb-proxy om-certificate om-url-map 11 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-cert-manager/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run_for_output 0215_helm_configure_repo.sh 15 | run_for_output 0216_helm_install_cert_manager.sh 16 | run 0220_create_issuer.sh 17 | run_for_output 0221_verify_issuer.sh 18 | run 0225_create_ca_configmap.sh 19 | 20 | popd 21 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/output/1210_verify_mongosh_connection.out: -------------------------------------------------------------------------------- 1 | { 2 | authInfo: { 3 | authenticatedUsers: [ { user: 'rs-user', db: 'admin' } ], 4 | authenticatedUserRoles: [ { role: 'root', db: 'admin' } ] 5 | }, 6 | ok: 1, 7 | '$clusterTime': { 8 | clusterTime: Timestamp({ t: 1743589744, i: 1 }), 9 | signature: { 10 | hash: Binary.createFromBase64('fiBrPX9aaxTmMmLb1K2q6d4/XfQ=', 0), 11 | keyId: Long('7488660369775263749') 12 | } 13 | }, 14 | operationTime: Timestamp({ t: 1743589744, i: 1 }) 15 | } 16 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/output/2210_verify_mongosh_connection.out: -------------------------------------------------------------------------------- 1 | { 2 | authInfo: { 3 | authenticatedUsers: [ { user: 'sc-user', db: 'admin' } ], 4 | authenticatedUserRoles: [ { role: 'root', db: 'admin' } ] 5 | }, 6 | ok: 1, 7 | '$clusterTime': { 8 | clusterTime: Timestamp({ t: 1743590424, i: 1 }), 9 | signature: { 10 | hash: Binary.createFromBase64('1+SD+TJDayNhxsFsJzaGb2mtd+c=', 0), 11 | keyId: Long('7488663363367469079') 12 | } 13 | }, 14 | operationTime: Timestamp({ t: 1743590424, i: 1 }) 15 | } 16 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/output/2210_verify_mongosh_connection.out: -------------------------------------------------------------------------------- 1 | { 2 | authInfo: { 3 | authenticatedUsers: [ { user: 'sc-user', db: 'admin' } ], 4 | authenticatedUserRoles: [ { role: 'root', db: 'admin' } ] 5 | }, 6 | ok: 1, 7 | '$clusterTime': { 8 | clusterTime: Timestamp({ t: 1741702735, i: 1 }), 9 | signature: { 10 | hash: 
Binary.createFromBase64('kVqqNDHTI1zxYrPsU0QaYqyksJA=', 0), 11 | keyId: Long('7480555706358169606') 12 | } 13 | }, 14 | operationTime: Timestamp({ t: 1741702735, i: 1 }) 15 | } 16 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/code_snippets/1050_generate_certs.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" -f - < certs/ca.crt 5 | 6 | mongosh --host "${external_ip}" --username rs-user --password password --tls --tlsCAFile certs/ca.crt --tlsAllowInvalidHostnames --eval "db.runCommand({connectionStatus : 1})" 7 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 0005_gcloud_set_current_project.sh 15 | run 0010_create_gke_cluster_0.sh & 16 | run 0010_create_gke_cluster_1.sh & 17 | run 0010_create_gke_cluster_2.sh & 18 | wait 19 | 20 | run 0020_get_gke_credentials.sh 21 | run_for_output 0030_verify_access_to_clusters.sh 22 | 23 | popd 24 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/code_snippets/1210_verify_mongosh_connection.sh: -------------------------------------------------------------------------------- 1 | external_ip="$(kubectl get --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" svc "${RS_RESOURCE_NAME}-0-0-svc-external" -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")" 2 | 3 | mkdir -p certs 4 | kubectl get --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" cm/ca-issuer -o=jsonpath='{.data.ca-pem}' > certs/ca.crt 5 | 6 | mongosh --host "${external_ip}" --username rs-user --password password --tls --tlsCAFile certs/ca.crt --tlsAllowInvalidHostnames --eval "db.runCommand({connectionStatus : 1})" 7 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/code_snippets/2210_verify_mongosh_connection.sh: -------------------------------------------------------------------------------- 1 | external_ip="$(kubectl get --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" svc "${SC_RESOURCE_NAME}-mongos-0-0-svc-external" -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")" 2 | 3 | mkdir -p certs 4 | kubectl get --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" cm/ca-issuer -o=jsonpath='{.data.ca-pem}' > certs/ca.crt 5 | 6 | mongosh --host "${external_ip}" --username sc-user --password password --tls --tlsCAFile certs/ca.crt --tlsAllowInvalidHostnames --eval "db.runCommand({connectionStatus : 1})" 7 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/code_snippets/2210_verify_mongosh_connection.sh: -------------------------------------------------------------------------------- 1 | external_ip="$(kubectl get --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" svc "${SC_RESOURCE_NAME}-mongos-0-0-svc-external" -o=jsonpath="{.status.loadBalancer.ingress[0].ip}")" 2 | 3 | 
mkdir -p certs 4 | kubectl get --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" cm/ca-issuer -o=jsonpath='{.data.ca-pem}' > certs/ca.crt 5 | 6 | mongosh --host "${external_ip}" --username sc-user --password password --tls --tlsCAFile certs/ca.crt --tlsAllowInvalidHostnames --eval "db.runCommand({connectionStatus : 1})" 7 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0300_setup_dns_zone.sh: -------------------------------------------------------------------------------- 1 | FQ_CLUSTER_0="projects/${MDB_GKE_PROJECT}/locations/${K8S_CLUSTER_0_ZONE}/clusters/${K8S_CLUSTER_0}" 2 | FQ_CLUSTER_1="projects/${MDB_GKE_PROJECT}/locations/${K8S_CLUSTER_1_ZONE}/clusters/${K8S_CLUSTER_1}" 3 | FQ_CLUSTER_2="projects/${MDB_GKE_PROJECT}/locations/${K8S_CLUSTER_2_ZONE}/clusters/${K8S_CLUSTER_2}" 4 | 5 | gcloud dns managed-zones create "${DNS_ZONE}" \ 6 | --description="" \ 7 | --dns-name="${CUSTOM_DOMAIN}" \ 8 | --visibility="private" \ 9 | --gkeclusters="${FQ_CLUSTER_0}","${FQ_CLUSTER_1}","${FQ_CLUSTER_2}" 10 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0060_check_cluster_connectivity_wait_for_sts.sh: -------------------------------------------------------------------------------- 1 | kubectl wait --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "connectivity-test" --for=condition=ready pod -l statefulset.kubernetes.io/pod-name=echoserver0-0 --timeout=60s 2 | kubectl wait --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "connectivity-test" --for=condition=ready pod -l statefulset.kubernetes.io/pod-name=echoserver1-0 --timeout=60s 3 | kubectl wait --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "connectivity-test" --for=condition=ready pod -l statefulset.kubernetes.io/pod-name=echoserver2-0 --timeout=60s 4 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0500_ops_manager_prepare_s3_backup_secrets.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret generic s3-access-secret \ 2 | --from-literal=accessKey="${S3_ACCESS_KEY}" \ 3 | --from-literal=secretKey="${S3_SECRET_KEY}" 4 | 5 | # minio TLS secrets are signed with the default k8s root CA 6 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret generic s3-ca-cert \ 7 | --from-literal=ca.crt="$(kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n kube-system get configmap kube-root-ca.crt -o jsonpath="{.data.ca\.crt}")" 8 | -------------------------------------------------------------------------------- /samples/ops-manager/ops-manager-disable-appdb-process.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBOpsManager 4 | metadata: 5 | name: ops-manager 6 | spec: 7 | replicas: 3 8 | version: 5.0.5 9 | adminCredentials: ops-manager-admin-secret 10 | configuration: 11 | mms.fromEmailAddr: "admin@example.com" 12 | 13 | applicationDatabase: 14 | members: 3 15 | version: 4.4.11-ent 16 | automationConfig: 17 | processes: 18 | # this will disable the second AppDB process to allow for manual backups to be taken. 
19 | - name: ops-manager-db-1 20 | disabled: true 21 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0500_ops_manager_prepare_s3_backup_secrets.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" create secret generic s3-access-secret \ 2 | --from-literal=accessKey="${S3_ACCESS_KEY}" \ 3 | --from-literal=secretKey="${S3_SECRET_KEY}" 4 | 5 | # minio TLS secrets are signed with the default k8s root CA 6 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" create secret generic s3-ca-cert \ 7 | --from-literal=ca.crt="$(kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n kube-system get configmap kube-root-ca.crt -o jsonpath="{.data.ca\.crt}")" 8 | -------------------------------------------------------------------------------- /architectures/ops-manager-multi-cluster/code_snippets/0500_ops_manager_prepare_s3_backup_secrets.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" create secret generic s3-access-secret \ 2 | --from-literal=accessKey="${S3_ACCESS_KEY}" \ 3 | --from-literal=secretKey="${S3_SECRET_KEY}" 4 | 5 | # minio TLS secrets are signed with the default k8s root CA 6 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" create secret generic s3-ca-cert \ 7 | --from-literal=ca.crt="$(kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n kube-system get configmap kube-root-ca.crt -o jsonpath="{.data.ca\.crt}")" 8 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 0045_create_namespaces.sh 15 | run 0046_create_image_pull_secrets.sh 16 | 17 | run_for_output 0200_kubectl_mongodb_configure_multi_cluster.sh 18 | run_for_output 0205_helm_configure_repo.sh 19 | run_for_output 0210_helm_install_operator.sh 20 | run_for_output 0211_check_operator_deployment.sh 21 | 22 | popd 23 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0150_create_sa_secrets.sh: -------------------------------------------------------------------------------- 1 | # create secret with service account key 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n external-dns create secret generic external-dns-sa-secret --from-file credentials.json=secrets/external-dns-sa-key.json 3 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n external-dns create secret generic external-dns-sa-secret --from-file credentials.json=secrets/external-dns-sa-key.json 4 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n external-dns create secret generic external-dns-sa-secret --from-file credentials.json=secrets/external-dns-sa-key.json 5 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/test.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 0100_create_gke_sa.sh 15 | # need to wait as the SA is not immediately available 16 | sleep 10 17 | run 0120_add_role_to_sa.sh 18 | run 0130_create_sa_key.sh 19 | run 0140_create_namespaces.sh 20 | run 0150_create_sa_secrets.sh 21 | run 0200_install_externaldns.sh 22 | run 0300_setup_dns_zone.sh 23 | 24 | popd 25 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/code_snippets/1050_generate_certs.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" -f - <&1); 8 | grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} 2 | target_pod="echoserver1-0" 3 | source_pod="echoserver0-0" 4 | target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_2_CONTEXT_NAME} 2 | target_pod="echoserver1-0" 3 | source_pod="echoserver2-0" 4 | target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} 2 | target_pod="echoserver2-0" 3 | source_pod="echoserver0-0" 4 | target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | 
grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) 9 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/standalone/standalone-scram-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-scram-user 6 | spec: 7 | passwordSecretKeyRef: 8 | name: my-scram-secret # the name of the secret that stores this user's password 9 | key: password # the key in the secret that stores the password 10 | username: my-scram-user 11 | db: admin 12 | mongodbResourceRef: 13 | name: my-scram-enabled-standalone # The name of the MongoDB resource this user will be added to 14 | roles: 15 | - db: admin 16 | name: readWrite 17 | - db: admin 18 | name: userAdminAnyDatabase 19 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/sharded-cluster/sharded-cluster-scram-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-scram-user 6 | spec: 7 | passwordSecretKeyRef: 8 | name: my-scram-secret # the name of the secret that stores this user's password 9 | key: password # the key in the secret that stores the password 10 | username: my-scram-user 11 | db: admin 12 | mongodbResourceRef: 13 | name: my-scram-enabled-sharded-cluster # The name of the MongoDB resource this user will be added to 14 | roles: 15 | - db: admin 16 | name: clusterAdmin 17 | - db: admin 18 | name: userAdminAnyDatabase 19 | 20 | -------------------------------------------------------------------------------- /samples/mongodb/pod-template/initcontainer-sysctl_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDB 3 | metadata: 4 | name: my-replica-set 5 | namespace: mongodb 6 | spec: 7 | members: 3 8 | version: 4.2.2 9 | type: ReplicaSet 10 | 11 | cloudManager: 12 | configMapRef: 13 | name: my-project 14 | credentials: my-credentials 15 | 16 | persistent: false 17 | podSpec: 18 | podTemplate: 19 | spec: 20 | initContainers: 21 | - name: "apply-sysctl-test" 22 | image: "busybox:latest" 23 | securityContext: 24 | privileged: true 25 | command: ["sysctl", "-w", "net.ipv4.tcp_keepalive_time=120"] 26 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0255_create_cert_secrets.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret tls cert-prefix-om-cert \ 2 | --cert=certs/om.crt \ 3 | --key=certs/om.key 4 | 5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret tls cert-prefix-om-db-cert \ 6 | --cert=certs/appdb.crt \ 7 | --key=certs/appdb.key 8 | 9 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create configmap om-cert-ca --from-file="mms-ca.crt=certs/ca.crt" 10 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create configmap appdb-cert-ca --from-file="ca-pem=certs/ca.crt" 11 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0045_create_ops_manager_namespace.sh: 
-------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${NAMESPACE}" 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" label namespace "${NAMESPACE}" istio-injection=enabled --overwrite 3 | 4 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${NAMESPACE}" 5 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" label namespace "${NAMESPACE}" istio-injection=enabled --overwrite 6 | 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${NAMESPACE}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" label namespace "${NAMESPACE}" istio-injection=enabled --overwrite 9 | -------------------------------------------------------------------------------- /samples/multi-cluster-cli-gitops/argocd/project.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: AppProject 3 | metadata: 4 | name: my-project 5 | namespace: argocd 6 | finalizers: 7 | - resources-finalizer.argocd.argoproj.io 8 | spec: 9 | description: Example Project 10 | sourceRepos: 11 | - '*' 12 | destinations: 13 | - namespace: mongodb 14 | server: https://central.mongokubernetes.com 15 | clusterResourceWhitelist: 16 | # Allow MongoDBMultiCluster resources to be synced 17 | - group: 'mongodb.com' 18 | kind: MongoDBMultiCluster 19 | # Allow Jobs to be created (used for sync hooks in this example) 20 | - group: 'batch' 21 | kind: Job 22 | - group: '' 23 | kind: Namespace 24 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0050_check_cluster_connectivity_create_sts_0.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" -f - <&1); 8 | 9 | if grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" 10 | then 11 | echo "SUCCESS" 12 | else 13 | echo "ERROR: ${out}" 14 | return 1 15 | fi 16 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} 2 | target_pod="echoserver1-0" 3 | source_pod="echoserver0-0" 4 | target_url="http://${target_pod}.connectivity-test.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "connectivity-test" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | 9 | if grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" 10 | then 11 | echo "SUCCESS" 12 | else 13 | echo "ERROR: ${out}" 14 | return 1 15 | fi 16 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_2_CONTEXT_NAME} 2 | target_pod="echoserver1-0" 3 | source_pod="echoserver2-0" 4 | target_url="http://${target_pod}.connectivity-test.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in
${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "connectivity-test" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | 9 | if grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" 10 | then 11 | echo "SUCCESS" 12 | else 13 | echo "ERROR: ${out}" 14 | return 1 15 | fi 16 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} 2 | target_pod="echoserver2-0" 3 | source_pod="echoserver0-0" 4 | target_url="http://${target_pod}.connectivity-test.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "connectivity-test" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | 9 | if grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" 10 | then 11 | echo "SUCCESS" 12 | else 13 | echo "ERROR: ${out}" 14 | return 1 15 | fi 16 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.3/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.3" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | USER 2000 19 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/mmsconfiguration", "/opt/scripts/" ] 20 | 21 | 22 | -------------------------------------------------------------------------------- /samples/multi-cluster-cli-gitops/resources/replica-set.yaml: -------------------------------------------------------------------------------- 1 | # sample mongodb-multi replicaset yaml 2 | --- 3 | apiVersion: mongodb.com/v1 4 | kind: MongoDBMultiCluster 5 | metadata: 6 | name: multi-replica-set 7 | spec: 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | persistent: false 11 | duplicateServiceObjects: false 12 | credentials: my-credentials 13 | opsManager: 14 | configMapRef: 15 | name: my-project 16 | clusterSpecList: 17 | # cluster names where you want to deploy the replicaset 18 | - clusterName: cluster1.mongokubernetes.com 19 | members: 2 20 | - clusterName: cluster2.mongokubernetes.com 21 | members: 1 22 | - clusterName: cluster4.mongokubernetes.com 23 | members: 2 24 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0045_create_operator_namespace.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite 3 | 4 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace 
"${OPERATOR_NAMESPACE}" 5 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite 6 | 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite 9 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0045_create_connectivity_test_namespaces.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "connectivity-test" 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" label namespace "connectivity-test" istio-injection=enabled --overwrite 3 | 4 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "connectivity-test" 5 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" label namespace "connectivity-test" istio-injection=enabled --overwrite 6 | 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "connectivity-test" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" label namespace "connectivity-test" istio-injection=enabled --overwrite 9 | -------------------------------------------------------------------------------- /samples/mongodb/agent-startup-options/replica-set-agent-startup-options.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDB 3 | metadata: 4 | name: my-replica-set-agent-parameters 5 | spec: 6 | members: 3 7 | version: 4.4.0-ent 8 | type: ReplicaSet 9 | opsManager: 10 | configMapRef: 11 | name: my-project 12 | credentials: my-credentials 13 | persistent: true 14 | # optional. Allows to pass custom flags that will be used 15 | # when launching the mongodb agent. All values must be strings 16 | # The full list of available settings is at: 17 | # https://docs.opsmanager.mongodb.com/current/reference/mongodb-agent-settings/ 18 | agent: 19 | startupOptions: 20 | maxLogFiles: "30" 21 | dialTimeoutSeconds: "40" 22 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/output/0211_check_operator_deployment.out: -------------------------------------------------------------------------------- 1 | Waiting for deployment "mongodb-enterprise-operator-multi-cluster" rollout to finish: 0 of 1 updated replicas are available... 
2 | deployment "mongodb-enterprise-operator-multi-cluster" successfully rolled out 3 | Operator deployment in mongodb-operator namespace 4 | NAME READY UP-TO-DATE AVAILABLE AGE 5 | mongodb-enterprise-operator-multi-cluster 1/1 1 1 10s 6 | 7 | Operator pod in mongodb-operator namespace 8 | NAME READY STATUS RESTARTS AGE 9 | mongodb-enterprise-operator-multi-cluster-54d786b796-7l5ct 2/2 Running 1 (4s ago) 10s 10 | -------------------------------------------------------------------------------- /tools/multicluster/pkg/debug/anonymize.go: -------------------------------------------------------------------------------- 1 | package debug 2 | 3 | import v1 "k8s.io/api/core/v1" 4 | 5 | const ( 6 | MASKED_TEXT = "***MASKED***" 7 | ) 8 | 9 | type Anonymizer interface { 10 | AnonymizeSecret(secret *v1.Secret) *v1.Secret 11 | } 12 | 13 | var _ Anonymizer = &NoOpAnonymizer{} 14 | 15 | type NoOpAnonymizer struct{} 16 | 17 | func (n *NoOpAnonymizer) AnonymizeSecret(secret *v1.Secret) *v1.Secret { 18 | return secret 19 | } 20 | 21 | var _ Anonymizer = &SensitiveDataAnonymizer{} 22 | 23 | type SensitiveDataAnonymizer struct{} 24 | 25 | func (n *SensitiveDataAnonymizer) AnonymizeSecret(secret *v1.Secret) *v1.Secret { 26 | for key := range secret.Data { 27 | secret.Data[key] = []byte(MASKED_TEXT) 28 | } 29 | return secret 30 | } 31 | -------------------------------------------------------------------------------- /samples/mongodb/agent-startup-options/standalone-agent-startup-options.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDB 4 | metadata: 5 | name: my-standalone 6 | spec: 7 | version: 4.4.0-ent 8 | service: my-service 9 | 10 | opsManager: 11 | configMapRef: 12 | name: my-project 13 | credentials: my-credentials 14 | type: Standalone 15 | 16 | persistent: true 17 | # optional. Allows to pass custom flags that will be used 18 | # when launching the mongodb agent. All values must be strings 19 | # The full list of available settings is at: 20 | # https://docs.opsmanager.mongodb.com/current/reference/mongodb-agent-settings/ 21 | agent: 22 | startupOptions: 23 | maxLogFiles: "30" 24 | dialTimeoutSeconds: "40" 25 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/output/0211_check_operator_deployment.out: -------------------------------------------------------------------------------- 1 | Waiting for deployment "mongodb-enterprise-operator-multi-cluster" rollout to finish: 0 of 1 updated replicas are available... 
2 | deployment "mongodb-enterprise-operator-multi-cluster" successfully rolled out 3 | Operator deployment in mongodb-operator namespace 4 | NAME                                         READY   UP-TO-DATE   AVAILABLE   AGE 5 | mongodb-enterprise-operator-multi-cluster   1/1     1            1           9s 6 | 7 | Operator pod in mongodb-operator namespace 8 | NAME                                                          READY   STATUS    RESTARTS     AGE 9 | mongodb-enterprise-operator-multi-cluster-786c8fcd9b-9k465   2/2     Running   1 (3s ago)   10s 10 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.3/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.3" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | USER 2000 19 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/mmsconfiguration", "/opt/scripts/" ] 20 | 21 | 22 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/env_variables.sh: -------------------------------------------------------------------------------- 1 | # This script builds on top of the environment configured in the setup guides. 2 | # It depends on (uses) the following env variables defined there to work correctly. 3 | # If you don't use the setup guide to bootstrap the environment, then define them here. 4 | # ${K8S_CLUSTER_0_CONTEXT_NAME} 5 | # ${K8S_CLUSTER_1_CONTEXT_NAME} 6 | # ${K8S_CLUSTER_2_CONTEXT_NAME} 7 | # ${MDB_NAMESPACE} 8 | # ${CUSTOM_DOMAIN} 9 | 10 | export RS_RESOURCE_NAME=mdb 11 | export MONGODB_VERSION="8.0.5-ent" 12 | 13 | export MDB_CLUSTER_0_EXTERNAL_DOMAIN="${K8S_CLUSTER_0}.${CUSTOM_DOMAIN}" 14 | export MDB_CLUSTER_1_EXTERNAL_DOMAIN="${K8S_CLUSTER_1}.${CUSTOM_DOMAIN}" 15 | export MDB_CLUSTER_2_EXTERNAL_DOMAIN="${K8S_CLUSTER_2}.${CUSTOM_DOMAIN}" 16 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/env_variables.sh: -------------------------------------------------------------------------------- 1 | # This script builds on top of the environment configured in the setup guides. 2 | # It depends on (uses) the following env variables defined there to work correctly. 3 | # If you don't use the setup guide to bootstrap the environment, then define them here.
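# For illustration, hypothetical values for the variables listed below, modeled on the context-name format used elsewhere in these guides (substitute your own):
# export K8S_CLUSTER_0_CONTEXT_NAME="gke_my-project_europe-central2-a_k8s-mdb-0"
# export MDB_NAMESPACE="mongodb"
# export CUSTOM_DOMAIN="mongodb.custom"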
4 | # ${K8S_CLUSTER_0_CONTEXT_NAME} 5 | # ${K8S_CLUSTER_1_CONTEXT_NAME} 6 | # ${K8S_CLUSTER_2_CONTEXT_NAME} 7 | # ${MDB_NAMESPACE} 8 | # ${CUSTOM_DOMAIN} 9 | 10 | export SC_RESOURCE_NAME=mdb-sh 11 | export MONGODB_VERSION="8.0.5-ent" 12 | 13 | export MDB_CLUSTER_0_EXTERNAL_DOMAIN="${K8S_CLUSTER_0}.${CUSTOM_DOMAIN}" 14 | export MDB_CLUSTER_1_EXTERNAL_DOMAIN="${K8S_CLUSTER_1}.${CUSTOM_DOMAIN}" 15 | export MDB_CLUSTER_2_EXTERNAL_DOMAIN="${K8S_CLUSTER_2}.${CUSTOM_DOMAIN}" 16 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/code_snippets/2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh: -------------------------------------------------------------------------------- 1 | echo; echo "Waiting for MongoDB to reach Running phase..." 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Running "mdb/${SC_RESOURCE_NAME}" --timeout=900s 3 | echo; echo "Pods running in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 5 | echo; echo "Pods running in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 7 | echo; echo "Pods running in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 9 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/code_snippets/2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh: -------------------------------------------------------------------------------- 1 | echo; echo "Waiting for MongoDB to reach Running phase..." 
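# Note: 'kubectl wait --for=jsonpath=...' requires kubectl 1.23 or newer; it blocks until the operator reports .status.phase=Running on the resource, or exits non-zero after the 900s timeout. 'mdb' is the shortname of the MongoDB custom resource.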
2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Running "mdb/${SC_RESOURCE_NAME}" --timeout=900s 3 | echo; echo "Pods running in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 5 | echo; echo "Pods running in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 7 | echo; echo "Pods running in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 9 | -------------------------------------------------------------------------------- /samples/mongodb_multi/replica-set.yaml: -------------------------------------------------------------------------------- 1 | # sample mongodb-multi replicaset yaml 2 | --- 3 | apiVersion: mongodb.com/v1 4 | kind: MongoDBMulti 5 | metadata: 6 | name: multi-replica-set 7 | spec: 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | persistent: false 11 | duplicateServiceObjects: false 12 | credentials: my-credentials 13 | opsManager: 14 | configMapRef: 15 | name: my-project 16 | clusterSpecList: 17 | # provide spec per cluster 18 | clusterSpecs: 19 | # cluster names where you want to deploy the replicaset 20 | - clusterName: cluster1.mongokubernetes.com 21 | members: 2 22 | - clusterName: cluster2.mongokubernetes.com 23 | members: 1 24 | - clusterName: cluster3.mongokubernetes.com 25 | members: 2 26 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/code_snippets/1110_mongodb_replicaset_multi_cluster_wait_for_running_state.sh: -------------------------------------------------------------------------------- 1 | echo; echo "Waiting for MongoDB to reach Running phase..." 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Running "mdbmc/${RS_RESOURCE_NAME}" --timeout=900s 3 | echo; echo "Pods running in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 5 | echo; echo "Pods running in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 7 | echo; echo "Pods running in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 9 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/code_snippets/1110_mongodb_replicaset_multi_cluster_wait_for_running_state.sh: -------------------------------------------------------------------------------- 1 | echo; echo "Waiting for MongoDB to reach Running phase..." 
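# Note: this waits on 'mdbmc', the shortname of the MongoDBMultiCluster custom resource, rather than 'mdb' (MongoDB) used for the sharded cluster above.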
2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Running "mdbmc/${RS_RESOURCE_NAME}" --timeout=900s 3 | echo; echo "Pods running in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 5 | echo; echo "Pods running in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 7 | echo; echo "Pods running in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 9 | -------------------------------------------------------------------------------- /samples/single-sharded-overrides.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDB 3 | metadata: 4 | name: sh-single-overrides 5 | spec: 6 | shardCount: 2 7 | mongodsPerShardCount: 1 8 | mongosCount: 1 9 | configServerCount: 1 10 | version: "7.0.15-ent" 11 | type: ShardedCluster 12 | configSrvPodSpec: 13 | persistence: 14 | single: 15 | storage: 0.5G 16 | shardPodSpec: 17 | persistence: 18 | single: 19 | storage: 1G 20 | shardOverrides: 21 | - shardNames: [sh-single-overrides-0] 22 | members: 3 23 | - shardNames: [sh-single-overrides-1] 24 | podSpec: 25 | persistence: 26 | single: 27 | storage: 2Gi 28 | opsManager: 29 | configMapRef: 30 | name: my-project 31 | credentials: my-credentials -------------------------------------------------------------------------------- /samples/mongodb/tls/standalone/standalone-tls.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDB 4 | metadata: 5 | name: my-tls-standalone 6 | spec: 7 | version: 4.0.14-ent 8 | 9 | opsManager: 10 | configMapRef: 11 | name: my-project 12 | credentials: my-credentials 13 | type: Standalone 14 | 15 | persistent: true 16 | 17 | # This will create a TLS-enabled Standalone, which means that the 18 | # traffic will be encrypted using TLS certificates. These 19 | # certificates will be generated on the fly by the operator using 20 | # the Kubernetes CA. 21 | # Please refer to the Kubernetes TLS documentation on how to approve these certs: 22 | # 23 | # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ 24 | # 25 | security: 26 | tls: 27 | enabled: true 28 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/output/0200_kubectl_mongodb_configure_multi_cluster.out: -------------------------------------------------------------------------------- 1 | Ensured namespaces exist in all clusters. 2 | creating central cluster roles in cluster: gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 3 | creating member roles in cluster: gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1 4 | creating member roles in cluster: gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2 5 | Ensured ServiceAccounts and Roles. 6 | Creating KubeConfig secret mongodb-operator/mongodb-enterprise-operator-multi-cluster-kubeconfig in cluster gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 7 | Ensured database Roles in member clusters.
8 | Creating Member list Configmap mongodb-operator/mongodb-enterprise-operator-member-list in cluster gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0 9 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/code_snippets/2200_create_mongodb_user.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" -f - <- 7 | Allows only replica set deployment of MongoDB 8 | 9 | The type setting for MongoDB should be ReplicaSet 10 | spec: 11 | crd: 12 | spec: 13 | names: 14 | kind: MongoDBAllowReplicaset 15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | package mongodballowreplicaset 19 | 20 | violation[{"msg": msg}] { 21 | deployment_type = object.get(input.review.object.spec, "type", "none") 22 | not deployment_type == "ReplicaSet" 23 | msg := sprintf("Only ReplicaSet deployment of MongoDB allowed, requested %v", [deployment_type]) 24 | } 25 | 26 | -------------------------------------------------------------------------------- /samples/ops-manager/ops-manager-appdb-custom-images.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDBOpsManager 3 | metadata: 4 | name: ops-manager 5 | spec: 6 | version: 5.0.5 7 | replicas: 3 8 | adminCredentials: ops-manager-admin-secret 9 | backup: 10 | enabled: false 11 | applicationDatabase: 12 | # The version specified must match the one in the image provided in the `mongod` field 13 | version: 4.4.11-ent 14 | members: 3 15 | podSpec: 16 | podTemplate: 17 | spec: 18 | containers: 19 | - name: mongodb-agent 20 | image: 'quay.io/mongodb/mongodb-agent:10.29.0.6830-1' 21 | - name: mongod 22 | image: 'quay.io/mongodb/mongodb-enterprise-appdb-database:4.4.11-ent' 23 | - name: mongodb-agent-monitoring 24 | image: 'quay.io/mongodb/mongodb-agent:10.29.0.6830-1' 25 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-cert-manager/code_snippets/0225_create_ca_configmap.sh: -------------------------------------------------------------------------------- 1 | mkdir -p certs 2 | 3 | openssl s_client -showcerts -verify 2 \ 4 | -connect downloads.mongodb.com:443 -servername downloads.mongodb.com < /dev/null \ 5 | | awk '/BEGIN/,/END/{ if(/BEGIN/){a++}; out="certs/cert"a".crt"; print >out}' 6 | 7 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" get secret root-secret -n cert-manager -o jsonpath="{.data['ca\.crt']}" | base64 --decode > certs/ca.crt 8 | cat certs/ca.crt certs/cert2.crt certs/cert3.crt >> certs/mms-ca.crt 9 | 10 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create cm ca-issuer -n "${MDB_NAMESPACE}" --from-file=ca-pem=certs/mms-ca.crt --from-file=mms-ca.crt=certs/mms-ca.crt 11 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create cm ca-issuer -n "${OM_NAMESPACE}" --from-file=ca-pem=certs/mms-ca.crt --from-file=mms-ca.crt=certs/mms-ca.crt 12 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/code_snippets/0045_create_namespaces.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 2 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 3 | kubectl
--context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 4 | 5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${OM_NAMESPACE}" 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${OM_NAMESPACE}" 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${OM_NAMESPACE}" 8 | 9 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${MDB_NAMESPACE}" 10 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${MDB_NAMESPACE}" 11 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${MDB_NAMESPACE}" 12 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/code_snippets/0210_helm_install_operator.sh: -------------------------------------------------------------------------------- 1 | helm upgrade --install \ 2 | --debug \ 3 | --kube-context "${K8S_CLUSTER_0_CONTEXT_NAME}" \ 4 | mongodb-enterprise-operator-multi-cluster \ 5 | "${OPERATOR_HELM_CHART}" \ 6 | --namespace="${OPERATOR_NAMESPACE}" \ 7 | --set namespace="${OPERATOR_NAMESPACE}" \ 8 | --set operator.namespace="${OPERATOR_NAMESPACE}" \ 9 | --set operator.watchNamespace="${OM_NAMESPACE}\,${MDB_NAMESPACE}" \ 10 | --set operator.name=mongodb-enterprise-operator-multi-cluster \ 11 | --set operator.createOperatorServiceAccount=false \ 12 | --set operator.createResourcesServiceAccountsAndRoles=false \ 13 | --set "multiCluster.clusters={${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}}" \ 14 | --set "${OPERATOR_ADDITIONAL_HELM_VALUES:-"dummy=value"}" \ 15 | --set operator.env=dev 16 | -------------------------------------------------------------------------------- /samples/mongodb/minimal/replica-set.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDB 4 | metadata: 5 | name: my-replica-set 6 | spec: 7 | members: 3 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | 11 | opsManager: 12 | configMapRef: 13 | name: my-project 14 | credentials: my-credentials 15 | 16 | persistent: false 17 | 18 | podSpec: 19 | # 'podTemplate' allows setting custom fields in PodTemplateSpec. 20 | # (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#podtemplatespec-v1-core) 21 | # for the Database StatefulSet. 22 | podTemplate: 23 | spec: 24 | containers: 25 | - name: mongodb-enterprise-database 26 | resources: 27 | limits: 28 | cpu: "2" 29 | memory: 700M 30 | requests: 31 | cpu: "1" 32 | memory: 500M 33 | -------------------------------------------------------------------------------- /opa_examples/ops_manager_wizardless/ops_manager_wizardless_template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: opsmanagerwizardless 5 | annotations: 6 | description: >- 7 | Requires Ops Manager install to be wizardless 8 | 9 | The setting mms.ignoreInitialUiSetup needs to be true 10 | spec: 11 | crd: 12 | spec: 13 | names: 14 | kind: OpsManagerWizardless 15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | package opsmanagerwizardless 19 | 20 | violation[{"msg": msg}] { 21 | value := object.get(input.review.object.spec.configuration, "mms.ignoreInitialUiSetup", "false") 22 | not value == "true" 23 | msg := sprintf("Wizard-based setup of Ops Manager is not allowed.
mms.ignoreInitialUiSetup needs to be true, currently is %v", [value]) 24 | } 25 | -------------------------------------------------------------------------------- /architectures/ops-manager-multi-cluster/env_variables.sh: -------------------------------------------------------------------------------- 1 | # This script builds on top of the environment configured in the setup guides. 2 | # It depends on (uses) the following env variables defined there to work correctly. 3 | # If you don't use the setup guide to bootstrap the environment, then define them here. 4 | # ${K8S_CLUSTER_0_CONTEXT_NAME} 5 | # ${K8S_CLUSTER_1_CONTEXT_NAME} 6 | # ${K8S_CLUSTER_2_CONTEXT_NAME} 7 | # ${OM_NAMESPACE} 8 | 9 | export S3_OPLOG_BUCKET_NAME=s3-oplog-store 10 | export S3_SNAPSHOT_BUCKET_NAME=s3-snapshot-store 11 | 12 | # If you use your own S3 storage, set the values accordingly. 13 | # By default we install MinIO to handle S3 storage, and the default credentials are set here. 14 | export S3_ENDPOINT="minio.tenant-tiny.svc.cluster.local" 15 | export S3_ACCESS_KEY="console" 16 | export S3_SECRET_KEY="console123" 17 | 18 | export OPS_MANAGER_VERSION="8.0.5" 19 | export APPDB_VERSION="8.0.5-ent" 20 | -------------------------------------------------------------------------------- /tools/multicluster/pkg/debug/anonymize_test.go: -------------------------------------------------------------------------------- 1 | package debug 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/stretchr/testify/assert" 7 | v1 "k8s.io/api/core/v1" 8 | ) 9 | 10 | func TestNoOpAnonymizer_AnonymizeSecret(t *testing.T) { 11 | // given 12 | text := "test" 13 | anonymizer := NoOpAnonymizer{} 14 | 15 | // when 16 | result := anonymizer.AnonymizeSecret(&v1.Secret{ 17 | Data: map[string][]byte{ 18 | text: []byte(text), 19 | }, 20 | }) 21 | 22 | // then 23 | assert.Equal(t, text, string(result.Data[text])) 24 | } 25 | 26 | func TestSensitiveDataAnonymizer_AnonymizeSecret(t *testing.T) { 27 | // given 28 | text := "test" 29 | anonymizer := SensitiveDataAnonymizer{} 30 | 31 | // when 32 | result := anonymizer.AnonymizeSecret(&v1.Secret{ 33 | Data: map[string][]byte{ 34 | text: []byte(text), 35 | }, 36 | }) 37 | 38 | // then 39 | assert.Equal(t, MASKED_TEXT, string(result.Data[text])) 40 | } 41 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.4/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.4" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.5/ubi/Dockerfile:
-------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.5" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.6/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.6" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.7/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.7" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.8/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 
| version="mongodb-enterprise-init-ops-manager-1.0.8" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.9/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.9" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.10/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.10" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.11/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.11" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses 
/licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.12/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.12" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.23.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.23.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.24.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.24.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", 
"/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.25.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.25.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.26.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.26.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.27.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.27.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.28.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM 
${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.28.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.29.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.29.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.30.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.30.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.31.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.31.0" \ 10 | release="1" \ 11 | 
summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.32.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.32.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.33.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.33.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0310_ops_manager_deploy_on_single_member_cluster.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" -f - <= 4.0 will enable SCRAM-SHA-256 authentication 10 | version: 4.4.0-ent 11 | 12 | opsManager: 13 | configMapRef: 14 | name: my-project 15 | credentials: my-credentials 16 | 17 | security: 18 | authentication: 19 | enabled: true 20 | modes: ["SCRAM"] # Valid authentication modes are "SCRAM' and "X509" 21 | 22 | # Optional field - ignoreUnknownUsers 23 | # A value of true means that any users not configured via the Operator or the Ops Manager or Cloud Manager UI 24 | # will not be 
/samples/ops-manager-multi-cluster/code_snippets/0310_ops_manager_deploy_on_single_member_cluster.sh:
--------------------------------------------------------------------------------
kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" -f - <<EOF
…
  # setting a version >= 4.0 will enable SCRAM-SHA-256 authentication
  version: 4.4.0-ent

  opsManager:
    configMapRef:
      name: my-project
  credentials: my-credentials

  security:
    authentication:
      enabled: true
      modes: ["SCRAM"] # Valid authentication modes are "SCRAM" and "X509"

      # Optional field - ignoreUnknownUsers
      # A value of true means that any users not configured via the Operator or the Ops Manager or Cloud Manager UI
      # will not be altered in any way

      # If you need to manage MongoDB users directly via the mongods, set this value to true
      ignoreUnknownUsers: true # default value false

--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/code_snippets/0250_generate_certs.sh:
--------------------------------------------------------------------------------
kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" apply -f - <<EOF
…
      Requires MongoDB deployment to be within the allowed versions

      The setting version should be within the pinned allowed values
spec:
  crd:
    spec:
      names:
        kind: MongoDBAllowedVersions
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package mongodballowedversions

        allowed_versions = ["4.5.0", "5.0.0"]

        violation[{"msg": msg}] {
          version = object.get(input.review.object.spec, "version", "none")
          not q[version]
          msg := sprintf("MongoDB deployment needs to be one of the allowed versions: %v", [allowed_versions])
        }

        q[version] { version := allowed_versions[_] }
--------------------------------------------------------------------------------
/architectures/ops-manager-multi-cluster/code_snippets/0310_ops_manager_deploy_on_single_member_cluster.sh:
--------------------------------------------------------------------------------
kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" -f - <<EOF
…
      Requires Ops Manager to be within the allowed versions

      The setting version should be within the pinned allowed values
spec:
  crd:
    spec:
      names:
        kind: OpsManagerAllowedVersions
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package opsmanagerallowedversions

        allowed_versions = ["4.4.5", "5.0.0"]

        violation[{"msg": msg}] {
          version = object.get(input.review.object.spec, "version", "none")
          not q[version]
          msg := sprintf("Ops Manager needs to be one of the allowed versions: %v", [allowed_versions])
        }

        q[version] { version := allowed_versions[_] }
--------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-operator/code_snippets/0200_kubectl_mongodb_configure_multi_cluster.sh:
--------------------------------------------------------------------------------
kubectl mongodb multicluster setup \
  --central-cluster="${K8S_CLUSTER_0_CONTEXT_NAME}" \
  --member-clusters="${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}" \
  --member-cluster-namespace="${OM_NAMESPACE}" \
  --central-cluster-namespace="${OPERATOR_NAMESPACE}" \
  --create-service-account-secrets \
  --install-database-roles=true \
  --image-pull-secrets=image-registries-secret

kubectl mongodb multicluster setup \
  --central-cluster="${K8S_CLUSTER_0_CONTEXT_NAME}" \
  --member-clusters="${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}" \
  --member-cluster-namespace="${MDB_NAMESPACE}" \
  --central-cluster-namespace="${OPERATOR_NAMESPACE}" \
  --create-service-account-secrets \
  --install-database-roles=true \
  --image-pull-secrets=image-registries-secret

--------------------------------------------------------------------------------
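The two ConstraintTemplates above only register the MongoDBAllowedVersions and OpsManagerAllowedVersions kinds with Gatekeeper; nothing is enforced until a Constraint of the new kind exists. A minimal sketch in the same snippet style (the constraint name and match scope are illustrative assumptions, not taken from this repository):

kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -f - <<EOF
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: MongoDBAllowedVersions
metadata:
  name: mongodb-allowed-versions   # illustrative name
spec:
  match:
    kinds:
      # apply the version check to MongoDB custom resources
      - apiGroups: ["mongodb.com"]
        kinds: ["MongoDB"]
EOF
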
/architectures/mongodb-replicaset-multi-cluster/code_snippets/1100_mongodb_replicaset_multi_cluster.sh:
--------------------------------------------------------------------------------
kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" -f - <<EOF
…
  # setting a version >= 4.0 will enable SCRAM-SHA-256 authentication
  # setting a version < 4.0 will enable SCRAM-SHA-1/MONGODB-CR authentication
  version: 4.0.4-ent

  opsManager:
    configMapRef:
      name: my-project
  credentials: my-credentials

  security:
    authentication:
      enabled: true
      modes: ["SCRAM"] # Valid authentication modes are "SCRAM", "SCRAM-SHA-1", "MONGODB-CR", "X509" and "LDAP"

      # Optional field - ignoreUnknownUsers
      # A value of true means that any users not configured via the Operator or the Ops Manager or Cloud Manager UI
      # will not be altered in any way

      # If you need to manage MongoDB users directly via the mongods, set this value to true
      ignoreUnknownUsers: true # default value false

--------------------------------------------------------------------------------
/samples/ops-manager-multi-cluster/code_snippets/0320_ops_manager_add_second_cluster.sh:
--------------------------------------------------------------------------------
kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" -f - <