├── .evergreen.yml ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── config.yml └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── LICENSE ├── README.md ├── architectures ├── mongodb-replicaset-mc-no-mesh │ ├── code_snippets │ │ ├── 1050_generate_certs.sh │ │ ├── 1100_mongodb_replicaset_multi_cluster.sh │ │ ├── 1110_mongodb_replicaset_multi_cluster_wait_for_running_state.sh │ │ ├── 1200_create_mongodb_user.sh │ │ ├── 1210_verify_mongosh_connection.sh │ │ └── 9000_delete_resources.sh │ ├── env_variables.sh │ ├── output │ │ └── 1210_verify_mongosh_connection.out │ ├── teardown.sh │ └── test.sh ├── mongodb-replicaset-multi-cluster │ ├── code_snippets │ │ ├── 1050_generate_certs.sh │ │ ├── 1100_mongodb_replicaset_multi_cluster.sh │ │ ├── 1110_mongodb_replicaset_multi_cluster_wait_for_running_state.sh │ │ ├── 1200_create_mongodb_user.sh │ │ ├── 1210_verify_mongosh_connection.sh │ │ └── 9000_delete_resources.sh │ ├── env_variables.sh │ ├── output │ │ └── 1210_verify_mongosh_connection.out │ ├── teardown.sh │ └── test.sh ├── mongodb-sharded-mc-no-mesh │ ├── code_snippets │ │ ├── 2050_generate_certs.sh │ │ ├── 2100_mongodb_sharded_multi_cluster.sh │ │ ├── 2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh │ │ ├── 2200_create_mongodb_user.sh │ │ ├── 2210_verify_mongosh_connection.sh │ │ └── 9000_delete_resources.sh │ ├── env_variables.sh │ ├── output │ │ └── 2210_verify_mongosh_connection.out │ ├── teardown.sh │ └── test.sh ├── mongodb-sharded-multi-cluster │ ├── code_snippets │ │ ├── 2050_generate_certs.sh │ │ ├── 2100_mongodb_sharded_multi_cluster.sh │ │ ├── 2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh │ │ ├── 2120_mongodb_sharded_multi_cluster_external_access.sh │ │ ├── 2200_create_mongodb_user.sh │ │ ├── 2210_verify_mongosh_connection.sh │ │ └── 9000_delete_resources.sh │ ├── env_variables.sh │ ├── output │ │ └── 2210_verify_mongosh_connection.out │ ├── teardown.sh │ └── test.sh ├── ops-manager-mc-no-mesh │ ├── code_snippets 
│ │ ├── 0100_generate_certs.sh │ │ ├── 0110_add_cert_to_gcp.sh │ │ ├── 0150_om_load_balancer.sh │ │ ├── 0160_add_dns_record.sh │ │ ├── 0300_ops_manager_create_admin_credentials.sh │ │ ├── 0320_ops_manager_no_mesh.sh │ │ ├── 0321_ops_manager_wait_for_pending_state.sh │ │ ├── 0325_set_up_lb_services.sh │ │ ├── 0326_set_up_lb_services.sh │ │ ├── 0330_ops_manager_wait_for_running_state.sh │ │ ├── 0400_install_minio_s3.sh │ │ ├── 0500_ops_manager_prepare_s3_backup_secrets.sh │ │ ├── 0510_ops_manager_enable_s3_backup.sh │ │ ├── 0522_ops_manager_wait_for_running_state.sh │ │ ├── 0610_create_mdb_org_and_get_credentials.sh │ │ ├── 9000_cleanup_gke_lb.sh │ │ ├── 9100_delete_backup_namespaces.sh │ │ └── 9200_delete_om.sh │ ├── env_variables.sh │ ├── output │ │ ├── 0150_om_load_balancer.out │ │ ├── 0321_ops_manager_wait_for_pending_state.out │ │ ├── 0330_ops_manager_wait_for_running_state.out │ │ └── 0522_ops_manager_wait_for_running_state.out │ ├── teardown.sh │ └── test.sh ├── ops-manager-multi-cluster │ ├── code_snippets │ │ ├── 0250_generate_certs.sh │ │ ├── 0300_ops_manager_create_admin_credentials.sh │ │ ├── 0310_ops_manager_deploy_on_single_member_cluster.sh │ │ ├── 0311_ops_manager_wait_for_pending_state.sh │ │ ├── 0312_ops_manager_wait_for_running_state.sh │ │ ├── 0320_ops_manager_add_second_cluster.sh │ │ ├── 0321_ops_manager_wait_for_pending_state.sh │ │ ├── 0322_ops_manager_wait_for_running_state.sh │ │ ├── 0400_install_minio_s3.sh │ │ ├── 0500_ops_manager_prepare_s3_backup_secrets.sh │ │ ├── 0510_ops_manager_enable_s3_backup.sh │ │ ├── 0522_ops_manager_wait_for_running_state.sh │ │ ├── 0605_start_forwarding_om_api.sh │ │ ├── 0610_create_mdb_org_and_get_credentials.sh │ │ ├── 0615_stop_forwarding_om_api.sh │ │ ├── 9100_delete_backup_namespaces.sh │ │ └── 9200_delete_om.sh │ ├── env_variables.sh │ ├── output │ │ ├── 0311_ops_manager_wait_for_pending_state.out │ │ ├── 0312_ops_manager_wait_for_running_state.out │ │ ├── 0321_ops_manager_wait_for_pending_state.out │ │ 
├── 0322_ops_manager_wait_for_running_state.out │ │ ├── 0522_ops_manager_wait_for_running_state.out │ │ └── 0610_create_mdb_org_and_get_credentials.out │ ├── teardown.sh │ └── test.sh └── setup-multi-cluster │ ├── setup-cert-manager │ ├── code_snippets │ │ ├── 0215_helm_configure_repo.sh │ │ ├── 0216_helm_install_cert_manager.sh │ │ ├── 0220_create_issuer.sh │ │ ├── 0221_verify_issuer.sh │ │ └── 0225_create_ca_configmap.sh │ ├── output │ │ ├── 0215_helm_configure_repo.out │ │ ├── 0216_helm_install_cert_manager.out │ │ └── 0221_verify_issuer.out │ └── test.sh │ ├── setup-externaldns │ ├── code_snippets │ │ ├── 0100_create_gke_sa.sh │ │ ├── 0120_add_role_to_sa.sh │ │ ├── 0130_create_sa_key.sh │ │ ├── 0140_create_namespaces.sh │ │ ├── 0150_create_sa_secrets.sh │ │ ├── 0200_install_externaldns.sh │ │ ├── 0300_setup_dns_zone.sh │ │ ├── 9000_delete_sa.sh │ │ ├── 9050_delete_namespace.sh │ │ └── 9100_delete_dns_zone.sh │ ├── env_variables.sh │ ├── teardown.sh │ ├── test.sh │ └── yamls │ │ └── externaldns.yaml │ ├── setup-gke │ ├── code_snippets │ │ ├── 0005_gcloud_set_current_project.sh │ │ ├── 0010_create_gke_cluster_0.sh │ │ ├── 0010_create_gke_cluster_1.sh │ │ ├── 0010_create_gke_cluster_2.sh │ │ ├── 0020_get_gke_credentials.sh │ │ ├── 0030_verify_access_to_clusters.sh │ │ └── 9010_delete_gke_clusters.sh │ ├── env_variables.sh │ ├── output │ │ └── 0030_verify_access_to_clusters.out │ ├── teardown.sh │ └── test.sh │ ├── setup-istio │ ├── code_snippets │ │ ├── 0040_install_istio.sh │ │ └── 0050_label_namespaces.sh │ ├── install_istio_separate_network.sh │ └── test.sh │ ├── setup-operator │ ├── code_snippets │ │ ├── 0045_create_namespaces.sh │ │ ├── 0046_create_image_pull_secrets.sh │ │ ├── 0200_kubectl_mongodb_configure_multi_cluster.sh │ │ ├── 0205_helm_configure_repo.sh │ │ ├── 0210_helm_install_operator.sh │ │ ├── 0211_check_operator_deployment.sh │ │ └── 9000_delete_namespaces.sh │ ├── env_variables.sh │ ├── output │ │ ├── 
0200_kubectl_mongodb_configure_multi_cluster.out │ │ ├── 0205_helm_configure_repo.out │ │ ├── 0210_helm_install_operator.out │ │ └── 0211_check_operator_deployment.out │ ├── teardown.sh │ └── test.sh │ └── verify-connectivity │ ├── code_snippets │ ├── 0045_create_connectivity_test_namespaces.sh │ ├── 0050_check_cluster_connectivity_create_sts_0.sh │ ├── 0050_check_cluster_connectivity_create_sts_1.sh │ ├── 0050_check_cluster_connectivity_create_sts_2.sh │ ├── 0060_check_cluster_connectivity_wait_for_sts.sh │ ├── 0070_check_cluster_connectivity_create_pod_service_0.sh │ ├── 0070_check_cluster_connectivity_create_pod_service_1.sh │ ├── 0070_check_cluster_connectivity_create_pod_service_2.sh │ ├── 0080_check_cluster_connectivity_create_round_robin_service_0.sh │ ├── 0080_check_cluster_connectivity_create_round_robin_service_1.sh │ ├── 0080_check_cluster_connectivity_create_round_robin_service_2.sh │ ├── 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.sh │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh │ ├── 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh │ └── 0100_check_cluster_connectivity_cleanup.sh │ ├── output │ ├── 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out │ └── 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out │ └── test.sh ├── crds.yaml ├── dockerfiles ├── mongodb-agent │ ├── 10.29.0.6830-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 107.0.0.8465-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.0.8502-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.1.8507-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.1.8507-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.1.8507-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 
107.0.1.8507-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.1.8507-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.1.8507-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.1.8507-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.10.8627-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.11.8645-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.12.8669-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.12.8669-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.12.8669-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.12.8669-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.12.8669-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.12.8669-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.13.8702-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.13.8702-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.13.8702-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.13.8702-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.13.8702-1_1.32.0 │ │ └── ubi │ │ │ └── 
Dockerfile │ ├── 107.0.13.8702-1_1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.15.8741-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.15.8741-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.15.8741-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.15.8741-1_1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.2.8531-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.2.8531-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.2.8531-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.2.8531-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.2.8531-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.2.8531-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.2.8531-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.3.8550-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.4.8567-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.6.8587-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.6.8587-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.6.8587-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 
107.0.6.8587-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.6.8587-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.6.8587-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.6.8587-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.6.8587-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.6.8587-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.7.8596-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.8.8615-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 107.0.9.8621-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 
108.0.0.8694-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.0.8694-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.0.8694-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.0.8694-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.0.8694-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.0.8694-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.0.8694-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.0.8694-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.0.8694-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.1.8718-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.1.8718-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.1.8718-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.1.8718-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.1.8718-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.1.8718-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.1.8718-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.2.8729-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.2.8729-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.2.8729-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.2.8729-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.2.8729-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.2.8729-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.3.8758-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.3.8758-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.3.8758-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.3.8758-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.3.8758-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.4.8770-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.4.8770-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.4.8770-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.4.8770-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.4.8770-1_1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.6.8796-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.6.8796-1_1.31.0 │ │ 
└── ubi │ │ │ └── Dockerfile │ ├── 108.0.6.8796-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 108.0.6.8796-1_1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 11.0.1.6929-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.11.7036-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.12.7051-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.13.7055-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.14.7064-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.15.7073-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.16.7080-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.17.7084-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.19.7094-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.0.5.6963-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 11.12.0.7388-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 12.0.10.7591-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.11.7606-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.15.7646-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.20.7686-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.21.7698-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.23.7711-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.24.7719-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.25.7724-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.28.7763-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.29.7785-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.29.7785-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.29.7785-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.29.7785-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.29.7785-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.30.7791-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 
12.0.30.7791-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.30.7791-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.30.7791-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.30.7791-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.30.7791-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.30.7791-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.31.7825-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.32.7857-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.33.7866-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 
12.0.33.7866-1_1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.34.7888-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.34.7888-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.34.7888-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.34.7888-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.34.7888-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.34.7888-1_1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.35.7911-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.35.7911-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.35.7911-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.35.7911-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.35.7911-1_1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 12.0.4.7554-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 12.0.8.7575-1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 13.10.0.8620-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.15.0.8788-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.17.0.8870-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.17.0.8870-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.21.0.9059-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.21.0.9059-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.21.0.9059-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.22.0.9099-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.22.0.9099-1_1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.22.0.9099-1_1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.22.0.9099-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.25.0.9175-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.25.0.9175-1_1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.25.0.9175-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.27.0.9284-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.27.0.9284-1_1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.27.0.9284-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.27.0.9284-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.29.0.9339-1 │ │ └── ubi │ │ │ └── Dockerfile 
│ ├── 13.29.0.9339-1_1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.29.0.9339-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.29.0.9339-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.30.0.9350-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.30.0.9350-1_1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.30.0.9350-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.30.0.9350-1_1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.32.0.9397-1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 13.32.0.9397-1_1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ └── 13.32.0.9397-1_1.32.0 │ │ └── ubi │ │ └── Dockerfile ├── mongodb-enterprise-appdb │ └── 10.2.15.5958-1_4.2.11-ent │ │ ├── ubi │ │ └── Dockerfile │ │ └── ubuntu │ │ └── Dockerfile ├── mongodb-enterprise-database │ ├── 1.23.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.24.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 2.0.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 2.0.1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ └── 2.0.2 │ │ ├── ubi │ │ └── Dockerfile │ │ └── ubuntu │ │ └── Dockerfile ├── mongodb-enterprise-init-appdb │ ├── 1.0.10 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.11 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.12 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.13 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.14 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.15 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── 
ubuntu │ │ │ └── Dockerfile │ ├── 1.0.16 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.17 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.18 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.6 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.7 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.8 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.9 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.23.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.24.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ └── 1.33.0 │ │ └── ubi │ │ └── Dockerfile ├── mongodb-enterprise-init-database │ ├── 1.0.10 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.11 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.12 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.13 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.14 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.15 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.16 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.17 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.18 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.19 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.2 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.3 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.4 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.5 │ │ ├── ubi │ │ │ └── Dockerfile │ │ 
└── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.6 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.7 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.8 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.9 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.23.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.24.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ └── 1.33.0 │ │ └── ubi │ │ └── Dockerfile ├── mongodb-enterprise-init-ops-manager │ ├── 1.0.10 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.11 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.12 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.0.3 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.4 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.5 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.6 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.7 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.8 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.0.9 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.23.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.24.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.30.0 │ │ └── ubi │ │ │ └── 
Dockerfile │ ├── 1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ └── 1.33.0 │ │ └── ubi │ │ └── Dockerfile ├── mongodb-enterprise-operator │ ├── 1.10.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.11.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.12.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.13.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.14.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.15.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.15.1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.15.2 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.16.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.16.1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.16.2 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.16.3 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.16.4 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.17.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.17.1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.17.2 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.18.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.19.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.19.1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.20.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.20.1 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.21.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.22.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.23.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.24.0 │ │ └── ubi │ │ │ 
└── Dockerfile │ ├── 1.25.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.26.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.27.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.28.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.29.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.30.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.31.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.32.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.33.0 │ │ └── ubi │ │ │ └── Dockerfile │ ├── 1.9.0 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ ├── 1.9.1 │ │ ├── ubi │ │ │ └── Dockerfile │ │ └── ubuntu │ │ │ └── Dockerfile │ └── 1.9.2 │ │ ├── ubi │ │ └── Dockerfile │ │ └── ubuntu │ │ └── Dockerfile └── mongodb-enterprise-ops-manager │ ├── 4.2.26 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.10 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.11 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.12 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.13 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.14 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.15 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.16 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.17 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.18 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.19 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.20 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.21 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.22 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.23 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.24 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.7 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 4.4.9 │ ├── ubi │ │ └── 
Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.0 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.1 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.10 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.11 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.12 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.13 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.14 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.15 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.16 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.17 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.18 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.19 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.2 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.20 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.21 │ └── ubi │ │ └── Dockerfile │ ├── 5.0.22 │ └── ubi │ │ └── Dockerfile │ ├── 5.0.3 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.4 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.5 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.6 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.7 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.8 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 5.0.9 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.0 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.1 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.10 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.11 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.12 │ └── ubi │ │ └── 
Dockerfile │ ├── 6.0.13 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.14 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.15 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.16 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.17 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.18 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.19 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.2 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.20 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.21 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.22 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.23 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.24 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.25 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.26 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.27 │ └── ubi │ │ └── Dockerfile │ ├── 6.0.3 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.4 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.5 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.6 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.7 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.8 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 6.0.9 │ ├── ubi │ │ └── Dockerfile │ └── ubuntu │ │ └── Dockerfile │ ├── 7.0.0 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.1 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.10 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.11 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.12 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.13 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.14 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.15 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.2 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.3 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.4 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.6 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.7 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.8 │ └── ubi │ │ └── Dockerfile │ ├── 7.0.9 │ └── ubi │ │ └── Dockerfile │ ├── 8.0.0 │ └── ubi │ │ └── Dockerfile │ ├── 8.0.1 │ └── ubi │ │ └── Dockerfile │ ├── 8.0.2 │ └── ubi │ │ └── Dockerfile │ ├── 8.0.3 │ └── ubi │ │ └── 
Dockerfile │ ├── 8.0.4 │ └── ubi │ │ └── Dockerfile │ ├── 8.0.5 │ └── ubi │ │ └── Dockerfile │ └── 8.0.6 │ └── ubi │ └── Dockerfile ├── docs ├── assets │ ├── image--000.png │ ├── image--002.png │ ├── image--004.png │ ├── image--008.png │ ├── image--014.png │ ├── image--030.png │ ├── image--032.png │ └── image--034.png ├── openshift-marketplace.md └── upgrading-to-ops-manager-5.md ├── grafana └── sample_dashboard.json ├── mongodb-enterprise-multi-cluster.yaml ├── mongodb-enterprise-openshift.yaml ├── mongodb-enterprise.yaml ├── multi_cluster_verify └── sample-service.yaml ├── opa_examples ├── README.md ├── debugging │ ├── constraint_template.yaml │ └── constraints.yaml ├── mongodb_allow_replicaset │ ├── constraints.yaml │ └── mongodb_allow_replicaset.yaml ├── mongodb_allowed_versions │ ├── constraints.yaml │ └── mongodb_allowed_versions.yaml ├── mongodb_strict_tls │ ├── constraints.yaml │ └── mongodb_strict_tls.yaml ├── ops_manager_allowed_versions │ ├── constraints.yaml │ └── ops_manager_allowed_versions.yaml ├── ops_manager_replica_members │ ├── constraints.yaml │ └── ops_manager_replica_members.yaml └── ops_manager_wizardless │ ├── constraints.yaml │ └── ops_manager_wizardless_template.yaml ├── samples ├── appdb_multicluster │ └── ops-manager-multi-cluster-appdb.yaml ├── mongodb │ ├── affinity │ │ ├── replica-set-affinity.yaml │ │ ├── sharded-cluster-affinity.yaml │ │ └── standalone-affinity.yaml │ ├── agent-startup-options │ │ ├── replica-set-agent-startup-options.yaml │ │ ├── sharded-cluster-agent-startup-options.yaml │ │ └── standalone-agent-startup-options.yaml │ ├── authentication │ │ ├── ldap │ │ │ ├── replica-set │ │ │ │ ├── replica-set-ldap-user.yaml │ │ │ │ └── replica-set-ldap.yaml │ │ │ └── sharded-cluster │ │ │ │ ├── sharded-cluster-ldap-user.yaml │ │ │ │ └── sharded-cluster-ldap.yaml │ │ ├── scram │ │ │ ├── replica-set │ │ │ │ ├── replica-set-scram-password.yaml │ │ │ │ ├── replica-set-scram-sha.yaml │ │ │ │ └── replica-set-scram-user.yaml │ │ │ ├── 
sharded-cluster │ │ │ │ ├── sharded-cluster-scram-password.yaml │ │ │ │ ├── sharded-cluster-scram-sha.yaml │ │ │ │ └── sharded-cluster-scram-user.yaml │ │ │ └── standalone │ │ │ │ ├── standalone-scram-password.yaml │ │ │ │ ├── standalone-scram-sha.yaml │ │ │ │ └── standalone-scram-user.yaml │ │ └── x509 │ │ │ ├── replica-set │ │ │ ├── replica-set-x509.yaml │ │ │ └── user.yaml │ │ │ └── sharded-cluster │ │ │ ├── sharded-cluster-x509.yaml │ │ │ └── user.yaml │ ├── backup │ │ ├── replica-set-backup-disabled.yaml │ │ └── replica-set-backup.yaml │ ├── external-connectivity │ │ └── replica-set-external.yaml │ ├── minimal │ │ ├── replica-set.yaml │ │ ├── sharded-cluster.yaml │ │ └── standalone.yaml │ ├── mongodb-options │ │ ├── replica-set-mongod-options.yaml │ │ └── sharded-cluster-mongod-options.yaml │ ├── persistent-volumes │ │ ├── replica-set-persistent-volumes.yaml │ │ ├── sharded-cluster-persistent-volumes.yaml │ │ └── standalone-persistent-volumes.yaml │ ├── pod-template │ │ ├── initcontainer-sysctl_config.yaml │ │ ├── replica-set-pod-template.yaml │ │ ├── sharded-cluster-pod-template.yaml │ │ └── standalone-pod-template.yaml │ ├── podspec │ │ ├── replica-set-podspec.yaml │ │ ├── sharded-cluster-podspec.yaml │ │ └── standalone-podspec.yaml │ ├── project.yaml │ ├── prometheus │ │ ├── replica-set.yaml │ │ └── sharded-cluster.yaml │ └── tls │ │ ├── replica-set │ │ └── replica-set-tls.yaml │ │ ├── sharded-cluster │ │ └── sharded-cluster-tls.yaml │ │ └── standalone │ │ └── standalone-tls.yaml ├── mongodb_multi │ ├── replica-set-configure-storage.yaml │ ├── replica-set-sts-override.yaml │ └── replica-set.yaml ├── mongodb_multicluster │ ├── replica-set-configure-storage.yaml │ ├── replica-set-sts-override.yaml │ └── replica-set.yaml ├── multi-cluster-cli-gitops │ ├── README.md │ ├── argocd │ │ ├── application.yaml │ │ └── project.yaml │ └── resources │ │ ├── job.yaml │ │ ├── rbac │ │ ├── cluster_scoped_central_cluster.yaml │ │ ├── cluster_scoped_member_cluster.yaml │ │ 
├── namespace_scoped_central_cluster.yaml │ │ └── namespace_scoped_member_cluster.yaml │ │ └── replica-set.yaml ├── multi-cluster │ └── install_istio_separate_network.sh ├── ops-manager-multi-cluster │ ├── code_snippets │ │ ├── 0010_create_gke_cluster_0.sh │ │ ├── 0010_create_gke_cluster_1.sh │ │ ├── 0010_create_gke_cluster_2.sh │ │ ├── 0011_gcloud_set_current_project.sh │ │ ├── 0020_get_gke_credentials.sh │ │ ├── 0030_verify_access_to_clusters.sh │ │ ├── 0040_install_istio.sh │ │ ├── 0045_create_operator_namespace.sh │ │ ├── 0045_create_ops_manager_namespace.sh │ │ ├── 0046_create_image_pull_secrets.sh │ │ ├── 0050_check_cluster_connectivity_create_sts_0.sh │ │ ├── 0050_check_cluster_connectivity_create_sts_1.sh │ │ ├── 0050_check_cluster_connectivity_create_sts_2.sh │ │ ├── 0060_check_cluster_connectivity_wait_for_sts.sh │ │ ├── 0070_check_cluster_connectivity_create_pod_service_0.sh │ │ ├── 0070_check_cluster_connectivity_create_pod_service_1.sh │ │ ├── 0070_check_cluster_connectivity_create_pod_service_2.sh │ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_0.sh │ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_1.sh │ │ ├── 0080_check_cluster_connectivity_create_round_robin_service_2.sh │ │ ├── 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.sh │ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh │ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh │ │ ├── 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh │ │ ├── 0100_check_cluster_connectivity_cleanup.sh │ │ ├── 0200_kubectl_mongodb_configure_multi_cluster.sh │ │ ├── 0205_helm_configure_repo.sh │ │ ├── 0210_helm_install_operator.sh │ │ ├── 0211_check_operator_deployment.sh │ │ ├── 0250_generate_certs.sh │ │ ├── 0255_create_cert_secrets.sh │ │ ├── 0300_ops_manager_create_admin_credentials.sh │ │ ├── 0310_ops_manager_deploy_on_single_member_cluster.sh │ │ ├── 0311_ops_manager_wait_for_pending_state.sh │ │ ├── 
0312_ops_manager_wait_for_running_state.sh │ │ ├── 0320_ops_manager_add_second_cluster.sh │ │ ├── 0321_ops_manager_wait_for_pending_state.sh │ │ ├── 0322_ops_manager_wait_for_running_state.sh │ │ ├── 0400_install_minio_s3.sh │ │ ├── 0500_ops_manager_prepare_s3_backup_secrets.sh │ │ ├── 0510_ops_manager_enable_s3_backup.sh │ │ ├── 0522_ops_manager_wait_for_running_state.sh │ │ ├── 9000_delete_namespaces.sh │ │ └── 9010_delete_gke_clusters.sh │ ├── env_variables.sh │ ├── output │ │ ├── 0030_verify_access_to_clusters.out │ │ ├── 0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out │ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out │ │ ├── 0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out │ │ ├── 0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out │ │ ├── 0200_kubectl_mongodb_configure_multi_cluster.out │ │ ├── 0205_helm_configure_repo.out │ │ ├── 0210_helm_install_operator.out │ │ ├── 0211_check_operator_deployment.out │ │ ├── 0311_ops_manager_wait_for_pending_state.out │ │ ├── 0312_ops_manager_wait_for_running_state.out │ │ ├── 0321_ops_manager_wait_for_pending_state.out │ │ ├── 0322_ops_manager_wait_for_running_state.out │ │ └── 0522_ops_manager_wait_for_running_state.out │ ├── test.sh │ └── test_cleanup.sh ├── ops-manager │ ├── ops-manager-appdb-agent-startup-parameters.yaml │ ├── ops-manager-appdb-custom-images.yaml │ ├── ops-manager-backup.yaml │ ├── ops-manager-disable-appdb-process.yaml │ ├── ops-manager-external.yaml │ ├── ops-manager-ignore-ui-setup.yaml │ ├── ops-manager-local-mode.yaml │ ├── ops-manager-non-root.yaml │ ├── ops-manager-pod-spec.yaml │ ├── ops-manager-remote-mode.yaml │ ├── ops-manager-scram.yaml │ ├── ops-manager-tls.yaml │ └── ops-manager.yaml ├── sharded_multicluster │ ├── example-sharded-cluster-deployment.yaml │ ├── pod_template_config_servers.yaml │ ├── pod_template_shards_0.yaml │ ├── pod_template_shards_1.yaml │ └── shardSpecificPodSpec_migration.yaml └── 
single-sharded-overrides.yaml ├── scripts └── sample_test_runner.sh ├── support ├── certificate_rotation.sh └── mdb_operator_diagnostic_data.sh ├── tools └── multicluster │ ├── .gitignore │ ├── .goreleaser.yaml │ ├── Dockerfile │ ├── LICENSE │ ├── cmd │ ├── common.go │ ├── common_test.go │ ├── debug.go │ ├── multicluster.go │ ├── recover.go │ ├── root.go │ └── setup.go │ ├── go.mod │ ├── go.sum │ ├── install_istio_separate_network.sh │ ├── kubectl_mac_notarize.sh │ ├── licenses.csv │ ├── main.go │ ├── main_test.go │ ├── pkg │ ├── common │ │ ├── common.go │ │ ├── common_test.go │ │ ├── kubeclientcontainer.go │ │ ├── kubeconfig.go │ │ └── utils.go │ └── debug │ │ ├── anonymize.go │ │ ├── anonymize_test.go │ │ ├── collectors.go │ │ ├── collectors_test.go │ │ ├── writer.go │ │ └── writer_test.go │ ├── setup_tls.sh │ ├── sign.sh │ └── verify.sh └── vault_policies ├── appdb-policy.hcl ├── database-policy.hcl ├── operator-policy.hcl └── opsmanager-policy.hcl /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | * @mircea-cosbuc @lsierant @nammn @Julien-Ben @MaciejKaras @lucian-tosa @fealebenpae @m1kola 2 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | blank_issues_enabled: false 2 | contact_links: 3 | - name: MongoDB Support 4 | url: https://support.mongodb.com 5 | about: Please use the Support Center to receive official support within a timeline. Use this for urgent requests. 6 | - name: MongoDB Feedback 7 | url: https://feedback.mongodb.com/forums/924355-ops-tools 8 | about: Use our Feedback page for making feature requests. 
9 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ### All Submissions: 2 | 3 | * [ ] Have you opened an Issue before filing this PR? 4 | * [ ] Have you signed our [CLA](https://www.mongodb.com/legal/contributor-agreement)? 5 | * [ ] Have you checked to ensure there aren't other open [Pull Requests](../../../pulls) for the same update/change? 6 | * [ ] Put `closes #XXXX` in your comment to auto-close the issue that your PR fixes (if such). 7 | 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | *.iml 3 | .DS_Store 4 | tools/multicluster/linux_amd64/* 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Usage of the MongoDB Enterprise Operator for Kubernetes indicates agreement with the MongoDB Customer Agreement. 
2 | 3 | https://www.mongodb.com/customer-agreement/ 4 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/code_snippets/1050_generate_certs.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" -f - < certs/ca.crt 5 | 6 | mongosh --host "${external_ip}" --username rs-user --password password --tls --tlsCAFile certs/ca.crt --tlsAllowInvalidHostnames --eval "db.runCommand({connectionStatus : 1})" 7 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/code_snippets/9000_delete_resources.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete mdbu/rs-user 2 | 3 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete "mdbmc/${RS_RESOURCE_NAME}" 4 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/env_variables.sh: -------------------------------------------------------------------------------- 1 | # This script builds on top of the environment configured in the setup guides. 2 | # It depends (uses) the following env variables defined there to work correctly. 3 | # If you don't use the setup guide to bootstrap the environment, then define them here. 
4 | # ${K8S_CLUSTER_0_CONTEXT_NAME} 5 | # ${K8S_CLUSTER_1_CONTEXT_NAME} 6 | # ${K8S_CLUSTER_2_CONTEXT_NAME} 7 | # ${MDB_NAMESPACE} 8 | # ${CUSTOM_DOMAIN} 9 | 10 | export RS_RESOURCE_NAME=mdb 11 | export MONGODB_VERSION="8.0.5-ent" 12 | 13 | export MDB_CLUSTER_0_EXTERNAL_DOMAIN="${K8S_CLUSTER_0}.${CUSTOM_DOMAIN}" 14 | export MDB_CLUSTER_1_EXTERNAL_DOMAIN="${K8S_CLUSTER_1}.${CUSTOM_DOMAIN}" 15 | export MDB_CLUSTER_2_EXTERNAL_DOMAIN="${K8S_CLUSTER_2}.${CUSTOM_DOMAIN}" 16 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/output/1210_verify_mongosh_connection.out: -------------------------------------------------------------------------------- 1 | { 2 | authInfo: { 3 | authenticatedUsers: [ { user: 'rs-user', db: 'admin' } ], 4 | authenticatedUserRoles: [ { role: 'root', db: 'admin' } ] 5 | }, 6 | ok: 1, 7 | '$clusterTime': { 8 | clusterTime: Timestamp({ t: 1743589744, i: 1 }), 9 | signature: { 10 | hash: Binary.createFromBase64('fiBrPX9aaxTmMmLb1K2q6d4/XfQ=', 0), 11 | keyId: Long('7488660369775263749') 12 | } 13 | }, 14 | operationTime: Timestamp({ t: 1743589744, i: 1 }) 15 | } 16 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 9000_delete_resources.sh 15 | 16 | popd 17 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-mc-no-mesh/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | 
script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 1050_generate_certs.sh 15 | run 1100_mongodb_replicaset_multi_cluster.sh 16 | run 1110_mongodb_replicaset_multi_cluster_wait_for_running_state.sh 17 | 18 | run 1200_create_mongodb_user.sh 19 | sleep 10 20 | run_for_output 1210_verify_mongosh_connection.sh 21 | 22 | popd 23 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/code_snippets/1050_generate_certs.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" -f - < certs/ca.crt 5 | 6 | mongosh --host "${external_ip}" --username rs-user --password password --tls --tlsCAFile certs/ca.crt --tlsAllowInvalidHostnames --eval "db.runCommand({connectionStatus : 1})" 7 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/code_snippets/9000_delete_resources.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete mdbu/rs-user 2 | 3 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete "mdbmc/${RS_RESOURCE_NAME}" 4 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/env_variables.sh: -------------------------------------------------------------------------------- 1 | # This script builds on top of the environment configured in the setup guides. 2 | # It depends (uses) the following env variables defined there to work correctly. 3 | # If you don't use the setup guide to bootstrap the environment, then define them here. 
4 | # ${K8S_CLUSTER_0_CONTEXT_NAME} 5 | # ${K8S_CLUSTER_1_CONTEXT_NAME} 6 | # ${K8S_CLUSTER_2_CONTEXT_NAME} 7 | # ${MDB_NAMESPACE} 8 | 9 | export RS_RESOURCE_NAME=mdb 10 | export MONGODB_VERSION="8.0.5-ent" 11 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/output/1210_verify_mongosh_connection.out: -------------------------------------------------------------------------------- 1 | { 2 | authInfo: { 3 | authenticatedUsers: [ { user: 'rs-user', db: 'admin' } ], 4 | authenticatedUserRoles: [ { role: 'root', db: 'admin' } ] 5 | }, 6 | ok: 1, 7 | '$clusterTime': { 8 | clusterTime: Timestamp({ t: 1741701953, i: 1 }), 9 | signature: { 10 | hash: Binary.createFromBase64('uhYReuUiWNWP6m1lZ5umgDVgO48=', 0), 11 | keyId: Long('7480552820140146693') 12 | } 13 | }, 14 | operationTime: Timestamp({ t: 1741701953, i: 1 }) 15 | } 16 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 9000_delete_resources.sh 15 | 16 | popd 17 | -------------------------------------------------------------------------------- /architectures/mongodb-replicaset-multi-cluster/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 1050_generate_certs.sh 15 | run 
1100_mongodb_replicaset_multi_cluster.sh 16 | run 1110_mongodb_replicaset_multi_cluster_wait_for_running_state.sh 17 | 18 | run 1200_create_mongodb_user.sh 19 | sleep 10 20 | run_for_output 1210_verify_mongosh_connection.sh 21 | 22 | popd 23 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/code_snippets/2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh: -------------------------------------------------------------------------------- 1 | echo; echo "Waiting for MongoDB to reach Running phase..." 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Running "mdb/${SC_RESOURCE_NAME}" --timeout=900s 3 | echo; echo "Pods running in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 5 | echo; echo "Pods running in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 7 | echo; echo "Pods running in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 9 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/code_snippets/2200_create_mongodb_user.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" -f - < certs/ca.crt 5 | 6 | mongosh --host "${external_ip}" --username sc-user --password password --tls --tlsCAFile certs/ca.crt --tlsAllowInvalidHostnames --eval "db.runCommand({connectionStatus : 1})" 7 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/code_snippets/9000_delete_resources.sh: 
-------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete mdbu/sc-user 2 | 3 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete "mdb/${SC_RESOURCE_NAME}" 4 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/env_variables.sh: -------------------------------------------------------------------------------- 1 | # This script builds on top of the environment configured in the setup guides. 2 | # It depends (uses) the following env variables defined there to work correctly. 3 | # If you don't use the setup guide to bootstrap the environment, then define them here. 4 | # ${K8S_CLUSTER_0_CONTEXT_NAME} 5 | # ${K8S_CLUSTER_1_CONTEXT_NAME} 6 | # ${K8S_CLUSTER_2_CONTEXT_NAME} 7 | # ${MDB_NAMESPACE} 8 | # ${CUSTOM_DOMAIN} 9 | 10 | export SC_RESOURCE_NAME=mdb-sh 11 | export MONGODB_VERSION="8.0.5-ent" 12 | 13 | export MDB_CLUSTER_0_EXTERNAL_DOMAIN="${K8S_CLUSTER_0}.${CUSTOM_DOMAIN}" 14 | export MDB_CLUSTER_1_EXTERNAL_DOMAIN="${K8S_CLUSTER_1}.${CUSTOM_DOMAIN}" 15 | export MDB_CLUSTER_2_EXTERNAL_DOMAIN="${K8S_CLUSTER_2}.${CUSTOM_DOMAIN}" 16 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/output/2210_verify_mongosh_connection.out: -------------------------------------------------------------------------------- 1 | { 2 | authInfo: { 3 | authenticatedUsers: [ { user: 'sc-user', db: 'admin' } ], 4 | authenticatedUserRoles: [ { role: 'root', db: 'admin' } ] 5 | }, 6 | ok: 1, 7 | '$clusterTime': { 8 | clusterTime: Timestamp({ t: 1743590424, i: 1 }), 9 | signature: { 10 | hash: Binary.createFromBase64('1+SD+TJDayNhxsFsJzaGb2mtd+c=', 0), 11 | keyId: Long('7488663363367469079') 12 | } 13 | }, 14 | operationTime: Timestamp({ t: 1743590424, i: 1 }) 15 | } 16 | 
-------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 9000_delete_resources.sh 15 | 16 | popd 17 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-mc-no-mesh/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 2050_generate_certs.sh 15 | run 2100_mongodb_sharded_multi_cluster.sh 16 | run 2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh 17 | 18 | run 2200_create_mongodb_user.sh 19 | sleep 10 20 | run_for_output 2210_verify_mongosh_connection.sh 21 | 22 | popd 23 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/code_snippets/2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh: -------------------------------------------------------------------------------- 1 | echo; echo "Waiting for MongoDB to reach Running phase..." 
2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" wait --for=jsonpath='{.status.phase}'=Running "mdb/${SC_RESOURCE_NAME}" --timeout=900s 3 | echo; echo "Pods running in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 5 | echo; echo "Pods running in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 7 | echo; echo "Pods running in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" get pods 9 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/code_snippets/2200_create_mongodb_user.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" -f - < certs/ca.crt 5 | 6 | mongosh --host "${external_ip}" --username sc-user --password password --tls --tlsCAFile certs/ca.crt --tlsAllowInvalidHostnames --eval "db.runCommand({connectionStatus : 1})" 7 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/code_snippets/9000_delete_resources.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete mdbu/sc-user 2 | 3 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${MDB_NAMESPACE}" delete "mdb/${SC_RESOURCE_NAME}" 4 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/env_variables.sh: -------------------------------------------------------------------------------- 1 | # This script builds on top of the environment configured in the setup guides. 
2 | # It depends (uses) the following env variables defined there to work correctly. 3 | # If you don't use the setup guide to bootstrap the environment, then define them here. 4 | # ${K8S_CLUSTER_0_CONTEXT_NAME} 5 | # ${K8S_CLUSTER_1_CONTEXT_NAME} 6 | # ${K8S_CLUSTER_2_CONTEXT_NAME} 7 | # ${MDB_NAMESPACE} 8 | 9 | export SC_RESOURCE_NAME=mdb-sh 10 | export MONGODB_VERSION="8.0.5-ent" 11 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/output/2210_verify_mongosh_connection.out: -------------------------------------------------------------------------------- 1 | { 2 | authInfo: { 3 | authenticatedUsers: [ { user: 'sc-user', db: 'admin' } ], 4 | authenticatedUserRoles: [ { role: 'root', db: 'admin' } ] 5 | }, 6 | ok: 1, 7 | '$clusterTime': { 8 | clusterTime: Timestamp({ t: 1741702735, i: 1 }), 9 | signature: { 10 | hash: Binary.createFromBase64('kVqqNDHTI1zxYrPsU0QaYqyksJA=', 0), 11 | keyId: Long('7480555706358169606') 12 | } 13 | }, 14 | operationTime: Timestamp({ t: 1741702735, i: 1 }) 15 | } 16 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 9000_delete_resources.sh 15 | 16 | popd 17 | -------------------------------------------------------------------------------- /architectures/mongodb-sharded-multi-cluster/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 
7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 2050_generate_certs.sh 15 | run 2100_mongodb_sharded_multi_cluster.sh 16 | run 2110_mongodb_sharded_multi_cluster_wait_for_running_state.sh 17 | 18 | run 2200_create_mongodb_user.sh 19 | sleep 10 20 | run_for_output 2210_verify_mongosh_connection.sh 21 | 22 | popd 23 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0100_generate_certs.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" apply -f - < certs/tls.crt 4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" get secret cert-prefix-om-cert -o jsonpath="{.data['tls\.key']}" | base64 --decode > certs/tls.key 5 | 6 | gcloud compute ssl-certificates create om-certificate --certificate=certs/tls.crt --private-key=certs/tls.key 7 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0150_om_load_balancer.sh: -------------------------------------------------------------------------------- 1 | gcloud compute firewall-rules create fw-ops-manager-hc \ 2 | --action=allow \ 3 | --direction=ingress \ 4 | --target-tags=mongodb \ 5 | --source-ranges=130.211.0.0/22,35.191.0.0/16 \ 6 | --rules=tcp:8443 7 | 8 | gcloud compute health-checks create https om-healthcheck \ 9 | --use-serving-port \ 10 | --request-path=/monitor/health 11 | 12 | gcloud compute backend-services create om-backend-service \ 13 | --protocol HTTPS \ 14 | --health-checks om-healthcheck \ 15 | --global 16 | 17 | gcloud compute url-maps create om-url-map \ 18 | --default-service om-backend-service 19 | 20 | gcloud compute target-https-proxies create om-lb-proxy \ 21 | --url-map om-url-map \ 22 | --ssl-certificates=om-certificate 23 | 24 | 
gcloud compute forwarding-rules create om-forwarding-rule \ 25 | --global \ 26 | --target-https-proxy=om-lb-proxy \ 27 | --ports=443 28 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0160_add_dns_record.sh: -------------------------------------------------------------------------------- 1 | ip_address=$(gcloud compute forwarding-rules describe om-forwarding-rule --global --format="get(IPAddress)") 2 | 3 | gcloud dns record-sets create "${OPS_MANAGER_EXTERNAL_DOMAIN}" --zone="${DNS_ZONE}" --type="A" --ttl="300" --rrdatas="${ip_address}" 4 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0300_ops_manager_create_admin_credentials.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" --namespace "${OM_NAMESPACE}" create secret generic om-admin-user-credentials \ 2 | --from-literal=Username="admin" \ 3 | --from-literal=Password="Passw0rd@" \ 4 | --from-literal=FirstName="Jane" \ 5 | --from-literal=LastName="Doe" 6 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0321_ops_manager_wait_for_pending_state.sh: -------------------------------------------------------------------------------- 1 | echo "Waiting for Application Database to reach Pending phase..." 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" wait --for=jsonpath='{.status.applicationDatabase.phase}'=Pending opsmanager/om --timeout=30s 3 | 4 | echo "Waiting for Ops Manager to reach Pending phase..." 
5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" wait --for=jsonpath='{.status.opsManager.phase}'=Pending opsmanager/om --timeout=600s 6 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0325_set_up_lb_services.sh: -------------------------------------------------------------------------------- 1 | svcneg0=$(kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" get svcneg -o=jsonpath='{.items[0].metadata.name}') 2 | 3 | gcloud compute backend-services add-backend om-backend-service \ 4 | --global \ 5 | --network-endpoint-group="${svcneg0}" \ 6 | --network-endpoint-group-zone="${K8S_CLUSTER_0_ZONE}" \ 7 | --balancing-mode RATE --max-rate-per-endpoint 5 8 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0326_set_up_lb_services.sh: -------------------------------------------------------------------------------- 1 | svcneg1=$(kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${OM_NAMESPACE}" get svcneg -o=jsonpath='{.items[0].metadata.name}') 2 | 3 | gcloud compute backend-services add-backend om-backend-service \ 4 | --global \ 5 | --network-endpoint-group="${svcneg1}" \ 6 | --network-endpoint-group-zone="${K8S_CLUSTER_1_ZONE}" \ 7 | --balancing-mode RATE --max-rate-per-endpoint 5 8 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0330_ops_manager_wait_for_running_state.sh: -------------------------------------------------------------------------------- 1 | echo "Waiting for Application Database to reach Running phase..." 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" wait --for=jsonpath='{.status.applicationDatabase.phase}'=Running opsmanager/om --timeout=900s 3 | echo; echo "Waiting for Ops Manager to reach Running phase..." 
4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" wait --for=jsonpath='{.status.opsManager.phase}'=Running opsmanager/om --timeout=900s 5 | echo; echo "MongoDBOpsManager resource" 6 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" get opsmanager/om 7 | echo; echo "Pods running in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 8 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" get pods 9 | echo; echo "Pods running in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 10 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${OM_NAMESPACE}" get pods 11 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0400_install_minio_s3.sh: -------------------------------------------------------------------------------- 1 | kubectl kustomize "github.com/minio/operator/resources/?timeout=120&ref=v5.0.12" | \ 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" apply -f - 3 | 4 | kubectl kustomize "github.com/minio/operator/examples/kustomization/tenant-tiny?timeout=120&ref=v5.0.12" | \ 5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" apply -f - 6 | 7 | # add two buckets to the tenant config 8 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "tenant-tiny" patch tenant/myminio \ 9 | --type='json' \ 10 | -p="[{\"op\": \"add\", \"path\": \"/spec/buckets\", \"value\": [{\"name\": \"${S3_OPLOG_BUCKET_NAME}\"}, {\"name\": \"${S3_SNAPSHOT_BUCKET_NAME}\"}]}]" 11 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/0500_ops_manager_prepare_s3_backup_secrets.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" create secret generic s3-access-secret \ 2 | --from-literal=accessKey="${S3_ACCESS_KEY}" \ 3 | --from-literal=secretKey="${S3_SECRET_KEY}" 4 | 5 | # 
minio TLS secrets are signed with the default k8s root CA 6 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" create secret generic s3-ca-cert \ 7 | --from-literal=ca.crt="$(kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n kube-system get configmap kube-root-ca.crt -o jsonpath="{.data.ca\.crt}")" 8 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/9000_cleanup_gke_lb.sh: -------------------------------------------------------------------------------- 1 | gcloud compute firewall-rules delete fw-ops-manager-hc -q || true 2 | 3 | gcloud compute forwarding-rules delete om-forwarding-rule --global -q || true 4 | 5 | gcloud compute target-https-proxies delete om-lb-proxy -q || true 6 | 7 | gcloud compute ssl-certificates delete om-certificate -q || true 8 | 9 | gcloud compute url-maps delete om-url-map -q || true 10 | 11 | gcloud compute backend-services delete om-backend-service --global -q || true 12 | 13 | gcloud compute health-checks delete om-healthcheck -q || true 14 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/9100_delete_backup_namespaces.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "minio-operator" & 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "tenant-tiny" & 3 | wait 4 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/code_snippets/9200_delete_om.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" delete om/om 2 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/output/0150_om_load_balancer.out: 
-------------------------------------------------------------------------------- 1 | NAME NETWORK DIRECTION PRIORITY ALLOW DENY DISABLED 2 | fw-ops-manager-hc default INGRESS 1000 tcp:8443 False 3 | NAME PROTOCOL 4 | om-healthcheck HTTPS 5 | NAME BACKENDS PROTOCOL 6 | om-backend-service HTTPS 7 | NAME DEFAULT_SERVICE 8 | om-url-map backendServices/om-backend-service 9 | NAME SSL_CERTIFICATES URL_MAP REGION CERTIFICATE_MAP 10 | om-lb-proxy om-certificate om-url-map 11 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/output/0321_ops_manager_wait_for_pending_state.out: -------------------------------------------------------------------------------- 1 | Waiting for Application Database to reach Pending phase... 2 | mongodbopsmanager.mongodb.com/om condition met 3 | Waiting for Ops Manager to reach Pending phase... 4 | mongodbopsmanager.mongodb.com/om condition met 5 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 9000_cleanup_gke_lb.sh & 15 | run 9100_delete_backup_namespaces.sh & 16 | run 9200_delete_om.sh & 17 | wait 18 | 19 | popd 20 | -------------------------------------------------------------------------------- /architectures/ops-manager-mc-no-mesh/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd 
"${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 0100_generate_certs.sh 15 | run 0110_add_cert_to_gcp.sh 16 | 17 | run_for_output 0150_om_load_balancer.sh 18 | 19 | run 0160_add_dns_record.sh 20 | 21 | run 0300_ops_manager_create_admin_credentials.sh 22 | 23 | run 0320_ops_manager_no_mesh.sh 24 | 25 | run_for_output 0321_ops_manager_wait_for_pending_state.sh 26 | 27 | run 0325_set_up_lb_services.sh 28 | run 0326_set_up_lb_services.sh 29 | 30 | run_for_output 0330_ops_manager_wait_for_running_state.sh 31 | 32 | run 0400_install_minio_s3.sh 33 | run 0500_ops_manager_prepare_s3_backup_secrets.sh 34 | run 0510_ops_manager_enable_s3_backup.sh 35 | run_for_output 0522_ops_manager_wait_for_running_state.sh 36 | 37 | run 0610_create_mdb_org_and_get_credentials.sh 38 | 39 | popd 40 | -------------------------------------------------------------------------------- /architectures/ops-manager-multi-cluster/code_snippets/0250_generate_certs.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OM_NAMESPACE}" apply -f - <out}' 6 | 7 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" get secret root-secret -n cert-manager -o jsonpath="{.data['ca\.crt']}" | base64 --decode > certs/ca.crt 8 | cat certs/ca.crt certs/cert2.crt certs/cert3.crt >> certs/mms-ca.crt 9 | 10 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create cm ca-issuer -n "${MDB_NAMESPACE}" --from-file=ca-pem=certs/mms-ca.crt --from-file=mms-ca.crt=certs/mms-ca.crt 11 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create cm ca-issuer -n "${OM_NAMESPACE}" --from-file=ca-pem=certs/mms-ca.crt --from-file=mms-ca.crt=certs/mms-ca.crt 12 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-cert-manager/output/0215_helm_configure_repo.out: -------------------------------------------------------------------------------- 1 | "jetstack" has been 
added to your repositories 2 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-cert-manager/output/0216_helm_install_cert_manager.out: -------------------------------------------------------------------------------- 1 | Release "cert-manager" does not exist. Installing it now. 2 | NAME: cert-manager 3 | LAST DEPLOYED: Wed Apr 2 18:07:45 2025 4 | NAMESPACE: cert-manager 5 | STATUS: deployed 6 | REVISION: 1 7 | TEST SUITE: None 8 | NOTES: 9 | cert-manager v1.17.1 has been deployed successfully! 10 | 11 | In order to begin issuing certificates, you will need to set up a ClusterIssuer 12 | or Issuer resource (for example, by creating a 'letsencrypt-staging' issuer). 13 | 14 | More information on the different types of issuers and how to configure them 15 | can be found in our documentation: 16 | 17 | https://cert-manager.io/docs/configuration/ 18 | 19 | For information on how to configure cert-manager to automatically provision 20 | Certificates for Ingress resources, take a look at the `ingress-shim` 21 | documentation: 22 | 23 | https://cert-manager.io/docs/usage/ingress/ 24 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-cert-manager/output/0221_verify_issuer.out: -------------------------------------------------------------------------------- 1 | certificate.cert-manager.io/test-selfsigned-cert created 2 | certificate.cert-manager.io/test-selfsigned-cert condition met 3 | certificate.cert-manager.io "test-selfsigned-cert" deleted 4 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-cert-manager/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source 
scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run_for_output 0215_helm_configure_repo.sh 15 | run_for_output 0216_helm_install_cert_manager.sh 16 | run 0220_create_issuer.sh 17 | run_for_output 0221_verify_issuer.sh 18 | run 0225_create_ca_configmap.sh 19 | 20 | popd 21 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0100_create_gke_sa.sh: -------------------------------------------------------------------------------- 1 | gcloud iam service-accounts create "${DNS_SA_NAME}" --display-name "${DNS_SA_NAME}" 2 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0120_add_role_to_sa.sh: -------------------------------------------------------------------------------- 1 | gcloud projects add-iam-policy-binding "${MDB_GKE_PROJECT}" --member serviceAccount:"${DNS_SA_EMAIL}" --role roles/dns.admin 2 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0130_create_sa_key.sh: -------------------------------------------------------------------------------- 1 | mkdir -p secrets 2 | 3 | gcloud iam service-accounts keys create secrets/external-dns-sa-key.json --iam-account="${DNS_SA_EMAIL}" 4 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0140_create_namespaces.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create ns external-dns 2 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create ns external-dns 3 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create ns external-dns 4 | 
-------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0150_create_sa_secrets.sh: -------------------------------------------------------------------------------- 1 | # create secret with service account key 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n external-dns create secret generic external-dns-sa-secret --from-file credentials.json=secrets/external-dns-sa-key.json 3 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n external-dns create secret generic external-dns-sa-secret --from-file credentials.json=secrets/external-dns-sa-key.json 4 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n external-dns create secret generic external-dns-sa-secret --from-file credentials.json=secrets/external-dns-sa-key.json 5 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0200_install_externaldns.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n external-dns apply -f yamls/externaldns.yaml 2 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n external-dns apply -f yamls/externaldns.yaml 3 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n external-dns apply -f yamls/externaldns.yaml 4 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/0300_setup_dns_zone.sh: -------------------------------------------------------------------------------- 1 | FQ_CLUSTER_0="projects/${MDB_GKE_PROJECT}/locations/${K8S_CLUSTER_0_ZONE}/clusters/${K8S_CLUSTER_0}" 2 | FQ_CLUSTER_1="projects/${MDB_GKE_PROJECT}/locations/${K8S_CLUSTER_1_ZONE}/clusters/${K8S_CLUSTER_1}" 3 | FQ_CLUSTER_2="projects/${MDB_GKE_PROJECT}/locations/${K8S_CLUSTER_2_ZONE}/clusters/${K8S_CLUSTER_2}" 4 | 5 | gcloud dns 
managed-zones create "${DNS_ZONE}" \ 6 | --description="" \ 7 | --dns-name="${CUSTOM_DOMAIN}" \ 8 | --visibility="private" \ 9 | --gkeclusters="${FQ_CLUSTER_0}","${FQ_CLUSTER_1}","${FQ_CLUSTER_2}" 10 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/9000_delete_sa.sh: -------------------------------------------------------------------------------- 1 | gcloud projects remove-iam-policy-binding "${MDB_GKE_PROJECT}" --member serviceAccount:"${DNS_SA_EMAIL}" --role roles/dns.admin 2 | 3 | gcloud iam service-accounts delete "${DNS_SA_EMAIL}" -q 4 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/9050_delete_namespace.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "external-dns" & 2 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" delete ns "external-dns" & 3 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" delete ns "external-dns" & 4 | wait 5 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/code_snippets/9100_delete_dns_zone.sh: -------------------------------------------------------------------------------- 1 | gcloud dns record-sets list --zone="${DNS_ZONE}" --format=json | jq -c '.[]' | while read -r record; do 2 | NAME=$(echo "${record}" | jq -r '.name') 3 | TYPE=$(echo "${record}" | jq -r '.type') 4 | 5 | if [[ "${TYPE}" == "A" || "${TYPE}" == "TXT" ]]; then 6 | gcloud dns record-sets delete "${NAME}" --zone="${DNS_ZONE}" --type="${TYPE}" -q 7 | fi 8 | done 9 | 10 | gcloud dns managed-zones delete "${DNS_ZONE}" -q 11 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/env_variables.sh:
-------------------------------------------------------------------------------- 1 | # This script builds on top of the environment configured in the setup guides. 2 | # It depends (uses) the following env variables defined there to work correctly. 3 | # If you don't use the setup guide to bootstrap the environment, then define them here. 4 | # ${K8S_CLUSTER_0} 5 | # ${K8S_CLUSTER_1} 6 | # ${K8S_CLUSTER_2} 7 | # ${K8S_CLUSTER_0_ZONE} 8 | # ${K8S_CLUSTER_1_ZONE} 9 | # ${K8S_CLUSTER_2_ZONE} 10 | # ${K8S_CLUSTER_0_CONTEXT_NAME} 11 | # ${K8S_CLUSTER_1_CONTEXT_NAME} 12 | # ${K8S_CLUSTER_2_CONTEXT_NAME} 13 | # ${MDB_GKE_PROJECT} 14 | 15 | export DNS_SA_NAME="external-dns-sa" 16 | export DNS_SA_EMAIL="${DNS_SA_NAME}@${MDB_GKE_PROJECT}.iam.gserviceaccount.com" 17 | 18 | export CUSTOM_DOMAIN="mongodb.custom" 19 | export DNS_ZONE="mongodb" 20 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 9000_delete_sa.sh 15 | run 9050_delete_namespace.sh 16 | run 9100_delete_dns_zone.sh 17 | 18 | popd 19 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-externaldns/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 0100_create_gke_sa.sh 15 | # need to 
wait as the SA is not immediately available 16 | sleep 10 17 | run 0120_add_role_to_sa.sh 18 | run 0130_create_sa_key.sh 19 | run 0140_create_namespaces.sh 20 | run 0150_create_sa_secrets.sh 21 | run 0200_install_externaldns.sh 22 | run 0300_setup_dns_zone.sh 23 | 24 | popd 25 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/code_snippets/0005_gcloud_set_current_project.sh: -------------------------------------------------------------------------------- 1 | gcloud config set project "${MDB_GKE_PROJECT}" 2 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/code_snippets/0010_create_gke_cluster_0.sh: -------------------------------------------------------------------------------- 1 | gcloud container clusters create "${K8S_CLUSTER_0}" \ 2 | --zone="${K8S_CLUSTER_0_ZONE}" \ 3 | --num-nodes="${K8S_CLUSTER_0_NUMBER_OF_NODES}" \ 4 | --machine-type "${K8S_CLUSTER_0_MACHINE_TYPE}" \ 5 | --tags=mongodb \ 6 | "${GKE_SPOT_INSTANCES_SWITCH:-""}" 7 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/code_snippets/0010_create_gke_cluster_1.sh: -------------------------------------------------------------------------------- 1 | gcloud container clusters create "${K8S_CLUSTER_1}" \ 2 | --zone="${K8S_CLUSTER_1_ZONE}" \ 3 | --num-nodes="${K8S_CLUSTER_1_NUMBER_OF_NODES}" \ 4 | --machine-type "${K8S_CLUSTER_1_MACHINE_TYPE}" \ 5 | --tags=mongodb \ 6 | "${GKE_SPOT_INSTANCES_SWITCH:-""}" 7 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/code_snippets/0010_create_gke_cluster_2.sh: -------------------------------------------------------------------------------- 1 | gcloud container clusters create "${K8S_CLUSTER_2}" \ 2 | --zone="${K8S_CLUSTER_2_ZONE}" \ 3 | 
--num-nodes="${K8S_CLUSTER_2_NUMBER_OF_NODES}" \ 4 | --machine-type "${K8S_CLUSTER_2_MACHINE_TYPE}" \ 5 | --tags=mongodb \ 6 | "${GKE_SPOT_INSTANCES_SWITCH:-""}" 7 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/code_snippets/0020_get_gke_credentials.sh: -------------------------------------------------------------------------------- 1 | gcloud container clusters get-credentials "${K8S_CLUSTER_0}" --zone="${K8S_CLUSTER_0_ZONE}" 2 | gcloud container clusters get-credentials "${K8S_CLUSTER_1}" --zone="${K8S_CLUSTER_1_ZONE}" 3 | gcloud container clusters get-credentials "${K8S_CLUSTER_2}" --zone="${K8S_CLUSTER_2_ZONE}" 4 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/code_snippets/0030_verify_access_to_clusters.sh: -------------------------------------------------------------------------------- 1 | echo "Nodes in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" get nodes 3 | echo; echo "Nodes in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 4 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" get nodes 5 | echo; echo "Nodes in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" 6 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" get nodes 7 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/code_snippets/9010_delete_gke_clusters.sh: -------------------------------------------------------------------------------- 1 | gcloud container clusters delete "${K8S_CLUSTER_0}" --zone="${K8S_CLUSTER_0_ZONE}" -q & 2 | gcloud container clusters delete "${K8S_CLUSTER_1}" --zone="${K8S_CLUSTER_1_ZONE}" -q & 3 | gcloud container clusters delete "${K8S_CLUSTER_2}" --zone="${K8S_CLUSTER_2_ZONE}" -q & 4 | wait 5 | --------------------------------------------------------------------------------
/architectures/setup-multi-cluster/setup-gke/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 9010_delete_gke_clusters.sh 15 | 16 | popd 17 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-gke/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 0005_gcloud_set_current_project.sh 15 | run 0010_create_gke_cluster_0.sh & 16 | run 0010_create_gke_cluster_1.sh & 17 | run 0010_create_gke_cluster_2.sh & 18 | wait 19 | 20 | run 0020_get_gke_credentials.sh 21 | run_for_output 0030_verify_access_to_clusters.sh 22 | 23 | popd 24 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-istio/code_snippets/0040_install_istio.sh: -------------------------------------------------------------------------------- 1 | CTX_CLUSTER1=${K8S_CLUSTER_0_CONTEXT_NAME} \ 2 | CTX_CLUSTER2=${K8S_CLUSTER_1_CONTEXT_NAME} \ 3 | CTX_CLUSTER3=${K8S_CLUSTER_2_CONTEXT_NAME} \ 4 | ISTIO_VERSION="1.20.2" \ 5 | ./install_istio_separate_network.sh 6 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-istio/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | 
script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 0040_install_istio.sh 15 | run 0050_label_namespaces.sh 16 | 17 | popd 18 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/code_snippets/0045_create_namespaces.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 2 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 3 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 4 | 5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${OM_NAMESPACE}" 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${OM_NAMESPACE}" 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${OM_NAMESPACE}" 8 | 9 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${MDB_NAMESPACE}" 10 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${MDB_NAMESPACE}" 11 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${MDB_NAMESPACE}" 12 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/code_snippets/0200_kubectl_mongodb_configure_multi_cluster.sh: -------------------------------------------------------------------------------- 1 | kubectl mongodb multicluster setup \ 2 | --central-cluster="${K8S_CLUSTER_0_CONTEXT_NAME}" \ 3 | --member-clusters="${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}" \ 4 | --member-cluster-namespace="${OM_NAMESPACE}" \ 5 | --central-cluster-namespace="${OPERATOR_NAMESPACE}" \ 6 | 
--create-service-account-secrets \ 7 | --install-database-roles=true \ 8 | --image-pull-secrets=image-registries-secret 9 | 10 | kubectl mongodb multicluster setup \ 11 | --central-cluster="${K8S_CLUSTER_0_CONTEXT_NAME}" \ 12 | --member-clusters="${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}" \ 13 | --member-cluster-namespace="${MDB_NAMESPACE}" \ 14 | --central-cluster-namespace="${OPERATOR_NAMESPACE}" \ 15 | --create-service-account-secrets \ 16 | --install-database-roles=true \ 17 | --image-pull-secrets=image-registries-secret 18 | 19 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/code_snippets/0205_helm_configure_repo.sh: -------------------------------------------------------------------------------- 1 | helm repo add mongodb https://mongodb.github.io/helm-charts 2 | helm repo update mongodb 3 | helm search repo "${OFFICIAL_OPERATOR_HELM_CHART}" 4 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/code_snippets/0210_helm_install_operator.sh: -------------------------------------------------------------------------------- 1 | helm upgrade --install \ 2 | --debug \ 3 | --kube-context "${K8S_CLUSTER_0_CONTEXT_NAME}" \ 4 | mongodb-enterprise-operator-multi-cluster \ 5 | "${OPERATOR_HELM_CHART}" \ 6 | --namespace="${OPERATOR_NAMESPACE}" \ 7 | --set namespace="${OPERATOR_NAMESPACE}" \ 8 | --set operator.namespace="${OPERATOR_NAMESPACE}" \ 9 | --set operator.watchNamespace="${OM_NAMESPACE}\,${MDB_NAMESPACE}" \ 10 | --set operator.name=mongodb-enterprise-operator-multi-cluster \ 11 | --set operator.createOperatorServiceAccount=false \ 12 | --set operator.createResourcesServiceAccountsAndRoles=false \ 13 | --set "multiCluster.clusters={${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}}" \ 14 | --set 
"${OPERATOR_ADDITIONAL_HELM_VALUES:-"dummy=value"}" \ 15 | --set operator.env=dev 16 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/code_snippets/0211_check_operator_deployment.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" rollout status deployment/mongodb-enterprise-operator-multi-cluster 2 | echo "Operator deployment in ${OPERATOR_NAMESPACE} namespace" 3 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" get deployments 4 | echo; echo "Operator pod in ${OPERATOR_NAMESPACE} namespace" 5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" get pods 6 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/code_snippets/9000_delete_namespaces.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "${OM_NAMESPACE}" & 2 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" delete ns "${OM_NAMESPACE}" & 3 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" delete ns "${OM_NAMESPACE}" & 4 | 5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "${OPERATOR_NAMESPACE}" & 6 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" delete ns "${OPERATOR_NAMESPACE}" & 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" delete ns "${OPERATOR_NAMESPACE}" & 8 | 9 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" delete ns "${MDB_NAMESPACE}" & 10 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" delete ns "${MDB_NAMESPACE}" & 11 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" delete ns "${MDB_NAMESPACE}" & 12 | wait 13 | -------------------------------------------------------------------------------- 
/architectures/setup-multi-cluster/setup-operator/env_variables.sh: -------------------------------------------------------------------------------- 1 | # Namespace in which Ops Manager and AppDB will be deployed 2 | export OM_NAMESPACE="mongodb-om" 3 | # Namespace in which the operator will be installed 4 | export OPERATOR_NAMESPACE="mongodb-operator" 5 | # Namespace in which MongoDB resources will be deployed 6 | export MDB_NAMESPACE="mongodb" 7 | 8 | # comma-separated key=value pairs for additional parameters passed to the helm-chart installing the operator 9 | export OPERATOR_ADDITIONAL_HELM_VALUES="${OPERATOR_ADDITIONAL_HELM_VALUES:-""}" 10 | 11 | export OFFICIAL_OPERATOR_HELM_CHART="mongodb/enterprise-operator" 12 | export OPERATOR_HELM_CHART="${OPERATOR_HELM_CHART:-${OFFICIAL_OPERATOR_HELM_CHART}}" 13 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/output/0205_helm_configure_repo.out: -------------------------------------------------------------------------------- 1 | "mongodb" has been added to your repositories 2 | Hang tight while we grab the latest from your chart repositories... 3 | ...Successfully got an update from the "mongodb" chart repository 4 | Update Complete. ⎈Happy Helming!⎈ 5 | NAME CHART VERSION APP VERSION DESCRIPTION 6 | mongodb/enterprise-operator 1.32.0 MongoDB Kubernetes Enterprise Operator 7 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/output/0211_check_operator_deployment.out: -------------------------------------------------------------------------------- 1 | Waiting for deployment "mongodb-enterprise-operator-multi-cluster" rollout to finish: 0 of 1 updated replicas are available... 
2 | deployment "mongodb-enterprise-operator-multi-cluster" successfully rolled out 3 | Operator deployment in mongodb-operator namespace 4 | NAME READY UP-TO-DATE AVAILABLE AGE 5 | mongodb-enterprise-operator-multi-cluster 1/1 1 1 9s 6 | 7 | Operator pod in mongodb-operator namespace 8 | NAME READY STATUS RESTARTS AGE 9 | mongodb-enterprise-operator-multi-cluster-786c8fcd9b-9k465 2/2 Running 1 (3s ago) 10s 10 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/teardown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 9000_delete_namespaces.sh 15 | 16 | popd 17 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/setup-operator/test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eou pipefail 4 | 5 | script_name=$(readlink -f "${BASH_SOURCE[0]}") 6 | script_dir=$(dirname "${script_name}") 7 | 8 | source scripts/code_snippets/sample_test_runner.sh 9 | 10 | pushd "${script_dir}" 11 | 12 | prepare_snippets 13 | 14 | run 0045_create_namespaces.sh 15 | run 0046_create_image_pull_secrets.sh 16 | 17 | run_for_output 0200_kubectl_mongodb_configure_multi_cluster.sh 18 | run_for_output 0205_helm_configure_repo.sh 19 | run_for_output 0210_helm_install_operator.sh 20 | run_for_output 0211_check_operator_deployment.sh 21 | 22 | popd 23 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0045_create_connectivity_test_namespaces.sh: 
-------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "connectivity-test" 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" label namespace "connectivity-test" istio-injection=enabled --overwrite 3 | 4 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "connectivity-test" 5 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" label namespace "connectivity-test" istio-injection=enabled --overwrite 6 | 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "connectivity-test" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" label namespace "connectivity-test" istio-injection=enabled --overwrite 9 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0050_check_cluster_connectivity_create_sts_0.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "connectivity-test" -f - <&1); 8 | 9 | if grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" 10 | then 11 | echo "SUCCESS" 12 | else 13 | echo "ERROR: ${out}" 14 | return 1 15 | fi 16 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} 2 | target_pod="echoserver1-0" 3 | source_pod="echoserver0-0" 4 | target_url="http://${target_pod}.connectivity-test.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "connectivity-test" "${source_pod}" -- \ 7 | /bin/bash 
-c "curl -v ${target_url}" 2>&1); 8 | 9 | if grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" 10 | then 11 | echo "SUCCESS" 12 | else 13 | echo "ERROR: ${out}" 14 | return 1 15 | fi 16 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_2_CONTEXT_NAME} 2 | target_pod="echoserver1-0" 3 | source_pod="echoserver2-0" 4 | target_url="http://${target_pod}.connectivity-test.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "connectivity-test" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | 9 | if grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" 10 | then 11 | echo "SUCCESS" 12 | else 13 | echo "ERROR: ${out}" 14 | return 1 15 | fi 16 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/code_snippets/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} 2 | target_pod="echoserver2-0" 3 | source_pod="echoserver0-0" 4 | target_url="http://${target_pod}.connectivity-test.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "connectivity-test" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | 9 | if grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" 10 | then 11 | echo "SUCCESS" 12 | else 13 | echo "ERROR: ${out}" 14 
| return 1 15 | fi 16 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/output/0090_check_cluster_connectivity_verify_pod_0_0_from_cluster_1.out: -------------------------------------------------------------------------------- 1 | Checking cross-cluster DNS resolution and connectivity from echoserver1-0 in gke_scratch-kubernetes-team_europe-central2-b_k8s-mdb-1-67d0389d75b70a0007e5894a to echoserver0-0 2 | SUCCESS 3 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.out: -------------------------------------------------------------------------------- 1 | Checking cross-cluster DNS resolution and connectivity from echoserver0-0 in gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0-67d0389d75b70a0007e5894a to echoserver1-0 2 | SUCCESS 3 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/output/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.out: -------------------------------------------------------------------------------- 1 | Checking cross-cluster DNS resolution and connectivity from echoserver2-0 in gke_scratch-kubernetes-team_europe-central2-c_k8s-mdb-2-67d0389d75b70a0007e5894a to echoserver1-0 2 | SUCCESS 3 | -------------------------------------------------------------------------------- /architectures/setup-multi-cluster/verify-connectivity/output/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.out: -------------------------------------------------------------------------------- 1 | Checking cross-cluster DNS resolution and connectivity from echoserver0-0 in gke_scratch-kubernetes-team_europe-central2-a_k8s-mdb-0-67d0389d75b70a0007e5894a to echoserver2-0 2 | SUCCESS 3 | 
-------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.10/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.11/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY 
--from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.12/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.13/ubuntu/Dockerfile: 
-------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.14/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY 
--from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.15/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.2/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL 
name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.3/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 
27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.4/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.5/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup 
Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.6/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", 
"/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.7/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.8/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe 
/probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-database/1.0.9/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | ARG version 7 | LABEL name="MongoDB Enterprise Init Database" \ 8 | version="mongodb-enterprise-init-database-${version}" \ 9 | summary="MongoDB Enterprise Database Init Image" \ 10 | description="Startup Scripts for MongoDB Enterprise Database" \ 11 | release="1" \ 12 | vendor="MongoDB" \ 13 | maintainer="support@mongodb.com" 14 | 15 | COPY --from=base /data/readinessprobe /probes/readinessprobe 16 | COPY --from=base /data/probe.sh /probes/probe.sh 17 | COPY --from=base /data/scripts/ /scripts/ 18 | COPY --from=base /data/licenses /licenses/ 19 | 20 | 21 | COPY --from=base /data/mongodb_tools_ubuntu.tgz /tools/mongodb_tools.tgz 22 | 23 | 24 | RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ 25 | && rm /tools/mongodb_tools.tgz 26 | 27 | USER 2000 28 | ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] 29 | 30 | 31 | -------------------------------------------------------------------------------- 
/dockerfiles/mongodb-enterprise-init-ops-manager/1.0.10/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.10" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.10/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.10" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | 20 | 21 | USER 2000 22 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 23 | 24 | 25 | 
-------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.11/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.11" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.12/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.12" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", 
"/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.3/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.3" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | USER 2000 19 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/mmsconfiguration", "/opt/scripts/" ] 20 | 21 | 22 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.3/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.3" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | USER 2000 19 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/mmsconfiguration", "/opt/scripts/" ] 20 | 21 | 22 | 
-------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.4/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.4" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.4/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.4" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | 20 | 21 | USER 2000 22 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", 
"/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 23 | 24 | 25 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.5/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.5" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.5/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.5" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | 20 | 21 | USER 2000 22 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", 
"/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 23 | 24 | 25 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.6/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.6" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.6/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.6" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | 20 | 21 | USER 2000 
22 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 23 | 24 | 25 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.7/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.7" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.7/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.7" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY 
--from=base /data/licenses /licenses 17 | 18 | 19 | 20 | 21 | USER 2000 22 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 23 | 24 | 25 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.8/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.8" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.8/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.8" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops 
Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | 20 | 21 | USER 2000 22 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 23 | 24 | 25 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.9/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.9" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.0.9/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM busybox 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.0.9" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init 
Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | 20 | 21 | USER 2000 22 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 23 | 24 | 25 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.23.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.23.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.24.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | 
version="mongodb-enterprise-init-ops-manager-1.24.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.25.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.25.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.26.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM 
registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.26.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.27.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi8/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.27.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- 
/dockerfiles/mongodb-enterprise-init-ops-manager/1.28.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.28.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.29.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.29.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", 
"/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.30.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.30.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.31.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.31.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update 
--nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.32.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.32.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-init-ops-manager/1.33.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG imagebase 2 | FROM ${imagebase} as base 3 | 4 | FROM registry.access.redhat.com/ubi9/ubi-minimal 5 | 6 | LABEL name="MongoDB Enterprise Ops Manager Init" \ 7 | maintainer="support@mongodb.com" \ 8 | vendor="MongoDB" \ 9 | version="mongodb-enterprise-init-ops-manager-1.33.0" \ 10 | release="1" \ 11 | summary="MongoDB Enterprise Ops Manager Init Image" \ 12 | description="Startup Scripts 
for MongoDB Enterprise Ops Manager" 13 | 14 | 15 | COPY --from=base /data/scripts /scripts 16 | COPY --from=base /data/licenses /licenses 17 | 18 | 19 | RUN microdnf -y update --nodocs \ 20 | && microdnf clean all 21 | 22 | 23 | USER 2000 24 | ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] 25 | 26 | 27 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.11.0/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:16.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.11.0" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.12.0/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 
3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:20.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.12.0" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.13.0/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:20.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.13.0" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 
22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.14.0/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:20.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.14.0" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 
22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.15.0/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:20.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.15.0" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 
22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.15.1/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:20.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.15.1" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 
22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.15.2/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:20.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.15.2" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 
22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.16.0/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:20.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.16.0" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 
22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.16.1/ubuntu/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM ubuntu:20.04 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.16.1" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | 21 | # Adds up-to-date CA certificates. 
22 | RUN apt-get -qq update && \ 23 | apt-get -y -qq install ca-certificates curl && \ 24 | apt-get upgrade -y -qq && \ 25 | apt-get dist-upgrade -y -qq && \ 26 | rm -rf /var/lib/apt/lists/* 27 | 28 | 29 | 30 | 31 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 32 | COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json 33 | COPY --from=base /data/licenses /licenses/ 34 | 35 | USER 2000 36 | 37 | 38 | 39 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 40 | 41 | 42 | -------------------------------------------------------------------------------- /dockerfiles/mongodb-enterprise-operator/1.9.0/ubi/Dockerfile: -------------------------------------------------------------------------------- 1 | # 2 | # Base Template Dockerfile for Operator Image. 3 | # 4 | 5 | ARG imagebase 6 | FROM ${imagebase} as base 7 | 8 | FROM registry.access.redhat.com/ubi8/ubi 9 | 10 | 11 | LABEL name="MongoDB Enterprise Operator" \ 12 | maintainer="support@mongodb.com" \ 13 | vendor="MongoDB" \ 14 | version="1.9.0-23-g9050b474" \ 15 | release="1" \ 16 | summary="MongoDB Enterprise Operator Image" \ 17 | description="MongoDB Enterprise Operator Image" 18 | 19 | 20 | RUN yum -y --disableplugin=subscription-manager update && \ 21 | yum -y --disableplugin=subscription-manager clean all 22 | 23 | 24 | 25 | 26 | COPY --from=base /data/mongodb-enterprise-operator /usr/local/bin/mongodb-enterprise-operator 27 | COPY --from=base /data/version_manifest.json /var/lib/mongodb-enterprise-operator/version_manifest.json 28 | COPY --from=base /data/licenses /licenses/ 29 | RUN chmod a+r /var/lib/mongodb-enterprise-operator/version_manifest.json 30 | 31 | USER 2000 32 | 33 | ENTRYPOINT exec /usr/local/bin/mongodb-enterprise-operator 34 | 35 | 36 | -------------------------------------------------------------------------------- /docs/assets/image--000.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/86f9fa709f7279e34f4920ea6654bc9f75f61524/docs/assets/image--000.png -------------------------------------------------------------------------------- /docs/assets/image--002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/86f9fa709f7279e34f4920ea6654bc9f75f61524/docs/assets/image--002.png -------------------------------------------------------------------------------- /docs/assets/image--004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/86f9fa709f7279e34f4920ea6654bc9f75f61524/docs/assets/image--004.png -------------------------------------------------------------------------------- /docs/assets/image--008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/86f9fa709f7279e34f4920ea6654bc9f75f61524/docs/assets/image--008.png -------------------------------------------------------------------------------- /docs/assets/image--014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/86f9fa709f7279e34f4920ea6654bc9f75f61524/docs/assets/image--014.png -------------------------------------------------------------------------------- /docs/assets/image--030.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/86f9fa709f7279e34f4920ea6654bc9f75f61524/docs/assets/image--030.png -------------------------------------------------------------------------------- /docs/assets/image--032.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/86f9fa709f7279e34f4920ea6654bc9f75f61524/docs/assets/image--032.png -------------------------------------------------------------------------------- /docs/assets/image--034.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mongodb/mongodb-enterprise-kubernetes/86f9fa709f7279e34f4920ea6654bc9f75f61524/docs/assets/image--034.png -------------------------------------------------------------------------------- /opa_examples/debugging/constraint_template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: k8sdenyall 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: K8sDenyAll 10 | targets: 11 | - target: admission.k8s.gatekeeper.sh 12 | rego: | 13 | package k8sdenyall 14 | 15 | violation[{"msg": msg}] { 16 | msg := sprintf("REVIEW OBJECT: %v", [input.review]) 17 | } 18 | -------------------------------------------------------------------------------- /opa_examples/debugging/constraints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: K8sDenyAll 3 | metadata: 4 | name: deny-all-namespaces 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["mongodb.com"] 9 | kinds: ["MongoDB"] 10 | - apiGroups: ["mongodb.com"] 11 | kinds: ["MongoDBOpsManager"] 12 | -------------------------------------------------------------------------------- /opa_examples/mongodb_allow_replicaset/constraints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: MongoDBAllowReplicaset 3 | metadata: 4 | name: mongodb-allow-replicaset-only 5 | spec: 6 | match: 7 | 
kinds: 8 | - apiGroups: ["mongodb.com"] 9 | kinds: ["MongoDB"] -------------------------------------------------------------------------------- /opa_examples/mongodb_allow_replicaset/mongodb_allow_replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: mongodballowreplicaset 5 | annotations: 6 | description: >- 7 | Allows only replica set deployment of MongoDB 8 | 9 | The type setting for MongoDB should be ReplicaSet 10 | spec: 11 | crd: 12 | spec: 13 | names: 14 | kind: MongoDBAllowReplicaset 15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | package mongodballowreplicaset 19 | 20 | violation[{"msg": msg}] { 21 | deployment_type = object.get(input.review.object.spec, "type", "none") 22 | not deployment_type == "ReplicaSet" 23 | msg := sprintf("Only replicaset deployment of MongoDB allowed, requested %v", [deployment_type]) 24 | } 25 | 26 | -------------------------------------------------------------------------------- /opa_examples/mongodb_allowed_versions/constraints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: MongoDBAllowedVersions 3 | metadata: 4 | name: mongodb-allowed-versions-only 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["mongodb.com"] 9 | kinds: ["MongoDB"] 10 | -------------------------------------------------------------------------------- /opa_examples/mongodb_allowed_versions/mongodb_allowed_versions.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: mongodballowedversions 5 | annotations: 6 | description: >- 7 | Requires MongoDB deployment to be within the allowed versions 8 | 9 | The setting version should be within the pinned allowed values 
10 | spec: 11 | crd: 12 | spec: 13 | names: 14 | kind: MongoDBAllowedVersions 15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | package mongodballowedversions 19 | 20 | allowed_versions = ["4.5.0", "5.0.0"] 21 | 22 | violation[{"msg": msg}] { 23 | version = object.get(input.review.object.spec, "version", "none") 24 | not q[version] 25 | msg := sprintf("MongoDB deployment needs to be one of the allowed versions: %v", [allowed_versions]) 26 | } 27 | 28 | q[version] { version := allowed_versions[_] } 29 | -------------------------------------------------------------------------------- /opa_examples/mongodb_strict_tls/constraints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: MongoDBStrictTLS 3 | metadata: 4 | name: mongodb-strict-tls-only 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["mongodb.com"] 9 | kinds: ["MongoDB"] 10 | -------------------------------------------------------------------------------- /opa_examples/ops_manager_allowed_versions/constraints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: OpsManagerAllowedVersions 3 | metadata: 4 | name: ops-manager-allowed-versions-only 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["mongodb.com"] 9 | kinds: ["MongoDBOpsManager"] 10 | -------------------------------------------------------------------------------- /opa_examples/ops_manager_allowed_versions/ops_manager_allowed_versions.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: opsmanagerallowedversions 5 | annotations: 6 | description: >- 7 | Requires Ops Manager to be within the allowed versions 8 | 9 | The setting version should be within the pinned allowed values 10 | spec: 11 | crd: 12 | spec: 
13 | names: 14 | kind: OpsManagerAllowedVersions 15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | package opsmanagerallowedversions 19 | 20 | allowed_versions = ["4.4.5", "5.0.0"] 21 | 22 | violation[{"msg": msg}] { 23 | version = object.get(input.review.object.spec, "version", "none") 24 | not q[version] 25 | msg := sprintf("Ops Manager needs to be one of the allowed versions: %v", [allowed_versions]) 26 | } 27 | 28 | q[version] { version := allowed_versions[_] } 29 | -------------------------------------------------------------------------------- /opa_examples/ops_manager_replica_members/constraints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: OpsManagerReplicaMembers 3 | metadata: 4 | name: ops-manager-replicamembers-only 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["mongodb.com"] 9 | kinds: ["MongoDBOpsManager"] 10 | -------------------------------------------------------------------------------- /opa_examples/ops_manager_wizardless/constraints.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: OpsManagerWizardless 3 | metadata: 4 | name: ops-manager-wizardless-only 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["mongodb.com"] 9 | kinds: ["MongoDBOpsManager"] 10 | -------------------------------------------------------------------------------- /opa_examples/ops_manager_wizardless/ops_manager_wizardless_template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: opsmanagerwizardless 5 | annotations: 6 | description: >- 7 | Requires Ops Manager install to be wizardless 8 | 9 | The setting mms.ignoreInitialUiSetup needs to be true 10 | spec: 11 | crd: 12 | spec: 13 | names: 14 | kind: OpsManagerWizardless 
15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | package opsmanagerwizardless 19 | 20 | violation[{"msg": msg}] { 21 | value := object.get(input.review.object.spec.configuration, "mms.ignoreInitialUiSetup", "false") 22 | not value == "true" 23 | msg := sprintf("Wizard based setup of Ops Manager is not allowed. mms.ignoreInitialUiSetup needs to be true, currently is %v", [value]) 24 | } 25 | -------------------------------------------------------------------------------- /samples/mongodb/agent-startup-options/replica-set-agent-startup-options.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDB 3 | metadata: 4 | name: my-replica-set-agent-parameters 5 | spec: 6 | members: 3 7 | version: 4.4.0-ent 8 | type: ReplicaSet 9 | opsManager: 10 | configMapRef: 11 | name: my-project 12 | credentials: my-credentials 13 | persistent: true 14 | # optional. Allows to pass custom flags that will be used 15 | # when launching the mongodb agent. All values must be strings 16 | # The full list of available settings is at: 17 | # https://docs.opsmanager.mongodb.com/current/reference/mongodb-agent-settings/ 18 | agent: 19 | startupOptions: 20 | maxLogFiles: "30" 21 | dialTimeoutSeconds: "40" 22 | -------------------------------------------------------------------------------- /samples/mongodb/agent-startup-options/standalone-agent-startup-options.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDB 4 | metadata: 5 | name: my-standalone 6 | spec: 7 | version: 4.4.0-ent 8 | service: my-service 9 | 10 | opsManager: 11 | configMapRef: 12 | name: my-project 13 | credentials: my-credentials 14 | type: Standalone 15 | 16 | persistent: true 17 | # optional. Allows to pass custom flags that will be used 18 | # when launching the mongodb agent. 
All values must be strings 19 | # The full list of available settings is at: 20 | # https://docs.opsmanager.mongodb.com/current/reference/mongodb-agent-settings/ 21 | agent: 22 | startupOptions: 23 | maxLogFiles: "30" 24 | dialTimeoutSeconds: "40" 25 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/ldap/replica-set/replica-set-ldap-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-ldap-user 6 | spec: 7 | username: my-ldap-user 8 | db: $external 9 | mongodbResourceRef: 10 | name: my-ldap-enabled-replica-set # The name of the MongoDB resource this user will be added to 11 | roles: 12 | - db: admin 13 | name: clusterAdmin 14 | - db: admin 15 | name: userAdminAnyDatabase 16 | - db: admin 17 | name: readWrite 18 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/ldap/sharded-cluster/sharded-cluster-ldap-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-ldap-user 6 | spec: 7 | username: my-ldap-user 8 | db: $external 9 | mongodbResourceRef: 10 | name: my-ldap-enabled-sharded-cluster # The name of the MongoDB resource this user will be added to 11 | roles: 12 | - db: admin 13 | name: clusterAdmin 14 | - db: admin 15 | name: userAdminAnyDatabase 16 | - db: admin 17 | name: readWrite 18 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/replica-set/replica-set-scram-password.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: my-scram-secret 6 | type: Opaque 7 | stringData: 8 | password: my-replica-set-password 9 | 
-------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/replica-set/replica-set-scram-sha.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDB 4 | metadata: 5 | name: my-scram-enabled-replica-set 6 | spec: 7 | type: ReplicaSet 8 | members: 3 9 | 10 | # Using a version >= 4.0 will enable SCRAM-SHA-256 authentication 11 | # setting a version < 4.0 will enable SCRAM-SHA-1/MONGODB-CR authentication 12 | version: 4.0.4-ent 13 | 14 | opsManager: 15 | configMapRef: 16 | name: my-project 17 | credentials: my-credentials 18 | 19 | security: 20 | authentication: 21 | enabled: true 22 | modes: ["SCRAM"] # Valid authentication modes are "SCRAM', "SCRAM-SHA-1", "MONGODB-CR", "X509" and "LDAP" 23 | 24 | # Optional field - ignoreUnknownUsers 25 | # A value of true means that any users not configured via the Operator or the Ops Manager or Cloud Manager UI 26 | # will not be altered in any way 27 | 28 | # If you need to manage MongoDB users directly via the mongods, set this value to true 29 | ignoreUnknownUsers: true # default value false 30 | 31 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/replica-set/replica-set-scram-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-scram-user 6 | spec: 7 | passwordSecretKeyRef: 8 | name: my-scram-secret # the name of the secret that stores this user's password 9 | key: password # the key in the secret that stores the password 10 | username: my-scram-user 11 | db: admin 12 | mongodbResourceRef: 13 | name: my-scram-enabled-replica-set # The name of the MongoDB resource this user will be added to 14 | roles: 15 | - db: admin 16 | name: clusterAdmin 17 | - db: admin 18 | name: 
readWriteAnyDatabase 19 | - db: admin 20 | name: dbAdminAnyDatabase 21 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/sharded-cluster/sharded-cluster-scram-password.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: my-scram-secret 6 | type: Opaque 7 | stringData: 8 | password: my-sharded-cluster-password 9 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/sharded-cluster/sharded-cluster-scram-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-scram-user 6 | spec: 7 | passwordSecretKeyRef: 8 | name: my-scram-secret # the name of the secret that stores this user's password 9 | key: password # the key in the secret that stores the password 10 | username: my-scram-user 11 | db: admin 12 | mongodbResourceRef: 13 | name: my-scram-enabled-sharded-cluster # The name of the MongoDB resource this user will be added to 14 | roles: 15 | - db: admin 16 | name: clusterAdmin 17 | - db: admin 18 | name: userAdminAnyDatabase 19 | 20 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/standalone/standalone-scram-password.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: my-scram-secret 6 | type: Opaque 7 | stringData: 8 | password: my-standalone-password 9 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/standalone/standalone-scram-sha.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: 
MongoDB 4 | metadata: 5 | name: my-scram-enabled-standalone 6 | spec: 7 | type: Standalone 8 | 9 | # Using a version >= 4.0 will enable SCRAM-SHA-256 authentication 10 | version: 4.4.0-ent 11 | 12 | opsManager: 13 | configMapRef: 14 | name: my-project 15 | credentials: my-credentials 16 | 17 | security: 18 | authentication: 19 | enabled: true 20 | modes: ["SCRAM"] # Valid authentication modes are "SCRAM' and "X509" 21 | 22 | # Optional field - ignoreUnknownUsers 23 | # A value of true means that any users not configured via the Operator or the Ops Manager or Cloud Manager UI 24 | # will not be altered in any way 25 | 26 | # If you need to manage MongoDB users directly via the mongods, set this value to true 27 | ignoreUnknownUsers: true # default value false 28 | 29 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/scram/standalone/standalone-scram-user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-scram-user 6 | spec: 7 | passwordSecretKeyRef: 8 | name: my-scram-secret # the name of the secret that stores this user's password 9 | key: password # the key in the secret that stores the password 10 | username: my-scram-user 11 | db: admin 12 | mongodbResourceRef: 13 | name: my-scram-enabled-standalone # The name of the MongoDB resource this user will be added to 14 | roles: 15 | - db: admin 16 | name: readWrite 17 | - db: admin 18 | name: userAdminAnyDatabase 19 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/x509/replica-set/user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-replica-set-x509-user 6 | spec: 7 | username: CN=my-replica-set-x509-user,OU=cloud,O=MongoDB,L=New York,ST=New 
York,C=US 8 | db: $external 9 | mongodbResourceRef: 10 | name: my-replica-set 11 | roles: 12 | - db: admin 13 | name: dbOwner 14 | -------------------------------------------------------------------------------- /samples/mongodb/authentication/x509/sharded-cluster/user.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDBUser 4 | metadata: 5 | name: my-sharded-cluster-x509-user 6 | spec: 7 | username: CN=my-sharded-cluster-x509-user,OU=cloud,O=MongoDB,L=New York,ST=New York,C=US 8 | db: $external 9 | mongodbResourceRef: 10 | name: my-replica-set 11 | roles: 12 | - db: admin 13 | name: dbOwner 14 | -------------------------------------------------------------------------------- /samples/mongodb/backup/replica-set-backup-disabled.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDB 4 | metadata: 5 | name: my-replica-set-backup-disabled 6 | spec: 7 | members: 3 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | 11 | opsManager: 12 | configMapRef: 13 | name: my-project 14 | credentials: my-credentials 15 | backup: 16 | mode: disabled 17 | -------------------------------------------------------------------------------- /samples/mongodb/backup/replica-set-backup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDB 4 | metadata: 5 | name: my-replica-set-backup 6 | spec: 7 | members: 3 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | 11 | opsManager: 12 | configMapRef: 13 | name: my-project 14 | credentials: my-credentials 15 | backup: 16 | mode: enabled 17 | -------------------------------------------------------------------------------- /samples/mongodb/minimal/replica-set.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: 
MongoDB 4 | metadata: 5 | name: my-replica-set 6 | spec: 7 | members: 3 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | 11 | opsManager: 12 | configMapRef: 13 | name: my-project 14 | credentials: my-credentials 15 | 16 | persistent: false 17 | 18 | podSpec: 19 | # 'podTemplate' allows to set custom fields in PodTemplateSpec. 20 | # (https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#podtemplatespec-v1-core) 21 | # for the Database StatefulSet. 22 | podTemplate: 23 | spec: 24 | containers: 25 | - name: mongodb-enterprise-database 26 | resources: 27 | limits: 28 | cpu: "2" 29 | memory: 700M 30 | requests: 31 | cpu: "1" 32 | memory: 500M 33 | -------------------------------------------------------------------------------- /samples/mongodb/mongodb-options/replica-set-mongod-options.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDB 3 | metadata: 4 | name: my-replica-set-options 5 | spec: 6 | members: 3 7 | version: 4.2.8-ent 8 | type: ReplicaSet 9 | opsManager: 10 | configMapRef: 11 | name: my-project 12 | credentials: my-credentials 13 | persistent: true 14 | # optional. 
Allows to pass custom MongoDB process configuration 15 | additionalMongodConfig: 16 | systemLog: 17 | logAppend: true 18 | systemLog.verbosity: 4 19 | operationProfiling.mode: slowOp 20 | -------------------------------------------------------------------------------- /samples/mongodb/mongodb-options/sharded-cluster-mongod-options.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDB 3 | metadata: 4 | name: my-sharded-cluster-options 5 | spec: 6 | version: 4.2.8-ent 7 | type: ShardedCluster 8 | opsManager: 9 | configMapRef: 10 | name: my-project 11 | credentials: my-credentials 12 | persistent: true 13 | shardCount: 2 14 | mongodsPerShardCount: 3 15 | mongosCount: 2 16 | configServerCount: 1 17 | mongos: 18 | # optional. Allows to pass custom configuration for mongos processes 19 | additionalMongodConfig: 20 | systemLog: 21 | logAppend: true 22 | verbosity: 4 23 | configSrv: 24 | # optional. Allows to pass custom configuration for Config Server mongod processes 25 | additionalMongodConfig: 26 | operationProfiling: 27 | mode: slowOp 28 | shard: 29 | additionalMongodConfig: 30 | # optional. 
Allows to pass custom configuration for Shards mongod processes 31 | storage: 32 | journal: 33 | commitIntervalMs: 50 34 | -------------------------------------------------------------------------------- /samples/mongodb/pod-template/initcontainer-sysctl_config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDB 3 | metadata: 4 | name: my-replica-set 5 | namespace: mongodb 6 | spec: 7 | members: 3 8 | version: 4.2.2 9 | type: ReplicaSet 10 | 11 | cloudManager: 12 | configMapRef: 13 | name: my-project 14 | credentials: my-credentials 15 | 16 | persistent: false 17 | podSpec: 18 | podTemplate: 19 | spec: 20 | initContainers: 21 | - name: "apply-sysctl-test" 22 | image: "busybox:latest" 23 | securityContext: 24 | privileged: true 25 | command: ["sysctl", "-w", "net.ipv4.tcp_keepalive_time=120"] 26 | -------------------------------------------------------------------------------- /samples/mongodb/pod-template/standalone-pod-template.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: mongodb.com/v1 2 | kind: MongoDB 3 | metadata: 4 | name: my-standalone-pod-template 5 | spec: 6 | version: 4.2.11-ent 7 | type: Standalone 8 | opsManager: 9 | configMapRef: 10 | name: my-project 11 | credentials: my-credentials 12 | podSpec: 13 | podTemplate: 14 | spec: 15 | hostAliases: 16 | - ip: "1.2.3.4" 17 | hostnames: ["hostname"] 18 | -------------------------------------------------------------------------------- /samples/mongodb/project.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: my-project 6 | data: 7 | projectName: My Ops/Cloud Manager Project 8 | baseUrl: http://my-ops-cloud-manager-url 9 | 10 | # Optional parameters 11 | 12 | # If orgId is omitted a new organization will be created, with the same name as the Project. 
13 | orgId: my-org-id 14 | -------------------------------------------------------------------------------- /samples/mongodb/tls/standalone/standalone-tls.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: mongodb.com/v1 3 | kind: MongoDB 4 | metadata: 5 | name: my-tls-standalone 6 | spec: 7 | version: 4.0.14-ent 8 | 9 | opsManager: 10 | configMapRef: 11 | name: my-project 12 | credentials: my-credentials 13 | type: Standalone 14 | 15 | persistent: true 16 | 17 | # This will create a TLS enabled Standalone which means that the 18 | # traffic will be encrypted using TLS certificates. These 19 | # certificates will be generated on the fly by the operator using 20 | # the Kubernetes CA. 21 | # Please refer to Kubernetes TLS Documentation on how to approve these certs: 22 | # 23 | # https://kubernetes.io/docs/tasks/tls/managing-tls-in-a-cluster/ 24 | # 25 | security: 26 | tls: 27 | enabled: true 28 | -------------------------------------------------------------------------------- /samples/mongodb_multi/replica-set.yaml: -------------------------------------------------------------------------------- 1 | # sample mongodb-multi replicaset yaml 2 | --- 3 | apiVersion: mongodb.com/v1 4 | kind: MongoDBMulti 5 | metadata: 6 | name: multi-replica-set 7 | spec: 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | persistent: false 11 | duplicateServiceObjects: false 12 | credentials: my-credentials 13 | opsManager: 14 | configMapRef: 15 | name: my-project 16 | clusterSpecList: 17 | # provide spec per cluster 18 | clusterSpecs: 19 | # cluster names where you want to deploy the replicaset 20 | - clusterName: cluster1.mongokubernetes.com 21 | members: 2 22 | - clusterName: cluster2.mongokubernetes.com 23 | members: 1 24 | - clusterName: cluster3.mongokubernetes.com 25 | members: 2 26 | -------------------------------------------------------------------------------- /samples/mongodb_multicluster/replica-set.yaml: 
-------------------------------------------------------------------------------- 1 | # sample mongodb-multi replicaset yaml 2 | --- 3 | apiVersion: mongodb.com/v1 4 | kind: MongoDBMultiCluster 5 | metadata: 6 | name: multi-replica-set 7 | spec: 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | persistent: false 11 | duplicateServiceObjects: false 12 | credentials: my-credentials 13 | opsManager: 14 | configMapRef: 15 | name: my-project 16 | clusterSpecList: 17 | # cluster names where you want to deploy the replicaset 18 | - clusterName: cluster1.mongokubernetes.com 19 | members: 2 20 | - clusterName: cluster2.mongokubernetes.com 21 | members: 1 22 | - clusterName: cluster3.mongokubernetes.com 23 | members: 2 24 | -------------------------------------------------------------------------------- /samples/multi-cluster-cli-gitops/argocd/application.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: multi-cluster-replica-set 5 | namespace: argocd 6 | finalizers: 7 | - resources-finalizer.argocd.argoproj.io 8 | labels: 9 | name: database 10 | spec: 11 | project: my-project 12 | source: 13 | repoURL: https://github.com/mongodb/mongodb-enterprise-kubernetes 14 | targetRevision: "fix/ubi-8-repo-names" 15 | path: samples/multi-cluster-cli-gitops 16 | destination: 17 | server: https://central.mongokubernetes.com 18 | namespace: mongodb 19 | syncPolicy: 20 | automated: 21 | prune: true 22 | syncOptions: 23 | - CreateNamespace=true 24 | -------------------------------------------------------------------------------- /samples/multi-cluster-cli-gitops/argocd/project.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: AppProject 3 | metadata: 4 | name: my-project 5 | namespace: argocd 6 | finalizers: 7 | - resources-finalizer.argocd.argoproj.io 8 | spec: 9 | description: Example 
Project 10 | sourceRepos: 11 | - '*' 12 | destinations: 13 | - namespace: mongodb 14 | server: https://central.mongokubernetes.com 15 | clusterResourceWhitelist: 16 | # Allow MongoDBMultiCluster resources to be synced 17 | - group: '' 18 | kind: MongoDBMultiCluster 19 | # Allow Jobs to be created (used for sync hooks in this example) 20 | - group: '' 21 | kind: Job 22 | - group: '' 23 | kind: Namespace 24 | -------------------------------------------------------------------------------- /samples/multi-cluster-cli-gitops/resources/replica-set.yaml: -------------------------------------------------------------------------------- 1 | # sample mongodb-multi replicaset yaml 2 | --- 3 | apiVersion: mongodb.com/v1 4 | kind: MongoDBMultiCluster 5 | metadata: 6 | name: multi-replica-set 7 | spec: 8 | version: 4.4.0-ent 9 | type: ReplicaSet 10 | persistent: false 11 | duplicateServiceObjects: false 12 | credentials: my-credentials 13 | opsManager: 14 | configMapRef: 15 | name: my-project 16 | clusterSpecList: 17 | # cluster names where you want to deploy the replicaset 18 | - clusterName: cluster1.mongokubernetes.com 19 | members: 2 20 | - clusterName: cluster2.mongokubernetes.com 21 | members: 1 22 | - clusterName: cluster4.mongokubernetes.com 23 | members: 2 24 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_0.sh: -------------------------------------------------------------------------------- 1 | gcloud container clusters create "${K8S_CLUSTER_0}" \ 2 | --zone="${K8S_CLUSTER_0_ZONE}" \ 3 | --num-nodes="${K8S_CLUSTER_0_NUMBER_OF_NODES}" \ 4 | --machine-type "${K8S_CLUSTER_0_MACHINE_TYPE}" \ 5 | ${GKE_SPOT_INSTANCES_SWITCH:-""} 6 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_1.sh: -------------------------------------------------------------------------------- 1 | 
gcloud container clusters create "${K8S_CLUSTER_1}" \ 2 | --zone="${K8S_CLUSTER_1_ZONE}" \ 3 | --num-nodes="${K8S_CLUSTER_1_NUMBER_OF_NODES}" \ 4 | --machine-type "${K8S_CLUSTER_1_MACHINE_TYPE}" \ 5 | ${GKE_SPOT_INSTANCES_SWITCH:-""} 6 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0010_create_gke_cluster_2.sh: -------------------------------------------------------------------------------- 1 | gcloud container clusters create "${K8S_CLUSTER_2}" \ 2 | --zone="${K8S_CLUSTER_2_ZONE}" \ 3 | --num-nodes="${K8S_CLUSTER_2_NUMBER_OF_NODES}" \ 4 | --machine-type "${K8S_CLUSTER_2_MACHINE_TYPE}" \ 5 | ${GKE_SPOT_INSTANCES_SWITCH:-""} 6 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0011_gcloud_set_current_project.sh: -------------------------------------------------------------------------------- 1 | gcloud config set project "${MDB_GKE_PROJECT}" 2 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0020_get_gke_credentials.sh: -------------------------------------------------------------------------------- 1 | 2 | gcloud container clusters get-credentials "${K8S_CLUSTER_0}" --zone="${K8S_CLUSTER_0_ZONE}" 3 | gcloud container clusters get-credentials "${K8S_CLUSTER_1}" --zone="${K8S_CLUSTER_1_ZONE}" 4 | gcloud container clusters get-credentials "${K8S_CLUSTER_2}" --zone="${K8S_CLUSTER_2_ZONE}" 5 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0030_verify_access_to_clusters.sh: -------------------------------------------------------------------------------- 1 | echo "Nodes in cluster ${K8S_CLUSTER_0_CONTEXT_NAME}" 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" get nodes 3 | echo; echo "Nodes in cluster ${K8S_CLUSTER_1_CONTEXT_NAME}" 4 | 
kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" get nodes 5 | echo; echo "Nodes in cluster ${K8S_CLUSTER_2_CONTEXT_NAME}" 6 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" get nodes 7 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0040_install_istio.sh: -------------------------------------------------------------------------------- 1 | CTX_CLUSTER1=${K8S_CLUSTER_0_CONTEXT_NAME} \ 2 | CTX_CLUSTER2=${K8S_CLUSTER_1_CONTEXT_NAME} \ 3 | CTX_CLUSTER3=${K8S_CLUSTER_2_CONTEXT_NAME} \ 4 | ISTIO_VERSION="1.20.2" \ 5 | ../multi-cluster/install_istio_separate_network.sh 6 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0045_create_operator_namespace.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite 3 | 4 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 5 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite 6 | 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${OPERATOR_NAMESPACE}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" label namespace "${OPERATOR_NAMESPACE}" istio-injection=enabled --overwrite 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0045_create_ops_manager_namespace.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" create namespace "${NAMESPACE}" 2 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" label namespace 
"${NAMESPACE}" istio-injection=enabled --overwrite 3 | 4 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" create namespace "${NAMESPACE}" 5 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" label namespace "${NAMESPACE}" istio-injection=enabled --overwrite 6 | 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" create namespace "${NAMESPACE}" 8 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" label namespace "${NAMESPACE}" istio-injection=enabled --overwrite 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0046_create_image_pull_secrets.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" create secret generic "image-registries-secret" \ 2 | --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson 3 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret generic "image-registries-secret" \ 4 | --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson 5 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${NAMESPACE}" create secret generic "image-registries-secret" \ 6 | --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson 7 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${NAMESPACE}" create secret generic "image-registries-secret" \ 8 | --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0050_check_cluster_connectivity_create_sts_0.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" -f - <&1); 8 | grep 
"Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_0.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} 2 | target_pod="echoserver1-0" 3 | source_pod="echoserver0-0" 4 | target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_1_0_from_cluster_2.sh: -------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_2_CONTEXT_NAME} 2 | target_pod="echoserver1-0" 3 | source_pod="echoserver2-0" 4 | target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0090_check_cluster_connectivity_verify_pod_2_0_from_cluster_0.sh: 
-------------------------------------------------------------------------------- 1 | source_cluster=${K8S_CLUSTER_0_CONTEXT_NAME} 2 | target_pod="echoserver2-0" 3 | source_pod="echoserver0-0" 4 | target_url="http://${target_pod}.${NAMESPACE}.svc.cluster.local:8080" 5 | echo "Checking cross-cluster DNS resolution and connectivity from ${source_pod} in ${source_cluster} to ${target_pod}" 6 | out=$(kubectl exec --context "${source_cluster}" -n "${NAMESPACE}" "${source_pod}" -- \ 7 | /bin/bash -c "curl -v ${target_url}" 2>&1); 8 | grep "Hostname: ${target_pod}" &>/dev/null <<< "${out}" && echo "SUCCESS" || (echo "ERROR: ${out}" && return 1) 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0100_check_cluster_connectivity_cleanup.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" delete statefulset echoserver0 2 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${NAMESPACE}" delete statefulset echoserver1 3 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${NAMESPACE}" delete statefulset echoserver2 4 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver 5 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver 6 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver 7 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver0-0 8 | kubectl --context "${K8S_CLUSTER_1_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver1-0 9 | kubectl --context "${K8S_CLUSTER_2_CONTEXT_NAME}" -n "${NAMESPACE}" delete service echoserver2-0 10 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0200_kubectl_mongodb_configure_multi_cluster.sh: 
-------------------------------------------------------------------------------- 1 | kubectl mongodb multicluster setup \ 2 | --central-cluster="${K8S_CLUSTER_0_CONTEXT_NAME}" \ 3 | --member-clusters="${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}" \ 4 | --member-cluster-namespace="${NAMESPACE}" \ 5 | --central-cluster-namespace="${OPERATOR_NAMESPACE}" \ 6 | --create-service-account-secrets \ 7 | --install-database-roles=true \ 8 | --image-pull-secrets=image-registries-secret 9 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0205_helm_configure_repo.sh: -------------------------------------------------------------------------------- 1 | helm repo add mongodb https://mongodb.github.io/helm-charts 2 | helm repo update mongodb 3 | helm search repo "${OFFICIAL_OPERATOR_HELM_CHART}" 4 | 5 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0210_helm_install_operator.sh: -------------------------------------------------------------------------------- 1 | helm upgrade --install \ 2 | --debug \ 3 | --kube-context "${K8S_CLUSTER_0_CONTEXT_NAME}" \ 4 | mongodb-enterprise-operator-multi-cluster \ 5 | "${OPERATOR_HELM_CHART}" \ 6 | --namespace="${OPERATOR_NAMESPACE}" \ 7 | --set namespace="${OPERATOR_NAMESPACE}" \ 8 | --set operator.namespace="${OPERATOR_NAMESPACE}" \ 9 | --set operator.watchNamespace="${NAMESPACE}" \ 10 | --set operator.name=mongodb-enterprise-operator-multi-cluster \ 11 | --set operator.createOperatorServiceAccount=false \ 12 | --set operator.createResourcesServiceAccountsAndRoles=false \ 13 | --set "multiCluster.clusters={${K8S_CLUSTER_0_CONTEXT_NAME},${K8S_CLUSTER_1_CONTEXT_NAME},${K8S_CLUSTER_2_CONTEXT_NAME}}" \ 14 | --set "${OPERATOR_ADDITIONAL_HELM_VALUES:-"dummy=value"}" 15 | 
-------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0211_check_operator_deployment.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" rollout status deployment/mongodb-enterprise-operator-multi-cluster 2 | echo "Operator deployment in ${OPERATOR_NAMESPACE} namespace" 3 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" get deployments 4 | echo; echo "Operator pod in ${OPERATOR_NAMESPACE} namespace" 5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${OPERATOR_NAMESPACE}" get pods 6 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0255_create_cert_secrets.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret tls cert-prefix-om-cert \ 2 | --cert=certs/om.crt \ 3 | --key=certs/om.key 4 | 5 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create secret tls cert-prefix-om-db-cert \ 6 | --cert=certs/appdb.crt \ 7 | --key=certs/appdb.key 8 | 9 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create configmap om-cert-ca --from-file="mms-ca.crt=certs/ca.crt" 10 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" create configmap appdb-cert-ca --from-file="ca-pem=certs/ca.crt" 11 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0300_ops_manager_create_admin_credentials.sh: -------------------------------------------------------------------------------- 1 | kubectl --context "${K8S_CLUSTER_0_CONTEXT_NAME}" --namespace "${NAMESPACE}" create secret generic om-admin-user-credentials \ 2 | 
--from-literal=Username="admin" \ 3 | --from-literal=Password="Passw0rd@" \ 4 | --from-literal=FirstName="Jane" \ 5 | --from-literal=LastName="Doe" 6 | -------------------------------------------------------------------------------- /samples/ops-manager-multi-cluster/code_snippets/0310_ops_manager_deploy_on_single_member_cluster.sh: -------------------------------------------------------------------------------- 1 | kubectl apply --context "${K8S_CLUSTER_0_CONTEXT_NAME}" -n "${NAMESPACE}" -f - <