├── .gitattributes ├── .gitignore ├── .gitpod.Dockerfile ├── .gitpod.yml ├── .vscode └── settings.json ├── Dockerfile ├── LICENSE ├── README.md ├── bin ├── argo_get_password.sh ├── aws_cost_estimate.sh ├── create_datatap_to_standalone_df.sh ├── create_new_54_environment_from_scratch_with_picasso.sh ├── create_new_54_environment_from_scratch_with_picasso_and_mlops.sh ├── create_new_environment_from_scratch.sh ├── create_new_environment_from_scratch_with_picasso_and_mlops.sh ├── delete_k8scluster.sh ├── delete_k8sworker.sh ├── delete_tenant_and_k8scluster.sh ├── df-cluster-acl-ad_admin1.sh ├── dfcluster_edf_add_ad_users.sh ├── dfcluster_edf_mapr_password.sh ├── dfcluster_edf_maprcli.sh ├── dfcluster_edf_shutdown.sh ├── dfcluster_edf_startup_resume.sh ├── ec2_delete_mapr_all_cluster_instances.sh ├── ec2_delete_mapr_clus1_instances.sh ├── ec2_delete_mapr_clus2_instances.sh ├── ec2_gpu_instances_types.sh ├── ec2_instance_status.sh ├── ec2_instance_status_all_regions.sh ├── ec2_list_supported_instance_types.sh ├── ec2_start_ad_instance.sh ├── ec2_start_all_instances.sh ├── ec2_start_cp_instances.sh ├── ec2_start_cp_instances_except_gpu.sh ├── ec2_start_gpu_instances.sh ├── ec2_start_mapr_instances.sh ├── ec2_stop_ad_instance.sh ├── ec2_stop_all_instances.sh ├── ec2_stop_mapr_clus1_instances.sh ├── ec2_stop_mapr_clus2_instances.sh ├── ec2_stop_mapr_instances.sh ├── ec2_stop_worker_gpu_instances.sh ├── eks_list_all_clusters.sh ├── experimental │ ├── 01_configure_global_active_directory.sh │ ├── 02_gateway_add.sh │ ├── 03_k8sworkers_add.sh │ ├── 03_k8sworkers_add_with_picasso_tag.sh │ ├── 04_k8scluster_create.sh │ ├── 05_kubedirector_spark_create.sh │ ├── epic_catalog_image_install_all.sh │ ├── epic_catalog_image_install_by_name.sh │ ├── epic_catalog_image_install_spark23.sh │ ├── epic_catalog_image_install_spark24.sh │ ├── epic_catalog_image_status.sh │ ├── epic_enable_virtual_node_assignment.sh │ ├── epic_set_cpu_allocation_ratio.sh │ ├── epic_spark24_cluster_deploy.sh │ ├── epic_workers_add.sh │ ├── gitea_setup.sh │ ├── gitea_url.sh │ ├── install_hpecp_cli.sh │ ├── minio_create_bucket.sh │ ├── minio_get_gw_host_and_port.sh │ ├── minio_wait_for_mlflow_configured_state.sh │ ├── mlflow_cluster_create.sh │ ├── mlops_kubeflow_setup.sh │ ├── mlops_with_kubeflow_create.sh │ ├── policy_setup_gitea.sh │ ├── run_notebook_tests.sh │ ├── set_gateway_ssl.sh │ ├── setup_demo_tenant_ad.sh │ ├── setup_notebook.sh │ └── verify_kf.sh ├── get_argo_password.sh ├── get_k8s_host_ip.sh ├── get_k8s_masters.sh ├── get_kf_dashboard_auth_token.sh ├── get_kf_dashboard_url.sh ├── get_logs.sh ├── hosts_for_dev_env.sh ├── istio_ingress_details.sh ├── kubectl_as_admin.sh ├── ldapsearch.sh ├── list_k8sclusters.sh ├── list_k8sworkers.sh ├── list_tenants.sh ├── mapr_edge_demo │ ├── mapr_edge_demo_docs.sh │ ├── mapr_edge_demo_edge_watch.sh │ ├── mapr_edge_demo_hq_watch.sh │ ├── mapr_edge_demo_mapruserticket_setup.sh │ ├── mapr_edge_demo_poststartup.sh │ ├── mapr_edge_demo_poststartup_auditing.sh │ ├── mapr_edge_demo_poststartup_edge_replica.sh │ ├── mapr_edge_demo_poststartup_mirror.sh │ ├── mapr_edge_demo_query_db.sh │ ├── mapr_edge_demo_query_stream.sh │ ├── mapr_edge_demo_recreate_mapruserticket.sh │ ├── mapr_edge_demo_restart_vol_mirror.sh │ ├── mapr_edge_demo_start.sh │ ├── mapr_edge_demo_urls.sh │ ├── mapr_edge_demo_verify_setup.sh │ └── mapr_edge_demo_watch_mirror.sh ├── net_connectivity.sh ├── rdp_add_user.sh ├── rdp_credentials.sh ├── register_picasso.sh ├── ssh_ad_server.sh ├── ssh_controller.sh ├── ssh_gateway.sh ├── 
ssh_rdp_linux_server.sh ├── ssh_worker.sh ├── sshfs_rdp_linux_server.sh ├── terraform_apply.sh ├── terraform_apply_accept.sh ├── terraform_destroy.sh ├── terraform_destroy_accept.sh ├── terraform_get_worker_hosts_private_ips_by_index.py ├── terraform_get_worker_hosts_private_ips_by_index_as_array_of_strings.py ├── terraform_output.sh ├── terraform_plan.sh ├── terraform_refresh.sh ├── updates │ ├── fix_notebook_hadoop_perms.sh │ └── restart_trainingengineinstance_haproxy.sh ├── vpn_connectivity.sh ├── vpn_mac_connect.sh ├── vpn_mac_connect_with_keepalive.sh └── vpn_server_setup.sh ├── bluedata_infra_main.tf ├── bluedata_infra_main_eks.tf ├── bluedata_infra_main_iam.tf ├── bluedata_infra_main_scripts.tf ├── bluedata_infra_main_worker.tf ├── bluedata_infra_main_worker_gpu.tf ├── bluedata_infra_mapr_cluster1_worker.tf ├── bluedata_infra_mapr_cluster2_worker.tf ├── bluedata_infra_outputs.tf ├── bluedata_infra_variables.tf ├── build_ide.sh ├── docs ├── README-AD.md ├── README-ADDING-MORE-WORKERS.md ├── README-CONNECT-TO-CEPH-FROM-PYSPARK-NB.ipynb ├── README-CONNECT-TO-CEPH-FROM-PYTHON-NB.ipynb ├── README-COST-ESTIMATES.MD ├── README-DATA-FABRIC-ARCHITECTURE-OVERVIEW.md ├── README-DATA-TIERING.md ├── README-DATA-TIERING │ ├── image001.png │ ├── image002.png │ ├── image003.png │ ├── image004.png │ ├── image005.png │ ├── image006.png │ ├── image007.png │ ├── image008.png │ ├── image009.png │ ├── image010.png │ ├── image011.png │ ├── image012.png │ ├── image013.png │ ├── image014.png │ ├── image015.png │ ├── image016.png │ ├── image017.png │ ├── image018.png │ ├── image019.png │ ├── image020.png │ ├── image021.png │ └── image022.png ├── README-DESIGN-PRINCIPLES.md ├── README-DESTROY-DEMO-ENV.md ├── README-DF-EDGE-CORE-CLOUD.md ├── README-DF-EDGE-CORE-CLOUD2.md ├── README-EC2-START-STOP-STATUS.md ├── README-EC2-START-STOP-STATUS │ ├── running_instances.gif │ ├── starting_instances.gif │ └── stopping_instances.gif ├── README-EKS.md ├── README-EXTERNAL-MAPR.md ├── README-F5-BIGIP.md ├── README-F5-BIGIP │ └── bluedata_infra_main_bigip.tf ├── README-INSPECT-API.md ├── README-KNATIVE.md ├── README-LOGIN-MAPR-CONTROL-SYSTEM.md ├── README-MAPR-LDAP.md ├── README-MAPR-LDAP │ ├── add_ad_admin_user.png │ ├── add_ad_user_group.png │ ├── create_volume.png │ ├── create_volume_button.png │ ├── datatap_browser.png │ ├── login_ad_admin1.png │ ├── login_ad_user1.png │ ├── settings.py │ ├── spark_success.png │ ├── user_settings.png │ ├── volume_authorization.png │ └── volume_menu.png ├── README-POLICY-DEMO.md ├── README-POLICY-DEMO │ ├── add_policies.png │ ├── add_policies2.png │ ├── add_policy_to_cluster.png │ ├── add_policy_to_cluster2.png │ ├── fork_repo.png │ ├── view_argo.png │ └── view_argo2.png ├── README-RDP.md ├── README-RDP │ ├── rdp_browser.gif │ └── rdp_credentials.gif ├── README-SHARING-NON-TERRAFORM.md ├── README-SPARK-OPERATOR.ipynb ├── README-SSL-CERTIFICATES.md ├── README-SSL-CERTIFICATES │ └── install_docs_ssl_instruction.png ├── README-TAINT.MD ├── README-TROUBLESHOOTING.MD ├── README-TROUBLESHOOTING │ ├── mapr_datatap_browser_empty.png │ ├── mapr_datatap_browser_fixed.png │ └── unsupported_source_instance_type.png ├── README-VPN.md ├── README-VPN │ ├── mac-setup01.png │ ├── mac-setup02.png │ ├── vpn_mac_connect.gif │ └── vpn_server_setup.gif └── README │ ├── create_from_scratch.gif │ └── project_init.gif ├── etc ├── bluedata_infra.tfvars_example ├── bluedata_infra.tfvars_example_mlops ├── bluedata_infra.tfvars_example_picasso ├── bluedata_infra.tfvars_example_picasso_mlops ├── 
bluedata_infra.tfvars_template ├── bluedata_infra_eks.tfvars_template ├── bluedata_infra_gpu_workers.tfvars_template ├── bluedata_infra_instance_types.tfvars ├── bluedata_infra_mapr.tfvars_template ├── bluedata_infra_misc.tfvars ├── hpecp_cli_logging.conf ├── port_forwards.sh_template ├── postcreate.sh_template └── postcreate_core.sh_template ├── grep_log.sh ├── hpecp_env_conf.sh ├── learn ├── COURSE_CONTENT_TEMPLATE.md ├── Data_Fabric_Administration │ └── tiering.md ├── HCP_Administration │ ├── add_a_gateway_host.md │ ├── configure_active_directory.md │ ├── manual_install_ad.md │ ├── manual_install_deploying_the_platform.md │ ├── manual_install_deploying_the_platform │ │ └── docs_menu.png │ ├── manual_install_overview.md │ ├── manual_install_planning_the_deployment.md │ ├── manual_install_system_requirements.md │ ├── provision_demo_environment.md │ ├── shutdown_demo_environment.md │ ├── startup_demo_environment.md │ ├── status_demo_environment.md │ └── teardown_demo_environment.md ├── HCP_Advanced_Concepts │ ├── universal_concepts.md │ └── universal_concepts │ │ └── docs_menu.png ├── HCP_Foundation_Concepts │ ├── controller_gateway_and_worker_hosts.md │ ├── datataps.md │ └── software-components.md ├── Product_Overview │ └── hpe_container_strategy_market_overview.md ├── README.md └── hpe_container_strategy_market_overview.md ├── modules ├── module-ad-server │ ├── files │ │ ├── ad_set_posix_classes.ldif │ │ ├── ad_user_setup.sh │ │ ├── ldif_modify.sh │ │ └── run_ad.sh │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── module-controller │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── module-gateway │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── module-network │ ├── main-dns-knative.tf │ ├── main-dns.tf │ ├── main-nacl.tf │ ├── main-security-group.tf │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── module-nfs-server │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── module-rdp-server-linux │ ├── Desktop │ │ ├── bluedata-docs.desktop │ │ ├── code.desktop │ │ ├── github-project.desktop │ │ ├── mapr-password.desktop │ │ ├── mate-terminal.desktop │ │ ├── ssh_ad.desktop │ │ ├── ssh_controller.desktop │ │ └── ssh_gateway.desktop │ ├── Templates │ │ ├── HCP.admin.desktop.tpl │ │ ├── MCS.admin.desktop.tpl │ │ ├── ca-certs-setup.sh.tpl │ │ └── startup.desktop.tpl │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf └── module-rdp-server │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── picasso_deploy_loop.sh ├── resize_cloud9_ebs.sh ├── run_ide.sh ├── scripts ├── bluedata_install.sh ├── bluedata_prepare_all_unprepared_workers.sh ├── bluedata_prepare_worker.sh ├── check_client_ip.sh ├── check_prerequisites.sh ├── eks_import.sh ├── eks_setup.sh ├── end_user_scripts │ ├── ceph │ │ └── 1_demo_server_setup.sh │ ├── embedded_mapr │ │ ├── 1_setup_epic_mapr_sssd.sh │ │ ├── 2_setup_ubuntu_mapr_sssd_and_mapr_client.sh │ │ ├── 3_setup_datatap_5.0.sh │ │ └── 3_setup_datatap_new.sh │ ├── patch_datatap_5.1.1.sh │ └── standalone_mapr │ │ ├── register_license.sh │ │ ├── setup_datatap_5.1.sh │ │ ├── setup_edge_demo.sh │ │ ├── setup_mapr.sh │ │ ├── setup_ubuntu_mapr_client.sh │ │ └── setup_ubuntu_mapr_sssd.sh ├── functions.sh ├── install_spark3.sh ├── mapr_install.sh ├── mapr_update.sh ├── post_refresh_or_apply.sh ├── utility │ ├── credentials.json │ └── presign_upload.py ├── variables.sh ├── variables_dump.sh ├── velero_backup.sh ├── velero_install.sh ├── 
velero_restore.sh └── verify_ad_server_config.sh ├── static ├── basic-r-test.ipynb ├── datatap.ipynb ├── get_kf_dashboard_auth_token.sh ├── mlflow-seldon-serving.ipynb ├── pytest-launcher.ipynb ├── setLivy.ipynb ├── training-cluster-connection-test.ipynb └── wine-quality.csv ├── terragrunt.hcl └── versions.tf /.gitattributes: -------------------------------------------------------------------------------- 1 | *.sh text eol=lf 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | .terraform* 3 | 4 | *.log 5 | *-logs.tar.gz 6 | generated/log- 7 | 8 | terraform.tfstate 9 | terraform.tfstate.backup 10 | terraform.tfstate.*.backup 11 | 12 | terraform-plan-*.out 13 | 14 | etc/bluedata_infra.tfvars 15 | etc/port_forwards.sh 16 | 17 | public-ip-gateway.txt 18 | post_provision_steps.sh 19 | controller_rsa.prv 20 | hostdetails.txt 21 | controller.prv_key 22 | output.json 23 | generated/ 24 | tmp/ 25 | log/ 26 | .DS_Store 27 | bluedata_infra_main_bigip.tf 28 | etc/postcreate.sh 29 | 30 | credentials-velero 31 | velero-policy.json 32 | terraform.jq 33 | etc/bluedata_infra_eks.tfvars 34 | etc/bluedata_infra_gpu_workers.tfvars 35 | etc/bluedata_infra_mapr.tfvars 36 | -------------------------------------------------------------------------------- /.gitpod.Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gitpod/workspace-full 2 | 3 | USER gitpod 4 | 5 | # Install custom tools, runtime, etc. using apt-get 6 | # For example, the command below would install "bastet" - a command line tetris clone: 7 | # 8 | # RUN sudo apt-get -q update && # sudo apt-get install -yq bastet && # sudo rm -rf /var/lib/apt/lists/* 9 | # 10 | # More information: https://www.gitpod.io/docs/config-docker/ 11 | -------------------------------------------------------------------------------- /.gitpod.yml: -------------------------------------------------------------------------------- 1 | tasks: 2 | - init: echo "Replace me with a build script for the project." 3 | command: echo "Replace me with something that should run on every start, or just 4 | remove me entirely." 
5 | image: 6 | file: .gitpod.Dockerfile 7 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.exclude": { 3 | "**/.pyc": true 4 | }, 5 | "shellcheck.enable": true, 6 | "shellcheck.enableQuickFix": false, 7 | "shellcheck.run": "onSave", 8 | "shellcheck.executablePath": "/usr/local/bin/shellcheck" 9 | } -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM theiaide/theia-full:next 2 | 3 | RUN sudo apt-get update \ 4 | && sudo apt-get install -y software-properties-common \ 5 | && sudo add-apt-repository -y ppa:deadsnakes/ppa \ 6 | && sudo apt-get update \ 7 | && sudo apt-get install -y python3.5 python3.6 python3.7 python3.8 python3.9 tox python3-sphinx \ 8 | && pip3 install -U pylint 9 | -------------------------------------------------------------------------------- /bin/argo_get_password.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 CLUSTERNAME 8 | exit 1 9 | fi 10 | 11 | CLUSTERNAME=$1 12 | 13 | set -u 14 | 15 | source ./scripts/check_prerequisites.sh 16 | source ./scripts/variables.sh 17 | 18 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 19 | 20 | CLUSTERNAME=$CLUSTERNAME 21 | 22 | echo username: admin 23 | echo -n "password: " 24 | 25 | kubectl --kubeconfig <(./get_admin_kubeconfig.sh $CLUSTERNAME) get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 26 | 27 | EOF1 -------------------------------------------------------------------------------- /bin/aws_cost_estimate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | # See here for more information: https://github.com/antonbabenko/terraform-cost-estimation 7 | 8 | if [[ "$EUID" != "0" ]]; then 9 | echo "This script must be run as root - e.g." 10 | echo "sudo $0" 11 | exit 1 12 | fi 13 | 14 | 15 | 16 | 17 | echo -n "Checking jq version ... " 18 | if ! command -v jq >/dev/null 2>&1 || ! jq --version | grep 'jq-1.6.*'; then 19 | 20 | if [[ -z $C9_PROJECT ]]; then 21 | rm -f jq-linux64 22 | wget https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 23 | mv -f jq-linux64 /usr/local/bin/jq 24 | chmod +x /usr/local/bin/jq 25 | else 26 | echo "I need jq 1.6+. Aborting." 
27 | echo "Linux: https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64" 28 | exit 1 29 | fi 30 | fi 31 | 32 | curl -sLO https://raw.githubusercontent.com/antonbabenko/terraform-cost-estimation/master/terraform.jq 33 | 34 | 35 | STATE=$(terraform state pull | /usr/local/bin/jq -cf terraform.jq) 36 | echo 37 | echo "Sending data to https://cost.modules.tf/" 38 | echo "DATA: ${STATE}" 39 | echo 40 | echo ${STATE} | curl -s -X POST -H "Content-Type: application/json" -d @- https://cost.modules.tf/ 41 | echo 42 | 43 | 44 | -------------------------------------------------------------------------------- /bin/create_datatap_to_standalone_df.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "Use: ./scripts/end_user_scripts/standalone_mapr/setup_datatap_5.1.sh" 4 | -------------------------------------------------------------------------------- /bin/delete_k8scluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 CLUSTER_ID 8 | echo Where: CLUSTER_ID = /api/v2/k8scluster/[0-9]* 9 | exit 1 10 | fi 11 | 12 | set -u 13 | 14 | ./scripts/check_prerequisites.sh 15 | source ./scripts/variables.sh 16 | 17 | export TENANT_ID=$1 18 | 19 | # use the project's HPECP CLI config file 20 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 21 | 22 | FOUND_ID=$(hpecp k8scluster list --query "[?_links.self.href == '$CLUSTER_ID'] | [0] | [_links.self.href]" --output text) 23 | 24 | if [[ ! $FOUND_ID =~ ^\/api\/v2\/k8scluster\/[0-9]* ]]; 25 | then 26 | echo "Aborting. Tenant $CLUSTER_ID not found." 27 | exit 1 28 | fi 29 | 30 | hpecp k8scluster delete --id $CLUSTER_ID 31 | 32 | echo ' 33 | Delete submitted. To check progress run: 34 | 35 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 36 | hpecp k8scluster list 37 | ' -------------------------------------------------------------------------------- /bin/delete_k8sworker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 WORKER_ID 8 | echo Where: WORKER_ID = /api/v2/worker/k8shost/[0-9]* 9 | exit 1 10 | fi 11 | 12 | set -u 13 | 14 | ./scripts/check_prerequisites.sh 15 | source ./scripts/variables.sh 16 | 17 | export WORKER_ID=$1 18 | 19 | # use the project's HPECP CLI config file 20 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 21 | 22 | FOUND_ID=$(hpecp k8sworker list --query "[?_links.self.href == '$WORKER_ID'] | [0] | [_links.self.href]" --output text) 23 | 24 | if [[ ! $FOUND_ID =~ ^\/api\/v2\/worker\/k8shost\/[0-9]* ]]; 25 | then 26 | echo "Aborting. K8S WORKER $WORKER_ID not found." 27 | exit 1 28 | fi 29 | 30 | hpecp k8sworker delete --id $WORKER_ID 31 | 32 | echo ' 33 | Delete submitted. 
To check progress run: 34 | 35 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 36 | hpecp k8sworker list 37 | ' -------------------------------------------------------------------------------- /bin/delete_tenant_and_k8scluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 TENANT_ID 8 | echo Where: TENANT_ID = /api/v1/tenant/[0-9]* 9 | exit 1 10 | fi 11 | 12 | set -u 13 | 14 | ./scripts/check_prerequisites.sh 15 | source ./scripts/variables.sh 16 | 17 | export TENANT_ID=$1 18 | 19 | # use the project's HPECP CLI config file 20 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 21 | 22 | FOUND_ID=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.self.href]" --output text) 23 | 24 | if [[ ! $FOUND_ID =~ ^\/api\/v1\/tenant\/[0-9]* ]]; 25 | then 26 | echo "Aborting. Tenant $TENANT_ID not found." 27 | exit 1 28 | fi 29 | 30 | export CLUSTER_ID=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text) 31 | 32 | hpecp tenant delete --id $TENANT_ID --wait-for-delete-sec 1800 # 30 minutes 33 | 34 | if [[ $CLUSTER_ID =~ ^\/api\/v2\/k8scluster\/[0-9]* ]]; 35 | then 36 | hpecp k8scluster delete --id $CLUSTER_ID --wait-for-delete-sec 1800 # 30 minutes 37 | fi -------------------------------------------------------------------------------- /bin/df-cluster-acl-ad_admin1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | set -e # abort on error 5 | set -u # abort on undefined variable 6 | 7 | source "scripts/variables.sh" 8 | 9 | if [[ "$AD_SERVER_ENABLED" == False ]]; then 10 | echo "Skipping script '$0' because AD Server is not enabled" 11 | exit 12 | fi 13 | 14 | ssh -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -tt -T centos@${CTRL_PUB_IP} <<-SSH_EOF 15 | set -eu 16 | bdmapr maprcli acl edit -type cluster -user ad_admin1:fc 17 | SSH_EOF 18 | -------------------------------------------------------------------------------- /bin/dfcluster_edf_add_ad_users.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./bin/kubectl_as_admin.sh dfcluster exec admincli-0 -n dfdemo -- maprcli acl edit -type cluster -user ad_admin1:fc 4 | ./bin/kubectl_as_admin.sh dfcluster exec admincli-0 -n dfdemo -- maprcli acl edit -type cluster -user ad_user1:login -------------------------------------------------------------------------------- /bin/dfcluster_edf_mapr_password.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./bin/kubectl_as_admin.sh dfcluster -n dfdemo get secret system -o yaml | grep MAPR_PASSWORD | head -1 | awk '{print $2}' | base64 --decode 4 | 5 | echo 6 | -------------------------------------------------------------------------------- /bin/dfcluster_edf_maprcli.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./bin/kubectl_as_admin.sh dfcluster exec admincli-0 -n dfdemo -- maprcli $@ -------------------------------------------------------------------------------- /bin/dfcluster_edf_shutdown.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ./bin/kubectl_as_admin.sh dfcluster exec admincli-0 -n dfdemo -- edf shutdown cluster 
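#
# Usage sketch (no arguments; assumes the 'dfcluster' data fabric cluster created by this
# project is running in the 'dfdemo' namespace, as hardcoded above):
#
#   ./bin/dfcluster_edf_shutdown.sh          # quiesce the data fabric before stopping the EC2 instances
#   ./bin/dfcluster_edf_startup_resume.sh    # resume it after the instances are started again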
-------------------------------------------------------------------------------- /bin/dfcluster_edf_startup_resume.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -x 4 | 5 | ./bin/kubectl_as_admin.sh dfcluster exec admincli-0 -n dfdemo -- edf startup resume -------------------------------------------------------------------------------- /bin/ec2_delete_mapr_all_cluster_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | if [[ $MAPR_CLUSTER1_COUNT == 3 ]]; then 6 | (set -x; ./bin/terraform_apply.sh -var='mapr_cluster_1_count=0' -var='mapr_cluster_2_count=0') 7 | 8 | echo "NOTE: Deleted MAPR clusters will be reinstated after running './bin/terraform_apply.sh'" 9 | fi 10 | 11 | 12 | -------------------------------------------------------------------------------- /bin/ec2_delete_mapr_clus1_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | if [[ $MAPR_CLUSTER1_COUNT == 3 ]]; then 6 | (set -x; ./bin/terraform_apply.sh -var='mapr_cluster_1_count=0') 7 | 8 | echo "NOTE: Deleted MAPR cluster will be reinstated after running './bin/terraform_apply.sh'" 9 | fi 10 | 11 | 12 | -------------------------------------------------------------------------------- /bin/ec2_delete_mapr_clus2_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | if [[ $MAPR_CLUSTER1_COUNT == 3 ]]; then 6 | (set -x; ./bin/terraform_apply.sh -var='mapr_cluster_2_count=0') 7 | 8 | echo "NOTE: Deleted MAPR cluster will be reinstated after running './bin/terraform_apply.sh'" 9 | fi 10 | 11 | 12 | -------------------------------------------------------------------------------- /bin/ec2_gpu_instances_types.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | REGION=$1 4 | 5 | if [[ -z "$REGION" ]]; 6 | then 7 | echo Usage: $0 AWS-REGION 8 | echo 9 | echo Example: 10 | echo -------- 11 | echo $0 eu-west-2 12 | exit 1 13 | fi 14 | 15 | 16 | aws ec2 describe-instance-types --query 'InstanceTypes[?GpuInfo!=null].[InstanceType]' --output text --region "${REGION}" 17 | -------------------------------------------------------------------------------- /bin/ec2_instance_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source "./scripts/variables.sh" 3 | 4 | aws --region ${REGION} --profile ${PROFILE} ec2 describe-instances \ 5 | --instance-ids ${ALL_INSTANCE_IDS} \ 6 | --output table \ 7 | --query "Reservations[*].Instances[*].{ExtIP:PublicIpAddress,IntIP:PrivateIpAddress,ID:InstanceId,Type:InstanceType,State:State.Name,Name:Tags[?Key=='Name']|[0].Value} | [][] | sort_by(@, &Name)" 8 | 9 | 10 | if [[ "$CREATE_EKS_CLUSTER" == "True" ]]; then 11 | EKS_CLUSTER_NAME=$(terraform output eks-cluster-name) 12 | 13 | CLUSTER_STATUS=$(aws eks --region eu-west-3 --profile default \ 14 | describe-cluster --name ${EKS_CLUSTER_NAME} \ 15 | --query 'cluster.status') 16 | 17 | echo 18 | echo "You have the following EKS clusters:" 19 | echo 20 | echo ${EKS_CLUSTER_NAME}: ${CLUSTER_STATUS} 21 | 22 | echo 23 | echo "You have the following EKS EC2 instances:" 24 | echo 25 | 26 | aws ec2 describe-instances --region $REGION --profile $PROFILE --filters 
Name=tag:eks:nodegroup-name,Values=${EKS_CLUSTER_NAME} --output table \ 27 | --query "Reservations[*].Instances[*].{ExtIP:PublicIpAddress,IntIP:PrivateIpAddress,ID:InstanceId,Type:InstanceType,State:State.Name,Name:Tags[?Key=='Name']|[0].Value} | [][] | sort_by(@, &Name)" 28 | 29 | echo 30 | echo "To delete the EKS instances you need to remove the node group: e.g." 31 | echo 32 | tput setaf 1 33 | echo "aws eks delete-nodegroup --region $REGION --profile $PROFILE --cluster-name $EKS_CLUSTER_NAME --nodegroup-name $EKS_CLUSTER_NAME" 34 | tput sgr0 35 | echo 36 | echo "The instances will be reinstated with './bin/terraform_apply.sh'" 37 | echo 38 | fi -------------------------------------------------------------------------------- /bin/ec2_instance_status_all_regions.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | for curr_region in $(aws ec2 --region ${REGION} describe-regions --output text | cut -f4); do 6 | echo -e "\nListing Instances Status in region:'${curr_region}' ... matching '${USER_TAG}' "; 7 | aws ec2 --region $curr_region describe-instances \ 8 | --query "Reservations[*].Instances[*].{IP:PublicIpAddress,ID:InstanceId,Type:InstanceType,State:State.Name,Name:Tags[0].Value}" \ 9 | --filters Name=instance-state-name,Values=running \ 10 | --filters Name=tag:user,Values=${USER_TAG} \ 11 | --output=table 12 | done -------------------------------------------------------------------------------- /bin/ec2_list_supported_instance_types.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | REGION=$1 4 | FILTER=$2 5 | 6 | if [[ -z $REGION ]]; 7 | then 8 | echo "Usage: $0 aws-region [instance-type-filter]" 9 | echo 10 | echo "Examples:" 11 | echo " $0 eu-west-3" 12 | echo " $0 eu-west-3 m5" 13 | echo 14 | exit 1 15 | fi 16 | 17 | aws ec2 describe-instance-type-offerings --region $REGION --filters Name=instance-type,Values=${FILTER}* --output text 18 | -------------------------------------------------------------------------------- /bin/ec2_start_ad_instance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | aws --region $REGION --profile $PROFILE ec2 start-instances \ 6 | --instance-ids $AD_INSTANCE_ID \ 7 | --output table \ 8 | --query "StartingInstances[*].{ID:InstanceId,State:CurrentState.Name}" 9 | 10 | CURR_CLIENT_CIDR_BLOCK="$(curl -s http://ipinfo.io/ip)/32" 11 | 12 | # check if the client IP address has changed 13 | if [[ "$CLIENT_CIDR_BLOCK" = "$CURR_CLIENT_CIDR_BLOCK" ]]; then 14 | UPDATE_COMMAND="refresh" 15 | else 16 | UPDATE_COMMAND="apply" 17 | fi 18 | 19 | echo "***********************************************************************************************************" 20 | echo "IMPORTANT: You need to run the following command to update your local state:" 21 | echo 22 | echo " ./bin/terraform_$UPDATE_COMMAND.sh" 23 | echo 24 | echo " If you encounter an error running ./bin/terraform_$UPDATE_COMMAND.sh it is probably because your" 25 | echo " instances are not ready yet. 
You can check the instances status with:" 26 | echo 27 | echo " ./generated/cli_running_ec2_instances.sh" 28 | echo "***********************************************************************************************************" -------------------------------------------------------------------------------- /bin/ec2_start_all_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | aws --region $REGION --profile $PROFILE ec2 start-instances \ 6 | --instance-ids $ALL_INSTANCE_IDS \ 7 | --output table \ 8 | --query "StartingInstances[*].{ID:InstanceId,State:CurrentState.Name}" 9 | 10 | CURR_CLIENT_CIDR_BLOCK="$(curl -s http://ipinfo.io/ip)/32" 11 | 12 | # check if the client IP address has changed 13 | if [[ "$CLIENT_CIDR_BLOCK" = "$CURR_CLIENT_CIDR_BLOCK" ]]; then 14 | UPDATE_COMMAND="refresh" 15 | else 16 | UPDATE_COMMAND="apply" 17 | fi 18 | 19 | echo "***********************************************************************************************************" 20 | echo "IMPORTANT: You need to run the following command to update your local state:" 21 | echo 22 | echo " ./bin/terraform_$UPDATE_COMMAND.sh" 23 | echo 24 | echo " If you encounter an error running ./bin/terraform_$UPDATE_COMMAND.sh it is probably because your" 25 | echo " instances are not ready yet. You can check the instances status with:" 26 | echo 27 | echo " ./generated/cli_running_ec2_instances.sh" 28 | echo "***********************************************************************************************************" -------------------------------------------------------------------------------- /bin/ec2_start_cp_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | aws --region $REGION --profile $PROFILE ec2 start-instances \ 6 | --instance-ids $ALL_CP_INSTANCE_IDS \ 7 | --output table \ 8 | --query "StartingInstances[*].{ID:InstanceId,State:CurrentState.Name}" 9 | 10 | CURR_CLIENT_CIDR_BLOCK="$(curl -s http://ipinfo.io/ip)/32" 11 | 12 | # check if the client IP address has changed 13 | if [[ "$CLIENT_CIDR_BLOCK" = "$CURR_CLIENT_CIDR_BLOCK" ]]; then 14 | UPDATE_COMMAND="refresh" 15 | else 16 | UPDATE_COMMAND="apply" 17 | fi 18 | 19 | echo "***********************************************************************************************************" 20 | echo "IMPORTANT: You need to run the following command to update your local state:" 21 | echo 22 | echo " ./bin/terraform_$UPDATE_COMMAND.sh" 23 | echo 24 | echo " If you encounter an error running ./bin/terraform_$UPDATE_COMMAND.sh it is probably because your" 25 | echo " instances are not ready yet. 
You can check the instances status with:" 26 | echo 27 | echo " ./generated/cli_running_ec2_instances.sh" 28 | echo "***********************************************************************************************************" -------------------------------------------------------------------------------- /bin/ec2_start_cp_instances_except_gpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | aws --region $REGION --profile $PROFILE ec2 start-instances \ 6 | --instance-ids ${CTRL_INSTANCE_ID} ${GATW_INSTANCE_ID} ${WRKR_INSTANCE_IDS} ${NFS_INSTANCE_ID} ${AD_INSTANCE_ID} ${RDP_INSTANCE_ID} \ 7 | --output table \ 8 | --query "StartingInstances[*].{ID:InstanceId,State:CurrentState.Name}" 9 | 10 | CURR_CLIENT_CIDR_BLOCK="$(curl -s http://ipinfo.io/ip)/32" 11 | 12 | # check if the client IP address has changed 13 | if [[ "$CLIENT_CIDR_BLOCK" = "$CURR_CLIENT_CIDR_BLOCK" ]]; then 14 | UPDATE_COMMAND="refresh" 15 | else 16 | UPDATE_COMMAND="apply" 17 | fi 18 | 19 | echo "***********************************************************************************************************" 20 | echo "IMPORTANT: You need to run the following command to update your local state:" 21 | echo 22 | echo " ./bin/terraform_$UPDATE_COMMAND.sh" 23 | echo 24 | echo " If you encounter an error running ./bin/terraform_$UPDATE_COMMAND.sh it is probably because your" 25 | echo " instances are not ready yet. You can check the instances status with:" 26 | echo 27 | echo " ./generated/cli_running_ec2_instances.sh" 28 | echo "***********************************************************************************************************" -------------------------------------------------------------------------------- /bin/ec2_start_gpu_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | aws --region $REGION --profile $PROFILE ec2 start-instances \ 6 | --instance-ids $WRKR_GPU_INSTANCE_IDS \ 7 | --output table \ 8 | --query "StartingInstances[*].{ID:InstanceId,State:CurrentState.Name}" 9 | 10 | CURR_CLIENT_CIDR_BLOCK="$(curl -s http://ipinfo.io/ip)/32" 11 | 12 | # check if the client IP address has changed 13 | if [[ "$CLIENT_CIDR_BLOCK" = "$CURR_CLIENT_CIDR_BLOCK" ]]; then 14 | UPDATE_COMMAND="refresh" 15 | else 16 | UPDATE_COMMAND="apply" 17 | fi 18 | 19 | echo "***********************************************************************************************************" 20 | echo "IMPORTANT: You need to run the following command to update your local state:" 21 | echo 22 | echo " ./bin/terraform_$UPDATE_COMMAND.sh" 23 | echo 24 | echo " If you encounter an error running ./bin/terraform_$UPDATE_COMMAND.sh it is probably because your" 25 | echo " instances are not ready yet. 
You can check the instances status with:" 26 | echo 27 | echo " ./generated/cli_running_ec2_instances.sh" 28 | echo "***********************************************************************************************************" -------------------------------------------------------------------------------- /bin/ec2_start_mapr_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | aws --region $REGION --profile $PROFILE ec2 start-instances \ 6 | --instance-ids $ALL_MAPR_INSTANCE_IDS \ 7 | --output table \ 8 | --query "StartingInstances[*].{ID:InstanceId,State:CurrentState.Name}" 9 | 10 | CURR_CLIENT_CIDR_BLOCK="$(curl -s http://ipinfo.io/ip)/32" 11 | 12 | # check if the client IP address has changed 13 | if [[ "$CLIENT_CIDR_BLOCK" = "$CURR_CLIENT_CIDR_BLOCK" ]]; then 14 | UPDATE_COMMAND="refresh" 15 | else 16 | UPDATE_COMMAND="apply" 17 | fi 18 | 19 | echo "***********************************************************************************************************" 20 | echo "IMPORTANT: You need to run the following command to update your local state:" 21 | echo 22 | echo " ./bin/terraform_$UPDATE_COMMAND.sh" 23 | echo 24 | echo " If you encounter an error running ./bin/terraform_$UPDATE_COMMAND.sh it is probably because your" 25 | echo " instances are not ready yet. You can check the instances status with:" 26 | echo 27 | echo " ./generated/cli_running_ec2_instances.sh" 28 | echo "***********************************************************************************************************" -------------------------------------------------------------------------------- /bin/ec2_stop_ad_instance.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source "./scripts/variables.sh" 3 | 4 | set +u 5 | 6 | echo "Stopping instances" 7 | aws --region $REGION --profile $PROFILE ec2 stop-instances \ 8 | --instance-ids $AD_INSTANCE_ID \ 9 | --output table \ 10 | --query "StoppingInstances[*].{ID:InstanceId,State:CurrentState.Name}" -------------------------------------------------------------------------------- /bin/ec2_stop_mapr_clus1_instances.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source "./scripts/variables.sh" 3 | 4 | set +u 5 | 6 | SSH_OPTS="-o StrictHostKeyChecking=no -o ConnectTimeout=10 -o ConnectionAttempts=1 -q" 7 | CMD='nohup sudo halt -n "$JSON_FILE"<<-EOF 26 | { 27 | "external_identity_server": { 28 | "bind_pwd":"5ambaPwd@", 29 | "user_attribute":"sAMAccountName", 30 | "bind_type":"search_bind", 31 | "bind_dn":"cn=Administrator,CN=Users,DC=samdom,DC=example,DC=com", 32 | "host":"${AD_PRV_IP}", 33 | "security_protocol":"ldaps", 34 | "base_dn":"CN=Users,DC=samdom,DC=example,DC=com", 35 | "verify_peer": false, 36 | "type":"Active Directory", 37 | "port":636 38 | } 39 | } 40 | EOF 41 | hpecp httpclient post /api/v2/config/auth --json-file "${JSON_FILE}" 42 | -------------------------------------------------------------------------------- /bin/experimental/02_gateway_add.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | if [[ ! 
-d generated ]]; then
7 | echo "This file should be executed from the project directory"
8 | exit 1
9 | fi
10 | 
11 | ./scripts/check_prerequisites.sh
12 | source ./scripts/variables.sh
13 | 
14 | # use the project's HPECP CLI config file
15 | export HPECP_CONFIG_FILE="./generated/hpecp.conf"
16 | 
17 | echo "Deleting and creating lock"
18 | hpecp lock delete-all
19 | hpecp lock create "Install Gateway"
20 | 
21 | if [[ "${CREATE_EIP_GATEWAY}" == "True" ]];
22 | then
23 | CONFIG_GATEWAY_DNS=$GATW_PUB_DNS
24 | else
25 | CONFIG_GATEWAY_DNS=$GATW_PRV_DNS
26 | fi
27 | 
28 | echo "Configuring the Gateway"
29 | GATEWAY_ID=$(hpecp gateway create-with-ssh-key "$GATW_PRV_IP" "$CONFIG_GATEWAY_DNS" --ssh-key-file ./generated/controller.prv_key)
30 | 
31 | echo "Waiting for gateway to have state 'installed'"
32 | hpecp gateway wait-for-state "${GATEWAY_ID}" --states "['installed']" --timeout-secs 1800
33 | 
34 | echo "Removing locks"
35 | hpecp gateway list
36 | hpecp lock delete-all --timeout-secs 1800
37 | 
38 | 
-------------------------------------------------------------------------------- 
/bin/experimental/epic_catalog_image_install_all.sh: 
-------------------------------------------------------------------------------- 
1 | #!/bin/bash
2 | 
3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
4 | 
5 | echo "Installing all catalog images - note that this script does NOT check for status=success."
6 | 
7 | CATALOG_IMAGES=$(hpecp catalog list --query "[?state!='installed' && state!='installing' && state!='verifying' && state!='downloading'] | [*].[_links.self.href] | []" --output text)
8 | 
9 | for IMG_ID in $CATALOG_IMAGES
10 | do
11 | echo "Installing: $IMG_ID"
12 | hpecp catalog install $IMG_ID
13 | done
14 | 
15 | . ${SCRIPT_DIR}/epic_catalog_image_status.sh
16 | 
-------------------------------------------------------------------------------- 
/bin/experimental/epic_catalog_image_install_by_name.sh: 
-------------------------------------------------------------------------------- 
1 | #!/bin/bash
2 | 
3 | if [[ $# != 1 ]]; then
4 | echo Usage: $0 IMAGE_NAME
5 | exit 1
6 | fi
7 | 
8 | IMG_NAME="$1"
9 | 
10 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
11 | 
12 | echo "Installing '${IMG_NAME}' catalog image"
13 | 
14 | CATALOG_IMAGES=$(hpecp catalog list --query "[? contains(label.name, '${IMG_NAME}')] | [?state!='installed' && state!='installing' && state!='verifying' && state!='downloading'] | [*].[_links.self.href] | []" --output text)
15 | 
16 | if [[ -z "$CATALOG_IMAGES" ]]; then
17 | echo "'${IMG_NAME}' not found - exiting."
18 | exit 0
19 | fi
20 | 
21 | for IMG_ID in $CATALOG_IMAGES
22 | do
23 | echo "Installing: $IMG_ID"
24 | hpecp catalog install $IMG_ID
25 | 
26 | for i in {1..1000}; do
27 | STATE=$(hpecp catalog list --query "[?_links.self.href=='${IMG_ID}'] | [*].[state]" --output text)
28 | echo "State: $STATE"
29 | if [[ "$STATE" == "installed" ]]; then
30 | break
31 | else
32 | sleep 60
33 | fi
34 | done
35 | done
36 | 
37 | 
-------------------------------------------------------------------------------- 
/bin/experimental/epic_catalog_image_install_spark23.sh: 
-------------------------------------------------------------------------------- 
1 | #!/bin/bash
2 | 
3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
4 | 
5 | echo "Installing Spark23* catalog image"
6 | 
7 | CATALOG_IMAGES=$(hpecp catalog list --query "[? 
contains(label.name, 'Spark23')] | [?state!='installed' && state!='installing' && state!='verifying' && state!='downloading'] | [*].[_links.self.href] | []" --output text) 8 | 9 | for IMG_ID in $CATALOG_IMAGES 10 | do 11 | echo "Installing: $IMG_ID" 12 | hpecp catalog install $IMG_ID 13 | 14 | for i in {1..1000}; do 15 | STATE=$(hpecp catalog list --query "[?_links.self.href=='${IMG_ID}'] | [*].[state]" --output text) 16 | echo "State: $STATE" 17 | if [[ "$STATE" == "installed" ]]; then 18 | break 19 | else 20 | sleep 60 21 | fi 22 | done 23 | done 24 | 25 | -------------------------------------------------------------------------------- /bin/experimental/epic_catalog_image_install_spark24.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | 5 | echo "Installing Spark24* catalog image" 6 | 7 | CATALOG_IMAGES=$(hpecp catalog list --query "[? contains(label.name, 'Spark24')] | [?state!='installed' && state!='installing' && state!='verifying' && state!='downloading'] | [*].[_links.self.href] | []" --output text) 8 | 9 | for IMG_ID in $CATALOG_IMAGES 10 | do 11 | echo "Installing: $IMG_ID" 12 | hpecp catalog install $IMG_ID 13 | 14 | for i in {1..1000}; do 15 | STATE=$(hpecp catalog list --query "[?_links.self.href=='${IMG_ID}'] | [*].[state]" --output text) 16 | echo "State: $STATE" 17 | if [[ "$STATE" == "installed" ]]; then 18 | break 19 | else 20 | sleep 60 21 | fi 22 | done 23 | done 24 | 25 | -------------------------------------------------------------------------------- /bin/experimental/epic_catalog_image_status.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ./scripts/functions.sh 4 | 5 | RED='\033[0;31m' 6 | NC='\033[0m' # No Color 7 | 8 | echo 9 | echo -e "${RED}EPIC Catalog Images : Installed${NC}" 10 | hpecp catalog list --query "[?state=='installed'] | [*].[_links.self.href,state,label.name]" --output text 11 | 12 | echo 13 | echo -e "${RED}EPIC Catalog Images : NOT Installed${NC}" 14 | hpecp catalog list --query "[?state!='installed'] | [*].[_links.self.href,state,label.name]" --output text 15 | -------------------------------------------------------------------------------- /bin/experimental/epic_enable_virtual_node_assignment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | source ./scripts/functions.sh 6 | 7 | hpecp httpclient put /api/v1/workers/1 --json-file <(echo '{"operation": "schedule", "id": "/api/v1/workers/1", "schedule": true}') 8 | 9 | echo "Request completed successfully" -------------------------------------------------------------------------------- /bin/experimental/epic_set_cpu_allocation_ratio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | source ./scripts/variables.sh 6 | source ./scripts/functions.sh 7 | 8 | pip3 install --quiet --upgrade --user hpecp 9 | 10 | # use the project's HPECP CLI config file 11 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 12 | 13 | echo "Deleting and creating lock" 14 | hpecp lock delete-all 15 | hpecp lock create "Set CPU allocation ratio" 16 | 17 | hpecp httpclient put /api/v1/install/?install_reconfig --json-file <(echo '{"cpu_allocation_ratio": 2}') 18 | 19 | echo "Request successful - exiting site lock-down" 20 | 21 | hpecp lock delete-all 
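#
# The ratio value (2) is hardcoded in the JSON payload above. A different oversubscription
# factor can be applied by adjusting the payload before running the script, e.g. (the value
# 4 is illustrative):
#
#   hpecp httpclient put /api/v1/install/?install_reconfig --json-file <(echo '{"cpu_allocation_ratio": 4}')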
-------------------------------------------------------------------------------- /bin/experimental/epic_spark24_cluster_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | HOST_IPS=( "$@" ) 4 | 5 | set -e # abort on error 6 | set -u # abort on undefined variable 7 | 8 | if [[ ! -d generated ]]; then 9 | echo "This file should be executed from the project directory" 10 | exit 1 11 | fi 12 | 13 | ./scripts/check_prerequisites.sh 14 | source ./scripts/variables.sh 15 | 16 | pip3 install --quiet --upgrade --user hpecp 17 | 18 | # use the project's HPECP CLI config file 19 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 20 | 21 | # Test CLI is able to connect 22 | echo "Platform ID: $(hpecp license platform-id)" 23 | 24 | DISTRO_ID="$(hpecp catalog list --query "[? contains(label.name, 'Spark24')] | [0] | [distro_id]" --output text)" 25 | 26 | PROFILE=tenant2 hpecp httpclient post /api/v2/cluster/ <(echo " 27 | { 28 | \"isolated\": false, 29 | \"label\": { 30 | \"name\": \"spark cluster 1\", 31 | \"description\": \"\" 32 | }, 33 | \"dependent_nodegroups\": [], 34 | \"debug\": false, 35 | \"two_phase_delete\": false, 36 | \"nodegroup\": { 37 | \"role_configs\": [ 38 | { 39 | \"node_count\": 1, 40 | \"flavor\": \"/api/v1/flavor/3\", 41 | \"role_id\": \"controller\" 42 | }, 43 | { 44 | \"node_count\": 0, 45 | \"flavor\": \"/api/v1/flavor/1\", 46 | \"role_id\": \"worker\" 47 | }, 48 | { 49 | \"node_count\": 0, 50 | \"flavor\": \"/api/v1/flavor/1\", 51 | \"role_id\": \"jupyter\" 52 | }, 53 | { 54 | \"node_count\": 1, 55 | \"flavor\": \"/api/v1/flavor/3\", 56 | \"role_id\": \"jupyterhub\" 57 | }, 58 | { 59 | \"node_count\": 0, 60 | \"flavor\": \"/api/v1/flavor/1\", 61 | \"role_id\": \"rstudio\" 62 | }, 63 | { 64 | \"node_count\": 0, 65 | \"flavor\": \"/api/v1/flavor/1\", 66 | \"role_id\": \"gateway\" 67 | } 68 | ], 69 | \"catalog_entry_distro_id\": \"${DISTRO_ID}\", 70 | \"config_choice_selections\": [], 71 | \"constraints\": [] 72 | } 73 | } 74 | ") -------------------------------------------------------------------------------- /bin/experimental/epic_workers_add.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | HOST_IPS=( "$@" ) 4 | 5 | set -e # abort on error 6 | set -u # abort on undefined variable 7 | 8 | if [[ ! 
-d generated ]]; then 9 | echo "This file should be executed from the project directory" 10 | exit 1 11 | fi 12 | 13 | ./scripts/check_prerequisites.sh 14 | source ./scripts/variables.sh 15 | 16 | pip3 install --quiet --upgrade --user hpecp 17 | 18 | # use the project's HPECP CLI config file 19 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 20 | 21 | # Test CLI is able to connect 22 | echo "Platform ID: $(hpecp license platform-id)" 23 | 24 | echo "Deleting and creating lock" 25 | hpecp lock delete-all 26 | hpecp lock create "Install EPIC Workers" 27 | 28 | echo "Adding workers" 29 | WRKR_IDS=() 30 | for WRKR in ${HOST_IPS[@]}; do 31 | echo " worker $WRKR" 32 | CMD="hpecp epicworker create-with-ssh-key --ip ${WRKR} --ssh-key-file ./generated/controller.prv_key" 33 | WRKR_ID="$($CMD)" 34 | echo " id $WRKR_ID" 35 | WRKR_IDS+=($WRKR_ID) 36 | done 37 | 38 | echo "Waiting for workers to have state 'ready'" 39 | for WRKR in ${WRKR_IDS[@]}; do 40 | echo " worker $WRKR" 41 | hpecp epicworker wait-for-state ${WRKR} --states [ready] --timeout-secs 1800 42 | done 43 | 44 | echo "Setting worker storage" 45 | for WRKR in ${WRKR_IDS[@]}; do 46 | echo " worker $WRKR" 47 | hpecp epicworker set-storage --id ${WRKR} --persistent-disks=/dev/nvme1n1 --ephemeral-disks=/dev/nvme2n1 48 | done 49 | 50 | echo "Waiting for workers to have state 'installed'" 51 | for WRKR in ${WRKR_IDS[@]}; do 52 | echo " worker $WRKR" 53 | hpecp epicworker wait-for-state ${WRKR} --states [installed] --timeout-secs 1800 54 | done 55 | 56 | echo "Removing locks" 57 | hpecp gateway list 58 | hpecp lock delete-all --timeout-secs 1800 59 | -------------------------------------------------------------------------------- /bin/experimental/gitea_url.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z "$1" ]]; then 7 | echo "Usage: $0 TENANT_ID " 8 | exit 1 9 | fi 10 | 11 | set -u 12 | 13 | ./scripts/check_prerequisites.sh 14 | source ./scripts/variables.sh 15 | source ./scripts/functions.sh 16 | 17 | # use the project's HPECP CLI config file 18 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 19 | 20 | export TENANT_ID=$1 21 | export CLUSTER_ID=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text) 22 | export TENANT_NS=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [namespace]" --output text) 23 | 24 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 25 | 26 | set -e 27 | set -u 28 | set -o pipefail 29 | 30 | EXTERNAL_URL=\$(kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) -n $TENANT_NS get service gitea-service \ 31 | -o 'jsonpath={..annotations.hpecp-internal-gateway/3000}') 32 | echo "http://\$EXTERNAL_URL" 33 | 34 | EOF1 -------------------------------------------------------------------------------- /bin/experimental/install_hpecp_cli.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 7 | 8 | source "$SCRIPT_DIR/../../scripts/variables.sh" 9 | 10 | ssh -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T centos@${CTRL_PUB_IP} <<-SSH_EOF 11 | set -e 12 | set -x 13 | 14 | echo "Deprecated. 
See 'bin/create_datatap_to_standalone_df.sh' for alternatives" 15 | exit 0 16 | 17 | SSH_EOF 18 | -------------------------------------------------------------------------------- /bin/experimental/minio_create_bucket.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | exec > >(tee -i generated/log-$(basename $0).txt) 4 | exec 2>&1 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 HOST:PORT 8 | exit 1 9 | fi 10 | 11 | echo "Running script: $0 $@" 12 | 13 | export HOST=$1 14 | echo HOST=$HOST 15 | 16 | ./scripts/check_prerequisites.sh 17 | source ./scripts/variables.sh 18 | 19 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-SSH_EOF 20 | 21 | export PYTHONPATH=\$PYTHONPATH:~/.local/lib/python3.6/site-packages/ 22 | export PYTHONWARNINGS="ignore:Unverified HTTPS request" 23 | 24 | pip3 install minio --user --quiet 25 | pip3 install requests --user --quiet 26 | 27 | python3 - < >(tee -i generated/log-$(basename $0).txt) 4 | exec 2>&1 5 | 6 | set -e 7 | 8 | if [[ -z $1 || -z $2 ]]; then 9 | echo Usage: $0 TENANT_ID MLFLOW_KD_CLUSTERNAME 10 | exit 1 11 | fi 12 | 13 | set -u 14 | 15 | ./scripts/check_prerequisites.sh 16 | source ./scripts/variables.sh 17 | 18 | export TENANT_ID=$1 19 | export MLFLOW_CLUSTER_NAME=$2 20 | 21 | 22 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 23 | 24 | export CLUSTER_ID=\$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text) 25 | export TENANT_NS=\$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [namespace]" --output text) 26 | 27 | HOST_AND_PORT=\$(kubectl --kubeconfig <(hpecp k8scluster --id \$CLUSTER_ID admin-kube-config) \ 28 | get service -l kubedirector.hpe.com/kdcluster=$MLFLOW_CLUSTER_NAME -n \$TENANT_NS \ 29 | -o jsonpath={.items[0].metadata.annotations.'hpecp-internal-gateway/9000'}) 30 | 31 | echo \$HOST_AND_PORT 32 | 33 | EOF1 -------------------------------------------------------------------------------- /bin/experimental/minio_wait_for_mlflow_configured_state.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | exec > >(tee -i generated/log-$(basename $0).txt) 4 | exec 2>&1 5 | 6 | set -e 7 | 8 | if [[ -z $1 || -z $2 ]]; then 9 | echo Usage: $0 TENANT_ID MLFLOW_KD_CLUSTERNAME 10 | exit 1 11 | fi 12 | 13 | set -u 14 | 15 | echo "Running script: $0 $@" 16 | 17 | ./scripts/check_prerequisites.sh 18 | source ./scripts/variables.sh 19 | 20 | export TENANT_ID=$1 21 | export MLFLOW_CLUSTER_NAME=$2 22 | 23 | 24 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 25 | 26 | set -eu 27 | set -o pipefail 28 | 29 | export CLUSTER_ID=\$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text) 30 | export TENANT_NS=\$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [namespace]" --output text) 31 | 32 | echo Waiting for Notebook to have state==configured 33 | COUNTER=0 34 | while [ \$COUNTER -lt 30 ]; 35 | do 36 | STATE=\$(kubectl --kubeconfig <(hpecp k8scluster --id \$CLUSTER_ID admin-kube-config) \ 37 | get kubedirectorcluster -n \$TENANT_NS $MLFLOW_CLUSTER_NAME -o 'jsonpath={.status.state}') 38 | echo STATE=\$STATE 39 | [[ \$STATE == "configured" ]] && break 40 | sleep 1m 41 | let COUNTER=COUNTER+1 42 | done 43 | 44 | if [[ \$STATE != "configured" ]]; 45 
| then 46 | echo "State is not configured after 30 minutes. Raising an error." 47 | exit 1 48 | fi 49 | 50 | EOF1 -------------------------------------------------------------------------------- /bin/experimental/policy_setup_gitea.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z "$1" ]]; then 7 | echo "Usage: $0 TENANT_ID" 8 | echo "Where:" 9 | echo " TENANT_ID = tenant where gitea is deployed" 10 | exit 1 11 | fi 12 | 13 | set -u 14 | 15 | ./scripts/check_prerequisites.sh 16 | source ./scripts/variables.sh 17 | source ./scripts/functions.sh 18 | 19 | # use the project's HPECP CLI config file 20 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 21 | 22 | export TENANT_ID=$1 23 | export CLUSTER_ID=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text) 24 | export TENANT_NS=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [namespace]" --output text) 25 | 26 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 27 | 28 | set -e 29 | set -u 30 | set -o pipefail 31 | 32 | EXTERNAL_URL=\$(kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) -n $TENANT_NS get service gitea-service \ 33 | -o 'jsonpath={..annotations.hpecp-internal-gateway/3000}') 34 | echo "http://\$EXTERNAL_URL" 35 | 36 | hpecp httpclient post /api/v2/gitops_repo <(echo " 37 | { 38 | \"repository_url\": \"http://\$EXTERNAL_URL/administrator/gatekeeper-library.git\", 39 | \"repository_username\": \"administrator\", 40 | \"repository_password\": \"admin123\" 41 | } 42 | ") || true # ignore conflicts 43 | 44 | 45 | REPO_ID=\$( 46 | hpecp httpclient get /api/v2/gitops_repo \ 47 | | python3 -c "import json,sys;obj=json.load(sys.stdin);[ print(t['_links']['self']['href']) for t in obj['_embedded']['gitops_repos'] if t['repository_url'] == 'http://\$EXTERNAL_URL/administrator/gatekeeper-library']" 48 | ) 49 | echo REPO_ID=\$REPO_ID 50 | 51 | ##### 52 | 53 | LOG_LEVEL=DEBUG hpecp httpclient post /api/v2/gitops_app <(echo " 54 | { 55 | \"gitops_repo\": \"\$REPO_ID\", 56 | \"app_source\": { 57 | \"path\": \"library/pod-security-policy/read-only-root-filesystem\", 58 | \"target_revision\": \"HEAD\" 59 | }, 60 | \"label\": { 61 | \"name\": \"read-only-root-filesystem\", 62 | \"description\": \"\" 63 | } 64 | } 65 | ") 66 | 67 | 68 | EOF1 69 | 70 | 71 | -------------------------------------------------------------------------------- /bin/experimental/set_gateway_ssl.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | if [[ ! 
-d generated ]]; then 7 | echo "This file should be executed from the project directory" 8 | exit 1 9 | fi 10 | 11 | ./scripts/check_prerequisites.sh 12 | source ./scripts/variables.sh 13 | 14 | # use the project's HPECP CLI config file 15 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 16 | 17 | echo "Deleting and creating lock" 18 | hpecp lock delete-all 19 | hpecp lock create "Install Gateway" 20 | 21 | echo "SSL info:" 22 | hpecp config get --query 'objects.gateway_ssl_cert_info' --output json 23 | 24 | if [[ -f generated/cert.pem ]] && [[ -f generated/key.pem ]]; then 25 | echo "Setting up Gateway SSL certificate and key" 26 | hpecp install set-gateway-ssl --cert-file generated/cert.pem --key-file generated/key.pem 27 | 28 | GATEWAY_SSL_CONFIGURED=$(hpecp config get --query 'objects.gateway_ssl_cert_info | length(@)' --output json) 29 | if [[ ${GATEWAY_SSL_CONFIGURED} == 0 ]]; then 30 | echo "Gateway SSL was not configured. Aborting." 31 | exit 1 32 | fi 33 | fi 34 | 35 | echo "SSL info:" 36 | hpecp config get --query 'objects.gateway_ssl_cert_info' --output json 37 | 38 | echo "Removing locks" 39 | hpecp lock delete-all --timeout-secs 1800 40 | 41 | -------------------------------------------------------------------------------- /bin/experimental/setup_demo_tenant_ad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | if [[ ! -d generated ]]; then 7 | echo "This file should be executed from the project directory" 8 | exit 1 9 | fi 10 | 11 | ./scripts/check_prerequisites.sh 12 | source ./scripts/variables.sh 13 | 14 | if [[ "$AD_SERVER_ENABLED" == False ]]; then 15 | echo "Skipping script '$0' because AD Server is not enabled" 16 | exit 17 | fi 18 | 19 | # use the project's HPECP CLI config file 20 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 21 | 22 | # set the log level for the HPE CP CLI 23 | # export LOG_LEVEL=DEBUG 24 | 25 | # test connectivity to HPE CP with the CLI 26 | hpecp license platform-id 27 | 28 | # setup AD user for tenant Administrator 29 | # NOTE: 30 | # - /api/v1/role/2 = Admins 31 | # - /api/v1/role/3 = Members 32 | 33 | hpecp httpclient put "/api/v1/tenant/2?external_user_groups" --json-file <(echo ' 34 | { 35 | "external_user_groups": [ 36 | { 37 | "role": "/api/v1/role/2", 38 | "group":"CN=AD_ADMIN_GROUP,CN=Users,DC=samdom,DC=example,DC=com" 39 | }, 40 | { 41 | "role": "/api/v1/role/3", 42 | "group": "CN=AD_MEMBER_GROUP,CN=Users,DC=samdom,DC=example,DC=com" 43 | } 44 | ] 45 | }' | sed s/AD_ADMIN_GROUP/${AD_ADMIN_GROUP}/g | sed s/AD_MEMBER_GROUP/${AD_MEMBER_GROUP}/g) 46 | -------------------------------------------------------------------------------- /bin/get_argo_password.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 CLUSTERNAME 8 | exit 1 9 | fi 10 | 11 | CLUSTERNAME=$1 12 | 13 | set -u 14 | 15 | source ./scripts/check_prerequisites.sh 16 | source ./scripts/variables.sh 17 | 18 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 19 | 20 | CLUSTERNAME=$CLUSTERNAME 21 | 22 | echo username: admin 23 | echo -n "password: " 24 | 25 | kubectl --kubeconfig <(./get_admin_kubeconfig.sh $CLUSTERNAME) get pods -n argocd -l app.kubernetes.io/name=argocd-server -o name | cut -d'/' -f 2 26 | 27 | EOF1 
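Older Argo CD releases use the name of the argocd-server pod as the initial admin password, which is why the script above prints the pod name. A hypothetical invocation, reusing one of the example cluster names from this project:

```bash
# Hypothetical usage - 'kfcluster' stands in for your actual k8s cluster name.
./bin/get_argo_password.sh kfcluster

# Expected output shape (the password is the argocd-server pod name):
#   username: admin
#   password: argocd-server-<replicaset-hash>-<pod-suffix>
```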
-------------------------------------------------------------------------------- /bin/get_k8s_host_ip.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 WORKER_ID 8 | echo Where: WORKER_ID = /api/v2/worker/k8shost/[0-9]* 9 | exit 1 10 | fi 11 | 12 | WORKER_ID=$1 13 | 14 | set -u 15 | 16 | source ./scripts/check_prerequisites.sh 17 | source ./scripts/variables.sh 18 | 19 | hpecp k8sworker list -query "[?_links.self.href == '${WORKER_ID}'] | [0] | [ipaddr]" -o text -------------------------------------------------------------------------------- /bin/get_k8s_masters.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 CLUSTER_ID 8 | echo Where: CLUSTER_ID = /api/v2/k8scluster/[0-9]* 9 | exit 1 10 | fi 11 | 12 | CLUSTER_ID=$1 13 | 14 | set -u 15 | 16 | source ./scripts/check_prerequisites.sh 17 | source ./scripts/variables.sh 18 | 19 | hpecp k8scluster list --query "[?_links.self.href == '${CLUSTER_ID}'] | [0] | [k8shosts_config] | [0] | [?role == 'master'] | [*][node]" -o text -------------------------------------------------------------------------------- /bin/get_kf_dashboard_auth_token.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | 7 | if [[ -z $3 ]]; then 8 | echo Usage: $0 TENANT_ID USERNAME PASSWORD 9 | exit 1 10 | fi 11 | 12 | set -u 13 | 14 | ./scripts/check_prerequisites.sh 15 | source ./scripts/variables.sh 16 | 17 | # use the project's HPECP CLI config file 18 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 19 | 20 | export TENANT_ID=$1 21 | 22 | KFURL="https://$(./bin/get_kf_dashboard_url.sh $TENANT_ID)" 23 | UNAME="$2" 24 | PSWRD="$3" 25 | 26 | 27 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 28 | 29 | set -e 30 | 31 | STATE=\$(curl -s -k ${KFURL} | grep -oP '(?<=state=)[^ ]*"' | cut -d \" -f1) 32 | 33 | REQ=\$(curl -s -k "${KFURL}/dex/auth?client_id=kubeflow-oidc-authservice&redirect_uri=%2Flogin%2Foidc&response_type=code&scope=profile+email+groups+openid&state=\$STATE" | grep -oP '(?<=req=)\w+') 34 | 35 | curl -s -k "${KFURL}/dex/auth/ad?req=\$REQ" -H 'Content-Type: application/x-www-form-urlencoded' --data "login=$UNAME&password=$PSWRD" 36 | 37 | CODE=\$(curl -s -k "${KFURL}/dex/approval?req=\$REQ" | grep -oP '(?<=code=)\w+') 38 | ret=\$? 
39 | if [ \$ret -ne 0 ]; then 40 | echo "Error" 41 | exit 1 42 | fi 43 | curl -s -k --cookie-jar - "${KFURL}/login/oidc?code=\$CODE&state=\$STATE" > .dex_session 44 | 45 | echo \$(cat .dex_session | grep 'authservice_session' | awk '{ORS="" ; printf "%s", \$NF}') 46 | 47 | EOF1 -------------------------------------------------------------------------------- /bin/get_kf_dashboard_url.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | 7 | if [[ -z $1 ]]; then 8 | echo Usage: $0 TENANT_ID 9 | exit 1 10 | fi 11 | 12 | set -u 13 | 14 | ./scripts/check_prerequisites.sh 15 | source ./scripts/variables.sh 16 | 17 | # use the project's HPECP CLI config file 18 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 19 | 20 | export TENANT_ID=$1 21 | 22 | export CLUSTER_ID=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text) 23 | 24 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 25 | 26 | set -e 27 | set -u 28 | set -o pipefail 29 | 30 | kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) \ 31 | describe svc/istio-ingressgateway -n istio-system \ 32 | | grep hpecp-internal-gateway/80: \ 33 | | sed -e 's/^[ \t]*hpecp-internal-gateway\/80: //' 34 | EOF1 35 | -------------------------------------------------------------------------------- /bin/get_logs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source "./scripts/variables.sh" 4 | 5 | 6 | THE_DATE=$(date +"%Y-%m-%dT%H-%M-%S%z") 7 | 8 | set +e 9 | 10 | ./bin/ssh_controller.sh sudo tar czf - /var/log/bluedata/ > ./generated/${THE_DATE}-controller-logs.tar.gz 11 | 12 | for i in "${!WRKR_PUB_IPS[@]}"; do 13 | ssh -o StrictHostKeyChecking=no -i "./generated/controller.prv_key" centos@${WRKR_PUB_IPS[$i]} sudo tar czf - /var/log/bluedata/ > ./generated/${THE_DATE}-worker-${i}-${WRKR_PRV_IPS[$i]}-logs.tar.gz 14 | done 15 | -------------------------------------------------------------------------------- /bin/hosts_for_dev_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source "./scripts/variables.sh" 3 | 4 | echo $CTRL_PUB_IP $CTRL_PRV_DNS 5 | echo $GATW_PUB_IP $GATW_PRV_DNS -------------------------------------------------------------------------------- /bin/istio_ingress_details.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 CLUSTERNAME 8 | exit 1 9 | fi 10 | 11 | CLUSTERNAME=$1 12 | 13 | set -u 14 | 15 | source ./scripts/check_prerequisites.sh 16 | source ./scripts/variables.sh 17 | 18 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF 19 | 20 | echo -n "Host IPs: " 21 | kubectl --kubeconfig <(./get_admin_kubeconfig.sh $CLUSTERNAME) get po -l istio=ingressgateway -n istio-system \ 22 | -o jsonpath='{.items[*].status.hostIP}' 23 | echo 24 | 25 | echo -n "HTTP Port: " 26 | kubectl --kubeconfig <(./get_admin_kubeconfig.sh $CLUSTERNAME) -n istio-system get service istio-ingressgateway \ 27 | -o jsonpath='{.spec.ports[?(@.name=="http2")].nodePort}' 28 | echo 29 | 30 | echo -n "HTTPS Port: " 31 | kubectl --kubeconfig <(./get_admin_kubeconfig.sh $CLUSTERNAME) -n istio-system get service istio-ingressgateway \ 32 | -o 
jsonpath='{.spec.ports[?(@.name=="https")].nodePort}' 33 | echo 34 | 35 | echo -n "TCP Port: " 36 | kubectl --kubeconfig <(./get_admin_kubeconfig.sh $CLUSTERNAME) -n istio-system get service istio-ingressgateway \ 37 | -o jsonpath='{.spec.ports[?(@.name=="tcp")].nodePort}' 38 | echo 39 | 40 | EOF -------------------------------------------------------------------------------- /bin/kubectl_as_admin.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | if [[ -z $1 ]]; then 7 | echo Usage: $0 CLUSTERNAME 8 | echo 9 | echo Examples: 10 | echo -------- 11 | echo $0 kfcluster get pods -A 12 | echo $0 dfcluster exec admincli-0 -n dfdemo -- edf 13 | echo $0 dfcluster exec admincli-0 -n dfdemo -- edf check all 14 | exit 1 15 | fi 16 | 17 | CLUSTERNAME=$1 18 | 19 | set -u 20 | 21 | source ./scripts/check_prerequisites.sh 22 | source ./scripts/variables.sh 23 | 24 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF 25 | kubectl --kubeconfig <(./get_admin_kubeconfig.sh $CLUSTERNAME) ${@:2} 26 | EOF -------------------------------------------------------------------------------- /bin/ldapsearch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source "./scripts/variables.sh" 3 | source "./scripts/functions.sh" 4 | 5 | if [[ "$AD_SERVER_ENABLED" != "True" ]]; then 6 | echo "Aborting. AD Server has not been enabled" 7 | fi 8 | 9 | show_help() { 10 | print_term_width '=' 11 | echo Usage: $0 -q query 12 | echo 13 | echo Example query strings 14 | echo --------------------- 15 | echo "# Retrieve the ad_admin1 record" 16 | echo "$0 -q 'CN=ad_admin1'" 17 | echo 18 | echo "# Retrieve all 'user' records in group ${AD_ADMIN_GROUP}" 19 | echo "$0 -q '(&(memberOf=CN=${AD_ADMIN_GROUP},CN=Users,DC=samdom,DC=example,DC=com)(objectClass=user))'" 20 | print_term_width '=' 21 | } 22 | 23 | OPTIND=1 # Reset in case getopts has been used previously in the shell. 
24 | 25 | query="" 26 | 27 | while getopts "q:" opt; do 28 | case "$opt" in 29 | q) query=$OPTARG 30 | ;; 31 | esac 32 | done 33 | 34 | if [[ OPTIND == 1 || -z $query ]] 35 | then 36 | show_help 37 | exit 1 38 | fi 39 | 40 | shift $((OPTIND-1)) 41 | 42 | [ "${1:-}" = "--" ] && shift 43 | 44 | ldapsearch -o ldif-wrap=no \ 45 | -x \ 46 | -H ldap://$AD_PUB_IP:389 \ 47 | -D 'cn=Administrator,CN=Users,DC=samdom,DC=example,DC=com' \ 48 | -w '5ambaPwd@' \ 49 | -b 'CN=Users,DC=samdom,DC=example,DC=com' \ 50 | $query 51 | 52 | # To connect over TLS 53 | # LDAPTLS_REQCERT=never ldapsearch -o ldif-wrap=no -x -H ldaps://localhost:636 -D 'cn=Administrator,CN=Users,DC=samdom,DC=example,DC=com' -w '5ambaPwd@' -b 'DC=samdom,DC=example,DC=com' -------------------------------------------------------------------------------- /bin/list_k8sclusters.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | set -u 6 | 7 | ./scripts/check_prerequisites.sh 8 | source ./scripts/variables.sh 9 | 10 | 11 | # use the project's HPECP CLI config file 12 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 13 | 14 | hpecp k8scluster list -------------------------------------------------------------------------------- /bin/list_k8sworkers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | set -u 6 | 7 | ./scripts/check_prerequisites.sh 8 | source ./scripts/variables.sh 9 | 10 | 11 | # use the project's HPECP CLI config file 12 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 13 | 14 | hpecp k8sworker list -------------------------------------------------------------------------------- /bin/list_tenants.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | set -u 6 | 7 | ./scripts/check_prerequisites.sh 8 | source ./scripts/variables.sh 9 | 10 | 11 | # use the project's HPECP CLI config file 12 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 13 | 14 | hpecp tenant list -------------------------------------------------------------------------------- /bin/mapr_edge_demo/mapr_edge_demo_docs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo "https://github.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/blob/master/docs/README-DF-EDGE-CORE-CLOUD2.md" -------------------------------------------------------------------------------- /bin/mapr_edge_demo/mapr_edge_demo_edge_watch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | HIDE_WARNINGS=1 4 | 5 | source "./scripts/variables.sh" 6 | 7 | ./generated/ssh_mapr_cluster_2_host_0.sh -t \ 8 | "bash -c 'watch ls -lr /mapr/edge1.enterprise.org/apps/pipeline/data/files-missionX'" -------------------------------------------------------------------------------- /bin/mapr_edge_demo/mapr_edge_demo_hq_watch.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | HIDE_WARNINGS=1 4 | 5 | source "./scripts/variables.sh" 6 | 7 | ./generated/ssh_mapr_cluster_1_host_0.sh -t \ 8 | "bash -c 'watch ls -lr /mapr/dc1.enterprise.org/apps/pipeline/data/files-missionX'" -------------------------------------------------------------------------------- /bin/mapr_edge_demo/mapr_edge_demo_mapruserticket_setup.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export HIDE_WARNINGS=1 4 | 5 | source "./scripts/variables.sh" 6 | 7 | DC_MAPR_USERTICKET="$(HIDE_WARNINGS=1 ./generated/ssh_mapr_cluster_1_host_0.sh 'sudo head -n1 /opt/mapr/conf/mapruserticket')" 8 | EDGE_MAPR_USERTICKET="$(HIDE_WARNINGS=1 ./generated/ssh_mapr_cluster_2_host_0.sh 'sudo head -n1 /opt/mapr/conf/mapruserticket')" 9 | 10 | # Ensure mapruserticket is the same on all nodes on the hq cluster and the edge cluster 11 | for I in 0 1 2; do 12 | echo "$DC_MAPR_USERTICKET" | \ 13 | ./generated/ssh_mapr_cluster_1_host_$I.sh "sudo bash -c 'cat > /opt/mapr/conf/mapruserticket'" 14 | 15 | echo "$EDGE_MAPR_USERTICKET" | \ 16 | ./generated/ssh_mapr_cluster_2_host_$I.sh "sudo bash -c 'cat > /opt/mapr/conf/mapruserticket'" 17 | done; 18 | 19 | # Ensure all nodes in both clusters have maprusertickets for both hq and edge 20 | for I in 0 1 2; do 21 | echo "$DC_MAPR_USERTICKET" | \ 22 | ./generated/ssh_mapr_cluster_2_host_$I.sh "sudo bash -c 'cat >> /opt/mapr/conf/mapruserticket'" 23 | 24 | echo "$EDGE_MAPR_USERTICKET" | \ 25 | ./generated/ssh_mapr_cluster_1_host_$I.sh "sudo bash -c 'cat >> /opt/mapr/conf/mapruserticket'" 26 | done; 27 | 28 | # verify maprusertickets 29 | for i in 1 2; do 30 | for j in 0 1 2; do 31 | echo CLUSTER $i HOST $j; 32 | ./generated/ssh_mapr_cluster_${i}_host_${j}.sh "sudo cat /opt/mapr/conf/mapruserticket" 33 | done; 34 | done; -------------------------------------------------------------------------------- /bin/mapr_edge_demo/mapr_edge_demo_poststartup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export HIDE_WARNINGS=1 4 | 5 | source "./scripts/variables.sh" 6 | 7 | 8 | ./generated/ssh_mapr_cluster_2_host_0.sh sudo -u mapr bash < /dev/null 17 | mapr dbshell --cmdfile <(echo 'find /apps/pipeline/data/imagesTable --limit 1 --pretty') 18 | EOF -------------------------------------------------------------------------------- /bin/mapr_edge_demo/mapr_edge_demo_query_stream.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export HIDE_WARNINGS=1 4 | 5 | source "./scripts/variables.sh" 6 | source "./scripts/functions.sh" 7 | 8 | ################################################################################ 9 | print_header "streamanalyzer docs - https://docs.datafabric.hpe.com/62/ReferenceGuide/mapr_streamanalyzer.html" 10 | echo 11 | echo "Running 'mapr streamanalyzer -path /apps/pipeline/data/pipelineStream -printMessages'" 12 | print_term_width '-' 13 | ################################################################################ 14 | 15 | ./generated/ssh_mapr_cluster_1_host_0.sh sudo -u mapr bash < /opt/mapr/conf/mapruserticket'" 13 | 14 | echo "$EDGE_MAPR_USERTICKET" | \ 15 | ./generated/ssh_mapr_cluster_2_host_$I.sh "sudo bash -c 'cat > /opt/mapr/conf/mapruserticket'" 16 | done; 17 | 18 | for I in 0 1 2; do 19 | echo "$DC_MAPR_USERTICKET" | \ 20 | ./generated/ssh_mapr_cluster_2_host_$I.sh "sudo bash -c 'cat >> /opt/mapr/conf/mapruserticket'" 21 | 22 | echo "$EDGE_MAPR_USERTICKET" | \ 23 | ./generated/ssh_mapr_cluster_1_host_$I.sh "sudo bash -c 'cat >> /opt/mapr/conf/mapruserticket'" 24 | done; 25 | 26 | for i in 1 2; do 27 | for j in 0 1 2; do 28 | echo CLUSTER $i HOST $j; 29 | ./generated/ssh_mapr_cluster_${i}_host_${j}.sh "sudo cat /opt/mapr/conf/mapruserticket" 30 | done; 31 | done; 
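Both mapruserticket scripts above append each cluster's ticket to every node of the other cluster, so after a successful run each node's /opt/mapr/conf/mapruserticket should hold one entry per cluster. A minimal verification sketch, assuming the same generated per-host SSH wrappers and two three-node clusters (the expected count of 2 is an assumption based on the append logic above):

```bash
#!/bin/bash
# Sketch: confirm each node holds ticket entries for both the hq and edge clusters.
export HIDE_WARNINGS=1

source "./scripts/variables.sh"

for i in 1 2; do
  for j in 0 1 2; do
    # Count the ticket entries on cluster $i, host $j (sudo, as in the scripts above).
    COUNT=$(./generated/ssh_mapr_cluster_${i}_host_${j}.sh \
      "sudo cat /opt/mapr/conf/mapruserticket | wc -l")
    echo "cluster ${i} host ${j}: ${COUNT} mapruserticket entries"
    if [[ "${COUNT}" -lt 2 ]]; then
      echo "WARNING: expected one ticket entry per cluster on this node" >&2
    fi
  done
done
```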
-------------------------------------------------------------------------------- /bin/mapr_edge_demo/mapr_edge_demo_restart_vol_mirror.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export HIDE_WARNINGS=1 4 | 5 | source "./scripts/variables.sh" 6 | 7 | ./generated/ssh_mapr_cluster_2_host_0.sh </dev/null 2>&1 || sudo yum install fuse-sshfs 6 | 7 | ./bin/ssh_rdp_linux_server.sh "[[ -d /home/ubuntu/clientmount ]] || mkdir /home/ubuntu/clientmount" 8 | 9 | echo "Mounting '/home/ubuntu/clientmount' to '${PWD}/rdpmount'" 10 | 11 | sshfs -o StrictHostKeyChecking=no \ 12 | -o IdentityFile="${PWD}/generated/controller.prv_key" \ 13 | ubuntu@$RDP_PUB_IP:/home/ubuntu/clientmount \ 14 | ${PWD}/rdpmount 15 | 16 | echo "To unmount:" 17 | echo "fusermount -u ${PWD}/rdpmount" 18 | -------------------------------------------------------------------------------- /bin/terraform_apply.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | ./scripts/check_prerequisites.sh 7 | source ./scripts/functions.sh 8 | 9 | print_term_width "=" 10 | echo "TIP: Parameters given to this script are passed to 'terraform apply'" 11 | echo " Example: ./bin/terraform_apply.sh -var='ad_server_enabled=false'" 12 | print_term_width "=" 13 | 14 | 15 | if [[ ! -f "./generated/controller.prv_key" ]]; then 16 | [[ -d "./generated" ]] || mkdir generated 17 | ssh-keygen -m pem -t rsa -N "" -f "./generated/controller.prv_key" 18 | mv "./generated/controller.prv_key.pub" "./generated/controller.pub_key" 19 | chmod 600 "./generated/controller.prv_key" 20 | fi 21 | 22 | if [[ ! -f "./generated/ca-key.pem" ]]; then 23 | openssl genrsa -out "./generated/ca-key.pem" 2048 24 | openssl req -x509 \ 25 | -new -nodes \ 26 | -key "./generated/ca-key.pem" \ 27 | -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=mydomain.com" \ 28 | -sha256 -days 1024 \ 29 | -out "./generated/ca-cert.pem" 30 | chmod 660 "./generated/ca-key.pem" 31 | fi 32 | 33 | terraform apply -var-file=<(cat etc/*.tfvars) \ 34 | -var="client_cidr_block=$(curl -s http://ipinfo.io/ip)/32" "$@" 35 | 36 | terraform output -json > generated/output.json 37 | ./scripts/post_refresh_or_apply.sh 38 | 39 | source ./scripts/variables.sh 40 | if [[ "$RDP_SERVER_ENABLED" == True && "$RDP_SERVER_OPERATING_SYSTEM" == "LINUX" ]]; then 41 | # Display RDP Endpoint and Credentials 42 | ./bin/rdp_credentials.sh 43 | fi 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /bin/terraform_apply_accept.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | ./scripts/check_prerequisites.sh 7 | 8 | if [[ ! -f "./generated/controller.prv_key" ]]; then 9 | [[ -d "./generated" ]] || mkdir generated 10 | ssh-keygen -t rsa -N "" -f "./generated/controller.prv_key" 11 | mv "./generated/controller.prv_key.pub" "./generated/controller.pub_key" 12 | chmod 600 "./generated/controller.prv_key" 13 | fi 14 | 15 | if [[ ! 
-f "./generated/ca-key.pem" ]]; then 16 | openssl genrsa -out "./generated/ca-key.pem" 2048 17 | openssl req -x509 \ 18 | -new -nodes \ 19 | -key "./generated/ca-key.pem" \ 20 | -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=mydomain.com" \ 21 | -sha256 -days 1024 \ 22 | -out "./generated/ca-cert.pem" 23 | chmod 660 "./generated/ca-key.pem" 24 | fi 25 | 26 | terraform apply -var-file=<(cat etc/*.tfvars) \ 27 | -var="client_cidr_block=$(curl -s http://ipinfo.io/ip)/32" -auto-approve=true "$@" && \ 28 | 29 | terraform output -json > generated/output.json && \ 30 | ./scripts/post_refresh_or_apply.sh 31 | 32 | source ./scripts/variables.sh 33 | if [[ "$RDP_SERVER_ENABLED" == True && "$RDP_SERVER_OPERATING_SYSTEM" == "Linux" ]]; then 34 | # Display RDP Endpoint and Credentials 35 | ./bin/rdp_credentials.sh 36 | fi 37 | -------------------------------------------------------------------------------- /bin/terraform_destroy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | ./scripts/check_prerequisites.sh 7 | 8 | # create private/public keys if they don't exist 9 | if [[ ! -f "./generated/controller.prv_key" ]]; then 10 | [[ -d "./generated" ]] || mkdir generated 11 | ssh-keygen -m pem -t rsa -N "" -f "./generated/controller.prv_key" 12 | mv "./generated/controller.prv_key.pub" "./generated/controller.pub_key" 13 | chmod 600 "./generated/controller.prv_key" 14 | fi 15 | 16 | if [[ ! -f "./generated/ca-key.pem" ]]; then 17 | openssl genrsa -out "./generated/ca-key.pem" 2048 18 | openssl req -x509 \ 19 | -new -nodes \ 20 | -key "./generated/ca-key.pem" \ 21 | -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=mydomain.com" \ 22 | -sha256 -days 1024 \ 23 | -out "./generated/ca-cert.pem" 24 | chmod 660 "./generated/ca-key.pem" 25 | fi 26 | 27 | terraform destroy -var-file=<(cat etc/*.tfvars) \ 28 | -var="client_cidr_block=$(curl -s http://ipinfo.io/ip)/32" && \ 29 | rm -rf ./generated -------------------------------------------------------------------------------- /bin/terraform_destroy_accept.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | ./scripts/check_prerequisites.sh 7 | 8 | if [[ ! -f "./generated/controller.prv_key" ]]; then 9 | [[ -d "./generated" ]] || mkdir generated 10 | ssh-keygen -m pem -t rsa -N "" -f "./generated/controller.prv_key" 11 | mv "./generated/controller.prv_key.pub" "./generated/controller.pub_key" 12 | chmod 600 "./generated/controller.prv_key" 13 | fi 14 | 15 | if [[ ! 
-f "./generated/ca-key.pem" ]]; then 16 | openssl genrsa -out "./generated/ca-key.pem" 2048 17 | openssl req -x509 \ 18 | -new -nodes \ 19 | -key "./generated/ca-key.pem" \ 20 | -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=mydomain.com" \ 21 | -sha256 -days 1024 \ 22 | -out "./generated/ca-cert.pem" 23 | chmod 660 "./generated/ca-key.pem" 24 | fi 25 | 26 | terraform destroy -var-file=<(cat etc/*.tfvars) \ 27 | -var="client_cidr_block=$(curl -s http://ipinfo.io/ip)/32" -auto-approve=true && \ 28 | rm -rf ./generated 29 | -------------------------------------------------------------------------------- /bin/terraform_get_worker_hosts_private_ips_by_index.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import json 6 | 7 | def parse_slice(value): 8 | """ 9 | Parses a `slice()` from string, like `start:stop:step`. 10 | """ 11 | if value: 12 | parts = value.split(':') 13 | if len(parts) == 1: 14 | # slice(stop) 15 | parts = [None, parts[0]] 16 | # else: slice(start, stop[, step]) 17 | else: 18 | # slice() 19 | parts = [] 20 | return slice(*[int(p) if p else None for p in parts]) 21 | 22 | 23 | KF_HOSTS_INDEX = parse_slice(sys.argv[1]) 24 | 25 | 26 | stream = os.popen("terraform output -json -no-color workers_private_ip") 27 | 28 | obj=json.load(stream) 29 | 30 | print(' '.join(obj[0][KF_HOSTS_INDEX])) 31 | 32 | -------------------------------------------------------------------------------- /bin/terraform_get_worker_hosts_private_ips_by_index_as_array_of_strings.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import json 6 | 7 | def parse_slice(value): 8 | """ 9 | Parses a `slice()` from string, like `start:stop:step`. 10 | """ 11 | if value: 12 | parts = value.split(':') 13 | if len(parts) == 1: 14 | # slice(stop) 15 | parts = [None, parts[0]] 16 | # else: slice(start, stop[, step]) 17 | else: 18 | # slice() 19 | parts = [] 20 | return slice(*[int(p) if p else None for p in parts]) 21 | 22 | 23 | KF_HOSTS_INDEX = parse_slice(sys.argv[1]) 24 | 25 | 26 | stream = os.popen("terraform output -json -no-color workers_private_ip") 27 | 28 | obj=json.load(stream) 29 | 30 | 31 | 32 | print('[' + ','.join("'{0}'".format(w) for w in obj[0][KF_HOSTS_INDEX]) + ']') 33 | 34 | -------------------------------------------------------------------------------- /bin/terraform_output.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | terraform output -json > generated/output.json -------------------------------------------------------------------------------- /bin/terraform_plan.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | ./scripts/check_prerequisites.sh 7 | 8 | if [[ ! -f "./generated/controller.prv_key" ]]; then 9 | [[ -d "./generated" ]] || mkdir generated 10 | ssh-keygen -m pem -t rsa -N "" -f "./generated/controller.prv_key" 11 | mv "./generated/controller.prv_key.pub" "./generated/controller.pub_key" 12 | chmod 600 "./generated/controller.prv_key" 13 | fi 14 | 15 | if [[ ! 
-f "./generated/ca-key.pem" ]]; then 16 | openssl genrsa -out "./generated/ca-key.pem" 2048 17 | openssl req -x509 \ 18 | -new -nodes \ 19 | -key "./generated/ca-key.pem" \ 20 | -subj "/C=US/ST=CA/O=MyOrg, Inc./CN=mydomain.com" \ 21 | -sha256 -days 1024 \ 22 | -out "./generated/ca-cert.pem" 23 | chmod 660 "./generated/ca-key.pem" 24 | fi 25 | 26 | terraform plan -var-file=<(cat etc/*.tfvars) \ 27 | -var="client_cidr_block=$(curl -s http://ipinfo.io/ip)/32" \ 28 | -out terraform-plan-$(date +"%Y_%m_%d_%I_%M_%p").out 29 | 30 | -------------------------------------------------------------------------------- /bin/terraform_refresh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | ./scripts/check_prerequisites.sh 7 | 8 | terraform refresh -var-file=<(cat etc/*.tfvars) \ 9 | -var="client_cidr_block=$(curl -s http://ipinfo.io/ip)/32" 10 | 11 | terraform output -json > generated/output.json 12 | ./scripts/post_refresh_or_apply.sh 13 | 14 | source ./scripts/variables.sh 15 | if [[ "$RDP_SERVER_ENABLED" == True && "$RDP_SERVER_OPERATING_SYSTEM" == "LINUX" ]]; then 16 | # Display RDP Endpoint and Credentials 17 | ./bin/rdp_credentials.sh 18 | fi 19 | -------------------------------------------------------------------------------- /bin/updates/fix_notebook_hadoop_perms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | 7 | if [[ -z $1 ]]; then 8 | echo Usage: $0 TENANT_ID 9 | exit 1 10 | fi 11 | 12 | set -u 13 | 14 | ./scripts/check_prerequisites.sh 15 | source ./scripts/variables.sh 16 | 17 | echo "Running script: $0 $@" 18 | 19 | # use the project's HPECP CLI config file 20 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 21 | 22 | export TENANT_ID=$1 23 | echo $TENANT_ID 24 | 25 | export NB_CLUSTER_NAME=nb 26 | echo NB_CLUSTER_NAME=$NB_CLUSTER_NAME 27 | 28 | export CLUSTER_ID=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text) 29 | echo CLUSTER_ID=$CLUSTER_ID 30 | 31 | export TENANT_NS=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [namespace]" --output text) 32 | echo TENANT_NS=$TENANT_NS 33 | 34 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 35 | 36 | set -e 37 | set -u 38 | set -o pipefail 39 | 40 | POD=\$(kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) \ 41 | get pod -l kubedirector.hpe.com/kdcluster=$NB_CLUSTER_NAME -n $TENANT_NS -o 'jsonpath={.items..metadata.name}') 42 | 43 | kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) \ 44 | exec -c app -n $TENANT_NS \$POD -- chown bluedata:bluedata /opt/bluedata/hadoop-2.8.5/etc/hadoop/core-site.xml 45 | 46 | kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) \ 47 | exec -c app -n $TENANT_NS \$POD -- chmod 644 /opt/bluedata/hadoop-2.8.5/etc/hadoop/core-site.xml 48 | 49 | EOF1 50 | -------------------------------------------------------------------------------- /bin/updates/restart_trainingengineinstance_haproxy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | 7 | if [[ -z $1 ]]; then 8 | echo Usage: $0 TENANT_ID 9 | exit 1 10 | fi 11 | 12 | set -u 13 | 14 | ./scripts/check_prerequisites.sh 15 | source 
./scripts/variables.sh 16 | 17 | echo "Running script: $0 $@" 18 | 19 | # use the project's HPECP CLI config file 20 | export HPECP_CONFIG_FILE="./generated/hpecp.conf" 21 | 22 | export TENANT_ID=$1 23 | echo $TENANT_ID 24 | 25 | export CLUSTER_ID=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [_links.k8scluster]" --output text) 26 | echo CLUSTER_ID=$CLUSTER_ID 27 | 28 | export TENANT_NS=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [namespace]" --output text) 29 | echo TENANT_NS=$TENANT_NS 30 | 31 | ssh -q -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -T ubuntu@${RDP_PUB_IP} <<-EOF1 32 | 33 | set -e 34 | set -u 35 | set -o pipefail 36 | 37 | POD=\$(kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) \ 38 | get pod -l kubedirector.hpe.com/kdcluster=trainingengineinstance,kubedirector.hpe.com/role=LoadBalancer -n $TENANT_NS -o 'jsonpath={.items..metadata.name}') 39 | 40 | set -x 41 | 42 | kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) \ 43 | exec -c app -n $TENANT_NS \$POD -- systemctl restart haproxy 44 | 45 | EOF1 46 | -------------------------------------------------------------------------------- /bin/vpn_connectivity.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | source "./scripts/variables.sh" 7 | source "./scripts/functions.sh" 8 | 9 | RED='\033[0;31m' 10 | NC='\033[0m' # No Color 11 | 12 | echo 13 | echo -e "${RED}Attempting to ping the controller private IP${NC}" 14 | ping -c 5 $CTRL_PRV_IP 15 | 16 | echo 17 | echo -e "${RED}Attempting to ping the controller private DNS${NC}" 18 | ping -c 5 $CTRL_PRV_DNS 19 | 20 | command -v scutil >/dev/null 2>&1 && { 21 | echo 22 | echo -e "${RED}OS X Nameservers${NC}" 23 | scutil --dns | grep 'nameserver\[[0-9]*\]' 24 | } 25 | 26 | echo 27 | 28 | -------------------------------------------------------------------------------- /bin/vpn_server_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | source "./scripts/variables.sh" 7 | 8 | echo RDP_SERVER_ENABLED=$RDP_SERVER_ENABLED 9 | echo RDP_SERVER_OPERATING_SYSTEM=$RDP_SERVER_OPERATING_SYSTEM 10 | 11 | if [[ "$RDP_SERVER_ENABLED" != "True" && "$RDP_SERVER_OPERATING_SYSTEM" != "LINUX" ]] 12 | then 13 | echo "Aborting. RDP Linux Server has not been enabled in etc/bluedata_infra.tfvars" 14 | exit 1 15 | fi 16 | 17 | echo "Testing connectivity to $RDP_PUB_IP" 18 | ping -c 2 $RDP_PUB_IP || { 19 | echo "$(tput setaf 1)Aborting. Could not ping RDP Linux Server." 20 | echo " - You may need to disconnect from your corporate VPN, and/or" 21 | echo " - You may need to run ./bin/terraform_apply.sh$(tput sgr0)" 22 | exit 1 23 | } 24 | 25 | if [[ ! 
-f "./generated/vpn_users" ]]; then 26 | echo user1:$(openssl rand -hex 12 | tr -d '\n') > "./generated/vpn_users" 27 | echo $(openssl rand -hex 30 | tr -d '\n') > "./generated/vpn_shared_key" 28 | fi 29 | 30 | VPN_USERS=$(cat "./generated/vpn_users") 31 | VPN_PSK=$(cat "./generated/vpn_shared_key") 32 | 33 | ssh -o StrictHostKeyChecking=no -i "./generated/controller.prv_key" ubuntu@$RDP_PUB_IP <<-SSH_EOF 34 | set -eux 35 | sudo ufw allow 1701 36 | if docker ps | grep softethervpn; then 37 | docker kill \$(docker ps | grep softethervpn | awk '{ print \$1 }') 38 | fi 39 | docker run -d --cap-add NET_ADMIN --restart=always -e USERS="$VPN_USERS" -e PSK="$VPN_PSK" -p 500:500/udp -p 4500:4500/udp -p 1701:1701/tcp -p 1194:1194/udp -p 5555:5555/tcp siomiz/softethervpn 40 | SSH_EOF 41 | -------------------------------------------------------------------------------- /build_ide.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker build . -t hpecp/hpecp-ide 4 | -------------------------------------------------------------------------------- /docs/README-ADDING-MORE-WORKERS.md: -------------------------------------------------------------------------------- 1 | ### Add more worker nodes 2 | 3 | Set the variable `worker_count=` in `etc/bluedata_infra.tfvars` to the desired number. 4 | 5 | ``` 6 | # check the changes that will be done in the output - don't forget to approve when prompted 7 | ./bin/terraform_apply.sh 8 | 9 | # run a script to prepare the worker - follow the prompts and instructions. 10 | ./scripts/bluedata_prepare_worker.sh 11 | ``` 12 | -------------------------------------------------------------------------------- /docs/README-COST-ESTIMATES.MD: -------------------------------------------------------------------------------- 1 | ### Overview 2 | 3 | 4 | Download jq transformation script: 5 | 6 | ```console 7 | curl -sLO https://raw.githubusercontent.com/antonbabenko/terraform-cost-estimation/master/terraform.jq 8 | ``` 9 | 10 | Extract current configuration and send to https://cost.modules.tf/ for an estimate 11 | 12 | ```console 13 | terraform state pull | jq -cf terraform.jq | curl -s -X POST -H "Content-Type: application/json" -d @- https://cost.modules.tf/ 14 | ``` 15 | 16 | Outputs something like: 17 | 18 | ```json 19 | {"hourly": 11.27, "monthly": 8115.26} 20 | ``` 21 | 22 | Btw my environment currently looks like this for the above cost estimation: 23 | 24 | ```console 25 | $ ./generated/get_private_endpoints.sh 26 | ------------- ---------------- -------------------------------------------------------- 27 | NAME IP DNS 28 | ------------- ---------------- -------------------------------------------------------- 29 | RDP Server 10.1.0.113 NA 30 | Controller 10.1.0.181 ip-10-1-0-181.eu-west-3.compute.internal 31 | Gateway 10.1.0.39 ip-10-1-0-39.eu-west-3.compute.internal 32 | AD 10.1.0.214 NA 33 | Worker 0 10.1.0.220 ip-10-1-0-220.eu-west-3.compute.internal 34 | Worker 1 10.1.0.80 ip-10-1-0-80.eu-west-3.compute.internal 35 | Worker 2 10.1.0.145 ip-10-1-0-145.eu-west-3.compute.internal 36 | Worker 3 10.1.0.36 ip-10-1-0-36.eu-west-3.compute.internal 37 | MAPR CLS 1 0 10.1.0.67 ip-10-1-0-67.eu-west-3.compute.internal 38 | MAPR CLS 1 1 10.1.0.87 ip-10-1-0-87.eu-west-3.compute.internal 39 | MAPR CLS 1 2 10.1.0.126 ip-10-1-0-126.eu-west-3.compute.internal 40 | ------------- ---------------- -------------------------------------------------------- 41 | ``` 42 | 43 | **NOTE:**. 
44 | 45 | This only estimates: 46 | 47 | - aws_instance 48 | - aws_ebs_volume 49 | - aws_ebs_snapshot 50 | - aws_ebs_snapshot_copy 51 | - aws_nat_gateway 52 | 53 | ### References 54 | 55 | - https://github.com/antonbabenko/terraform-cost-estimation 56 | 57 | -------------------------------------------------------------------------------- /docs/README-DATA-FABRIC-ARCHITECTURE-OVERVIEW.md: -------------------------------------------------------------------------------- 1 | Coming soon... 2 | 3 | Purpose 4 | 5 | - Introduce users to the architecture of the HPE Container Platform data fabric. 6 | 7 | Describe: 8 | 9 | - Topologies including CLDB per controller 10 | - System Managed Objects (tenant storage, monitoring) 11 | - Mapr container (epic-mapr) on all controllers and workers 12 | - ... 13 | 14 | Deep dive references: 15 | 16 | - [Data Tiering](./docs/README-DATA-TIERING.md) 17 | - [AD/LDAP Setup](./docs/README-MAPR-LDAP.md) 18 | - FUSE Client (coming soon) 19 | - Shared Volume Creation (coming soon) 20 | - ... 21 | -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image001.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image001.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image002.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image002.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image003.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image003.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image004.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image004.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image005.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image005.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image006.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image006.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image007.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image007.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image008.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image008.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image009.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image009.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image010.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image010.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image011.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image011.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image012.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image012.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image013.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image013.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image014.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image014.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image015.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image015.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image016.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image016.png 
-------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image017.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image017.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image018.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image018.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image019.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image019.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image020.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image021.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image021.png -------------------------------------------------------------------------------- /docs/README-DATA-TIERING/image022.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-DATA-TIERING/image022.png -------------------------------------------------------------------------------- /docs/README-DESIGN-PRINCIPLES.md: -------------------------------------------------------------------------------- 1 | Design Principles 2 | ================= 3 | 4 | # Separation of infrastructure and software deployment 5 | 6 | This project has been designed to provide separation of concerns as much as possible: 7 | 8 | - Infrastructure setup 9 | - HPE Container Platform (HCP) setup 10 | 11 | The infrastructure setup is performed by Terraform and provides the AWS components required for running HCP and other services such as an RDP Jump Host and an Active Directory Server. 12 | 13 | Post infrastructure setup, the setup of HCP is performed by a bash script: `./scripts/bluedata_intall.sh`. 14 | 15 | The separation of concerns has two main goals: 16 | 17 | 1. The user can choose to only set up the infrastructure and then manually install HCP (this is good for learning how to manually deploy HCP) 18 | 2. The non-infrastructure setup is performed using a shell script, so it should be easy to read and understand how to perform a manual deployment of HCP. It is for this reason that Ansible or other technologies have not been used for the post infrastructure setup.
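In practice the two concerns map onto two separate commands. A minimal sketch of the flow (the install script path is taken from the reference above; verify the exact file name under ./scripts/ in your checkout):

```bash
# Phase 1: infrastructure only - Terraform creates the AWS resources.
./bin/terraform_apply.sh

# Phase 2: software deployment - a plain bash script drives the HCP install.
# Path as referenced above; check ./scripts/ for the exact file name.
./scripts/bluedata_intall.sh
```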
19 | 20 | # Idempotent Scripts 21 | 22 | Scripts are idempotent as much as possible. 23 | 24 | For more information see https://www.infoworld.com/article/3263724/idempotence-and-the-discipline-of-devops.html 25 | 26 | -------------------------------------------------------------------------------- /docs/README-DESTROY-DEMO-ENV.md: -------------------------------------------------------------------------------- 1 | ### Destroy complete environment from AWS 2 | 3 | ``` 4 | # check the changes that will be done in the output - don't forget to approve when prompted 5 | ./bin/terraform_destroy.sh 6 | ``` 7 | -------------------------------------------------------------------------------- /docs/README-EC2-START-STOP-STATUS.md: -------------------------------------------------------------------------------- 1 | ### Stopping, Starting and viewing the running status of your EC2 instances 2 | 3 | Some scripts were generated for you to view the status of your EC2 instances managed by this terraform project. 4 | 5 | - `./bin/ec2_stop_all_instances.sh` to stop your instances 6 | - `./bin/ec2_start_all_instances.sh` to start your instances 7 | - `./bin/ec2_instance_status.sh` to view running instances 8 | 9 | ------ 10 | 11 | ### Stopping instances - (Former version) example 12 | 13 | ![stopping instances](./README-EC2-START-STOP-STATUS/stopping_instances.gif) 14 | 15 | ### Starting instances - (Former version) example 16 | 17 | ![starting instances](./README-EC2-START-STOP-STATUS/starting_instances.gif) 18 | 19 | ### Running instances - (Former version) example 20 | 21 | ![running instances](./README-EC2-START-STOP-STATUS/running_instances.gif) 22 | -------------------------------------------------------------------------------- /docs/README-EC2-START-STOP-STATUS/running_instances.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-EC2-START-STOP-STATUS/running_instances.gif -------------------------------------------------------------------------------- /docs/README-EC2-START-STOP-STATUS/starting_instances.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-EC2-START-STOP-STATUS/starting_instances.gif -------------------------------------------------------------------------------- /docs/README-EC2-START-STOP-STATUS/stopping_instances.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-EC2-START-STOP-STATUS/stopping_instances.gif -------------------------------------------------------------------------------- /docs/README-INSPECT-API.md: -------------------------------------------------------------------------------- 1 | # How to use Wireshark to inspect API calls at the wire level 2 | 3 | - Deploy HPECP with this project using: `install_with_ssl = false` in `etc/bluedata_infra.tfvars` 4 | - Install Wireshark 5 | - Bind Wireshark 6 | - Install `tcpdump` on the controller 7 | 8 | ``` 9 | ./generated/ssh_controller.sh sudo yum install -y tcpdump 10 | ``` 11 | 12 | - Run `tcpdump` and pipe output over ssh to wireshark: 13 | 14 | ``` 15 | # MAC OSX 16 | 
./generated/ssh_controller.sh sudo tcpdump -i lo -U -s0 -w - 'port 8080' | sudo /Applications/Wireshark.app/Contents/MacOS/Wireshark -k -i - 17 | ``` 18 | 19 | - Make API call. 20 | - Filter wireshark, e.g. 21 | 22 | - `http` 23 | - `http.request.method == "POST" or http.request.method == "GET"` 24 | - `http.request.uri == "/api/v1/user"` 25 | - `http.request.uri matches "k8skubeconfig"` 26 | 27 | - Right click stream, and select follow HTTP Stream 28 | -------------------------------------------------------------------------------- /docs/README-LOGIN-MAPR-CONTROL-SYSTEM.md: -------------------------------------------------------------------------------- 1 | ## Login to the MAPR Control System 2 | 3 | Open a browser to https://:8443 4 | 5 | The username is `admin`. 6 | 7 | You can obtain the password by SSH'ing into the HCP Controller and running: 8 | 9 | ``` 10 | sudo cat /opt/bluedata/mapr/conf/mapr-admin-pass 11 | ``` 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/add_ad_admin_user.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/add_ad_admin_user.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/add_ad_user_group.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/add_ad_user_group.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/create_volume.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/create_volume.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/create_volume_button.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/create_volume_button.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/datatap_browser.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/datatap_browser.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/login_ad_admin1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/login_ad_admin1.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/login_ad_user1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/login_ad_user1.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | GLOBAL_SETTINGS = { 4 | "BASE_URL" : 'https://127.0.0.1:8080', 5 | "CONFIG_URL" : "/api/v1/config/", 6 | "LOCK_URL" : "/api/v1/lock/", 7 | "UPGRADE_URL" : "/api/v1/upgrade/", 8 | "DATACONN_URL" : "/api/v1/dataconn/", 9 | "TESTDATACONN_URL" : "/api/v1/testdataconn/", 10 | "JOB_URL" : "/api/v1/job/", 11 | "CLUSTER_URL" : "/api/v1/cluster/", 12 | "ROLE_URL" : "/api/v1/role/", 13 | "USER_URL" : "/api/v1/user/", 14 | "TENANT_URL" : "/api/v1/tenant/", 15 | "BLUEDATA_BASE_PATH" : "/srv/bluedata/", 16 | "RESULTS_BASE_PATH" : "/results/", 17 | "LOGIN_URL" : "/api/v1/login", 18 | "LOGOUT_URL" : "/api/v1/logout", 19 | "LICENSE_URL" : "/api/v1/license/", 20 | "FLAVOR_URL" : "/api/v1/flavor/", 21 | "BDS_USER" : "demo.user", 22 | "BDS_ADMIN" : "admin", 23 | "BDS_MEMBER_ROLE" : "Member", 24 | "BDS_TENANT_ADMIN_ROLE" : "Admin", 25 | "BDS_SITE_ADMIN_ROLE" : "Site Admin", 26 | "BDS_DEFAULT_TENANT" : "Demo Tenant", 27 | "BDS_ADMIN_TENANT" : "Site Admin", 28 | "BDS_PASSWORD" : "admin123", 29 | "BDS_SESSION_TAG" : "X-BDS-SESSION", 30 | "COMMON_LABEL_PARAM" : "?label", 31 | "COMMON_QUOTA_PARAM" : "?quota", 32 | "USER_PARAM" : "?user", 33 | "STATS_URL" : "/api/v1/stats/", 34 | "TENANT_PARAM" : "?tenant", 35 | "DCO_URL" : "/api/v1/dataconn/", 36 | "CATALOG_URL" : "/api/v1/catalog/", 37 | "RESOURCE_CONFIG_URL" : "/api/v1/install?install_reconfig", 38 | "HTTPFS_USER" : "httpfs", 39 | "BDS_TENANT_NAME" : "Demo Tenant", 40 | "BDS_TENANT_ADMIN" : "admin", 41 | "BDS_TENANT_ADMIN_PASSWORD" : "admin123" 42 | } 43 | 44 | def get_setting(key): 45 | env_name = "BD_SETTING_" + key 46 | env_val = os.environ.get(env_name) 47 | if env_val is None: 48 | return GLOBAL_SETTINGS[key] 49 | else: 50 | return env_val -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/spark_success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/spark_success.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/user_settings.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/user_settings.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/volume_authorization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/volume_authorization.png -------------------------------------------------------------------------------- /docs/README-MAPR-LDAP/volume_menu.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-MAPR-LDAP/volume_menu.png -------------------------------------------------------------------------------- /docs/README-POLICY-DEMO/add_policies.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-POLICY-DEMO/add_policies.png -------------------------------------------------------------------------------- /docs/README-POLICY-DEMO/add_policies2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-POLICY-DEMO/add_policies2.png -------------------------------------------------------------------------------- /docs/README-POLICY-DEMO/add_policy_to_cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-POLICY-DEMO/add_policy_to_cluster.png -------------------------------------------------------------------------------- /docs/README-POLICY-DEMO/add_policy_to_cluster2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-POLICY-DEMO/add_policy_to_cluster2.png -------------------------------------------------------------------------------- /docs/README-POLICY-DEMO/fork_repo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-POLICY-DEMO/fork_repo.png -------------------------------------------------------------------------------- /docs/README-POLICY-DEMO/view_argo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-POLICY-DEMO/view_argo.png -------------------------------------------------------------------------------- /docs/README-POLICY-DEMO/view_argo2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-POLICY-DEMO/view_argo2.png -------------------------------------------------------------------------------- /docs/README-RDP.md: -------------------------------------------------------------------------------- 1 | ### Overview 2 | 3 | You can enable an RDP server to be automatically configured for the HPE Container Platform. 4 | 5 | This is done in the `etc/bluedata_infra.tfvars` file. 6 | 7 | ``` 8 | rdp_server_enabled = true 9 | rdp_server_operating_system = "LINUX" 10 | ``` 11 | 12 | This will cause the RDP server to be created the next time you run `./bin/terraform_apply.sh`. 13 | 14 | By default, the RDP server will have a dynamically assigned public IP address.
If you would like a static IP address (AWS EIP), configure the following option: 15 | 16 | ``` 17 | create_eip_rdp_linux_server = true 18 | ``` 19 | 20 | ### Getting the Credentials 21 | 22 | ``` 23 | ./generated/rdp_credentials.sh 24 | ``` 25 | 26 | ![rdp credentials](./README-RDP/rdp_credentials.gif) 27 | 28 | ### Using the RDP Server 29 | 30 | The RDP server is configured so that Firefox autostarts: 31 | 32 | - with common links such as the HCP admin interface and the MCS admin interface 33 | - with the HCP SSL certificate trusted 34 | 35 | The RDP server also has links on the desktop: 36 | 37 | - to retrieve the MCS admin password 38 | - for ssh sessions to the controller, gateway and Active Directory server 39 | - with txt file notes containing installation instructions and the list of worker IP addresses 40 | - the SSH key for adding workers and the gateway 41 | 42 | ### Accessing with a web browser 43 | 44 | ![rdp browser](./README-RDP/rdp_browser.gif) 45 | 46 | ### Accessing with an RDP client 47 | 48 | You can connect to the RDP server using an RDP client such as the one provided by Microsoft. 49 | 50 | ### RDP SSH/SCP 51 | 52 | You can easily connect to the RDP server: 53 | 54 | - using ssh from your client machine: `./generated/ssh_rdp_linux_server.sh` 55 | - using sftp from your client machine: `./generated/sftp_rdp_linux_server.sh` 56 | 57 | ### Limitations 58 | 59 | - The RDP server is great for beginners, but RDP can be slow. 60 | - If you find RDP too slow, consider using the [VPN](./README-VPN.md) instead. 61 | - Using copy and paste on RDP over HTTPS is cumbersome. 62 | -------------------------------------------------------------------------------- /docs/README-RDP/rdp_browser.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-RDP/rdp_browser.gif -------------------------------------------------------------------------------- /docs/README-RDP/rdp_credentials.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-RDP/rdp_credentials.gif -------------------------------------------------------------------------------- /docs/README-SHARING-NON-TERRAFORM.md: -------------------------------------------------------------------------------- 1 | ### Overview 2 | 3 | You can share your environment with non-terraform users.
4 | 5 | ### Pre-requisites 6 | 7 | - You must have enabled the RDP server in your `etc/bluedata_infra.tfvars` file (`rdp_server_enabled=true`) 8 | - Don't forget to run `./bin/terraform_apply.sh` after updating variables 9 | 10 | ### Instructions 11 | 12 | - Non-terraform users will need to install the AWS CLI 13 | - An IAM user with very limited permissions has been created for the non-terraform user 14 | - The AWS access key and secret key for the IAM user are in `generated/non_terraform_user_scripts.txt` 15 | - The script contains a command allowing users to **start/stop the EC2 instances** 16 | - The script contains a command allowing users to **update the NACL and Security Groups** to permit access from their IP address 17 | - The script contains a command allowing users to **retrieve the RDP host public IP address** 18 | 19 | ### Optional 20 | 21 | - You can provide users access to the environment using a VPN (L2TP+IPsec) 22 | - You can add additional users to the VPN - see [here](https://github.com/bluedata-community/bluedata-demo-env-aws-terraform/blob/master/docs/README-VPN.md#add-vpn-users) 23 | - It is recommended to enable an EIP for the RDP server in your `etc/bluedata_infra.tfvars` file (`create_eip_rdp_linux_server=true`) so users don't have to keep updating the VPN server IP address 24 | - Don't forget to run `./bin/terraform_apply.sh` after updating variables 25 | -------------------------------------------------------------------------------- /docs/README-SSL-CERTIFICATES/install_docs_ssl_instruction.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-SSL-CERTIFICATES/install_docs_ssl_instruction.png -------------------------------------------------------------------------------- /docs/README-TAINT.MD: -------------------------------------------------------------------------------- 1 | If you need to remove a host but don't want to destroy everything (i.e. with `terraform destroy`): 2 | 3 | ``` 4 | $ terraform state list 5 | ... 6 | module.controller.aws_instance.controller 7 | ...
8 | ``` 9 | 10 | then 11 | 12 | ``` 13 | $ terraform taint module.controller.aws_instance.controller 14 | $ ./bin/terraform_apply.sh # or ./bin/terraform_apply_accept.sh 15 | ``` 16 | -------------------------------------------------------------------------------- /docs/README-TROUBLESHOOTING/mapr_datatap_browser_empty.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-TROUBLESHOOTING/mapr_datatap_browser_empty.png -------------------------------------------------------------------------------- /docs/README-TROUBLESHOOTING/mapr_datatap_browser_fixed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-TROUBLESHOOTING/mapr_datatap_browser_fixed.png -------------------------------------------------------------------------------- /docs/README-TROUBLESHOOTING/unsupported_source_instance_type.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-TROUBLESHOOTING/unsupported_source_instance_type.png -------------------------------------------------------------------------------- /docs/README-VPN.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | The VPN is based on: 4 | 5 | - https://github.com/halo/macosvpn 6 | - https://www.softether.org/ 7 | - https://hub.docker.com/r/siomiz/softethervpn/ (at the time of writing, this uses the latest RTM release of SoftEther) 8 | 9 | Note: 10 | 11 | - The VPN server is provided by [SoftEther](https://www.softether.org/) running on the RDP Linux server. 12 | - SoftEther is used because it does not have the two-user limitation that OpenVPN has. 13 | - The SoftEther VPN server is only accessible by whitelisted client IP addresses, e.g. those managed by terraform. 14 | - The Mac VPN client (described below) is set up NOT to route all traffic through the VPN. 15 | 16 | ## Server setup 17 | 18 | - run `./bin/vpn_server_setup.sh` to create the VPN - you can run this multiple times (e.g. after adding new users). 19 | 20 | ![vpn server setup](./README-VPN/vpn_server_setup.gif) 21 | 22 | ## Mac OS VPN client management 23 | 24 | - run `sudo ./bin/vpn_mac_connect.sh` to create the VPN and connect to it. 25 | - run `sudo ./bin/vpn_mac_connect_with_keepalive.sh` to create the VPN, connect to it, and periodically check the connection. 26 | 27 | 28 | ![vpn mac connect](./README-VPN/vpn_mac_connect.gif) 29 | 30 | - run `sudo ./generated/vpn_mac_delete.sh` to delete the VPN 31 | - run `sudo ./generated/vpn_mac_status.sh` to report on the VPN status 32 | 33 | 34 | ## Non Mac OS VPN client management 35 | 36 | - The VPN is L2TP+IPsec 37 | - The pre-shared key is stored in: `./generated/vpn_shared_key` 38 | - Usernames and passwords are stored in: `./generated/vpn_users` 39 | 40 | ## Add VPN Users 41 | 42 | - Multiple usernames and passwords may be set with the following pattern: `username:password;user2:pass2;user3:pass3` 43 | - Username and password are separated by `:`. Each pair of username:password should be separated by `;` 44 | - If not set, a single user account with the username "user1" and a random weak password is created.
45 | - You can add users to the file: `./generated/vpn_users` 46 | - If the file `./generated/vpn_users` doesn't exist on running `./generated/vpn_server_setup.sh`, it will be created automatically 47 | - The pre-shared key is created automatically in the file `./generated/vpn_shared_key` 48 | 49 | -------------------------------------------------------------------------------- /docs/README-VPN/mac-setup01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-VPN/mac-setup01.png -------------------------------------------------------------------------------- /docs/README-VPN/mac-setup02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-VPN/mac-setup02.png -------------------------------------------------------------------------------- /docs/README-VPN/vpn_mac_connect.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-VPN/vpn_mac_connect.gif -------------------------------------------------------------------------------- /docs/README-VPN/vpn_server_setup.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README-VPN/vpn_server_setup.gif -------------------------------------------------------------------------------- /docs/README/create_from_scratch.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README/create_from_scratch.gif -------------------------------------------------------------------------------- /docs/README/project_init.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/docs/README/project_init.gif -------------------------------------------------------------------------------- /etc/bluedata_infra.tfvars_example: -------------------------------------------------------------------------------- 1 | ######################################################### 2 | # You will need to change the variables in this section # 3 | ######################################################### 4 | 5 | profile = "default" # You shouldn't need to change this 6 | region = "eu-west-3" # Change to your preferred region - ensure you have enough spare VPCs in your region. 7 | az = "eu-west-3a" # Change to your preferred AZ 8 | project_id = "<>-hcp" # Change your project name. 9 | # E.g. csnow-hcp-demo (with no spaces) 10 | # ** project_id maximum length is 18 chars ** 11 | 12 | # If you want to allow other clients to have full network access to this environment, add them here. 13 | # These IPs need a suffix such as /32 for specific IP address. Use https://ifconfig.me to find your IP 14 | 15 | additional_client_ip_list = [ 16 | # E.g. 
"1.1.1.1/32","2.2.2.2/32" 17 | ] 18 | 19 | 20 | worker_count = 4 21 | 22 | # Set to true to install HPE CP with embedded DF (mapr) 23 | embedded_df = true 24 | 25 | # When using private buckets (s3://) set the epic_dl_url to your s3 url else use a https url 26 | epic_dl_url = "s3://csnow-bins/hpe-cp-rhel-release-5.3-3031.bin" 27 | 28 | # set to true for private buckets (s3://...) otherwise set to false 29 | epid_dl_url_needs_presign = true 30 | 31 | 32 | ##################################################################### 33 | # For a full list of settings, see 'bluedata_infra.tfvars_template' # 34 | ##################################################################### 35 | 36 | # TIP: Parameters given to this script are passed to 'terraform apply' 37 | # Example: ./bin/terraform_apply.sh -var='ad_server_enabled=false' 38 | -------------------------------------------------------------------------------- /etc/bluedata_infra.tfvars_example_mlops: -------------------------------------------------------------------------------- 1 | Deprecated. Please use bluedata_infra.tfvars_example_picasso_mlops. -------------------------------------------------------------------------------- /etc/bluedata_infra.tfvars_example_picasso: -------------------------------------------------------------------------------- 1 | Deprecated. Please use bluedata_infra.tfvars_example_picasso_mlops. -------------------------------------------------------------------------------- /etc/bluedata_infra.tfvars_example_picasso_mlops: -------------------------------------------------------------------------------- 1 | ######################################################### 2 | # You will need to change the variables in this section # 3 | ######################################################### 4 | 5 | profile = "default" # You shouldn't need to change this 6 | region = "eu-west-2" # Change to your preferred region - ensure you have enough spare VPCs in your region. 7 | az = "eu-west-2a" # Change to your preferred AZ 8 | project_id = "<>-hcp" # Change your project name. 9 | # E.g. csnow-hcp-demo (with no spaces) 10 | # ** project_id maximum length is 18 chars ** 11 | 12 | # If you want to allow other clients to have full network access to this environment, add them here. 13 | # These IPs need a suffix such as /32 for specific IP address. Use https://ifconfig.me to find your IP 14 | 15 | additional_client_ip_list = [ 16 | # E.g. 
"1.1.1.1/32","2.2.2.2/32" 17 | ] 18 | 19 | ######################################################################## 20 | # You probably won't will need to change the variables in this section # 21 | ######################################################################## 22 | 23 | 24 | # for use with ./bin/create_new_environment_from_scratch_with_picasso_and_mlops.sh) 25 | 26 | worker_count = 11 27 | 28 | # Note: these instance types have been verified for eu-west-2 29 | wkr_instance_types = [ 30 | 31 | ### Start Picasso + MLOPS Cluster ### 32 | 33 | # picasso masters 34 | "r5a.xlarge", 35 | "r5a.xlarge", 36 | "r5a.xlarge", 37 | 38 | # picasso workers 39 | "m4.4xlarge", 40 | "m4.4xlarge", 41 | "m4.4xlarge", 42 | "m4.4xlarge", 43 | "m4.4xlarge", 44 | 45 | # MLOPS workers 46 | "m5a.4xlarge", 47 | "m5a.4xlarge", 48 | "m5a.4xlarge", 49 | 50 | ### End Picasso + MLOPS Cluster ### 51 | ] 52 | 53 | 54 | embedded_df = false 55 | 56 | epic_dl_url = "s3://csnow-bins/hpe-cp-rhel-release-5.3.2-3046.bin" 57 | epid_dl_url_needs_presign = true 58 | -------------------------------------------------------------------------------- /etc/bluedata_infra_eks.tfvars_template: -------------------------------------------------------------------------------- 1 | # You can specify an EKS cluster here 2 | 3 | create_eks_cluster = true 4 | eks_instance_type = "t3.2xlarge" 5 | eks_scaling_config_desired_size = 1 # must be >= 1 6 | eks_scaling_config_max_size = 1 # must be >= 1 7 | eks_scaling_config_min_size = 1 # must be >= 1 8 | eks_subnet2_cidr_block = "10.1.2.0/24" # you shouldn't need to change this 9 | eks_subnet3_cidr_block = "10.1.3.0/24" # you shouldn't need to change this 10 | eks_subnet2_az_suffix = "b" # you shouldn't need to change this 11 | eks_subnet3_az_suffix = "c" # you shouldn't need to change this 12 | 13 | # After terraform has created the EKS cluster, you can retrieve the endpoint details 14 | # for adding to the HPE CP UI using `terraform output`. The variables are named: 15 | # 16 | # eks-server-url 17 | # eks-ca-certificate 18 | # eks-bearer-token 19 | # 20 | # You can display these variable values with: ./scripts/eks_setup.sh 21 | # 22 | # Alternatively, you can automate importing your eks cluster with: ./scripts/eks_imports.sh -------------------------------------------------------------------------------- /etc/bluedata_infra_gpu_workers.tfvars_template: -------------------------------------------------------------------------------- 1 | # 2 | # NOTE: you have to manually install GPU drivers on GPU workers before adding to HPE CP 3 | # 4 | 5 | gpu_worker_count = 0 # How many hosts do you want for EPIC or K8S (with GPUs)? 6 | gpu_worker_instance_type = "g4dn.xlarge" # Specify the GPU worker host instance type 7 | gpu_worker_has_disk_for_df = false # Should the GPU worker hsot have a persistent disk for Data Fabric? 8 | -------------------------------------------------------------------------------- /etc/bluedata_infra_instance_types.tfvars: -------------------------------------------------------------------------------- 1 | ########################################################################## 2 | # You MAY need to change these types if not available in your AWS region # 3 | # You can heck at: https://aws.amazon.com/ec2/pricing/on-demand/ # 4 | # See docs/README-TROUBLESHOOTING.MD#error-launching-source-instance. 
# 5 | ########################################################################## 6 | 7 | gtw_instance_type = "m5a.xlarge" 8 | ctr_instance_type = "r5a.xlarge" 9 | wkr_instance_type = "m5.4xlarge" # or provide wkr_instance_types = [] 10 | nfs_instance_type = "t2.small" 11 | ad_instance_type = "t2.small" 12 | rdp_instance_type = "t3.large" 13 | mapr_instance_type = "m5.4xlarge" -------------------------------------------------------------------------------- /etc/bluedata_infra_mapr.tfvars_template: -------------------------------------------------------------------------------- 1 | 2 | mapr_cluster_1_count = 0 # How many hosts do you want for MAPR CLUSTER 1? (0 or 3) 3 | mapr_cluster_1_name = "demo1.mapr.com" 4 | mapr_cluster_2_count = 0 # How many hosts do you want for MAPR CLUSTER 2? (0 or 3) 5 | mapr_cluster_2_name = "demo2.mapr.com" -------------------------------------------------------------------------------- /etc/bluedata_infra_misc.tfvars: -------------------------------------------------------------------------------- 1 | ############################################################## 2 | ###### You probably won't need to change anything below ###### 3 | ############################################################## 4 | 5 | ssh_prv_key_path = "./generated/controller.prv_key" 6 | ssh_pub_key_path = "./generated/controller.pub_key" 7 | 8 | vpc_cidr_block = "10.1.0.0/16" 9 | subnet_cidr_block = "10.1.0.0/24" 10 | 11 | selinux_disabled = true 12 | ad_server_enabled = true # Do not disable this unless you are doing a manual installation 13 | 14 | rdp_server_enabled = true # Do not disable this unless you are doing a manual installation 15 | rdp_server_operating_system = "LINUX" 16 | create_eip_rdp_linux_server = false 17 | 18 | 19 | # you may need to set this if you are deploying your instances in a different region to the s3 bucket on your install binary 20 | epic_dl_url_presign_options = "--region eu-west-1" 21 | 22 | # epic installer options 23 | # epic_options = "--skipeula" # epic < 5.2 24 | epic_options = "--skipeula --default-password admin123" # epic >= 5.2 25 | 26 | create_eip_gateway = true 27 | create_eip_controller = false -------------------------------------------------------------------------------- /etc/hpecp_cli_logging.conf: -------------------------------------------------------------------------------- 1 | [loggers] 2 | keys=root,HPECP_CLI 3 | 4 | [handlers] 5 | keys=consoleHandler,fileHandler 6 | 7 | [formatters] 8 | keys=consoleFormatter,fileFormatter 9 | 10 | [logger_root] 11 | level=INFO 12 | handlers=consoleHandler,fileHandler 13 | 14 | [logger_HPECP_CLI] 15 | level=DEBUG 16 | handlers=fileHandler 17 | qualname=HPECP_CLI 18 | propagate=0 19 | 20 | [handler_consoleHandler] 21 | level=INFO 22 | class=StreamHandler 23 | formatter=consoleFormatter 24 | args=(os.devnull,) 25 | 26 | [handler_fileHandler] 27 | level=DEBUG 28 | class=FileHandler 29 | formatter=fileFormatter 30 | # hpecp_cli_log_file is set by terraform 31 | args=("${hpecp_cli_log_file}","a") 32 | 33 | [formatter_consoleFormatter] 34 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 35 | datefmt= 36 | 37 | [formatter_fileFormatter] 38 | format=%(asctime)s - %(name)s - %(levelname)s - %(message)s 39 | datefmt= 40 | -------------------------------------------------------------------------------- /etc/port_forwards.sh_template: -------------------------------------------------------------------------------- 1 | -L 443:localhost:443 2 | -L 8443:localhost:8443 
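For reference, the two `-L` options in the port-forwards template above are standard ssh local port forwards for the HPE CP UI (443) and the MCS UI (8443) on the controller. The sketch below shows how such options could be used by hand; it is an illustration only, not a script shipped by this repository, and it assumes the generated controller key and the `centos` login used elsewhere in this project.

```bash
#!/bin/bash
# Hypothetical illustration only - not part of this repository's generated scripts.
# Forwards the HPE CP UI (443) and MCS (8443) from the controller to this machine.
set -euo pipefail

CONTROLLER_IP="$1"   # pass the controller public IP, e.g. taken from 'terraform output'

# Binding local port 443 usually requires root, hence sudo; alternatively forward only high ports.
exec sudo ssh -o StrictHostKeyChecking=no \
     -i ./generated/controller.prv_key \
     -L 443:localhost:443 \
     -L 8443:localhost:8443 \
     "centos@${CONTROLLER_IP}"
```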
-------------------------------------------------------------------------------- /etc/postcreate_core.sh_template: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | source "./scripts/variables.sh" 7 | source "./scripts/functions.sh" 8 | 9 | print_header "Installing HPECP CLI to local machine" 10 | export HPECP_CONFIG_FILE=generated/hpecp.conf 11 | export HPECP_LOG_CONFIG_FILE=${PWD}/generated/hpecp_cli_logging.conf 12 | pip3 uninstall -y hpecp || true # uninstall if exists 13 | pip3 install --user --upgrade --quiet hpecp 14 | 15 | HPECP_VERSION=$(hpecp config get --query 'objects.[bds_global_version]' --output text) 16 | echo "HPECP Version: ${HPECP_VERSION}" 17 | 18 | print_header "Configuring Global Active Directory in HPE CP" 19 | ./bin/experimental/01_configure_global_active_directory.sh 20 | 21 | print_header "Adding a Gateway to HPE CP" 22 | ./bin/experimental/02_gateway_add.sh 23 | 24 | if [[ "${INSTALL_WITH_SSL}" == "True" ]]; then 25 | print_header "Setting Gateway SSL" 26 | ./bin/experimental/set_gateway_ssl.sh 27 | fi 28 | 29 | print_header "Configuring Active Directory in Demo Tenant" 30 | ./bin/experimental/setup_demo_tenant_ad.sh 31 | 32 | print_header "Enable Virtual Nodes on Controller" 33 | ./bin/experimental/epic_enable_virtual_node_assignment.sh 34 | -------------------------------------------------------------------------------- /grep_log.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | grep '| /api/v2/k8scluster/1 | c1 |' 2021* | awk 'BEGIN { FS = "|" } ; { print $1, $6 }' 4 | 5 | -------------------------------------------------------------------------------- /hpecp_env_conf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ ${BASH_SOURCE[0]} != $0 ]]; then 4 | export HPECP_CONFIG_FILE=./generated/hpecp.conf 5 | source <(hpecp autocomplete bash) 6 | else 7 | echo Usage: . $0 8 | fi 9 | 10 | 11 | -------------------------------------------------------------------------------- /learn/COURSE_CONTENT_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | Some information about this course content. 4 | 5 | ## Objectives 6 | 7 | - Objective 1 8 | - Objective 2 9 | - Objective n 10 | 11 | ## Pre-requisites 12 | 13 | - Pre-requisite 1 14 | - Pre-requisite 2 15 | - Pre-requisite n 16 | 17 | ## Learning Content 18 | 19 | ### Step 1 20 | 21 | Information on step 1. 22 | 23 | ### Step 2 24 | 25 | Information on step 2 26 | 27 | ### Step 3 28 | 29 | Information on step 3 30 | 31 | ## Learning Summary 32 | 33 | Reflection on the this course content. 34 | 35 | -------------------------------------------------------------------------------- /learn/Data_Fabric_Administration/tiering.md: -------------------------------------------------------------------------------- 1 | Follow the instructions [here](https://github.com/bluedata-community/bluedata-demo-env-aws-terraform/blob/master/docs/README-DATA-TIERING.md) 2 | -------------------------------------------------------------------------------- /learn/HCP_Administration/add_a_gateway_host.md: -------------------------------------------------------------------------------- 1 | coming soon ... 
2 | -------------------------------------------------------------------------------- /learn/HCP_Administration/configure_active_directory.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /learn/HCP_Administration/manual_install_deploying_the_platform.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | In this section you will learn how to manually deploy the HPE Container Platform. 4 | 5 | ## Pre-requisites 6 | 7 | - Complete: [Manual Install Overview](../README.md#product-overview) 8 | - Complete: [Installation Overview](./manual_install_overview.md) 9 | - Complete: [Planning the Deployment](./manual_install_planning_the_deployment.md) 10 | - Complete: [System Requirements](./manual_install_system_requirements.md) 11 | 12 | ## Learning Content 13 | 14 | Read the [documentation](http://docs.bluedata.com/home) sections: 15 | 16 | - Deploying the platform 17 | - Support and troubleshooting 18 | 19 | See the image below: 20 | 21 | 22 | 23 | ## Learning Summary 24 | 25 | In this section you learned how to manually deploy the HPE Container Platform. 26 | 27 | -------------------------------------------------------------------------------- /learn/HCP_Administration/manual_install_deploying_the_platform/docs_menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/learn/HCP_Administration/manual_install_deploying_the_platform/docs_menu.png -------------------------------------------------------------------------------- /learn/HCP_Administration/manual_install_overview.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | In this section, you will learn the general workflow for installing HPE Container Platform. 4 | 5 | ## Pre-requisites 6 | 7 | - [Product Overview](../README.md#product-overview) 8 | - [Foundation Technical Concepts](../README.md#foundation-technical-concepts) 9 | 10 | ## Learning Content 11 | 12 | This content is based on an older version of HPE Container Platform, but it is still very relevant: 13 | 14 | - Watch: [BlueData Virtual Training: Part 1 - BlueData Installation](https://www.youtube.com/watch?v=aBsyx2Nvk6A) [40 mins] 15 | - Watch: [BlueData Virtual Training: Part 2 - Site Configuration](https://www.youtube.com/watch?v=TND1bv8e3HA) [25 mins] 16 | - Watch: [BlueData Virtual Training: Part 3 - Tenant Configuration](https://www.youtube.com/watch?v=_MnoepQyhfU) [20 mins] 17 | - Watch: [BlueData Virtual Training: Part 4 - Deploying Applications using BlueData EPIC](https://www.youtube.com/watch?v=nyh-7YF8FMA) [25 mins] 18 | - Watch: [BlueData Virtual Training: Part 5 - Upgrading the BlueData EPIC platform](https://www.youtube.com/watch?v=eZfPxn938U0) [5 mins] 19 | 20 | ## Learning Summary 21 | 22 | In this section, you have been walked through the general workflow for installing HPE Container Platform.
23 | 24 | -------------------------------------------------------------------------------- /learn/HCP_Administration/manual_install_planning_the_deployment.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | In this section you will learn how to plan the HPE Container Platform deployment 4 | 5 | ## Pre-requisites 6 | 7 | - Watch: [Manual Install Overview](../README.md#product-overview) 8 | 9 | ## Learning Content 10 | 11 | - Read: [Planning Overview](http://docs.bluedata.com/50_planning-overview) 12 | - Read: [Networking Considerations](http://docs.bluedata.com/50_networking-considerations) 13 | - Read: [EPIC Network Planning](http://docs.bluedata.com/50_epic-network-planning) 14 | - Read: [Storage](http://docs.bluedata.com/50_storage) 15 | - Read: [Platform Resource Planning](http://docs.bluedata.com/50_platform-resource-planning) 16 | 17 | ## Learning Summary 18 | 19 | In this section, you have learnt how to plan the deployment of HPE Container Platform. 20 | 21 | -------------------------------------------------------------------------------- /learn/HCP_Administration/manual_install_system_requirements.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | It is very important before installing HPE Container Platform that you understand the System requirements. 4 | 5 | ## Pre-requisites 6 | 7 | - Complete: [Manual Install Overview](./manual_install_overview.md) 8 | 9 | ## Learning Content 10 | 11 | General System Requirements [documentation](http://docs.bluedata.com/50_system-requirements-overview) 12 | 13 | 22 | 23 | Kubernetes System Requirements [documentation](http://docs.bluedata.com/50_system-requirements-overview) 24 | 25 | 32 | 33 | ## Learning Summary 34 | 35 | In this section, you have learnt the system requirements for installing HPE Container Platform. 36 | 37 | -------------------------------------------------------------------------------- /learn/HCP_Administration/provision_demo_environment.md: -------------------------------------------------------------------------------- 1 | coming soon ... 2 | --- 3 | 4 | ## Overview 5 | 6 | Some information about this course content. 7 | 8 | ## Objectives 9 | 10 | - Objective 1 11 | - Objective 2 12 | - Objective n 13 | 14 | ## Pre-requisites 15 | 16 | - Pre-requisite 1 17 | - Pre-requisite 2 18 | - Pre-requisite n 19 | 20 | ## Learning Content 21 | 22 | ### Step 1 23 | 24 | Information on step 1. 25 | 26 | ### Step 2 27 | 28 | Information on step 2 29 | 30 | ### Step 3 31 | 32 | Information on step 3 33 | 34 | ## Learning Summary 35 | 36 | Reflection on the this course content. 37 | 38 | -------------------------------------------------------------------------------- /learn/HCP_Administration/shutdown_demo_environment.md: -------------------------------------------------------------------------------- 1 | coming soon ... 2 | -------------------------------------------------------------------------------- /learn/HCP_Administration/startup_demo_environment.md: -------------------------------------------------------------------------------- 1 | coming soon ... 2 | -------------------------------------------------------------------------------- /learn/HCP_Administration/status_demo_environment.md: -------------------------------------------------------------------------------- 1 | coming soon ... 
2 | -------------------------------------------------------------------------------- /learn/HCP_Administration/teardown_demo_environment.md: -------------------------------------------------------------------------------- 1 | coming soon ... 2 | -------------------------------------------------------------------------------- /learn/HCP_Advanced_Concepts/universal_concepts.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | In this section you learn universal concepts about the HPE Container Platform. 4 | 5 | Note that you have already covered some of this content in the section "HCP Foundation Concepts". 6 | 7 | ## Pre-requisites 8 | 9 | - Complete: [Foundation Concepts](../README.md#foundation-technical-concepts) 10 | - Complete: [Manually Install HCP](..//README.md#manually-install-hcp) 11 | 12 | ## Learning Content 13 | 14 | Read the [documentation](http://docs.bluedata.com/home) sections: 15 | 16 | - Universal Concepts 17 | 18 | See the image below: 19 | 20 | 21 | 22 | 23 | ## Learning Summary 24 | 25 | In this section you learned universal concepts about the HPE Container Platform 26 | -------------------------------------------------------------------------------- /learn/HCP_Advanced_Concepts/universal_concepts/docs_menu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hpe-container-platform-community/hcp-demo-env-aws-terraform/7b8ab9ed0f5abccfd588659e0861634a69f4e354/learn/HCP_Advanced_Concepts/universal_concepts/docs_menu.png -------------------------------------------------------------------------------- /learn/HCP_Foundation_Concepts/controller_gateway_and_worker_hosts.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | HPE Container Platform has four main types of hosts - understanding the different types of hosts is core foundation knowledge. 4 | 5 | ## Objectives 6 | 7 | - Understand the different types of HPE Container Platform hosts. 8 | 9 | ## Pre-requisites 10 | 11 | - Bookmark http://docs.bluedata.com! 12 | 13 | ## Learning Content 14 | 15 | - READ: http://docs.bluedata.com/50_controller-gateway-and-worker-hosts 16 | 17 | ## Learning Summary 18 | 19 | The HPE Container Platform docs site is a gold mine of information, it is highly recommended that you get used to using this site! 20 | 21 | ## Test your understanding 22 | 23 | - Which type of host is where you initially install HPE Container Platform? 24 | - What type of host manages the EPIC workers? 25 | - What type of host manages the Kubernetes workers? 26 | - Do you need a Gateway host? 27 | - Can you run Kubernetes clusters on EPIC hosts? 28 | - Can you run EPIC clusters on Kubernetes hosts? 29 | -------------------------------------------------------------------------------- /learn/HCP_Foundation_Concepts/datataps.md: -------------------------------------------------------------------------------- 1 | coming soon ... 2 | 3 | --- 4 | 5 | ## Overview 6 | 7 | DataTaps ... 8 | 9 | ## Objectives 10 | 11 | - ... 12 | 13 | ## Pre-requisites 14 | 15 | - Bookmark http://docs.bluedata.com! 16 | 17 | ## Learning Content 18 | 19 | - READ: http://docs.bluedata.com/50_about-datataps 20 | 21 | ## Learning Summary 22 | 23 | The HPE Container Platform docs site is a gold mine of information, it is highly recommended that you get used to using this site! 24 | 25 | ## Test your understanding 26 | 27 | - ... 
28 | -------------------------------------------------------------------------------- /learn/HCP_Foundation_Concepts/software-components.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | HPE Container Platform has different software components that you need to understand. 4 | 5 | ## Objectives 6 | 7 | - Understand the different software components of the HPE Container Platform. 8 | 9 | ## Pre-requisites 10 | 11 | - Bookmark https://docs.containerplatform.hpe.com/home! 12 | 13 | ## Learning Content 14 | 15 | - READ: https://docs.containerplatform.hpe.com/53/reference/universal-concepts/Software_Components.html 16 | 17 | ## Learning Summary 18 | 19 | The HPE Container Platform docs site is a gold mine of information; it is highly recommended that you get used to using this site! 20 | 21 | ## Test your understanding 22 | 23 | - What are the three different types of roles that the HPE Container Platform has? 24 | - What is the temporary file system called in this document? 25 | 26 | ## Task 27 | 28 | - Draw the diagram and describe the different components to a colleague. 29 | -------------------------------------------------------------------------------- /learn/Product_Overview/hpe_container_strategy_market_overview.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | This is the first in a series of videos highlighting the market overview behind the HPE Container Platform strategy. 4 | 5 | ## Objectives 6 | 7 | - Understand the HPE Container Platform strategy market overview. 8 | 9 | ## Pre-requisites 10 | 11 | - None. 12 | 13 | ## Learning Content 14 | 15 | - WATCH: https://www.hpe.com/us/en/solutions/container-platform.brightcove.440cd5a2-c9b5-4b15-ad78-9f5adc672fb7.html.html 16 | 17 | ## Learning Summary 18 | 19 | - TODO 20 | 21 | ## Test your understanding 22 | 23 | - TODO 24 | 25 | ## Task 26 | 27 | - TODO 28 | -------------------------------------------------------------------------------- /learn/hpe_container_strategy_market_overview.md: -------------------------------------------------------------------------------- 1 | ## Overview 2 | 3 | - The objective of this session is to gain an understanding of the HPE Container Platform Strategy. 4 | 5 | ## Pre-requisites 6 | 7 | - None 8 | 9 | ## Learning Content 10 | 11 | Watch the following videos: 12 | 13 | - [HPE Container Strategy - Transformation challenges](https://www.hpe.com/us/en/solutions/container-platform.brightcove.c94f2cc7-92b7-44b1-aae1-8ba2992f0578.html.html) 14 | - [HPE Container Strategy - HPE solution](https://www.hpe.com/us/en/solutions/container-platform.brightcove.20b7d874-85ca-4c3f-8765-9a6c6492bbdb.html.html) 15 | - [HPE Container Platform - How it Works](https://www.hpe.com/us/en/solutions/container-platform.brightcove.df01eb6c-2faf-4eb6-b637-82d0080fb1b9.html) 16 | - [HPE Data Fabric](https://www.hpe.com/us/en/solutions/container-platform.brightcove.2c86bcce-641c-4df8-99b9-7f43afb4c1f1.html.html) 17 | - [Operationalizing the ML Lifecycle](https://www.hpe.com/us/en/solutions/container-platform.brightcove.b7e8ce3b-f18e-49e7-bd3c-d55b11c4ca9a.html) 18 | 19 | ## Test your knowledge 20 | 21 | Coming soon ... 22 | 23 | ## Learning Summary 24 | 25 | Coming soon ...
26 | 27 | -------------------------------------------------------------------------------- /modules/module-ad-server/files/ldif_modify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | ldapmodify -H ldap://localhost:389 \ 6 | -D 'cn=Administrator,CN=Users,DC=samdom,DC=example,DC=com' \ 7 | -f /home/centos/ad_set_posix_classes.ldif \ 8 | -w '5ambaPwd@' -c 2>&1 >ad_set_posix_classes.log -------------------------------------------------------------------------------- /modules/module-ad-server/files/run_ad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | sudo docker pull rsippl/samba-ad-dc > /dev/null 6 | 7 | sudo docker run --privileged --restart=unless-stopped \ 8 | -p 53:53 -p 53:53/udp -p 88:88 -p 88:88/udp -p 135:135 -p 137-138:137-138/udp -p 139:139 -p 389:389 \ 9 | -p 389:389/udp -p 445:445 -p 464:464 -p 464:464/udp -p 636:636 -p 1024-1044:1024-1044 -p 3268-3269:3268-3269 \ 10 | -e "SAMBA_DOMAIN=samdom" \ 11 | -e "SAMBA_REALM=samdom.example.com" \ 12 | -e "SAMBA_ADMIN_PASSWORD=5ambaPwd@" \ 13 | -e "ROOT_PASSWORD=R00tPwd@" \ 14 | -e "LDAP_ALLOW_INSECURE=true" \ 15 | -e "SAMBA_HOST_IP=$(hostname --all-ip-addresses |cut -f 1 -d' ')" \ 16 | -v /home/centos/ad_user_setup.sh:/usr/local/bin/custom.sh \ 17 | --name samdom \ 18 | --dns 127.0.0.1 \ 19 | -d \ 20 | --entrypoint "/bin/bash" \ 21 | rsippl/samba-ad-dc \ 22 | -c "chmod +x /usr/local/bin/custom.sh &&. /init.sh app:start" -------------------------------------------------------------------------------- /modules/module-ad-server/main.tf: -------------------------------------------------------------------------------- 1 | /******************* Instance: AD Server ********************/ 2 | 3 | resource "aws_instance" "ad_server" { 4 | ami = var.ad_ec2_ami 5 | instance_type = var.ad_instance_type 6 | key_name = var.key_name 7 | vpc_security_group_ids = var.vpc_security_group_ids 8 | subnet_id = var.subnet_id 9 | 10 | count = var.ad_server_enabled == true ? 1 : 0 11 | 12 | root_block_device { 13 | volume_type = "gp2" 14 | volume_size = 400 15 | tags = { 16 | Name = "${var.project_id}-ad-server-root-ebs" 17 | Project = var.project_id 18 | user = var.user 19 | deployment_uuid = var.deployment_uuid 20 | } 21 | } 22 | 23 | tags = { 24 | Name = "${var.project_id}-instance-ad-server" 25 | Project = var.project_id 26 | user = var.user 27 | deployment_uuid = var.deployment_uuid 28 | } 29 | 30 | provisioner "file" { 31 | connection { 32 | type = "ssh" 33 | user = "centos" 34 | host = aws_instance.ad_server[0].public_ip 35 | private_key = file(var.ssh_prv_key_path) 36 | agent = false 37 | } 38 | source = "${path.module}/files/" 39 | destination = "/home/centos/" 40 | } 41 | 42 | provisioner "remote-exec" { 43 | connection { 44 | type = "ssh" 45 | user = "centos" 46 | host = aws_instance.ad_server[0].public_ip 47 | private_key = file(var.ssh_prv_key_path) 48 | agent = false 49 | } 50 | inline = [ 51 | < 0 ? aws_instance.ad_server[0].private_ip : null 3 | } 4 | 5 | output "public_ip" { 6 | value = var.ad_server_enabled && length(aws_instance.ad_server) > 0 ? aws_instance.ad_server[0].public_ip : null 7 | } 8 | 9 | output "ssh_command" { 10 | value = var.ad_server_enabled && length(aws_instance.ad_server) > 0 ? 
"ssh -o StrictHostKeyChecking=no -i \"${var.ssh_prv_key_path}\" centos@${aws_instance.ad_server[0].public_ip}" : "ad server not enabled" 11 | } 12 | 13 | output "instance_id" { 14 | value = var.ad_server_enabled && length(aws_instance.ad_server) > 0 ? aws_instance.ad_server[0].id : null 15 | } 16 | 17 | output "instance_arn" { 18 | value = var.ad_server_enabled && length(aws_instance.ad_server) > 0 ? aws_instance.ad_server[0].arn : null 19 | } -------------------------------------------------------------------------------- /modules/module-ad-server/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | type = string 3 | } 4 | variable "user" { 5 | type = string 6 | } 7 | variable "ssh_prv_key_path" { 8 | type = string 9 | } 10 | variable "ad_ec2_ami" { 11 | type = string 12 | } 13 | variable "ad_instance_type" { 14 | type = string 15 | } 16 | variable "ad_server_enabled" { 17 | type = bool 18 | } 19 | variable "key_name" { 20 | type = string 21 | } 22 | variable "vpc_security_group_ids" { 23 | type = list 24 | } 25 | variable "subnet_id" { 26 | type = string 27 | } 28 | variable "deployment_uuid" { 29 | type = string 30 | } 31 | variable "ad_admin_group" { 32 | type = string 33 | } 34 | variable "ad_member_group" { 35 | type = string 36 | } 37 | -------------------------------------------------------------------------------- /modules/module-ad-server/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | -------------------------------------------------------------------------------- /modules/module-controller/outputs.tf: -------------------------------------------------------------------------------- 1 | 2 | output "id" { 3 | value = aws_instance.controller.id 4 | } 5 | 6 | output "arn" { 7 | value = aws_instance.controller.arn 8 | } 9 | 10 | output "public_ip" { 11 | value = var.create_eip ? aws_eip.controller[0].public_ip : aws_instance.controller.public_ip 12 | } 13 | 14 | output "private_ip" { 15 | value = aws_instance.controller.private_ip 16 | } 17 | 18 | output "public_dns" { 19 | value = var.create_eip ? 
aws_eip.controller[0].public_dns : aws_instance.controller.public_dns 20 | } 21 | 22 | output "private_dns" { 23 | value = aws_instance.controller.private_dns 24 | } -------------------------------------------------------------------------------- /modules/module-controller/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | type = string 3 | } 4 | variable "user" { 5 | type = string 6 | } 7 | variable "aws_zone_id" { 8 | type = string 9 | } 10 | variable "az" { 11 | type = string 12 | } 13 | variable "client_cidr_block" { 14 | type = string 15 | } 16 | variable "additional_client_ip_list" { 17 | type = list 18 | } 19 | variable "vpc_cidr_block" { 20 | type = string 21 | } 22 | variable "subnet_cidr_block" { 23 | type = string 24 | } 25 | variable "key_name" { 26 | type = string 27 | } 28 | variable "security_group_ids" { 29 | type = list 30 | } 31 | variable "subnet_id" { 32 | type = string 33 | } 34 | variable "ec2_ami" { 35 | type = string 36 | } 37 | variable "ctr_instance_type" { 38 | type = string 39 | } 40 | variable "ssh_prv_key_path" { 41 | type = string 42 | } 43 | variable "create_eip" { 44 | type = bool 45 | } 46 | variable "deployment_uuid" { 47 | type = string 48 | } 49 | -------------------------------------------------------------------------------- /modules/module-controller/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | -------------------------------------------------------------------------------- /modules/module-gateway/main.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "aws_eip" "gateway" { 3 | vpc = true 4 | count = var.create_eip ? 1 : 0 5 | tags = { 6 | Name = "${var.project_id}-gateway" 7 | Project = var.project_id 8 | user = var.user 9 | deployment_uuid = var.deployment_uuid 10 | } 11 | } 12 | 13 | // EIP associations 14 | 15 | resource "aws_eip_association" "eip_assoc_gateway" { 16 | count = var.create_eip ? 
1 : 0 17 | instance_id = aws_instance.gateway.id 18 | allocation_id = aws_eip.gateway[0].id 19 | } 20 | 21 | // Instance 22 | 23 | resource "aws_instance" "gateway" { 24 | ami = var.ec2_ami 25 | instance_type = var.gtw_instance_type 26 | key_name = var.key_name 27 | vpc_security_group_ids = var.security_group_ids 28 | subnet_id = var.subnet_id 29 | 30 | root_block_device { 31 | volume_type = "gp2" 32 | volume_size = 400 33 | tags = { 34 | Name = "${var.project_id}-gateway-ebs" 35 | Project = var.project_id 36 | user = var.user 37 | deployment_uuid = var.deployment_uuid 38 | } 39 | } 40 | 41 | tags = { 42 | Name = "${var.project_id}-instance-gateway" 43 | Project = var.project_id 44 | user = var.user 45 | deployment_uuid = var.deployment_uuid 46 | } 47 | 48 | provisioner "remote-exec" { 49 | connection { 50 | type = "ssh" 51 | user = "centos" 52 | host = aws_instance.gateway.public_ip 53 | private_key = file(var.ssh_prv_key_path) 54 | agent = false 55 | } 56 | inline = [ 57 | "sudo yum update -y -q" 58 | ] 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /modules/module-gateway/outputs.tf: -------------------------------------------------------------------------------- 1 | output "id" { 2 | value = aws_instance.gateway.id 3 | } 4 | output "arn" { 5 | value = aws_instance.gateway.arn 6 | } 7 | output "private_ip" { 8 | value = aws_instance.gateway.private_ip 9 | } 10 | output "private_dns" { 11 | value = aws_instance.gateway.private_dns 12 | } 13 | output "public_ip" { 14 | value = var.create_eip ? aws_eip.gateway[0].public_ip : aws_instance.gateway.public_ip 15 | } 16 | output "public_dns" { 17 | value = var.create_eip ? aws_eip.gateway[0].public_dns : aws_instance.gateway.public_dns 18 | } -------------------------------------------------------------------------------- /modules/module-gateway/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | type = string 3 | } 4 | variable "user" { 5 | type = string 6 | } 7 | variable "aws_zone_id" { 8 | type = string 9 | } 10 | variable "az" { 11 | type = string 12 | } 13 | variable "client_cidr_block" { 14 | type = string 15 | } 16 | variable "additional_client_ip_list" { 17 | type = list 18 | } 19 | variable "vpc_cidr_block" { 20 | type = string 21 | } 22 | variable "subnet_cidr_block" { 23 | type = string 24 | } 25 | variable "key_name" { 26 | type = string 27 | } 28 | variable "security_group_ids" { 29 | type = list 30 | } 31 | variable "subnet_id" { 32 | type = string 33 | } 34 | variable "ec2_ami" { 35 | type = string 36 | } 37 | variable "gtw_instance_type" { 38 | type = string 39 | } 40 | variable "ssh_prv_key_path" { 41 | type = string 42 | } 43 | variable "create_eip" { 44 | type = bool 45 | } 46 | variable "deployment_uuid" { 47 | type = string 48 | } 49 | -------------------------------------------------------------------------------- /modules/module-gateway/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | -------------------------------------------------------------------------------- /modules/module-network/main-dns-knative.tf: -------------------------------------------------------------------------------- 1 | // retrieve the ingress gateway host IPs with 2 | /* 3 | 4 | kubectl get po -l istio=ingressgateway -n istio-system \ 5 | -o 
jsonpath='{.items[*].status.hostIP}' 6 | 7 | // e.g. 10.1.0.193 10.1.0.132 10.1.0.174 8 | 9 | */ 10 | 11 | resource "aws_route53_record" "knative" { 12 | zone_id = aws_route53_zone.main.zone_id 13 | name = "*.knative.${var.dns_zone_name}" 14 | type = "A" 15 | ttl = "300" 16 | records = [ "10.1.0.193","10.1.0.132","10.1.0.174" ] 17 | } 18 | 19 | 20 | // now patch knative with the domain 21 | /* 22 | 23 | kubectl patch configmap/config-domain \ 24 | --namespace knative-serving \ 25 | --type merge \ 26 | --patch '{"data":{"knative.samdom.example.com":""}}' 27 | 28 | 29 | */ -------------------------------------------------------------------------------- /modules/module-network/main-dns.tf: -------------------------------------------------------------------------------- 1 | 2 | // Prototype for ROUTE 53 internal IP addresses 3 | 4 | 5 | resource "aws_vpc_dhcp_options" "mydhcp" { 6 | domain_name = "${var.aws_region}.compute.internal" 7 | domain_name_servers = ["AmazonProvidedDNS"] 8 | tags = { 9 | Name = "${var.project_id}-vpc-dhcp-options" 10 | Project = var.project_id 11 | user = var.user 12 | deployment_uuid = var.deployment_uuid 13 | } 14 | } 15 | resource "aws_vpc_dhcp_options_association" "dns_resolver" { 16 | vpc_id = aws_vpc.main.id 17 | dhcp_options_id = aws_vpc_dhcp_options.mydhcp.id 18 | } 19 | 20 | // DNS PART ZONE AND RECORDS 21 | resource "aws_route53_zone" "main" { 22 | name = var.dns_zone_name 23 | vpc { 24 | vpc_id = aws_vpc.main.id 25 | } 26 | comment = var.project_id 27 | } 28 | 29 | resource "aws_route53_record" "controller" { 30 | zone_id = aws_route53_zone.main.zone_id 31 | name = "controller.${var.dns_zone_name}" 32 | type = "A" 33 | ttl = "300" 34 | records = [ var.controller_private_ip ] 35 | } 36 | 37 | resource "aws_route53_record" "ad" { 38 | count = var.ad_server_enabled ? 1 : 0 39 | zone_id = aws_route53_zone.main.zone_id 40 | name = "ad.${var.dns_zone_name}" 41 | type = "A" 42 | ttl = "300" 43 | records = [ var.ad_private_ip ] 44 | } 45 | 46 | resource "aws_route53_record" "rdp" { 47 | count = var.rdp_linux_server_enabled ? 
1 : 0 48 | zone_id = aws_route53_zone.main.zone_id 49 | name = "rdp.${var.dns_zone_name}" 50 | type = "A" 51 | ttl = "300" 52 | records = [ var.rdp_private_ip ] 53 | } 54 | 55 | resource "aws_route53_record" "gateway" { 56 | zone_id = aws_route53_zone.main.zone_id 57 | name = "gateway.${var.dns_zone_name}" 58 | type = "A" 59 | ttl = "300" 60 | records = [ var.gateway_private_ip ] 61 | } 62 | 63 | resource "aws_route53_record" "workers" { 64 | count = length(var.workers_private_ip) 65 | zone_id = aws_route53_zone.main.zone_id 66 | name = "worker.${count.index}.${var.dns_zone_name}" 67 | type = "A" 68 | ttl = "300" 69 | records = [ var.workers_private_ip[count.index] ] 70 | } 71 | -------------------------------------------------------------------------------- /modules/module-network/main.tf: -------------------------------------------------------------------------------- 1 | /******************* VPC ********************/ 2 | 3 | resource "aws_vpc" "main" { 4 | cidr_block = var.vpc_cidr_block 5 | enable_dns_hostnames = true 6 | enable_dns_support = true 7 | # assign_generated_ipv6_cidr_block = true 8 | 9 | tags = { 10 | Name = "${var.project_id}-vpc" 11 | Project = var.project_id 12 | user = var.user 13 | deployment_uuid = var.deployment_uuid 14 | } 15 | } 16 | 17 | resource "aws_subnet" "main" { 18 | vpc_id = aws_vpc.main.id 19 | cidr_block = var.subnet_cidr_block 20 | availability_zone_id = var.aws_zone_id 21 | map_public_ip_on_launch = true 22 | 23 | # ipv6_cidr_block = cidrsubnet(aws_vpc.main.ipv6_cidr_block, 8, 1) 24 | # assign_ipv6_address_on_creation = true 25 | 26 | tags = { 27 | Name = "${var.project_id}-subnet" 28 | Project = var.project_id 29 | user = var.user 30 | deployment_uuid = var.deployment_uuid 31 | } 32 | } 33 | 34 | /******************* Route Table ********************/ 35 | 36 | resource "aws_route_table" "main" { 37 | vpc_id = aws_vpc.main.id 38 | 39 | tags = { 40 | Name = "${var.project_id}-main-route-table" 41 | Project = var.project_id 42 | user = var.user 43 | deployment_uuid = var.deployment_uuid 44 | } 45 | } 46 | 47 | resource "aws_route" "main" { 48 | route_table_id = aws_route_table.main.id 49 | destination_cidr_block = "0.0.0.0/0" 50 | gateway_id = aws_internet_gateway.main.id 51 | } 52 | 53 | resource "aws_route" "softether" { 54 | count = var.rdp_linux_server_enabled ? 
1 : 0 55 | route_table_id = aws_route_table.main.id 56 | destination_cidr_block = var.softether_cidr_block 57 | network_interface_id = var.rdp_network_interface_id 58 | } 59 | 60 | resource "aws_route_table_association" "main" { 61 | subnet_id = aws_subnet.main.id 62 | route_table_id = aws_route_table.main.id 63 | } 64 | 65 | /******************* Internet Gateway ********************/ 66 | 67 | resource "aws_internet_gateway" "main" { 68 | vpc_id = aws_vpc.main.id 69 | 70 | tags = { 71 | Name = "${var.project_id}-internet-gateway" 72 | Project = var.project_id 73 | user = var.user 74 | deployment_uuid = var.deployment_uuid 75 | } 76 | } -------------------------------------------------------------------------------- /modules/module-network/outputs.tf: -------------------------------------------------------------------------------- 1 | output "security_group_main_id" { 2 | value = aws_security_group.main.id 3 | } 4 | 5 | output "security_group_allow_ssh_from_world_id" { 6 | value = aws_security_group.allow_ssh_from_world.id 7 | } 8 | 9 | output "security_group_allow_rdp_from_world_id" { 10 | value = aws_security_group.allow_rdp_from_world.id 11 | } 12 | 13 | output "security_group_allow_all_from_client_ip" { 14 | value = aws_security_group.allow_all_from_specified_ips.id 15 | } 16 | 17 | output "security_group_allow_all_from_client_ip_arn" { 18 | value = aws_security_group.allow_all_from_specified_ips.arn 19 | } 20 | 21 | output "vpc_main_id" { 22 | value = aws_vpc.main.id 23 | } 24 | 25 | output "vpc_main_arn" { 26 | value = aws_vpc.main.arn 27 | } 28 | 29 | output "route_main_id" { 30 | value = aws_route_table.main.id 31 | } 32 | 33 | output "subnet_main_id" { 34 | value = aws_subnet.main.id 35 | } 36 | 37 | output "network_acl_id" { 38 | value = aws_network_acl.main.id 39 | } 40 | 41 | output "network_acl_arn" { 42 | value = aws_network_acl.main.arn 43 | } 44 | 45 | output "sg_allow_all_from_specified_ips" { 46 | value = aws_security_group.allow_all_from_specified_ips.id 47 | } -------------------------------------------------------------------------------- /modules/module-network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | type = string 3 | } 4 | variable "user" { 5 | type = string 6 | } 7 | variable "aws_zone_id" { 8 | type = string 9 | } 10 | variable "aws_region" { 11 | type = string 12 | } 13 | variable "client_cidr_block" { 14 | type = string 15 | } 16 | variable "additional_client_ip_list" { 17 | type = list 18 | } 19 | variable "vpc_cidr_block" { 20 | type = string 21 | } 22 | variable "subnet_cidr_block" { 23 | type = string 24 | } 25 | variable "dns_zone_name" { 26 | type = string 27 | } 28 | variable "controller_private_ip" { 29 | type = string 30 | } 31 | variable "gateway_private_ip" { 32 | type = string 33 | } 34 | variable "workers_private_ip" { 35 | type = list 36 | } 37 | variable "ad_server_enabled" { 38 | type = bool 39 | } 40 | variable "ad_private_ip" { 41 | type = string 42 | } 43 | variable "rdp_network_interface_id" { 44 | type = string 45 | } 46 | variable "rdp_private_ip" { 47 | type = string 48 | } 49 | variable "rdp_linux_server_enabled" { 50 | type = string 51 | } 52 | variable "softether_cidr_block" { 53 | type = string 54 | } 55 | variable "deployment_uuid" { 56 | type = string 57 | } -------------------------------------------------------------------------------- /modules/module-network/versions.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | -------------------------------------------------------------------------------- /modules/module-nfs-server/main.tf: -------------------------------------------------------------------------------- 1 | /******************* Instance: NFS Server (e.g. for ML OPS) ********************/ 2 | 3 | resource "aws_instance" "nfs_server" { 4 | ami = var.nfs_ec2_ami 5 | instance_type = var.nfs_instance_type 6 | key_name = var.key_name 7 | vpc_security_group_ids = var.vpc_security_group_ids 8 | subnet_id = var.subnet_id 9 | 10 | count = var.nfs_server_enabled == true ? 1 : 0 11 | 12 | root_block_device { 13 | volume_type = "gp2" 14 | volume_size = 400 15 | tags = { 16 | Name = "${var.project_id}-nfs-server-root-ebs" 17 | Project = var.project_id 18 | user = var.user 19 | deployment_uuid = var.deployment_uuid 20 | } 21 | } 22 | 23 | tags = { 24 | Name = "${var.project_id}-instance-nfs-server" 25 | Project = var.project_id 26 | user = var.user 27 | deployment_uuid = var.deployment_uuid 28 | } 29 | 30 | provisioner "remote-exec" { 31 | connection { 32 | type = "ssh" 33 | user = "centos" 34 | host = self.public_ip # use 'self' - a resource may not reference its own address from a provisioner 35 | private_key = file(var.ssh_prv_key_path) 36 | agent = false 37 | } 38 | inline = [ 39 | "sudo yum -y -q install nfs-utils", 40 | "sudo mkdir /nfsroot", 41 | "echo '/nfsroot *(rw,no_root_squash,no_subtree_check)' | sudo tee /etc/exports", 42 | "sudo exportfs -r", 43 | "sudo systemctl enable nfs-server.service", 44 | "sudo systemctl start nfs-server.service" 45 | ] 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /modules/module-nfs-server/outputs.tf: -------------------------------------------------------------------------------- 1 | output "private_ip" { 2 | value = var.nfs_server_enabled ? aws_instance.nfs_server[0].private_ip : null 3 | } 4 | 5 | output "nfs_folder" { 6 | value = var.nfs_server_enabled ? "/nfsroot" : "nfs server not enabled" 7 | } 8 | 9 | output "instance_id" { 10 | value = var.nfs_server_enabled ? aws_instance.nfs_server[0].id : null 11 | } 12 | 13 | output "instance_arn" { 14 | value = var.nfs_server_enabled ? aws_instance.nfs_server[0].arn : null 15 | } 16 | 17 | output "ssh_command" { 18 | value = var.nfs_server_enabled ?
"ssh -o StrictHostKeyChecking=no -i \"${var.ssh_prv_key_path}\" centos@${aws_instance.nfs_server[0].public_ip}" : "nfs server not enabled" 19 | } -------------------------------------------------------------------------------- /modules/module-nfs-server/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | type = string 3 | } 4 | variable "user" { 5 | type = string 6 | } 7 | variable "ssh_prv_key_path" { 8 | type = string 9 | } 10 | variable "nfs_ec2_ami" { 11 | type = string 12 | } 13 | variable "nfs_instance_type" { 14 | type = string 15 | } 16 | variable "nfs_server_enabled" { 17 | type = bool 18 | } 19 | variable "key_name" { 20 | type = string 21 | } 22 | variable "vpc_security_group_ids" { 23 | type = list 24 | } 25 | variable "subnet_id" { 26 | type = string 27 | } 28 | variable "deployment_uuid" { 29 | type = string 30 | } 31 | -------------------------------------------------------------------------------- /modules/module-nfs-server/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | } 7 | required_version = ">= 0.13" 8 | } 9 | -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Desktop/bluedata-docs.desktop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Encoding=UTF-8 4 | Name=HCP Docs 5 | Type=Link 6 | URL=http://docs.bluedata.com/ 7 | Icon=firefox 8 | Exec=firefox -private-window -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Desktop/code.desktop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Name=Visual Studio Code 4 | Comment=Code Editing. Redefined. 
5 | GenericName=Text Editor 6 | Exec=/usr/share/code/code --no-sandbox --unity-launch %F 7 | Icon=/usr/share/pixmaps/com.visualstudio.code.png 8 | Type=Application 9 | StartupNotify=false 10 | StartupWMClass=Code 11 | Categories=Utility;TextEditor;Development;IDE; 12 | MimeType=text/plain;inode/directory; 13 | Actions=new-empty-window; 14 | Keywords=vscode; 15 | 16 | X-Desktop-File-Install-Version=0.23 17 | 18 | [Desktop Action new-empty-window] 19 | Name=New Empty Window 20 | Exec=/usr/share/code/code --no-sandbox --new-window %F 21 | Icon=/usr/share/pixmaps/com.visualstudio.code.png -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Desktop/github-project.desktop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Encoding=UTF-8 4 | Name=Github Project Page 5 | Type=Link 6 | URL=https://github.com/hpe-container-platform-community/hcp-demo-env-aws-terraform 7 | Icon=firefox 8 | Exec=firefox -private-window -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Desktop/mapr-password.desktop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Name=MAPR Admin Password 4 | GenericName=MAPR Admin Password 5 | Exec=mate-terminal --working-directory="/home/ubuntu/" --command "ssh controller cat /opt/bluedata/mapr/conf/mapr-admin-pass; bash" 6 | Icon=utilities-terminal 7 | Type=Application 8 | Terminal=false 9 | Categories=System;GTK;Utility;TerminalEmulator; 10 | StartupNotify=true 11 | Keywords=MATE;terminal;shell;prompt;command;commandline; -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Desktop/ssh_ad.desktop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Name=SSH Active Directory 4 | GenericName=SSH Active Directory 5 | Exec=mate-terminal --working-directory="/home/ubuntu/" --command "ssh ad" 6 | Icon=utilities-terminal 7 | Type=Application 8 | Terminal=false 9 | Categories=System;GTK;Utility;TerminalEmulator; 10 | StartupNotify=true 11 | Keywords=MATE;terminal;shell;prompt;command;commandline; -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Desktop/ssh_controller.desktop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Name=SSH Controller 4 | GenericName=SSH Controller 5 | Exec=mate-terminal --working-directory="/home/ubuntu/" --command "ssh controller" 6 | Icon=utilities-terminal 7 | Type=Application 8 | Terminal=false 9 | Categories=System;GTK;Utility;TerminalEmulator; 10 | StartupNotify=true 11 | Keywords=MATE;terminal;shell;prompt;command;commandline; -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Desktop/ssh_gateway.desktop: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Name=SSH Gateway 4 | GenericName=SSH Gateway 5 | Exec=mate-terminal --working-directory="/home/ubuntu/" --command "ssh gateway" 6 | Icon=utilities-terminal 7 | Type=Application 8 | Terminal=false 9 | Categories=System;GTK;Utility;TerminalEmulator; 10 | StartupNotify=true 11 | 
Keywords=MATE;terminal;shell;prompt;command;commandline; -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Templates/HCP.admin.desktop.tpl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Encoding=UTF-8 4 | Name=HCP Admin Console 5 | Type=Link 6 | URL=https://${controller_private_ip}/bdswebui/login/ 7 | Icon=firefox 8 | Exec=firefox -private-window -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Templates/MCS.admin.desktop.tpl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Encoding=UTF-8 4 | Name=HCP MAPR Control System console 5 | Type=Link 6 | URL=https://${controller_private_ip}:8443 7 | Icon=firefox 8 | Exec=firefox -private-window -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/Templates/startup.desktop.tpl: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env xdg-open 2 | [Desktop Entry] 3 | Name=HCP Links 4 | Exec=firefox https://${controller_private_ip} https://${controller_private_ip}/apidocs http://docs.bluedata.com https://github.com/hpe-container-platform-community/hcp-demo-env-aws-terraform https://${controller_private_ip}:8443 5 | Icon=firefox 6 | Type=Application 7 | StartupNotify=true 8 | Terminal=false -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/outputs.tf: -------------------------------------------------------------------------------- 1 | output "private_ip" { 2 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? aws_instance.rdp_server[0].private_ip : null 3 | } 4 | 5 | locals { 6 | rdp_created = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 7 | } 8 | 9 | output "public_ip" { 10 | value = local.rdp_created ? (var.create_eip ? aws_eip.rdp_server[0].public_ip : aws_instance.rdp_server[0].public_ip) : null 11 | } 12 | 13 | output "instance_id" { 14 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? aws_instance.rdp_server[0].id : null 15 | } 16 | 17 | output "instance_arn" { 18 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? aws_instance.rdp_server[0].arn : null 19 | } 20 | 21 | output "ssh_command" { 22 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? "ssh -o StrictHostKeyChecking=no -i \"${var.ssh_prv_key_path}\" centos@${aws_instance.rdp_server[0].public_ip}" : "rdp server not enabled" 23 | } 24 | 25 | output "enc_administrator_password" { 26 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? aws_instance.rdp_server[0].password_data : null 27 | } 28 | output "network_interface_id" { 29 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? 
aws_instance.rdp_server[0].primary_network_interface_id : null 30 | } -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | type = string 3 | } 4 | variable "user" { 5 | type = string 6 | } 7 | variable "ssh_prv_key_path" { 8 | type = string 9 | } 10 | variable "rdp_ec2_ami" { 11 | type = string 12 | } 13 | variable "rdp_instance_type" { 14 | type = string 15 | } 16 | variable "rdp_server_enabled" { 17 | type = bool 18 | } 19 | variable "key_name" { 20 | type = string 21 | } 22 | variable "vpc_security_group_ids" { 23 | type = list 24 | } 25 | variable "subnet_id" { 26 | type = string 27 | } 28 | variable "az" { 29 | type = string 30 | } 31 | variable "ca_cert" { 32 | type = string 33 | } 34 | variable "controller_private_ip" { 35 | type = string 36 | } 37 | variable "create_eip" { 38 | type = bool 39 | } 40 | variable "deployment_uuid" { 41 | type = string 42 | } 43 | -------------------------------------------------------------------------------- /modules/module-rdp-server-linux/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | template = { 7 | source = "hashicorp/template" 8 | } 9 | } 10 | required_version = ">= 0.13" 11 | } 12 | -------------------------------------------------------------------------------- /modules/module-rdp-server/main.tf: -------------------------------------------------------------------------------- 1 | /******************* Instance: RDP Server ********************/ 2 | 3 | data "template_file" "userdata_win" { 4 | template = <<EOF 5 | <powershell> 6 | $SourceURL = "https://download-installer.cdn.mozilla.net/pub/firefox/releases/73.0.1/win64/en-US/Firefox%20Setup%2073.0.1.msi"; 7 | $Installer = $env:TMP + "\firefox.msi"; 8 | Invoke-WebRequest $SourceURL -OutFile $Installer; 9 | Start-Process -FilePath $Installer -Args "/quiet" -Wait; 10 | Remove-Item $Installer; 11 | 12 | $Path = $env:TEMP; 13 | $Installer = "chrome_installer.exe"; 14 | Invoke-WebRequest "https://dl.google.com/chrome/install/latest/chrome_installer.exe" -OutFile $Path$Installer; 15 | Start-Process -FilePath $Path$Installer -Args "/silent /install" -Verb RunAs -Wait; 16 | Remove-Item $Path$Installer 17 | 18 | </powershell> 19 | <persist>false</persist> 20 | EOF 21 | } 22 | 23 | resource "aws_instance" "rdp_server" { 24 | ami = var.rdp_ec2_ami 25 | instance_type = var.rdp_instance_type 26 | key_name = var.key_name 27 | vpc_security_group_ids = var.vpc_security_group_ids 28 | subnet_id = var.subnet_id 29 | user_data = data.template_file.userdata_win.rendered 30 | get_password_data = true 31 | 32 | lifecycle { 33 | ignore_changes = [ user_data, ] 34 | } 35 | 36 | count = var.rdp_server_enabled == true ?
1 : 0 37 | 38 | root_block_device { 39 | volume_type = "gp2" 40 | volume_size = 400 41 | tags = { 42 | Name = "${var.project_id}-rdp-server-root-ebs" 43 | Project = var.project_id 44 | user = var.user 45 | deployment_uuid = var.deployment_uuid 46 | } 47 | } 48 | 49 | tags = { 50 | Name = "${var.project_id}-instance-rdp-server" 51 | Project = var.project_id 52 | user = var.user 53 | deployment_uuid = var.deployment_uuid 54 | } 55 | } -------------------------------------------------------------------------------- /modules/module-rdp-server/outputs.tf: -------------------------------------------------------------------------------- 1 | output "private_ip" { 2 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? aws_instance.rdp_server[0].private_ip : null 3 | } 4 | 5 | output "public_ip" { 6 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? aws_instance.rdp_server[0].public_ip : null 7 | } 8 | 9 | output "instance_id" { 10 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? aws_instance.rdp_server[0].id : null 11 | } 12 | 13 | output "ssh_command" { 14 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? "ssh -o StrictHostKeyChecking=no -i \"${var.ssh_prv_key_path}\" centos@${aws_instance.rdp_server[0].public_ip}" : "rdp server not enabled" 15 | } 16 | 17 | output "enc_administrator_password" { 18 | value = var.rdp_server_enabled && length(aws_instance.rdp_server) > 0 ? aws_instance.rdp_server[0].password_data : null 19 | } -------------------------------------------------------------------------------- /modules/module-rdp-server/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project_id" { 2 | type = string 3 | } 4 | variable "user" { 5 | type = string 6 | } 7 | variable "ssh_prv_key_path" { 8 | type = string 9 | } 10 | variable "rdp_ec2_ami" { 11 | type = string 12 | } 13 | variable "rdp_instance_type" { 14 | type = string 15 | } 16 | variable "rdp_server_enabled" { 17 | type = bool 18 | } 19 | variable "key_name" { 20 | type = string 21 | } 22 | variable "vpc_security_group_ids" { 23 | type = list 24 | } 25 | variable "subnet_id" { 26 | type = string 27 | } 28 | variable "deployment_uuid" { 29 | type = string 30 | } 31 | -------------------------------------------------------------------------------- /modules/module-rdp-server/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | } 6 | template = { 7 | source = "hashicorp/template" 8 | } 9 | } 10 | required_version = ">= 0.13" 11 | } 12 | -------------------------------------------------------------------------------- /picasso_deploy_loop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | trap ctrl_c INT 4 | 5 | function ctrl_c() { 6 | exit 7 | } 8 | 9 | while : 10 | do 11 | THE_DATE=$(date +"%Y-%m-%dT%H:%M:%S%z") 12 | ./bin/create_new_environment_from_scratch_with_picasso.sh > ${THE_DATE}-picasso.log 2>&1 13 | 14 | if hpecp k8scluster list | grep error 15 | then 16 | ./bin/ssh_controller.sh sudo tar czf - /var/log/bluedata/ > ${THE_DATE}-controller-logs.tar.gz 17 | 18 | source "./scripts/variables.sh" 19 | 20 | for i in "${!WRKR_PUB_IPS[@]}"; do 21 | ssh -o StrictHostKeyChecking=no -i "./generated/controller.prv_key" centos@${WRKR_PUB_IPS[$i]} sudo tar czf - /var/log/bluedata/ > 
${THE_DATE}-${WRKR_PUB_IPS[$i]}-logs.tar.gz 22 | done 23 | fi 24 | ./bin/terraform_destroy_accept.sh 25 | done -------------------------------------------------------------------------------- /resize_cloud9_ebs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Specify the desired volume size in GiB as a command line argument. If not specified, default to 20 GiB. 4 | SIZE=${1:-20} 5 | 6 | # Get the ID of the environment host Amazon EC2 instance. 7 | INSTANCEID=$(curl http://169.254.169.254/latest/meta-data/instance-id) 8 | 9 | # Get the ID of the Amazon EBS volume associated with the instance. 10 | VOLUMEID=$(aws ec2 describe-instances \ 11 | --instance-id $INSTANCEID \ 12 | --query "Reservations[0].Instances[0].BlockDeviceMappings[0].Ebs.VolumeId" \ 13 | --output text) 14 | 15 | # Resize the EBS volume. 16 | aws ec2 modify-volume --volume-id $VOLUMEID --size $SIZE 17 | 18 | # Wait for the resize to finish. 19 | while [ \ 20 | "$(aws ec2 describe-volumes-modifications \ 21 | --volume-id $VOLUMEID \ 22 | --filters Name=modification-state,Values="optimizing","completed" \ 23 | --query "length(VolumesModifications)"\ 24 | --output text)" != "1" ]; do 25 | sleep 1 26 | done 27 | 28 | #Check if we're on an NVMe filesystem 29 | if [ $(readlink -f /dev/xvda) = "/dev/xvda" ] 30 | then 31 | # Rewrite the partition table so that the partition takes up all the space that it can. 32 | sudo growpart /dev/xvda 1 33 | 34 | # Expand the size of the file system. 35 | # Check if we are on AL2 36 | STR=$(cat /etc/os-release) 37 | SUB="VERSION_ID=\"2\"" 38 | if [[ "$STR" == *"$SUB"* ]] 39 | then 40 | sudo xfs_growfs -d / 41 | else 42 | sudo resize2fs /dev/xvda1 43 | fi 44 | 45 | else 46 | # Rewrite the partition table so that the partition takes up all the space that it can. 47 | sudo growpart /dev/nvme0n1 1 48 | 49 | # Expand the size of the file system. 
50 | # Check if we're on AL2 51 | STR=$(cat /etc/os-release) 52 | SUB="VERSION_ID=\"2\"" 53 | if [[ "$STR" == *"$SUB"* ]] 54 | then 55 | sudo xfs_growfs -d / 56 | else 57 | sudo resize2fs /dev/nvme0n1p1 58 | fi 59 | fi 60 | -------------------------------------------------------------------------------- /run_ide.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | IMG=hpecp/hpecp-ide:latest 4 | 5 | if [[ "$(docker images -q $IMG 2> /dev/null)" == "" ]]; then 6 | ./build_ide.sh 7 | fi 8 | 9 | docker run -it --init -p 3000:3000 -v "$(pwd):/home/project:cached" $IMG 10 | -------------------------------------------------------------------------------- /scripts/check_client_ip.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 7 | 8 | source "$SCRIPT_DIR/variables.sh" 9 | 10 | CURR_CLIENT_CIDR_BLOCK="$(curl -s http://ipinfo.io/ip)/32" 11 | 12 | if [[ "$CLIENT_CIDR_BLOCK" = "$CURR_CLIENT_CIDR_BLOCK" ]]; 13 | then 14 | echo "Your client IP address [${CLIENT_CIDR_BLOCK}] has not changed - no need to update AWS NACL or SG rules" 15 | else 16 | echo "*********************************************************************************************************" 17 | echo "Your client IP address was previously [${CLIENT_CIDR_BLOCK}] and is now [${CURR_CLIENT_CIDR_BLOCK}]" 18 | echo "It appears to have changed since you last ran './bin/terraform_apply.sh', so you should run the" 19 | echo "following command to update your environment with your new IP address:" 20 | echo 21 | echo "./bin/terraform_apply.sh" 22 | echo "*********************************************************************************************************" 23 | exit 1 24 | fi -------------------------------------------------------------------------------- /scripts/eks_import.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ ! -z $C9_USER && ( -z $AWS_ACCESS_KEY_ID || -z $AWS_SECRET_ACCESS_KEY ) ]]; 4 | then 5 | echo AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables must be set on Cloud 9 6 | exit 1 7 | fi 8 | 9 | .
hpecp_env_conf.sh 10 | 11 | CLUSTER_NAME=myeks 12 | CLUSTER_DESC=myeks 13 | 14 | terraform output eks-server-url > generated/eks_server_url 15 | 16 | aws eks --region $(terraform output -raw aws_region) update-kubeconfig --name $(terraform output -raw eks-cluster-name) --kubeconfig generated/eks_kubeconfig &>/dev/null 17 | 18 | kubectl --kubeconfig generated/eks_kubeconfig create serviceaccount abc123 &>/dev/null 19 | kubectl --kubeconfig generated/eks_kubeconfig create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=default:abc123 &>/dev/null 20 | SA_TOKEN=$(kubectl --kubeconfig generated/eks_kubeconfig get serviceaccount/abc123 -o jsonpath={.secrets[0].name}) 21 | 22 | set -e 23 | set -u 24 | set -o pipefail 25 | 26 | kubectl --kubeconfig generated/eks_kubeconfig get secret $SA_TOKEN -o jsonpath={'.data.token'} > generated/eks_token.base64 27 | kubectl --kubeconfig generated/eks_kubeconfig get secret $SA_TOKEN -o jsonpath={'.data.ca\.crt'} > generated/eks_ca.crt.base64 28 | 29 | export POD_DNS_DOMAIN=cluster.local 30 | export EKS_SERVER=$(cat generated/eks_server_url) 31 | export EKS_CA_CERT=$(cat generated/eks_ca.crt.base64) 32 | export EKS_TOKEN=$(cat generated/eks_token.base64) 33 | 34 | set -x 35 | 36 | hpecp k8scluster import-cluster \ 37 | --cluster-type eks \ 38 | --name $CLUSTER_NAME \ 39 | --description $CLUSTER_DESC \ 40 | --pod-dns-domain $POD_DNS_DOMAIN \ 41 | --server-url $EKS_SERVER \ 42 | --ca $EKS_CA_CERT \ 43 | --bearer-token $EKS_TOKEN 44 | 45 | 46 | watch hpecp k8scluster list 47 | -------------------------------------------------------------------------------- /scripts/eks_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | terraform output eks-server-url > generated/eks_server_url 5 | 6 | aws eks --region $(terraform output -raw aws_region) update-kubeconfig --name $(terraform output -raw eks-cluster-name) --kubeconfig generated/eks_kubeconfig 7 | 8 | 9 | kubectl --kubeconfig generated/eks_kubeconfig create serviceaccount abc123 10 | kubectl --kubeconfig generated/eks_kubeconfig create clusterrolebinding add-on-cluster-admin --clusterrole=cluster-admin --serviceaccount=default:abc123 11 | SA_TOKEN=$(kubectl --kubeconfig generated/eks_kubeconfig get serviceaccount/abc123 -o jsonpath={.secrets[0].name}) 12 | 13 | set -e 14 | set -u 15 | set -o pipefail 16 | 17 | kubectl --kubeconfig generated/eks_kubeconfig get secret $SA_TOKEN -o jsonpath={'.data.token'} > generated/eks_token.base64 18 | kubectl --kubeconfig generated/eks_kubeconfig get secret $SA_TOKEN -o jsonpath={'.data.ca\.crt'} > generated/eks_ca.crt.base64 19 | 20 | export POD_DNS_DOMAIN=cluster.local 21 | export EKS_SERVER=$(cat generated/eks_server_url) 22 | export EKS_CA_CERT=$(cat generated/eks_ca.crt.base64) 23 | export EKS_TOKEN=$(cat generated/eks_token.base64) 24 | 25 | bold=$(tput bold) 26 | normal=$(tput sgr0) 27 | 28 | echo 29 | echo ${bold}export POD_DNS_DOMAIN${normal}=${POD_DNS_DOMAIN} 30 | echo ${bold}export EKS_SERVER${normal}=${EKS_SERVER} 31 | echo ${bold}export EKS_CA_CERT${normal}=${EKS_CA_CERT} 32 | echo 33 | echo ${bold}export EKS_TOKEN${normal}=${EKS_TOKEN} 34 | 35 | echo 36 | echo 37 | 38 | 39 | #kubectl --kubeconfig generated/eks_kubeconfig describe configmaps/coredns -n kube-system 40 | -------------------------------------------------------------------------------- /scripts/end_user_scripts/patch_datatap_5.1.1.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 7 | 8 | source "$SCRIPT_DIR/../variables.sh" 9 | 10 | echo "Retrieving presigned URL for s3://csnow-bins/libMapRClient_c.so.1" 11 | MAPR_BIN_DL_URL="$(aws s3 presign --region eu-west-1 s3://csnow-bins/libMapRClient_c.so.1)" 12 | echo MAPR_BIN_DL_URL=$MAPR_BIN_DL_URL 13 | 14 | for HOST in $CTRL_PUB_IP ${WRKR_PUB_IPS[@]}; 15 | do 16 | echo HOST=$HOST 17 | ssh -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" centos@$HOST <<-SSH_EOF 18 | set -eu 19 | 20 | # if the host has bdconfig, it has been added to HCP 21 | if command -v bdconfig >/dev/null 2>&1; then 22 | 23 | # now select the controller and worker hosts 24 | EPIC_HOSTS=\$(bdconfig --getworkers | awk '{ if (\$5 != "proxy") { print \$2 } }') 25 | CURR_HOST=\$(hostname -I | awk '{ print \$1 }') 26 | 27 | # Only run on EPIC hosts and not K8S hosts 28 | if [[ \${EPIC_HOSTS} == *"\${CURR_HOST}"* ]]; then 29 | 30 | echo "Patching DataTap on EPIC Host ${HOST} (\${CURR_HOST}) ..." 31 | 32 | sudo systemctl stop bds-worker 33 | 34 | sudo rm -f /usr/lib64/libMapRClient_c.so 35 | 36 | wget -c --progress=bar -e dotbytes=1M -O /home/centos/libMapRClient_c.so.1 "${MAPR_BIN_DL_URL}" # -O saves the download itself to this path 37 | 38 | sudo mv /home/centos/libMapRClient_c.so.1 /usr/lib64/libMapRClient_c.so.1 39 | sudo chown root:root /usr/lib64/libMapRClient_c.so.1 40 | sudo chmod 644 /usr/lib64/libMapRClient_c.so.1 41 | 42 | sudo ln -f -s /usr/lib64/libMapRClient_c.so.1 /usr/lib64/libMapRClient_c.so 43 | 44 | ls -al /usr/lib64/libMapRClient_c.so* 45 | 46 | sudo systemctl start bds-worker 47 | else 48 | echo "Skipping Host ${HOST} ..." 49 | fi 50 | else 51 | echo "Skipping Host ${HOST} ..." 52 | fi 53 | 54 | SSH_EOF 55 | done 56 | -------------------------------------------------------------------------------- /scripts/functions.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e # abort on error 4 | set -u # abort on undefined variable 5 | 6 | function print_term_width { 7 | 8 | # Usage: 9 | # 10 | # print_term_width '=' 11 | 12 | local char=$1 13 | 14 | printf "%`tput cols`s"|tr ' ' "$char" 15 | } 16 | 17 | function print_header { 18 | 19 | # Usage: 20 | # 21 | # print_header 'Some header message' 22 | 23 | local title=$1 24 | 25 | print_term_width '=' 26 | echo $title 27 | print_term_width '=' 28 | } 29 | 30 | function fail { 31 | echo $1 >&2 32 | exit 1 33 | } 34 | 35 | function retry { 36 | local n=1 37 | local max=5 38 | local delay=15 39 | while true; do 40 | "$@" && break || { 41 | if [[ $n -lt $max ]]; then 42 | ((n++)) 43 | echo "Command failed. Attempt $n/$max:" 44 | sleep $delay; 45 | else 46 | fail "The command has failed after $n attempts."
47 | fi 48 | } 49 | done 50 | } -------------------------------------------------------------------------------- /scripts/utility/credentials.json: -------------------------------------------------------------------------------- 1 | { 2 | "access_key": "", 3 | "secret_key": "", 4 | "endpoint_url": "s3://csnow-bins/bdcatalog-centos7-bluedata-notebook-5.2.bin" 5 | } 6 | -------------------------------------------------------------------------------- /scripts/utility/presign_upload.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import json 6 | from argparse import ArgumentParser 7 | from urllib.parse import urlencode, quote_plus 8 | 9 | import boto3 10 | 11 | with open('credentials.json', 'r') as fd: 12 | credentials = json.loads(fd.read()) 13 | 14 | def main(): 15 | 16 | parser = ArgumentParser(description='Creates a Presigned URL') 17 | parser.add_argument('--bucket-name', 18 | dest='bucket_name', 19 | action='store', 20 | required=True, 21 | help='the name of the bucket to upload to') 22 | parser.add_argument('--object-name', 23 | dest='object_name', 24 | action='store', 25 | required=True, 26 | help='the name of the object to upload') 27 | args = parser.parse_args() 28 | 29 | s3 = boto3.client('s3', 30 | aws_access_key_id=credentials.get('access_key'), 31 | aws_secret_access_key=credentials.get('secret_key'), 32 | ) 33 | 34 | response = s3.generate_presigned_url( 35 | ClientMethod='put_object', 36 | Params={'Bucket': args.bucket_name, 'Key': args.object_name}, 37 | ExpiresIn=3600, 38 | ) 39 | 40 | print(f"curl -i --request PUT --upload-file {args.object_name} '{response}'") 41 | 42 | if __name__ == '__main__': 43 | main() 44 | -------------------------------------------------------------------------------- /scripts/variables_dump.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 4 | 5 | source "$SCRIPT_DIR/variables.sh" 6 | 7 | echo ADDITIONAL_CLIENT_IP_LIST="${ADDITIONAL_CLIENT_IP_LIST}" 8 | -------------------------------------------------------------------------------- /scripts/velero_backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | 6 | if [[ ! -d generated ]]; then 7 | echo "This file should be executed from the project directory" 8 | exit 1 9 | fi 10 | 11 | if [[ -z $1 ]]; then 12 | echo Usage: $0 CLUSTER_NAME 13 | exit 1 14 | fi 15 | 16 | export CLUSTER_NAME=$1 17 | 18 | set -u 19 | 20 | ./scripts/check_prerequisites.sh 21 | source ./scripts/variables.sh 22 | 23 | 24 | ssh -o StrictHostKeyChecking=no -i "./generated/controller.prv_key" ubuntu@$RDP_PUB_IP < \$KUBECONFIG 30 | 31 | export KUBECONFIG=~/kubeconfig_$CLUSTER_NAME.conf 32 | 33 | velero create backup $CLUSTER_NAME --wait 34 | 35 | ENDSSH 36 | 37 | -------------------------------------------------------------------------------- /scripts/velero_restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if [[ ! 
-d generated ]]; then 6 | echo "This file should be executed from the project directory" 7 | exit 1 8 | fi 9 | 10 | if [[ -z $1 ]]; then 11 | echo Usage: $0 CLUSTER_NAME 12 | exit 1 13 | fi 14 | 15 | export CLUSTER_NAME=$1 16 | 17 | set -u 18 | 19 | ./scripts/check_prerequisites.sh 20 | source ./scripts/variables.sh 21 | 22 | 23 | ssh -o StrictHostKeyChecking=no -i "./generated/controller.prv_key" ubuntu@$RDP_PUB_IP < \$KUBECONFIG 29 | 30 | export KUBECONFIG=~/kubeconfig_$CLUSTER_NAME.conf 31 | 32 | velero restore create --from-backup $CLUSTER_NAME 33 | 34 | ENDSSH 35 | 36 | -------------------------------------------------------------------------------- /scripts/verify_ad_server_config.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Ensure the AD server is correctly configured 4 | 5 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 6 | 7 | source "$SCRIPT_DIR/variables.sh" 8 | 9 | if [[ "$AD_SERVER_ENABLED" == False ]]; then 10 | echo "Skipping script '$0' because AD Server is not enabled" 11 | exit 1 12 | fi 13 | 14 | set +e 15 | #ssh -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -tt -T centos@${CTRL_PUB_IP} <<-SSH_EOF 16 | 17 | set -x 18 | 19 | # Apply the posix classes ldif. This should have been applied by terraform when the EC2 instance was created. 20 | # If it was applied, it will return 20 here. If not, it will be run for the first time and return 0 if successful. 21 | 22 | ssh -o StrictHostKeyChecking=no -i "${LOCAL_SSH_PRV_KEY_PATH}" -tt -T centos@${AD_PUB_IP} \ 23 | "ldapmodify -H ldap://localhost:389 -D 'cn=Administrator,CN=Users,DC=samdom,DC=example,DC=com' -f /home/centos/ad_set_posix_classes.ldif -w '5ambaPwd@' -c 2>&1 > /dev/null" 24 | #SSH_EOF 25 | 26 | ret_val=$? 27 | 28 | # response code is 20 if the ldif has already been applied 29 | 30 | if [[ "$ret_val" == "0" || "$ret_val" == "20" ]]; then 31 | echo "AD Server appears to be correctly configured." 32 | else 33 | echo "Aborting. AD Server is not correctly configured." 
34 | exit 1 35 | fi 36 | 37 | -------------------------------------------------------------------------------- /static/basic-r-test.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "id": "bb5b5018-628c-42ad-bedf-5ffef45372b0", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "name": "stdout", 11 | "output_type": "stream", 12 | "text": [ 13 | "[1] \"test\"\n" 14 | ] 15 | } 16 | ], 17 | "source": [ 18 | "print('test')" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": null, 24 | "id": "e092fde1-2e1d-48b8-80cf-68c5a424a5c6", 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [] 28 | } 29 | ], 30 | "metadata": { 31 | "kernelspec": { 32 | "display_name": "R", 33 | "language": "R", 34 | "name": "ir" 35 | }, 36 | "language_info": { 37 | "codemirror_mode": "r", 38 | "file_extension": ".r", 39 | "mimetype": "text/x-r-source", 40 | "name": "R", 41 | "pygments_lexer": "r", 42 | "version": "3.6.1" 43 | } 44 | }, 45 | "nbformat": 4, 46 | "nbformat_minor": 5 47 | } -------------------------------------------------------------------------------- /static/datatap.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "id": "ahead-layout", 6 | "metadata": {}, 7 | "source": [ 8 | "TenantStorage was created automatically when the tenant was created.\n", 9 | "\n", 10 | "Use the `hadoop fs` cli to upload a file." 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": 1, 16 | "id": "exterior-population", 17 | "metadata": {}, 18 | "outputs": [], 19 | "source": [ 20 | "! hadoop fs -put -f wine-quality.csv dtap://TenantStorage/" 21 | ] 22 | }, 23 | { 24 | "cell_type": "markdown", 25 | "id": "searching-attention", 26 | "metadata": {}, 27 | "source": [ 28 | "Now check the file has been uploaded." 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 2, 34 | "id": "conventional-vatican", 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "# NBVAL_IGNORE_OUTPUT\n", 39 | "! 
hadoop fs -tail dtap://TenantStorage/wine-quality.csv" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "id": "finished-warrior", 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [] 49 | } 50 | ], 51 | "metadata": { 52 | "kernelspec": { 53 | "display_name": "Python 3", 54 | "language": "python", 55 | "name": "python3" 56 | }, 57 | "language_info": { 58 | "codemirror_mode": { 59 | "name": "ipython", 60 | "version": 3 61 | }, 62 | "file_extension": ".py", 63 | "mimetype": "text/x-python", 64 | "name": "python", 65 | "nbconvert_exporter": "python", 66 | "pygments_lexer": "ipython3", 67 | "version": "3.8.5" 68 | } 69 | }, 70 | "nbformat": 4, 71 | "nbformat_minor": 5 72 | } 73 | -------------------------------------------------------------------------------- /static/get_kf_dashboard_auth_token.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -o pipefail 5 | 6 | 7 | if [[ -z $3 ]]; then 8 | echo Usage: $0 TENANT_ID USERNAME PASSWORD 9 | exit 1 10 | fi 11 | 12 | set -u 13 | 14 | export PATH=$PATH:~/.local/bin 15 | 16 | export TENANT_ID=$1 17 | export CLUSTER_ID=$(hpecp tenant list --query "[?_links.self.href == '$TENANT_ID'] | [0] | [k8s_cluster]" --output text) 18 | 19 | KFURL="https://$(kubectl --kubeconfig <(hpecp k8scluster --id $CLUSTER_ID admin-kube-config) \ 20 | describe svc/istio-ingressgateway -n istio-system \ 21 | | grep hpecp-internal-gateway/80: \ 22 | | sed -e 's/^[ \t]*hpecp-internal-gateway\/80: //')" 23 | 24 | UNAME="$2" 25 | PSWRD="$3" 26 | 27 | #echo KFURL=$KFURL 28 | 29 | STATE=$(curl -s -k ${KFURL} | grep -oP '(?<=state=)[^ ]*"' | cut -d \" -f1) 30 | REQ=$(curl -s -k "${KFURL}/dex/auth?client_id=kubeflow-oidc-authservice&redirect_uri=%2Flogin%2Foidc&response_type=code&scope=profile+email+groups+openid&state=$STATE" | grep -oP '(?<=req=)\w+') 31 | 32 | curl -s -k "${KFURL}/dex/auth/ad?req=$REQ" -H 'Content-Type: application/x-www-form-urlencoded' --data "login=$UNAME&password=$PSWRD" 33 | 34 | CODE=$(curl -s -k "${KFURL}/dex/approval?req=$REQ" | grep -oP '(?<=code=)\w+') 35 | ret=$? 
36 | if [ $ret -ne 0 ]; then 37 | echo "Error" 38 | exit 1 39 | fi 40 | curl -s -k --cookie-jar - "${KFURL}/login/oidc?code=$CODE&state=$STATE" > .dex_session 41 | 42 | AUTH_TOKEN=$(cat .dex_session | grep 'authservice_session' | awk '{ORS="" ; printf "%s", $NF}') 43 | echo $AUTH_TOKEN -------------------------------------------------------------------------------- /static/mlflow-seldon-serving.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 18, 6 | "id": "969105bc-ff57-4e09-85c5-a8518e715b83", 7 | "metadata": {}, 8 | "outputs": [], 9 | "source": [] 10 | }, 11 | { 12 | "cell_type": "code", 13 | "execution_count": 20, 14 | "id": "b6fe2336-4ae8-4f1c-8024-5f32769dcaa6", 15 | "metadata": {}, 16 | "outputs": [ 17 | { 18 | "name": "stdout", 19 | "output_type": "stream", 20 | "text": [ 21 | "{\"data\":{\"names\":[],\"ndarray\":[5.655099099229192]},\"meta\":{}}\n" 22 | ] 23 | } 24 | ], 25 | "source": [ 26 | "%%bash\n", 27 | "\n", 28 | "## Enter the SERVING_URL from the URI left hand menu:\n", 29 | "##\n", 30 | "## -> Model Serving -> MLflow Seldon Endpoints\n", 31 | "\n", 32 | "SERVING_URL=https://ip-10-1-0-43.eu-west-3.compute.internal:10004/seldon/hpecp-tenant-4-g5nj4/model-wineserving/api/v1.0/predictions\n", 33 | "\n", 34 | "TENANT_NAME=k8s-tenant-1\n", 35 | "\n", 36 | "TENANT_ID=$(~/.local/bin/hpecp tenant list --query \"[?tenant_type == 'k8s' && label.name == '$TENANT_NAME'] | [0] | [_links.self.href]\" --output text)\n", 37 | " \n", 38 | "AUTH_TOKEN=$(bash get_kf_dashboard_auth_token.sh $TENANT_ID ad_user1 pass123)\n", 39 | "\n", 40 | "curl -s -k \\\n", 41 | " --cookie \"authservice_session=${AUTH_TOKEN}\" \\\n", 42 | " -X POST -H 'Content-Type: application/json' \\\n", 43 | " -d '{\"data\":{\"names\":[],\"ndarray\":[[7.0,0.27,0.36,20.7,0.045,45.0,170.0,1.001,3.0,0.45,8.8]]}}' \\\n", 44 | " ${SERVING_URL}" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": null, 50 | "id": "8e7ec75f-b233-453e-8bee-22d3b89fbbe7", 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [] 54 | } 55 | ], 56 | "metadata": { 57 | "kernelspec": { 58 | "display_name": "Python 3", 59 | "language": "python", 60 | "name": "python3" 61 | }, 62 | "language_info": { 63 | "codemirror_mode": { 64 | "name": "ipython", 65 | "version": 3 66 | }, 67 | "file_extension": ".py", 68 | "mimetype": "text/x-python", 69 | "name": "python", 70 | "nbconvert_exporter": "python", 71 | "pygments_lexer": "ipython3", 72 | "version": "3.8.8" 73 | } 74 | }, 75 | "nbformat": 4, 76 | "nbformat_minor": 5 77 | } -------------------------------------------------------------------------------- /static/pytest-launcher.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 4, 6 | "id": "57c28325-5790-406f-bbf3-b9a2a0db28b2", 7 | "metadata": {}, 8 | "outputs": [ 9 | { 10 | "name": "stdout", 11 | "output_type": "stream", 12 | "text": [ 13 | "\u001b[1m============================= test session starts ==============================\u001b[0m\n", 14 | "platform linux -- Python 3.7.3, pytest-6.2.3, py-1.10.0, pluggy-0.13.1\n", 15 | "rootdir: /home/ad_user1/examples/mlflow\n", 16 | "plugins: nbval-0.9.6, anyio-2.2.0\n", 17 | "collected 0 items \u001b[0m\n", 18 | "\n", 19 | "\u001b[33m============================ \u001b[33mno tests ran\u001b[0m\u001b[33m in 0.00s\u001b[0m\u001b[33m 
=============================\u001b[0m\n", 20 | "\u001b[31mERROR: file or directory not found: /home/ad_user1/training_cluster_connection_test.ipynb\n", 21 | "\u001b[0m\n" 22 | ] 23 | } 24 | ], 25 | "source": [ 26 | "! ~/.local/bin/py.test --nbval-lax ~/training_cluster_connection_test.ipynb" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "id": "f8292e65-e2df-40bf-ab64-5a88e547d2ff", 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [] 36 | } 37 | ], 38 | "metadata": { 39 | "kernelspec": { 40 | "display_name": "Python 3", 41 | "language": "python", 42 | "name": "python3" 43 | }, 44 | "language_info": { 45 | "codemirror_mode": { 46 | "name": "ipython", 47 | "version": 3 48 | }, 49 | "file_extension": ".py", 50 | "mimetype": "text/x-python", 51 | "name": "python", 52 | "nbconvert_exporter": "python", 53 | "pygments_lexer": "ipython3", 54 | "version": "3.7.3" 55 | } 56 | }, 57 | "nbformat": 4, 58 | "nbformat_minor": 5 59 | } 60 | -------------------------------------------------------------------------------- /terragrunt.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | 3 | extra_arguments "common_vars" { 4 | commands = ["plan", "apply"] 5 | 6 | arguments = [ 7 | "-var-file=./etc/bluedata_infra.tfvars", 8 | "-var=client_cidr_block=${run_cmd("curl", "-s", "http://ipinfo.io/ip")}/32" 9 | ] 10 | } 11 | 12 | after_hook "write_output_json" { 13 | commands = ["apply"] 14 | execute = ["./bin/terraform_output.sh"] 15 | run_on_error = false 16 | } 17 | 18 | 19 | } 20 | -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = ">= 2.59" 7 | } 8 | local = { 9 | version = "~> 1.4.0" 10 | source = "hashicorp/local" 11 | } 12 | null = { 13 | version = "~> 2.1.2" 14 | source = "hashicorp/null" 15 | } 16 | random = { 17 | version = "~> 2.3.0" 18 | source = "hashicorp/random" 19 | } 20 | template = { 21 | version = "~> 2.1.2" 22 | source = "hashicorp/template" 23 | } 24 | } 25 | } 26 | --------------------------------------------------------------------------------