├── .gitignore
├── LICENSE.md
├── README.md
├── azure
├── README.md
├── azure_dns
│ ├── .gitignore
│ ├── README.md
│ ├── azure_dns_domain
│ │ ├── main.tf
│ │ └── provider.tf
│ ├── azure_dns_record
│ │ ├── main.tf
│ │ └── provider.tf
│ ├── azure_net
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── azure_vm
│ │ ├── main.tf
│ │ ├── network.tf
│ │ ├── outputs.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── godaddy_dns_nameservers
│ │ ├── main.tf
│ │ └── provider.tf
│ ├── godaddy_dns_record
│ │ ├── main.tf
│ │ └── provider.tf
│ ├── main.tf
│ ├── outputs.tf
│ ├── terraform.tfvars
│ └── variables.tf
├── azure_vm
│ ├── .gitignore
│ ├── README.md
│ ├── azure_net
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── azure_vm
│ │ ├── main.tf
│ │ ├── network.tf
│ │ ├── outputs.tf
│ │ ├── provider.tf
│ │ └── variables.tf
│ ├── main.tf
│ ├── outputs.tf
│ ├── terraform.tfvars
│ └── variables.tf
└── blob
│ └── blob_storage
│ ├── 1.azure_cli
│ ├── .gitignore
│ ├── Dockerfile
│ ├── README.md
│ ├── create_blob.sh
│ ├── create_env.sh
│ ├── delete_blob.sh
│ ├── docker-compose.yml
│ └── entrypoint.sh
│ └── 2.terraform
│ ├── .gitignore
│ ├── README.md
│ ├── docker-compose.yml
│ ├── main.tf
│ ├── modules
│ ├── azure_blob
│ │ ├── locals.tf
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ └── minio_values
│ │ ├── locals.tf
│ │ ├── main.tf
│ │ ├── minio.env.tmpl
│ │ ├── s3cfg.tmpl
│ │ ├── values.minio_config.yaml.tmpl
│ │ ├── values.minio_secrets.yaml.tmpl
│ │ └── variables.tf
│ ├── provider.tf
│ ├── terraform.tfvars
│ ├── test1_create_only_blob
│ ├── main.tf
│ ├── provider.tf
│ └── values
│ │ └── .gitkeep
│ ├── test2_create_storage_acct
│ ├── main.tf
│ ├── provider.tf
│ └── values
│ │ └── .gitkeep
│ ├── test3_create_all
│ ├── main.tf
│ ├── provider.tf
│ └── values
│ │ └── .gitkeep
│ └── values
│ └── .gitkeep
├── certifications
├── LICENSE
├── README.md
├── change_config
│ ├── README.md
│ ├── ansible
│ │ └── READEME.md
│ ├── chef
│ │ └── README.md
│ ├── puppet
│ │ └── README.md
│ └── saltstack
│ │ ├── README.md
│ │ └── SaltStack-Enterprise-training-syllabus.pdf
├── cicd
│ ├── README.md
│ └── github
│ │ ├── README.md
│ │ └── github-actions-exam-preparation-study-guide__2_.pdf
├── cloud
│ ├── README.md
│ ├── aws
│ │ ├── README.md
│ │ ├── image.png
│ │ ├── images
│ │ │ ├── practioner.png
│ │ │ ├── solutions_architect_associate.png
│ │ │ └── solutions_architect_professional.png
│ │ ├── practitioner.md
│ │ ├── solutions_architect_associate.md
│ │ └── solutions_architect_professional.md
│ ├── azure
│ │ ├── README.md
│ │ ├── administrator.md
│ │ ├── fundamentals.md
│ │ ├── image.png
│ │ ├── images
│ │ │ ├── microsoft-certified-associate-badge.png
│ │ │ ├── microsoft-certified-associate-badge.svg
│ │ │ ├── microsoft-certified-expert-badge.png
│ │ │ ├── microsoft-certified-expert-badge.svg
│ │ │ ├── microsoft-certified-fundamentals-badge.png
│ │ │ └── microsoft-certified-fundamentals-badge.svg
│ │ └── solutions_architect.md
│ └── gcp
│ │ ├── README.md
│ │ ├── architect.md
│ │ ├── architect.png
│ │ ├── engineer.md
│ │ ├── engineer.png
│ │ ├── image.png
│ │ ├── leader.md
│ │ └── leader.png
├── hashicorp
│ ├── README.md
│ ├── consul
│ │ ├── README.md
│ │ ├── consul.labs
│ │ │ ├── 1.deploy_consul
│ │ │ │ └── interactive.md
│ │ │ └── README.md
│ │ └── image.png
│ ├── terraform
│ │ ├── README.md
│ │ └── image.png
│ └── vault
│ │ ├── README.md
│ │ ├── associate.md
│ │ ├── associate.png
│ │ ├── image.png
│ │ ├── ops_pro.md
│ │ └── ops_pro.png
├── k8s
│ ├── README.md
│ ├── capa_argocd
│ │ ├── README.md
│ │ └── gitops_argocd.md
│ ├── cca_cilium
│ │ └── README.md
│ ├── cgoa_gitops
│ │ └── README.md
│ ├── cka
│ │ └── README.md
│ ├── ckad
│ │ └── README.md
│ ├── cks
│ │ └── README.md
│ ├── ica_istio
│ │ └── README.md
│ ├── kcna
│ │ └── README.md
│ └── pca_prometheus
│ │ ├── PCA_Curriculum.pdf
│ │ └── README.md
└── o11y
│ ├── README.md
│ └── elastic
│ └── README.md
├── docker-ansible
├── Brewfile
├── README.md
├── docker-wordpress-shell.yml
├── docker-wordpress.sh
├── docker-wordpress.yml
├── helper-mac-install.sh
└── requirements.txt
├── docker-docker_compose
├── Brewfile
├── README.md
├── compose_static
│ └── docker-compose.yml
├── compose_w_envars
│ ├── .env
│ └── docker-compose.yml
└── helper-mac-install.sh
├── kubernetes
├── .gitignore
├── aks
│ ├── README.md
│ ├── series_0_provisioning
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── azure_cli
│ │ │ ├── 0_basic
│ │ │ │ ├── README.md
│ │ │ │ └── examples
│ │ │ │ │ └── hello_k8s.yaml
│ │ │ ├── 1_dns
│ │ │ │ ├── README.md
│ │ │ │ └── attach_dns.sh
│ │ │ ├── 2_acr
│ │ │ │ ├── README.md
│ │ │ │ ├── attach_acr.sh
│ │ │ │ └── create_acr.sh
│ │ │ ├── 3_calico
│ │ │ │ └── README.md
│ │ │ ├── 4_azure_cni
│ │ │ │ ├── README.md
│ │ │ │ └── scripts
│ │ │ │ │ ├── enable_pod_subnet.sh
│ │ │ │ │ ├── get_ip_addresses.sh
│ │ │ │ │ └── get_role_assignments.sh
│ │ │ ├── 5_pod_id
│ │ │ │ ├── README.md
│ │ │ │ ├── examples
│ │ │ │ │ ├── cert-manager
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── create_pod_identities.sh
│ │ │ │ │ │ ├── helmfile.yaml
│ │ │ │ │ │ └── issuers.yaml
│ │ │ │ │ └── externaldns
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── aad_binding.yaml
│ │ │ │ │ │ ├── create_pod_identity.sh
│ │ │ │ │ │ └── helmfile.yaml
│ │ │ │ └── scripts
│ │ │ │ │ ├── create_dns_sp.sh
│ │ │ │ │ ├── enable_pod_identity.sh
│ │ │ │ │ └── install_pod_identity.sh
│ │ │ ├── demos
│ │ │ │ ├── README.md
│ │ │ │ ├── cert-manager
│ │ │ │ │ ├── README.md
│ │ │ │ │ └── hello-kubernetes
│ │ │ │ │ │ └── helmfile.yaml
│ │ │ │ └── external-dns
│ │ │ │ │ ├── README.md
│ │ │ │ │ └── hello-kubernetes
│ │ │ │ │ └── helmfile.yaml
│ │ │ └── scripts
│ │ │ │ ├── create_cluster.sh
│ │ │ │ ├── create_dns_zone.sh
│ │ │ │ └── delete_cluster.sh
│ │ └── terraform
│ │ │ ├── .gitignore
│ │ │ ├── 0_basic
│ │ │ ├── README.md
│ │ │ ├── main.tf
│ │ │ └── provider.tf
│ │ │ ├── 1_dns
│ │ │ ├── README.md
│ │ │ ├── attach_dns.tf
│ │ │ ├── external_dns.tf
│ │ │ ├── main.tf
│ │ │ ├── provider.tf
│ │ │ └── templates
│ │ │ │ └── external_dns_values.yaml.tmpl
│ │ │ ├── demos
│ │ │ └── hello-kubernetes
│ │ │ │ ├── README.md
│ │ │ │ ├── main.tf
│ │ │ │ └── provider.tf
│ │ │ └── modules
│ │ │ ├── aks
│ │ │ ├── main.tf
│ │ │ ├── outputs.tf
│ │ │ └── variables.tf
│ │ │ ├── dns
│ │ │ ├── main.tf
│ │ │ └── variables.tf
│ │ │ └── group
│ │ │ └── main.tf
│ ├── series_1_endpoint
│ │ ├── README.md
│ │ ├── part_1_externaldns
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── chart-values.yaml.shtmpl
│ │ │ ├── create_file_structure.sh
│ │ │ ├── example.env.sh
│ │ │ ├── examples
│ │ │ │ ├── dgraph
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── chart_values.yaml.shtmpl
│ │ │ │ │ ├── getting_started_data.sh
│ │ │ │ │ └── helmfile.yaml
│ │ │ │ └── hello
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── hello-k8s.yaml.shtmpl
│ │ │ │ │ └── helmfile.yaml
│ │ │ ├── helmfile.yaml
│ │ │ └── terraform
│ │ │ │ └── simple_azure_dns.tf
│ │ ├── part_2_ingress_nginx
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── examples
│ │ │ │ ├── dgraph
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── data
│ │ │ │ │ │ ├── getting_started_data.sh
│ │ │ │ │ │ ├── sw.rdf
│ │ │ │ │ │ └── sw.schema
│ │ │ │ │ └── helmfile.yaml
│ │ │ │ └── hello
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── hello-k8s.yaml.shtmpl
│ │ │ │ │ └── helmfile.yaml
│ │ │ ├── helmfile.yaml
│ │ │ └── scripts
│ │ │ │ ├── config_azure_dns_access.sh
│ │ │ │ ├── create_azure_resources.sh
│ │ │ │ └── create_project_file_structure.sh
│ │ ├── part_3_cert_manager
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── examples
│ │ │ │ ├── dgraph
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── data
│ │ │ │ │ │ ├── getting_started.sh
│ │ │ │ │ │ ├── sw.rdf
│ │ │ │ │ │ └── sw.schema
│ │ │ │ │ └── helmfile.yaml
│ │ │ │ └── hello
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── hello-k8s.yaml.shtmpl
│ │ │ │ │ └── helmfile.yaml
│ │ │ ├── helmfile.yaml
│ │ │ ├── issuer.yaml
│ │ │ └── scripts
│ │ │ │ └── create_project_file_structure.sh
│ │ └── part_4_ingress_nginx_grpc
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── examples
│ │ │ ├── dgraph
│ │ │ │ ├── README.md
│ │ │ │ ├── data
│ │ │ │ │ ├── .python-version
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── getting_started_data.py
│ │ │ │ │ ├── requirements.txt
│ │ │ │ │ ├── sw.nquads.rdf
│ │ │ │ │ └── sw.schema
│ │ │ │ └── helmfile.yaml
│ │ │ └── yages
│ │ │ │ ├── README.md
│ │ │ │ └── helmfile.yaml
│ │ │ ├── helmfile.yaml
│ │ │ ├── issuer.yaml
│ │ │ └── scripts
│ │ │ └── create_project_file_structure.sh
│ └── series_2_network_mgmnt
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── part_0_docker
│ │ ├── README.md
│ │ ├── aks_ssh
│ │ │ ├── Dockerfile
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── helmfile.yaml
│ │ │ └── pod.yaml.envsubst
│ │ ├── linkerd
│ │ │ ├── README.md
│ │ │ ├── current.txt
│ │ │ ├── republish_extension_images.sh
│ │ │ └── republish_linkerd_images.sh
│ │ └── pydgraph
│ │ │ ├── Dockerfile
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── load_data.py
│ │ │ ├── requirements.txt
│ │ │ ├── sw.nquads.rdf
│ │ │ └── sw.schema
│ │ ├── part_1_acr
│ │ ├── README.md
│ │ ├── examples
│ │ │ ├── dgraph
│ │ │ │ ├── README.md
│ │ │ │ └── helmfile.yaml
│ │ │ └── pydgraph
│ │ │ │ ├── README.md
│ │ │ │ ├── deploy.yaml.envsubst
│ │ │ │ └── helmfile.yaml
│ │ └── scripts
│ │ │ ├── create_acr.sh
│ │ │ ├── create_aks_with_acr.sh
│ │ │ ├── create_project_file_structure.sh
│ │ │ └── delete_aks.sh
│ │ ├── part_2_calico
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── examples
│ │ │ ├── dgraph
│ │ │ │ ├── README.md
│ │ │ │ ├── deploy_dgraph.sh
│ │ │ │ ├── helmfile.yaml
│ │ │ │ └── network_policy.yaml
│ │ │ └── pydgraph
│ │ │ │ ├── README.md
│ │ │ │ ├── deploy_pydgraph_client.sh
│ │ │ │ └── helmfile.yaml
│ │ └── scripts
│ │ │ ├── create_acr.sh
│ │ │ ├── create_aks_with_acr.sh
│ │ │ ├── create_project_file_structure.sh
│ │ │ ├── delete_aks.sh
│ │ │ └── print_ip_addr.sh
│ │ ├── part_3_linkerd
│ │ ├── README.md
│ │ ├── certs
│ │ │ └── README.md
│ │ ├── examples
│ │ │ ├── dgraph
│ │ │ │ ├── README.md
│ │ │ │ ├── deploy_dgraph.sh
│ │ │ │ ├── helmfile.yaml
│ │ │ │ └── network_policy.yaml
│ │ │ └── pydgraph
│ │ │ │ ├── README.md
│ │ │ │ ├── deploy_pydgraph_client.sh
│ │ │ │ └── helmfile.yaml
│ │ └── scripts
│ │ │ ├── create_aks_with_acr.sh
│ │ │ ├── create_certs.sh
│ │ │ ├── create_project_file_structure.sh
│ │ │ ├── deploy_ext_jaeger.sh
│ │ │ ├── deploy_ext_viz.sh
│ │ │ ├── deploy_linkerd.sh
│ │ │ └── print_ip_addr.sh
│ │ └── part_4_istio
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── addons
│ │ └── README.md
│ │ ├── examples
│ │ ├── bookinfo
│ │ │ ├── README.md
│ │ │ ├── bookinfo-gateway.yaml
│ │ │ ├── bookinfo.yaml
│ │ │ └── deploy_dgraph.sh
│ │ ├── dgraph
│ │ │ ├── README.md
│ │ │ ├── deploy_dgraph.sh
│ │ │ ├── helmfile.yaml
│ │ │ ├── network_policy.yaml
│ │ │ └── network_policy_helmfile.yaml
│ │ └── pydgraph
│ │ │ ├── README.md
│ │ │ ├── deploy_pydgraph_allow.sh
│ │ │ ├── deploy_pydgraph_deny.sh
│ │ │ └── helmfile.yaml
│ │ └── scripts
│ │ ├── create_aks_with_acr.sh
│ │ ├── create_project_file_structure.sh
│ │ ├── fetch_addons.sh
│ │ ├── get_support_versions.sh
│ │ ├── get_supported_regions.sh
│ │ └── print_ip_addr.sh
├── eks
│ ├── .gitignore
│ ├── README.md
│ ├── baseline
│ │ ├── eksctl
│ │ │ ├── README.md
│ │ │ ├── addons
│ │ │ │ ├── aws_ebs_csi_driver
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── create_esci_irsa.sh
│ │ │ │ │ ├── create_storage_class.sh
│ │ │ │ │ ├── delete.sh
│ │ │ │ │ ├── install_esci_eksaddon.sh
│ │ │ │ │ ├── install_esci_helm.sh
│ │ │ │ │ └── set_default_storage_class.sh
│ │ │ │ └── aws_load_balancer_controller
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── create_albc_irsa.sh
│ │ │ │ │ ├── create_albc_policy.sh
│ │ │ │ │ ├── create_service_account.sh
│ │ │ │ │ ├── delete.sh
│ │ │ │ │ ├── get_albc_policy.sh
│ │ │ │ │ ├── iam_policy.json
│ │ │ │ │ └── install_albc_helm.sh
│ │ │ ├── examples
│ │ │ │ └── dgraph
│ │ │ │ │ ├── README.md
│ │ │ │ │ └── scripts
│ │ │ │ │ ├── delete_dgraph.sh
│ │ │ │ │ ├── deploy_app_dgraph.sh
│ │ │ │ │ ├── deploy_app_ratel.sh
│ │ │ │ │ ├── deploy_netpol_dgraph.sh
│ │ │ │ │ └── deploy_netpol_ratel.sh
│ │ │ └── tests
│ │ │ │ ├── README.md
│ │ │ │ ├── delete_ing.sh
│ │ │ │ ├── delete_lb.sh
│ │ │ │ ├── delete_netpol.sh
│ │ │ │ ├── delete_pv.sh
│ │ │ │ ├── test_ing.sh
│ │ │ │ ├── test_lb.sh
│ │ │ │ ├── test_netpol.sh
│ │ │ │ └── test_pv.sh
│ │ └── terraform
│ │ │ └── README.md
│ └── example.env.sh
├── eks_1_provision_eksctl
│ ├── README.md
│ ├── part0_intro
│ │ ├── README.md
│ │ ├── create_eks_cluster.sh
│ │ ├── create_gke_cluster.sh
│ │ └── install
│ │ │ ├── Brewfile
│ │ │ ├── README.md
│ │ │ ├── choco.xml
│ │ │ ├── install_eksctl_linux.sh
│ │ │ ├── install_kubectl_debian.sh
│ │ │ ├── install_kubectl_rhel.sh
│ │ │ ├── kubernetes.list
│ │ │ └── kubernetes.repo
│ ├── part1_cli
│ │ ├── README.md
│ │ ├── create_cluster.sh
│ │ └── delete_cluster.sh
│ ├── part2_crd
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── create_cluster.sh
│ │ ├── delete_cluster.sh
│ │ └── template_cluster.yaml
│ └── part3_app
│ │ ├── README.md
│ │ ├── hello-k8s-deploy.yaml
│ │ └── hello-k8s-svc.yaml
├── eks_3_ingress_nginx
│ ├── README.md
│ ├── part0_provision
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── cluster.yaml
│ │ ├── create_cluster.sh
│ │ ├── delete_cluster.sh
│ │ └── template_cluster.yaml
│ ├── part1_addons
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── add-external-dns.sh
│ │ ├── add-ingress-nginx.sh
│ │ ├── template.external-dns.yaml
│ │ └── template.nginx-ingress.yaml
│ └── part2_app
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── hello-k8s-deploy.yaml
│ │ ├── hello-k8s-svc-clusterip.yaml
│ │ └── template-ingress.yaml
├── eks_b1_provision_vpc_terraform
│ ├── README.md
│ ├── main.tf
│ ├── provider.tf
│ ├── terraform.tfvars
│ └── vpc
│ │ ├── locals.tf
│ │ ├── main.tf
│ │ ├── variables.tf
│ │ └── versions.tf
├── eks_b2_provision_eks_eksctl
│ ├── README.md
│ ├── part1_static_config
│ │ ├── README.md
│ │ └── cluster_config.yaml
│ ├── part2_template_config
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── eksctl_config
│ │ │ ├── cluster_config.yaml.tmpl
│ │ │ ├── data.tf
│ │ │ ├── locals.tf
│ │ │ ├── main.tf
│ │ │ └── variables.tf
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ ├── terraform.tfvars
│ │ └── vpc
│ │ │ ├── locals.tf
│ │ │ ├── main.tf
│ │ │ ├── output.tf
│ │ │ ├── variables.tf
│ │ │ └── versions.tf
│ └── part3_template_config_2
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── eksctl_config
│ │ ├── cluster_config.yaml.tmpl
│ │ ├── data.tf
│ │ ├── locals.tf
│ │ ├── main.tf
│ │ └── variables.tf
│ │ ├── main.tf
│ │ ├── provider.tf
│ │ ├── terraform.tfvars
│ │ └── vpc
│ │ ├── locals.tf
│ │ ├── main.tf
│ │ ├── output.tf
│ │ ├── variables.tf
│ │ └── versions.tf
├── gke
│ ├── baseline
│ │ ├── gcloud_sdk
│ │ │ ├── README.md
│ │ │ ├── example_env.sh
│ │ │ ├── examples
│ │ │ │ └── dgraph
│ │ │ │ │ ├── README.md
│ │ │ │ │ └── scripts
│ │ │ │ │ ├── delete_dgraph.sh
│ │ │ │ │ ├── deploy_app_dgraph.sh
│ │ │ │ │ ├── deploy_app_ratel.sh
│ │ │ │ │ ├── deploy_netpol_dgraph.sh
│ │ │ │ │ └── deploy_netpol_ratel.sh
│ │ │ ├── scripts
│ │ │ │ └── create_projects.sh
│ │ │ └── tests
│ │ │ │ ├── README.md
│ │ │ │ ├── delete_ing.sh
│ │ │ │ ├── delete_lb.sh
│ │ │ │ ├── delete_netpol.sh
│ │ │ │ ├── delete_pv.sh
│ │ │ │ ├── test_ing.sh
│ │ │ │ ├── test_lb.sh
│ │ │ │ ├── test_netpol.sh
│ │ │ │ └── test_pv.sh
│ │ └── terraform
│ │ │ └── README.md
│ ├── ingress
│ │ ├── ambassador
│ │ │ ├── README.md
│ │ │ └── example
│ │ │ │ ├── apache
│ │ │ │ ├── README.md
│ │ │ │ ├── test_classic_ingress.sh
│ │ │ │ └── test_emissary_ingress.sh
│ │ │ │ └── dgraph
│ │ │ │ ├── README.md
│ │ │ │ ├── dgraph_classic_ingress.sh
│ │ │ │ ├── dgraph_clean.sh
│ │ │ │ ├── dgraph_emissary_ingress.sh
│ │ │ │ └── dgraph_install.sh
│ │ ├── ingress-gce
│ │ │ ├── README.md
│ │ │ ├── dgraph
│ │ │ │ ├── dgraph_allow_list.sh
│ │ │ │ └── helmfile.yaml
│ │ │ ├── kube-addons
│ │ │ │ ├── helmfile.yaml
│ │ │ │ └── issuers.yaml
│ │ │ ├── ratel
│ │ │ │ └── helmfile.yaml
│ │ │ └── scripts
│ │ │ │ ├── clean.sh
│ │ │ │ ├── dns.sh
│ │ │ │ ├── example.env.sh
│ │ │ │ ├── gke.sh
│ │ │ │ ├── projects.sh
│ │ │ │ ├── setup.sh
│ │ │ │ └── validator.sh
│ │ ├── ingress-nginx-grpc
│ │ │ ├── README.md
│ │ │ ├── clients
│ │ │ │ ├── .gitignore
│ │ │ │ └── README.md
│ │ │ ├── dgraph
│ │ │ │ ├── dgraph_allow_list.sh
│ │ │ │ └── helmfile.yaml
│ │ │ ├── kube-addons
│ │ │ │ ├── helmfile.yaml
│ │ │ │ └── issuers.yaml
│ │ │ ├── ratel
│ │ │ │ └── helmfile.yaml
│ │ │ └── scripts
│ │ │ │ ├── clean.sh
│ │ │ │ ├── dns.sh
│ │ │ │ ├── example.env.sh
│ │ │ │ ├── gke.sh
│ │ │ │ ├── projects.sh
│ │ │ │ ├── setup.sh
│ │ │ │ └── validator.sh
│ │ └── nginx-kubernetes-ingress
│ │ │ ├── clients
│ │ │ ├── .gitignore
│ │ │ └── README.md
│ │ │ ├── dgraph
│ │ │ ├── dgraph_allow_list.sh
│ │ │ └── helmfile.yaml
│ │ │ ├── kube-addons
│ │ │ ├── helmfile.yaml
│ │ │ └── issuers.yaml
│ │ │ ├── ratel
│ │ │ └── helmfile.yaml
│ │ │ └── scripts
│ │ │ ├── clean.sh
│ │ │ ├── dns.sh
│ │ │ ├── example.env.sh
│ │ │ ├── gke.sh
│ │ │ ├── projects.sh
│ │ │ ├── setup.sh
│ │ │ └── validator.sh
│ └── service-mesh
│ │ ├── consul-connect
│ │ ├── README.md
│ │ ├── consul
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── fetch_cert.sh
│ │ │ ├── fetch_consul_debian.sh
│ │ │ ├── fetch_k8s_cli.sh
│ │ │ └── helmfile.yaml
│ │ ├── deploy_all.sh
│ │ ├── examples
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── dgraph
│ │ │ │ ├── README.md
│ │ │ │ ├── dgraph_allow_list.sh
│ │ │ │ ├── experimental
│ │ │ │ │ ├── intention.yaml
│ │ │ │ │ ├── sd.dgraph.yaml
│ │ │ │ │ └── sd.pydgraph_client.yaml
│ │ │ │ ├── helmfile.yaml
│ │ │ │ └── pydgraph_client.yaml
│ │ │ └── static_server
│ │ │ │ ├── README.md
│ │ │ │ ├── multport
│ │ │ │ ├── README.md
│ │ │ │ ├── client.yaml
│ │ │ │ └── server.yaml
│ │ │ │ ├── secure_http
│ │ │ │ ├── README.md
│ │ │ │ ├── client-to-server-intention.yaml
│ │ │ │ ├── client.yaml
│ │ │ │ ├── dc1.yaml
│ │ │ │ ├── secure-dc1.yaml
│ │ │ │ └── server.yaml
│ │ │ │ └── secure_multiport
│ │ │ │ ├── README.md
│ │ │ │ ├── client-to-server-intention.yaml
│ │ │ │ ├── client.yaml
│ │ │ │ ├── secure-dc1.yaml
│ │ │ │ └── server.yaml
│ │ ├── o11y
│ │ │ ├── README.md
│ │ │ └── helmfile.yaml
│ │ └── scripts
│ │ │ ├── clean_gcp.sh
│ │ │ ├── clean_k8s.sh
│ │ │ ├── example.env.sh
│ │ │ ├── filesetup.sh
│ │ │ └── gke.sh
│ │ └── nginx-service-mesh
│ │ ├── README.md
│ │ ├── clients
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── alpha_server.txt
│ │ ├── fetch_scripts.sh
│ │ ├── setup_pydgraph_gcp.sh
│ │ └── traffic.yaml
│ │ ├── dgraph
│ │ ├── README.md
│ │ ├── dgraph_allow_list.sh
│ │ ├── helmfile.yaml
│ │ └── vs.yaml
│ │ ├── kube_addons
│ │ ├── cert_manager
│ │ │ ├── helmfile.yaml
│ │ │ └── issuers.yaml
│ │ ├── external_dns
│ │ │ └── helmfile.yaml
│ │ └── nginx_ic
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── docker_keys.sh
│ │ │ └── helmfile.yaml
│ │ ├── nsm
│ │ ├── README.md
│ │ ├── deny_access_control_mode.json
│ │ ├── helmfile.yaml
│ │ └── install_cli.sh
│ │ ├── o11y
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── fetch_manifests.sh
│ │ └── helmfile.yaml
│ │ ├── ratel
│ │ ├── README.md
│ │ ├── helmfile.yaml
│ │ └── vs.yaml
│ │ └── scripts
│ │ ├── clean_all.sh
│ │ ├── deploy_all.sh
│ │ ├── part_1
│ │ ├── clean_gcp.sh
│ │ ├── clean_k8s.sh
│ │ ├── deploy_all.sh
│ │ ├── example.env.sh
│ │ ├── gcr.sh
│ │ ├── gke.sh
│ │ ├── projects.sh
│ │ └── setup.sh
│ │ ├── part_2
│ │ ├── clean_gcp.sh
│ │ ├── clean_k8s.sh
│ │ ├── deploy_all.sh
│ │ ├── example.env.sh
│ │ ├── projects.sh
│ │ ├── setup.sh
│ │ └── wi.sh
│ │ └── part_3
│ │ └── test_access.sh
├── gke_1_provision_cloudsdk
│ ├── README.md
│ ├── create_basic_cluster.sh
│ ├── create_gke_cluster.sh
│ ├── hello-k8s-deploy.yaml
│ └── hello-k8s-svc.yaml
├── gke_2_provision_terraform
│ ├── .gitignore
│ ├── README.md
│ ├── hello-k8s-deploy.yaml
│ ├── hello-k8s-svc.yaml
│ ├── main.tf
│ └── provider.tf
├── gke_3_service_ingress
│ ├── README.md
│ ├── part2_services
│ │ ├── README.md
│ │ ├── hello_gke_extlb_deploy.yaml
│ │ └── hello_gke_extlb_svc.yaml
│ └── part3_ingress
│ │ ├── README.md
│ │ ├── hello_gke_ing_deploy.yaml
│ │ ├── hello_gke_ing_ing.yaml
│ │ └── hello_gke_ing_svc.yaml
├── gke_4_externaldns
│ ├── README.md
│ ├── part1_clouddns
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── check_clouddns.sh
│ │ ├── install_external_dns.sh
│ │ └── template_values.yaml
│ ├── part2_service
│ │ ├── .gitignore
│ │ ├── README.md
│ │ └── template_service.yaml
│ └── part3_ingress
│ │ ├── .gitignore
│ │ ├── README.md
│ │ └── template_ingress.yaml
├── gke_5_googlessl
│ ├── README.md
│ ├── part1_ephemeral_ip
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── hello_deploy.yaml
│ │ ├── hello_service.yaml
│ │ ├── template_ingress.yaml
│ │ └── template_managed_cert.yaml
│ └── part2_reserved_ip
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── hello_deploy.yaml
│ │ ├── hello_service.yaml
│ │ ├── template_ingress.yaml
│ │ └── template_managed_cert.yaml
└── helmfile_1
│ ├── README.md
│ ├── env.sh
│ ├── helmfile.yaml
│ └── values
│ ├── dgraph.yaml.gotmpl
│ └── minio.yaml.gotmpl
├── vagrant
├── ansible_local
│ ├── README.md
│ ├── create_workarea.sh
│ ├── part1_ubuntu
│ │ ├── Vagrantfile
│ │ └── provision
│ │ │ ├── playbook.yml
│ │ │ └── roles
│ │ │ └── hello_web
│ │ │ ├── defaults
│ │ │ └── main.yml
│ │ │ ├── files
│ │ │ └── index.html
│ │ │ └── tasks
│ │ │ └── main.yml
│ └── part2_centos
│ │ ├── Vagrantfile
│ │ └── provision
│ │ ├── playbook.yml
│ │ └── roles
│ │ └── hello_web
│ │ ├── defaults
│ │ └── main.yml
│ │ ├── files
│ │ └── index.html
│ │ └── tasks
│ │ └── main.yml
├── chef_zero
│ ├── README.md
│ ├── create_workarea.sh
│ ├── part1_ubuntu
│ │ ├── Vagrantfile
│ │ ├── cookbooks
│ │ │ └── hello_web
│ │ │ │ ├── attributes
│ │ │ │ └── default.rb
│ │ │ │ ├── files
│ │ │ │ └── default
│ │ │ │ │ └── index.html
│ │ │ │ ├── metadata.rb
│ │ │ │ └── recipes
│ │ │ │ └── default.rb
│ │ └── nodes
│ │ │ └── .gitkeep
│ └── part2_centos
│ │ ├── Vagrantfile
│ │ ├── cookbooks
│ │ └── hello_web
│ │ │ ├── attributes
│ │ │ └── default.rb
│ │ │ ├── files
│ │ │ └── default
│ │ │ │ └── index.html
│ │ │ ├── metadata.rb
│ │ │ └── recipes
│ │ │ └── default.rb
│ │ └── nodes
│ │ └── .gitkeep
├── docker
│ ├── README.md
│ ├── create_workarea.sh
│ ├── part1_build
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── Vagrantfile
│ │ └── public-html
│ │ │ └── index.html
│ └── part2_image
│ │ ├── README.md
│ │ ├── Vagrantfile
│ │ └── public-html
│ │ └── index.html
├── puppet
│ ├── README.md
│ ├── hello_web
│ │ ├── .gitignore
│ │ ├── README.md
│ │ ├── bootstrap.sh
│ │ ├── rocky9
│ │ │ ├── Vagrantfile
│ │ │ └── manifests
│ │ │ │ └── default.pp
│ │ ├── site
│ │ │ └── hello_web
│ │ │ │ ├── files
│ │ │ │ └── index.html
│ │ │ │ ├── manifests
│ │ │ │ └── init.pp
│ │ │ │ └── metadata.json
│ │ └── ubuntu2204
│ │ │ ├── Vagrantfile
│ │ │ └── manifests
│ │ │ └── default.pp
│ └── legacy
│ │ ├── create_workarea.sh
│ │ ├── part1_ubuntu
│ │ ├── README.md
│ │ ├── Vagrantfile
│ │ ├── bootstrap.sh
│ │ ├── manifests
│ │ │ └── default.pp
│ │ └── site
│ │ │ └── hello_web
│ │ │ ├── files
│ │ │ └── index.html
│ │ │ └── manifests
│ │ │ └── init.pp
│ │ └── part2_centos
│ │ ├── README.md
│ │ ├── Vagrantfile
│ │ ├── bootstrap.sh
│ │ ├── manifests
│ │ └── default.pp
│ │ └── site
│ │ └── hello_web
│ │ ├── files
│ │ └── index.html
│ │ └── manifests
│ │ └── init.pp
├── puppet_server
│ ├── README.md
│ └── hello_web_proj
│ │ ├── README.md
│ │ ├── Vagrantfile
│ │ ├── boostrap.sh
│ │ ├── setup.sh
│ │ └── site
│ │ ├── manifests
│ │ └── site.pp
│ │ └── modules
│ │ └── hello_web
│ │ ├── files
│ │ └── index.html
│ │ ├── manifests
│ │ └── init.pp
│ │ └── metadata.json
├── salt
│ ├── README.md
│ ├── create_workarea.sh
│ ├── part1_ubuntu
│ │ ├── Vagrantfile
│ │ └── roots
│ │ │ ├── pillar
│ │ │ ├── hello_web.sls
│ │ │ └── top.sls
│ │ │ └── salt
│ │ │ ├── hello_web
│ │ │ ├── defaults.yaml
│ │ │ ├── files
│ │ │ │ └── index.html
│ │ │ ├── init.sls
│ │ │ └── map.jinja
│ │ │ └── top.sls
│ └── part2_centos
│ │ ├── Vagrantfile
│ │ └── roots
│ │ ├── pillar
│ │ ├── hello_web.sls
│ │ └── top.sls
│ │ └── salt
│ │ ├── hello_web
│ │ ├── defaults.yaml
│ │ ├── files
│ │ │ └── index.html
│ │ ├── init.sls
│ │ └── map.jinja
│ │ └── top.sls
└── shell
│ ├── README.md
│ ├── create_workarea.sh
│ ├── part1_ubuntu
│ ├── README.md
│ ├── Vagrantfile
│ └── scripts
│ │ └── hello_web.sh
│ ├── part2a_centos
│ ├── README.md
│ ├── Vagrantfile
│ └── scripts
│ │ └── hello_web.sh
│ └── part2b_gentoo
│ ├── README.md
│ ├── Vagrantfile
│ └── scripts
│ └── hello_web.sh
├── vault-docker
├── README.md
└── approle
│ ├── README.md
│ └── dgraph
│ ├── .env
│ ├── .gitignore
│ ├── Brewfile
│ ├── README.md
│ ├── chooc.config
│ ├── compose.yml
│ ├── dgraph
│ ├── alpha.yaml
│ ├── backup.graphql
│ └── export.graphql
│ ├── scripts
│ ├── dgraph
│ │ ├── backup.sh
│ │ ├── export.sh
│ │ ├── getting_started
│ │ │ ├── 1.data_json.sh
│ │ │ ├── 1.data_rdf.sh
│ │ │ ├── 2.schema.sh
│ │ │ ├── 3.query_starring_edge.sh
│ │ │ ├── 4.query_movies_after_1980.sh
│ │ │ └── README.md
│ │ └── login.sh
│ ├── randpasswd.ps1
│ ├── randpasswd.sh
│ ├── unseal.sh
│ ├── vault_api
│ │ ├── 1.unseal.sh
│ │ ├── 2.configure.sh
│ │ ├── 3.policies.sh
│ │ ├── 4.roles.sh
│ │ ├── 5.secrets_dgraph_create.sh
│ │ ├── 6.secrets_dgraph_read.sh
│ │ └── README.md
│ └── vault_cli
│ │ ├── 1.unseal.sh
│ │ ├── 2.configure.sh
│ │ ├── 3.policies.sh
│ │ ├── 4.roles.sh
│ │ ├── 5.secrets_dgraph_create.sh
│ │ ├── 6.secrets_dgraph_read.sh
│ │ └── README.md
│ └── vault
│ ├── config.hcl
│ ├── policy_admin.hcl
│ └── policy_dgraph.hcl
└── vbox
├── macos
├── 00.all_install.sh
├── 01.homebrew_install.sh
├── 02.vbox_install.sh
├── 03.vagrant_install.sh
├── 04.vagrant_demo_macos.sh
├── 05.vagrant_demo_windows_01.sh
├── 05.vagrant_demo_windows_02.cmd
├── 05.vagrant_demo_windows_03.sh
├── 06.kitchen_chefdk_install.sh
├── 07.kitchen_chef_generate.sh
├── 08.kitchen_screeenfetch.sh
├── 09.kitchen_demo.sh
├── 10.docker_toolbox_install.sh
├── 12.docker_machine_demo.sh
├── 13.minikube_install.sh
├── 14.kubectl_client_install.sh
├── 15.minikube_demo.sh
├── 16.print_info.sh
├── 17.cleanup.sh
├── Brewfile
└── README.md
├── vbox_fedora
├── 01.vbox_install.sh
├── 02.vbox_post_install.sh
├── 03.vagrant_install.sh
├── 04.vagrant_demo_gentoo.sh
├── 05.vagrant_demo_arch.sh
├── 06.kitchen_chefdk_install.sh
├── 07.kitchen_chef_generate.sh
├── 08.kitchen_screeenfetch.sh
├── 09.kitchen_demo.sh
├── 10.docker_machine_install.sh
├── 11.docker_client_install.sh
├── 12.docker_machine_demo.sh
├── 13.minikube_install.sh
├── 14.kubectl_client_install.sh
├── 15.minikube_demo.sh
├── 16.print_info.sh
├── 17.cleanup.sh
└── README.md
└── windows
├── 00.all_install.ps1
├── 01.chocolatey_install.ps1
├── 02.vbox_install.ps1
├── 03.vagrant_install.ps1
├── 04.vagrant_demo_manjaro.ps1
├── 05.vagrant_demo_win2016_01.ps1
├── 06.vagrant_demo_win2016_02.ps1
├── 07.vagrant_demo_win2016_03.sh
├── 08.kitchen_chefdk_install.ps1
├── 09.kitchen_chef_generate.ps1
├── 10.kitchen_screeenfetch.ps1
├── 11.kitchen_demo.ps1
├── 12.docker_toolbox_install.ps1
├── 13.docker_machine_demo.ps1
├── 14.minikube_install.ps1
├── 15.minikube_demo.ps1
├── 16.print_info.ps1
├── 17.cleanup.ps1
├── README.md
└── choco.config
/.gitignore:
--------------------------------------------------------------------------------
1 | .kitchen
2 | .vagrant
3 | .DS_Store
4 |
--------------------------------------------------------------------------------
/azure/README.md:
--------------------------------------------------------------------------------
1 | # The Azure Series
2 |
3 | Code for articles related to Azure.
4 |
--------------------------------------------------------------------------------
/azure/azure_dns/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform*
2 | terraform.tfstate*
3 | azure_vm.pem
4 | env.sh
5 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_dns_domain/main.tf:
--------------------------------------------------------------------------------
1 | ### Input Variables
2 | variable "resource_group_name" {}
3 | variable "domain" {}
4 | variable "subdomain_prefix" { default = "" }
5 |
6 | ### Local Variables
7 | locals {
8 |   domain_name = var.subdomain_prefix == "" ? var.domain : "${var.subdomain_prefix}.${var.domain}"
9 | }
10 |
11 | ### Resources
12 | resource "azurerm_dns_zone" "default" {
13 | name = local.domain_name
14 | resource_group_name = var.resource_group_name
15 | }
16 |
17 | ### Output Variables
18 | output "dns_zone_name" {
19 | value = azurerm_dns_zone.default.name
20 | }
21 |
22 | output "name_servers" {
23 | value = azurerm_dns_zone.default.name_servers
24 | }
25 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_dns_domain/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | azurerm = {
4 | source = "hashicorp/azurerm"
5 | version = "~>2.0"
6 | }
7 | }
8 | }
9 |
10 | provider "azurerm" {
11 | features {}
12 | }
13 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_dns_record/main.tf:
--------------------------------------------------------------------------------
1 | ### Input Variables
2 | variable "resource_group_name" {}
3 | variable "dns_zone_name" {}
4 | variable "name" {}
5 | variable "ip_address" {}
6 |
7 | ### Resources
8 | resource "azurerm_dns_a_record" "default" {
9 | name = var.name
10 | zone_name = var.dns_zone_name
11 | resource_group_name = var.resource_group_name
12 | ttl = 300
13 | records = [var.ip_address]
14 | }
15 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_dns_record/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | azurerm = {
4 | source = "hashicorp/azurerm"
5 | version = "~>2.0"
6 | }
7 | }
8 | }
9 |
10 | provider "azurerm" {
11 | features {}
12 | }
13 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_net/main.tf:
--------------------------------------------------------------------------------
1 | resource "azurerm_virtual_network" "default" {
2 | name = "appVnet"
3 | address_space = ["10.0.0.0/16"]
4 | location = var.location
5 | resource_group_name = var.resource_group_name
6 |
7 | tags = {
8 | environment = "dev"
9 | }
10 | }
11 |
12 | resource "azurerm_subnet" "default" {
13 | name = "appSubnet"
14 | resource_group_name = var.resource_group_name
15 | virtual_network_name = azurerm_virtual_network.default.name
16 | address_prefixes = ["10.0.2.0/24"]
17 | }
18 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_net/outputs.tf:
--------------------------------------------------------------------------------
1 | output "subnet_id" {
2 | value = azurerm_subnet.default.id
3 | }
4 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_net/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | azurerm = {
4 | source = "hashicorp/azurerm"
5 | version = "~>2.0"
6 | }
7 | }
8 | }
9 |
10 | provider "azurerm" {
11 | features {}
12 | }
13 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_net/variables.tf:
--------------------------------------------------------------------------------
1 | variable "resource_group_name" {}
2 | variable "location" {}
3 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_vm/outputs.tf:
--------------------------------------------------------------------------------
1 | output "tls_private_key" {
2 | value = tls_private_key.ssh.private_key_pem
3 | sensitive = true
4 | }
5 |
6 | output "public_ip" {
7 | value = azurerm_public_ip.default.ip_address
8 | }
9 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_vm/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | azurerm = {
4 | source = "hashicorp/azurerm"
5 | version = "~>2.0"
6 | }
7 | }
8 | }
9 |
10 | provider "azurerm" {
11 | features {}
12 | }
13 |
--------------------------------------------------------------------------------
/azure/azure_dns/azure_vm/variables.tf:
--------------------------------------------------------------------------------
1 | variable "resource_group_name" {}
2 | variable "location" {}
3 |
4 | variable "image_publisher" { default = "Canonical" }
5 | variable "image_offer" { default = "UbuntuServer" }
6 | variable "image_sku" { default = "18.04-LTS" }
7 | variable "image_version" { default = "latest" }
8 |
9 | variable "computer_name" {}
10 | variable "admin_username" {}
11 |
12 | variable "subnet_id" {}
13 |
--------------------------------------------------------------------------------
/azure/azure_dns/godaddy_dns_nameservers/main.tf:
--------------------------------------------------------------------------------
1 | ### input variables
2 | variable "domain" {}
3 | variable "name_servers" {}
4 |
5 | resource "godaddy_domain_record" "default" {
6 | domain = var.domain
7 | nameservers = var.name_servers
8 | }
9 |
--------------------------------------------------------------------------------
/azure/azure_dns/godaddy_dns_nameservers/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | godaddy = {
4 | source = "n3integration/godaddy"
5 | version = "1.8.7"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/azure/azure_dns/godaddy_dns_record/main.tf:
--------------------------------------------------------------------------------
1 | ### input variables
2 | variable "domain" {}
3 | variable "name" {}
4 | variable "ip_address" {}
5 |
6 | ### resources
7 | resource "godaddy_domain_record" "default" {
8 | domain = var.domain
9 |
10 | record {
11 | name = var.name
12 | type = "A"
13 | data = var.ip_address
14 | ttl = 3600
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/azure/azure_dns/godaddy_dns_record/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | godaddy = {
4 | source = "n3integration/godaddy"
5 | version = "1.8.7"
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/azure/azure_dns/outputs.tf:
--------------------------------------------------------------------------------
1 | output "tls_private_key" {
2 | value = module.azure_vm.tls_private_key
3 | sensitive = true
4 | }
5 |
6 | output "public_ip" {
7 | value = module.azure_vm.public_ip
8 | }
9 |
--------------------------------------------------------------------------------
/azure/azure_dns/terraform.tfvars:
--------------------------------------------------------------------------------
1 | image_publisher = "Canonical"
2 | image_offer = "0001-com-ubuntu-server-focal"
3 | image_sku = "20_04-lts"
4 | computer_name = "appvm"
5 | admin_username = "azureuser"
6 |
--------------------------------------------------------------------------------
/azure/azure_dns/variables.tf:
--------------------------------------------------------------------------------
1 | variable "resource_group_name" {}
2 | variable "location" {}
3 | variable "image_publisher" {}
4 | variable "image_offer" {}
5 | variable "image_sku" {}
6 | variable "computer_name" {}
7 | variable "admin_username" {}
8 |
9 | variable "domain" { default = "" }
10 | variable "subdomain_prefix" { default = "dev" }
11 |
--------------------------------------------------------------------------------
/azure/azure_vm/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform*
2 | terraform.tfstate*
3 | azure_vm.pem
4 |
--------------------------------------------------------------------------------
/azure/azure_vm/azure_net/main.tf:
--------------------------------------------------------------------------------
1 | resource "azurerm_virtual_network" "default" {
2 | name = "appVnet"
3 | address_space = ["10.0.0.0/16"]
4 | location = var.location
5 | resource_group_name = var.resource_group_name
6 |
7 | tags = {
8 | environment = "dev"
9 | }
10 | }
11 |
12 | resource "azurerm_subnet" "default" {
13 | name = "appSubnet"
14 | resource_group_name = var.resource_group_name
15 | virtual_network_name = azurerm_virtual_network.default.name
16 | address_prefixes = ["10.0.2.0/24"]
17 | }
18 |
--------------------------------------------------------------------------------
/azure/azure_vm/azure_net/outputs.tf:
--------------------------------------------------------------------------------
1 | output "subnet_id" {
2 | value = azurerm_subnet.default.id
3 | }
4 |
--------------------------------------------------------------------------------
/azure/azure_vm/azure_net/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | azurerm = {
4 | source = "hashicorp/azurerm"
5 | version = "~>2.0"
6 | }
7 | }
8 | }
9 |
10 | provider "azurerm" {
11 | features {}
12 | }
13 |
--------------------------------------------------------------------------------
/azure/azure_vm/azure_net/variables.tf:
--------------------------------------------------------------------------------
1 | variable "resource_group_name" {}
2 | variable "location" {}
3 |
--------------------------------------------------------------------------------
/azure/azure_vm/azure_vm/outputs.tf:
--------------------------------------------------------------------------------
1 | output "tls_private_key" {
2 | value = tls_private_key.ssh.private_key_pem
3 | sensitive = true
4 | }
5 |
6 | output "public_ip" {
7 | value = azurerm_public_ip.default.ip_address
8 | }
9 |
--------------------------------------------------------------------------------
/azure/azure_vm/azure_vm/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | azurerm = {
4 | source = "hashicorp/azurerm"
5 | version = "~>2.0"
6 | }
7 | }
8 | }
9 |
10 | provider "azurerm" {
11 | features {}
12 | }
13 |
--------------------------------------------------------------------------------
/azure/azure_vm/azure_vm/variables.tf:
--------------------------------------------------------------------------------
1 | variable "resource_group_name" {}
2 | variable "location" {}
3 |
4 | variable "image_publisher" { default = "Canonical" }
5 | variable "image_offer" { default = "UbuntuServer" }
6 | variable "image_sku" { default = "18.04-LTS" }
7 | variable "image_version" { default = "latest" }
8 |
9 | variable "computer_name" {}
10 | variable "admin_username" {}
11 |
12 | variable "subnet_id" {}
13 |
--------------------------------------------------------------------------------
/azure/azure_vm/main.tf:
--------------------------------------------------------------------------------
1 | module "azure_net" {
2 | source = "./azure_net"
3 | resource_group_name = var.resource_group_name
4 | location = var.location
5 | }
6 |
7 | module "azure_vm" {
8 | source = "./azure_vm"
9 | resource_group_name = var.resource_group_name
10 | location = var.location
11 | subnet_id = module.azure_net.subnet_id
12 | image_publisher = var.image_publisher
13 | image_offer = var.image_offer
14 | image_sku = var.image_sku
15 | computer_name = var.computer_name
16 | admin_username = var.admin_username
17 | }
18 |
--------------------------------------------------------------------------------
/azure/azure_vm/outputs.tf:
--------------------------------------------------------------------------------
1 | output "tls_private_key" {
2 | value = module.azure_vm.tls_private_key
3 | sensitive = true
4 | }
5 |
6 | output "public_ip" {
7 | value = module.azure_vm.public_ip
8 | }
9 |
--------------------------------------------------------------------------------
/azure/azure_vm/terraform.tfvars:
--------------------------------------------------------------------------------
1 | location = "westus"
2 | image_publisher = "Canonical"
3 | image_offer = "0001-com-ubuntu-server-focal"
4 | image_sku = "20_04-lts"
5 | computer_name = "appvm"
6 | admin_username = "azureuser"
7 |
--------------------------------------------------------------------------------
/azure/azure_vm/variables.tf:
--------------------------------------------------------------------------------
1 | variable "resource_group_name" {}
2 | variable "location" {}
3 | variable "image_publisher" {}
4 | variable "image_offer" {}
5 | variable "image_sku" {}
6 | variable "computer_name" {}
7 | variable "admin_username" {}
8 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/1.azure_cli/.gitignore:
--------------------------------------------------------------------------------
1 | .env
2 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/1.azure_cli/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:focal
2 | RUN apt-get -qq update && \
3 | apt-get install -y curl python3-pip && \
4 | rm -rf /var/lib/apt/lists/*
5 | # Install MinIO Client
6 | RUN curl --silent -O https://dl.min.io/client/mc/release/linux-amd64/mc && \
7 | chmod +x mc && mv mc /usr/local/bin
8 | # Install s3cmd
9 | RUN pip3 install s3cmd
10 | COPY ./entrypoint.sh /usr/local/bin/entrypoint.sh
11 | RUN chmod +x /usr/local/bin/entrypoint.sh
12 |
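13 | # Build sketch (the image tag is an assumption; this directory's
14 | # docker-compose.yml is the usual way to run it):
15 | #   docker build -t blob-clients .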
--------------------------------------------------------------------------------
/azure/blob/blob_storage/1.azure_cli/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | set -eo pipefail
3 |
4 | # configure MinIO Client
5 | /usr/local/bin/mc alias set myazure http://${AZURE_GATEWAY}:9000 ${MINIO_ACCESS_KEY} ${MINIO_SECRET_KEY}
6 |
7 | # configure s3cfg
8 | cat <<-EOF > ~/.s3cfg
9 | # Setup endpoint: hostname of the Web App
10 | host_base = ${AZURE_GATEWAY}:9000
11 | host_bucket = ${AZURE_GATEWAY}:9000
12 | # Leave as default
13 | bucket_location = us-east-1
14 | use_https = False
15 |
16 | access_key = ${MINIO_ACCESS_KEY}
17 | secret_key = ${MINIO_SECRET_KEY}
18 |
19 | # Use S3 v4 signature APIs
20 | signature_v2 = False
21 | EOF
22 |
23 | # hand off to the container command; the mc alias and ~/.s3cfg written above
24 | # are then usable inside the container, e.g. `mc ls myazure` or `s3cmd ls`
25 | exec "$@"
26 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/.gitignore:
--------------------------------------------------------------------------------
1 | # terraform
2 | .terraform
3 | *terraform.tfstate*
4 |
5 | # project
6 | **/values/*
7 | !**/values/.gitkeep
8 | .env
9 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/README.md:
--------------------------------------------------------------------------------
1 | # Azure Blob Storage with Terraform
2 |
3 | This uses Terraform to create Azure Blob Storage and to render MinIO gateway settings (an env file, `s3cfg`, and Helm values) from the templates in `modules/minio_values`.
4 |
5 |
6 |
7 | ## Instructions
8 |
9 | ```bash
10 | terraform init
11 | terraform plan
12 | terraform apply
13 | ```
14 |
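15 | Each test scenario can also be applied on its own. A minimal sketch, assuming the scenario directories checked into this repo (`test1_create_only_blob`, `test2_create_storage_acct`, `test3_create_all`):
16 |
17 | ```bash
18 | # run a single scenario in isolation
19 | cd test1_create_only_blob
20 | terraform init
21 | terraform apply
22 | ```
23 |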
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.5"
2 | services:
3 | minio:
4 | image: minio/minio:${MINIO_TAG}
5 | command: gateway azure
6 | container_name: azure-gateway
7 | environment:
8 | MINIO_ACCESS_KEY: ${MINIO_ACCESS_KEY}
9 | MINIO_SECRET_KEY: ${MINIO_SECRET_KEY}
10 | ports:
11 | - 9000:9000
12 |
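13 | # MINIO_TAG, MINIO_ACCESS_KEY, and MINIO_SECRET_KEY are read from the environment,
14 | # e.g. an env file rendered from modules/minio_values/minio.env.tmpl (assumed workflow)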
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/modules/azure_blob/outputs.tf:
--------------------------------------------------------------------------------
1 | #####################################################################
2 | ## Resources
3 | #####################################################################
4 | output "AccountName" {
5 | value = local.account_name
6 | }
7 |
8 | output "AccountKey" {
9 | value = local.account_key
10 | sensitive = true
11 | }
12 |
13 | output "ResourceName" {
14 | value = local.resource_name
15 | }
16 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/modules/minio_values/minio.env.tmpl:
--------------------------------------------------------------------------------
1 | MINIO_ACCESS_KEY=${accessKey}
2 | MINIO_SECRET_KEY=${secretKey}
3 | MINIO_TAG=${tag}
4 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/modules/minio_values/s3cfg.tmpl:
--------------------------------------------------------------------------------
1 | # Setup endpoint: hostname of the Web App
2 | host_base = ${minio_host}:9000
3 | host_bucket = ${minio_host}:9000
4 | # Leave as default
5 | bucket_location = us-east-1
6 | use_https = False
7 |
8 | access_key = ${accessKey}
9 | secret_key = ${secretKey}
10 |
11 | # Use S3 v4 signature APIs
12 | signature_v2 = False
13 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/modules/minio_values/values.minio_config.yaml.tmpl:
--------------------------------------------------------------------------------
1 | image:
2 | repository: minio/minio
3 | tag: ${tag}
4 | persistence:
5 | enabled: false
6 | azuregateway:
7 | enabled: true
8 | replicas: ${replicas}
9 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/modules/minio_values/values.minio_secrets.yaml.tmpl:
--------------------------------------------------------------------------------
1 | accessKey: ${accessKey}
2 | secretKey: ${secretKey}
3 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/provider.tf:
--------------------------------------------------------------------------------
1 | provider "azurerm" {
2 | version = "=2.20.0"
3 | features {}
4 | }
5 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | resource_group_name = "my-superfun-resources"
2 | storage_account_name = "my0new0unique0storage"
3 |
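4 | # note: Azure storage account names must be globally unique,
5 | # 3-24 characters, lowercase letters and digits only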
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/test1_create_only_blob/provider.tf:
--------------------------------------------------------------------------------
1 | provider "azurerm" {
2 | version = "=2.20.0"
3 | features {}
4 | }
5 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/test1_create_only_blob/values/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/azure/blob/blob_storage/2.terraform/test1_create_only_blob/values/.gitkeep
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/test2_create_storage_acct/provider.tf:
--------------------------------------------------------------------------------
1 | provider "azurerm" {
2 | version = "=2.20.0"
3 | features {}
4 | }
5 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/test2_create_storage_acct/values/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/azure/blob/blob_storage/2.terraform/test2_create_storage_acct/values/.gitkeep
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/test3_create_all/provider.tf:
--------------------------------------------------------------------------------
1 | provider "azurerm" {
2 | version = "=2.20.0"
3 | features {}
4 | }
5 |
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/test3_create_all/values/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/azure/blob/blob_storage/2.terraform/test3_create_all/values/.gitkeep
--------------------------------------------------------------------------------
/azure/blob/blob_storage/2.terraform/values/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/azure/blob/blob_storage/2.terraform/values/.gitkeep
--------------------------------------------------------------------------------
/certifications/README.md:
--------------------------------------------------------------------------------
1 | # Certification Notes
2 |
3 | These are my notes on certifications and other learning activities.
4 |
5 | I originally kept these notes on [Notion](https://www.notion.so), but ran into its paywall, so I am migrating them here.
6 |
7 | * [CI/CD](cicd/README.md) - ArgoCD, FluxCD, Spinnaker
8 | * [Cloud](cloud/README.md) - Azure, GCP, AWS
9 | * [HashiCorp](hashicorp/README.md) - Consul, Terraform, Vault
10 | * [Kubernetes](k8s/README.md) - Kubernetes, ArgoCD, Prometheus, Istio, Cilium
11 | * [Observability](o11y/README.md) - Prometheus, Elasticsearch
12 |
--------------------------------------------------------------------------------
/certifications/change_config/README.md:
--------------------------------------------------------------------------------
1 | # Change Configuration
2 |
3 | * [Puppet](./puppet/README.md)
4 | * [Chef](./chef/README.md)
5 | * [SaltStack (discontinued)](./saltstack/README.md)
6 | * [Ansible](./ansible/READEME.md)
--------------------------------------------------------------------------------
/certifications/change_config/saltstack/SaltStack-Enterprise-training-syllabus.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/change_config/saltstack/SaltStack-Enterprise-training-syllabus.pdf
--------------------------------------------------------------------------------
/certifications/cicd/README.md:
--------------------------------------------------------------------------------
1 | # CI/CD
2 |
3 | * [GitHub Actions](github/README.md)
--------------------------------------------------------------------------------
/certifications/cicd/github/github-actions-exam-preparation-study-guide__2_.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cicd/github/github-actions-exam-preparation-study-guide__2_.pdf
--------------------------------------------------------------------------------
/certifications/cloud/README.md:
--------------------------------------------------------------------------------
1 | # Cloud
2 |
3 | * [Azure](azure/README.md)
4 | * [AWS](aws/README.md)
5 | * [Google Cloud](gcp/README.md)
6 |
--------------------------------------------------------------------------------
/certifications/cloud/aws/README.md:
--------------------------------------------------------------------------------
1 | # AWS
2 |
3 | ## Overview
4 |
5 |
6 |
7 | ## Certifications
8 |
9 | * [Cloud Practitioner](practitioner.md) - $100
10 | * [Solutions Architect (Associate)](solutions_architect_associate.md) - $150
11 | * [Solutions Architect (Professional)](solutions_architect_professional.md) - $300
--------------------------------------------------------------------------------
/certifications/cloud/aws/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/aws/image.png
--------------------------------------------------------------------------------
/certifications/cloud/aws/images/practioner.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/aws/images/practioner.png
--------------------------------------------------------------------------------
/certifications/cloud/aws/images/solutions_architect_associate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/aws/images/solutions_architect_associate.png
--------------------------------------------------------------------------------
/certifications/cloud/aws/images/solutions_architect_professional.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/aws/images/solutions_architect_professional.png
--------------------------------------------------------------------------------
/certifications/cloud/azure/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/azure/image.png
--------------------------------------------------------------------------------
/certifications/cloud/azure/images/microsoft-certified-associate-badge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/azure/images/microsoft-certified-associate-badge.png
--------------------------------------------------------------------------------
/certifications/cloud/azure/images/microsoft-certified-expert-badge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/azure/images/microsoft-certified-expert-badge.png
--------------------------------------------------------------------------------
/certifications/cloud/azure/images/microsoft-certified-fundamentals-badge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/azure/images/microsoft-certified-fundamentals-badge.png
--------------------------------------------------------------------------------
/certifications/cloud/gcp/README.md:
--------------------------------------------------------------------------------
1 | # Google Cloud Platform
2 |
3 |
4 |
5 | ## Certifications
6 |
7 | * [Digital Leader](leader.md) - $99
8 | * [Cloud Engineer](engineer.md) - $125
9 | * [Cloud Architect](architect.md) - $200
10 |
11 | ## External (non-Google) Links
12 |
13 | * [Google Cloud Digital Leader Certification Study Course – Pass the Exam With This Free 6 Hour Course](https://www.freecodecamp.org/news/google-cloud-digital-leader-course/)
14 |
15 |
--------------------------------------------------------------------------------
/certifications/cloud/gcp/architect.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/gcp/architect.png
--------------------------------------------------------------------------------
/certifications/cloud/gcp/engineer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/gcp/engineer.png
--------------------------------------------------------------------------------
/certifications/cloud/gcp/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/gcp/image.png
--------------------------------------------------------------------------------
/certifications/cloud/gcp/leader.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/cloud/gcp/leader.png
--------------------------------------------------------------------------------
/certifications/hashicorp/README.md:
--------------------------------------------------------------------------------
1 | # HashiCorp
2 |
3 | * [Consul](consul/README.md)
4 | * [Terraform](terraform/README.md)
5 | * [Vault](vault/README.md)
6 | * [Vault Associate](vault/associate.md)
7 | * [Vault Operations Professional](vault/ops_pro.md)
8 |
--------------------------------------------------------------------------------
/certifications/hashicorp/consul/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/hashicorp/consul/image.png
--------------------------------------------------------------------------------
/certifications/hashicorp/terraform/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/hashicorp/terraform/image.png
--------------------------------------------------------------------------------
/certifications/hashicorp/vault/README.md:
--------------------------------------------------------------------------------
1 | # Vault
2 |
3 |
4 |
5 | * [Associate](associate.md)
6 | * [Operations Professional](ops_pro.md)
--------------------------------------------------------------------------------
/certifications/hashicorp/vault/associate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/hashicorp/vault/associate.png
--------------------------------------------------------------------------------
/certifications/hashicorp/vault/image.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/hashicorp/vault/image.png
--------------------------------------------------------------------------------
/certifications/hashicorp/vault/ops_pro.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/hashicorp/vault/ops_pro.png
--------------------------------------------------------------------------------
/certifications/k8s/pca_prometheus/PCA_Curriculum.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/certifications/k8s/pca_prometheus/PCA_Curriculum.pdf
--------------------------------------------------------------------------------
/certifications/o11y/README.md:
--------------------------------------------------------------------------------
1 | # Observability
2 |
3 |
--------------------------------------------------------------------------------
/docker-ansible/Brewfile:
--------------------------------------------------------------------------------
1 | cask_args appdir: '/Applications'
2 | cask 'virtualbox'
3 | cask 'virtualbox-extension-pack'
4 | cask 'vagrant'
5 | cask 'docker-toolbox'
6 |
--------------------------------------------------------------------------------
/docker-ansible/helper-mac-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # only run on Mac OS X
3 | [ $(uname -s) = "Darwin" ] || { echo 'Only runs on Mac OS X!!!' ; exit 1 ; }
4 | # Install Homebrew
5 | which -s brew || /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
6 |
7 | brew bundle --verbose
8 |
9 | pip install -r requirements.txt
10 |
--------------------------------------------------------------------------------
/docker-ansible/requirements.txt:
--------------------------------------------------------------------------------
1 | ansible
2 | docker
3 |
--------------------------------------------------------------------------------
/docker-docker_compose/Brewfile:
--------------------------------------------------------------------------------
1 | cask_args appdir: '/Applications'
2 | cask 'virtualbox'
3 | cask 'virtualbox-extension-pack'
4 | cask 'vagrant'
5 | cask 'docker-toolbox'
6 |
--------------------------------------------------------------------------------
/docker-docker_compose/compose_static/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 | services:
3 | db:
4 | image: mysql:5.7
5 | volumes:
6 | - db_data:/var/lib/mysql
7 | restart: always
8 | environment:
9 | MYSQL_ROOT_PASSWORD: wordpress
10 | MYSQL_DATABASE: wordpress
11 | MYSQL_USER: wordpress
12 | MYSQL_PASSWORD: wordpress
13 | wordpress:
14 | depends_on:
15 | - db
16 | image: wordpress:latest
17 | ports:
18 | - "8000:80"
19 | restart: always
20 | environment:
21 | WORDPRESS_DB_HOST: db:3306
22 | WORDPRESS_DB_USER: wordpress
23 | WORDPRESS_DB_PASSWORD: wordpress
24 | volumes:
25 | db_data:
26 |
--------------------------------------------------------------------------------
/docker-docker_compose/compose_w_envars/.env:
--------------------------------------------------------------------------------
1 | WP_HOST_PORT=8000
2 | WP_CONTAINER_PORT=80
3 |
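4 | # docker-compose reads this file and substitutes the values into docker-compose.yml, e.g.:
5 | #   ports:
6 | #     - "${WP_HOST_PORT}:${WP_CONTAINER_PORT}"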
--------------------------------------------------------------------------------
/docker-docker_compose/helper-mac-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # only run on Mac OS X
3 | [ $(uname -s) = "Darwin" ] || { echo 'Only runs on Mac OS X!!!' ; exit 1 ; }
4 | # Install Homebrew
5 | which -s brew || /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
6 |
7 | brew bundle --verbose
8 |
--------------------------------------------------------------------------------
/kubernetes/.gitignore:
--------------------------------------------------------------------------------
1 | env.sh
2 | logs
3 |
--------------------------------------------------------------------------------
/kubernetes/aks/README.md:
--------------------------------------------------------------------------------
1 | # Azure Kubernetes Series
2 |
3 | This is material related to the Azure Kubernetes series.
4 |
5 | * [Series 0: Provisioning](series_0_provisioning/README.md) - covers standing up an AKS cluster.
6 | * [Series 1: Endpoint](series_1_endpoint/README.md) - managing endpoint access into a Kubernetes cluster using ingress or service (type `LoadBalancer`) Kubernetes resources, with automation for DNS record upserts, issuing X.509 certificates, and reverse proxying.
7 | * [Series 2: Progressive Networking](series_2_network_mgmnt/README.md) - managing networks with network policies and service meshes for internal security and load balancing.
8 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/.gitignore:
--------------------------------------------------------------------------------
1 | logs
2 | env.sh
3 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/README.md:
--------------------------------------------------------------------------------
1 | # Provisioning AKS
2 |
3 | This area covers topics on standing up an AKS cluster.
4 |
5 | * Azure CLI
6 | * [Basic](./azure_cli/0_basic/README.md) - provision a cluster with `az` using the default network plugin `kubenet` (see the sketch below).
7 | * Terraform
8 | * [Basic](./terraform/0_basic/README.md) - provision a cluster with `terraform` using the default network plugin `kubenet`.
9 | * [Calico](./terraform/0_basic/README.md) - provision a cluster with `terraform` using the network plugin `azure` and network policies with `calico`.
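10 |
11 | For orientation, the basic Azure CLI flow boils down to a few commands like these (a minimal sketch; the resource group, cluster name, and location are placeholders):
12 |
13 | ```bash
14 | az group create --name aks-demo --location westus2
15 | az aks create --resource-group aks-demo --name basic-cluster --node-count 3 --generate-ssh-keys
16 | az aks get-credentials --resource-group aks-demo --name basic-cluster
17 | ```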
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/azure_cli/4_azure_cni/scripts/enable_pod_subnet.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v az > /dev/null || { echo "'az' command not found" 1>&2; exit 1; }
5 |
6 | az feature register --name PodSubnetPreview --namespace Microsoft.ContainerService
7 | # az feature register --name AutoUpgradePreview --namespace Microsoft.ContainerService
8 | az extension add --name aks-preview
9 | az extension update --name aks-preview
10 | az provider register --namespace Microsoft.ContainerService
11 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/azure_cli/4_azure_cni/scripts/get_ip_addresses.sh:
--------------------------------------------------------------------------------
1 | JSONPATH_NODES='{range .items[*]}{@.metadata.name}{"\t"}{@.status.addresses[?(@.type == "InternalIP")].address}{"\n"}{end}'
2 | JSONPATH_PODS='{range .items[*]}{@.metadata.name}{"\t"}{@.status.podIP}{"\n"}{end}'
3 |
4 | cat <<-EOF
5 | Nodes:
6 | ------------
7 | $(kubectl get nodes --output jsonpath="$JSONPATH_NODES" | xargs printf "%-40s %s\n")
8 |
9 | Pods:
10 | ------------
11 | $(kubectl get pods --output jsonpath="$JSONPATH_PODS" --all-namespaces | \
12 | xargs printf "%-40s %s\n"
13 | )
14 | EOF
15 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/azure_cli/5_pod_id/examples/externaldns/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/kubernetes/aks/series_0_provisioning/azure_cli/5_pod_id/examples/externaldns/README.md
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/azure_cli/5_pod_id/scripts/enable_pod_identity.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v az > /dev/null || { echo "'az' command not found" 1>&2; exit 1; }
5 |
6 | az feature register --name EnablePodIdentityPreview --namespace Microsoft.ContainerService
7 | az feature register --name AutoUpgradePreview --namespace Microsoft.ContainerService
8 | az extension add --name aks-preview
9 | az extension update --name aks-preview
10 | az provider register --namespace Microsoft.ContainerService
11 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/azure_cli/demos/README.md:
--------------------------------------------------------------------------------
1 | # Demos
2 |
3 | These demo programs show how to use Kubernetes resources and add-ons.
4 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/azure_cli/demos/external-dns/README.md:
--------------------------------------------------------------------------------
1 | # External DNS demos
2 |
3 | These solutions use the Kubernetes `Service` resource of type `LoadBalancer`.
4 |
5 | When `external-dns` is installed and functional, DNS records will be updated on Azure DNS.
6 |
7 | ## Environment Variables
8 |
9 | The following environment variables need to be set before running these scripts:
10 |
11 | * `AZ_DNS_DOMAIN` such as `example.com` or `example.internal`
12 |
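13 | For example, a `Service` that `external-dns` would act on might look like the following (an illustrative sketch; the hostname annotation is built from `AZ_DNS_DOMAIN`, and the `app: hello` selector and ports are placeholders):
14 |
15 | ```bash
16 | kubectl apply --filename - <<EOF
17 | apiVersion: v1
18 | kind: Service
19 | metadata:
20 |   name: hello
21 |   annotations:
22 |     external-dns.alpha.kubernetes.io/hostname: hello.${AZ_DNS_DOMAIN}
23 | spec:
24 |   type: LoadBalancer
25 |   selector:
26 |     app: hello
27 |   ports:
28 |     - port: 80
29 |       targetPort: 8080
30 | EOF
31 | ```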
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/terraform/.gitignore:
--------------------------------------------------------------------------------
1 | terraform.tfvars
2 | env.sh
3 | secrets.json
4 | .terraform
5 | *tfstate*
6 | *terraform.lock.hcl
7 | logs
8 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/terraform/0_basic/provider.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | azurerm = {
4 | source = "hashicorp/azurerm"
5 | version = "~>2.0"
6 | }
7 | }
8 | }
9 |
10 | provider "azurerm" {
11 | features {}
12 | }
13 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/terraform/1_dns/templates/external_dns_values.yaml.tmpl:
--------------------------------------------------------------------------------
1 | provider: azure
2 | azure:
3 | resourceGroup: ${resource_group}
4 | tenantId: ${tenant_id}
5 | subscriptionId: ${subscription_id}
6 | useManagedIdentityExtension: true
7 | logLevel: ${log_level}
8 | domainFilters:
9 | - ${domain}
10 | txtOwnerId: external-dns
11 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/terraform/modules/aks/outputs.tf:
--------------------------------------------------------------------------------
1 | output "name" {
2 | description = "The Kubernetes Managed Cluster name."
3 | value = azurerm_kubernetes_cluster.k8s.name
4 | }
5 |
6 | output "kubelet_identity" {
7 | description = "A kubelet_identity block"
8 | value = azurerm_kubernetes_cluster.k8s.kubelet_identity
9 | }
10 |
11 | output "kube_config" {
12 | value = azurerm_kubernetes_cluster.k8s.kube_config
13 | }
14 |
15 | output "kube_config_raw" {
16 | value = azurerm_kubernetes_cluster.k8s.kube_config_raw
17 | }
18 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_0_provisioning/terraform/modules/dns/variables.tf:
--------------------------------------------------------------------------------
1 | ### Input Variables
2 | variable "resource_group_name" {
3 | description = "The resource group name to be imported"
4 | type = string
5 | }
6 |
7 | variable "domain" {
8 | description = "The domain name used to create the Azure DNS zone"
9 | type = string
10 | }
11 |
12 | variable "subdomain_prefix" {
13 | description = "The subdomain_prefix used to create the domain name."
14 | type = string
15 | default = ""
16 | }
17 |
18 | variable "create_dns_zone" {
19 | description = "Toggle whether or not to create the resource."
20 | type = bool
21 | default = true
22 | }
23 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_1_externaldns/.gitignore:
--------------------------------------------------------------------------------
1 | env.sh
2 | hello-k8s.yaml
3 | chart-values.yaml
4 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_1_externaldns/chart-values.yaml.shtmpl:
--------------------------------------------------------------------------------
1 | provider: azure
2 | azure:
3 | resourceGroup: $AZ_RESOURCE_GROUP
4 | tenantId: $AZ_TENANT_ID
5 | subscriptionId: $AZ_SUBSCRIPTION_ID
6 | useManagedIdentityExtension: true
7 | logLevel: debug
8 | domainFilters:
9 | - $AZ_DNS_DOMAIN
10 | txtOwnerId: external-dns
11 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_1_externaldns/create_file_structure.sh:
--------------------------------------------------------------------------------
1 | mkdir -p \
2 | ~/azure_externaldns/{terraform,examples/{dgraph,hello}} && \
3 | cd ~/azure_externaldns
4 |
5 | touch \
6 | env.sh chart-values.{yaml,yaml.shtmpl} helmfile.yaml \
7 | examples/dgraph/{chart-values.{yaml,yaml.shtmpl},helmfile.yaml} \
8 | examples/hello/hello_k8s.yaml.shtmpl \
9 | terraform/{simple_azure_dns.tf,terraform.tfvars}
10 |
11 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_1_externaldns/example.env.sh:
--------------------------------------------------------------------------------
1 | # resource group
2 | export AZ_RESOURCE_GROUP="external-dns"
3 | export AZ_LOCATION="westus"
4 | # domain name you will use, e.g. example.com
5 | export AZ_DNS_DOMAIN="example.com"
6 | # AKS cluster name and local kubeconfig configuration
7 | export AZ_CLUSTER_NAME="external-dns-demo"
8 | export KUBECONFIG=${HOME}/.kube/${AZ_CLUSTER_NAME}
9 | # Fetch tenant and subscription ids for external-dns
10 | export AZ_TENANT_ID=$(az account show --query tenantId | tr -d '"')
11 | export AZ_SUBSCRIPTION_ID=$(az account show --query id | tr -d '"')
12 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_1_externaldns/examples/dgraph/chart_values.yaml.shtmpl:
--------------------------------------------------------------------------------
1 | alpha:
2 | service:
3 | type: LoadBalancer
4 | annotations:
5 | external-dns.alpha.kubernetes.io/hostname: alpha.${AZ_DNS_DOMAIN}
6 | ## set to Local for whitelist to work with service LoadBalancer
7 | externalTrafficPolicy: Local
8 | configFile:
9 | ## accept list should include local AKS IPs and home office IPs (whatismyip)
10 | config.yaml: |
11 | security:
12 | whitelist: ${DG_ALLOW_LIST}
13 | ratel:
14 | enabled: true
15 | service:
16 | type: LoadBalancer
17 | annotations:
18 | external-dns.alpha.kubernetes.io/hostname: ratel.${AZ_DNS_DOMAIN}
19 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_1_externaldns/terraform/simple_azure_dns.tf:
--------------------------------------------------------------------------------
1 | ### Input Variables
2 | variable "resource_group_name" {}
3 | variable "domain_name" {}
4 |
5 | ### Azure DNS Zone Resource
6 | resource "azurerm_dns_zone" "default" {
7 | name = var.domain_name
8 | resource_group_name = var.resource_group_name
9 | }
10 |
11 | ### Provider Requirements
12 | terraform {
13 | required_providers {
14 | azurerm = {
15 | source = "hashicorp/azurerm"
16 | version = "~>2.0"
17 | }
18 | }
19 | }
20 |
21 | provider "azurerm" {
22 | features {}
23 | }
24 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_2_ingress_nginx/.gitignore:
--------------------------------------------------------------------------------
1 | env.sh
2 | hello-k8s.yaml
3 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_2_ingress_nginx/examples/dgraph/data/getting_started_data.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -x
3 |
4 | curl "alpha.$AZ_DNS_DOMAIN/mutate?commitNow=true" --silent --request POST \
5 | --header "Content-Type: application/rdf" \
6 | --data-binary @sw.rdf | jq
7 |
8 | curl "alpha.$AZ_DNS_DOMAIN/alter" --silent --request POST \
9 | --data-binary @sw.schema | jq
10 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_2_ingress_nginx/examples/dgraph/data/sw.schema:
--------------------------------------------------------------------------------
1 | name: string @index(term) .
2 | release_date: datetime @index(year) .
3 | revenue: float .
4 | running_time: int .
5 | starring: [uid] .
6 | director: [uid] .
7 |
8 | type Person {
9 | name
10 | }
11 |
12 | type Film {
13 | name
14 | release_date
15 | revenue
16 | running_time
17 | starring
18 | director
19 | }
20 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_2_ingress_nginx/scripts/config_azure_dns_access.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | export AZ_PRINCIPAL_ID=$(
3 | az aks show -g $AZ_RESOURCE_GROUP -n $AZ_CLUSTER_NAME \
4 | --query "identityProfile.kubeletidentity.objectId" | tr -d '"'
5 | )
6 |
7 | export AZ_DNS_SCOPE=$(
8 | az network dns zone list \
9 | --query "[?name=='$AZ_DNS_DOMAIN'].id" \
10 | --output table | tail -1
11 | )
12 |
13 | az role assignment create \
14 | --assignee "$AZ_PRINCIPAL_ID" \
15 | --role "DNS Zone Contributor" \
16 | --scope "$AZ_DNS_SCOPE"
17 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_2_ingress_nginx/scripts/create_project_file_structure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | mkdir -p ~/azure_ingress_nginx/examples/{dgraph,hello} && cd ~/azure_ingress_nginx
3 | touch env.sh helmfile.yaml examples/{dgraph,hello}/helmfile.yaml
4 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_3_cert_manager/.gitignore:
--------------------------------------------------------------------------------
1 | env.sh
2 | hello-k8s.yaml
3 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_3_cert_manager/examples/dgraph/data/getting_started.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -x
3 |
4 | curl "https://alpha.$AZ_DNS_DOMAIN/mutate?commitNow=true" --silent --request POST \
5 | --header "Content-Type: application/rdf" \
6 | --data-binary @sw.rdf | jq
7 |
8 | curl "https://alpha.$AZ_DNS_DOMAIN/alter" --silent --request POST \
9 | --data-binary @sw.schema | jq
10 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_3_cert_manager/examples/dgraph/data/sw.schema:
--------------------------------------------------------------------------------
1 | name: string @index(term) .
2 | release_date: datetime @index(year) .
3 | revenue: float .
4 | running_time: int .
5 | starring: [uid] .
6 | director: [uid] .
7 |
8 | type Person {
9 | name
10 | }
11 |
12 | type Film {
13 | name
14 | release_date
15 | revenue
16 | running_time
17 | starring
18 | director
19 | }
20 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_3_cert_manager/scripts/create_project_file_structure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | mkdir -p ~/azure_cert_manager/examples/{dgraph,hello} && cd ~/azure_cert_manager
3 | touch env.sh helmfile.yaml ./examples/{dgraph,hello}/helmfile.yaml
4 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_4_ingress_nginx_grpc/.gitignore:
--------------------------------------------------------------------------------
1 | env.sh
2 | hello-k8s.yaml
3 | logs
4 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_4_ingress_nginx_grpc/examples/dgraph/data/.python-version:
--------------------------------------------------------------------------------
1 | pydgraph
2 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_4_ingress_nginx_grpc/examples/dgraph/data/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2021.5.30
2 | grpcio==1.38.1
3 | protobuf==3.17.3
4 | pydgraph==21.3.0
5 | six==1.16.0
6 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_4_ingress_nginx_grpc/examples/dgraph/data/sw.schema:
--------------------------------------------------------------------------------
1 | name: string @index(term) .
2 | release_date: datetime @index(year) .
3 | revenue: float .
4 | running_time: int .
5 | starring: [uid] .
6 | director: [uid] .
7 |
8 | type Person {
9 | name
10 | }
11 |
12 | type Film {
13 | name
14 | release_date
15 | revenue
16 | running_time
17 | starring
18 | director
19 | }
20 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_1_endpoint/part_4_ingress_nginx_grpc/scripts/create_project_file_structure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | mkdir -p ~/azure_ingress_nginx_grpc/examples/{dgraph,hello} && cd ~/azure_ingress_nginx_grpc
3 | touch env.sh helmfile.yaml ./examples/{dgraph,hello}/helmfile.yaml
4 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/.gitignore:
--------------------------------------------------------------------------------
1 | *env.sh
2 | hello-k8s.yaml
3 | logs
4 | .idea
5 | *.key
6 | *.crt
7 | api.proto
8 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_0_docker/aks_ssh/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM alpine:3.14
2 |
3 | # install utilities
4 | RUN apk add --update --no-cache openssh \
5 | curl \
6 | tcptraceroute \
7 | bash \
8 | vim
9 |
10 | RUN mkdir ~/.ssh && chmod 700 ~/.ssh
11 | # keep the container alive; the no-op trap lets TERM/INT interrupt 'wait' so the pod stops promptly
12 | CMD exec /bin/bash -c "trap : TERM INT; sleep infinity & wait"
13 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_0_docker/aks_ssh/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: build push
2 |
3 | build:
4 | @docker build -t ${AZ_ACR_LOGIN_SERVER}/aks-ssh:latest .
5 |
6 | push:
7 | @docker push ${AZ_ACR_LOGIN_SERVER}/aks-ssh:latest
8 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_0_docker/aks_ssh/pod.yaml.envsubst:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Pod
3 | metadata:
4 | name: aks-ssh
5 | spec:
6 | containers:
7 | - name: aks-ssh
8 | image: ${AZ_ACR_LOGIN_SERVER}/aks-ssh:latest
9 | resources:
10 | requests:
11 | memory: "64Mi"
12 | cpu: "80m"
13 | limits:
14 | memory: "128Mi"
15 | cpu: "250m"
16 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_0_docker/linkerd/current.txt:
--------------------------------------------------------------------------------
1 | cr.l5d.io/linkerd/controller:stable-2.10.2
2 | cr.l5d.io/linkerd/debug:stable-2.10.2
3 | cr.l5d.io/linkerd/grafana:stable-2.10.2
4 | cr.l5d.io/linkerd/jaeger-webhook:stable-2.10.2
5 | cr.l5d.io/linkerd/metrics-api:stable-2.10.2
6 | cr.l5d.io/linkerd/proxy-init:v1.3.11
7 | cr.l5d.io/linkerd/proxy:stable-2.10.2
8 | cr.l5d.io/linkerd/tap:stable-2.10.2
9 | cr.l5d.io/linkerd/web:stable-2.10.2
10 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_0_docker/pydgraph/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: test build push clean
2 |
3 | build:
4 | @docker build -t ${AZ_ACR_LOGIN_SERVER}/pydgraph-client:latest .
5 |
6 | push:
7 | @docker push ${AZ_ACR_LOGIN_SERVER}/pydgraph-client:latest
8 |
9 | test: build
10 | docker run --detach --name pydgraph_client ${AZ_ACR_LOGIN_SERVER}/pydgraph-client:latest
11 |
12 | clean:
13 | docker stop pydgraph_client && docker rm pydgraph_client
14 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_0_docker/pydgraph/README.md:
--------------------------------------------------------------------------------
1 | # Pydgraph Client
2 |
3 | This is a small utility container that can seed the graph database with a schema and data (RDF N-Quads). This document only covers building the image and pushing it to a container registry.
4 |
5 |
6 | ## Building
7 |
8 | ```bash
9 | az acr login --name ${AZ_ACR_NAME}
10 |
11 | make build
12 | make push
13 |
14 | # verify results
15 | az acr repository list --name ${AZ_ACR_NAME} --output table
16 | ```
17 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_0_docker/pydgraph/requirements.txt:
--------------------------------------------------------------------------------
1 | certifi==2021.5.30
2 | grpcio==1.38.1
3 | protobuf==3.17.3
4 | pydgraph==21.3.0
5 | six==1.16.0
6 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_0_docker/pydgraph/sw.schema:
--------------------------------------------------------------------------------
1 | name: string @index(term) .
2 | release_date: datetime @index(year) .
3 | revenue: float .
4 | running_time: int .
5 | starring: [uid] .
6 | director: [uid] .
7 |
8 | type Person {
9 | name
10 | }
11 |
12 | type Film {
13 | name
14 | release_date
15 | revenue
16 | running_time
17 | starring
18 | director
19 | }
20 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_1_acr/examples/dgraph/README.md:
--------------------------------------------------------------------------------
1 | # Dgraph Deploy
2 |
3 | ## Deploy
4 |
5 | ### Using Helmfile
6 |
7 | ```bash
8 | helmfile apply
9 | ```
10 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_1_acr/examples/dgraph/helmfile.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | - name: dgraph
3 | url: https://charts.dgraph.io
4 |
5 | releases:
6 | - name: demo
7 | namespace: dgraph
8 | chart: dgraph/dgraph
9 | version: 0.0.17
10 | values:
11 | - alpha:
12 | configFile:
13 | config.yaml: |
14 | security:
15 | whitelist: {{ env "DG_ACCEPT_LIST" | default "0.0.0.0/0" | quote }}
16 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_1_acr/examples/pydgraph/deploy.yaml.envsubst:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: pydgraph-client
6 | spec:
7 | replicas: 1
8 | selector:
9 | matchLabels:
10 | app: pydgraph-client
11 | template:
12 | metadata:
13 | labels:
14 | app: pydgraph-client
15 | spec:
16 | containers:
17 | - name: pydgraph-client
18 | image: ${AZ_ACR_LOGIN_SERVER}/pydgraph-client:latest
19 | resources:
20 | requests:
21 | memory: "64Mi"
22 | cpu: "80m"
23 | limits:
24 | memory: "128Mi"
25 | cpu: "250m"
26 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_1_acr/scripts/create_acr.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify required commands
4 | command -v az > /dev/null || \
5 | { echo "[ERROR]: 'az' command not not found" 1>&2; exit 1; }
6 |
7 | ## Verify these variables are set
8 | [[ -z "$AZ_RESOURCE_GROUP" ]] && { echo 'AZ_RESOURCE_GROUP not specified. Aborting' 2>&1 ; exit 1; }
9 | [[ -z "$AZ_ACR_NAME" ]] && { echo 'AZ_ACR_NAME not specified. Aborting' 2>&1 ; exit 1; }
10 |
11 | az acr create \
12 | --resource-group ${AZ_RESOURCE_GROUP} \
13 | --name ${AZ_ACR_NAME} \
14 | --sku Basic
15 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_1_acr/scripts/create_project_file_structure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | mkdir -p ~/azure_acr/examples/{dgraph,pydgraph} && cd ~/azure_acr
3 | touch \
4 | env.sh \
5 | ./examples/{dgraph,pydgraph}/helmfile.yaml \
6 | ./examples/pydgraph/{Dockerfile,Makefile,requirements.txt,load_data.py,sw.schema,sw.nquads.rdf}
7 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_1_acr/scripts/delete_aks.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify required commands
4 | command -v az > /dev/null || \
5 | { echo "[ERROR]: 'az' command not not found" 1>&2; exit 1; }
6 |
7 | ## Verify these variables are set
8 | [[ -z "$AZ_RESOURCE_GROUP" ]] && { echo 'AZ_RESOURCE_GROUP not specified. Aborting' 2>&1 ; exit 1; }
9 | [[ -z "$AZ_CLUSTER_NAME" ]] && { echo 'AZ_CLUSTER_NAME not specified. Aborting' 2>&1 ; exit 1; }
10 |
11 | az aks delete --resource-group ${AZ_RESOURCE_GROUP} --name ${AZ_CLUSTER_NAME}
12 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_2_calico/.gitignore:
--------------------------------------------------------------------------------
1 | *env.sh
2 | hello-k8s.yaml
3 | logs
4 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_2_calico/examples/dgraph/deploy_dgraph.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # required command line tools
4 | command -v helm > /dev/null || \
5 | { echo "[ERROR]: 'helm' command not not found" 1>&2; exit 1; }
6 | command -v helmfile > /dev/null || \
7 | { echo "[ERROR]: 'helmfile' command not not found" 1>&2; exit 1; }
8 | command -v kubectl > /dev/null || \
9 | { echo "[ERROR]: 'kubectl' command not not found" 1>&2; exit 1; }
10 |
11 | # deploy dgraph
12 | HELMFILE=${HELMFILE:-"$(dirname $0)/helmfile.yaml"}
13 | helmfile --file $HELMFILE apply
14 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_2_calico/examples/dgraph/helmfile.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | - name: dgraph
3 | url: https://charts.dgraph.io
4 |
5 | releases:
6 | - name: demo
7 | namespace: dgraph
8 | chart: dgraph/dgraph
9 | version: 0.0.17
10 | values:
11 | - alpha:
12 | configFile:
13 | config.yaml: |
14 | security:
15 | whitelist: {{ env "DG_ACCEPT_LIST" | default "0.0.0.0/0" | quote }}
16 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_2_calico/examples/pydgraph/deploy_pydgraph_client.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # required command line tools
4 | command -v helm > /dev/null || \
5 | { echo "[ERROR]: 'helm' command not not found" 1>&2; exit 1; }
6 | command -v helmfile > /dev/null || \
7 | { echo "[ERROR]: 'helmfile' command not not found" 1>&2; exit 1; }
8 | command -v kubectl > /dev/null || \
9 | { echo "[ERROR]: 'kubectl' command not not found" 1>&2; exit 1; }
10 |
11 | # deploy pydgraph clients
12 | HELMFILE=${HELMFILE:-"$(dirname $0)/helmfile.yaml"}
13 | helmfile --file $HELMFILE apply
14 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_2_calico/scripts/create_acr.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify required commands
4 | command -v az > /dev/null || \
5 | { echo "[ERROR]: 'az' command not not found" 1>&2; exit 1; }
6 |
7 | ## Verify these variables are set
8 | [[ -z "$AZ_RESOURCE_GROUP" ]] && { echo 'AZ_RESOURCE_GROUP not specified. Aborting' 2>&1 ; exit 1; }
9 | [[ -z "$AZ_ACR_NAME" ]] && { echo 'AZ_ACR_NAME not specified. Aborting' 2>&1 ; exit 1; }
10 |
11 | az acr create \
12 | --resource-group ${AZ_RESOURCE_GROUP} \
13 | --name ${AZ_ACR_NAME} \
14 | --sku Basic
15 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_2_calico/scripts/create_project_file_structure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | mkdir -p ~/azure_calico/examples/{dgraph,pydgraph} && cd ~/azure_calico
3 |
4 | touch env.sh \
5 | ./examples/dgraph/{helmfile.yaml,network_policy.yaml} \
6 | ./examples/pydgraph/{Dockerfile,Makefile,helmfile.yaml,requirements.txt,load_data.py,sw.schema,sw.nquads.rdf}
7 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_2_calico/scripts/delete_aks.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify required commands
4 | command -v az > /dev/null || \
5 | { echo "[ERROR]: 'az' command not not found" 1>&2; exit 1; }
6 |
7 | ## Verify these variables are set
8 | [[ -z "$AZ_RESOURCE_GROUP" ]] && { echo 'AZ_RESOURCE_GROUP not specified. Aborting' 2>&1 ; exit 1; }
9 | [[ -z "$AZ_CLUSTER_NAME" ]] && { echo 'AZ_CLUSTER_NAME not specified. Aborting' 2>&1 ; exit 1; }
10 |
11 | az aks delete --resource-group ${AZ_RESOURCE_GROUP} --name ${AZ_CLUSTER_NAME}
12 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_2_calico/scripts/print_ip_addr.sh:
--------------------------------------------------------------------------------
1 | JSONPATH_NODES='{range .items[*]}{@.metadata.name}{"\t"}{@.status.addresses[?(@.type == "InternalIP")].address}{"\n"}{end}'
2 | JSONPATH_PODS='{range .items[*]}{@.metadata.name}{"\t"}{@.status.podIP}{"\n"}{end}'
3 |
4 | cat <<-EOF
5 | Nodes:
6 | ------------
7 | $(kubectl get nodes --output jsonpath="$JSONPATH_NODES" | xargs printf "%-40s %s\n")
8 |
9 | Pods:
10 | ------------
11 | $(kubectl get pods --output jsonpath="$JSONPATH_PODS" --all-namespaces | \
12 | xargs printf "%-40s %s\n"
13 | )
14 | EOF
15 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/certs/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/certs/README.md
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/examples/dgraph/README.md:
--------------------------------------------------------------------------------
1 | # Dgraph Deploy
2 |
3 | ## Deploy
4 |
5 | ### Using Helmfile
6 |
7 | ```bash
8 | ./deploy_dgraph.sh
9 | ```
10 |
11 | ### Adding Network Policy
12 |
13 | ```bash
14 | kubectl apply --namespace "dgraph" --filename net_policy.yaml
15 | ```
16 |
17 | ### Profile
18 |
19 | ```bash
20 | curl -sOL https://raw.githubusercontent.com/dgraph-io/dgo/v210.03.0/protos/api.proto
21 |
22 | linkerd profile \
23 | --proto api.proto \
24 | --namespace dgraph dgraph-svc | \
25 | kubectl apply --namespace "dgraph" --filename -
26 | ```
27 |
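28 | Afterward, the generated `ServiceProfile` can be verified (a quick check, assuming the profile output above was applied):
29 |
30 | ```bash
31 | kubectl get serviceprofiles.linkerd.io --namespace "dgraph"
32 | ```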
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/examples/dgraph/helmfile.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | - name: dgraph
3 | url: https://charts.dgraph.io
4 |
5 | releases:
6 | - name: demo
7 | namespace: dgraph
8 | chart: dgraph/dgraph
9 | version: 0.0.17
10 | values:
11 | - alpha:
12 | configFile:
13 | config.yaml: |
14 | security:
15 | whitelist: {{ env "DG_ACCEPT_LIST" | default "0.0.0.0/0" | quote }}
16 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/examples/pydgraph/README.md:
--------------------------------------------------------------------------------
1 | # Pydgraph Client Deploy
2 |
3 | ## Deploy
4 |
5 | ### Using Helmfile
6 |
7 | ```bash
8 | ./deploy_pydgraph_client.sh
9 | ```
10 |
11 | ## Running Tools in Client Container
12 |
13 | ```bash
14 | PYDGRAPH_POD=$(kubectl get pods --namespace pydgraph-client --output name)
15 | kubectl exec -ti --namespace pydgraph-client ${PYDGRAPH_POD} --container "pydgraph-client" -- bash
16 | ```
17 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/scripts/create_certs.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify essential commands
4 | command -v step > /dev/null || \
5 | { echo "[ERROR]: 'step' command not not found" 1>&2; exit 1; }
6 |
7 | CERT_PATH=${CERT_PATH:-"$(dirname $0)/../certs"}
8 |
9 | step certificate create root.linkerd.cluster.local \
10 | $CERT_PATH/ca.crt $CERT_PATH/ca.key \
11 | --profile root-ca --no-password --insecure
12 |
13 | step certificate create identity.linkerd.cluster.local \
14 | $CERT_PATH/issuer.crt $CERT_PATH/issuer.key \
15 | --profile intermediate-ca --not-after 8760h \
16 | --no-password --insecure \
17 | --ca $CERT_PATH/ca.crt --ca-key $CERT_PATH/ca.key
18 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/scripts/create_project_file_structure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | mkdir -p ~/azure_linkerd/{certs,examples/{dgraph,pydgraph}} && cd ~/azure_linkerd
3 |
4 | touch env.sh \
5 | ./certs/{ca,issuer}{.key,.crt} \
6 | ./examples/dgraph/{helmfile.yaml,network_policy.yaml} \
7 | ./examples/pydgraph/{Dockerfile,Makefile,helmfile.yaml,requirements.txt,load_data.py,sw.schema,sw.nquads.rdf}
8 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/scripts/deploy_ext_jaeger.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify essential commands
4 | command -v linkerd > /dev/null || \
5 | { echo "[ERROR]: 'linkerd' command not not found" 1>&2; exit 1; }
6 |
7 | # NOTE: namespace is embedded
8 | linkerd jaeger install --set defaultRegistry="$LINKERD_REGISTRY" | kubectl apply -f -
9 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/scripts/deploy_ext_viz.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify essential commands
4 | command -v linkerd > /dev/null || \
5 | { echo "[ERROR]: 'linkerd' command not not found" 1>&2; exit 1; }
6 |
7 | # NOTE: namespace is embedded
8 | linkerd viz install --set defaultRegistry="$LINKERD_REGISTRY" | kubectl apply -f -
9 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_3_linkerd/scripts/print_ip_addr.sh:
--------------------------------------------------------------------------------
1 | JSONPATH_NODES='{range .items[*]}{@.metadata.name}{"\t"}{@.status.addresses[?(@.type == "InternalIP")].address}{"\n"}{end}'
2 | JSONPATH_PODS='{range .items[*]}{@.metadata.name}{"\t"}{@.status.podIP}{"\n"}{end}'
3 |
4 | cat <<-EOF
5 | Nodes:
6 | ------------
7 | $(kubectl get nodes --output jsonpath="$JSONPATH_NODES" | xargs printf "%-40s %s\n")
8 |
9 | Pods:
10 | ------------
11 | $(kubectl get pods --output jsonpath="$JSONPATH_PODS" --all-namespaces | \
12 | xargs printf "%-40s %s\n"
13 | )
14 | EOF
15 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/.gitignore:
--------------------------------------------------------------------------------
1 | /addons/*
2 | !/addons/README.md
3 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/addons/README.md:
--------------------------------------------------------------------------------
1 | # Telemetry Addons
2 |
3 | ```bash
4 | VER="1.10"
5 | PREFIX="raw.githubusercontent.com/istio/istio/release-${VER}/samples/addons"
6 | MANIFESTS=("grafana" "jaeger" "kiali" "prometheus" "prometheus_vm" "prometheus_vm_tls")
7 | for MANIFEST in ${MANIFESTS[*]}; do
8 | curl --silent --location "https://$PREFIX/$MANIFEST.yaml" --output ./$MANIFEST.yaml
9 | done
10 | ```
11 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/examples/bookinfo/deploy_dgraph.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | command -v kubectl > /dev/null || \
4 | { echo "[ERROR]: 'kubectl' command not not found" 1>&2; exit 1; }
5 |
6 | HELMFILE=${HELMFILE:-"$(dirname $0)/helmfile.yaml"}
7 |
8 | kubectl get namespace "bookinfo" > /dev/null 2> /dev/null || \
9 | kubectl create namespace "bookinfo" && \
10 | kubectl label namespaces "bookinfo" name="bookinfo"
11 | kubectl label namespace "bookinfo" istio-injection="enabled"
12 |
13 | kubectl apply -n bookinfo -f https://raw.githubusercontent.com/istio/istio/release-1.10/samples/bookinfo/platform/kube/bookinfo.yaml
14 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/examples/dgraph/README.md:
--------------------------------------------------------------------------------
1 | # Dgraph Deploy
2 |
3 | ## Deploy
4 |
5 | ### Using Helmfile
6 |
7 | ```bash
8 | # deploy Dgraph cluster
9 | helmfile --file $HELMFILE apply
10 | ```
11 |
12 | #### Adding Network Policy
13 |
14 | This policy will deny traffic that is outside of the service mesh.
15 |
16 | NOTE: This requires a network plugin that supports network policies, such as Calico, to be installed previously.
17 |
18 | ```bash
19 | kubectl apply -f network_policy.yaml
20 | ```
21 |
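22 | For reference, a policy of this general shape might look like the following (an illustrative sketch, not necessarily the repo's `network_policy.yaml`; it only admits traffic from namespaces labeled for Istio sidecar injection):
23 |
24 | ```bash
25 | kubectl apply --filename - <<EOF
26 | apiVersion: networking.k8s.io/v1
27 | kind: NetworkPolicy
28 | metadata:
29 |   name: dgraph-mesh-only
30 |   namespace: dgraph
31 | spec:
32 |   podSelector: {}
33 |   policyTypes:
34 |     - Ingress
35 |   ingress:
36 |     - from:
37 |         - namespaceSelector:
38 |             matchLabels:
39 |               istio-injection: enabled
40 | EOF
41 | ```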
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/examples/dgraph/deploy_dgraph.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | command -v helm > /dev/null || \
4 | { echo "[ERROR]: 'helm' command not not found" 1>&2; exit 1; }
5 | command -v helmfile > /dev/null || \
6 | { echo "[ERROR]: 'helmfile' command not not found" 1>&2; exit 1; }
7 | command -v kubectl > /dev/null || \
8 | { echo "[ERROR]: 'kubectl' command not not found" 1>&2; exit 1; }
9 |
10 | HELMFILE=${HELMFILE:-"$(dirname $0)/helmfile.yaml"}
11 | helmfile --file $HELMFILE apply
12 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/examples/pydgraph/deploy_pydgraph_deny.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | command -v helm > /dev/null || \
4 | { echo "[ERROR]: 'helm' command not not found" 1>&2; exit 1; }
5 | command -v helmfile > /dev/null || \
6 | { echo "[ERROR]: 'helmfile' command not not found" 1>&2; exit 1; }
7 | command -v kubectl > /dev/null || \
8 | { echo "[ERROR]: 'kubectl' command not not found" 1>&2; exit 1; }
9 | command -v linkerd > /dev/null || \
10 | { echo "[ERROR]: 'linkerd' command not not found" 1>&2; exit 1; }
11 |
12 | HELMFILE=${HELMFILE:-"$(dirname $0)/helmfile.yaml"}
13 | helmfile --namespace "pydgraph-deny" --file $HELMFILE apply
14 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/scripts/create_project_file_structure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | mkdir -p ~/azure_istio/{addons,examples/{dgraph,pydgraph}} && cd ~/azure_istio
3 | touch env.sh \
4 | ./examples/dgraph/{helmfile.yaml,network_policy.yaml} \
5 | ./examples/pydgraph/{Dockerfile,Makefile,helmfile.yaml,requirements.txt,load_data.py,sw.schema,sw.nquads.rdf} \
6 | ./addons/{grafana,jaeger,kiali,prometheus{,_vm,_vm_tls}}.yaml
7 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/scripts/fetch_addons.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | command -v kubectl > /dev/null || \
4 | { echo "[ERROR]: 'kubectl' command not not found" 1>&2; exit 1; }
5 |
6 | ADDONS=${ADDONS:-"$(dirname $0)/../addons"}
7 |
8 | VER="1.10"
9 | PREFIX="raw.githubusercontent.com/istio/istio/release-${VER}/samples/addons"
10 | MANIFESTS=("grafana" "jaeger" "kiali" "prometheus" "prometheus_vm" "prometheus_vm_tls")
11 |
12 | for MANIFEST in ${MANIFESTS[*]}; do
13 | curl --silent \
14 | --location "https://${PREFIX}/${MANIFEST}.yaml" \
15 | --output ${ADDONS}/${MANIFEST}.yaml
16 | done
17 |
18 | kubectl apply --filename ${ADDONS}
19 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/scripts/get_support_versions.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify required commands
4 | command -v az > /dev/null || \
5 | { echo "[ERROR]: 'az' command not not found" 1>&2; exit 1; }
6 | [[ -z "$AZ_LOCATION" ]] && { echo 'AZ_LOCATION not specified. Aborting' 2>&1 ; exit 1; }
7 |
8 | az aks get-versions --location ${AZ_LOCATION} --query "orchestrators[].orchestratorVersion"
9 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/scripts/get_supported_regions.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Verify required commands
4 | command -v az > /dev/null || \
5 | { echo "[ERROR]: 'az' command not not found" 1>&2; exit 1; }
6 |
7 | az provider list \
8 | --query "[?namespace=='Microsoft.ContainerService'].resourceTypes[] | [?resourceType=='managedClusters'].locations[]" \
9 | -o tsv
10 |
--------------------------------------------------------------------------------
/kubernetes/aks/series_2_network_mgmnt/part_4_istio/scripts/print_ip_addr.sh:
--------------------------------------------------------------------------------
1 | JSONPATH_NODES='{range .items[*]}{@.metadata.name}{"\t"}{@.status.addresses[?(@.type == "InternalIP")].address}{"\n"}{end}'
2 | JSONPATH_PODS='{range .items[*]}{@.metadata.name}{"\t"}{@.status.podIP}{"\n"}{end}'
3 |
4 | cat <<-EOF
5 | Nodes:
6 | ------------
7 | $(kubectl get nodes --output jsonpath="$JSONPATH_NODES" | xargs printf "%-40s %s\n")
8 |
9 | Pods:
10 | ------------
11 | $(kubectl get pods --output jsonpath="$JSONPATH_PODS" --all-namespaces | \
12 | xargs printf "%-40s %s\n"
13 | )
14 | EOF
15 |
--------------------------------------------------------------------------------
/kubernetes/eks/.gitignore:
--------------------------------------------------------------------------------
1 | .envrc
2 |
3 |
--------------------------------------------------------------------------------
/kubernetes/eks/baseline/eksctl/addons/aws_ebs_csi_driver/create_esci_irsa.sh:
--------------------------------------------------------------------------------
1 | # AWS IAM role bound to a Kubernetes service account
2 | eksctl create iamserviceaccount \
3 | --name "ebs-csi-controller-sa" \
4 | --namespace "kube-system" \
5 | --cluster $EKS_CLUSTER_NAME \
6 | --region $EKS_REGION \
7 | --attach-policy-arn $POLICY_ARN_ESCI \
8 | --role-only \
9 | --role-name $ROLE_NAME_ECSI \
10 | --approve
11 |
--------------------------------------------------------------------------------
/kubernetes/eks/baseline/eksctl/addons/aws_ebs_csi_driver/create_storage_class.sh:
--------------------------------------------------------------------------------
1 | cat <<-EOF | kubectl apply --filename -
2 | apiVersion: storage.k8s.io/v1
3 | kind: StorageClass
4 | metadata:
5 |   name: ebs-sc
6 | provisioner: ebs.csi.aws.com
7 | volumeBindingMode: WaitForFirstConsumer
8 | EOF
--------------------------------------------------------------------------------
/kubernetes/eks/baseline/eksctl/tests/delete_ingress.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v kubectl > /dev/null || { echo "'kubectl' command not found" 1>&2; exit 1; }
5 |
6 | ## cleanup
7 | kubectl delete "ingress/gke-ingress" --namespace "httpd-ing"
8 | kubectl delete namespace "httpd-ing"
--------------------------------------------------------------------------------
/kubernetes/eks/baseline/eksctl/tests/delete_lb.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v kubectl > /dev/null || { echo "'kubectl' command not found" 1>&2; exit 1; }
5 |
6 | ## Cleanup
7 | kubectl delete "service/httpd" --namespace "httpd-svc"
8 | kubectl delete namespace "httpd-svc"
--------------------------------------------------------------------------------
/kubernetes/eks/baseline/eksctl/tests/delete_netpol.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v kubectl > /dev/null || { echo "'kubectl' command not found" 1>&2; exit 1; }
5 |
6 | ## cleanup
7 | MANIFESTS=(04-client 03-frontend 02-backend 01-management-ui 00-namespace)
8 | APP_URL=https://docs.projectcalico.org/v3.5/getting-started/kubernetes/tutorials/stars-policy/manifests/
9 |
10 | for MANIFEST in ${MANIFESTS[*]}; do
11 | kubectl delete --filename $APP_URL/$MANIFEST.yaml
12 | done
13 |
--------------------------------------------------------------------------------
/kubernetes/eks/baseline/eksctl/tests/delete_pv.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v kubectl > /dev/null || { echo "'kubectl' command not found" 1>&2; exit 1; }
5 |
6 | kubectl delete pod app --namespace "pv-test"
7 | kubectl delete pvc pv-claim --namespace "pv-test"
8 | kubectl delete ns "pv-test"
--------------------------------------------------------------------------------
/kubernetes/eks/baseline/terraform/README.md:
--------------------------------------------------------------------------------
1 | TBA
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/README.md:
--------------------------------------------------------------------------------
1 | # EKS 1: Provisioning with eksctl
2 |
3 | This is supporting code for provisioning an EKS cluster using the `eksctl` tool.
4 |
5 | * Introduction
6 | * Contrast between `gcloud container clusters create` vs `eksctl create cluster`
7 | * Install Scripts (Lin/Win/Mac) for `eksctl` and `kubectl`
8 | * Provision using the `eksctl` CLI
9 | * Provision using an `eksctl` config file
10 | * Deploy an application with an external load balancer endpoint
11 |
12 | ## Sections
13 |
14 | * [Part 0: Introduction](part0_intro/README.md)
15 | * [Part 1: EKSCtl CLI](part1_cli/README.md)
16 | * [Part 2: EKSCtl Config](part2_crd/README.md)
17 | * [Part 3: Deploy Application](part3_app/README.md)
18 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/README.md:
--------------------------------------------------------------------------------
1 | # Part 0 - Introduction
2 |
3 | * [Installer Scripts](install/README.md)
4 | * Example Scripts
5 | * [create_gke_cluster.sh](create_gke_cluster.sh)
6 | * [create_eks_cluster.sh](create_eks_cluster.sh)
7 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/create_eks_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v eksctl > /dev/null || \
3 | { echo 'eksctl command not found' 1>&2; exit 1; }
4 |
5 | eksctl create cluster \
6 | --version 1.14 \
7 | --region us-west-2 \
8 | --node-type t3.medium \
9 | --nodes 3 \
10 | --nodes-min 1 \
11 | --nodes-max 4 \
12 | --name my-demo
13 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/create_gke_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v gcloud > /dev/null || \
3 | { echo 'gcloud command not found' 1>&2; exit 1; }
4 |
5 | gcloud container clusters create \
6 | --cluster-version 1.14.10-gke.36 \
7 | --region us-west1 \
8 | --machine-type n1-standard-2 \
9 | --num-nodes 1 \
10 | --min-nodes 1 \
11 | --max-nodes 4 \
12 | my-demo
13 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/install/Brewfile:
--------------------------------------------------------------------------------
1 | tap "weaveworks/tap"
2 | brew "eksctl"
3 | brew "kubernetes-cli"
4 |
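5 | # Usage: brew bundle --file ./Brewfile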
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/install/choco.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <packages>
3 |   <package id="eksctl" />
4 |   <package id="kubernetes-cli" />
5 | </packages>
6 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/install/install_eksctl_linux.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | TARBALL_NAME="eksctl_$(uname -s)_amd64.tar.gz"
4 | HTTP_PATH="weaveworks/eksctl/releases/download/latest_release"
5 | LOCATION="https://github.com/$HTTP_PATH/$TARBALL_NAME"
6 |
7 | curl --silent --location "$LOCATION" | tar xz -C /tmp
8 | sudo mv /tmp/eksctl /usr/local/bin
9 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/install/install_kubectl_debian.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | sudo apt-get update && sudo apt-get install -y apt-transport-https
4 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg \
5 | | sudo apt-key add -
6 | sudo cp kubernetes.list /etc/apt/sources.list.d/kubernetes.list
7 | sudo apt-get update && sudo apt-get install -y kubectl
8 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/install/install_kubectl_rhel.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | sudo cp kubernetes.repo /etc/yum.repos.d/kubernetes.repo
4 | sudo yum install -y kubectl
5 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/install/kubernetes.list:
--------------------------------------------------------------------------------
1 | deb https://apt.kubernetes.io/ kubernetes-xenial main
2 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part0_intro/install/kubernetes.repo:
--------------------------------------------------------------------------------
1 | [kubernetes]
2 | name=Kubernetes
3 | baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
4 | enabled=1
5 | gpgcheck=1
6 | repo_gpgcheck=1
7 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
8 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part1_cli/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v eksctl > /dev/null || \
3 |   { echo 'eksctl command not found' 1>&2; exit 1; }
4 |
5 | ## default settings
6 | MY_CLUSTER_NAME=${1:-"my-demo-cluster"}
7 | MY_REGION=${2:-"us-west-2"}
8 | MY_VERSION=${3:-"1.14"}
9 |
10 | ## provision eks using eksctl cli
11 | eksctl create cluster \
12 | --version $MY_VERSION \
13 | --region $MY_REGION \
14 | --node-type t3.medium \
15 | --nodes 3 \
16 | --nodes-min 1 \
17 | --nodes-max 4 \
18 | --name $MY_CLUSTER_NAME
19 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part1_cli/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v eksctl > /dev/null || \
3 |   { echo 'eksctl command not found' 1>&2; exit 1; }
4 |
5 | ## default settings
6 | MY_CLUSTER_NAME=${1:-"my-demo-cluster"}
7 | MY_REGION=${2:-"us-west-2"}
8 |
9 | ## delete eks using eksctl cli
10 | eksctl delete cluster \
11 | --region $MY_REGION \
12 | --name $MY_CLUSTER_NAME \
13 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part2_crd/.gitignore:
--------------------------------------------------------------------------------
1 | cluster.yaml
2 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part2_crd/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v eksctl > /dev/null || \
3 |   { echo 'eksctl command not found' 1>&2; exit 1; }
4 |
5 | ## default settings
6 | MY_CLUSTER_NAME=${1:-"my-demo-cluster"}
7 | MY_REGION=${2:-"us-west-2"}
8 | MY_VERSION=${3:-"1.14"}
9 |
10 | ## create eksctl config from template
11 | sed -e "s/\$MY_CLUSTER_NAME/$MY_CLUSTER_NAME/" \
12 | -e "s/\$MY_REGION/$MY_REGION/" \
13 | -e "s/\$MY_VERSION/$MY_VERSION/" \
14 | template_cluster.yaml > cluster.yaml
15 |
16 | ## provision eks from eksctl config
17 | eksctl create cluster --config-file "cluster.yaml"
18 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part2_crd/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v eksctl > /dev/null || \
3 |   { echo 'eksctl command not found' 1>&2; exit 1; }
4 |
5 | ## delete eks cluster from eksctl config
6 | eksctl delete cluster --config-file "cluster.yaml"
7 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part2_crd/template_cluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: eksctl.io/v1alpha5
3 | kind: ClusterConfig
4 | metadata:
5 | name: $MY_CLUSTER_NAME
6 | region: $MY_REGION
7 | version: "$MY_VERSION"
8 | nodeGroups:
9 | - name: $MY_CLUSTER_NAME-workers
10 | instanceType: t3.medium
11 | desiredCapacity: 4
12 | minSize: 1
13 | maxSize: 4
14 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part3_app/README.md:
--------------------------------------------------------------------------------
1 | # Application
2 |
3 | This deploys a demonstration application, "hello-kubernetes", with an external load balancer endpoint.
4 |
5 | ## Deploy
6 |
7 | ```bash
8 | cat *.yaml | kubectl create --filename -
9 | ```
10 |
11 | ## Delete
12 |
13 | ```bash
14 | cat *.yaml | kubectl delete --filename -
15 | ```
16 |
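17 | ## Verify
18 |
19 | Once deployed, the external endpoint appears in the `EXTERNAL-IP` column after AWS finishes provisioning the load balancer (this can take a minute or two):
20 |
21 | ```bash
22 | kubectl get service hello-kubernetes --watch
23 | ```
24 |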
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part3_app/hello-k8s-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hello-kubernetes
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: hello-kubernetes
11 | template:
12 | metadata:
13 | labels:
14 | app: hello-kubernetes
15 | spec:
16 | containers:
17 | - name: hello-kubernetes
18 | image: paulbouwer/hello-kubernetes:1.5
19 | ports:
20 | - containerPort: 8080
21 | resources:
22 | requests:
23 | memory: "64Mi"
24 | cpu: "80m"
25 | limits:
26 | memory: "128Mi"
27 | cpu: "250m"
28 |
--------------------------------------------------------------------------------
/kubernetes/eks_1_provision_eksctl/part3_app/hello-k8s-svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: hello-kubernetes
6 | spec:
7 | type: LoadBalancer
8 | ports:
9 | - port: 80
10 | targetPort: 8080
11 | selector:
12 | app: hello-kubernetes
13 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/README.md:
--------------------------------------------------------------------------------
1 | # EKS 3: Ingress Nginx
2 |
3 | This is supporting code for creating the following:
4 |
5 | * Provisioning EKS with `eksctl`
6 | * Notes on BYOC (*bring-your-own-cluster*) if you want to use your own cluster
7 | * Adding the `external-dns` and `ingress-nginx` addons
8 | * Notes for Route53 and AWS Certificate Manager setup
9 | * Deploying an application that uses the ingress with DNS and TLS
10 |
11 | ## Sections
12 |
13 | * [Part 0: Provision EKS](part0_provision/README.md)
14 | * [Part 1: Kubernetes Addons](part1_addons/README.md)
15 | * [Part 2: Deploy Application](part2_app/README.md)
16 |
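17 | The Part 1 addon values are rendered from templates using the same `sed` pattern as the cluster config; a minimal sketch (the domain is a placeholder):
18 |
19 | ```bash
20 | export MY_DOMAIN="example.com"
21 | sed -e "s/\$MY_DOMAIN/$MY_DOMAIN/" \
22 |   part1_addons/template.external-dns.yaml > part1_addons/values.external-dns.yaml
23 | ```
24 |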
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part0_provision/.gitignore:
--------------------------------------------------------------------------------
1 | cluster_with_dns.yaml
2 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part0_provision/cluster.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: eksctl.io/v1alpha5
2 | kind: ClusterConfig
3 | metadata:
4 | name: joauqin-cluster
5 | region: us-east-2
6 | version: "1.14"
7 | # traits of worker nodes
8 | nodeGroups:
9 | - name: joauqin-cluster-workers
10 | instanceType: t3.medium
11 | desiredCapacity: 4
12 | minSize: 1
13 | maxSize: 4
14 | iam:
15 |     # policies added to worker node role
16 | withAddonPolicies:
17 | # allows read/write to zones in Route53
18 | externalDNS: true
19 | # access to ACM
20 | certManager: true
21 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part0_provision/create_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v eksctl > /dev/null || \
3 |   { echo 'eksctl command not found' 1>&2; exit 1; }
4 |
5 | ## default settings
6 | MY_CLUSTER_NAME=${1:-"my-ingress-demo-cluster"}
7 | MY_REGION=${2:-"us-west-2"}
8 | MY_VERSION=${3:-"1.14"}
9 |
10 | ## create eksctl config from template
11 | sed -e "s/\$MY_CLUSTER_NAME/$MY_CLUSTER_NAME/" \
12 | -e "s/\$MY_REGION/$MY_REGION/" \
13 | -e "s/\$MY_VERSION/$MY_VERSION/" \
14 | template_cluster.yaml > cluster_with_dns.yaml
15 |
16 | ## provision eks from eksctl config
17 | eksctl create cluster --config-file "cluster_with_dns.yaml"
18 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part0_provision/delete_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v eksctl > /dev/null || \
3 |   { echo 'eksctl command not found' 1>&2; exit 1; }
4 |
5 | ## delete eks cluster from eksctl config
6 | eksctl delete cluster --config-file "cluster_with_dns.yaml"
7 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part0_provision/template_cluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: eksctl.io/v1alpha5
3 | kind: ClusterConfig
4 | metadata:
5 | name: $MY_CLUSTER_NAME
6 | region: $MY_REGION
7 | version: "$MY_VERSION"
8 | # traits of worker nodes
9 | nodeGroups:
10 | - name: $MY_CLUSTER_NAME-workers
11 | instanceType: t3.medium
12 | desiredCapacity: 4
13 | minSize: 1
14 | maxSize: 4
15 | iam:
16 |     # policies added to worker node role
17 | withAddonPolicies:
18 | # allows read/write to zones in Route53
19 | externalDNS: true
20 | # access to ACM
21 | certManager: true
22 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part1_addons/.gitignore:
--------------------------------------------------------------------------------
1 | values.external-dns.yaml
2 | values.nginx-ingress.yaml
3 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part1_addons/template.external-dns.yaml:
--------------------------------------------------------------------------------
1 | # restrict changes to these domains
2 | domainFilters:
3 | - $MY_DOMAIN
4 | # descriptive text to show who created the record
5 | txtOwnerId: "externaldns"
6 | aws:
7 | zoneType: public
8 | region: ""
9 | # fetch latest image from the source
10 | image:
11 | registry: registry.opensource.zalan.do
12 | repository: teapot/external-dns
13 | tag: v0.7.3
14 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part2_app/.gitignore:
--------------------------------------------------------------------------------
1 | hello-k8s-ing.yaml
2 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part2_app/hello-k8s-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hello-kubernetes
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: hello-kubernetes
11 | template:
12 | metadata:
13 | labels:
14 | app: hello-kubernetes
15 | spec:
16 | containers:
17 | - name: hello-kubernetes
18 | image: paulbouwer/hello-kubernetes:1.5
19 | ports:
20 | - containerPort: 8080
21 | resources:
22 | requests:
23 | memory: "64Mi"
24 | cpu: "80m"
25 | limits:
26 | memory: "128Mi"
27 | cpu: "250m"
28 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part2_app/hello-k8s-svc-clusterip.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: hello-kubernetes
6 | spec:
7 | # use ClusterIP with ingress
8 | type: ClusterIP
9 | ports:
10 | # service listens on http (80)
11 | - port: 80
12 | # sends to a pod on port 8080
13 | targetPort: 8080
14 | selector:
15 | app: hello-kubernetes
16 |
--------------------------------------------------------------------------------
/kubernetes/eks_3_ingress_nginx/part2_app/template-ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: hello-kubernetes
6 | annotations:
7 | # use nginx-ingress (needed if 2+ ingress controllers)
8 | kubernetes.io/ingress.class: nginx
9 | spec:
10 | rules:
11 | # route by virtualhost - externaldns registers this record
12 | - host: $MY_DNS_NAME
13 | http:
14 | paths:
15 | - backend:
16 | serviceName: hello-kubernetes
17 | # connect to service using http port
18 | servicePort: 80
19 | # default path put here explicitly for illustrative purposes
20 | path: /
21 |
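22 | # render with, e.g. (hypothetical helper step mirroring the other templates in this series):
23 | #   sed -e "s/\$MY_DNS_NAME/$MY_DNS_NAME/" template-ingress.yaml > hello-k8s-ing.yaml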
--------------------------------------------------------------------------------
/kubernetes/eks_b1_provision_vpc_terraform/main.tf:
--------------------------------------------------------------------------------
1 | variable "region" {}
2 | variable "name" {}
3 |
4 | module "vpc" {
5 | source = "./vpc"
6 | name = var.name
7 | region = var.region
8 | }
9 |
--------------------------------------------------------------------------------
/kubernetes/eks_b1_provision_vpc_terraform/provider.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.region
3 | }
4 |
--------------------------------------------------------------------------------
/kubernetes/eks_b1_provision_vpc_terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | region = "us-east-2"
2 | name = "acme-test-cluster"
3 |
--------------------------------------------------------------------------------
/kubernetes/eks_b1_provision_vpc_terraform/vpc/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | eks_cluster_name = var.eks_cluster_name != "" ? var.eks_cluster_name : var.name
3 | }
4 |
--------------------------------------------------------------------------------
/kubernetes/eks_b1_provision_vpc_terraform/vpc/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {}
2 | variable "eks_cluster_name" { default = "" }
3 | variable "name" {}
4 |
5 | variable "cidr" {
6 | default = "192.168.0.0/16"
7 | }
8 |
9 | variable "private_subnets" {
10 | default = ["192.168.160.0/19", "192.168.128.0/19", "192.168.96.0/19"]
11 | }
12 |
13 | variable "public_subnets" {
14 | default = ["192.168.64.0/19", "192.168.32.0/19", "192.168.0.0/19"]
15 | }
16 |
--------------------------------------------------------------------------------
/kubernetes/eks_b1_provision_vpc_terraform/vpc/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = "~> 0.12.0"
3 |
4 | required_providers {
5 | aws = "~> 3.3"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part1_static_config/README.md:
--------------------------------------------------------------------------------
1 | # Static Cluster Config
2 |
3 | This contains an eksctl cluster configuration. The subnet IDs are fictional, so they will need to be updated, along with the region names, to match the VPC infrastructure you provisioned.
4 |
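5 | For reference, the subnet section of an eksctl `ClusterConfig` looks roughly like the sketch below (placeholder subnet IDs and zones; substitute your own):
6 |
7 | ```yaml
8 | vpc:
9 |   subnets:
10 |     private:
11 |       us-east-2a: { id: subnet-0aaaaaaaaaaaaaaaa }
12 |     public:
13 |       us-east-2a: { id: subnet-0bbbbbbbbbbbbbbbb }
14 | ```
15 |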
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/.gitignore:
--------------------------------------------------------------------------------
1 | cluster_config.yaml
2 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/eksctl_config/data.tf:
--------------------------------------------------------------------------------
1 | data "aws_subnet" "private0" { id = var.private_subnet_ids[0] }
2 | data "aws_subnet" "private1" { id = var.private_subnet_ids[1] }
3 | data "aws_subnet" "private2" { id = var.private_subnet_ids[2] }
4 | data "aws_subnet" "public0" { id = var.public_subnet_ids[0] }
5 | data "aws_subnet" "public1" { id = var.public_subnet_ids[1] }
6 | data "aws_subnet" "public2" { id = var.public_subnet_ids[2] }
7 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/eksctl_config/main.tf:
--------------------------------------------------------------------------------
1 | resource "local_file" "default" {
2 | count = var.cluster_config_enabled ? 1 : 0
3 | content = local.cluster_config_values
4 | filename = var.filename
5 | file_permission = "0644"
6 | }
7 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/eksctl_config/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {}
2 | variable "region" {}
3 | variable "private_subnet_ids" {}
4 | variable "public_subnet_ids" {}
5 |
6 | variable "public_key_name" {}
7 | variable "instance_type" {}
8 |
9 | variable "cluster_config_enabled" { default = true }
10 | variable "min_size" { default = 3 }
11 | variable "max_size" { default = 6 }
12 | variable "desired_capacity" { default = 3 }
13 |
14 | variable "filename" {}
15 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/provider.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.region
3 | }
4 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/terraform.tfvars:
--------------------------------------------------------------------------------
1 | region = "us-east-2"
2 | name = "acme-test-cluster"
3 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/vpc/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | eks_cluster_name = var.eks_cluster_name != "" ? var.eks_cluster_name : var.name
3 | }
4 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/vpc/output.tf:
--------------------------------------------------------------------------------
1 | output "private_subnet_ids" {
2 | value = module.vpc.private_subnets
3 | }
4 |
5 | output "public_subnet_ids" {
6 | value = module.vpc.public_subnets
7 | }
8 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/vpc/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {}
2 | variable "eks_cluster_name" { default = "" }
3 | variable "name" {}
4 |
5 | variable "cidr" {
6 | default = "192.168.0.0/16"
7 | }
8 |
9 | variable "private_subnets" {
10 | default = ["192.168.160.0/19", "192.168.128.0/19", "192.168.96.0/19"]
11 | }
12 |
13 | variable "public_subnets" {
14 | default = ["192.168.64.0/19", "192.168.32.0/19", "192.168.0.0/19"]
15 | }
16 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part2_template_config/vpc/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = "~> 0.12.0"
3 |
4 | required_providers {
5 | aws = "~> 3.3"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/.gitignore:
--------------------------------------------------------------------------------
1 | cluster_config.yaml
2 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/eksctl_config/main.tf:
--------------------------------------------------------------------------------
1 | resource "local_file" "default" {
2 | count = var.cluster_config_enabled ? 1 : 0
3 | content = local.cluster_config_values
4 | filename = var.filename
5 | file_permission = "0644"
6 | }
7 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/eksctl_config/variables.tf:
--------------------------------------------------------------------------------
1 | variable "name" {}
2 | variable "region" {}
3 | variable "vpc_id" {}
4 | variable "public_key_name" {}
5 | variable "instance_type" {}
6 |
7 | variable "cluster_config_enabled" { default = true }
8 | variable "min_size" { default = 3 }
9 | variable "max_size" { default = 6 }
10 | variable "desired_capacity" { default = 3 }
11 |
12 | variable "filename" {}
13 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/provider.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = var.region
3 | }
4 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/terraform.tfvars:
--------------------------------------------------------------------------------
1 | region = "us-east-2"
2 | name = "acme-test-cluster"
3 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/vpc/locals.tf:
--------------------------------------------------------------------------------
1 | locals {
2 | eks_cluster_name = var.eks_cluster_name != "" ? var.eks_cluster_name : var.name
3 | }
4 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/vpc/output.tf:
--------------------------------------------------------------------------------
1 | output "private_subnet_ids" {
2 | value = module.vpc.private_subnets
3 | }
4 |
5 | output "public_subnet_ids" {
6 | value = module.vpc.public_subnets
7 | }
8 |
9 | output "vpc_id" {
10 | value = module.vpc.vpc_id
11 | }
12 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/vpc/variables.tf:
--------------------------------------------------------------------------------
1 | variable "region" {}
2 | variable "eks_cluster_name" { default = "" }
3 | variable "name" {}
4 |
5 | variable "cidr" {
6 | default = "192.168.0.0/16"
7 | }
8 |
9 | variable "private_subnets" {
10 | default = ["192.168.160.0/19", "192.168.128.0/19", "192.168.96.0/19"]
11 | }
12 |
13 | variable "public_subnets" {
14 | default = ["192.168.64.0/19", "192.168.32.0/19", "192.168.0.0/19"]
15 | }
16 |
--------------------------------------------------------------------------------
/kubernetes/eks_b2_provision_eks_eksctl/part3_template_config_2/vpc/versions.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_version = "~> 0.12.0"
3 |
4 | required_providers {
5 | aws = "~> 3.3"
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/kubernetes/gke/baseline/gcloud_sdk/examples/dgraph/scripts/delete_dgraph.sh:
--------------------------------------------------------------------------------
1 | helm delete "dg" --namespace "dgraph"
2 | kubectl delete pvc --selector release="dg" --namespace dgraph
3 |
--------------------------------------------------------------------------------
/kubernetes/gke/baseline/gcloud_sdk/tests/delete_ing.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v kubectl > /dev/null || { echo "'kubectl' command not found" 1>&2; exit 1; }
5 |
6 | ## cleanup
7 | kubectl delete "ingress/gke-ingress" --namespace "httpd-ing"
8 | kubectl delete namespace "httpd-ing"
--------------------------------------------------------------------------------
/kubernetes/gke/baseline/gcloud_sdk/tests/delete_lb.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v kubectl > /dev/null || { echo "'kubectl' command not found" 1>&2; exit 1; }
5 |
6 | ## Cleanup
7 | kubectl delete "service/httpd" --namespace "httpd-svc"
8 | kubectl delete namespace "httpd-svc"
--------------------------------------------------------------------------------
/kubernetes/gke/baseline/gcloud_sdk/tests/delete_netpol.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v kubectl > /dev/null || { echo "'kubectl' command not found" 1>&2; exit 1; }
5 |
6 | ## cleanup
7 | MANIFESTS=(04-client 03-frontend 02-backend 01-management-ui 00-namespace)
8 | APP_URL=https://docs.projectcalico.org/v3.5/getting-started/kubernetes/tutorials/stars-policy/manifests/
9 |
10 | for MANIFEST in "${MANIFESTS[@]}"; do
11 |   kubectl delete --filename "${APP_URL}${MANIFEST}.yaml"
12 | done
13 |
--------------------------------------------------------------------------------
/kubernetes/gke/baseline/gcloud_sdk/tests/delete_pv.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for required commands
4 | command -v kubectl > /dev/null || { echo "'kubectl' command not found" 1>&2; exit 1; }
5 |
6 | kubectl delete pod app --namespace "pv-test"
7 | kubectl delete pvc pv-claim --namespace "pv-test"
8 | kubectl delete ns "pv-test"
--------------------------------------------------------------------------------
/kubernetes/gke/baseline/terraform/README.md:
--------------------------------------------------------------------------------
1 | # Ultimate Baseline GKE (Terraform)
2 |
3 | TBA
--------------------------------------------------------------------------------
/kubernetes/gke/ingress/ambassador/example/dgraph/dgraph_clean.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | RELEASE_NAME=${RELEASE_NAME:-"dg"}
3 | helm delete $RELEASE_NAME --namespace dgraph
4 | kubectl delete pvc --namespace dgraph --selector release=$RELEASE_NAME
5 | kubectl delete namespace dgraph
--------------------------------------------------------------------------------
/kubernetes/gke/ingress/ambassador/example/dgraph/dgraph_install.sh:
--------------------------------------------------------------------------------
1 | export DGRAPH_ALLOW_LIST=${DGRAPH_ALLOW_LIST:-"0.0.0.0/0"}
2 | export DGRAPH_RELEASE_NAME=${DGRAPH_RELEASE_NAME:-"dg"}
3 |
4 | helm install $DGRAPH_RELEASE_NAME dgraph/dgraph \
5 | --namespace dgraph \
6 | --create-namespace \
7 |   --values - <<EOF
8 | alpha:
9 |   configFile:
10 |     config.yaml: |
11 |       security:
12 |         whitelist: ${DGRAPH_ALLOW_LIST}
13 | EOF
14 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/consul/fetch_cert.sh:
--------------------------------------------------------------------------------
1 | kubectl get secret \
2 | --namespace consul consul-ca-cert \
3 | -o jsonpath="{.data['tls\.crt']}" \
4 | | base64 --decode > ca.pem
5 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/consul/fetch_consul_debian.sh:
--------------------------------------------------------------------------------
1 | wget -O- https://apt.releases.hashicorp.com/gpg \
2 | | gpg --dearmor \
3 | | sudo tee /usr/share/keyrings/hashicorp-archive-keyring.gpg
4 | echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" \
5 | | sudo tee /etc/apt/sources.list.d/hashicorp.list
6 | sudo apt update && sudo apt install consul
7 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/.gitignore:
--------------------------------------------------------------------------------
1 | pydgraph-client
2 | greeter
3 | learn-consul-kubernetes
4 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/dgraph/dgraph_allow_list.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | DG_ALLOW_LIST=$(gcloud container clusters describe $GKE_CLUSTER_NAME \
3 | --project $GKE_PROJECT_ID \
4 | --region $GKE_REGION \
5 | --format json \
6 | | jq -r '.clusterIpv4Cidr,.servicesIpv4Cidr' \
7 | | tr '\n' ','
8 | )
9 | export MY_IP_ADDRESS=$(curl --silent ifconfig.me)
10 | export DG_ALLOW_LIST="${DG_ALLOW_LIST}${MY_IP_ADDRESS}/32"
11 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/dgraph/experimental/intention.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: consul.hashicorp.com/v1alpha1
3 | kind: ServiceIntentions
4 | metadata:
5 | name: pydgraph-client-to-dgraph
6 | spec:
7 | destination:
8 | name: dgraph-dgraph-alpha
9 | sources:
10 | - name: pydgraph-client
11 | action: allow
12 | ---
13 | apiVersion: consul.hashicorp.com/v1alpha1
14 | kind: ServiceIntentions
15 | metadata:
16 | name: pydgraph-client-to-dgraph-grpc
17 | spec:
18 | destination:
19 | name: dgraph-dgraph-alpha-grpc
20 | sources:
21 | - name: pydgraph-client
22 | action: allow
23 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/dgraph/experimental/sd.dgraph.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: consul.hashicorp.com/v1alpha1
3 | kind: ServiceDefaults
4 | metadata:
5 | name: dgraph-dgraph-alpha
6 | spec:
7 | protocol: 'http'
8 | ---
9 | apiVersion: consul.hashicorp.com/v1alpha1
10 | kind: ServiceDefaults
11 | metadata:
12 | name: dgraph-dgraph-alpha-grpc
13 | spec:
14 | protocol: 'grpc'
15 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/dgraph/experimental/sd.pydgraph_client.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: consul.hashicorp.com/v1alpha1
3 | kind: ServiceDefaults
4 | metadata:
5 | name: pydgraph-client
6 | spec:
7 | protocol: 'http'
8 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/static_server/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Static Server
3 |
4 | These are static server examples that come from HashiCorp's documentation.
5 |
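6 | A typical run installs Consul with the secure values and then applies the intention; a sketch (assumes the `hashicorp` Helm repo is already added):
7 |
8 | ```bash
9 | helm install consul hashicorp/consul \
10 |   --namespace consul --create-namespace \
11 |   --values secure_http/secure-dc1.yaml
12 | kubectl apply --filename secure_http/client-to-server-intention.yaml
13 | ```
14 |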
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/static_server/secure_http/client-to-server-intention.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: consul.hashicorp.com/v1alpha1
2 | kind: ServiceIntentions
3 | metadata:
4 | name: client-to-server
5 | spec:
6 | destination:
7 | name: static-server
8 | sources:
9 | - name: static-client
10 | action: allow
11 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/static_server/secure_http/dc1.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | name: consul
3 | enabled: true
4 | datacenter: dc1
5 | server:
6 | replicas: 1
7 | securityContext:
8 | runAsNonRoot: false
9 | runAsUser: 0
10 | connectInject:
11 | enabled: true
12 | controller:
13 | enabled: true
14 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/static_server/secure_http/secure-dc1.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | name: consul
3 | enabled: true
4 | datacenter: dc1
5 | gossipEncryption:
6 | autoGenerate: true
7 | tls:
8 | enabled: true
9 | enableAutoEncrypt: true
10 | verify: true
11 | acls:
12 | manageSystemACLs: true
13 | server:
14 | replicas: 1
15 | securityContext:
16 | runAsNonRoot: false
17 | runAsUser: 0
18 | connectInject:
19 | enabled: true
20 | controller:
21 | enabled: true
22 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/static_server/secure_multiport/client-to-server-intention.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: consul.hashicorp.com/v1alpha1
2 | kind: ServiceIntentions
3 | metadata:
4 | name: client-to-server
5 | spec:
6 | destination:
7 | name: static-server
8 | sources:
9 | - name: static-client
10 | action: allow
11 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/examples/static_server/secure_multiport/secure-dc1.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | name: consul
3 | enabled: true
4 | datacenter: dc1
5 | gossipEncryption:
6 | autoGenerate: true
7 | tls:
8 | enabled: true
9 | enableAutoEncrypt: true
10 | verify: true
11 | acls:
12 | manageSystemACLs: true
13 | server:
14 | replicas: 1
15 | securityContext:
16 | runAsNonRoot: false
17 | runAsUser: 0
18 | connectInject:
19 | enabled: true
20 | controller:
21 | enabled: true
22 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/scripts/clean_gcp.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | source env.sh
4 |
5 | gcloud container clusters delete $GKE_CLUSTER_NAME \
6 | --project $GKE_PROJECT_ID --region $GKE_REGION
7 |
8 | gcloud iam service-accounts delete $GKE_SA_EMAIL --project $GKE_PROJECT_ID
9 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/scripts/clean_k8s.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | source env.sh
3 |
4 | # delete pydgraph-client
5 | helmfile --file ./examples/dgraph/helmfile.yaml delete
6 | kubectl delete namespace pydgraph-client
7 |
8 | # delete dgraph
9 | helmfile --file ./examples/dgraph/helmfile.yaml delete
10 | kubectl delete pvc --selector app=dgraph --namespace "dgraph"
11 | kubectl delete namespace dgraph
12 |
13 | # delete consul
14 | helmfile --file ./consul/helmfile.yaml delete
15 | kubectl delete pvc --selector app=consul --namespace "consul"
16 | kubectl delete namespace consul
17 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/scripts/example.env.sh:
--------------------------------------------------------------------------------
1 |
2 | # gke
3 | export GKE_PROJECT_ID="my-gke-project" # CHANGE ME
4 | export GKE_CLUSTER_NAME="csm-demo"
5 | export GKE_REGION="us-central1"
6 | export GKE_SA_NAME="gke-worker-nodes-sa"
7 | export GKE_SA_EMAIL="$GKE_SA_NAME@${GKE_PROJECT_ID}.iam.gserviceaccount.com"
8 | export KUBECONFIG=~/.kube/$GKE_REGION-$GKE_CLUSTER_NAME.yaml
9 |
10 | # gcr
11 | export GCR_PROJECT_ID="my-gcr-project"
12 | export DOCKER_REGISTRY="gcr.io/$GCR_PROJECT_ID"
13 |
14 | # other
15 | export USE_GKE_GCLOUD_AUTH_PLUGIN=True
16 | export CLOUD_BILLING_ACCOUNT="" # CHANGE ME
17 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/consul-connect/scripts/filesetup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | export PROJECT_DIR=~/projects/consul_connect
3 |
4 | mkdir -p $PROJECT_DIR/{examples/dgraph,consul}
5 | cd $PROJECT_DIR
6 | touch {consul,examples/dgraph}/helmfile.yaml \
7 | examples/dgraph/dgraph_allow_list.sh
8 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/clients/.gitignore:
--------------------------------------------------------------------------------
1 | examples
2 | api.proto
3 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/clients/alpha_server.txt:
--------------------------------------------------------------------------------
1 | dgraph-dgraph-alpha-headless.dgraph.svc.cluster.local
2 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/clients/fetch_scripts.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | USER="darkn3rd"
4 | GIST_ID="089d18ac58951709a98ac6a617f26bea"
5 | VERS="68bd5356561dd373ba13542fee4f978d8aace872"
6 | FILE="setup_pydgraph_gcp.sh"
7 | URL=https://gist.githubusercontent.com/$USER/$GIST_ID/raw/$VERS/$FILE
8 | echo "Fetching Scripts from $URL"
9 | curl -s $URL | bash -s --
10 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/dgraph/dgraph_allow_list.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | DG_ALLOW_LIST=$(gcloud container clusters describe $GKE_CLUSTER_NAME \
3 | --project $GKE_PROJECT_ID \
4 | --region $GKE_REGION \
5 | --format json \
6 | | jq -r '.clusterIpv4Cidr,.servicesIpv4Cidr' \
7 | | tr '\n' ','
8 | )
9 | export MY_IP_ADDRESS=$(curl --silent ifconfig.me)
10 | export DG_ALLOW_LIST="${DG_ALLOW_LIST}${MY_IP_ADDRESS}/32"
11 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/dgraph/helmfile.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | # https://artifacthub.io/packages/helm/dgraph/dgraph/0.0.19
3 | - name: dgraph
4 | url: https://charts.dgraph.io
5 |
6 | releases:
7 | - name: dgraph
8 | namespace: dgraph
9 | chart: dgraph/dgraph
10 | version: 0.0.19
11 | values:
12 | - image:
13 | tag: v21.03.2
14 | alpha:
15 | configFile:
16 | config.yaml: |
17 | security:
18 |             whitelist: {{ env "DG_ALLOW_LIST" | default "0.0.0.0/0" | quote }}
19 | service:
20 | type: ClusterIP
21 |
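22 | # apply with: helmfile --file helmfile.yaml apply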
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/kube_addons/nginx_ic/.gitignore:
--------------------------------------------------------------------------------
1 | nginx-repo.crt
2 | nginx-repo.key
3 | nginx-repo.jwt
4 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/nsm/README.md:
--------------------------------------------------------------------------------
1 | # NGINX Service Mesh (NSM)
2 |
3 | This tutorial will deploy the NSM service mesh. There are a few options that you can set before deploying:
4 |
5 |
6 | * Auto-Injection (`export NSM_AUTO_INJECTION=false` to disable) - this puts everything deployed in the cluster on the service mesh.
7 | * Strict vs Permissive (`export NSM_MTLS_MODE=strict` or `permissive`) - with `strict`, mTLS is required to communicate with services on the mesh; with `permissive`, anything can connect.
8 |
9 |
10 | ## Checking Config
11 |
12 | ```bash
13 | nginx-meshctl config | jq -r .accessControlMode
14 | ```
15 |
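16 | ## Deploying
17 |
18 | A deploy honoring the mTLS option might look like the sketch below (flag names follow the NSM 1.x docs; check `nginx-meshctl deploy --help` for your release):
19 |
20 | ```bash
21 | nginx-meshctl deploy --mtls-mode "${NSM_MTLS_MODE:-strict}"
22 | ```
23 |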
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/nsm/deny_access_control_mode.json:
--------------------------------------------------------------------------------
1 | {
2 | "op": "replace",
3 | "field": {
4 | "accessControlMode": "deny"
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/nsm/install_cli.sh:
--------------------------------------------------------------------------------
1 | pushd ~/Downloads
2 | if [[ "$(uname -s)" == "Linux" ]]; then
3 | [[ -f nginx-meshctl_linux.gz ]] && gunzip nginx-meshctl_linux.gz
4 | sudo mv nginx-meshctl_linux /usr/local/bin/nginx-meshctl
5 | sudo chmod +x /usr/local/bin/nginx-meshctl
6 | elif [[ "$(uname -s)" == "Darwin" ]]; then
7 | [[ -f nginx-meshctl_darwin.gz ]] && gunzip nginx-meshctl_darwin.gz
8 | sudo mv nginx-meshctl_darwin /usr/local/bin/nginx-meshctl
9 | sudo chmod +x /usr/local/bin/nginx-meshctl
10 | fi
11 | popd
12 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/o11y/.gitignore:
--------------------------------------------------------------------------------
1 | grafana.yaml
2 | jaeger.yaml
3 | otel-collector.yaml
4 | prometheus.yaml
5 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/scripts/part_1/clean_gcp.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | source env.sh
3 |
4 | # Google Cloud Resources
5 | gcloud container clusters delete $GKE_CLUSTER_NAME \
6 | --project $GKE_PROJECT_ID \
7 | --region $GKE_REGION
8 |
9 | gcloud iam service-accounts delete $GKE_SA_EMAIL --project $GKE_PROJECT_ID
10 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/scripts/part_1/example.env.sh:
--------------------------------------------------------------------------------
1 |
2 | # gke
3 | export GKE_PROJECT_ID="my-gke-project" # CHANGE ME
4 | export GKE_CLUSTER_NAME="my-nginx-kic" # CHANGE ME
5 | export GKE_REGION="us-central1"
6 | export GKE_SA_NAME="gke-worker-nodes-sa"
7 | export GKE_SA_EMAIL="$GKE_SA_NAME@${GKE_PROJECT_ID}.iam.gserviceaccount.com"
8 | export KUBECONFIG=~/.kube/$GKE_REGION-$GKE_CLUSTER_NAME.yaml
9 |
10 | # other
11 | export USE_GKE_GCLOUD_AUTH_PLUGIN=True
12 | export CLOUD_BILLING_ACCOUNT="" # CHANGE ME
13 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/scripts/part_1/gcr.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | source env.sh
3 |
4 | # Grant local docker access to GCR
5 | gcloud auth configure-docker
6 |
7 | # Grant read permissions explicitly GCS storage used for GCR
8 | # Docs: https://cloud.google.com/storage/docs/access-control/using-iam-permissions#gsutil
9 | gsutil iam ch \
10 | serviceAccount:$GKE_SA_EMAIL:objectViewer \
11 | gs://artifacts.$GCR_PROJECT_ID.appspot.com
12 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/scripts/part_1/setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # ~/projects/nsm
4 | # ├── clients
5 | # │ └── fetch_scripts.sh
6 | # ├── dgraph
7 | # │ ├── dgraph_allow_lists.sh
8 | # │ └── helmfile.yaml
9 | # ├── nsm
10 | # │ └── helmfile.yaml
11 | # └── o11y
12 | # └── fetch_manifests.sh
13 |
14 |
15 | PROJECT_DIR=~/projects/nsm
16 | mkdir -p $PROJECT_DIR/{clients,dgraph,nsm,o11y}
17 | cd $PROJECT_DIR
18 |
19 | touch {nsm,dgraph}/helmfile.yaml \
20 | o11y/fetch_manifests.sh \
21 | dgraph/dgraph_allow_lists.sh \
22 | clients/fetch_scripts.sh
23 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/scripts/part_2/clean_k8s.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | source env.sh
3 |
4 | # Ratel Resources
5 | kubectl delete deploy/dgraph-ratel --namespace "ratel"
6 | kubectl delete svc/dgraph-ratel --namespace "ratel"
7 |
8 | # VirtualServers
9 | helm delete dgraph-virtualservers --namespace "dgraph"
10 | helm delete ratel-virtualserver --namespace "ratel"
11 |
12 | # Kubernetes Addons
13 | helm delete "external-dns" --namespace "kube-addons"
14 | helm delete "nginx-ingress" --namespace "kube-addons"
15 | helm delete "cert-manager-issuers" --namespace "kube-addons"
16 | helm delete "cert-manager" --namespace "kube-addons"
17 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/scripts/part_2/example.env.sh:
--------------------------------------------------------------------------------
1 | # external-dns + cloud-dns
2 | export DNS_PROJECT_ID="my-cloud-dns-project" # CHANGE ME
3 | export DNS_DOMAIN="example.com" # CHANGE ME
4 | export EXTERNALDNS_LOG_LEVEL="debug"
5 | export EXTERNALDNS_NS="kube-addons"
6 | export CERTMANAGER_NS="kube-addons"
7 | export DNS_SA_NAME="cloud-dns-sa"
8 | export DNS_SA_EMAIL="$DNS_SA_NAME@${GKE_PROJECT_ID}.iam.gserviceaccount.com"
9 |
10 | # gcr (container registry)
11 | export GCR_PROJECT_ID="my-gcr-project"
12 |
13 | # cert-manager
14 | export ACME_ISSUER_EMAIL="user@example.com" # CHANGE ME
15 | export ACME_ISSUER_NAME="letsencrypt-prod"
16 |
--------------------------------------------------------------------------------
/kubernetes/gke/service-mesh/nginx-service-mesh/scripts/part_2/projects.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | source env.sh
3 |
4 | # enable billing and APIs for DNS project if not done already
5 | gcloud projects create $DNS_PROJECT_ID
6 | gcloud config set project $DNS_PROJECT_ID
7 | gcloud beta billing projects link $DNS_PROJECT_ID \
8 |   --billing-account $CLOUD_BILLING_ACCOUNT
9 | gcloud services enable "dns.googleapis.com"
10 |
--------------------------------------------------------------------------------
/kubernetes/gke_1_provision_cloudsdk/create_basic_cluster.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for gcloud command
4 | command -v gcloud > /dev/null || \
5 |   { echo 'gcloud command not found' 1>&2; exit 1; }
6 |
7 | ## Defaults
8 | MY_CLUSTER_NAME=${1:-"test-cluster"} # gke cluster name
9 | MY_REGION=${2:-"us-central1"} # default region if not set
10 | MY_PROJECT=${3:-"$(gcloud config get-value project)"} # default project if not set
11 |
12 | ## Create cluster (1 node per zone)
13 | gcloud container --project $MY_PROJECT clusters create \
14 | --num-nodes 1 \
15 | --region $MY_REGION \
16 | $MY_CLUSTER_NAME
17 |
--------------------------------------------------------------------------------
/kubernetes/gke_1_provision_cloudsdk/hello-k8s-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hello-basic-deploy
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: hello-basic
11 | template:
12 | metadata:
13 | labels:
14 | app: hello-basic
15 | spec:
16 | containers:
17 | - name: hello-kubernetes-basic
18 | image: paulbouwer/hello-kubernetes:1.5
19 | ports:
20 | - containerPort: 8080
21 | resources:
22 | requests:
23 | memory: "64Mi"
24 | cpu: "80m"
25 | limits:
26 | memory: "128Mi"
27 | cpu: "250m"
28 |
--------------------------------------------------------------------------------
/kubernetes/gke_1_provision_cloudsdk/hello-k8s-svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: hello-basic-svc
6 | spec:
7 | type: ClusterIP
8 | ports:
9 | - port: 8080
10 | targetPort: 8080
11 | selector:
12 | app: hello-basic
13 |
--------------------------------------------------------------------------------
/kubernetes/gke_2_provision_terraform/.gitignore:
--------------------------------------------------------------------------------
1 | .terraform
2 | .terraform.tfstate.lock.info
3 | terraform.tfstate
4 | terraform.tfstate.backup
5 | terraform.tfvars
6 |
--------------------------------------------------------------------------------
/kubernetes/gke_2_provision_terraform/hello-k8s-deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hello-tf-deploy
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: hello-tf
11 | template:
12 | metadata:
13 | labels:
14 | app: hello-tf
15 | spec:
16 | containers:
17 | - name: hello-kubernetes-tf
18 | image: paulbouwer/hello-kubernetes:1.5
19 | ports:
20 | - containerPort: 8080
21 | resources:
22 | requests:
23 | memory: "64Mi"
24 | cpu: "80m"
25 | limits:
26 | memory: "128Mi"
27 | cpu: "250m"
28 |
--------------------------------------------------------------------------------
/kubernetes/gke_2_provision_terraform/hello-k8s-svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: hello-tf-svc
6 | spec:
7 | type: ClusterIP
8 | ports:
9 | - port: 8080
10 | targetPort: 8080
11 | selector:
12 | app: hello-tf
13 |
--------------------------------------------------------------------------------
/kubernetes/gke_2_provision_terraform/provider.tf:
--------------------------------------------------------------------------------
1 | provider "google" {
2 | version = "~> 3.16.0"
3 | region = var.region
4 | project = var.project
5 | }
6 |
7 | provider "random" {
8 | version = "~> 2.2.1"
9 | }
10 |
11 | provider "null" {
12 | version = "~> 2.1.2"
13 | }
14 |
15 | provider "kubernetes" {
16 |   version = "~> 1.11.3"
17 | }
18 |
19 |
--------------------------------------------------------------------------------
/kubernetes/gke_3_service_ingress/README.md:
--------------------------------------------------------------------------------
1 | # GKE 3: Deploying Service or Ingress on GKE
2 |
3 | ## Part 1
4 |
5 | Create a GKE cluster. See previous articles:
6 |
7 | * [Provision using Google Cloud SDK](../gke_1_provision_cloudsdk/README.md)
8 | * [Provision using Terraform](../gke_2_provision_terraform/README.md)
9 |
10 | ## Part 2
11 |
12 | Deploy a Service with Load Balancer
13 |
14 | * Part 2 [README.md](part2_services/README.md)
15 |
16 | ## Part 3
17 |
18 | Deploy an Ingress
19 |
20 | * Part 3 [README.md](part3_ingress/README.md)
--------------------------------------------------------------------------------
/kubernetes/gke_3_service_ingress/part2_services/hello_gke_extlb_svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: hello-gke-extlb
6 | spec:
7 | type: LoadBalancer
8 | ports:
9 | - port: 80
10 | targetPort: 8080
11 | selector:
12 | app: hello-gke-extlb
13 |
--------------------------------------------------------------------------------
/kubernetes/gke_3_service_ingress/part3_ingress/hello_gke_ing_deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hello-gke-ing
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: hello-gke-ing
11 | template:
12 | metadata:
13 | labels:
14 | app: hello-gke-ing
15 | spec:
16 | containers:
17 | - name: hello-kubernetes
18 | image: paulbouwer/hello-kubernetes:1.5
19 | ports:
20 | - containerPort: 8080
21 | resources:
22 | requests:
23 | memory: "64Mi"
24 | cpu: "80m"
25 | limits:
26 | memory: "128Mi"
27 | cpu: "250m"
28 |
--------------------------------------------------------------------------------
/kubernetes/gke_3_service_ingress/part3_ingress/hello_gke_ing_ing.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.k8s.io/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: hello-gke-ing
6 | annotations:
7 | kubernetes.io/ingress.class: gce
8 | spec:
9 | rules:
10 | - http:
11 | paths:
12 | - path: /*
13 | backend:
14 | serviceName: hello-gke-ing
15 | servicePort: 80
16 |
--------------------------------------------------------------------------------
/kubernetes/gke_3_service_ingress/part3_ingress/hello_gke_ing_svc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: hello-gke-ing
6 | spec:
7 | type: NodePort
8 | ports:
9 | - port: 80
10 | targetPort: 8080
11 | selector:
12 | app: hello-gke-ing
13 |
--------------------------------------------------------------------------------
/kubernetes/gke_4_externaldns/README.md:
--------------------------------------------------------------------------------
1 | # GKE 4: Extending GKE with External DNS (Cloud DNS)
2 |
3 | ## Part 1
4 |
5 | * Part 1 [README.md](part1_clouddns/README.md)
6 |
7 | ## Part 2
8 |
9 | Deploy a Service with Load Balancer
10 |
11 | * Part 2 [README.md](part2_service/README.md)
12 |
13 | ## Part 3
14 |
15 | Deploy an Ingress
16 |
17 | * Part 3 [README.md](part3_ingress/README.md)
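18 |
19 | For Part 1, the `external-dns` values are rendered from the template and installed with Helm; a minimal sketch (the Bitnami chart name is an assumption here):
20 |
21 | ```bash
22 | sed -e "s/\$MY_DOMAIN/$MY_DOMAIN/" \
23 |   part1_clouddns/template_values.yaml > gcp-external-dns.values.yaml
24 | helm install external-dns bitnami/external-dns \
25 |   --values gcp-external-dns.values.yaml
26 | ```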
--------------------------------------------------------------------------------
/kubernetes/gke_4_externaldns/part1_clouddns/.gitignore:
--------------------------------------------------------------------------------
1 | gcp-external-dns.values.yaml
2 |
--------------------------------------------------------------------------------
/kubernetes/gke_4_externaldns/part1_clouddns/check_clouddns.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | ## Check for gcloud command
4 | command -v gcloud > /dev/null || \
5 |   { echo 'gcloud command not found' >&2; exit 1; }
6 |
7 | ## Check for arguments
8 | if (( $# < 1 )); then
9 | printf " Usage: $0 [GCP_PROJECT_NAME]\n\n" >&2
10 | exit 1
11 | fi
12 |
13 | ## Local Variables
14 | MY_ZONE=${1}
15 | MY_PROJECT=${2:-"$(gcloud config get-value project)"} # default project if not set
16 |
17 | ## Print Zone Records
18 | gcloud dns record-sets list \
19 | --project "$MY_PROJECT" \
20 | --zone "$MY_ZONE" \
21 | --filter "type=NS OR type=SOA" \
22 | --format json
23 |
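
A hedged usage example for the script above (the zone and project names are placeholders):

```bash
# list the NS and SOA records of a managed zone; the project argument is optional
./check_clouddns.sh my-zone my-gcp-project
```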
--------------------------------------------------------------------------------
/kubernetes/gke_4_externaldns/part1_clouddns/template_values.yaml:
--------------------------------------------------------------------------------
1 | domainFilters:
2 | - $MY_DOMAIN
3 | txtOwnerId: external-dns
4 | # GKE cluster and CloudDNS must be in the same project
5 | provider: google
6 | image:
7 | registry: registry.opensource.zalan.do
8 | repository: teapot/external-dns
9 | tag: latest
10 | rbac:
11 | create: true
12 | apiVersion: v1
13 | # Set to 'upsert-only' for updates, 'sync' to allow deletes
14 | policy: upsert-only
15 |
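
The `$MY_DOMAIN` placeholder above suggests the template is rendered before use; a minimal sketch with `envsubst` (the output filename is taken from the `.gitignore` for this part, and the domain value is a placeholder):

```bash
export MY_DOMAIN="example.com"   # placeholder: your Cloud DNS domain
envsubst < template_values.yaml > gcp-external-dns.values.yaml
# the rendered file is then passed to helm, e.g. with --values gcp-external-dns.values.yaml
```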
--------------------------------------------------------------------------------
/kubernetes/gke_4_externaldns/part2_service/.gitignore:
--------------------------------------------------------------------------------
1 | hello_k8s_lb.yaml
2 |
--------------------------------------------------------------------------------
/kubernetes/gke_4_externaldns/part3_ingress/.gitignore:
--------------------------------------------------------------------------------
1 | hello_k8s_gce.yaml
2 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part1_ephemeral_ip/.gitignore:
--------------------------------------------------------------------------------
1 | hello_ingress.yaml
2 | hello_managed_cert.yaml
3 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part1_ephemeral_ip/hello_deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hello-k8s-gce-ssl
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: hello-k8s-gce-ssl
11 | template:
12 | metadata:
13 | labels:
14 | app: hello-k8s-gce-ssl
15 | spec:
16 | containers:
17 | - name: hello-kubernetes
18 | image: paulbouwer/hello-kubernetes:1.5
19 | ports:
20 | - containerPort: 8080
21 | resources:
22 | requests:
23 | memory: "64Mi"
24 | cpu: "80m"
25 | limits:
26 | memory: "128Mi"
27 | cpu: "250m"
28 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part1_ephemeral_ip/hello_service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: hello-k8s-gce-ssl
6 | spec:
7 | # For GCE Ingress, the Service type has to be 'NodePort' or 'LoadBalancer'
8 | type: NodePort
9 | ports:
10 | - port: 80
11 | targetPort: 8080
12 | selector:
13 | app: hello-k8s-gce-ssl
14 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part1_ephemeral_ip/template_ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: hello-k8s-gce-ssl
6 | annotations:
7 | kubernetes.io/ingress.class: gce
8 | networking.gke.io/managed-certificates: hello-k8s-gce-ssl
9 | spec:
10 | rules:
11 | - host: $MY_DNS_NAME
12 | http:
13 | paths:
14 | - backend:
15 | serviceName: hello-k8s-gce-ssl
16 | servicePort: 80
17 | path: /*
18 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part1_ephemeral_ip/template_managed_cert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.gke.io/v1beta2
3 | kind: ManagedCertificate
4 | metadata:
5 | name: hello-k8s-gce-ssl
6 | spec:
7 | domains:
8 | - $MY_DNS_NAME
9 |
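
Both templates in this part substitute `$MY_DNS_NAME`; a hedged sketch of rendering and applying them (the output filenames match this part's `.gitignore`, and the DNS name is a placeholder):

```bash
export MY_DNS_NAME="hello.example.com"   # placeholder: a DNS name you control
envsubst < template_managed_cert.yaml > hello_managed_cert.yaml
envsubst < template_ingress.yaml > hello_ingress.yaml
kubectl apply -f hello_deploy.yaml -f hello_service.yaml \
  -f hello_managed_cert.yaml -f hello_ingress.yaml
```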
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part2_reserved_ip/.gitignore:
--------------------------------------------------------------------------------
1 | hello_ingress.yaml
2 | hello_managed_cert.yaml
3 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part2_reserved_ip/hello_deploy.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hello-k8s-gce-ssl2
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: hello-k8s-gce-ssl2
11 | template:
12 | metadata:
13 | labels:
14 | app: hello-k8s-gce-ssl2
15 | spec:
16 | containers:
17 | - name: hello-kubernetes
18 | image: paulbouwer/hello-kubernetes:1.5
19 | ports:
20 | - containerPort: 8080
21 | resources:
22 | requests:
23 | memory: "64Mi"
24 | cpu: "80m"
25 | limits:
26 | memory: "128Mi"
27 | cpu: "250m"
28 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part2_reserved_ip/hello_service.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: hello-k8s-gce-ssl2
6 | spec:
7 | # For GCE Ingress, the Service type has to be 'NodePort' or 'LoadBalancer'
8 | type: NodePort
9 | ports:
10 | - port: 80
11 | targetPort: 8080
12 | selector:
13 | app: hello-k8s-gce-ssl2
14 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part2_reserved_ip/template_ingress.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: extensions/v1beta1
3 | kind: Ingress
4 | metadata:
5 | name: hello-k8s-gce-ssl2
6 | annotations:
7 | kubernetes.io/ingress.class: gce
8 | networking.gke.io/managed-certificates: hello-k8s-gce-ssl2
9 | kubernetes.io/ingress.global-static-ip-name: $MY_ADDRESS_NAME
10 | spec:
11 | rules:
12 | - host: $MY_DNS_NAME
13 | http:
14 | paths:
15 | - backend:
16 | serviceName: hello-k8s-gce-ssl2
17 | servicePort: 80
18 | path: /*
19 |
--------------------------------------------------------------------------------
/kubernetes/gke_5_googlessl/part2_reserved_ip/template_managed_cert.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: networking.gke.io/v1beta2
3 | kind: ManagedCertificate
4 | metadata:
5 | name: hello-k8s-gce-ssl2
6 | spec:
7 | domains:
8 | - $MY_DNS_NAME
9 |
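
Part 2 additionally substitutes `$MY_ADDRESS_NAME`, which names a reserved global static IP; a hedged sketch of the full flow (the address name and DNS name are placeholders):

```bash
export MY_ADDRESS_NAME="hello-web-ip"    # placeholder: name for the reserved address
gcloud compute addresses create "$MY_ADDRESS_NAME" --global
export MY_DNS_NAME="hello.example.com"   # placeholder: a DNS name you control
envsubst < template_managed_cert.yaml > hello_managed_cert.yaml
envsubst < template_ingress.yaml > hello_ingress.yaml
kubectl apply -f hello_deploy.yaml -f hello_service.yaml \
  -f hello_managed_cert.yaml -f hello_ingress.yaml
```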
--------------------------------------------------------------------------------
/kubernetes/helmfile_1/README.md:
--------------------------------------------------------------------------------
1 | # Helmfile example
2 |
3 | This is an example of how to use `helmfile` to coordinate the installation of MinIO and Dgraph.
4 |
5 | ## Instructions
6 |
7 | ```bash
8 | . env.sh
9 | helmfile apply
10 | ```
11 |
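
After `helmfile apply`, a quick way to verify both releases (a sketch; the namespaces follow the `env.sh` defaults):

```bash
helm list --namespace "${MINIO_NAMESPACE:-minio}"
helm list --namespace "${DGRAPH_NAMESPACE:-dgraph}"
kubectl get pods --namespace "${DGRAPH_NAMESPACE:-dgraph}"
```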
--------------------------------------------------------------------------------
/kubernetes/helmfile_1/env.sh:
--------------------------------------------------------------------------------
1 | # required env vars
2 | export MINIO_ACCESS_KEY=backups
3 | export MINIO_SECRET_KEY=password123
4 | # optional env vars
5 | export MINIO_NAMESPACE=minio
6 | export DGRAPH_NAMESPACE=dgraph
7 |
--------------------------------------------------------------------------------
/kubernetes/helmfile_1/helmfile.yaml:
--------------------------------------------------------------------------------
1 | repositories:
2 | - name: minio
3 | url: https://helm.min.io/
4 | - name: dgraph
5 | url: https://charts.dgraph.io
6 |
7 | releases:
8 | - name: minio
9 | namespace: {{ env "MINIO_NAMESPACE" | default "minio" }}
10 | chart: minio/minio
11 | version: 8.0.10
12 | values:
13 | - ./values/minio.yaml.gotmpl
14 |
15 | - name: dgraph
16 | namespace: {{ env "DGRAPH_NAMESPACE" | default "dgraph" }}
17 | chart: dgraph/dgraph
18 | version: 0.0.17
19 | values:
20 | - ./values/dgraph.yaml.gotmpl
21 |
--------------------------------------------------------------------------------
/kubernetes/helmfile_1/values/dgraph.yaml.gotmpl:
--------------------------------------------------------------------------------
1 | backups:
2 | full:
3 | enabled: true
4 | debug: true
5 | schedule: "*/15 * * * *"
6 | destination: minio://minio.{{ env "MINIO_NAMESPACE" | default "minio" }}.svc:9000/dgraph
7 | minioSecure: false
8 | keys:
9 | minio:
10 | access: {{ requiredEnv "MINIO_ACCESS_KEY" }}
11 | secret: {{ requiredEnv "MINIO_SECRET_KEY" }}
12 | alpha:
13 | configFile:
14 | config.yaml: |
15 | security:
16 | whitelist: 10.0.0.0/8,172.0.0.0/8,192.168.0.0/16
17 |
--------------------------------------------------------------------------------
/kubernetes/helmfile_1/values/minio.yaml.gotmpl:
--------------------------------------------------------------------------------
1 | image:
2 | repository: minio/minio
3 | tag: RELEASE.2021-05-27T22-06-31Z
4 | mcImage:
5 | repository: minio/mc
6 | tag: RELEASE.2021-05-26T19-19-26Z
7 | accessKey: {{ requiredEnv "MINIO_ACCESS_KEY" }}
8 | secretKey: {{ requiredEnv "MINIO_SECRET_KEY" }}
9 | defaultBucket:
10 | enabled: true
11 | name: dgraph
12 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/create_workarea.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Purpose: Create structure from $HOME directory, or directory of your choosing
4 | #
5 |
6 | WORKAREA=${WORKAREA:-"${HOME}/vagrant-ansible"}
7 | ROLEPATH=${WORKAREA}/provision/roles/hello_web
8 |
9 | # Create Ansible Role
10 | if command -v ansible-galaxy > /dev/null; then
11 | ansible-galaxy init ${ROLEPATH}
12 | else
13 | mkdir -p ${ROLEPATH}/{defaults,files,tasks}
14 | touch ${ROLEPATH}/{defaults/main.yml,tasks/main.yml,files/index.html}
15 | fi
16 |
17 | cat <<-'HTML' > ${ROLEPATH}/files/index.html
18 | <html>
19 | <body>
20 | Hello World!
21 | </body>
22 | </html>
23 | HTML
24 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part1_ubuntu/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "bento/ubuntu-16.04"
3 | config.vm.network "forwarded_port", guest: 80, host: 8086
4 |
5 | ####### Provision #######
6 | config.vm.provision "ansible_local" do |ansible|
7 | ansible.playbook = "provision/playbook.yml"
8 | ansible.verbose = true
9 | end
10 | end
11 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part1_ubuntu/provision/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | gather_facts: yes
4 | become: true
5 | roles:
6 | - hello_web
7 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part1_ubuntu/provision/roles/hello_web/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hello_web:
3 | docroot: /var/www/html
4 | package: apache2
5 | service: apache2
6 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part1_ubuntu/provision/roles/hello_web/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part1_ubuntu/provision/roles/hello_web/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Install Web Service"
3 | package:
4 | name: "{{ hello_web.package }}"
5 | state: present
6 | - name: "Start Web Service"
7 | service:
8 | name: "{{ hello_web.service }}"
9 | state: started
10 | enabled: yes
11 | - name: "Copy Web Content"
12 | copy:
13 | src: "{{ role_path }}/files/index.html"
14 | dest: "{{ hello_web.docroot }}/index.html"
15 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part2_centos/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "bento/centos-7.5"
3 | config.vm.network "forwarded_port", guest: 80, host: 8086
4 | ####### Provision #######
5 | config.vm.provision "ansible_local" do |ansible|
6 | ansible.playbook = "provision/playbook.yml"
7 | ansible.verbose = true
8 | ansible.extra_vars = {
9 | hello_web: {
10 | package: "httpd",
11 | service: "httpd",
12 | docroot: "/var/www/html"
13 | }
14 | }
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part2_centos/provision/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: all
3 | gather_facts: yes
4 | become: true
5 | roles:
6 | - hello_web
7 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part2_centos/provision/roles/hello_web/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hello_web:
3 | docroot: /var/www/html
4 | package: apache2
5 | service: apache2
6 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part2_centos/provision/roles/hello_web/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/ansible_local/part2_centos/provision/roles/hello_web/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Install Web Service"
3 | package:
4 | name: "{{ hello_web.package }}"
5 | state: present
6 | - name: "Start Web Service"
7 | service:
8 | name: "{{ hello_web.service }}"
9 | state: started
10 | enabled: yes
11 | - name: "Copy Web Content"
12 | copy:
13 | src: "{{ role_path }}/files/index.html"
14 | dest: "{{ hello_web.docroot }}/index.html"
15 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part1_ubuntu/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure('2') do |config|
2 | config.vm.box = 'bento/ubuntu-16.04'
3 | config.vm.network 'forwarded_port', guest: 80, host: 8082
4 |
5 | ####### Provision #######
6 | config.vm.provision 'chef_zero' do |chef|
7 | chef.cookbooks_path = "cookbooks" # ❶
8 | chef.add_recipe 'hello_web' # ❷
9 | chef.nodes_path = 'nodes' # ❸
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part1_ubuntu/cookbooks/hello_web/attributes/default.rb:
--------------------------------------------------------------------------------
1 | default['hello_web']['package'] = 'apache2'
2 | default['hello_web']['service'] = 'apache2'
3 | default['hello_web']['docroot'] = '/var/www/html'
4 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part1_ubuntu/cookbooks/hello_web/files/default/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part1_ubuntu/cookbooks/hello_web/metadata.rb:
--------------------------------------------------------------------------------
1 | name 'hello_web'
2 | version '0.0.1'
3 | chef_version '>= 12.14' if respond_to?(:chef_version)
4 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part1_ubuntu/cookbooks/hello_web/recipes/default.rb:
--------------------------------------------------------------------------------
1 | apt_update 'Update the apt cache daily' do
2 | frequency 86_400
3 | action :periodic
4 | end
5 |
6 | package node['hello_web']['package']
7 |
8 | cookbook_file "#{node['hello_web']['docroot']}/index.html" do
9 | source 'index.html'
10 | action :create
11 | end
12 |
13 | service node['hello_web']['service'] do
14 | supports status: true, restart: true, reload: true
15 | action %i(enable start)
16 | end
17 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part1_ubuntu/nodes/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/vagrant/chef_zero/part1_ubuntu/nodes/.gitkeep
--------------------------------------------------------------------------------
/vagrant/chef_zero/part2_centos/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure('2') do |config|
2 | config.vm.box = 'bento/centos-7.5'
3 | config.vm.network 'forwarded_port', guest: 80, host: 8082
4 |
5 | ####### Provision #######
6 | config.vm.provision 'chef_zero' do |chef|
7 | chef.add_recipe 'hello_web'
8 | chef.cookbooks_path = 'cookbooks'
9 | chef.nodes_path = 'nodes'
10 |
11 | #### Override Attributes ####
12 | chef.json = {
13 | 'hello_web' => {
14 | 'package' => 'httpd',
15 | 'service' => 'httpd',
16 | 'docroot' => '/var/www/html'
17 | }
18 | }
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part2_centos/cookbooks/hello_web/attributes/default.rb:
--------------------------------------------------------------------------------
1 | default['hello_web']['package'] = 'apache2'
2 | default['hello_web']['service'] = 'apache2'
3 | default['hello_web']['docroot'] = '/var/www/html'
4 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part2_centos/cookbooks/hello_web/files/default/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part2_centos/cookbooks/hello_web/metadata.rb:
--------------------------------------------------------------------------------
1 | name 'hello_web'
2 | version '0.0.1'
3 | chef_version '>= 12.14' if respond_to?(:chef_version)
4 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part2_centos/cookbooks/hello_web/recipes/default.rb:
--------------------------------------------------------------------------------
1 | apt_update 'Update the apt cache daily' do
2 | frequency 86_400
3 | action :periodic
4 | end
5 |
6 | package node['hello_web']['package']
7 |
8 | cookbook_file "#{node['hello_web']['docroot']}/index.html" do
9 | source 'index.html'
10 | action :create
11 | end
12 |
13 | service node['hello_web']['service'] do
14 | supports status: true, restart: true, reload: true
15 | action %i(enable start)
16 | end
17 |
--------------------------------------------------------------------------------
/vagrant/chef_zero/part2_centos/nodes/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/vagrant/chef_zero/part2_centos/nodes/.gitkeep
--------------------------------------------------------------------------------
/vagrant/docker/create_workarea.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Purpose: Create structure from $HOME directory, or directory of choosing
4 | #
5 |
6 | WORKAREA=${WORKAREA:-"${HOME}/vagrant-docker"}
7 |
8 | mkdir -p ${WORKAREA}/{build,image}/public-html
9 | touch ${WORKAREA}/{build,image}/Vagrantfile ${WORKAREA}/build/Dockerfile
10 |
11 | for path in build image; do
12 | cat <<-'HTML' > ${WORKAREA}/${path}/public-html/index.html
13 | <html>
14 | <body>
15 | Hello World!
16 | </body>
17 | </html>
18 | HTML
19 | done
20 |
--------------------------------------------------------------------------------
/vagrant/docker/part1_build/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:16.04
2 | RUN apt-get -qq update && \
3 | apt-get install -y apache2 && \
4 | apt-get clean
5 | COPY public-html/index.html /var/www/html/
6 | EXPOSE 80
7 | CMD apachectl -D FOREGROUND
8 |
--------------------------------------------------------------------------------
/vagrant/docker/part1_build/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "ubuntu/xenial64"
3 | config.vm.network "forwarded_port", guest: 80, host: 8081
4 |
5 | ####### Provision #######
6 | config.vm.provision "docker" do |docker|
7 | docker.build_image "/vagrant",
8 | args: "-t example/hello_web"
9 | docker.run "hello_web",
10 | image: "example/hello_web:latest",
11 | args: "-p 80:80"
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/vagrant/docker/part1_build/public-html/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/docker/part2_image/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "ubuntu/xenial64"
3 | config.vm.network "forwarded_port", guest: 80, host: 8081
4 |
5 | ####### Provision #######
6 | config.vm.provision "docker", images: %w(httpd:2.4) do |docker|
7 | docker.run "hello_web",
8 | image: "httpd:2.4",
9 | args: "-p 80:80 " +
10 | "-v /vagrant/public-html:/usr/local/apache2/htdocs/"
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/vagrant/docker/part2_image/public-html/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/puppet/hello_web/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
2 | logs/
3 |
--------------------------------------------------------------------------------
/vagrant/puppet/hello_web/rocky9/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "generic/rocky9"
3 | config.vm.network "forwarded_port", guest: 80, host: 8083
4 |
5 | ####### Install Puppet Agent #######
6 | config.vm.provision "bootstrap", before: :all, type: "shell", path: "../bootstrap.sh"
7 |
8 | ####### Provision #######
9 | config.vm.provision :puppet do |puppet|
10 | puppet.module_path = "../site"
11 | puppet.options = "--verbose --debug"
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/vagrant/puppet/hello_web/rocky9/manifests/default.pp:
--------------------------------------------------------------------------------
1 | node default {
2 | class { 'hello_web':
3 | package_name => 'httpd',
4 | service_name => 'httpd',
5 | doc_root => '/var/www/html',
6 | }
7 | }
8 |
9 |
--------------------------------------------------------------------------------
/vagrant/puppet/hello_web/site/hello_web/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/puppet/hello_web/site/hello_web/manifests/init.pp:
--------------------------------------------------------------------------------
1 | class hello_web (
2 | $package_name = 'apache2',
3 | $service_name = 'apache2',
4 | $doc_root = '/var/www/html'
5 | ) {
6 |
7 | package { $package_name:
8 | ensure => present,
9 | }
10 |
11 | service { $service_name:
12 | ensure => running,
13 | enable => true,
14 | }
15 |
16 | file { "$doc_root/index.html":
17 | source => "puppet:///modules/hello_web/index.html",
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/vagrant/puppet/hello_web/ubuntu2204/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure('2') do |config|
2 | config.vm.box = 'generic/ubuntu2204'
3 | config.vm.network 'forwarded_port', guest: 80, host: 8085
4 |
5 | ####### Install Puppet Agent #######
6 | config.vm.provision "bootstrap", before: :all, type: "shell", path: "../bootstrap.sh"
7 |
8 | ####### Provision #######
9 | config.vm.provision :puppet do |puppet|
10 | puppet.module_path = "../site"
11 | puppet.options = "--verbose --debug"
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/vagrant/puppet/hello_web/ubuntu2204/manifests/default.pp:
--------------------------------------------------------------------------------
1 | node default {
2 | class { 'hello_web': }
3 | }
4 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/create_workarea.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Purpose: Create structure from $HOME directory, or directory of your choosing
4 | #
5 |
6 | WORKAREA=${WORKAREA:-"${HOME}/vagrant-puppet"}
7 | MODULE=${WORKAREA}/site/hello_web
8 |
9 | mkdir -p ${WORKAREA}/{site,manifests} \
10 | ${WORKAREA}/site/hello_web/{files,manifests}
11 |
12 | touch ${WORKAREA}/{Vagrantfile,bootstrap.sh} \
13 | ${WORKAREA}/manifests/default.pp \
14 | ${WORKAREA}/site/hello_web/manifests/init.pp
15 |
16 | cat <<-'HTML' > ${MODULE}/files/index.html
17 | <html>
18 | <body>
19 | Hello World!
20 | </body>
21 | </html>
22 | HTML
23 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/part1_ubuntu/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "bento/ubuntu-16.04"
3 | config.vm.network "forwarded_port", guest: 80, host: 8084
4 |
5 | ####### Install Puppet Agent #######
6 | config.vm.provision "shell", path: "./bootstrap.sh"
7 |
8 | ####### Provision #######
9 | config.vm.provision "puppet" do |puppet|
10 | puppet.module_path = "./site"
11 | puppet.options = "--verbose --debug"
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/part1_ubuntu/manifests/default.pp:
--------------------------------------------------------------------------------
1 | node default {
2 | include hello_web
3 | }
4 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/part1_ubuntu/site/hello_web/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/part1_ubuntu/site/hello_web/manifests/init.pp:
--------------------------------------------------------------------------------
1 | class hello_web (
2 | $package_name = 'apache2',
3 | $service_name = 'apache2',
4 | $doc_root = '/var/www/html'
5 | ) {
6 |
7 | package { $package_name:
8 | ensure => present,
9 | }
10 |
11 | service { $service_name:
12 | ensure => running,
13 | enable => true,
14 | }
15 |
16 | file { "$doc_root/index.html":
17 | source => "puppet:///modules/hello_web/index.html",
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/part2_centos/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure("2") do |config|
2 | config.vm.box = "bento/centos-7.5"
3 | config.vm.network "forwarded_port", guest: 80, host: 8084
4 |
5 | ####### Install Puppet Agent #######
6 | config.vm.provision "shell", path: "./bootstrap.sh"
7 |
8 | ####### Provision #######
9 | config.vm.provision "puppet" do |puppet|
10 | puppet.module_path = "./site"
11 | puppet.options = "--verbose --debug"
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/part2_centos/manifests/default.pp:
--------------------------------------------------------------------------------
1 | node default {
2 | class { 'hello_web':
3 | package_name => 'httpd',
4 | service_name => 'httpd',
5 | doc_root => '/var/www/html',
6 | }
7 | }
8 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/part2_centos/site/hello_web/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/puppet/legacy/part2_centos/site/hello_web/manifests/init.pp:
--------------------------------------------------------------------------------
1 | class hello_web (
2 | $package_name = 'apache2',
3 | $service_name = 'apache2',
4 | $doc_root = '/var/www/html'
5 | ) {
6 |
7 | package { $package_name:
8 | ensure => present,
9 | }
10 |
11 | service { $service_name:
12 | ensure => running,
13 | enable => true,
14 | }
15 |
16 | file { "$doc_root/index.html":
17 | source => "puppet:///modules/hello_web/index.html",
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/vagrant/puppet_server/README.md:
--------------------------------------------------------------------------------
1 | # Puppet Server Provisioner
2 |
3 | Articles about using the `puppet_server` provisioner, where code must work with a Puppet Server.
4 |
5 |
--------------------------------------------------------------------------------
/vagrant/puppet_server/hello_web_proj/setup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | PROJ_HOME=~/vagrant-puppetserver
4 |
5 | # create directory structure
6 | mkdir -p \
7 | $PROJ_HOME/site/{data,manifests,modules/hello_web/{files,manifests}}
8 |
9 | cd $PROJ_HOME
10 |
11 | # create files
12 | touch \
13 | Vagrantfile \
14 | bootstrap.sh \
15 | site/manifests/site.pp \
16 | site/modules/hello_web/{manifests/init.pp,files/index.html,metadata.json}
--------------------------------------------------------------------------------
/vagrant/puppet_server/hello_web_proj/site/manifests/site.pp:
--------------------------------------------------------------------------------
1 | node "node01.local" {
2 | class { 'hello_web': }
3 | }
4 |
5 | node "node02.local" {
6 | class { 'hello_web': }
7 | }
8 |
--------------------------------------------------------------------------------
/vagrant/puppet_server/hello_web_proj/site/modules/hello_web/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/puppet_server/hello_web_proj/site/modules/hello_web/manifests/init.pp:
--------------------------------------------------------------------------------
1 | class hello_web (
2 | $package_name = 'apache2',
3 | $service_name = 'apache2',
4 | $doc_root = '/var/www/html'
5 | ) {
6 |
7 | package { $package_name:
8 | ensure => present,
9 | }
10 |
11 | service { $service_name:
12 | ensure => running,
13 | enable => true,
14 | }
15 |
16 | file { "$doc_root/index.html":
17 | source => "puppet:///modules/hello_web/index.html",
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/vagrant/puppet_server/hello_web_proj/site/modules/hello_web/metadata.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "joachim8675309-hello_web",
3 | "version": "0.1.0",
4 | "author": "joachim8675309",
5 | "summary": "Hello World Tutorial",
6 | "license": "Apache-2.0",
7 | "source": "https://github.com/darkn3rd/blog_tutorials",
8 | "dependencies": [],
9 | "operatingsystem_support": [
10 | {
11 | "operatingsystem": "Ubuntu",
12 | "operatingsystemrelease": ["22.04"]
13 | }
14 | ],
15 | "requirements": [
16 | {
17 | "name": "puppet",
18 | "version_requirement": ">= 7.24 < 9.0.0"
19 | }
20 | ]
21 | }
22 |
--------------------------------------------------------------------------------
/vagrant/salt/create_workarea.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Purpose: Create structure from $HOME directory, or directory of your choosing
4 | #
5 |
6 | WORKAREA=${WORKAREA:-"${HOME}/vagrant-salt"}
7 | FORMULAPATH=${WORKAREA}/roots/salt/hello_web
8 |
9 | mkdir -p ${WORKAREA}/roots/{pillar,salt/hello_web/files}
10 |
11 | touch ${WORKAREA}/Vagrantfile \
12 | ${WORKAREA}/roots/salt/top.sls \
13 | ${WORKAREA}/roots/pillar/{top.sls,hello_web.sls} \
14 | ${FORMULAPATH}/{defaults.yaml,init.sls,map.jinja,files/index.html}
15 |
16 | cat <<-'HTML' > ${FORMULAPATH}/files/index.html
17 | <html>
18 | <body>
19 | Hello World!
20 | </body>
21 | </html>
22 | HTML
23 |
--------------------------------------------------------------------------------
/vagrant/salt/part1_ubuntu/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure('2') do |config|
2 | config.vm.box = 'bento/ubuntu-16.04'
3 | config.vm.network 'forwarded_port', guest: 80, host: 8085
4 |
5 | ####### File Share #######
6 | config.vm.synced_folder './roots/salt/', '/srv/salt'
7 | config.vm.synced_folder './roots/pillar', '/srv/pillar'
8 |
9 | ####### Provision #######
10 | config.vm.provision :salt do |salt|
11 | salt.masterless = true
12 | salt.run_highstate = true
13 | salt.verbose = true
14 | end
15 | end
16 |
--------------------------------------------------------------------------------
/vagrant/salt/part1_ubuntu/roots/pillar/hello_web.sls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/vagrant/salt/part1_ubuntu/roots/pillar/hello_web.sls
--------------------------------------------------------------------------------
/vagrant/salt/part1_ubuntu/roots/pillar/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - hello_web
4 |
--------------------------------------------------------------------------------
/vagrant/salt/part1_ubuntu/roots/salt/hello_web/defaults.yaml:
--------------------------------------------------------------------------------
1 | hello_web:
2 | docroot: /var/www/html
3 | package: apache2
4 | service: apache2
5 |
--------------------------------------------------------------------------------
/vagrant/salt/part1_ubuntu/roots/salt/hello_web/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/salt/part1_ubuntu/roots/salt/hello_web/init.sls:
--------------------------------------------------------------------------------
1 | {% from "hello_web/map.jinja" import hello_web with context %}
2 |
3 | hello_web:
4 | pkg.installed:
5 | - name: {{ hello_web.package }}
6 | service.running:
7 | - name: {{ hello_web.service }}
8 | - enable: True
9 | - reload: True
10 | file.managed:
11 | - name: {{ hello_web.docroot }}/index.html
12 | - source: salt://hello_web/files/index.html
13 |
--------------------------------------------------------------------------------
/vagrant/salt/part1_ubuntu/roots/salt/hello_web/map.jinja:
--------------------------------------------------------------------------------
1 | {% import_yaml 'hello_web/defaults.yaml' as default_settings %}
2 | {% set hello_web = salt['pillar.get']('hello_web', default=default_settings.get('hello_web'), merge=True) %}
3 |
--------------------------------------------------------------------------------
/vagrant/salt/part1_ubuntu/roots/salt/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - hello_web
4 |
--------------------------------------------------------------------------------
/vagrant/salt/part2_centos/Vagrantfile:
--------------------------------------------------------------------------------
1 | Vagrant.configure('2') do |config|
2 | config.vm.box = "bento/centos-7.5"
3 | config.vm.network 'forwarded_port', guest: 80, host: 8085
4 |
5 | ####### File Share #######
6 | config.vm.synced_folder './roots/salt/', '/srv/salt'
7 | config.vm.synced_folder './roots/pillar', '/srv/pillar'
8 |
9 | ####### Provision #######
10 | config.vm.provision :salt do |salt|
11 | salt.masterless = true
12 | salt.run_highstate = true
13 | salt.verbose = true
14 | salt.pillar "hello_web" => {
15 | "package" => "httpd",
16 | "service" => "httpd",
17 | "docroot" => "/var/www/html"
18 | }
19 | end
20 | end
21 |
--------------------------------------------------------------------------------
/vagrant/salt/part2_centos/roots/pillar/hello_web.sls:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/darkn3rd/blog_tutorials/2dd664ec57599537ed71cd9d52bb6c8176b7550f/vagrant/salt/part2_centos/roots/pillar/hello_web.sls
--------------------------------------------------------------------------------
/vagrant/salt/part2_centos/roots/pillar/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - hello_web
4 |
--------------------------------------------------------------------------------
/vagrant/salt/part2_centos/roots/salt/hello_web/defaults.yaml:
--------------------------------------------------------------------------------
1 | hello_web:
2 | docroot: /var/www/html
3 | package: apache2
4 | service: apache2
5 |
--------------------------------------------------------------------------------
/vagrant/salt/part2_centos/roots/salt/hello_web/files/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello World!
4 | </body>
5 | </html>
6 |
--------------------------------------------------------------------------------
/vagrant/salt/part2_centos/roots/salt/hello_web/init.sls:
--------------------------------------------------------------------------------
1 | {% from "hello_web/map.jinja" import hello_web with context %}
2 |
3 | hello_web:
4 | pkg.installed:
5 | - name: {{ hello_web.package }}
6 | service.running:
7 | - name: {{ hello_web.service }}
8 | - enable: True
9 | - reload: True
10 | file.managed:
11 | - name: {{ hello_web.docroot }}/index.html
12 | - source: salt://hello_web/files/index.html
13 |
--------------------------------------------------------------------------------
/vagrant/salt/part2_centos/roots/salt/hello_web/map.jinja:
--------------------------------------------------------------------------------
1 | {% import_yaml 'hello_web/defaults.yaml' as default_settings %}
2 | {% set hello_web = salt['pillar.get']('hello_web', default=default_settings.get('hello_web'), merge=True) %}
3 |
--------------------------------------------------------------------------------
/vagrant/salt/part2_centos/roots/salt/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - hello_web
4 |
--------------------------------------------------------------------------------
/vagrant/shell/create_workarea.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | #
3 | # Purpose: Create structure from $HOME directory, or directory of choosing
4 | #
5 |
6 | WORKAREA=${WORKAREA:-"${HOME}/vagrant-shell"}
7 |
8 | mkdir -p ${WORKAREA}/scripts
9 | touch ${WORKAREA}/{Vagrantfile,scripts/hello_web.sh}
10 |
--------------------------------------------------------------------------------
/vagrant/shell/part1_ubuntu/Vagrantfile:
--------------------------------------------------------------------------------
1 | script_path = './scripts'
2 |
3 | Vagrant.configure("2") do |config|
4 | config.vm.box = "ubuntu/xenial64"
5 | config.vm.network "forwarded_port", guest: 80, host: 8080
6 |
7 | ####### Provision #######
8 | config.vm.provision "shell" do |script|
9 | script.path = "#{script_path}/hello_web.sh"
10 | script.args = %w(apache2 apache2 /var/www/html)
11 | end
12 | end
13 |
--------------------------------------------------------------------------------
/vagrant/shell/part1_ubuntu/scripts/hello_web.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | #### Set variables with intelligent defaults
4 | APACHE_PACKAGE=${1:-'apache2'}
5 | APACHE_SERVICE=${2:-'apache2'}
6 | APACHE_DOCROOT=${3:-'/var/www/html'}
7 |
8 | #### Download and Install Package
9 | apt-get update
10 | apt-get install -y ${APACHE_PACKAGE}
11 |
12 | #### Start, Enable Service
13 | systemctl start ${APACHE_SERVICE}.service
14 | systemctl enable ${APACHE_SERVICE}.service
15 |
16 | #### Create Content
17 | cat <<-'HTML' > ${APACHE_DOCROOT}/index.html
18 | <html>
19 | <body>
20 | Hello World!
21 | </body>
22 | </html>
23 | HTML
24 |
--------------------------------------------------------------------------------
/vagrant/shell/part2a_centos/Vagrantfile:
--------------------------------------------------------------------------------
1 | script_path = './scripts'
2 |
3 | Vagrant.configure("2") do |config|
4 | config.vm.box = "centos/7"
5 | config.vm.network "forwarded_port", guest: 80, host: 8080
6 |
7 | ####### Provision #######
8 | config.vm.provision "shell" do |script|
9 | script.path = "#{script_path}/hello_web.sh"
10 | script.args = %w(
11 | httpd
12 | httpd
13 | /var/www/html
14 | )
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/vagrant/shell/part2b_gentoo/Vagrantfile:
--------------------------------------------------------------------------------
1 | script_path = './scripts'
2 |
3 | Vagrant.configure("2") do |config|
4 | config.vm.box = "generic/gentoo"
5 | config.vm.network "forwarded_port", guest: 80, host: 8080
6 |
7 | ####### Provision #######
8 | config.vm.provision "shell" do |script|
9 | script.path = "#{script_path}/hello_web.sh"
10 | script.args = %w(
11 | www-servers/apache
12 | apache2
13 | /var/www/localhost/htdocs
14 | )
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/vault-docker/README.md:
--------------------------------------------------------------------------------
1 | # HashiCorp Vault hosted on Docker Environments
2 |
3 | This area covers using Vault hosted on Docker environments.
4 |
--------------------------------------------------------------------------------
/vault-docker/approle/README.md:
--------------------------------------------------------------------------------
1 | # AppRole
2 |
3 | These are examples of using the AppRole Feature.
4 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/.env:
--------------------------------------------------------------------------------
1 | DGRAPH_VERSION=v23.1.1
2 | VAULT_VERSION=1.16
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/.gitignore:
--------------------------------------------------------------------------------
1 | vault/*.json
2 | vault/data
3 |
4 | dgraph/vault_role_id
5 | dgraph/vault_secret_id
6 | dgraph/export
7 | dgraph/backups
8 |
9 | # ignore any script generated json/hcl/graphql
10 | scripts/**/*.json
11 | scripts/**/*.hcl
12 | scripts/**/*.graphql
13 |
14 | # Misc
15 | unseal.creds
16 | logs
17 | .dgraph.token
18 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/Brewfile:
--------------------------------------------------------------------------------
1 | brew "bash"
2 | brew "curl"
3 | brew "grep"
4 | brew "jq"
5 | cask "docker"
6 | tap "hashicorp/tap"
7 | brew "hashicorp/tap/vault"
8 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/chooc.config:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/dgraph/alpha.yaml:
--------------------------------------------------------------------------------
1 | vault:
2 | addr: http://vault:8200
3 | acl_field: hmac_secret_file
4 | acl_format: raw
5 | enc_field: enc_key
6 | enc_format: raw
7 | path: secret/data/dgraph/alpha
8 | role_id_file: /dgraph/vault/role_id
9 | secret_id_file: /dgraph/vault/secret_id
10 | security:
11 | whitelist: 10.0.0.0/8,172.0.0.0/8,192.168.0.0/16
12 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/dgraph/backup.graphql:
--------------------------------------------------------------------------------
1 | mutation {
2 | backup(input: {
3 | destination: "/dgraph/backups"
4 | forceFull: true
5 | }) {
6 | response {
7 | message
8 | code
9 | }
10 | }
11 | }
12 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/dgraph/export.graphql:
--------------------------------------------------------------------------------
1 | mutation {
2 | export(input: { format: "json" }) {
3 | response {
4 | message
5 | code
6 | }
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/scripts/dgraph/getting_started/3.query_starring_edge.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v jq > /dev/null || \
3 | { echo "[ERROR]: 'jq' command not not found" 1>&2; exit 1; }
4 | command -v curl > /dev/null || \
5 | { echo "[ERROR]: 'curl' command not not found" 1>&2; exit 1; }
6 |
7 | export DGRAPH_HTTP=${DGRAPH_HTTP:-"http://localhost:8080"}
8 | [[ -z "$DGRAPH_TOKEN" ]] && { echo 'DGRAPH_TOKEN not specified. Aborting' 2>&1 ; exit 1; }
9 |
10 | curl "$DGRAPH_HTTP/query" --silent --request POST \
11 | --header "X-Dgraph-AccessToken: $DGRAPH_TOKEN" \
12 | --header "Content-Type: application/dql" \
13 | --data $'{ me(func: has(starring)) { name } }' \
14 | | jq .data
15 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/scripts/dgraph/getting_started/README.md:
--------------------------------------------------------------------------------
1 | # Dgraph Getting Started Using Access Key
2 |
3 |
4 | ```bash
5 | export DGRAPH_HTTP="http://localhost:8080"
6 | export DGRAPH_ADMIN_USER="groot"
7 | export DGRAPH_ADMIN_PSWD="password"
8 | # Fetch DGRAPH_TOKEN
9 | ../login.sh # sets DGRAPH_TOKEN
10 | # Load Data
11 | ./1.data_json.sh # or run ./1.data_rds.sh
12 | # Load Schema
13 | ./2.schema.sh
14 | # Demo Queries
15 | ./3.query_starring_edge.sh
16 | ./4.query_movies_after_1980.sh
17 | ```
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/scripts/randpasswd.sh:
--------------------------------------------------------------------------------
1 | randpasswd() {
2 | NUM=${1:-32}
3 |
4 | # macOS scenario
5 | if [[ $(uname -s) == "Darwin" ]]; then
6 | perl -pe 'binmode(STDIN, ":bytes"); tr/A-Za-z0-9//dc;' < /dev/urandom | head -c $NUM
7 | else
8 | # tested with: GNU/Linux, Cygwin, MSys
9 | tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w $NUM | sed 1q
10 | fi
11 | }
12 |
13 | NUM=${1:-32}
14 |
15 | randpasswd $NUM
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/scripts/vault_api/1.unseal.sh:
--------------------------------------------------------------------------------
1 | ../unseal.sh
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/scripts/vault_api/README.md:
--------------------------------------------------------------------------------
1 | # Vault REST API
2 |
3 | These scripts demonstrate how to use AppRole via the Vault REST API.
4 |
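
For orientation, the core REST call these scripts build up to is the AppRole login; a minimal sketch (assumes `ROLE_ID` and `SECRET_ID` have already been retrieved, e.g. by the earlier scripts):

```bash
export VAULT_ADDR=${VAULT_ADDR:-"http://localhost:8200"}
# exchange the role_id/secret_id pair for a client token
curl --silent --request POST \
  --data "{\"role_id\": \"$ROLE_ID\", \"secret_id\": \"$SECRET_ID\"}" \
  $VAULT_ADDR/v1/auth/approle/login | jq -r '.auth.client_token'
```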
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/scripts/vault_cli/1.unseal.sh:
--------------------------------------------------------------------------------
1 | ../unseal.sh
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/scripts/vault_cli/2.configure.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | command -v vault > /dev/null || \
3 | { echo "[ERROR]: 'vault' command not not found" 1>&2; exit 1; }
4 |
5 | [[ -z "$VAULT_ROOT_TOKEN" ]] && { echo 'VAULT_ROOT_TOKEN not specified. Aborting' 2>&1 ; exit 1; }
6 | export VAULT_ADDR=${VAULT_ADDR:-"http://localhost:8200"}
7 |
8 | vault login $VAULT_ROOT_TOKEN
9 |
10 | # idempotently enable approle auth at approle/
11 | vault auth list | grep -q '^approle' || vault auth enable approle
12 |
13 | # idempotently enable kv-v2 secrets at secret/
14 | vault secrets list | grep -q '^secret' || vault secrets enable -path=secret kv-v2
15 |
16 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/scripts/vault_cli/README.md:
--------------------------------------------------------------------------------
1 | # Vault CLI
2 |
3 | These scripts demonstrate how to use AppRole via the Vault CLI.
4 |
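
For orientation, the equivalent login with the CLI (a sketch; the role name `dgraph` is an assumption about what the configure scripts create):

```bash
export VAULT_ADDR=${VAULT_ADDR:-"http://localhost:8200"}
# fetch the credentials for the role, then log in with them
ROLE_ID=$(vault read -field=role_id auth/approle/role/dgraph/role-id)
SECRET_ID=$(vault write -force -field=secret_id auth/approle/role/dgraph/secret-id)
vault write auth/approle/login role_id="$ROLE_ID" secret_id="$SECRET_ID"
```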
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/vault/config.hcl:
--------------------------------------------------------------------------------
1 | storage "raft" {
2 | path = "/vault/data"
3 | node_id = "vault1"
4 | }
5 |
6 | listener "tcp" {
7 | address = "0.0.0.0:8200"
8 | tls_disable = "true"
9 | }
10 |
11 | api_addr = "http://127.0.0.1:8200"
12 | cluster_addr = "http://127.0.0.1:8201"
13 | ui = true
14 | disable_mlock = true
15 |
--------------------------------------------------------------------------------
/vault-docker/approle/dgraph/vault/policy_dgraph.hcl:
--------------------------------------------------------------------------------
1 | path "secret/data/dgraph/*" {
2 | capabilities = [ "read", "update" ]
3 | }
4 |
--------------------------------------------------------------------------------
/vbox/macos/00.all_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # variables for readability
4 | PREFIX=https://raw.githubusercontent.com
5 | URL_PATH=Homebrew/install/master/install  # renamed from PATH, which would clobber the shell's search path
6 | URL=${PREFIX}/${URL_PATH}
7 |
8 | # install homebrew w/ ruby install script
9 | /usr/bin/ruby -e "$(curl -fsSL ${URL})"
10 |
11 | # Install All Packages at Once
12 | cat <<-'BREWFILE_EOF' > Brewfile
13 | cask 'virtualbox'
14 | cask 'virtualbox-extension-pack'
15 | cask 'vagrant'
16 | tap 'chef/chef'
17 | cask 'chefdk'
18 | cask 'docker-toolbox'
19 | cask 'minikube'
20 | brew 'kubectl'
21 | BREWFILE_EOF
22 | brew bundle --verbose
23 |
--------------------------------------------------------------------------------
/vbox/macos/01.homebrew_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # variables for readability
4 | PREFIX=https://raw.githubusercontent.com
5 | URL_PATH=Homebrew/install/master/install  # renamed from PATH, which would clobber the shell's search path
6 | URL=${PREFIX}/${URL_PATH}
7 |
8 | # install homebrew w/ ruby install script
9 | /usr/bin/ruby -e "$(curl -fsSL ${URL})"
10 |
--------------------------------------------------------------------------------
/vbox/macos/02.vbox_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | brew cask install virtualbox
4 | brew cask install virtualbox-extension-pack
5 |
--------------------------------------------------------------------------------
/vbox/macos/03.vagrant_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | brew cask install vagrant
4 |
--------------------------------------------------------------------------------
/vbox/macos/04.vagrant_demo_macos.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | WORKAREA=${HOME}/vbox_tutorial
3 |
4 | ##############################################
5 | # Prerequisites: Mac OS X image
6 | # See: https://github.com/boxcutter/macos
7 | ##############################################
8 |
9 | mkdir -p ${WORKAREA}/mymacosx && cd ${WORKAREA}/mymacosx
10 | vagrant init my/macos-1012 && vagrant up
11 |
12 | URL=https://github.com/KittyKatt/screenFetch/archive/master.zip
13 | vagrant ssh --command "curl -OL ${URL}"
14 | vagrant ssh --command 'unzip master.zip'
15 | vagrant ssh --command './screenFetch-master/screenfetch-dev'
16 |
--------------------------------------------------------------------------------
/vbox/macos/05.vagrant_demo_windows_03.sh:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Previous Steps
3 | # vagrant ssh
4 | # c:\tools\msys64\usr\bin\bash.exe
5 | ##############################################
6 |
7 | # install unzip package
8 | PATH=/usr/bin:$PATH
9 | pacman -S unzip
10 |
11 | # install screenfetch
12 | URL=https://github.com/KittyKatt/screenFetch/archive/master.zip
13 | curl -OL $URL
14 | unzip master.zip
15 |
16 | # run screenfetch
17 | ./screenFetch-master/screenfetch-dev
18 |
--------------------------------------------------------------------------------
/vbox/macos/06.kitchen_chefdk_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | brew tap chef/chef
4 | brew cask install chefdk
5 |
--------------------------------------------------------------------------------
/vbox/macos/07.kitchen_chef_generate.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | WORKAREA=${HOME}/vbox_tutorial
4 | mkdir -p ${WORKAREA}/cookbooks && cd ${WORKAREA}/cookbooks
5 |
6 | # Generate example
7 | chef generate cookbook helloworld && cd helloworld
8 | # Create Ubuntu and CentOS systems
9 | kitchen create
10 |
--------------------------------------------------------------------------------
/vbox/macos/08.kitchen_screeenfetch.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | wget https://github.com/KittyKatt/screenFetch/archive/master.zip
4 |
5 | unzip master.zip
6 | mv screenFetch-master/ ${HOME}/.kitchen/cache/
7 |
--------------------------------------------------------------------------------
/vbox/macos/09.kitchen_demo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Install pciutils on CentOS (required by screenfetch)
4 | kitchen exec centos --command='sudo yum -y install pciutils'
5 | # Install a snap on Ubuntu (avoids warnings w/ screenfetch)
6 | kitchen exec ubuntu --command='sudo snap install hello-world'
7 |
8 | # Run screenfetch script on all systems
9 | kitchen exec default* \
10 | --command='sudo \
11 | /tmp/omnibus/cache/screenFetch-master/screenfetch-dev'
12 |
--------------------------------------------------------------------------------
/vbox/macos/10.docker_toolbox_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | brew cask install docker-toolbox
4 |
--------------------------------------------------------------------------------
/vbox/macos/12.docker_machine_demo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Create a docker machine environment
4 | docker-machine create --driver virtualbox default
5 |
6 | # Tell docker engine to use our machine's docker
7 | eval $(docker-machine env default)
8 |
9 | # Run a container from docker hub
10 | docker run docker/whalesay cowsay Hello World
11 |
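The `eval` works because `docker-machine env default` prints `export` statements (DOCKER_HOST, DOCKER_CERT_PATH, and friends) that point the local client at the VM's daemon. A sketch for inspecting the effect:

    # Print the export statements without applying them
    docker-machine env default
    # After the eval, confirm the variables landed in this shell
    env | grep '^DOCKER_'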
--------------------------------------------------------------------------------
/vbox/macos/13.minikube_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | brew cask install minikube
4 |
--------------------------------------------------------------------------------
/vbox/macos/14.kubectl_client_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | brew install kubectl
4 |
--------------------------------------------------------------------------------
/vbox/macos/15.minikube_demo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Start minikube environment
4 | minikube start --vm-driver=virtualbox
5 |
6 | # Deploy Something
7 | kubectl run hello-minikube \
8 | --image=k8s.gcr.io/echoserver:1.4 \
9 | --port=8080
10 |
11 | kubectl expose deployment hello-minikube \
12 | --type=NodePort
13 |
14 | until kubectl get pod | grep hello-minikube | grep -q Running; do sleep 1; done
15 |
16 | curl $(minikube service hello-minikube --url)
17 |
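The script leaves the demo objects running; a minimal teardown sketch, assuming the names used above:

    # Remove the demo service and deployment, then stop the VM
    kubectl delete service,deployment hello-minikube
    minikube stop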
--------------------------------------------------------------------------------
/vbox/macos/16.print_info.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | printf "\nVirtualBox %s\n" $(vboxmanage --version) && \
4 | vagrant --version && \
5 | kitchen --version && \
6 | docker-machine --version && \
7 | docker --version && \
8 | minikube version && \
9 | printf "Kubectl Client: %s\n" \
10 | $(kubectl version | awk -F\" \
11 | '/Client/{ print $6 }')
12 |
13 | printf "Currently Running VMS:\n"
14 | for VMS in $(vboxmanage list runningvms | cut -d'"' -f2); do
15 | printf " * %s\n" ${VMS}
16 | done
17 |
--------------------------------------------------------------------------------
/vbox/macos/Brewfile:
--------------------------------------------------------------------------------
1 | cask 'virtualbox'
2 | cask 'virtualbox-extension-pack'
3 | cask 'vagrant'
4 | tap 'chef/chef'
5 | cask 'chefdk'
6 | cask 'docker-toolbox'
7 | cask 'minikube'
8 | brew 'kubectl'
9 |
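The Brewfile mirrors the install scripts above, so the whole toolchain can be installed in one pass with the `brew bundle` subcommand (Homebrew fetches its bundle tap automatically on first use):

    # Run from the directory containing the Brewfile
    brew bundle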
--------------------------------------------------------------------------------
/vbox/macos/README.md:
--------------------------------------------------------------------------------
1 | # VirtualBox on Windows
2 |
3 | ## Tutorial
4 |
5 | https://medium.com/@Joachim8675309/virtualbox-and-friends-on-windows-8-1-3c691460698f
6 |
7 | ## Notes
8 |
9 | This was driven using Windows 8.1, but the experience is similar on Windows 7/10/2012r2/2016.
10 |
11 | ## License
12 | This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
13 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/02.vbox_post_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Install kernel development packages
4 | sudo dnf install -y \
5 | binutils \
6 | gcc \
7 | make \
8 | patch \
9 | libgomp \
10 | glibc-headers \
11 | glibc-devel \
12 | kernel-headers \
13 | kernel-devel \
14 | dkms
15 |
16 | # Install/Setup VirtualBox 5.2.x
17 | sudo dnf install -y VirtualBox-5.2
18 | sudo /usr/lib/virtualbox/vboxdrv.sh setup
19 |
20 | # Test Version
21 | vboxmanage --version
22 | # Expected output: 5.2.16r123759
23 |
24 | # Enable Current User
25 | sudo usermod -a -G vboxusers ${USER}
26 |
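If the `vboxdrv.sh setup` step succeeded, the VirtualBox kernel modules should now be loaded; a quick check:

    # Module names per upstream VirtualBox packaging
    lsmod | grep -E '^vbox(drv|netflt|netadp)'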
--------------------------------------------------------------------------------
/vbox/vbox_fedora/03.vagrant_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | VER=$(
4 | curl -s https://releases.hashicorp.com/vagrant/ | \
5 |   grep -oP '(\d+\.){2}\d+' | \
6 | head -1
7 | )
8 | PKG="vagrant_${VER}_$(uname -p).rpm"
9 |
10 | curl -OL https://releases.hashicorp.com/vagrant/${VER}/${PKG}
11 | sudo rpm -Uvh ${PKG}
12 |
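A short sanity check after the RPM lands:

    rpm -q vagrant      # confirms the package registered with RPM
    vagrant --version   # confirms the binary is on PATH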
--------------------------------------------------------------------------------
/vbox/vbox_fedora/04.vagrant_demo_gentoo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | WORKAREA=${HOME}/vbox_tutorial
3 |
4 | mkdir -p ${WORKAREA}/mygentoo && cd ${WORKAREA}/mygentoo
5 | vagrant init generic/gentoo && vagrant up
6 |
7 | # install & run neofetch
8 | vagrant ssh --command 'sudo emerge app-misc/neofetch'
9 | vagrant ssh --command 'neofetch'
10 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/05.vagrant_demo_arch.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | WORKAREA=${HOME}/vbox_tutorial
3 |
4 | mkdir -p ${WORKAREA}/myarch && cd ${WORKAREA}/myarch
5 | vagrant init archlinux/archlinux && vagrant up
6 |
7 | # install and run neofetch
8 | vagrant ssh --command 'sudo pacman -S --noconfirm neofetch'
9 | vagrant ssh --command 'neofetch'
10 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/06.kitchen_chefdk_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | VER=3.2.30
4 | PKG=chefdk-${VER}-1.el7.x86_64.rpm
5 |
6 | URL=https://packages.chef.io/files/stable/chefdk/${VER}/el/7/${PKG}
7 |
8 | curl -O ${URL}
9 | sudo rpm -Uvh ${PKG}
10 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/07.kitchen_chef_generate.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | WORKAREA=${HOME}/vbox_tutorial
4 | mkdir -p ${WORKAREA}/cookbooks && cd ${WORKAREA}/cookbooks
5 |
6 | # Generate example
7 | chef generate cookbook helloworld && cd helloworld
8 | # Create Ubuntu and CentOS systems
9 | kitchen create
10 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/08.kitchen_screeenfetch.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | wget https://github.com/KittyKatt/screenFetch/archive/master.zip
4 |
5 | unzip master.zip
6 | mv screenFetch-master/ ${HOME}/.kitchen/cache/
7 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/09.kitchen_demo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Install pciutils on CentOS (required by screenfetch)
4 | kitchen exec centos --command='sudo yum -y install pciutils'
5 | # Install a snap on Ubuntu (avoids warnings w/ screenfetch)
6 | kitchen exec ubuntu --command='sudo snap install hello-world'
7 |
8 | # Run screenfetch script on all systems
9 | kitchen exec default* \
10 |   --command='sudo /tmp/omnibus/cache/screenFetch-master/screenfetch-dev'
11 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/10.docker_machine_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | VER=v0.14.0
4 | BASE=https://github.com/docker/machine/releases/download/${VER}
5 |
6 | # Download Artifact
7 | curl -L ${BASE}/docker-machine-$(uname -s)-$(uname -m) \
8 | > /tmp/docker-machine
9 |
10 | sudo install /tmp/docker-machine /usr/local/bin/docker-machine
11 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/11.docker_client_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | REPO_URL=https://download.docker.com/linux/fedora/docker-ce.repo
4 |
5 | sudo dnf config-manager --add-repo ${REPO_URL}
6 | sudo dnf install -y docker-ce
7 |
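Only the client half matters here; the daemon the later demos talk to runs inside the docker-machine VM created in the next step. A client-only check that works before any machine exists:

    # Prints the client version without contacting a daemon
    docker --version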
--------------------------------------------------------------------------------
/vbox/vbox_fedora/12.docker_machine_demo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Create a docker machine environment
4 | docker-machine create --driver virtualbox default
5 |
6 | # Tell docker engine to use our machine's docker
7 | eval $(docker-machine env default)
8 |
9 | # Run a container from docker hub
10 | docker run docker/whalesay cowsay Hello World
11 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/13.minikube_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | BASE=https://storage.googleapis.com/minikube/releases
4 |
5 | # Download & Install Artifact
6 | curl -Lo minikube ${BASE}/v0.28.1/minikube-linux-amd64 && \
7 | chmod +x minikube && \
8 | sudo mv minikube /usr/local/bin/
9 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/14.kubectl_client_install.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | BASE=https://storage.googleapis.com/kubernetes-release/release
4 | VER=$(curl -s ${BASE}/stable.txt)
5 |
6 | # Download artifact and install
7 | curl -Lo kubectl ${BASE}/${VER}/bin/linux/amd64/kubectl && \
8 | chmod +x kubectl && \
9 | sudo mv kubectl /usr/local/bin/
10 |
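With no cluster running yet, verify the binary client-side only; the `--client` flag skips the server round-trip:

    kubectl version --client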
--------------------------------------------------------------------------------
/vbox/vbox_fedora/15.minikube_demo.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Start minikube environment
4 | minikube start --vm-driver=virtualbox
5 |
6 | # Deploy Something
7 | kubectl run hello-minikube \
8 | --image=k8s.gcr.io/echoserver:1.4 \
9 | --port=8080
10 |
11 | kubectl expose deployment hello-minikube \
12 | --type=NodePort
13 |
14 | until kubectl get pod | grep hello-minikube | grep -q Running; do sleep 1; done
15 |
16 | curl $(minikube service hello-minikube --url)
17 |
--------------------------------------------------------------------------------
/vbox/vbox_fedora/16.print_info.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | printf "\nVirtualBox %s\n" $(vboxmanage --version) && \
4 | vagrant --version && \
5 | kitchen --version && \
6 | docker-machine --version && \
7 | docker --version && \
8 | minikube version && \
9 | printf "Kubectl Client: %s\n" \
10 | $(kubectl version | awk -F\" \
11 | '/Client/{ print $6 }')
12 |
13 | printf "Currently Running VMS:\n"
14 | for VMS in $(vboxmanage list runningvms | cut -d'"' -f2); do
15 | printf " * %s\n" ${VMS}
16 | done
17 |
--------------------------------------------------------------------------------
/vbox/windows/01.chocolatey_install.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Administrator Privilege Shell ONLY
3 | ##############################################
4 |
5 | # Set privilege for running scripts
6 | Set-ExecutionPolicy Bypass -Scope Process -Force
7 | # Variables for readability
8 | $scripturl = 'https://chocolatey.org/install.ps1'
9 | $wc = New-Object System.Net.WebClient
10 | # Install Chocolatey
11 | Invoke-Expression ($wc.DownloadString($scripturl))
12 |
--------------------------------------------------------------------------------
/vbox/windows/02.vbox_install.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Administrator Privilege Shell ONLY
3 | ##############################################
4 |
5 | # Helper Function
6 | Function Update-Environment
7 | {
8 | $m = [System.Environment]::GetEnvironmentVariable("Path","Machine")
9 | $u = [System.Environment]::GetEnvironmentVariable("Path","User")
10 | $env:Path = $m + ";" + $u
11 | }
12 |
13 | # Administrator Shell
14 | choco install -y virtualbox
15 | Update-Environment
16 |
--------------------------------------------------------------------------------
/vbox/windows/03.vagrant_install.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Administrator Privilege Shell ONLY
3 | ##############################################
4 | choco install -y vagrant
5 |
--------------------------------------------------------------------------------
/vbox/windows/04.vagrant_demo_manjaro.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Non-Administrative Privilege Shell ONLY
3 | ##############################################
4 | $workarea = "$home\vbox_tutorial"
5 | mkdir $workarea
6 |
7 | mkdir $workarea\mymanjaro
8 | cd $workarea\mymanjaro
9 |
10 | vagrant init mloskot/manjaro-i3-17.0-minimal
11 | vagrant up
12 |
13 | # Download and Install ScreenFetch on virtual guest
14 | $url = 'https://github.com/KittyKatt/screenFetch/archive/master.zip'
15 | vagrant ssh --command "curl -OL $url"
16 | vagrant ssh --command 'sudo pacman -S --noconfirm unzip'
17 | vagrant ssh --command 'unzip master.zip'
18 | vagrant ssh --command './screenFetch-master/screenfetch-dev'
19 |
--------------------------------------------------------------------------------
/vbox/windows/07.vagrant_demo_win2016_03.sh:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # ONLY RUN ON Virtual Guest
3 | # Run First:
4 | # cd $home\vbox_tutorial\mywindows
5 | # vagrant ssh
6 | # c:\tools\msys64\usr\bin\bash.exe
7 | ##############################################
8 |
9 | # install unzip package
10 | PATH=/usr/bin:$PATH
11 | pacman -S unzip
12 | # install screenfetch
13 | URL=https://github.com/KittyKatt/screenFetch/archive/master.zip
14 | curl -OL $URL
15 | unzip master.zip
16 | # run screenfetch
17 | ./screenFetch-master/screenfetch-dev
18 |
--------------------------------------------------------------------------------
/vbox/windows/08.kitchen_chefdk_install.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Administrator Privilege Shell ONLY
3 | ##############################################
4 | choco install -y chefdk
5 |
--------------------------------------------------------------------------------
/vbox/windows/09.kitchen_chef_generate.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Non-Administrative Privilege Shell ONLY
3 | ##############################################
4 | $workarea = "$home\vbox_tutorial"
5 |
6 | mkdir $workarea\cookbooks
7 | cd $workarea\cookbooks
8 |
9 | # Generate example
10 | chef generate cookbook helloworld
11 | cd helloworld
12 | # Create Ubuntu and CentOS systems
13 | kitchen create
14 |
--------------------------------------------------------------------------------
/vbox/windows/10.kitchen_screeenfetch.ps1:
--------------------------------------------------------------------------------
1 | $path = "$home\.kitchen\cache"
2 | # Download Archive
3 | $url = 'https://github.com/KittyKatt/screenFetch/archive/master.zip'
4 | $wc = New-Object System.Net.WebClient
5 | [Net.ServicePointManager]::SecurityProtocol = "tls12, tls11, tls"
6 | $wc.DownloadFile($url, "$path\master.zip")
7 |
8 | # Unzip Archive
9 | Add-Type -AssemblyName System.IO.Compression.FileSystem
10 |
11 | function Unzip
12 | {
13 | param([string]$zip, [string]$out)
14 | [System.IO.Compression.ZipFile]::ExtractToDirectory($zip, $out)
15 | }
16 |
17 | Unzip "$path\master.zip" "$path"
18 |
--------------------------------------------------------------------------------
/vbox/windows/11.kitchen_demo.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Non-Administrative Privilege Shell ONLY
3 | ##############################################
4 | $workarea = "$home\vbox_tutorial"
5 | cd $workarea\cookbooks\helloworld
6 |
7 | # Install pciutils on CentOS (required by screenfetch)
8 | kitchen exec centos --command='sudo yum -y install pciutils'
9 | # Install a snap on Ubuntu (avoids warnings w/ screenfetch)
10 | kitchen exec ubuntu --command='sudo snap install hello-world'
11 |
12 | # Run screenfetch script on all systems
13 | kitchen exec default* `
14 | --command='sudo /tmp/omnibus/cache/screenFetch-master/screenfetch-dev'
15 |
--------------------------------------------------------------------------------
/vbox/windows/12.docker_toolbox_install.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Administrator Privilege Shell ONLY
3 | ##############################################
4 | Function Update-Environment
5 | {
6 | $m = [System.Environment]::GetEnvironmentVariable("Path","Machine")
7 | $u = [System.Environment]::GetEnvironmentVariable("Path","User")
8 | $env:Path = $m + ";" + $u
9 | }
10 |
11 | choco install -y docker-toolbox
12 | Update-Environment
13 |
--------------------------------------------------------------------------------
/vbox/windows/13.docker_machine_demo.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Non-Administrative Privilege Shell ONLY
3 | ##############################################
4 |
5 | # Create a docker machine environment called default
6 | docker-machine create --driver virtualbox 'default'
7 |
8 | # Tell docker engine to use machine's docker (defaulting to default)
9 | & docker-machine env default | Invoke-Expression
10 |
11 | # Run a container fetched from docker hub
12 | docker run docker/whalesay cowsay Hello World
13 |
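As in the bash variant, `docker-machine env default` emits assignments (PowerShell syntax here) that `Invoke-Expression` applies to the session. A sketch for inspecting the result:

    # Show the assignments, then confirm they landed in the session
    docker-machine env default
    Get-ChildItem Env:DOCKER_*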
--------------------------------------------------------------------------------
/vbox/windows/14.minikube_install.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Administrator Privilege Shell ONLY
3 | ##############################################
4 | Function Update-Environment
5 | {
6 | $m = [System.Environment]::GetEnvironmentVariable("Path","Machine")
7 | $u = [System.Environment]::GetEnvironmentVariable("Path","User")
8 | $env:Path = $m + ";" + $u
9 | }
10 |
11 | # Install Minikube and its kubernetes-cli dependency
12 | choco install -y minikube
13 | Update-Environment
14 |
--------------------------------------------------------------------------------
/vbox/windows/15.minikube_demo.ps1:
--------------------------------------------------------------------------------
1 | ##############################################
2 | # Non-Administrative Privilege Shell ONLY
3 | ##############################################
4 |
5 | # Start minikube environment
6 | minikube start --vm-driver=virtualbox
7 | # Deploy Something
8 | kubectl run hello-minikube `
9 | --image=k8s.gcr.io/echoserver:1.4 `
10 | --port=8080
11 | kubectl expose deployment hello-minikube `
12 | --type=NodePort
13 |
14 |
15 | # Loop until the pod reports Running
16 | while (-not (kubectl get pod | Select-String 'hello-minikube.*Running')) { Start-Sleep 1 }
17 |
18 | $url = & minikube service hello-minikube --url
19 | (New-Object System.Net.WebClient).DownloadString($url)
20 |
--------------------------------------------------------------------------------
/vbox/windows/16.print_info.ps1:
--------------------------------------------------------------------------------
1 | # Print tool versions
2 |
3 | "`nVirtualBox $(vboxmanage --version)`n" + `
4 | "$(vagrant --version)`n" + `
5 | "$(kitchen --version)`n" + `
6 | "$(docker-machine --version)`n" + `
7 | "$(docker --version)`n" + `
8 | "$(minikube version)`n" + `
9 | "Kubectl Client: " + `
10 | "$(kubectl version | Select-string "Client")".Split('"')[5] + `
11 | "`n"
12 |
13 | "Current Runing VMS:"
14 | vboxmanage list runningvms | ForEach-Object {$_.Split('"')[1]}
15 |
--------------------------------------------------------------------------------
/vbox/windows/choco.config:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="utf-8"?>
2 | <packages>
3 |   <package id="virtualbox" />
4 |   <package id="vagrant" />
5 |   <package id="chefdk" />
6 |   <package id="docker-toolbox" />
7 |   <package id="minikube" />
8 | </packages>
9 |
10 |
--------------------------------------------------------------------------------