├── .github ├── ISSUE_TEMPLATE │ ├── BUG_REPORT.md │ └── ENHANCEMENT.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── 01-path-basics ├── 101-start-here │ ├── readme.adoc │ ├── scripts │ │ ├── aws-auth-cm.sh │ │ ├── create-kubeconfig.sh │ │ └── lab-ide-build.sh │ └── templates │ │ ├── config-k8s-workshop │ │ ├── lab-ide-novpc.template │ │ └── lab-ide-vpc.template ├── 102-your-first-cluster │ ├── instance-groups │ │ ├── nginx-on-1d-ig.yaml │ │ ├── nginx-on-p2.yaml │ │ └── readme.adoc │ └── readme.adoc └── 103-kubernetes-concepts │ ├── readme.adoc │ └── templates │ ├── cronjob.yaml │ ├── daemonset.yaml │ ├── deployment-namespace.yaml │ ├── deployment.yaml │ ├── echo-deployment.yaml │ ├── echo.yaml │ ├── job-parallel.yaml │ ├── job.yaml │ ├── namespace.yaml │ ├── pod-burstable.yaml │ ├── pod-cpu-memory.yaml │ ├── pod-guaranteed.yaml │ ├── pod-guaranteed2.yaml │ ├── pod-resources.yaml │ ├── pod-resources1.yaml │ ├── pod-resources2.yaml │ ├── pod.yaml │ ├── replicaset.yaml │ ├── resource-quota.yaml │ └── service.yaml ├── 02-path-working-with-clusters ├── 201-cluster-monitoring │ ├── readme.adoc │ └── templates │ │ ├── heapster │ │ ├── grafana.yaml │ │ ├── heapster-rbac.yaml │ │ ├── heapster.yaml │ │ └── influxdb.yaml │ │ └── prometheus │ │ ├── alertmanager.yaml │ │ ├── grafana-bundle.yaml │ │ ├── prometheus-bundle.yaml │ │ └── prometheus.yaml ├── 202-service-mesh │ └── readme.adoc ├── 203-cluster-upgrades │ └── readme.adoc ├── 204-cluster-logging-with-EFK │ ├── readme.adoc │ └── templates │ │ ├── fluentd-configmap.yaml │ │ ├── fluentd-ds.yaml │ │ ├── fluentd-role-binding.yaml │ │ ├── fluentd-role.yaml │ │ ├── fluentd-service-account.yaml │ │ └── fluentd-svc.yaml ├── 205-cluster-autoscaling │ ├── readme.adoc │ └── templates │ │ ├── 2-10-autoscaler.yaml │ │ ├── asg-policy.json │ │ └── dummy-resource-offers.yaml ├── 206-cloudformation-and-terraform │ ├── cloudformation │ │ ├── k8s-create-no-alb.yaml │ │ └── vault │ │ │ ├── readme.adoc │ │ │ ├── vault-template-asg.json │ │ │ └── vault-template.json │ └── readme.adoc └── 207-cluster-monitoring-with-datadog │ ├── readme.adoc │ └── templates │ ├── datadog │ ├── agent.yaml │ └── rbac.yaml │ ├── mongodb │ ├── mongodb.yaml │ └── storageclass.yaml │ ├── nginx │ └── nginx.yaml │ ├── redis │ └── redis.yaml │ └── webapp │ └── webapp.yaml ├── 03-path-application-development ├── 301-local-development │ └── readme.adoc ├── 302-app-discovery │ ├── readme.adoc │ └── templates │ │ └── app.yml ├── 303-app-update │ ├── images │ │ ├── app-v1 │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── package.json │ │ │ └── server.js │ │ ├── app-v2 │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── package.json │ │ │ └── server.js │ │ └── readme.adoc │ ├── readme.adoc │ └── templates │ │ ├── app-recreate.yaml │ │ ├── app-rolling.yaml │ │ ├── app-service.yaml │ │ ├── app-v1.yaml │ │ └── app-v2.yaml ├── 304-app-scaling │ └── readme.adoc ├── 305-app-scaling-custom-metrics │ ├── readme.adoc │ └── templates │ │ ├── cluster-agent │ │ ├── cluster-agent.yaml │ │ └── datadog-cluster-agent_service.yaml │ │ ├── hpa-example │ │ ├── cluster-agent-hpa-svc.yaml │ │ └── hpa-manifest.yaml │ │ └── rbac │ │ ├── rbac-cluster-agent.yaml │ │ └── rbac-hpa.yaml ├── 306-app-tracing-with-jaeger-and-x-ray │ ├── jaeger │ │ ├── images │ │ │ ├── bookinfo.png │ │ │ ├── jaeger-console.png │ │ │ ├── jaeger-dag.png │ │ │ ├── jaeger-spans.png │ │ │ └── jaeger-trace.png │ │ └── readme.adoc │ ├── readme.adoc │ └── x-ray │ │ ├── images │ │ ├── ec2-iamrole.png │ │ ├── flaskxray.png │ │ ├── iamrole.png │ │ 
├── xray1.png │ │ └── xraytrace.png │ │ ├── nodejs-microservices │ │ ├── greeter │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── package.json │ │ │ └── server.js │ │ ├── name │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── package.json │ │ │ └── server.js │ │ └── webapp │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── package.json │ │ │ └── server.js │ │ ├── python-flask-app │ │ ├── Dockerfile │ │ ├── app.py │ │ └── requirements.txt │ │ ├── readme.adoc │ │ ├── templates │ │ ├── daemonsetxray.yaml │ │ ├── flaskyxray-deployment.yaml │ │ ├── flaskyxray-service.yaml │ │ └── nodejs-microservices.yaml │ │ └── x-ray-daemon │ │ └── Dockerfile ├── 307-app-management-with-helm │ ├── readme.adoc │ └── sample │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── templates │ │ ├── db-deployment.yaml │ │ ├── db-service.yaml │ │ ├── webapp-deployment.yaml │ │ └── webapp-service.yaml │ │ └── values.yaml ├── 308-statefulsets-and-pvs │ ├── readme.adoc │ └── templates │ │ ├── mysql-configmap.yaml │ │ ├── mysql-services.yaml │ │ └── mysql-statefulset.yaml ├── 309-cicd-workflows │ ├── 308-1-codesuite │ │ └── README.adoc │ └── readme.adoc ├── 310-deploying-a-chart-repository │ ├── readme.adoc │ └── sample │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── README.md │ │ ├── sample-1.0.0.tgz │ │ ├── templates │ │ ├── db-deployment.yaml │ │ ├── db-service.yaml │ │ ├── webapp-deployment.yaml │ │ └── webapp-service.yaml │ │ └── values.yaml └── 311-chaos-engineering │ ├── experiments │ └── experiment.json │ ├── readme.adoc │ └── templates │ └── app.yaml ├── 04-path-security-and-networking ├── 401-configmaps-and-secrets │ ├── images │ │ ├── app │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── package.json │ │ │ ├── readme.adoc │ │ │ └── server.js │ │ ├── parameter-store-kubernetes │ │ │ ├── pom.xml │ │ │ ├── readme.adoc │ │ │ └── src │ │ │ │ └── main │ │ │ │ └── java │ │ │ │ └── org │ │ │ │ └── examples │ │ │ │ └── java │ │ │ │ └── App.java │ │ └── sec_mgr_app │ │ │ ├── .dockerignore │ │ │ ├── Dockerfile │ │ │ ├── package.json │ │ │ └── server.js │ ├── readme.adoc │ └── templates │ │ ├── app-pod.yaml │ │ ├── kube-auth.hcl │ │ ├── pod-parameter-store.yaml │ │ ├── pod-secret-env.yaml │ │ ├── pod-secret-volume.yaml │ │ ├── pod-secretsmanager.yaml │ │ ├── pod-vault.yaml │ │ ├── redis-config │ │ ├── redis-configmap.yaml │ │ ├── redis-pod.yaml │ │ ├── secret.yaml │ │ ├── vault-auth.yaml │ │ ├── vault-reviewer-rbac.yaml │ │ └── vault-reviewer.yaml ├── 402-authentication-and-authorization │ ├── readme.adoc │ └── templates │ │ ├── Dockerfile-consul-template │ │ ├── config.ctmpl │ │ ├── consul-template-wrapper.sh │ │ ├── deployment-with-vault.yaml │ │ ├── kube-auth.hcl │ │ ├── kube2iam-ds.yaml │ │ ├── namespace-role-annotation.yaml │ │ ├── pod-role-trust-policy.json │ │ ├── pod-with-kube2iam.yaml │ │ └── pod-with-vault.yaml ├── 403-admission-policy │ └── readme.adoc ├── 404-network-policies │ ├── calico │ │ ├── readme.adoc │ │ └── templates │ │ │ └── calico-update.yaml │ ├── readme.adoc │ ├── templates │ │ ├── allow-network-policy.yaml │ │ └── deny-all-by-default-network-policy.yaml │ └── weavenet │ │ ├── readme.adoc │ │ └── templates │ │ └── weavenet-update.yaml ├── 405-ingress-controllers │ ├── readme.adoc │ └── templates │ │ ├── alb-ingress-controller.yaml │ │ ├── alb-ingress-resource.yaml │ │ ├── app.yml │ │ ├── kube-aws-ingress-controller-deployment.yaml │ │ ├── sample-app-v1.yaml │ │ ├── sample-app-v2.yaml │ │ ├── sample-ing-traffic.yaml │ │ ├── sample-ing-v1.yaml │ │ ├── 
sample-ing-v2.yaml │ │ ├── sample-svc-v1.yaml │ │ ├── sample-svc-v2.yaml │ │ └── skipper-ingress-daemonset.yaml └── 406-coredns │ ├── readme.adoc │ └── templates │ ├── busybox.yaml │ ├── coredns-kops.yaml │ ├── coredns-minikube.yaml │ ├── coredns-service.yaml │ └── kubedns-service.yaml ├── 05-path-next-steps ├── 501-k8s-best-practices │ └── readme.adoc └── 502-for-further-reading │ └── readme.adoc ├── CODE_OF_CONDUCT.adoc ├── CONTRIBUTING.adoc ├── LICENSE ├── NOTICE ├── developer-path.adoc ├── operations-path.adoc ├── readme.adoc ├── resources ├── abstract.adoc ├── images │ ├── autoscalingdash.png │ ├── aws-kms-create-key.png │ ├── aws-kms-key-admins.png │ ├── aws-kms-key-usage-perms.png │ ├── button-continue-developer.png │ ├── button-continue-operations.png │ ├── button-continue-standard.png │ ├── button-start-developer.png │ ├── button-start-operations.png │ ├── button-start-standard.png │ ├── caching-demo.png │ ├── cicd.png │ ├── cloud9-development-environment-welcome.png │ ├── cloud9-development-welcome-screen.png │ ├── cloud9-disable-temp-credentials.png │ ├── cloud9-run-script.png │ ├── cloudformation-output-tab.png │ ├── coffeehouse.png │ ├── container-map.png │ ├── container-view.png │ ├── datadog-logo.png │ ├── datadogdashboards.png │ ├── deploy-to-aws.png │ ├── full-trace.png │ ├── go-to-redis-traces.png │ ├── hostmap.png │ ├── infinite-demo.png │ ├── istio-sample-app-product-page.png │ ├── istio-trace.png │ ├── k8s-services.png │ ├── kubernetes-aws-smile.png │ ├── kubernetes-dashboard-default.png │ ├── linkerd-default-dashboard.png │ ├── linkerd-viz.png │ ├── linkerd.png │ ├── logging-cloudwatch-es-cluster.png │ ├── logging-cloudwatch-es-overview.png │ ├── logging-cloudwatch-es-subscribe-confirmation.png │ ├── logging-cloudwatch-es-subscribe-filter-created.png │ ├── logging-cloudwatch-es-subscribe-iam.png │ ├── logging-cloudwatch-es-subscribe-log-format.png │ ├── logging-cloudwatch-es-subscribe-start-streaming.png │ ├── logging-cloudwatch-es-subscribe.png │ ├── logging-cloudwatch-fluentd-stream.png │ ├── logging-cloudwatch-kibana-default.png │ ├── logmonitor.png │ ├── minikube-dashboard.png │ ├── monitoring-grafana-dashboards-cluster.png │ ├── monitoring-grafana-dashboards-pods.png │ ├── monitoring-grafana-dashboards.png │ ├── monitoring-grafana-prometheus-dashboard-1.png │ ├── monitoring-grafana-prometheus-dashboard-2.png │ ├── monitoring-grafana-prometheus-dashboard-3.png │ ├── monitoring-grafana-prometheus-dashboard-capacity-planning.png │ ├── monitoring-grafana-prometheus-dashboard-cluster-status.png │ ├── monitoring-grafana-prometheus-dashboard-control-plane-status.png │ ├── monitoring-grafana-prometheus-dashboard-dashboard-home.png │ ├── monitoring-grafana-prometheus-dashboard-nodes.png │ ├── monitoring-nodes-after.png │ ├── monitoring-nodes-before.png │ ├── monitoring-pods-after.png │ ├── monitoring-pods-before.png │ ├── next-step-arrow.png │ ├── nginx-pod-default-page.png │ ├── nginx-welcome-page.png │ ├── redis-apm-monitor.png │ ├── redis-dashboard.png │ ├── redis-logs.png │ ├── redis-traces.png │ ├── services.png │ ├── stop_sign01.png │ ├── traces.png │ └── webapp.png ├── slides │ ├── kubecon-2017-austin.pptx │ ├── slides-old.pptx │ └── slides-workshop.pptx └── workshop-prereqs.adoc ├── standard-path.adoc └── wip-modules └── cluster-federation └── readme.adoc /.github/ISSUE_TEMPLATE/BUG_REPORT.md: -------------------------------------------------------------------------------- 1 | For this bug report, be sure to include: 2 | * A short, descriptive title. 
Ideally, other community members should be able to get a good idea of the issue just from reading the title. 3 | * A detailed description of the problem you're experiencing. This should include: 4 | * Expected behavior of the workshop and the actual behavior exhibited. 5 | * Any details of your application environment that may be relevant. 6 | * Commands and output used to reproduce the issue. 7 | * [Markdown](https://guides.github.com/features/mastering-markdown/) formatting as appropriate to make the report easier to read; for example, use code blocks when pasting a code snippet and exception stacktraces. 8 | 9 | *Description:* 10 | 11 | 12 | *Expected vs. Actual Behavior:* 13 | 14 | 15 | *Environment Details:* 16 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/ENHANCEMENT.md: -------------------------------------------------------------------------------- 1 | For this enhancement, be sure to include: 2 | * A short, descriptive title. Ideally, other community members should be able to get a good idea of the enhancement just from reading the title. 3 | * A detailed description of the proposed enhancement. Include justification for why it should be added to the workshop, and possibly example code to illustrate how it should work. 4 | * [Markdown](https://guides.github.com/features/mastering-markdown/) formatting as appropriate to make the request easier to read. 5 | * If you intend to implement this enhancement, indicate that you'd like the issue to be assigned to you. 6 | 7 | 8 | *Description:* 9 | 10 | 11 | *Justification (Why should this be added?):* 12 | 13 | 14 | *Assigned to you? (Y/N)* 15 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | *Issue #, if available (include [keywords](https://help.github.com/articles/closing-issues-using-keywords/) to close the issue as applicable, e.g. "fixes <##>"):* 2 | 3 | *Description of changes:* 4 | 5 | 6 | By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_STORE 2 | **/*.iml 3 | **/target 4 | **/.idea 5 | **/dependency-reduced-pom.xml 6 | **/journal.json 7 | **/chaostoolkit.log 8 | 03-path-application-development/306-app-management-with-helm/sample/*.tgz 9 | 03-path-application-development/309-deploying-a-chart-repository/sample/*.tgz 10 | -------------------------------------------------------------------------------- /01-path-basics/101-start-here/scripts/aws-auth-cm.sh: -------------------------------------------------------------------------------- 1 | # aws-auth ConfigMap script 2 | #title aws-auth-cm.sh 3 | #description This script will add a ConfigMap aws-auth to the EKS cluster k8s-workshop, allowing the worker nodes to join the cluster.
4 | #author @buzzsurfr 5 | #contributors @buzzsurfr @dalbhanj @cloudymind 6 | #date 2018-06-05 7 | #version 0.1 8 | #usage curl -sSL https://s3.amazonaws.com/DOC-EXAMPLE-BUCKET/v0.5/aws-auth-cm.sh | bash -s stable 9 | #============================================================================== 10 | 11 | curl -O https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/aws-auth-cm.yaml 12 | export EKS_WORKER_ROLE=$(aws cloudformation describe-stacks --stack-name k8s-workshop-worker-nodes | jq -r '.Stacks[0].Outputs[]|select(.OutputKey=="NodeInstanceRole")|.OutputValue') 13 | sed -i -e "s##${EKS_WORKER_ROLE}#g" aws-auth-cm.yaml 14 | kubectl apply -f aws-auth-cm.yaml 15 | -------------------------------------------------------------------------------- /01-path-basics/101-start-here/scripts/create-kubeconfig.sh: -------------------------------------------------------------------------------- 1 | # Create kubeconfig script 2 | #title create-kubeconfig.sh 3 | #description This script will create a kubeconfig file based on the EKS cluster k8s-workshop. 4 | #author @buzzsurfr 5 | #contributors @buzzsurfr @dalbhanj @cloudymind 6 | #date 2018-06-07 7 | #version 0.2 8 | #usage curl -sSL https://s3.amazonaws.com/DOC-EXAMPLE-BUCKET/v0.5/create-kubeconfig.sh | bash -s stable 9 | #============================================================================== 10 | 11 | # Download kubeconfig template 12 | mkdir $HOME/.kube 13 | aws s3 cp s3://DOC-EXAMPLE-BUCKET/v0.5/config-k8s-workshop $HOME/.kube/config 14 | 15 | # Configure based on EKS cluster k8s-workshop 16 | sed -i -e "s##$(aws eks describe-cluster --name k8s-workshop --query cluster.endpoint --output text)#g" $HOME/.kube/config 17 | sed -i -e "s##$(aws eks describe-cluster --name k8s-workshop --query cluster.certificateAuthority.data --output text)#g" $HOME/.kube/config 18 | sed -i -e "s##k8s-workshop#g" $HOME/.kube/config 19 | sed -i -e "s##$EKS_SERVICE_ROLE#g" $HOME/.kube/config 20 | -------------------------------------------------------------------------------- /01-path-basics/101-start-here/scripts/lab-ide-build.sh: -------------------------------------------------------------------------------- 1 | # IDE-Build script 2 | #title lab-ide-build.sh 3 | #description This script will setup the Cloud9 IDE with the prerequisite packages and code for the workshop. 
4 | #author @buzzsurfr 5 | #contributors @buzzsurfr @dalbhanj @cloudymind 6 | #date 2018-05-12 7 | #version 0.2 8 | #usage curl -sSL https://s3.amazonaws.com/lab-ide-theomazonian/lab-ide-build.sh | bash -s stable 9 | #============================================================================== 10 | 11 | # Install jq 12 | sudo yum -y install jq 13 | 14 | # Update awscli 15 | sudo -H pip install -U awscli 16 | 17 | # Install bash-completion 18 | sudo yum install bash-completion -y 19 | 20 | # Install kubectl 21 | curl -o kubectl https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/bin/linux/amd64/kubectl 22 | chmod +x kubectl && sudo mv kubectl /usr/local/bin/ 23 | echo "source <(kubectl completion bash)" >> ~/.bashrc 24 | 25 | # Install Heptio Authenticator 26 | curl -o aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/bin/linux/amd64/heptio-authenticator-aws 27 | chmod +x ./aws-iam-authenticator && sudo mv aws-iam-authenticator /usr/local/bin/ 28 | 29 | # Install kops 30 | curl -LO https://github.com/kubernetes/kops/releases/download/$(curl -s https://api.github.com/repos/kubernetes/kops/releases/latest | grep tag_name | cut -d '"' -f 4)/kops-linux-amd64 31 | chmod +x kops-linux-amd64 32 | sudo mv kops-linux-amd64 /usr/local/bin/kops 33 | 34 | # Configure AWS CLI 35 | availability_zone=$(curl http://169.254.169.254/latest/meta-data/placement/availability-zone) 36 | export AWS_DEFAULT_REGION=${availability_zone%?} 37 | 38 | # Lab-specific configuration 39 | export AWS_AVAILABILITY_ZONES="$(aws ec2 describe-availability-zones --query 'AvailabilityZones[].ZoneName' --output text | awk -v OFS="," '$1=$1')" 40 | export AWS_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id) 41 | aws ec2 describe-instances --instance-ids $AWS_INSTANCE_ID > /tmp/instance.json 42 | export AWS_STACK_NAME=$(jq -r '.Reservations[0].Instances[0]|(.Tags[]|select(.Key=="aws:cloudformation:stack-name")|.Value)' /tmp/instance.json) 43 | export AWS_ENVIRONMENT=$(jq -r '.Reservations[0].Instances[0]|(.Tags[]|select(.Key=="aws:cloud9:environment")|.Value)' /tmp/instance.json) 44 | export AWS_MASTER_STACK=${AWS_STACK_NAME%$AWS_ENVIRONMENT} 45 | export AWS_MASTER_STACK=${AWS_MASTER_STACK%?} 46 | export AWS_MASTER_STACK=${AWS_MASTER_STACK#aws-cloud9-} 47 | export KOPS_STATE_STORE=s3://$(aws cloudformation describe-stack-resource --stack-name $AWS_MASTER_STACK --logical-resource-id "KopsStateStore" | jq -r '.StackResourceDetail.PhysicalResourceId') 48 | 49 | # EKS-specific variables from CloudFormation 50 | export EKS_VPC_ID=$(aws cloudformation describe-stacks --stack-name $AWS_MASTER_STACK | jq -r '.Stacks[0].Outputs[]|select(.OutputKey=="EksVpcId")|.OutputValue') 51 | export EKS_SUBNET_IDS=$(aws cloudformation describe-stacks --stack-name $AWS_MASTER_STACK | jq -r '.Stacks[0].Outputs[]|select(.OutputKey=="EksVpcSubnetIds")|.OutputValue') 52 | export EKS_SECURITY_GROUPS=$(aws cloudformation describe-stacks --stack-name $AWS_MASTER_STACK | jq -r '.Stacks[0].Outputs[]|select(.OutputKey=="EksVpcSecurityGroups")|.OutputValue') 53 | export EKS_SERVICE_ROLE=$(aws cloudformation describe-stacks --stack-name $AWS_MASTER_STACK | jq -r '.Stacks[0].Outputs[]|select(.OutputKey=="EksServiceRoleArn")|.OutputValue') 54 | 55 | # Persist lab variables 56 | echo "AWS_DEFAULT_REGION=$AWS_DEFAULT_REGION" >> ~/.bashrc 57 | echo "AWS_AVAILABILITY_ZONES=$AWS_AVAILABILITY_ZONES" >> ~/.bashrc 58 | echo "AWS_STACK_NAME=$AWS_STACK_NAME" >> ~/.bashrc 59 | echo 
"AWS_MASTER_STACK=$AWS_MASTER_STACK" >> ~/.bashrc 60 | echo "KOPS_STATE_STORE=$KOPS_STATE_STORE" >> ~/.bashrc 61 | 62 | # Persist EKS variables 63 | echo "EKS_VPC_ID=$EKS_VPC_ID" >> ~/.bashrc 64 | echo "EKS_SUBNET_IDS=$EKS_SUBNET_IDS" >> ~/.bashrc 65 | echo "EKS_SECURITY_GROUPS=$EKS_SECURITY_GROUPS" >> ~/.bashrc 66 | echo "EKS_SERVICE_ROLE=$EKS_SERVICE_ROLE" >> ~/.bashrc 67 | 68 | # EKS-Optimized AMI 69 | if [ "$AWS_DEFAULT_REGION" == "us-east-1" ]; then 70 | export EKS_WORKER_AMI=ami-dea4d5a1 71 | elif [ "$AWS_DEFAULT_REGION" == "us-west-2" ]; then 72 | export EKS_WORKER_AMI=ami-73a6e20b 73 | fi 74 | echo "EKS_WORKER_AMI=$EKS_WORKER_AMI" >> ~/.bashrc 75 | 76 | # Create SSH key 77 | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa 78 | 79 | # Create EC2 Keypair 80 | aws ec2 create-key-pair --key-name ${AWS_STACK_NAME} --query 'KeyMaterial' --output text > $HOME/.ssh/k8s-workshop.pem 81 | chmod 0400 $HOME/.ssh/k8s-workshop.pem 82 | 83 | if [ ! -d "aws-workshop-for-kubernetes/" ]; then 84 | # Download lab Repository 85 | git clone https://github.com/aws-samples/aws-workshop-for-kubernetes 86 | fi 87 | -------------------------------------------------------------------------------- /01-path-basics/101-start-here/templates/config-k8s-workshop: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | server: 5 | certificate-authority-data: 6 | name: kubernetes 7 | contexts: 8 | - context: 9 | cluster: kubernetes 10 | user: aws 11 | name: aws 12 | current-context: aws 13 | kind: Config 14 | preferences: {} 15 | users: 16 | - name: aws 17 | user: 18 | exec: 19 | apiVersion: client.authentication.k8s.io/v1alpha1 20 | command: aws-iam-authenticator 21 | args: 22 | - "token" 23 | - "-i" 24 | - "" 25 | # - "-r" 26 | # - "" 27 | -------------------------------------------------------------------------------- /01-path-basics/102-your-first-cluster/instance-groups/nginx-on-1d-ig.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | env: test 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | imagePullPolicy: IfNotPresent 12 | nodeSelector: 13 | type: 1d-ig -------------------------------------------------------------------------------- /01-path-basics/102-your-first-cluster/instance-groups/nginx-on-p2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | labels: 6 | env: test 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx 11 | imagePullPolicy: IfNotPresent 12 | nodeSelector: 13 | type: p2-ig -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/cronjob.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1beta1 2 | kind: CronJob 3 | metadata: 4 | name: hello 5 | spec: 6 | schedule: "*/1 * * * *" 7 | jobTemplate: 8 | spec: 9 | template: 10 | metadata: 11 | labels: 12 | app: hello-cronpod 13 | spec: 14 | containers: 15 | - name: hello 16 | image: busybox 17 | args: 18 | - /bin/sh 19 | - -c 20 | - date; echo Hello World! 
21 | restartPolicy: OnFailure 22 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: prometheus-daemonset 5 | spec: 6 | selector: 7 | matchLabels: 8 | tier: monitoring 9 | name: prometheus-exporter 10 | template: 11 | metadata: 12 | labels: 13 | tier: monitoring 14 | name: prometheus-exporter 15 | spec: 16 | containers: 17 | - name: prometheus 18 | image: prom/node-exporter 19 | ports: 20 | - containerPort: 80 21 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/deployment-namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment-ns 5 | namespace: dev 6 | spec: 7 | replicas: 3 8 | selector: 9 | matchLabels: 10 | app: nginx 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx:1.12.1 19 | ports: 20 | - containerPort: 80 21 | - containerPort: 443 22 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: nginx 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.12.1 18 | ports: 19 | - containerPort: 80 20 | - containerPort: 443 21 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/echo-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: echo-deployment 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: echo-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: echo-pod 14 | spec: 15 | containers: 16 | - name: echoheaders 17 | image: k8s.gcr.io/echoserver:1.10 18 | imagePullPolicy: IfNotPresent 19 | ports: 20 | - containerPort: 8080 21 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/echo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: echo-deployment 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: echo-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: echo-pod 14 | spec: 15 | containers: 16 | - name: echoheaders 17 | image: k8s.gcr.io/echoserver:1.10 18 | imagePullPolicy: IfNotPresent 19 | ports: 20 | - containerPort: 8080 21 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/job-parallel.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: wait 5 | spec: 6 | completions: 6 7 | parallelism: 2 8 | template: 9 | metadata: 10 | name: wait 11 | spec: 12 | containers: 13 | - name: wait 14 | 
image: ubuntu 15 | command: ["sleep", "20"] 16 | restartPolicy: Never 17 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: wait 5 | spec: 6 | template: 7 | metadata: 8 | name: wait 9 | spec: 10 | containers: 11 | - name: wait 12 | image: ubuntu 13 | command: ["sleep", "20"] 14 | restartPolicy: Never 15 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/namespace.yaml: -------------------------------------------------------------------------------- 1 | kind: Namespace 2 | apiVersion: v1 3 | metadata: 4 | name: dev 5 | labels: 6 | name: dev 7 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/pod-burstable.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-pod-burstable 5 | labels: 6 | name: nginx-pod 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | resources: 12 | limits: 13 | memory: "200Mi" 14 | cpu: 1 15 | requests: 16 | memory: "100Mi" 17 | cpu: 1 18 | ports: 19 | - containerPort: 80 -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/pod-cpu-memory.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-pod 5 | labels: 6 | name: nginx-pod 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | resources: 12 | requests: 13 | memory: "100m" 14 | cpu: 1 15 | ports: 16 | - containerPort: 80 17 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/pod-guaranteed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-pod-guaranteed 5 | labels: 6 | name: nginx-pod 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | resources: 12 | limits: 13 | memory: "200Mi" 14 | cpu: 1 15 | ports: 16 | - containerPort: 80 -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/pod-guaranteed2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-pod-guaranteed2 5 | labels: 6 | name: nginx-pod 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | resources: 12 | limits: 13 | memory: "200Mi" 14 | cpu: 1 15 | requests: 16 | memory: "200Mi" 17 | cpu: 1 18 | ports: 19 | - containerPort: 80 -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/pod-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-pod2 5 | labels: 6 | name: nginx-pod 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | resources: 12 | limits: 13 | memory: "200Mi" 14 | cpu: 2 15 | requests: 16 | memory: "100Mi" 17 | cpu: 1 18 | ports: 19 | - containerPort: 80 
-------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/pod-resources1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: wildfly-pod 5 | labels: 6 | name: wildfly-pod 7 | spec: 8 | containers: 9 | - name: wildfly 10 | image: jboss/wildfly:11.0.0.Final 11 | resources: 12 | limits: 13 | memory: "200Mi" 14 | cpu: 2 15 | requests: 16 | memory: "100Mi" 17 | cpu: 1 18 | ports: 19 | - containerPort: 8080 20 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/pod-resources2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: wildfly-pod 5 | labels: 6 | name: wildfly-pod 7 | spec: 8 | containers: 9 | - name: wildfly 10 | image: jboss/wildfly:11.0.0.Final 11 | resources: 12 | limits: 13 | memory: "300Mi" 14 | cpu: 2 15 | requests: 16 | memory: "100Mi" 17 | cpu: 1 18 | ports: 19 | - containerPort: 8080 -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-pod 5 | labels: 6 | name: nginx-pod 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | ports: 12 | - containerPort: 80 -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: nginx-replicaset 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | name: nginx-replica 10 | template: 11 | metadata: 12 | labels: 13 | name: nginx-replica 14 | spec: 15 | containers: 16 | - name: nginx-replica 17 | image: nginx:1.12.1 18 | imagePullPolicy: IfNotPresent 19 | ports: 20 | - containerPort: 80 21 | - containerPort: 443 22 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/resource-quota.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ResourceQuota 3 | metadata: 4 | name: quota 5 | spec: 6 | hard: 7 | cpu: "4" 8 | memory: 6G 9 | pods: "12" 10 | replicationcontrollers: "3" 11 | services: "5" 12 | configmaps: "5" 13 | -------------------------------------------------------------------------------- /01-path-basics/103-kubernetes-concepts/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: echo-service 5 | spec: 6 | selector: 7 | app: echo-pod 8 | ports: 9 | - name: http 10 | protocol: TCP 11 | port: 80 12 | targetPort: 8080 13 | type: LoadBalancer -------------------------------------------------------------------------------- /02-path-working-with-clusters/201-cluster-monitoring/templates/heapster/grafana.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: monitoring-grafana 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | task: 
monitoring 11 | k8s-app: grafana 12 | template: 13 | metadata: 14 | labels: 15 | task: monitoring 16 | k8s-app: grafana 17 | spec: 18 | containers: 19 | - name: grafana 20 | image: k8s.gcr.io/heapster-grafana-amd64:v4.4.3 21 | ports: 22 | - containerPort: 3000 23 | protocol: TCP 24 | volumeMounts: 25 | - mountPath: /etc/ssl/certs 26 | name: ca-certificates 27 | readOnly: true 28 | - mountPath: /var 29 | name: grafana-storage 30 | env: 31 | - name: INFLUXDB_HOST 32 | value: monitoring-influxdb 33 | - name: GF_SERVER_HTTP_PORT 34 | value: "3000" 35 | # The following env variables are required to make Grafana accessible via 36 | # the kubernetes api-server proxy. On production clusters, we recommend 37 | # removing these env variables, setup auth for grafana, and expose the grafana 38 | # service using a LoadBalancer or a public IP. 39 | - name: GF_AUTH_BASIC_ENABLED 40 | value: "false" 41 | - name: GF_AUTH_ANONYMOUS_ENABLED 42 | value: "true" 43 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 44 | value: Admin 45 | - name: GF_SERVER_ROOT_URL 46 | # If you're only using the API Server proxy, set this value instead: 47 | # value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ 48 | value: / 49 | volumes: 50 | - name: ca-certificates 51 | hostPath: 52 | path: /etc/ssl/certs 53 | - name: grafana-storage 54 | emptyDir: {} 55 | --- 56 | apiVersion: v1 57 | kind: Service 58 | metadata: 59 | labels: 60 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 61 | # If you are NOT using this as an addon, you should comment out this line. 62 | kubernetes.io/cluster-service: 'true' 63 | kubernetes.io/name: monitoring-grafana 64 | name: monitoring-grafana 65 | namespace: kube-system 66 | spec: 67 | # In a production setup, we recommend accessing Grafana through an external Loadbalancer 68 | # or through a public IP. 
69 | # type: LoadBalancer 70 | # You could also use NodePort to expose the service at a randomly-generated port 71 | # type: NodePort 72 | ports: 73 | - port: 80 74 | targetPort: 3000 75 | selector: 76 | k8s-app: grafana 77 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/201-cluster-monitoring/templates/heapster/heapster-rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: heapster 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:heapster 9 | subjects: 10 | - kind: ServiceAccount 11 | name: heapster 12 | namespace: kube-system 13 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/201-cluster-monitoring/templates/heapster/heapster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: heapster 5 | namespace: kube-system 6 | --- 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | metadata: 10 | name: heapster 11 | namespace: kube-system 12 | spec: 13 | replicas: 1 14 | selector: 15 | matchLabels: 16 | task: monitoring 17 | k8s-app: heapster 18 | template: 19 | metadata: 20 | labels: 21 | task: monitoring 22 | k8s-app: heapster 23 | spec: 24 | serviceAccountName: heapster 25 | containers: 26 | - name: heapster 27 | image: k8s.gcr.io/heapster-amd64:v1.5.2 28 | imagePullPolicy: IfNotPresent 29 | command: 30 | - /heapster 31 | - --source=kubernetes:https://kubernetes.default 32 | - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086 33 | --- 34 | apiVersion: v1 35 | kind: Service 36 | metadata: 37 | labels: 38 | task: monitoring 39 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 40 | # If you are NOT using this as an addon, you should comment out this line. 41 | kubernetes.io/cluster-service: 'true' 42 | kubernetes.io/name: Heapster 43 | name: heapster 44 | namespace: kube-system 45 | spec: 46 | ports: 47 | - port: 80 48 | targetPort: 8082 49 | selector: 50 | k8s-app: heapster 51 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/201-cluster-monitoring/templates/heapster/influxdb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: monitoring-influxdb 5 | namespace: kube-system 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | task: monitoring 11 | k8s-app: influxdb 12 | template: 13 | metadata: 14 | labels: 15 | task: monitoring 16 | k8s-app: influxdb 17 | spec: 18 | containers: 19 | - name: influxdb 20 | image: k8s.gcr.io/heapster-influxdb-amd64:v1.3.3 21 | volumeMounts: 22 | - mountPath: /data 23 | name: influxdb-storage 24 | volumes: 25 | - name: influxdb-storage 26 | emptyDir: {} 27 | --- 28 | apiVersion: v1 29 | kind: Service 30 | metadata: 31 | labels: 32 | task: monitoring 33 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 34 | # If you are NOT using this as an addon, you should comment out this line. 
35 | kubernetes.io/cluster-service: 'true' 36 | kubernetes.io/name: monitoring-influxdb 37 | name: monitoring-influxdb 38 | namespace: kube-system 39 | spec: 40 | ports: 41 | - port: 8086 42 | targetPort: 8086 43 | selector: 44 | k8s-app: influxdb 45 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/alertmanager.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | resolve_timeout: 5m 3 | route: 4 | group_by: ['job'] 5 | group_wait: 30s 6 | group_interval: 5m 7 | repeat_interval: 12h 8 | receiver: 'webhook' 9 | receivers: 10 | - name: 'webhook' 11 | webhook_configs: 12 | - url: 'http://alertmanagerwh:30500/' 13 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/201-cluster-monitoring/templates/prometheus/prometheus-bundle.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | --- 6 | apiVersion: rbac.authorization.k8s.io/v1 7 | kind: ClusterRoleBinding 8 | metadata: 9 | name: prometheus-operator 10 | roleRef: 11 | apiGroup: rbac.authorization.k8s.io 12 | kind: ClusterRole 13 | name: prometheus-operator 14 | subjects: 15 | - kind: ServiceAccount 16 | name: prometheus-operator 17 | namespace: monitoring 18 | --- 19 | apiVersion: rbac.authorization.k8s.io/v1 20 | kind: ClusterRole 21 | metadata: 22 | name: prometheus-operator 23 | namespace: monitoring 24 | rules: 25 | - apiGroups: 26 | - extensions 27 | resources: 28 | - thirdpartyresources 29 | verbs: 30 | - "*" 31 | - apiGroups: 32 | - apiextensions.k8s.io 33 | resources: 34 | - customresourcedefinitions 35 | verbs: 36 | - "*" 37 | - apiGroups: 38 | - monitoring.coreos.com 39 | resources: 40 | - alertmanagers 41 | - prometheuses 42 | - servicemonitors 43 | verbs: 44 | - "*" 45 | - apiGroups: 46 | - apps 47 | resources: 48 | - statefulsets 49 | verbs: ["*"] 50 | - apiGroups: [""] 51 | resources: 52 | - configmaps 53 | - secrets 54 | verbs: ["*"] 55 | - apiGroups: [""] 56 | resources: 57 | - pods 58 | verbs: ["list", "delete"] 59 | - apiGroups: [""] 60 | resources: 61 | - services 62 | - endpoints 63 | verbs: ["get", "create", "update"] 64 | - apiGroups: [""] 65 | resources: 66 | - nodes 67 | verbs: ["list", "watch"] 68 | - apiGroups: [""] 69 | resources: 70 | - namespaces 71 | verbs: ["list"] 72 | --- 73 | apiVersion: v1 74 | kind: ServiceAccount 75 | metadata: 76 | name: prometheus-operator 77 | namespace: monitoring 78 | --- 79 | apiVersion: apps/v1 80 | kind: Deployment 81 | metadata: 82 | labels: 83 | k8s-app: prometheus-operator 84 | name: prometheus-operator 85 | namespace: monitoring 86 | spec: 87 | replicas: 1 88 | selector: 89 | matchLabels: 90 | k8s-app: prometheus-operator 91 | template: 92 | metadata: 93 | labels: 94 | k8s-app: prometheus-operator 95 | spec: 96 | containers: 97 | - args: 98 | - --kubelet-service=kube-system/kubelet 99 | - --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1 100 | image: quay.io/coreos/prometheus-operator:v0.14.1 101 | name: prometheus-operator 102 | ports: 103 | - containerPort: 8080 104 | name: http 105 | resources: 106 | limits: 107 | cpu: 200m 108 | memory: 100Mi 109 | requests: 110 | cpu: 100m 111 | memory: 50Mi 112 | serviceAccountName: prometheus-operator 113 | -------------------------------------------------------------------------------- 
/02-path-working-with-clusters/204-cluster-logging-with-EFK/templates/fluentd-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: fluentd 5 | namespace: logging 6 | labels: 7 | k8s-app: fluentd 8 | component: logging-agent 9 | spec: 10 | minReadySeconds: 10 11 | updateStrategy: 12 | type: RollingUpdate 13 | rollingUpdate: 14 | maxUnavailable: 1 15 | selector: 16 | matchLabels: 17 | k8s-app: fluentd 18 | template: 19 | metadata: 20 | labels: 21 | k8s-app: fluentd 22 | spec: 23 | containers: 24 | - name: fluentd 25 | image: quay.io/coreos/fluentd-kubernetes:v0.12.33-cloudwatch 26 | imagePullPolicy: Always 27 | command: ["fluentd", "-c", "/fluentd/etc/fluentd.conf", "-p", "/fluentd/plugins"] 28 | env: 29 | - name: FLUENTD_CONFIG 30 | value: fluentd-standalone.conf 31 | - name: AWS_REGION 32 | value: $REGION 33 | - name: AWS_ACCESS_KEY 34 | value: $ACCESS_KEY 35 | - name: AWS_SECRET_KEY 36 | value: $SECRET_KEY 37 | resources: 38 | limits: 39 | memory: 200Mi 40 | requests: 41 | cpu: 500m 42 | memory: 200Mi 43 | ports: 44 | - name: prom-metrics 45 | containerPort: 24231 46 | protocol: TCP 47 | - name: monitor-agent 48 | containerPort: 24220 49 | protocol: TCP 50 | - name: http-input 51 | containerPort: 9880 52 | protocol: TCP 53 | livenessProbe: 54 | httpGet: 55 | # Use percent encoding for query param. 56 | # The value is {"log": "health check"}. 57 | # the endpoint itself results in a new fluentd 58 | # tag 'fluentd.pod-healthcheck' 59 | path: /fluentd.pod.healthcheck?json=%7B%22log%22%3A+%22health+check%22%7D 60 | port: 9880 61 | initialDelaySeconds: 5 62 | timeoutSeconds: 1 63 | volumeMounts: 64 | - name: varlog 65 | mountPath: /var/log 66 | - name: varlibdockercontainers 67 | mountPath: /var/lib/docker/containers 68 | readOnly: true 69 | - name: fluentd-config 70 | mountPath: /fluentd/etc 71 | volumes: 72 | - name: varlog 73 | hostPath: 74 | path: /var/log 75 | - name: varlibdockercontainers 76 | hostPath: 77 | path: /var/lib/docker/containers 78 | - name: fluentd-config 79 | configMap: 80 | name: "fluentd-config" 81 | terminationGracePeriodSeconds: 60 82 | serviceAccountName: fluentd 83 | tolerations: 84 | - key: node-role.kubernetes.io/master 85 | operator: Exists 86 | effect: NoSchedule 87 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/204-cluster-logging-with-EFK/templates/fluentd-role-binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: fluentd-read 5 | namespace: logging 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: fluentd-read 10 | subjects: 11 | - kind: ServiceAccount 12 | name: fluentd 13 | namespace: logging 14 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/204-cluster-logging-with-EFK/templates/fluentd-role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: fluentd-read 5 | namespace: logging 6 | rules: 7 | - apiGroups: [""] 8 | resources: 9 | - namespaces 10 | - pods 11 | verbs: ["get", "list", "watch"] 12 | -------------------------------------------------------------------------------- 
/02-path-working-with-clusters/204-cluster-logging-with-EFK/templates/fluentd-service-account.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: fluentd 5 | namespace: logging 6 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/204-cluster-logging-with-EFK/templates/fluentd-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: fluentd 5 | namespace: logging 6 | labels: 7 | k8s-app: fluentd 8 | spec: 9 | type: ClusterIP 10 | clusterIP: None 11 | selector: 12 | k8s-app: fluentd 13 | ports: 14 | # Exposes Prometheus metrics 15 | - name: prometheus-metrics 16 | port: 24231 17 | targetPort: prom-metrics 18 | protocol: TCP 19 | # Can be accessed using "kubectl proxy" at: 20 | # http://127.0.0.1:8001/api/v1/proxy/namespaces/kube-system/services/fluentd:monitor-agent/api/plugins.json 21 | - name: monitor-agent 22 | port: 24220 23 | targetPort: monitor-agent 24 | protocol: TCP 25 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/205-cluster-autoscaling/templates/2-10-autoscaler.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: cluster-autoscaler 6 | namespace: kube-system 7 | labels: 8 | app: cluster-autoscaler 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: cluster-autoscaler 14 | template: 15 | metadata: 16 | labels: 17 | app: cluster-autoscaler 18 | spec: 19 | containers: 20 | - image: awspartners/cluster-autoscaler:v0.6.0 21 | name: cluster-autoscaler 22 | resources: 23 | limits: 24 | cpu: 100m 25 | memory: 300Mi 26 | requests: 27 | cpu: 100m 28 | memory: 300Mi 29 | command: 30 | - ./cluster-autoscaler 31 | - --v=4 32 | - --stderrthreshold=info 33 | - --cloud-provider=aws 34 | - --skip-nodes-with-local-storage=false 35 | - --nodes=2:10:nodes.example.cluster.k8s.local 36 | env: 37 | - name: AWS_REGION 38 | value: us-east-1 39 | volumeMounts: 40 | - name: ssl-certs 41 | mountPath: /etc/ssl/certs/ca-certificates.crt 42 | readOnly: true 43 | imagePullPolicy: "Always" 44 | volumes: 45 | - name: ssl-certs 46 | hostPath: 47 | path: "/etc/ssl/certs/ca-certificates.crt" 48 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/205-cluster-autoscaling/templates/asg-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "autoscaling:DescribeAutoScalingGroups", 8 | "autoscaling:DescribeAutoScalingInstances", 9 | "autoscaling:SetDesiredCapacity", 10 | "autoscaling:TerminateInstanceInAutoScalingGroup" 11 | ], 12 | "Resource": "*" 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/205-cluster-autoscaling/templates/dummy-resource-offers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: greeter 6 | spec: 7 | selector: 8 | app: greeter 9 | ports: 10 | - port: 8080 11 | --- 12 | apiVersion: apps/v1 13 | kind: Deployment 14 | metadata: 15 | name: greeter 16 | labels: 17 
| app: greeter 18 | spec: 19 | replicas: 10 20 | template: 21 | metadata: 22 | labels: 23 | app: greeter 24 | spec: 25 | containers: 26 | - name: name 27 | image: arungupta/greeter-service:latest 28 | resources: 29 | limits: 30 | cpu: 1024m 31 | memory: 1024M 32 | requests: 33 | cpu: 500m 34 | memory: 512M 35 | ports: 36 | - containerPort: 8080 37 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/206-cloudformation-and-terraform/cloudformation/vault/readme.adoc: -------------------------------------------------------------------------------- 1 | = Create Vault Server using CloudFormation template 2 | 3 | This chapter explains how to create a Vault server using CloudFormation and have the Vault dev server running as part of the bootstrap process. 4 | 5 | == CloudFormation 6 | 7 | https://aws.amazon.com/cloudformation/[AWS CloudFormation] gives developers and systems administrators an easy way to create and manage a collection of related AWS resources, provisioning and updating them in an orderly and predictable fashion. 8 | 9 | To launch this stack via the AWS CLI, use the command below. Replace the ParameterValue(s) with your subnet ID, key pair name, and VPC ID values. 10 | 11 | cd quickdeploy/cloudformation/vault 12 | $aws cloudformation create-stack \ 13 | --stack-name VaultServer-$(date "+%F-%Hh-%Mm-%Ss-%Z") \ 14 | --template-body file://vault-template.json \ 15 | --capabilities CAPABILITY_IAM \ 16 | --parameters \ 17 | ParameterKey=KeyName,ParameterValue= \ 18 | 19 | The template has AMI mappings for all regions, with parameters such as the VPC ID and subnet ID where you want your instances to be created. 20 | The following is an explanation of the input parameters: 21 | 22 | ---------------------------------------------------------- 23 | KeyName: This must be a valid EC2 KeyPair for ssh access 24 | # Required: Yes 25 | ---------------------------------------------------------- 26 | InstanceType: Instance type that you want to use 27 | # Default: Creates an m4.large instance 28 | # Required: No 29 | ---------------------------------------------------------- 30 | SubnetID: Provide an existing SubnetId if any 31 | # Default: Creates a new Subnet 32 | # Required: No 33 | ---------------------------------------------------------- 34 | VPCId: Provide an existing VPCId if any 35 | # Default: Creates a new VPC 36 | # Required: No 37 | ---------------------------------------------------------- 38 | EC2InstanceProfile: Provide an existing IAM role 39 | # Default: ecsInstanceRole 40 | # Required: No 41 | ---------------------------------------------------------- 42 | 43 | To check whether the Vault dev server has started on the newly created instance, SSH into the instance and run the command below: 44 | 45 | 46 | To SSH onto the instance $ssh -i ec2-user@ 47 | $vault status 48 | 49 | Note: vault-template.json creates a single EC2 instance running the dev Vault server with the VAULT_ADDR=http://:8200 variable set. 50 | 51 | If you want to create the Vault server instance as part of an Auto Scaling group, use the vault-template-asg.json template. The ASG template will also create a Classic Load Balancer and associate it with the Auto Scaling group, with the ELB listener mapped to port 80 and the instance port to 8200. 52 | 53 | Below are the variables that should be exported when connecting to the Vault server from your local machine.
54 | 55 | Default single EC2 instance: VAULT_ADDR=http://:8200 56 | 57 | Vault server behind ASG: VAULT_ADDR=http:// 58 | 59 | For the public IP address or ELB DNS name, check the CloudFormation stack's Outputs section after the stack is created. 60 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/206-cloudformation-and-terraform/readme.adoc: -------------------------------------------------------------------------------- 1 | = Create Kubernetes cluster using CloudFormation and Terraform 2 | :toc: 3 | :icons: 4 | :linkcss: 5 | :imagesdir: ../../resources/images 6 | 7 | This chapter explains how to create a Kubernetes cluster using CloudFormation and Terraform. 8 | 9 | == CloudFormation 10 | 11 | https://aws.amazon.com/cloudformation/[AWS CloudFormation] gives developers and systems administrators an easy way to create and manage a collection of related AWS resources, provisioning and updating them in an orderly and predictable fashion. 12 | 13 | A CloudFormation template is available in the link:cloudformation[] directory. 14 | 15 | In the example below, create a Kubernetes cluster with CloudFormation using the AWS CLI. Note that a valid EC2 KeyName registered to your account must be supplied for the command to execute successfully. 16 | 17 | cd quickdeploy 18 | aws cloudformation create-stack \ 19 | --stack-name k8s \ 20 | --template-body file://cloudformation/k8s-create-no-alb.yaml \ 21 | --capabilities CAPABILITY_IAM \ 22 | --parameters \ 23 | ParameterKey=KeyName,ParameterValue=demo-key-2 \ 24 | ParameterKey=MasterInstanceType,ParameterValue=c4.xlarge \ 25 | ParameterKey=NetworkProvider,ParameterValue=calico \ 26 | ParameterKey=NodeInstanceType,ParameterValue=c4.large \ 27 | ParameterKey=Nodes,ParameterValue=3 28 | 29 | The following is an explanation of the input parameters: 30 | 31 | KeyName: This must be a valid EC2 KeyPair for ssh access. # Required 32 | MasterInstanceType: EC2 instance type of master. # Defaulted to c4.xlarge 33 | NetworkProvider: Container networking provider. # Defaulted to none 34 | NodeInstanceType: EC2 instance type of nodes. # Defaulted to c4.large 35 | Nodes: Number of k8s nodes. # Defaulted to 3 36 | 37 | NOTE: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/[kubeadm] is used to create the cluster. Currently, it only creates a single master, with a single etcd database running on it. This means your cluster is not highly available. 38 | 39 | == Terraform 40 | 41 | The template and instructions still need to be added: https://github.com/arun-gupta/kubernetes-aws-workshop/issues/135. An illustrative workflow is sketched below, after the closing note. 42 | 43 | 44 | You are now ready to continue on with the workshop!
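As a stopgap for the Terraform section above, the following is a minimal, illustrative sketch of a typical Terraform workflow, not the workshop's official template. It assumes a hypothetical terraform/ directory containing a main.tf that describes the cluster resources; the directory, file, and plan names here are placeholders and are not part of this repository.

    cd terraform
    terraform init                   # download the provider plugins declared in main.tf
    terraform plan -out k8s.tfplan   # preview the resources that would be created
    terraform apply k8s.tfplan       # create the resources from the saved plan
    terraform destroy                # tear everything down when you are finished

Saving the plan with -out and applying that exact plan file keeps the preview and the applied changes consistent; once the official template is published via the issue linked above, prefer its instructions.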
45 | 46 | :frame: none 47 | :grid: none 48 | :valign: top 49 | 50 | [align="center", cols="1", grid="none", frame="none"] 51 | |===== 52 | |image:button-continue-operations.png[link=../../04-path-security-and-networking/401-configmaps-and-secrets] 53 | |link:../../operations-path.adoc[Go to Operations Index] 54 | |===== 55 | -------------------------------------------------------------------------------- /02-path-working-with-clusters/207-cluster-monitoring-with-datadog/templates/datadog/agent.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: DaemonSet 3 | metadata: 4 | name: dd-agent 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | app: dd-agent 10 | name: dd-agent 11 | spec: 12 | containers: 13 | - image: datadog/agent:latest 14 | imagePullPolicy: Always 15 | name: dd-agent 16 | ports: 17 | - containerPort: 8125 18 | hostPort: 8125 19 | name: dogstatsdport 20 | protocol: UDP 21 | - containerPort: 8126 22 | name: traceport 23 | protocol: TCP 24 | env: 25 | - name: DD_API_KEY 26 | value: 27 | - name: KUBERNETES 28 | value: "yes" 29 | - name: DD_APM_ENABLED 30 | value: "true" 31 | - name: DD_PROCESS_AGENT_ENABLED 32 | value: "true" 33 | - name: DD_LOGS_ENABLED 34 | value: "true" 35 | - name: DD_LOGS_CONFIG_CONTAINER_COLLECT_ALL 36 | value: "true" 37 | - name: HOST_PROC 38 | value: /host/proc 39 | - name: HOST_SYS 40 | value: /host/sys 41 | volumeMounts: 42 | - name: dockersocket 43 | mountPath: /var/run/docker.sock 44 | - name: procdir 45 | mountPath: /host/proc 46 | readOnly: true 47 | - name: cgroups 48 | mountPath: /host/sys/fs/cgroup 49 | readOnly: true 50 | - name: passwd 51 | mountPath: /etc/passwd 52 | readOnly: true 53 | - name: pointerdir 54 | mountPath: /opt/datadog-agent/run 55 | volumes: 56 | - hostPath: 57 | path: /run/docker.sock 58 | name: dockersocket 59 | - hostPath: 60 | path: /proc 61 | name: procdir 62 | - hostPath: 63 | path: /sys/fs/cgroup 64 | name: cgroups 65 | - hostPath: 66 | path: /etc/passwd 67 | name: passwd 68 | - hostPath: 69 | path: /opt/datadog-agent/run 70 | name: pointerdir 71 | --- 72 | apiVersion: v1 73 | kind: Service 74 | metadata: 75 | name: dd-agent 76 | labels: 77 | run: dd-agent 78 | spec: 79 | ports: 80 | - name: dogstatsdport 81 | port: 8125 82 | targetPort: 8125 83 | protocol: UDP 84 | - name: traceport 85 | port: 8126 86 | targetPort: 8126 87 | protocol: TCP 88 | targetPort: 8126 89 | protocol: TCP 90 | selector: 91 | app: dd-agent 92 | type: ClusterIP -------------------------------------------------------------------------------- /02-path-working-with-clusters/207-cluster-monitoring-with-datadog/templates/datadog/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: datadog-agent 5 | rules: 6 | - apiGroups: # This is required by the agent to query the Kubelet API. 
7 | - "" 8 | resources: 9 | - nodes/metrics 10 | - nodes/spec 11 | - nodes/proxy # Required to get /pods 12 | verbs: 13 | - get 14 | --- 15 | kind: ServiceAccount 16 | apiVersion: v1 17 | metadata: 18 | name: datadog-agent 19 | namespace: default 20 | --- 21 | apiVersion: rbac.authorization.k8s.io/v1 22 | kind: ClusterRoleBinding 23 | metadata: 24 | name: datadog-agent 25 | roleRef: 26 | apiGroup: rbac.authorization.k8s.io 27 | kind: ClusterRole 28 | name: datadog-agent 29 | subjects: 30 | - kind: ServiceAccount 31 | name: datadog-agent 32 | namespace: default -------------------------------------------------------------------------------- /02-path-working-with-clusters/207-cluster-monitoring-with-datadog/templates/mongodb/mongodb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: mongo 6 | labels: 7 | name: mongo 8 | spec: 9 | ports: 10 | - port: 27017 11 | targetPort: 27017 12 | clusterIP: None 13 | selector: 14 | role: mongo 15 | --- 16 | apiVersion: apps/v1beta1 17 | kind: StatefulSet 18 | metadata: 19 | name: mongo 20 | spec: 21 | serviceName: "mongo" 22 | replicas: 3 23 | template: 24 | metadata: 25 | annotations: 26 | ad.datadoghq.com/mongo.check_names: '["mongo"]' 27 | ad.datadoghq.com/mongo.init_configs: '[{}]' 28 | ad.datadoghq.com/mongo.instances: '[{"server": "mongodb://datadog:tndPhL3wrMEDuj4wLEHmbxbV@%%host%%:%%port%%"}]' 29 | labels: 30 | role: mongo 31 | environment: test 32 | spec: 33 | serviceAccountName: mongorbac 34 | terminationGracePeriodSeconds: 10 35 | containers: 36 | - name: mongo 37 | image: mongo 38 | command: 39 | - mongod 40 | - "--replSet" 41 | - rs0 42 | - "--bind_ip" 43 | - 0.0.0.0 44 | - "--smallfiles" 45 | - "--noprealloc" 46 | ports: 47 | - containerPort: 27017 48 | volumeMounts: 49 | - name: mongo-persistent-storage 50 | mountPath: /data/db 51 | - name: mongo-sidecar 52 | image: cvallance/mongo-k8s-sidecar 53 | env: 54 | - name: MONGO_SIDECAR_POD_LABELS 55 | value: "role=mongo,environment=test" 56 | volumeClaimTemplates: 57 | - metadata: 58 | name: mongo-persistent-storage 59 | annotations: 60 | volume.beta.kubernetes.io/storage-class: "fast" 61 | spec: 62 | accessModes: [ "ReadWriteOnce" ] 63 | resources: 64 | requests: 65 | storage: 1Gi 66 | --- 67 | kind: ClusterRole 68 | apiVersion: rbac.authorization.k8s.io/v1 69 | metadata: 70 | name: mongorbac 71 | rules: 72 | - apiGroups: 73 | - "" 74 | resources: 75 | - pods 76 | verbs: 77 | - get 78 | - list 79 | --- 80 | apiVersion: rbac.authorization.k8s.io/v1 81 | kind: ClusterRoleBinding 82 | metadata: 83 | name: mongorbac 84 | roleRef: 85 | apiGroup: rbac.authorization.k8s.io 86 | kind: ClusterRole 87 | name: mongorbac 88 | subjects: 89 | - kind: ServiceAccount 90 | name: mongorbac 91 | namespace: default 92 | --- 93 | kind: ServiceAccount 94 | apiVersion: v1 95 | metadata: 96 | name: mongorbac 97 | namespace: default -------------------------------------------------------------------------------- /02-path-working-with-clusters/207-cluster-monitoring-with-datadog/templates/mongodb/storageclass.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1beta1 3 | metadata: 4 | name: fast 5 | provisioner: kubernetes.io/aws-ebs 6 | parameters: 7 | type: gp2 -------------------------------------------------------------------------------- 
/02-path-working-with-clusters/207-cluster-monitoring-with-datadog/templates/nginx/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | replicas: 1 7 | template: # create pods using pod definition in this template 8 | metadata: 9 | annotations: 10 | ad.datadoghq.com/nginx.check_names: '["nginx"]' 11 | ad.datadoghq.com/nginx.init_configs: '[{}]' 12 | ad.datadoghq.com/nginx.instances: '[{"nginx_status_url": "http://%%host%%/nginx_status"}]' 13 | labels: 14 | role: nginx 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: charlyyfon/nodeapp:nginx 19 | imagePullPolicy: Always 20 | ports: 21 | - containerPort: 80 22 | volumeMounts: 23 | - name: "config" 24 | mountPath: "/etc/nginx/nginx.conf" 25 | subPath: "nginx.conf" 26 | volumes: 27 | - name: "config" 28 | configMap: 29 | name: "nginxconfig" 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: nginx-deployment 35 | spec: 36 | ports: 37 | - name: nginx 38 | port: 80 39 | targetPort: 80 40 | protocol: TCP 41 | selector: 42 | role: nginx 43 | type: LoadBalancer 44 | --- 45 | apiVersion: v1 46 | data: 47 | nginx.conf: |+ 48 | worker_processes 5; 49 | events { 50 | worker_connections 4096; 51 | } 52 | http { 53 | server { 54 | location /nginx_status { 55 | stub_status on; 56 | access_log /dev/stdout; 57 | allow all; 58 | } 59 | location / { 60 | proxy_pass http://fan:5000; 61 | proxy_set_header Host $host; 62 | proxy_set_header X-Real-IP $remote_addr; 63 | proxy_redirect off; 64 | } 65 | } 66 | } 67 | kind: ConfigMap 68 | metadata: 69 | name: nginxconfig 70 | namespace: default -------------------------------------------------------------------------------- /02-path-working-with-clusters/207-cluster-monitoring-with-datadog/templates/redis/redis.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: redis 5 | spec: 6 | replicas: 1 # tells deployment to run 2 pods matching the template 7 | template: # create pods using pod definition in this template 8 | metadata: 9 | annotations: 10 | ad.datadoghq.com/redis.check_names: '["redisdb"]' 11 | ad.datadoghq.com/redis.init_configs: '[{}]' 12 | ad.datadoghq.com/redis.instances: '[{"host": "%%host%%","port":"6379"}]' 13 | labels: 14 | role: redis 15 | spec: 16 | containers: 17 | - name: redis 18 | image: charlyyfon/nodeapp:redis 19 | imagePullPolicy: Always 20 | ports: 21 | - name: redis 22 | containerPort: 6379 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: redis 28 | labels: 29 | role: redis 30 | spec: 31 | ports: 32 | - port: 6379 33 | targetPort: 6379 34 | selector: 35 | role: redis -------------------------------------------------------------------------------- /02-path-working-with-clusters/207-cluster-monitoring-with-datadog/templates/webapp/webapp.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: fan 5 | spec: 6 | replicas: 1 # tells deployment to run 2 pods matching the template 7 | template: # create pods using pod definition in this template 8 | metadata: 9 | labels: 10 | role: fan 11 | spec: 12 | containers: 13 | - name: fan 14 | image: charlyyfon/nodeapp:fetch 15 | imagePullPolicy: Always 16 | ports: 17 | - name: fan 18 | containerPort: 5000 19 | env: 20 | - name: API_KEY 21 | value: DD_API_KEY 
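          # NOTE: the literal value DD_API_KEY above is a placeholder; replace it with your Datadog API key before deploying.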
22 | --- 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: fan 27 | labels: 28 | role: fan 29 | spec: 30 | ports: 31 | - port: 5000 32 | targetPort: 5000 33 | protocol: TCP 34 | selector: 35 | role: fan 36 | type: ClusterIP -------------------------------------------------------------------------------- /03-path-application-development/302-app-discovery/templates/app.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: name-service 5 | spec: 6 | selector: 7 | app: name-pod 8 | ports: 9 | - port: 8080 10 | --- 11 | apiVersion: apps/v1 12 | kind: ReplicaSet 13 | metadata: 14 | name: name-rs 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: name-pod 20 | template: 21 | metadata: 22 | labels: 23 | app: name-pod 24 | spec: 25 | containers: 26 | - name: name 27 | image: arungupta/name-service:latest 28 | ports: 29 | - containerPort: 8080 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: greeter-service 35 | spec: 36 | selector: 37 | app: greeter-pod 38 | ports: 39 | - port: 8080 40 | --- 41 | apiVersion: apps/v1 42 | kind: ReplicaSet 43 | metadata: 44 | name: greeter-rs 45 | spec: 46 | replicas: 1 47 | selector: 48 | matchLabels: 49 | app: greeter-pod 50 | template: 51 | metadata: 52 | labels: 53 | app: greeter-pod 54 | spec: 55 | containers: 56 | - name: name 57 | image: arungupta/greeter-service:latest 58 | ports: 59 | - containerPort: 8080 60 | --- 61 | apiVersion: v1 62 | kind: Service 63 | metadata: 64 | name: webapp-service 65 | spec: 66 | selector: 67 | app: webapp-pod 68 | ports: 69 | - name: web 70 | port: 80 71 | targetPort: 8080 72 | type: LoadBalancer 73 | --- 74 | apiVersion: apps/v1 75 | kind: ReplicaSet 76 | metadata: 77 | name: webapp-rs 78 | spec: 79 | replicas: 1 80 | selector: 81 | matchLabels: 82 | app: webapp-pod 83 | template: 84 | metadata: 85 | labels: 86 | app: webapp-pod 87 | spec: 88 | containers: 89 | - name: webapp-pod 90 | image: arungupta/webapp-service:latest 91 | env: 92 | - name: NAME_SERVICE_HOST 93 | value: name-service 94 | - name: NAME_SERVICE_PORT 95 | value: "8080" 96 | - name: NAME_SERVICE_PATH 97 | value: / 98 | - name: GREETER_SERVICE_HOST 99 | value: greeter-service 100 | - name: GREETER_SERVICE_PORT 101 | value: "8080" 102 | - name: GREETER_SERVICE_PATH 103 | value: / 104 | ports: 105 | - containerPort: 8080 106 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/app-v1/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | 4 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/app-v1/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:boron 2 | 3 | # Create app directory 4 | WORKDIR /usr/src/app 5 | 6 | # Install app dependencies 7 | COPY package.json . 8 | # For npm@5 or later, copy package-lock.json as well 9 | # COPY package.json package-lock.json . 10 | 11 | RUN npm install 12 | 13 | # Bundle app source 14 | COPY . . 
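# Copying the application source after `npm install` keeps the dependency layer cached when only the source changes.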
15 | 16 | EXPOSE 8080 17 | CMD [ "npm", "start" ] 18 | 19 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/app-v1/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "webapp", 3 | "version": "1.0.0", 4 | "description": "Webapp using Node.js", 5 | "author": "Arun Gupta", 6 | "main": "server.js", 7 | "scripts": { 8 | "start": "node server.js" 9 | }, 10 | "dependencies": { 11 | "express": "^4.13.3" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/app-v1/server.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const express = require('express'); 4 | 5 | // Constants 6 | const PORT = 8080; 7 | const HOST = '0.0.0.0'; 8 | 9 | // App 10 | const app = express(); 11 | app.get('/', (req, res) => { 12 | res.send("Hello World!"); 13 | }); 14 | 15 | app.listen(PORT, HOST); 16 | console.log(`Running on http://${HOST}:${PORT}`); 17 | 18 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/app-v2/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | 4 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/app-v2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:boron 2 | 3 | # Create app directory 4 | WORKDIR /usr/src/app 5 | 6 | # Install app dependencies 7 | COPY package.json . 8 | # For npm@5 or later, copy package-lock.json as well 9 | # COPY package.json package-lock.json . 10 | 11 | RUN npm install 12 | 13 | # Bundle app source 14 | COPY . . 15 | 16 | EXPOSE 8080 17 | CMD [ "npm", "start" ] 18 | 19 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/app-v2/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "webapp", 3 | "version": "1.0.0", 4 | "description": "Webapp using Node.js", 5 | "author": "Arun Gupta", 6 | "main": "server.js", 7 | "scripts": { 8 | "start": "node server.js" 9 | }, 10 | "dependencies": { 11 | "express": "^4.13.3" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/app-v2/server.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const express = require('express'); 4 | 5 | // Constants 6 | const PORT = 8080; 7 | const HOST = '0.0.0.0'; 8 | 9 | // App 10 | const app = express(); 11 | app.get('/', (req, res) => { 12 | res.send("Howdy World!"); 13 | }); 14 | 15 | app.listen(PORT, HOST); 16 | console.log(`Running on http://${HOST}:${PORT}`); 17 | 18 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/images/readme.adoc: -------------------------------------------------------------------------------- 1 | = Docker images for Kubernetes Application Upgrade 2 | 3 | == Docker Container 4 | 5 | . 
Build Docker images: 6 | 7 | docker image build -t arungupta/app-upgrade:v1 app-v1 8 | docker image build -t arungupta/app-upgrade:v2 app-v2 9 | 10 | . Run Docker container: 11 | 12 | docker container run -d -p 8080:8080 arungupta/app-upgrade:v1 13 | docker container run -d -p 8081:8080 arungupta/app-upgrade:v2 14 | 15 | . Access v1 application: 16 | 17 | curl http://localhost:8080 18 | Hello World! 19 | 20 | . Access v2 application: 21 | 22 | $ curl http://localhost:8081 23 | Howdy World! 24 | 25 | == Kubernetes 26 | 27 | . Run pod: 28 | 29 | kubectl run app-v1 --image=arungupta/app-upgrade:v1 30 | kubectl run app-v2 --image=arungupta/app-upgrade:v2 31 | 32 | . Publish service: 33 | 34 | kubectl expose deployment/app-v1 --target-port=8080 --port=8080 --name=app-v1 35 | kubectl expose deployment/app-v2 --target-port=8080 --port=8081 --name=app-v2 36 | 37 | . Access v1 application (on minikube): 38 | 39 | curl http://localhost:8080 40 | Hello World! 41 | + 42 | For a Kubernetes cluster running on AWS, you need to run proxy and access the application using http://localhost:8001/api/v1/proxy/namespaces/default/services/app-v1/. 43 | + 44 | . Access v2 application: 45 | 46 | curl http://localhost:8081 47 | Howdy World! 48 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/templates/app-recreate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: app-recreate 5 | spec: 6 | replicas: 5 7 | selector: 8 | matchLabels: 9 | name: app-recreate 10 | strategy: 11 | type: Recreate 12 | template: 13 | metadata: 14 | labels: 15 | name: app-recreate 16 | spec: 17 | containers: 18 | - name: app-recreate 19 | image: arungupta/app-upgrade:v1 20 | ports: 21 | - containerPort: 8080 22 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/templates/app-rolling.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: app-rolling 5 | spec: 6 | replicas: 5 7 | selector: 8 | matchLabels: 9 | name: app-rolling 10 | strategy: 11 | type: RollingUpdate 12 | rollingUpdate: 13 | maxSurge: 1 14 | maxUnavailable: 1 15 | template: 16 | metadata: 17 | labels: 18 | name: app-rolling 19 | spec: 20 | containers: 21 | - name: app-rolling 22 | image: arungupta/app-upgrade:v1 23 | ports: 24 | - containerPort: 8080 25 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/templates/app-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: app-service 5 | spec: 6 | selector: 7 | name: app 8 | ports: 9 | - name: app 10 | port: 80 11 | type: LoadBalancer -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/templates/app-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: app-v1 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | name: app 10 | version: v1 11 | template: 12 | metadata: 13 | labels: 14 | name: app 15 | version: v1 16 | spec: 17 | containers: 18 | - name: app 19 | image: arungupta/app-upgrade:v1 20 | 
ports: 21 | - containerPort: 8080 22 | -------------------------------------------------------------------------------- /03-path-application-development/303-app-update/templates/app-v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: app-v2 5 | spec: 6 | replicas: 2 7 | selector: 8 | matchLabels: 9 | name: app 10 | version: v2 11 | template: 12 | metadata: 13 | labels: 14 | name: app 15 | version: v2 16 | spec: 17 | containers: 18 | - name: app 19 | image: arungupta/app-upgrade:v2 20 | ports: 21 | - containerPort: 8080 22 | -------------------------------------------------------------------------------- /03-path-application-development/305-app-scaling-custom-metrics/templates/cluster-agent/cluster-agent.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: datadog-cluster-agent 5 | namespace: default 6 | spec: 7 | template: 8 | metadata: 9 | labels: 10 | app: datadog-cluster-agent 11 | name: datadog-agent 12 | spec: 13 | serviceAccountName: datadog-cluster-agent 14 | containers: 15 | - image: datadog/cluster-agent:latest 16 | imagePullPolicy: Always 17 | name: datadog-cluster-agent 18 | env: 19 | - name: DD_API_KEY 20 | value: '' 21 | - name: DD_APP_KEY 22 | value: '' 23 | - name: DD_COLLECT_KUBERNETES_EVENTS 24 | value: "true" 25 | - name: DD_LEADER_ELECTION 26 | value: "true" 27 | - name: DD_CLUSTER_AGENT_AUTH_TOKEN 28 | value: 29 | - name: DD_EXTERNAL_METRICS_PROVIDER_ENABLED 30 | value: 'true' -------------------------------------------------------------------------------- /03-path-application-development/305-app-scaling-custom-metrics/templates/cluster-agent/datadog-cluster-agent_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: datadog-cluster-agent 5 | labels: 6 | app: datadog-cluster-agent 7 | spec: 8 | ports: 9 | - port: 5005 # Has to be the same as the one exposed in the DCA. Default is 5005. 
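    # The node-based Datadog agents reach the Cluster Agent through this Service on the port above.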
10 | protocol: TCP 11 | selector: 12 | app: datadog-cluster-agent -------------------------------------------------------------------------------- /03-path-application-development/305-app-scaling-custom-metrics/templates/hpa-example/cluster-agent-hpa-svc.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: datadog-custom-metrics-server 5 | spec: 6 | selector: 7 | app: datadog-cluster-agent 8 | ports: 9 | - protocol: TCP 10 | port: 443 11 | targetPort: 443 -------------------------------------------------------------------------------- /03-path-application-development/305-app-scaling-custom-metrics/templates/hpa-example/hpa-manifest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: autoscaling/v2beta1 2 | kind: HorizontalPodAutoscaler 3 | metadata: 4 | name: nginxext 5 | spec: 6 | minReplicas: 1 7 | maxReplicas: 5 8 | scaleTargetRef: 9 | apiVersion: apps/v1 10 | kind: Deployment 11 | name: nginx 12 | metrics: 13 | - type: External 14 | external: 15 | metricName: nginx.net.request_per_s 16 | metricSelector: 17 | matchLabels: 18 | kube_container_name: nginx 19 | targetAverageValue: 50 -------------------------------------------------------------------------------- /03-path-application-development/305-app-scaling-custom-metrics/templates/rbac/rbac-cluster-agent.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: datadog-cluster-agent 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - services 10 | - events 11 | - endpoints 12 | - pods 13 | - nodes 14 | - componentstatuses 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | - apiGroups: 20 | - "autoscaling" 21 | resources: 22 | - horizontalpodautoscalers 23 | verbs: 24 | - list 25 | - watch 26 | - apiGroups: 27 | - "" 28 | resources: 29 | - configmaps 30 | resourceNames: 31 | - datadogtoken # Kubernetes event collection state 32 | - datadog-leader-election # Leader election token 33 | verbs: 34 | - get 35 | - update 36 | - apiGroups: # To create the leader election token 37 | - "" 38 | resources: 39 | - configmaps 40 | verbs: 41 | - create 42 | - get 43 | - update 44 | - nonResourceURLs: 45 | - "/version" 46 | - "/healthz" 47 | verbs: 48 | - get 49 | --- 50 | apiVersion: rbac.authorization.k8s.io/v1 51 | kind: ClusterRoleBinding 52 | metadata: 53 | name: datadog-cluster-agent 54 | roleRef: 55 | apiGroup: rbac.authorization.k8s.io 56 | kind: ClusterRole 57 | name: datadog-cluster-agent 58 | subjects: 59 | - kind: ServiceAccount 60 | name: datadog-cluster-agent 61 | namespace: default 62 | --- 63 | kind: ServiceAccount 64 | apiVersion: v1 65 | metadata: 66 | name: datadog-cluster-agent 67 | namespace: default -------------------------------------------------------------------------------- /03-path-application-development/305-app-scaling-custom-metrics/templates/rbac/rbac-hpa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: system:auth-delegator 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: system:auth-delegator 9 | subjects: 10 | - kind: ServiceAccount 11 | name: dca 12 | namespace: default 13 | --- 14 | apiVersion: rbac.authorization.k8s.io/v1 15 | kind: RoleBinding 16 | metadata: 17 | name: dca 
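  # This RoleBinding is created in kube-system because the extension-apiserver-authentication-reader Role it references lives there.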
18 | namespace: kube-system 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: Role 22 | name: extension-apiserver-authentication-reader 23 | subjects: 24 | - kind: ServiceAccount 25 | name: dca 26 | namespace: default 27 | --- 28 | apiVersion: apiregistration.k8s.io/v1beta1 29 | kind: APIService 30 | metadata: 31 | name: v1beta1.external.metrics.k8s.io 32 | spec: 33 | insecureSkipTLSVerify: true 34 | group: external.metrics.k8s.io 35 | groupPriorityMinimum: 100 36 | versionPriority: 100 37 | service: 38 | name: datadog-custom-metrics-server 39 | namespace: default 40 | version: v1beta1 41 | --- 42 | apiVersion: rbac.authorization.k8s.io/v1 43 | kind: ClusterRole 44 | metadata: 45 | name: external-metrics-reader 46 | rules: 47 | - apiGroups: 48 | - "external.metrics.k8s.io" 49 | resources: 50 | - "*" 51 | verbs: 52 | - list 53 | - get 54 | - watch 55 | --- 56 | apiVersion: rbac.authorization.k8s.io/v1 57 | kind: ClusterRoleBinding 58 | metadata: 59 | name: external-metrics-reader 60 | roleRef: 61 | apiGroup: rbac.authorization.k8s.io 62 | kind: ClusterRole 63 | name: external-metrics-reader 64 | subjects: 65 | - kind: ServiceAccount 66 | name: horizontal-pod-autoscaler 67 | namespace: kube-system 68 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/bookinfo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/bookinfo.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/jaeger-console.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/jaeger-console.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/jaeger-dag.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/jaeger-dag.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/jaeger-spans.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/jaeger-spans.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/jaeger-trace.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/images/jaeger-trace.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/jaeger/readme.adoc: -------------------------------------------------------------------------------- 1 | = Application tracing using Jaeger 2 | :toc: 3 | :linkcss: 4 | :imagesdir: images 5 | 6 | This section will explain how to use http://jaeger.readthedocs.io/en/latest/[Jaeger] to perform distributed tracing on applications deployed on Kubernetes. 7 | Jaeger, inspired by Dapper and OpenZipkin, is a distributed tracing system released as open source by Uber Technologies. 8 | It can be used for monitoring microservice-based architectures and supports the following features: 9 | 10 | * Distributed context propagation 11 | * Distributed transaction monitoring 12 | * Root cause analysis 13 | * Service dependency analysis 14 | * Performance / latency optimization 15 | 16 | In September 2017, Jaeger became a member of the link:https://www.cncf.io/[CNCF]. 17 | 18 | == Pre-requisites 19 | 20 | A cluster with 3 master nodes and 5 worker nodes, as explained at link:../cluster-install#multi-master-multi-node-multi-az-gossip-based-cluster[], is used for this chapter. 21 | 22 | We will use the Istio service mesh sample application, so follow these steps to link:../../service-mesh#install-istio[install istio] and link:../../service-mesh#deploying-the-sample-application[deploy the sample application]. 23 | Check that you can see the BookInfo product page in your browser, as seen below. 24 | 25 | image::bookinfo.png[] 26 | 27 | == Deploy Jaeger 28 | 29 | Jaeger can be deployed in two ways: 30 | 31 | * a production setup which uses persistent storage for storing the application traces 32 | * an all-in-one setup which uses in-memory storage and is not suitable for production use 33 | 34 | We will use the all-in-one setup, which will deploy the Jaeger agent, collector and query service as a single pod. 35 | 36 | Deploy the Jaeger all-in-one pod into your cluster: 37 | 38 | kubectl apply -n istio-system -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml 39 | 40 | $ kubectl apply -n istio-system -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml 41 | deployment "jaeger-deployment" created 42 | service "jaeger-query" created 43 | service "jaeger-collector" created 44 | service "jaeger-agent" created 45 | service "zipkin" created 46 | 47 | If all components were installed successfully, you should be able to see the Jaeger console. Use the shortcut below to 48 | open the Jaeger console. This may take a minute or two, first for the load balancer to be created, and second for it 49 | to hook up with the service it exposes. Just keep refreshing the browser until the Jaeger console appears. 50 | 51 | JAEGER_INGRESS=$(kubectl get svc jaeger-query -n istio-system -o jsonpath="{.status.loadBalancer.ingress[0].*}") 52 | open http://$JAEGER_INGRESS 53 | 54 | You should see the Jaeger console. 55 | 56 | image::jaeger-console.png[] 57 | 58 | In the Jaeger console, select 'productpage' from the Services drop down, and click 'Find Traces'. 59 | If there are no Services in the drop down (i.e.
you only see a '-'), go to the BookInfo application in your browser and refresh the product page. 60 | This will send an HTTP request to the BookInfo application. The HTTP trace will be captured by Jaeger and be visible in the Jaeger console. You'll need to 61 | refresh the Jaeger console page in order to see the list of Services. 62 | 63 | image::jaeger-spans.png[] 64 | 65 | For each trace (you may only have one), you'll see the total time required by the call, and the number of spans involved (in this case, 11). 66 | In one of the traces, click 'productpage'. You'll see various spans showing the tracing of your request through the BookInfo application, 67 | with the time taken for each call. As the BookInfo application is using Istio for pod-to-pod communication, you can also see the small 68 | overhead Istio introduces into each of the calls. 69 | 70 | image::jaeger-trace.png[] 71 | 72 | Jaeger can also show you the path taken by a request after it enters via your endpoint. Click on 'Dependencies' in the menu, 73 | then the 'DAG' tab. Jaeger will show you the different microservices invoked by your request. 74 | 75 | image::jaeger-dag.png[] 76 | 77 | == Cleanup 78 | $ kubectl delete -n istio-system -f https://raw.githubusercontent.com/jaegertracing/jaeger-kubernetes/master/all-in-one/jaeger-all-in-one-template.yml 79 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/readme.adoc: -------------------------------------------------------------------------------- 1 | = Application tracing 2 | :toc: 3 | :icons: 4 | :linkcss: 5 | :imagesdir: ../../resources/images 6 | 7 | This section will explain how to perform distributed tracing on applications deployed on Kubernetes. 8 | 9 | We will cover the following tracing frameworks: 10 | 11 | * link:x-ray[X-Ray] 12 | * link:jaeger[Jaeger] 13 | 14 | Once you've explored link:x-ray[X-Ray] and link:jaeger[Jaeger], you are ready to continue on with the workshop!
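TIP: if the jaeger-query load balancer from the Jaeger section is still provisioning, a port-forward to the all-in-one pod is a quick alternative. This is a hedged sketch; it assumes the upstream all-in-one template labels the pod `app=jaeger` and that the query UI listens on its default port 16686.

    JAEGER_POD=$(kubectl get pods -n istio-system -l app=jaeger -o jsonpath='{.items[0].metadata.name}')
    kubectl port-forward -n istio-system $JAEGER_POD 16686:16686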
15 | 16 | :frame: none 17 | :grid: none 18 | :valign: top 19 | 20 | [align="center", cols="1", grid="none", frame="none"] 21 | |===== 22 | |image:button-continue-developer.png[link=../../03-path-application-development/306-app-management-with-helm] 23 | |link:../../developer-path.adoc[Go to Developer Index] 24 | |===== 25 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/ec2-iamrole.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/ec2-iamrole.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/flaskxray.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/flaskxray.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/iamrole.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/iamrole.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/xray1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/xray1.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/xraytrace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/images/xraytrace.png -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/greeter/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | 4 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/greeter/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:boron 2 | 3 | # Create app directory 4 | WORKDIR /usr/src/app 5 | 6 | # Install app dependencies 7 | COPY package.json . 8 | # For npm@5 or later, copy package-lock.json as well 9 | # COPY package.json package-lock.json . 
10 | 11 | RUN npm install 12 | 13 | # Bundle app source 14 | COPY . . 15 | 16 | EXPOSE 8080 17 | CMD [ "npm", "start" ] 18 | 19 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/greeter/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "greeter", 3 | "version": "1.0.0", 4 | "description": "Greeter service using Node.js", 5 | "author": "Arun Gupta", 6 | "main": "server.js", 7 | "scripts": { 8 | "start": "node server.js" 9 | }, 10 | "dependencies": { 11 | "aws-sdk": "2.9.0", 12 | "aws-xray-sdk": "1.1.4", 13 | "express": "^4.13.3" 14 | } 15 | } 16 | 17 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/greeter/server.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | // Include the AWS X-Ray Node.js SDK and set configuration 4 | var XRay = require('aws-xray-sdk'); 5 | var AWS = require('aws-sdk'); 6 | const express = require('express'); 7 | 8 | // Constants 9 | const PORT = 8080; 10 | const HOST = '0.0.0.0'; 11 | 12 | AWS.config.region = process.env.REGION 13 | XRay.config([XRay.plugins.EC2Plugin, XRay.plugins.ECSPlugin]); 14 | 15 | 16 | // App 17 | const app = express(); 18 | app.use(XRay.express.openSegment('greeter-svc')); 19 | 20 | app.get('/*', (req, res) => { 21 | var greet = 'Hello'; 22 | 23 | var seg = XRay.getSegment(); 24 | seg.addAnnotation('greet_req', req.query['greet']); 25 | 26 | console.log('greet[greet]: ' + req.query['greet']); 27 | 28 | if (req.query['greet'] == 'ho') { 29 | greet = 'Howdy'; 30 | } 31 | 32 | res.send(greet); 33 | }); 34 | 35 | app.use(XRay.express.closeSegment()); 36 | app.listen(PORT, HOST); 37 | console.log(`Running on http://${HOST}:${PORT}`); 38 | 39 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/name/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | 4 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/name/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:boron 2 | 3 | # Create app directory 4 | WORKDIR /usr/src/app 5 | 6 | # Install app dependencies 7 | COPY package.json . 8 | # For npm@5 or later, copy package-lock.json as well 9 | # COPY package.json package-lock.json . 10 | 11 | RUN npm install 12 | 13 | # Bundle app source 14 | COPY . . 
15 | 16 | EXPOSE 8080 17 | CMD [ "npm", "start" ] 18 | 19 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/name/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "name", 3 | "version": "1.0.0", 4 | "description": "Name service using Node.js", 5 | "author": "Arun Gupta", 6 | "main": "server.js", 7 | "scripts": { 8 | "start": "node server.js" 9 | }, 10 | "dependencies": { 11 | "aws-sdk": "2.9.0", 12 | "aws-xray-sdk": "1.1.4", 13 | "express": "^4.13.3" 14 | } 15 | } 16 | 17 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/name/server.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | // Include the AWS X-Ray Node.js SDK and set configuration 4 | var XRay = require('aws-xray-sdk'); 5 | var AWS = XRay.captureAWS(require('aws-sdk')); 6 | const express = require('express'); 7 | 8 | // Constants 9 | const PORT = 8080; 10 | const HOST = '0.0.0.0'; 11 | 12 | AWS.config.region = process.env.REGION 13 | XRay.config([XRay.plugins.EC2Plugin, XRay.plugins.ECSPlugin]); 14 | 15 | // App 16 | const app = express(); 17 | app.use(XRay.express.openSegment('name-svc')); 18 | 19 | 20 | app.get('/*', (req, res) => { 21 | var name = 'Arun'; 22 | 23 | var seg = XRay.getSegment(); 24 | seg.addAnnotation('name_req', req.query['id']); 25 | 26 | console.log('name[id]: ' + req.query['id']); 27 | 28 | if (req.query['id'] == '1') { 29 | name = 'Sheldon'; 30 | } 31 | 32 | res.send(name); 33 | }); 34 | 35 | app.use(XRay.express.closeSegment()); 36 | app.listen(PORT, HOST); 37 | console.log(`Running on http://${HOST}:${PORT}`); 38 | 39 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/webapp/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | 4 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/webapp/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:boron 2 | 3 | # Create app directory 4 | WORKDIR /usr/src/app 5 | 6 | # Install app dependencies 7 | COPY package.json . 8 | # For npm@5 or later, copy package-lock.json as well 9 | # COPY package.json package-lock.json . 10 | 11 | RUN npm install 12 | 13 | # Bundle app source 14 | COPY . . 
15 | 16 | EXPOSE 8080 17 | CMD [ "npm", "start" ] 18 | 19 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/webapp/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "webapp", 3 | "version": "1.0.0", 4 | "description": "Webapp using Node.js", 5 | "author": "Arun Gupta", 6 | "main": "server.js", 7 | "scripts": { 8 | "start": "node server.js" 9 | }, 10 | "dependencies": { 11 | "express": "^4.13.3", 12 | "aws-sdk": "2.9.0", 13 | "aws-xray-sdk": "1.1.4", 14 | "sync-request": "^4.1.0" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/nodejs-microservices/webapp/server.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | // Include the AWS X-Ray Node.js SDK and set configuration 4 | //ref: https://docs.aws.amazon.com/xray-sdk-for-nodejs/latest/reference/ 5 | const XRay = require('aws-xray-sdk'); 6 | const AWS = XRay.captureAWS(require('aws-sdk')); 7 | const http = XRay.captureHTTPs(require('http')); 8 | const express = require('express'); 9 | // const request = require('sync-request'); 10 | 11 | // Constants 12 | const PORT = 8080; 13 | const HOST = '0.0.0.0'; 14 | AWS.config.region = process.env.REGION 15 | 16 | XRay.config([XRay.plugins.EC2Plugin, XRay.plugins.ECSPlugin]); 17 | //XRay.middleware.setSamplingRules('sampling-rules.json'); 18 | XRay.middleware.enableDynamicNaming('*.elb.amazonaws.com'); 19 | 20 | // App 21 | const app = express(); 22 | app.use(XRay.express.openSegment('webapp')); 23 | 24 | app.get('/', (req, res) => { 25 | let seg = XRay.getSegment(); 26 | seg.addAnnotation('param_greet', req.query['greet']); 27 | seg.addAnnotation('param_id', req.query['id']); 28 | 29 | getContent({hostname:`${process.env.GREETER_SERVICE_HOST}` ,port:process.env.GREETER_SERVICE_PORT, path: `/${process.env.GREETER_SERVICE_PATH}?greet=${req.query['greet']}`}) 30 | .then( function (html){ 31 | console.log (html); 32 | var output1 = html; 33 | // console.log(`output = ${output1}`); 34 | getContent({hostname:`${process.env.NAME_SERVICE_HOST}` ,port:process.env.NAME_SERVICE_PORT, path: `/${process.env.NAME_SERVICE_PATH}?id=${req.query['id']}`}) 35 | .then(function (html2){ 36 | console.log(html2); 37 | var output2 = html2; 38 | // console.log (output1 + output2); 39 | res.send(output1 +' ' + output2); 40 | }) 41 | .catch((err) => console.error(err)); 42 | }) 43 | .catch((err) => console.error(err)); 44 | 45 | }); 46 | 47 | 48 | // -------------------------------------------------------- 49 | // function get(url) { 50 | // return require('sync-request')('GET', url).getBody(); 51 | // } 52 | 53 | function getContent(option) { 54 | // return new pending promise 55 | return new Promise((resolve, reject) => { 56 | const httpreq = http.request(option, (res) => { 57 | // console.log(`STATUS: ${res.statusCode}`); 58 | //console.log(`HEADERS: ${JSON.stringify(res.headers)}`); 59 | res.setEncoding('utf8'); 60 | var data = ''; 61 | res.on('data', (chunk) => { 62 | data += chunk; 63 | }); 64 | res.on('end', () => { 65 | var result = data; 66 | // console.log(`result=${result}`); 67 | resolve(result); 68 | }); 69 | }); 70 | httpreq.on('error', (e) => { 71 | console.error(`problem with request: ${e.message}`); 72 | reject(new Error(`problem with 
request: ${e.message}`)); 73 | }); 74 | httpreq.end(); 75 | }) 76 | }; 77 | 78 | app.use(XRay.express.closeSegment()); 79 | app.listen(PORT, HOST); 80 | 81 | console.log(`Running on http://${HOST}:${PORT}`); 82 | 83 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/python-flask-app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3-alpine 2 | MAINTAINER Eng-Hwa 3 | 4 | RUN mkdir -p /usr/src/app 5 | WORKDIR /usr/src/app 6 | 7 | COPY requirements.txt /usr/src/app/ 8 | RUN pip install --no-cache-dir -r requirements.txt 9 | 10 | COPY . /usr/src/app 11 | 12 | # Expose the Flask port 13 | EXPOSE 5000 14 | 15 | CMD [ "python", "./app.py" ] 16 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/python-flask-app/app.py: -------------------------------------------------------------------------------- 1 | import os 2 | import socket 3 | import os 4 | from flask import Flask, request 5 | 6 | #https://docs.aws.amazon.com/xray-sdk-for-python/latest/reference/basic.html 7 | # #Lets try to use AWS X-ray for metrics / logging if available to us 8 | try: 9 | from aws_xray_sdk.core import xray_recorder 10 | from aws_xray_sdk.core import patch_all 11 | from aws_xray_sdk.ext.flask.middleware import XRayMiddleware 12 | # xray_recorder.configure(context_missing=LOG_ERROR) 13 | # xray_recorder.configure(sampling=False) 14 | plugins = ('EC2Plugin','ECSPlugin') 15 | xray_recorder.configure(plugins=plugins) 16 | patch_all() 17 | except: 18 | logging.exception('Failed to import X-ray') 19 | 20 | app = Flask(__name__) 21 | 22 | 23 | try: 24 | xray_recorder.configure(service='Flask App') 25 | XRayMiddleware(app, xray_recorder) 26 | except: 27 | logging.exception('Failed to load X-ray') 28 | 29 | 30 | @app.route('/') 31 | def hello(): 32 | return 'Hello Flask!' 33 | 34 | @app.route("/hostname") 35 | def return_hostname(): 36 | return "This is an example wsgi app served from {} to {}".format(socket.gethostname(), request.remote_addr) 37 | 38 | @app.route("/env") 39 | def env(): 40 | return os.environ['AWS_XRAY_DAEMON_ADDRESS'] 41 | 42 | 43 | @app.route('/fib/') 44 | def index(number=1): 45 | result = fib(number) 46 | return "Python Fib("+ str(number) + "): " + str(result) 47 | 48 | @xray_recorder.capture('Fibonnaci') 49 | def fib(n): 50 | if n == 0: 51 | return 0 52 | elif n == 1: 53 | return 1 54 | else: 55 | return fib(n - 1) + fib(n - 2) 56 | 57 | 58 | if __name__ == '__main__': 59 | # Bind to PORT if defined, otherwise default to 5000. 
60 | port = int(os.environ.get('PORT', 5000)) 61 | app.run(host='0.0.0.0', port=port) 62 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/python-flask-app/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask 2 | aws-xray-sdk==0.92.2 3 | boto3==1.4.7 4 | botocore==1.7.7 5 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/templates/daemonsetxray.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: xray 5 | spec: 6 | selector: 7 | matchLabels: 8 | tier: monitoring 9 | name: xray 10 | template: 11 | metadata: 12 | labels: 13 | tier: monitoring 14 | name: xray 15 | spec: 16 | containers: 17 | - name: xray 18 | image: .dkr.ecr..amazonaws.com/xraydaemon:v1 19 | ports: 20 | - name: xrayport 21 | containerPort: 2000 22 | hostPort: 2000 23 | protocol: UDP 24 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/templates/flaskyxray-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: flaskxray-deployment 5 | spec: 6 | replicas: 5 7 | selector: 8 | matchLabels: 9 | app: flaskxray-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: flaskxray-pod 14 | spec: 15 | containers: 16 | - name: flaskxray-container 17 | # ref : https://kubernetes.io/docs/concepts/containers/images/#using-aws-ec2-container-registry 18 | image: .dkr.ecr..amazonaws.com/flaskxray:v1 19 | env: 20 | - name: AWS_XRAY_DAEMON_ADDRESS 21 | value: 172.17.0.1:2000 22 | - name: AWS_XRAY_CONTEXT_MISSING 23 | value: LOG_ERROR 24 | imagePullPolicy: IfNotPresent 25 | ports: 26 | - containerPort: 5000 27 | resources: 28 | requests: 29 | memory: "256Mi" 30 | cpu: "250m" 31 | limits: 32 | memory: "512Mi" 33 | cpu: "500m" 34 | readinessProbe: 35 | tcpSocket: 36 | port: 5000 37 | initialDelaySeconds: 30 38 | periodSeconds: 10 39 | livenessProbe: 40 | httpGet: 41 | path: /env 42 | port: 5000 43 | httpHeaders: 44 | - name: X-Custom-Header 45 | value: healthz 46 | initialDelaySeconds: 3 47 | periodSeconds: 15 48 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/templates/flaskyxray-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: flaskxray-service 5 | spec: 6 | selector: 7 | app: flaskxray-pod 8 | ports: 9 | - name: http 10 | protocol: TCP 11 | port: 80 12 | targetPort: 5000 13 | type: LoadBalancer -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/templates/nodejs-microservices.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: name-service 5 | spec: 6 | selector: 7 | app: name-pod 8 | ports: 9 | - port: 8080 10 | --- 11 | apiVersion: apps/v1 12 | kind: Deployment 13 | metadata: 14 | name: name-deployment 15 | spec: 16 | replicas: 1 17 | 
selector: 18 | matchLabels: 19 | app: name-pod 20 | template: 21 | metadata: 22 | labels: 23 | app: name-pod 24 | spec: 25 | containers: 26 | - name: name 27 | image: 28 | env: 29 | - name: AWS_XRAY_DAEMON_ADDRESS 30 | value: 172.17.0.1:2000 31 | - name: AWS_XRAY_CONTEXT_MISSING 32 | value: LOG_ERROR 33 | ports: 34 | - containerPort: 8080 35 | --- 36 | apiVersion: v1 37 | kind: Service 38 | metadata: 39 | name: greeter-service 40 | spec: 41 | selector: 42 | app: greeter-pod 43 | ports: 44 | - port: 8080 45 | --- 46 | apiVersion: apps/v1 47 | kind: Deployment 48 | metadata: 49 | name: greeter-deployment 50 | spec: 51 | replicas: 1 52 | selector: 53 | matchLabels: 54 | app: greeter-pod 55 | template: 56 | metadata: 57 | labels: 58 | app: greeter-pod 59 | spec: 60 | containers: 61 | - name: greeter 62 | image: 63 | env: 64 | - name: AWS_XRAY_DAEMON_ADDRESS 65 | value: 172.17.0.1:2000 66 | - name: AWS_XRAY_CONTEXT_MISSING 67 | value: LOG_ERROR 68 | ports: 69 | - containerPort: 8080 70 | --- 71 | apiVersion: v1 72 | kind: Service 73 | metadata: 74 | name: webapp-service 75 | spec: 76 | selector: 77 | app: webapp-pod 78 | ports: 79 | - name: web 80 | port: 80 81 | targetPort: 8080 82 | type: LoadBalancer 83 | --- 84 | apiVersion: apps/v1 85 | kind: Deployment 86 | metadata: 87 | name: webapp-deployment 88 | spec: 89 | replicas: 1 90 | selector: 91 | matchLabels: 92 | app: webapp-pod 93 | template: 94 | metadata: 95 | labels: 96 | app: webapp-pod 97 | spec: 98 | containers: 99 | - name: webapp-pod 100 | image: 101 | env: 102 | - name: AWS_XRAY_DAEMON_ADDRESS 103 | value: 172.17.0.1:2000 104 | - name: AWS_XRAY_CONTEXT_MISSING 105 | value: LOG_ERROR 106 | - name: NAME_SERVICE_HOST 107 | value: name-service 108 | - name: NAME_SERVICE_PORT 109 | value: "8080" 110 | - name: NAME_SERVICE_PATH 111 | value: / 112 | - name: GREETER_SERVICE_HOST 113 | value: greeter-service 114 | - name: GREETER_SERVICE_PORT 115 | value: "8080" 116 | - name: GREETER_SERVICE_PATH 117 | value: / 118 | ports: 119 | - containerPort: 8080 120 | -------------------------------------------------------------------------------- /03-path-application-development/306-app-tracing-with-jaeger-and-x-ray/x-ray/x-ray-daemon/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | 3 | # Install CA certificates 4 | RUN apt-get update && apt-get install -y --force-yes --no-install-recommends apt-transport-https curl ca-certificates wget && apt-get clean && apt-get autoremove && rm -rf /var/lib/apt/lists/* 5 | 6 | RUN wget https://s3.dualstack.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-2.x.deb 7 | 8 | RUN dpkg -i aws-xray-daemon-2.x.deb 9 | 10 | # Run the X-Ray daemon 11 | CMD ["/usr/bin/xray", "--bind=0.0.0.0:2000"] -------------------------------------------------------------------------------- /03-path-application-development/307-app-management-with-helm/sample/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | -------------------------------------------------------------------------------- /03-path-application-development/307-app-management-with-helm/sample/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: sample 2 | version: 1.0.0 3 | description: My first Helm chart 4 | keywords: 5 | - java 6 | - javaee 7 | - mysql 8 | - wildfly 9 | - wildfly swarm 10 | home: https://github.com/aws-samples/aws-workshop-for-kubernetes 11 | sources: 12 | - https://github.com/aws-samples/aws-workshop-for-kubernetes 13 | maintainers: 14 | - name: Arun Gupta 15 | email: arun.gupta@gmail.com 16 | -------------------------------------------------------------------------------- /03-path-application-development/307-app-management-with-helm/sample/README.md: -------------------------------------------------------------------------------- 1 | # First Helm Chart 2 | 3 | This chart has a simple Java EE application that publishes a REST endpoint at `/resources/employees`. The resource returns a list of employees by querying a database. 4 | 5 | The Java EE application is deployed as a JAR built using WildFly Swarm. MySQL is used as the backend database. 6 | 7 | The application also publishes Prometheus-style metrics at `/metrics`. -------------------------------------------------------------------------------- /03-path-application-development/307-app-management-with-helm/sample/templates/db-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mysql-deployment 5 | spec: 6 | replicas: {{ .Values.db.replicas }} 7 | selector: 8 | matchLabels: 9 | app: mysql-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: mysql-pod 14 | spec: 15 | containers: 16 | - name: mysql 17 | image: {{ .Values.db.image }} 18 | ports: 19 | - containerPort: {{ .Values.db.port }} 20 | env: 21 | - name: MYSQL_DATABASE 22 | value: {{ .Values.db.database }} 23 | - name: MYSQL_USER 24 | value: {{ .Values.db.user }} 25 | - name: MYSQL_PASSWORD 26 | value: {{ .Values.db.password }} 27 | - name: MYSQL_ROOT_PASSWORD 28 | value: {{ .Values.db.root }} 29 | -------------------------------------------------------------------------------- /03-path-application-development/307-app-management-with-helm/sample/templates/db-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: db 5 | spec: 6 | selector: 7 | app: mysql-pod 8 | ports: 9 | - port: {{ .Values.db.port }} -------------------------------------------------------------------------------- /03-path-application-development/307-app-management-with-helm/sample/templates/webapp-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: webapp-deployment 5 | spec: 6 | replicas: {{ .Values.webapp.replicas }} 7 | selector: 8 | matchLabels: 9 | app: webapp-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: webapp-pod 14 | spec: 15 | containers: 16 | - name: webapp 17 | image: {{ .Values.webapp.image }} 18 | ports: 19 | - containerPort: {{ .Values.webapp.port }} 20 | 
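# The image, port, and replica count above are injected from the `webapp` block of values.yaml.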
-------------------------------------------------------------------------------- /03-path-application-development/307-app-management-with-helm/sample/templates/webapp-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: webapp 5 | spec: 6 | selector: 7 | app: webapp-pod 8 | ports: 9 | - port: {{ .Values.webapp.port }} -------------------------------------------------------------------------------- /03-path-application-development/307-app-management-with-helm/sample/values.yaml: -------------------------------------------------------------------------------- 1 | webapp: 2 | image: arungupta/docker-javaee:dockerconeu17 3 | port: 8080 4 | replicas: 1 5 | 6 | db: 7 | image: mysql:8 8 | database: employees 9 | user: mysql 10 | password: mysql 11 | root: supersecret 12 | port: 3306 13 | replicas: 1 -------------------------------------------------------------------------------- /03-path-application-development/308-statefulsets-and-pvs/templates/mysql-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: mysql-config 5 | labels: 6 | app: mysql 7 | data: 8 | master.cnf: | 9 | # Apply this config only on the master. 10 | [mysqld] 11 | log-bin 12 | slave.cnf: | 13 | # Apply this config only on slaves. 14 | [mysqld] 15 | super-read-only 16 | -------------------------------------------------------------------------------- /03-path-application-development/308-statefulsets-and-pvs/templates/mysql-services.yaml: -------------------------------------------------------------------------------- 1 | # Headless service for stable DNS entries of StatefulSet members. 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: mysql 6 | labels: 7 | app: mysql 8 | spec: 9 | ports: 10 | - name: mysql 11 | port: 3306 12 | clusterIP: None 13 | selector: 14 | app: mysql 15 | --- 16 | # Client service for connecting to any MySQL instance for reads. 17 | # For writes, you must instead connect to the master: mysql-0.mysql. 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: mysql-read 22 | labels: 23 | app: mysql 24 | spec: 25 | ports: 26 | - name: mysql 27 | port: 3306 28 | selector: 29 | app: mysql 30 | 31 | -------------------------------------------------------------------------------- /03-path-application-development/309-cicd-workflows/308-1-codesuite/README.adoc: -------------------------------------------------------------------------------- 1 | = Code Services - Continuous Deployment 2 | :toc: 3 | :icons: 4 | :linkcss: 5 | :imagesdir: ../../../resources/images 6 | 7 | = Code services - Continuous Deployment Reference Architecture for Kubernetes 8 | 9 | The Code services Continuous Deployment reference architecture demonstrates how to achieve continuous 10 | deployment of an application to a Kubernetes cluster using AWS CodePipeline, AWS CodeCommit, AWS CodeBuild and AWS Lambda. 11 | 12 | Launching this AWS CloudFormation stack provisions a continuous deployment process that uses AWS CodePipeline 13 | to monitor an AWS CodeCommit repository for new commits, AWS CodeBuild to create a new Docker container image and to push 14 | it into Amazon ECR. Finally, an AWS Lambda function with the Kubernetes Python SDK updates a Kubernetes deployment in a live cluster. 15 | 16 | When you deploy the CloudFormation stack, there are four parameters that are specific to your Kubernetes cluster.
You will need the API endpoint (enter only the subdomain and omit 'api'), Certificate Authority Data, Client Certificate Data and Client Key Data. 17 | The last three of these are sensitive, so the corresponding CloudFormation parameters are marked with the "NoEcho" property set to true and their contents are not exposed through CloudFormation. In addition, those strings are encrypted with the account's default 18 | KMS key and stored in Parameter Store. The Lambda function that authenticates to your Kubernetes API endpoint is assigned an IAM role that has permission to access those keys. The Lambda function builds a config file in the Lambda's tmpfs directory, which is in memory, 19 | so the secrets are gone when the Lambda function terminates. 20 | 21 | image::cicd.png[Architecture] 22 | 23 | Head over to the https://github.com/aws-samples/aws-kube-codesuite[repo] to deploy this architecture into your own cluster. 24 | -------------------------------------------------------------------------------- /03-path-application-development/309-cicd-workflows/readme.adoc: -------------------------------------------------------------------------------- 1 | = Continuous Integration, Delivery, and Deployment with Kubernetes 2 | :toc: 3 | :icons: 4 | :linkcss: 5 | :imagesdir: ../../resources/images 6 | 7 | There are many different tools for building CI/CD pipelines with the Kubernetes platform. 8 | 9 | Please explore the sections below for more information: 10 | 11 | * link:308-1-codesuite/[code services] 12 | 13 | Once you've looked at the options above, you are ready to continue with the workshop! 14 | 15 | :frame: none 16 | :grid: none 17 | :valign: top 18 | 19 | [align="center", cols="1", grid="none", frame="none"] 20 | |===== 21 | |image:button-continue-developer.png[link=../../03-path-application-development/310-deploying-a-chart-repository] 22 | |link:../../developer-path.adoc[Go to Developer Index] 23 | |===== 24 | -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line.
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | 23 | -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/Chart.yaml: -------------------------------------------------------------------------------- 1 | name: sample 2 | version: 1.0.0 3 | description: My first Helm chart 4 | keywords: 5 | - java 6 | - javaee 7 | - mysql 8 | - wildfly 9 | - wildfly swarm 10 | home: https://github.com/aws-samples/aws-workshop-for-kubernetes 11 | sources: 12 | - https://github.com/aws-samples/aws-workshop-for-kubernetes 13 | maintainers: 14 | - name: Arun Gupta 15 | email: arun.gupta@gmail.com 16 | -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/README.md: -------------------------------------------------------------------------------- 1 | # First Helm Chart 2 | 3 | This chart has a simple Java EE application that publishes a REST endpoint at `/resources/employees`. The resource returns a list of employees by querying a database. 4 | 5 | The Java EE application is deployed as a JAR built using WildFly Swarm. MySQL is used as the backend database. 6 | 7 | The application also publishes Prometheus-style metrics at `/metrics`. -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/sample-1.0.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/03-path-application-development/310-deploying-a-chart-repository/sample/sample-1.0.0.tgz -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/templates/db-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mysql-deployment 5 | spec: 6 | replicas: {{ .Values.db.replicas }} 7 | selector: 8 | matchLabels: 9 | app: mysql-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: mysql-pod 14 | spec: 15 | containers: 16 | - name: mysql 17 | image: {{ .Values.db.image }} 18 | ports: 19 | - containerPort: {{ .Values.db.port }} 20 | env: 21 | - name: MYSQL_DATABASE 22 | value: {{ .Values.db.database }} 23 | - name: MYSQL_USER 24 | value: {{ .Values.db.user }} 25 | - name: MYSQL_PASSWORD 26 | value: {{ .Values.db.password }} 27 | - name: MYSQL_ROOT_PASSWORD 28 | value: {{ .Values.db.root }} 29 | -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/templates/db-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: db 5 | spec: 6 | selector: 7 | app: mysql-pod 8 | ports: 9 | - port: {{ .Values.db.port }} -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/templates/webapp-deployment.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: webapp-deployment 5 | spec: 6 | replicas: {{ .Values.webapp.replicas }} 7 | selector: 8 | matchLabels: 9 | app: webapp-pod 10 | template: 11 | metadata: 12 | labels: 13 | app: webapp-pod 14 | spec: 15 | containers: 16 | - name: webapp 17 | image: {{ .Values.webapp.image }} 18 | ports: 19 | - containerPort: {{ .Values.webapp.port }} 20 | -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/templates/webapp-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: webapp 5 | spec: 6 | selector: 7 | app: webapp-pod 8 | ports: 9 | - port: {{ .Values.webapp.port }} 10 | -------------------------------------------------------------------------------- /03-path-application-development/310-deploying-a-chart-repository/sample/values.yaml: -------------------------------------------------------------------------------- 1 | webapp: 2 | image: arungupta/docker-javaee:dockerconeu17 3 | port: 8080 4 | replicas: 1 5 | 6 | db: 7 | image: mysql:8 8 | database: employees 9 | user: mysql 10 | password: mysql 11 | root: supersecret 12 | port: 3306 13 | replicas: 1 -------------------------------------------------------------------------------- /03-path-application-development/311-chaos-engineering/experiments/experiment.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.0.0", 3 | "title": "Terminating the greeting service should not impact users", 4 | "description": "How does the greeting service's unavailability impact our users? 
Do they see an error or does the webapp gets slower?", 5 | "tags": [ 6 | "kubernetes", 7 | "aws" 8 | ], 9 | "configuration": { 10 | "web_app_url": { 11 | "type": "env", 12 | "key": "WEBAPP_URL" 13 | } 14 | }, 15 | "steady-state-hypothesis": { 16 | "title": "Services are all available and healthy", 17 | "probes": [ 18 | { 19 | "type": "probe", 20 | "name": "application-should-be-alive-and-healthy", 21 | "tolerance": true, 22 | "provider": { 23 | "type": "python", 24 | "module": "chaosk8s.pod.probes", 25 | "func": "pods_in_phase", 26 | "arguments": { 27 | "label_selector": "app=webapp-pod", 28 | "phase": "Running", 29 | "ns": "default" 30 | } 31 | } 32 | }, 33 | { 34 | "type": "probe", 35 | "name": "application-must-respond-normally", 36 | "tolerance": 200, 37 | "provider": { 38 | "type": "http", 39 | "url": "${web_app_url}", 40 | "timeout": 3 41 | } 42 | } 43 | ] 44 | }, 45 | "method": [ 46 | { 47 | "type": "action", 48 | "name": "terminate-greeting-service", 49 | "provider": { 50 | "type": "python", 51 | "module": "chaosk8s.pod.actions", 52 | "func": "terminate_pods", 53 | "arguments": { 54 | "label_selector": "app=greeter-pod", 55 | "ns": "default" 56 | } 57 | } 58 | }, 59 | { 60 | "type": "probe", 61 | "name": "fetch-application-logs", 62 | "provider": { 63 | "type": "python", 64 | "module": "chaosk8s.pod.probes", 65 | "func": "read_pod_logs", 66 | "arguments": { 67 | "label_selector": "app=webapp-pod", 68 | "last": "20s", 69 | "ns": "default" 70 | } 71 | } 72 | } 73 | ], 74 | "rollbacks": [] 75 | } -------------------------------------------------------------------------------- /03-path-application-development/311-chaos-engineering/templates/app.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: name-service 5 | spec: 6 | selector: 7 | app: name-pod 8 | ports: 9 | - port: 8080 10 | --- 11 | apiVersion: apps/v1 12 | kind: ReplicaSet 13 | metadata: 14 | name: name-rs 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: name-pod 20 | template: 21 | metadata: 22 | labels: 23 | app: name-pod 24 | spec: 25 | containers: 26 | - name: name 27 | image: arungupta/name-service:latest 28 | ports: 29 | - containerPort: 8080 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: greeter-service 35 | spec: 36 | selector: 37 | app: greeter-pod 38 | ports: 39 | - port: 8080 40 | --- 41 | apiVersion: apps/v1 42 | kind: ReplicaSet 43 | metadata: 44 | name: greeter-rs 45 | spec: 46 | replicas: 1 47 | selector: 48 | matchLabels: 49 | app: greeter-pod 50 | template: 51 | metadata: 52 | labels: 53 | app: greeter-pod 54 | spec: 55 | containers: 56 | - name: name 57 | image: arungupta/greeter-service:latest 58 | ports: 59 | - containerPort: 8080 60 | --- 61 | apiVersion: v1 62 | kind: Service 63 | metadata: 64 | name: webapp-service 65 | spec: 66 | selector: 67 | app: webapp-pod 68 | ports: 69 | - name: web 70 | port: 80 71 | targetPort: 8080 72 | type: LoadBalancer 73 | --- 74 | apiVersion: apps/v1 75 | kind: ReplicaSet 76 | metadata: 77 | name: webapp-rs 78 | spec: 79 | replicas: 1 80 | selector: 81 | matchLabels: 82 | app: webapp-pod 83 | template: 84 | metadata: 85 | labels: 86 | app: webapp-pod 87 | spec: 88 | containers: 89 | - name: webapp-pod 90 | image: arungupta/webapp-service:latest 91 | env: 92 | - name: NAME_SERVICE_HOST 93 | value: name-service 94 | - name: NAME_SERVICE_PORT 95 | value: "8080" 96 | - name: NAME_SERVICE_PATH 97 | value: / 98 | - name: 
GREETER_SERVICE_HOST 99 | value: greeter-service 100 | - name: GREETER_SERVICE_PORT 101 | value: "8080" 102 | - name: GREETER_SERVICE_PATH 103 | value: / 104 | ports: 105 | - containerPort: 8080 106 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/app/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | 4 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:boron 2 | 3 | # Create app directory 4 | WORKDIR /usr/src/app 5 | 6 | # Install app dependencies 7 | COPY package.json . 8 | # For npm@5 or later, copy package-lock.json as well 9 | # COPY package.json package-lock.json . 10 | 11 | RUN npm install 12 | 13 | # Bundle app source 14 | COPY . . 15 | 16 | EXPOSE 8080 17 | CMD [ "npm", "start" ] 18 | 19 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "webapp", 3 | "version": "1.0.0", 4 | "description": "Webapp using Node.js", 5 | "author": "Arun Gupta", 6 | "main": "server.js", 7 | "scripts": { 8 | "start": "node server.js" 9 | }, 10 | "dependencies": { 11 | "express": "^4.13.3" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/app/readme.adoc: -------------------------------------------------------------------------------- 1 | = Docker image for Kubernetes ConfigMap in a Pod 2 | 3 | == Docker Container 4 | 5 | . Build Docker image: 6 | 7 | docker image build -t arungupta/print-hello . 8 | 9 | . Run Docker container: 10 | 11 | docker container run -it -p 8080:8080 -e COUNT=2 arungupta/print-hello 12 | 13 | == Kubernetes Pod with environment variable 14 | 15 | . Run pod: 16 | 17 | kubectl run app --env="COUNT=2" --image=arungupta/print-hello 18 | 19 | == Kubernetes Deployment with ConfigMap 20 | 21 | . Create ConfigMap: 22 | 23 | kubectl create configmap hello-config --from-literal=COUNT=2 24 | 25 | . 
Create Pod with ConfigMap 26 | 27 | kuebctl create -f templates/app-pod.yaml 28 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/app/server.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | const express = require('express'); 4 | 5 | // Constants 6 | const PORT = 8080; 7 | const HOST = '0.0.0.0'; 8 | 9 | // App 10 | const app = express(); 11 | app.get('/', (req, res) => { 12 | for (var i = 0; i < process.env.COUNT; i++) { 13 | console.log("Hello world " + i); 14 | } 15 | res.send("printed " + process.env.COUNT + " times"); 16 | }); 17 | 18 | app.listen(PORT, HOST); 19 | console.log(`Running on http://${HOST}:${PORT}`); 20 | 21 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/parameter-store-kubernetes/pom.xml: -------------------------------------------------------------------------------- 1 | 3 | 4.0.0 4 | org.examples.java 5 | parameter-store-kubernetes 6 | jar 7 | 1.0-SNAPSHOT 8 | http://maven.apache.org 9 | 10 | 11 | com.amazonaws 12 | aws-java-sdk-ssm 13 | 1.11.235 14 | 15 | 16 | 17 | 18 | 19 | org.apache.maven.plugins 20 | maven-compiler-plugin 21 | 3.6.1 22 | 23 | 1.8 24 | 1.8 25 | 26 | 27 | 28 | org.apache.maven.plugins 29 | maven-shade-plugin 30 | 3.1.0 31 | 32 | 33 | package 34 | 35 | shade 36 | 37 | 38 | 39 | 40 | 41 | org.codehaus.mojo 42 | exec-maven-plugin 43 | 1.5.0 44 | 45 | 46 | 47 | exec 48 | 49 | 50 | 51 | 52 | org.examples.java.App 53 | 54 | 55 | 56 | org.apache.maven.plugins 57 | maven-jar-plugin 58 | 3.0.2 59 | 60 | 61 | 62 | org.examples.java.App 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | docker 72 | 73 | 74 | 75 | io.fabric8 76 | docker-maven-plugin 77 | 0.22.1 78 | 79 | 80 | 81 | arungupta/${project.name} 82 | 83 | openjdk:latest 84 | 85 | artifact 86 | 87 | java -jar maven/${project.name}-${project.version}.jar 88 | 89 | 90 | 91 | 92 | 93 | 94 | docker:build 95 | package 96 | 97 | build 98 | 99 | 100 | 101 | docker:push 102 | install 103 | 104 | push 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/parameter-store-kubernetes/readme.adoc: -------------------------------------------------------------------------------- 1 | = AWS Parameter Store and Kubernetes 2 | 3 | This application shows how a Java application deployed as a Pod in a Kubernetes cluster can read secrets from AWS Parameter Store. 4 | 5 | . Build Docker image: `mvn package -Pdocker` 6 | . Push Docker image: `docker push arungupta/parameter-store-kubernetes:latest` 7 | 8 | == To be tested 9 | 10 | . Delete pod: `kubectl delete pod/parameter-store-kubernetes` 11 | . Deploy pod: `kubectl apply -f pod.yaml` 12 | . 
Check pod logs: `kubectl logs pod/parameter-store-kubernetes` 13 | 14 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/parameter-store-kubernetes/src/main/java/org/examples/java/App.java: -------------------------------------------------------------------------------- 1 | package org.examples.java; 2 | 3 | import com.amazonaws.services.simplesystemsmanagement.AWSSimpleSystemsManagement; 4 | import com.amazonaws.services.simplesystemsmanagement.AWSSimpleSystemsManagementClientBuilder; 5 | import com.amazonaws.services.simplesystemsmanagement.model.GetParameterRequest; 6 | import com.amazonaws.services.simplesystemsmanagement.model.GetParameterResult; 7 | 8 | /** 9 | * Hello world! 10 | * 11 | * @author Arun Gupta 12 | */ 13 | public class App { 14 | public static void main(String[] args) { 15 | System.out.println("parameter store: " 16 | + getSecret("GREETING") 17 | + getSecret("NAME")); 18 | } 19 | 20 | private static String getSecret(String secret) { 21 | AWSSimpleSystemsManagement client= AWSSimpleSystemsManagementClientBuilder.defaultClient(); 22 | GetParameterRequest request= new GetParameterRequest(); 23 | request.setName(secret); 24 | request.setWithDecryption(true); 25 | GetParameterResult result = client.getParameter(request); 26 | return result.getParameter().getValue(); 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/sec_mgr_app/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | npm-debug.log 3 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/sec_mgr_app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:carbon 2 | # Create app directory 3 | WORKDIR /usr/src/app 4 | 5 | # Install app dependencies 6 | # A wildcard is used to ensure both package.json AND package-lock.json are copied 7 | # where available (npm@5+) 8 | COPY package*.json ./ 9 | 10 | RUN npm install 11 | # If you are building your code for production 12 | # RUN npm install --only=production 13 | # Bundle app source 14 | COPY . . 
15 | 16 | CMD [ "npm", "start" ] 17 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/sec_mgr_app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "docker_web_app", 3 | "version": "1.0.0", 4 | "description": "Node.js on Docker", 5 | "author": "Paavan Mistry", 6 | "main": "server.js", 7 | "scripts": { 8 | "start": "node server.js" 9 | }, 10 | "dependencies": { 11 | "aws-sdk": "latest", 12 | "dotenv": "^6.0.0", 13 | "npm": "^6.1.0" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/images/sec_mgr_app/server.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | require('dotenv').config(); 4 | 5 | var AWS = require('aws-sdk'), 6 | endpoint = process.env.ENDPOINT, 7 | region = process.env.REGION, 8 | secretName = process.env.SECRETNAME, 9 | secret = "", 10 | binarySecretData = ""; 11 | 12 | 13 | // Constants 14 | var client = new AWS.SecretsManager({ 15 | endpoint: endpoint, 16 | region: region 17 | }); 18 | 19 | 20 | // App 21 | client.getSecretValue({SecretId: secretName}, function(err, data) { 22 | if(err) { 23 | if(err.code === 'ResourceNotFoundException') 24 | console.log("The requested secret " + secretName + " was not found"); 25 | else if(err.code === 'InvalidRequestException') 26 | console.log("The request was invalid due to: " + err.message); 27 | else if(err.code === 'InvalidParameterException') 28 | console.log("The request had invalid params: " + err.message); 29 | } 30 | else { 31 | // Decrypted secret using the associated KMS CMK 32 | // Depending on whether the secret was a string or binary, one of these fields will be populated 33 | if(data.SecretString !== "") { 34 | secret = data.SecretString; 35 | // console.log(secret); 36 | } else { 37 | binarySecretData = data.SecretBinary; 38 | } 39 | } 40 | 41 | // Your code goes here. 
42 | console.log(`Secret retrieved from AWS SecretsManager: The Secret ${secretName} has SecretString ${secret}`); 43 | }); 44 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/app-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: app-pod 5 | labels: 6 | name: app-pod 7 | spec: 8 | containers: 9 | - name: app 10 | image: arungupta/print-hello:latest 11 | env: 12 | - name: COUNT 13 | valueFrom: 14 | configMapKeyRef: 15 | name: hello-config 16 | key: COUNT 17 | ports: 18 | - containerPort: 8080 19 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/kube-auth.hcl: -------------------------------------------------------------------------------- 1 | path "secret/creds" { 2 | capabilities = ["read"] 3 | } -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/pod-parameter-store.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-parameter-store 5 | spec: 6 | containers: 7 | - name: pod-parameter-store 8 | image: arungupta/parameter-store-kubernetes:latest 9 | restartPolicy: Never 10 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/pod-secret-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-secret-env 5 | spec: 6 | containers: 7 | - name: pod-secret-env 8 | image: redis 9 | env: 10 | - name: SECRET_USERNAME 11 | valueFrom: 12 | secretKeyRef: 13 | name: mysecret 14 | key: username 15 | - name: SECRET_PASSWORD 16 | valueFrom: 17 | secretKeyRef: 18 | name: mysecret 19 | key: password 20 | restartPolicy: Never 21 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/pod-secret-volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-secret-volume 5 | spec: 6 | containers: 7 | - name: pod-secret-volume 8 | image: redis 9 | volumeMounts: 10 | - name: foo 11 | mountPath: "/etc/foo" 12 | readOnly: true 13 | volumes: 14 | - name: foo 15 | secret: 16 | secretName: mysecret 17 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/pod-secretsmanager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-secretsmanager 5 | annotations: 6 | seccomp.security.alpha.kubernetes.io/pod: docker/default 7 | apparmor.security.beta.kubernetes.io/pod: runtime/default 8 | spec: 9 | securityContext: 10 | runAsUser: 1337 11 | runAsNonRoot: true 12 | containers: 13 | - name: pod-secretsmanager 14 | image: paavanmistry/node-aws-sm-demo:latest 15 | securityContext: 16 | allowPrivilegeEscalation: false 17 | env: 18 | - name: ENDPOINT 19 | value: "https://secretsmanager.us-west-2.amazonaws.com" 20 | - name: REGION 21 | value: "us-west-2" 22 | - name: 
SECRETNAME 23 | value: "sm-demo-secret" 24 | restartPolicy: Never -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/pod-vault.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: vault-kubernetes 5 | spec: 6 | serviceAccountName: vault-auth 7 | containers: 8 | - name: vault-kubernetes 9 | image: arungupta/vault-kubernetes:latest 10 | env: 11 | - name: VAULT_ADDR 12 | valueFrom: 13 | configMapKeyRef: 14 | name: vault 15 | key: address 16 | restartPolicy: Never 17 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/redis-config: -------------------------------------------------------------------------------- 1 | maxmemory 2mb 2 | maxmemory-policy allkeys-lru 3 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/redis-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: redis-config 5 | labels: 6 | k8s-app: redis 7 | data: 8 | redis-config: | 9 | maxmemory 2mb 10 | maxmemory-policy allkeys-lru 11 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/redis-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: redis-pod 5 | spec: 6 | containers: 7 | - name: redis 8 | image: kubernetes/redis:v1 9 | env: 10 | - name: MASTER 11 | value: "true" 12 | ports: 13 | - containerPort: 6379 14 | resources: 15 | limits: 16 | cpu: "0.1" 17 | volumeMounts: 18 | - mountPath: /redis-master-data 19 | name: data 20 | - mountPath: /redis-master 21 | name: config 22 | volumes: 23 | - name: data 24 | emptyDir: {} 25 | - name: config 26 | configMap: 27 | name: redis-config 28 | items: 29 | - key: redis-config 30 | path: redis.conf 31 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mysecret 5 | data: 6 | username: YWRtaW4= 7 | password: cGFzc3dvcmQ= 8 | -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/vault-auth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: vault-auth -------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/vault-reviewer-rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: role-tokenreview-binding 5 | namespace: default 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: system:auth-delegator 10 | subjects: 11 | - kind: ServiceAccount 12 | name: vault-reviewer 13 | namespace: default 14 | 
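For reference on `secret.yaml` above: the opaque `data` values are plain base64-encoded strings. A short sketch of how those values were produced and how the secret can be checked after it is created; the relative path assumes you are in the 401-configmaps-and-secrets directory:

```
echo -n 'admin' | base64       # YWRtaW4=
echo -n 'password' | base64    # cGFzc3dvcmQ=
kubectl apply -f templates/secret.yaml
kubectl get secret mysecret -o jsonpath='{.data.username}' | base64 --decode
```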
-------------------------------------------------------------------------------- /04-path-security-and-networking/401-configmaps-and-secrets/templates/vault-reviewer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: vault-reviewer -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/Dockerfile-consul-template: -------------------------------------------------------------------------------- 1 | FROM hashicorp/consul-template:alpine 2 | 3 | RUN apk add --no-cache jq 4 | ADD consul-template-wrapper.sh /consul-template-wrapper.sh 5 | ADD config.ctmpl /config.ctmpl 6 | CMD ["sh", "/consul-template-wrapper.sh"] -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/config.ctmpl: -------------------------------------------------------------------------------- 1 | [default] 2 | {{ with secret "aws/creds/readonly"}} 3 | {{ if .Data.access_key }} 4 | aws_access_key_id = {{ .Data.access_key }} 5 | {{ end }} 6 | {{ if .Data.secret_key }} 7 | aws_secret_access_key = {{ .Data.secret_key }} 8 | {{ end }} 9 | {{ end }} -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/consul-template-wrapper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Write /root/.aws/config 4 | 5 | cat << EOF > /root/.aws/config 6 | [default] 7 | region = us-east-1 8 | EOF 9 | 10 | # Generate Vault token with k8s auth, get dynamic 11 | # AWS creds, and write AWS creds to /root/.aws/credentials 12 | 13 | SERVICE_ACCOUNT_TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token) 14 | 15 | VAULT_TOKEN=$(curl -sb \ 16 | --request POST \ 17 | --data "{\"role\": \"demo\", \"jwt\": \"${SERVICE_ACCOUNT_TOKEN}\"}" \ 18 | "${VAULT_ADDR}/v1/auth/kubernetes/login" | jq -r '.auth .client_token') 19 | 20 | /bin/consul-template \ 21 | --vault-token=$VAULT_TOKEN \ 22 | --vault-addr=$VAULT_ADDR \ 23 | --vault-renew-token=false \ 24 | -template "/config.ctmpl:/root/.aws/credentials" -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/deployment-with-vault.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: vault-sidecar 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: vault-sidecar 10 | template: 11 | metadata: 12 | labels: 13 | app: vault-sidecar 14 | spec: 15 | serviceAccountName: vault-auth 16 | containers: 17 | - name: aws-cli 18 | image: cgswong/aws:aws 19 | command: 20 | - "sleep" 21 | - "9999999" 22 | imagePullPolicy: IfNotPresent 23 | volumeMounts: 24 | - name: app-secrets 25 | mountPath: "/root/.aws/" 26 | - name: consul-template 27 | image: "arungupta/vault-sidecar:0.0.1" 28 | imagePullPolicy: IfNotPresent 29 | env: 30 | - name: VAULT_ADDR 31 | value: "http://:8200" 32 | volumeMounts: 33 | - name: app-secrets 34 | mountPath: "/root/.aws/" 35 | volumes: 36 | - name: app-secrets 37 | emptyDir: 38 | medium: "Memory" 39 | --------------------------------------------------------------------------------
/04-path-security-and-networking/402-authentication-and-authorization/templates/kube-auth.hcl: -------------------------------------------------------------------------------- 1 | path "secret/creds" { 2 | capabilities = ["read"] 3 | } 4 | 5 | path "aws/creds/readonly" { 6 | capabilities = ["read"] 7 | } -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/kube2iam-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: kube2iam 5 | labels: 6 | app: kube2iam 7 | spec: 8 | selector: 9 | matchLabels: 10 | name: kube2iam 11 | template: 12 | metadata: 13 | labels: 14 | name: kube2iam 15 | spec: 16 | hostNetwork: true 17 | containers: 18 | - image: jtblin/kube2iam:0.8.1 19 | name: kube2iam 20 | args: 21 | - "--auto-discover-base-arn" 22 | - "--host-interface=cbr0" 23 | - "--host-ip=$(HOST_IP)" 24 | - "--iptables=true" 25 | env: 26 | - name: HOST_IP 27 | valueFrom: 28 | fieldRef: 29 | fieldPath: status.podIP 30 | securityContext: 31 | privileged: true 32 | ports: 33 | - containerPort: 8181 34 | hostPort: 8181 35 | name: http 36 | -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/namespace-role-annotation.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | annotations: 5 | iam.amazonaws.com/allowed-roles: | 6 | ["{{NodeIamRoleARN}}"] 7 | name: default 8 | -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/pod-role-trust-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Sid": "", 6 | "Effect": "Allow", 7 | "Principal": { 8 | "Service": "ec2.amazonaws.com" 9 | }, 10 | "Action": "sts:AssumeRole" 11 | }, 12 | { 13 | "Sid": "", 14 | "Effect": "Allow", 15 | "Principal": { 16 | "AWS": "{{NodeIAMRoleARN}}" 17 | }, 18 | "Action": "sts:AssumeRole" 19 | } 20 | ] 21 | } -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/pod-with-kube2iam.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: aws-cli 5 | labels: 6 | name: aws-cli 7 | annotations: 8 | iam.amazonaws.com/role: MyPodRole 9 | spec: 10 | containers: 11 | - image: cgswong/aws:aws 12 | command: 13 | - "sleep" 14 | - "9999999" 15 | name: aws-cli 16 | -------------------------------------------------------------------------------- /04-path-security-and-networking/402-authentication-and-authorization/templates/pod-with-vault.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: aws-cli 5 | labels: 6 | name: aws-cli 7 | spec: 8 | containers: 9 | - image: cgswong/aws:aws 10 | command: 11 | - "sleep" 12 | - "9999999" 13 | name: aws-cli -------------------------------------------------------------------------------- /04-path-security-and-networking/404-network-policies/templates/allow-network-policy.yaml: 
-------------------------------------------------------------------------------- 1 | kind: NetworkPolicy 2 | apiVersion: networking.k8s.io/v1 3 | metadata: 4 | name: allow 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app: http-echo 9 | ingress: 10 | - from: 11 | - podSelector: 12 | matchLabels: 13 | app: busybox 14 | -------------------------------------------------------------------------------- /04-path-security-and-networking/404-network-policies/templates/deny-all-by-default-network-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: deny-all-by-default 5 | spec: 6 | podSelector: {} 7 | -------------------------------------------------------------------------------- /04-path-security-and-networking/404-network-policies/weavenet/readme.adoc: -------------------------------------------------------------------------------- 1 | = Kubernetes - Enforcing Network Security Policies with Weave Net 2 | :toc: 3 | 4 | https://www.weave.works/docs/net/latest/kubernetes/kube-addon/[Weave Net] provides a program called the Weave NPC (network policy controller). The NPC runs once on every host, and routes the traffic according to the rules set up in the YAML file. Weave Net does this by talking to iptables, which is a feature of Linux. 5 | 6 | At the top-level forward chain, a rule is injected that checks whether a WEAVE-NPC policy applies, and if it doesn’t, then the packet is dropped. If it does, and if there is an established connection, then the packet is accepted. Only packets with newly opened connections are checked. Therefore, if the packet is already on an established connection, it is accepted, and the other chains are checked. iptables rules are updated when the policy’s ‘ipsets’ change as pods come and go. Weave begins with the source address on the network, which goes over a Linux bridge. In the course of traversing that bridge, the connection is checked against the iptables rules. 7 | 8 | This exercise will walk you through configuring Weave Net and applying a Network Policy. 9 | 10 | == Prerequisites 11 | 12 | In order to perform the exercises in this chapter, you’ll need to deploy configurations to a 3-master, 5-worker kops-created cluster created with the following command line: 13 | 14 | kops create cluster \ 15 | --name example2.cluster.k8s.local \ 16 | --master-count 3 \ 17 | --node-count 5 \ 18 | --zones ${AWS_AVAILABILITY_ZONES} \ 19 | --networking weave \ 20 | --yes 21 | 22 | This command line uses the `--networking weave` option, which tells the cluster to use Weave Net instead of the default networking provided by kubenet. Note that the name here is `example2.cluster.k8s.local` instead of the usual `example.cluster.k8s.local` name. This is just so that, if you still have the earlier cluster, the two clusters can coexist. 23 | 24 | To check that the network configuration is using Weave Net, view the cluster configuration using the following command: 25 | 26 | kops edit cluster example2.cluster.k8s.local 27 | 28 | This will show the following fragment under `.spec`: 29 | 30 | networking: 31 | weave: {} 32 | 33 | Quit the edit without making any changes; this step was just to check. 34 | 35 | This chapter also uses some files from the repo; please `cd` into `network-policies/weavenet` to use them.
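Before moving on, it can be worth confirming that the Weave Net pods are actually running, one per node. A quick check, assuming the DaemonSet name and label used by the `weavenet-update.yaml` template later in this chapter:

```
$ kubectl get daemonset weave-net -n kube-system
$ kubectl get pods -n kube-system -l name=weave-net -o wide
```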
36 | 37 | === Update Weave Net to 2.1.3 in kops 1.7.x 38 | kops 1.7.x comes with Weave Net 2.0.5 out-of-the-box which does not support the latest network-policy updates for kubernetes 1.7. 39 | In order to make this work, we need to update Weave Net via: 40 | 41 | ``` 42 | $ kubectl apply -f templates/weavenet-update.yaml 43 | clusterrole "weave-net" configured 44 | serviceaccount "weave-net" unchanged 45 | clusterrolebinding "weave-net" configured 46 | role "weave-net" created 47 | rolebinding "weave-net" created 48 | daemonset "weave-net" configured 49 | ``` 50 | 51 | After this, wait for the Weave Net pods to be updated via: 52 | ``` 53 | $ kubectl rollout status ds/weave-net -n kube-system 54 | Waiting for rollout to finish: 0 out of 8 new pods have been updated... 55 | Waiting for rollout to finish: 1 out of 8 new pods have been updated... 56 | [...] 57 | Waiting for rollout to finish: 7 of 8 updated pods are available... 58 | daemon set "weave-net" successfully rolled out 59 | ``` 60 | 61 | -------------------------------------------------------------------------------- /04-path-security-and-networking/404-network-policies/weavenet/templates/weavenet-update.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: weave-net 5 | labels: 6 | name: weave-net 7 | role.kubernetes.io/networking: "1" 8 | rules: 9 | - apiGroups: 10 | - '' 11 | resources: 12 | - pods 13 | - namespaces 14 | - nodes 15 | verbs: 16 | - get 17 | - list 18 | - watch 19 | - apiGroups: 20 | - networking.k8s.io 21 | resources: 22 | - networkpolicies 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | --- 28 | apiVersion: v1 29 | kind: ServiceAccount 30 | metadata: 31 | name: weave-net 32 | labels: 33 | name: weave-net 34 | role.kubernetes.io/networking: "1" 35 | namespace: kube-system 36 | --- 37 | apiVersion: rbac.authorization.k8s.io/v1 38 | kind: ClusterRoleBinding 39 | metadata: 40 | name: weave-net 41 | labels: 42 | name: weave-net 43 | role.kubernetes.io/networking: "1" 44 | roleRef: 45 | kind: ClusterRole 46 | name: weave-net 47 | apiGroup: rbac.authorization.k8s.io 48 | subjects: 49 | - kind: ServiceAccount 50 | name: weave-net 51 | namespace: kube-system 52 | --- 53 | apiVersion: rbac.authorization.k8s.io/v1 54 | kind: Role 55 | metadata: 56 | name: weave-net 57 | namespace: kube-system 58 | labels: 59 | name: weave-net 60 | role.kubernetes.io/networking: "1" 61 | rules: 62 | - apiGroups: 63 | - '' 64 | resources: 65 | - configmaps 66 | resourceNames: 67 | - weave-net 68 | verbs: 69 | - get 70 | - update 71 | - apiGroups: 72 | - '' 73 | resources: 74 | - configmaps 75 | verbs: 76 | - create 77 | --- 78 | apiVersion: rbac.authorization.k8s.io/v1 79 | kind: RoleBinding 80 | metadata: 81 | name: weave-net 82 | namespace: kube-system 83 | labels: 84 | name: weave-net 85 | role.kubernetes.io/networking: "1" 86 | roleRef: 87 | kind: Role 88 | name: weave-net 89 | apiGroup: rbac.authorization.k8s.io 90 | subjects: 91 | - kind: ServiceAccount 92 | name: weave-net 93 | namespace: kube-system 94 | --- 95 | apiVersion: apps/v1 96 | kind: DaemonSet 97 | metadata: 98 | name: weave-net 99 | labels: 100 | name: weave-net 101 | role.kubernetes.io/networking: "1" 102 | namespace: kube-system 103 | spec: 104 | selector: 105 | matchLabels: 106 | name: weave-net 107 | role.kubernetes.io/networking: "1" 108 | template: 109 | metadata: 110 | labels: 111 | name: weave-net 112 | role.kubernetes.io/networking: "1" 
113 | spec: 114 | containers: 115 | - name: weave 116 | command: 117 | - /home/weave/launch.sh 118 | env: 119 | - name: HOSTNAME 120 | valueFrom: 121 | fieldRef: 122 | apiVersion: v1 123 | fieldPath: spec.nodeName 124 | - name: IPALLOC_RANGE 125 | value: 100.96.0.0/11 126 | - name: WEAVE_MTU 127 | value: "8912" 128 | image: 'weaveworks/weave-kube:2.1.3' 129 | livenessProbe: 130 | httpGet: 131 | host: 127.0.0.1 132 | path: /status 133 | port: 6784 134 | initialDelaySeconds: 30 135 | resources: 136 | requests: 137 | cpu: 100m 138 | memory: 200Mi 139 | limits: 140 | cpu: 100m 141 | memory: 200Mi 142 | securityContext: 143 | privileged: true 144 | volumeMounts: 145 | - name: weavedb 146 | mountPath: /weavedb 147 | - name: cni-bin 148 | mountPath: /host/opt 149 | - name: cni-bin2 150 | mountPath: /host/home 151 | - name: cni-conf 152 | mountPath: /host/etc 153 | - name: dbus 154 | mountPath: /host/var/lib/dbus 155 | - name: lib-modules 156 | mountPath: /lib/modules 157 | - name: xtables-lock 158 | mountPath: /run/xtables.lock 159 | - name: weave-npc 160 | env: 161 | - name: HOSTNAME 162 | valueFrom: 163 | fieldRef: 164 | apiVersion: v1 165 | fieldPath: spec.nodeName 166 | image: 'weaveworks/weave-npc:2.1.3' 167 | resources: 168 | requests: 169 | cpu: 100m 170 | memory: 200Mi 171 | limits: 172 | cpu: 100m 173 | memory: 200Mi 174 | securityContext: 175 | privileged: true 176 | volumeMounts: 177 | - name: xtables-lock 178 | mountPath: /run/xtables.lock 179 | hostNetwork: true 180 | hostPID: true 181 | restartPolicy: Always 182 | securityContext: 183 | seLinuxOptions: {} 184 | serviceAccountName: weave-net 185 | tolerations: 186 | - effect: NoSchedule 187 | operator: Exists 188 | volumes: 189 | - name: weavedb 190 | hostPath: 191 | path: /var/lib/weave 192 | - name: cni-bin 193 | hostPath: 194 | path: /opt 195 | - name: cni-bin2 196 | hostPath: 197 | path: /home 198 | - name: cni-conf 199 | hostPath: 200 | path: /etc 201 | - name: dbus 202 | hostPath: 203 | path: /var/lib/dbus 204 | - name: lib-modules 205 | hostPath: 206 | path: /lib/modules 207 | - name: xtables-lock 208 | hostPath: 209 | path: /run/xtables.lock 210 | updateStrategy: 211 | type: RollingUpdate 212 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/alb-ingress-controller.yaml: -------------------------------------------------------------------------------- 1 | # Application Load Balancer (ALB) Ingress Controller Deployment Manifest. 2 | # This manifest details sensible defaults for deploying an ALB Ingress Controller. 3 | # GitHub: https://github.com/coreos/alb-ingress-controller 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | labels: 8 | app: alb-ingress-controller 9 | name: alb-ingress-controller 10 | # Namespace the ALB Ingress Controller should run in. Does not impact which 11 | # namespaces it's able to resolve ingress resource for. For limiting ingress 12 | # namespace scope, see --watch-namespace. 13 | namespace: kube-system 14 | spec: 15 | replicas: 1 16 | selector: 17 | matchLabels: 18 | app: alb-ingress-controller 19 | strategy: 20 | rollingUpdate: 21 | maxSurge: 1 22 | maxUnavailable: 1 23 | type: RollingUpdate 24 | template: 25 | metadata: 26 | creationTimestamp: null 27 | labels: 28 | app: alb-ingress-controller 29 | spec: 30 | containers: 31 | - args: 32 | - /server 33 | # Ingress controllers must have a default backend deployment where 34 | # all unknown locations can be routed to. 
Often this is a 404 page. The 35 | # default backend is not particularly helpful to the ALB Ingress Controller 36 | # but is still required. The default backend and its respective service 37 | # must be running Kubernetes for this controller to start. 38 | - --default-backend-service=kube-system/default-http-backend 39 | # Limit the namespace where this ALB Ingress Controller deployment will 40 | # resolve ingress resources. If left commented, all namespaces are used. 41 | #- --watch-namespace=your-k8s-namespace 42 | # Setting the ingress-class flag below will ensure that only ingress resources with the 43 | # annotation kubernetes.io/ingress.class: "alb" are respected by the controller. You may 44 | # choose any class you'd like for this controller to respect. 45 | #- --ingress-class=alb 46 | env: 47 | # AWS region this ingress controller will operate in. 48 | # List of regions: 49 | # http://docs.aws.amazon.com/general/latest/gr/rande.html#vpc_region 50 | - name: AWS_REGION 51 | value: us-east-1 52 | # Name of your cluster. Used when naming resources created 53 | # by the ALB Ingress Controller, providing distinction between 54 | # clusters. 55 | - name: CLUSTER_NAME 56 | value: cluster.k8s.local 57 | # AWS key id for authenticating with the AWS API. 58 | # This is only here for examples. It's recommended you instead use 59 | # a project like kube2iam for granting access. 60 | #- name: AWS_ACCESS_KEY_ID 61 | #value: KEYVALUE 62 | # AWS key secret for authenticating with the AWS API. 63 | # This is only here for examples. It's recommended you instead use 64 | # a project like kube2iam for granting access. 65 | #- name: AWS_SECRET_ACCESS_KEY 66 | #value: SECRETVALUE 67 | # Enables logging on all outbound requests sent to the AWS API. 68 | # If logging is desired, set to true. 69 | - name: AWS_DEBUG 70 | value: "false" 71 | - name: POD_NAME 72 | valueFrom: 73 | fieldRef: 74 | apiVersion: v1 75 | fieldPath: metadata.name 76 | - name: POD_NAMESPACE 77 | valueFrom: 78 | fieldRef: 79 | apiVersion: v1 80 | fieldPath: metadata.namespace 81 | # Repository location of the ALB Ingress Controller. 
82 | image: quay.io/coreos/alb-ingress-controller:1.0-alpha.3 83 | imagePullPolicy: Always 84 | name: server 85 | resources: {} 86 | terminationMessagePath: /dev/termination-log 87 | dnsPolicy: ClusterFirst 88 | restartPolicy: Always 89 | securityContext: {} 90 | terminationGracePeriodSeconds: 30 91 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/alb-ingress-resource.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: "webapp-alb-ingress" 5 | annotations: 6 | alb.ingress.kubernetes.io/scheme: internet-facing 7 | alb.ingress.kubernetes.io/subnets: 'subnet-2e917d01, subnet-eb02a5d4' 8 | labels: 9 | app: webapp-service 10 | spec: 11 | rules: 12 | - http: 13 | paths: 14 | - path: / 15 | backend: 16 | serviceName: "webapp-service" 17 | servicePort: 80 -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/app.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: name-service 5 | spec: 6 | selector: 7 | app: name-pod 8 | ports: 9 | - port: 8080 10 | --- 11 | apiVersion: apps/v1 12 | kind: ReplicaSet 13 | metadata: 14 | name: name-rs 15 | spec: 16 | replicas: 1 17 | selector: 18 | matchLabels: 19 | app: name-pod 20 | template: 21 | metadata: 22 | labels: 23 | app: name-pod 24 | spec: 25 | containers: 26 | - name: name 27 | image: arungupta/name-service:latest 28 | ports: 29 | - containerPort: 8080 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: greeter-service 35 | spec: 36 | selector: 37 | app: greeter-pod 38 | ports: 39 | - port: 8080 40 | --- 41 | apiVersion: apps/v1 42 | kind: ReplicaSet 43 | metadata: 44 | name: greeter-rs 45 | spec: 46 | replicas: 1 47 | selector: 48 | matchLabels: 49 | app: greeter-pod 50 | template: 51 | metadata: 52 | labels: 53 | app: greeter-pod 54 | spec: 55 | containers: 56 | - name: name 57 | image: arungupta/greeter-service:latest 58 | ports: 59 | - containerPort: 8080 60 | --- 61 | apiVersion: v1 62 | kind: Service 63 | metadata: 64 | name: webapp-service 65 | spec: 66 | selector: 67 | app: webapp-pod 68 | ports: 69 | - name: web 70 | port: 80 71 | targetPort: 8080 72 | type: NodePort 73 | --- 74 | apiVersion: apps/v1 75 | kind: ReplicaSet 76 | metadata: 77 | name: webapp-rs 78 | spec: 79 | replicas: 1 80 | selector: 81 | matchLabels: 82 | app: webapp-pod 83 | template: 84 | metadata: 85 | labels: 86 | app: webapp-pod 87 | spec: 88 | containers: 89 | - name: webapp-pod 90 | image: arungupta/webapp-service:latest 91 | env: 92 | - name: NAME_SERVICE_HOST 93 | value: name-service 94 | - name: NAME_SERVICE_PORT 95 | value: "8080" 96 | - name: NAME_SERVICE_PATH 97 | value: / 98 | - name: GREETER_SERVICE_HOST 99 | value: greeter-service 100 | - name: GREETER_SERVICE_PORT 101 | value: "8080" 102 | - name: GREETER_SERVICE_PATH 103 | value: / 104 | ports: 105 | - containerPort: 8080 106 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/kube-aws-ingress-controller-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: kube-ingress-aws-controller 5 
| namespace: kube-system 6 | labels: 7 | component: ingress 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | component: ingress 13 | template: 14 | metadata: 15 | labels: 16 | component: ingress 17 | spec: 18 | containers: 19 | - name: kube-ingress-aws-controller 20 | image: registry.opensource.zalan.do/teapot/kube-ingress-aws-controller:latest 21 | env: 22 | - name: AWS_REGION 23 | value: 24 | resources: 25 | limits: 26 | cpu: 200m 27 | memory: 200Mi 28 | requests: 29 | cpu: 50m 30 | memory: 25Mi 31 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/sample-app-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: demo-app-v1 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | application: demo 10 | version: v1 11 | template: 12 | metadata: 13 | labels: 14 | application: demo 15 | version: v1 16 | spec: 17 | containers: 18 | - name: skipper-demo 19 | image: registry.opensource.zalan.do/pathfinder/skipper:latest 20 | args: 21 | - "skipper" 22 | - "-inline-routes" 23 | - "* -> inlineContent(\"

<h1>Hello!</h1>\") -> <shunt>" 24 | ports: 25 | - containerPort: 9090 26 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/sample-app-v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: demo-app-v2 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | application: demo 10 | version: v2 11 | template: 12 | metadata: 13 | labels: 14 | application: demo 15 | version: v2 16 | spec: 17 | containers: 18 | - name: skipper-demo 19 | image: registry.opensource.zalan.do/pathfinder/skipper:latest 20 | args: 21 | - "skipper" 22 | - "-inline-routes" 23 | - "* -> inlineContent(\"
<h1>Hello AWS!</h1>
\") -> " 24 | ports: 25 | - containerPort: 9090 26 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/sample-ing-traffic.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: "demo-traffic-switching" 5 | labels: 6 | application: demo 7 | annotations: 8 | zalando.org/backend-weights: | 9 | {"demo-app-v1": 80, "demo-app-v2": 20} 10 | spec: 11 | rules: 12 | - host: "" 13 | http: 14 | paths: 15 | - backend: 16 | serviceName: "demo-app-v1" 17 | servicePort: 80 18 | - backend: 19 | serviceName: "demo-app-v2" 20 | servicePort: 80 21 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/sample-ing-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: "demo-v1" 5 | labels: 6 | application: demo 7 | spec: 8 | rules: 9 | - host: "" 10 | http: 11 | paths: 12 | - backend: 13 | serviceName: "demo-app-v1" 14 | servicePort: 80 15 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/sample-ing-v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: "demo-feature-toggle" 5 | labels: 6 | application: demo 7 | annotations: 8 | zalando.org/skipper-predicate: QueryParam("version", "^v2$") 9 | zalando.org/skipper-filter: ratelimit(2, "1m") 10 | spec: 11 | rules: 12 | - host: "" 13 | http: 14 | paths: 15 | - backend: 16 | serviceName: "demo-app-v1" 17 | servicePort: 80 18 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/sample-svc-v1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: demo-app-v1 5 | labels: 6 | application: demo 7 | version: v1 8 | spec: 9 | type: ClusterIP 10 | ports: 11 | - port: 80 12 | protocol: TCP 13 | targetPort: 9090 14 | name: external 15 | selector: 16 | application: demo 17 | version: v1 18 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/sample-svc-v2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: demo-app-v2 5 | labels: 6 | application: demo 7 | version: v2 8 | spec: 9 | type: ClusterIP 10 | ports: 11 | - port: 80 12 | protocol: TCP 13 | targetPort: 9090 14 | name: external 15 | selector: 16 | application: demo 17 | version: v2 18 | -------------------------------------------------------------------------------- /04-path-security-and-networking/405-ingress-controllers/templates/skipper-ingress-daemonset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: skipper-ingress 5 | namespace: kube-system 6 | labels: 7 | component: ingress 8 | spec: 9 | selector: 10 | matchLabels: 11 | component: ingress 12 | updateStrategy: 13 | type: RollingUpdate 14 | template: 15 | metadata: 16 | 
name: skipper-ingress 17 | labels: 18 | component: ingress 19 | spec: 20 | hostNetwork: true 21 | containers: 22 | - name: skipper-ingress 23 | image: registry.opensource.zalan.do/pathfinder/skipper:latest 24 | ports: 25 | - name: ingress-port 26 | containerPort: 9999 27 | hostPort: 9999 28 | args: 29 | - "skipper" 30 | - "-kubernetes" 31 | - "-kubernetes-in-cluster" 32 | - "-address=:9999" 33 | - "-proxy-preserve-host" 34 | - "-serve-host-metrics" 35 | - "-enable-ratelimits" 36 | - "-experimental-upgrade" 37 | - "-metrics-exp-decay-sample" 38 | - "-kubernetes-https-redirect=true" 39 | resources: 40 | limits: 41 | cpu: 200m 42 | memory: 200Mi 43 | requests: 44 | cpu: 25m 45 | memory: 25Mi 46 | readinessProbe: 47 | httpGet: 48 | path: /kube-system/healthz 49 | port: 9999 50 | initialDelaySeconds: 5 51 | timeoutSeconds: 5 52 | -------------------------------------------------------------------------------- /04-path-security-and-networking/406-coredns/templates/busybox.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: busybox 5 | namespace: default 6 | spec: 7 | containers: 8 | - image: busybox 9 | command: 10 | - sleep 11 | - "3600" 12 | imagePullPolicy: IfNotPresent 13 | name: busybox 14 | restartPolicy: Always 15 | -------------------------------------------------------------------------------- /04-path-security-and-networking/406-coredns/templates/coredns-kops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | labels: 11 | kubernetes.io/bootstrapping: rbac-defaults 12 | name: system:coredns 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - endpoints 18 | - services 19 | - pods 20 | - namespaces 21 | verbs: 22 | - list 23 | - watch 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | annotations: 29 | rbac.authorization.kubernetes.io/autoupdate: "true" 30 | labels: 31 | kubernetes.io/bootstrapping: rbac-defaults 32 | name: system:coredns 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: ClusterRole 36 | name: system:coredns 37 | subjects: 38 | - kind: ServiceAccount 39 | name: coredns 40 | namespace: kube-system 41 | --- 42 | apiVersion: v1 43 | kind: ConfigMap 44 | metadata: 45 | name: coredns 46 | namespace: kube-system 47 | data: 48 | Corefile: | 49 | .:53 { 50 | errors 51 | log stdout 52 | health 53 | kubernetes cluster.local 100.64.0.0/13 54 | proxy . 
/etc/resolv.conf 55 | cache 30 56 | } 57 | --- 58 | apiVersion: apps/v1 59 | kind: Deployment 60 | metadata: 61 | name: coredns 62 | namespace: kube-system 63 | labels: 64 | k8s-app: coredns 65 | kubernetes.io/cluster-service: "true" 66 | kubernetes.io/name: "CoreDNS" 67 | spec: 68 | replicas: 2 69 | selector: 70 | matchLabels: 71 | k8s-app: coredns 72 | template: 73 | metadata: 74 | labels: 75 | k8s-app: coredns 76 | annotations: 77 | scheduler.alpha.kubernetes.io/critical-pod: '' 78 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' 79 | spec: 80 | serviceAccountName: coredns 81 | containers: 82 | - name: coredns 83 | image: coredns/coredns:latest 84 | imagePullPolicy: Always 85 | args: [ "-conf", "/etc/coredns/Corefile" ] 86 | volumeMounts: 87 | - name: config-volume 88 | mountPath: /etc/coredns 89 | ports: 90 | - containerPort: 53 91 | name: dns 92 | protocol: UDP 93 | - containerPort: 53 94 | name: dns-tcp 95 | protocol: TCP 96 | - containerPort: 9153 97 | name: metrics 98 | protocol: TCP 99 | livenessProbe: 100 | httpGet: 101 | path: /health 102 | port: 8080 103 | scheme: HTTP 104 | initialDelaySeconds: 60 105 | timeoutSeconds: 5 106 | successThreshold: 1 107 | failureThreshold: 5 108 | dnsPolicy: Default 109 | volumes: 110 | - name: config-volume 111 | configMap: 112 | name: coredns 113 | items: 114 | - key: Corefile 115 | path: Corefile 116 | -------------------------------------------------------------------------------- /04-path-security-and-networking/406-coredns/templates/coredns-minikube.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: coredns 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | labels: 11 | kubernetes.io/bootstrapping: rbac-defaults 12 | name: system:coredns 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | - endpoints 18 | - services 19 | - pods 20 | - namespaces 21 | verbs: 22 | - list 23 | - watch 24 | --- 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | kind: ClusterRoleBinding 27 | metadata: 28 | annotations: 29 | rbac.authorization.kubernetes.io/autoupdate: "true" 30 | labels: 31 | kubernetes.io/bootstrapping: rbac-defaults 32 | name: system:coredns 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: ClusterRole 36 | name: system:coredns 37 | subjects: 38 | - kind: ServiceAccount 39 | name: coredns 40 | namespace: kube-system 41 | --- 42 | apiVersion: v1 43 | kind: ConfigMap 44 | metadata: 45 | name: coredns 46 | namespace: kube-system 47 | data: 48 | Corefile: | 49 | .:53 { 50 | errors 51 | log stdout 52 | health 53 | kubernetes cluster.local 10.0.0.0/24 54 | proxy . 
/etc/resolv.conf 55 | cache 30 56 | } 57 | --- 58 | apiVersion: apps/v1 59 | kind: Deployment 60 | metadata: 61 | name: coredns 62 | namespace: kube-system 63 | labels: 64 | k8s-app: coredns 65 | kubernetes.io/cluster-service: "true" 66 | kubernetes.io/name: "CoreDNS" 67 | spec: 68 | replicas: 1 69 | selector: 70 | matchLabels: 71 | k8s-app: coredns 72 | template: 73 | metadata: 74 | labels: 75 | k8s-app: coredns 76 | annotations: 77 | scheduler.alpha.kubernetes.io/critical-pod: '' 78 | scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]' 79 | spec: 80 | serviceAccountName: coredns 81 | containers: 82 | - name: coredns 83 | image: coredns/coredns:latest 84 | imagePullPolicy: Always 85 | args: [ "-conf", "/etc/coredns/Corefile" ] 86 | volumeMounts: 87 | - name: config-volume 88 | mountPath: /etc/coredns 89 | ports: 90 | - containerPort: 53 91 | name: dns 92 | protocol: UDP 93 | - containerPort: 53 94 | name: dns-tcp 95 | protocol: TCP 96 | - containerPort: 9153 97 | name: metrics 98 | protocol: TCP 99 | livenessProbe: 100 | httpGet: 101 | path: /health 102 | port: 8080 103 | scheme: HTTP 104 | initialDelaySeconds: 60 105 | timeoutSeconds: 5 106 | successThreshold: 1 107 | failureThreshold: 5 108 | dnsPolicy: Default 109 | volumes: 110 | - name: config-volume 111 | configMap: 112 | name: coredns 113 | items: 114 | - key: Corefile 115 | path: Corefile 116 | -------------------------------------------------------------------------------- /04-path-security-and-networking/406-coredns/templates/coredns-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: coredns 8 | kubernetes.io/cluster-service: "true" 9 | kubernetes.io/name: "CoreDNS" 10 | spec: 11 | selector: 12 | k8s-app: coredns 13 | clusterIP: 100.64.0.10 14 | ports: 15 | - name: dns 16 | port: 53 17 | protocol: UDP 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | - name: metrics 22 | port: 9153 23 | protocol: TCP 24 | -------------------------------------------------------------------------------- /04-path-security-and-networking/406-coredns/templates/kubedns-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-addon: kube-dns.addons.k8s.io 8 | k8s-app: kube-dns 9 | kubernetes.io/cluster-service: "true" 10 | kubernetes.io/name: KubeDNS 11 | spec: 12 | selector: 13 | k8s-app: kube-dns 14 | clusterIP: 100.64.0.10 15 | ports: 16 | - name: dns 17 | port: 53 18 | protocol: UDP 19 | targetPort: 53 20 | - name: dns-tcp 21 | port: 53 22 | protocol: TCP 23 | targetPort: 53 -------------------------------------------------------------------------------- /05-path-next-steps/502-for-further-reading/readme.adoc: -------------------------------------------------------------------------------- 1 | = For Further Reading 2 | :toc: 3 | :icons: 4 | :linkcss: 5 | :imagesdir: ../../resources/images 6 | 7 | Congratulations! You have completed the Kubernetes the AWSome Way! workshop. 8 | We hope you have enjoyed learning more about Kubernetes on AWS. 9 | 10 | Please remember to clean up any resources spun up during this workshop! 
11 | 12 | You can find instructions for Workshop Cleanup in relevant Index pages 13 | 14 | == Further Reading 15 | 16 | * link:https://aws.amazon.com/eks[Amazon Elastic Container Service for Kubernetes (EKS)] 17 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.adoc: -------------------------------------------------------------------------------- 1 | == Code of Conduct 2 | This project has adopted the link:https://aws.github.io/code-of-conduct[Amazon Open Source Code of Conduct]. 3 | For more information see the link:https://aws.github.io/code-of-conduct-faq[Code of Conduct FAQ] or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | AWS Workshop for Kubernetes 2 | Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | -------------------------------------------------------------------------------- /developer-path.adoc: -------------------------------------------------------------------------------- 1 | = Kubernetes the AWSome Way! 2 | :icons: 3 | :linkattrs: 4 | :imagesdir: resources/images 5 | 6 | image:kubernetes-aws-smile.png[alt="kubernetes and aws logos", align="left",width=420] 7 | 8 | == Developer Path Index 9 | 10 | This path is suitable for individual developers or development teams interested in deploying, managing, and scaling cloud-native applications on Kubernetes. 11 | 12 | :frame: none 13 | :grid: none 14 | :valign: top 15 | :halign: center 16 | 17 | [cols="1*^",grid="cols",options="header"] 18 | |===== 19 | |anchor:dev[Developer Path]Developer Path 20 | |link:01-path-basics/101-start-here[101: Start Here] 21 | |link:01-path-basics/102-your-first-cluster[102: Create a Kubernetes cluster using kops] 22 | |link:01-path-basics/103-kubernetes-concepts[103: Introduction to the Kubernetes CLI] 23 | |link:03-path-application-development/301-local-development[301: Setting up a Local Development Environment] 24 | |link:03-path-application-development/302-app-discovery[302: Service Discovery for Microservices] 25 | |link:03-path-application-development/303-app-update[303: Updating Applications on Kubernetes] 26 | |link:03-path-application-development/304-app-scaling[304: Scaling Applications on Kubernetes] 27 | |link:03-path-application-development/305-app-tracing-with-jaeger-and-x-ray[305: Tracing Applications with Jaeger and X-Ray] 28 | |link:03-path-application-development/306-app-management-with-helm[306: Manage Applications with Helm] 29 | |link:03-path-application-development/307-statefulsets-and-pvs[307: Store Persistent Data with StatefulSets and PVs] 30 | |link:03-path-application-development/310-chaos-engineering[310: Applying Chaos Engineering] 31 | |link:04-path-security-and-networking/401-configmaps-and-secrets[401: ConfigMaps and Secrets] 32 | |link:04-path-security-and-networking/402-authentication-and-authorization[402: Authentication, Authorization, and Access] 33 | |link:04-path-security-and-networking/403-admission-policy[403: Admission Control for Kubernetes on AWS] 34 | |link:04-path-security-and-networking/404-network-policies[404: Network Policies] 35 | |link:04-path-security-and-networking/405-ingress-controllers[405: Ingress Controllers] 36 | |link:04-path-security-and-networking/406-coredns[406: CoreDNS] 37 | |link:05-path-next-steps/501-k8s-best-practices[501: Best 
Practices] 38 | |link:05-path-next-steps/502-for-further-reading[502: For Further Reading] 39 | |===== 40 | 41 | == Workshop Cleanup 42 | 43 | Once you have finished with the workshop, please don't forget to spin down your cluster or you will incur additional charges. 44 | (We will also remind you at the end!) 45 | 46 | ==== Delete Kubernetes cluster resources 47 | 48 | In your Cloud9 IDE, check if there are any running Kubernetes clusters 49 | 50 | $ kops get cluster 51 | 52 | Delete the Kubernetes cluster 53 | 54 | $ kops delete cluster example.cluster.k8s.local --yes 55 | 56 | Wait until all resources are deleted by kops 57 | 58 | ==== Delete Cloud9 Environment 59 | 60 | Go to the CloudFormation console, right-click the stack named 'k8s-workshop', and select 'Delete Stack' 61 | 62 | This should delete all the resources associated with this workshop. 63 | 64 | NOTE: You will incur charges as you go through these workshop guides as it will exceed the link:http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/free-tier-limits.html[limits of AWS free tier]. An estimate of charges (<$20/day) can be seen at this link:https://calculator.s3.amazonaws.com/index.html#r=FRA&s=EC2&key=calc-E6DBD6F1-C45D-4827-93F8-D9B18C5994B0[simple monthly calculator] 65 | -------------------------------------------------------------------------------- /operations-path.adoc: -------------------------------------------------------------------------------- 1 | = Kubernetes the AWSome Way! 2 | :icons: 3 | :linkattrs: 4 | :imagesdir: resources/images 5 | 6 | image:kubernetes-aws-smile.png[alt="kubernetes and aws logos", align="left",width=420] 7 | 8 | == Operations Path Index 9 | 10 | This path is suitable for operations teams interested in deploying, managing, and scaling clusters with Kubernetes.
11 | 12 | :frame: none 13 | :grid: none 14 | :valign: top 15 | :halign: center 16 | 17 | [cols="1*^",grid="cols",options="header"] 18 | |===== 19 | |anchor:ops[Operations Path]Operations Path 20 | |link:01-path-basics/101-start-here[101: Start Here] 21 | |link:01-path-basics/102-your-first-cluster[102: Create a Kubernetes cluster using kops] 22 | |link:01-path-basics/103-kubernetes-concepts[103: Introduction to the Kubernetes CLI] 23 | |link:02-path-working-with-clusters/201-cluster-monitoring[201: Monitoring a Kubernetes Cluster] 24 | |link:02-path-working-with-clusters/202-service-mesh[202: Leveraging a Service Mesh] 25 | |link:02-path-working-with-clusters/203-cluster-upgrades[203: Upgrading a Kubernetes Cluster] 26 | |link:02-path-working-with-clusters/204-cluster-logging-with-EFK[204: Logging with an EFK Stack] 27 | |link:02-path-working-with-clusters/205-cluster-autoscaling[205: Autoscaling a Kubernetes Cluster] 28 | |link:02-path-working-with-clusters/206-cloudformation-and-terraform[206: Deploy Kubernetes with Terraform and CloudFormation] 29 | |link:04-path-security-and-networking/401-configmaps-and-secrets[401: ConfigMaps and Secrets] 30 | |link:04-path-security-and-networking/402-authentication-and-authorization[402: Authentication, Authorization, and Access] 31 | |link:04-path-security-and-networking/403-admission-policy[403: Admission Control for Kubernetes on AWS] 32 | |link:04-path-security-and-networking/404-network-policies[404: Network Policies] 33 | |link:04-path-security-and-networking/405-ingress-controllers[405: Ingress Controllers] 34 | |link:04-path-security-and-networking/406-coredns[406: CoreDNS] 35 | |link:05-path-next-steps/501-k8s-best-practices[501: Best Practices] 36 | |link:05-path-next-steps/502-for-further-reading[502: For Further Reading] 37 | |===== 38 | 39 | == Workshop Cleanup 40 | 41 | Once you have finished with the workshop, please don't forget to spin down your cluster or you will incur additional charges. 42 | (We will also remind you at the end!) 43 | 44 | ==== Delete Kubernetes cluster resources 45 | 46 | In your Cloud9 IDE, check if there are any running Kubernetes clusters 47 | 48 | $ kops get cluster 49 | 50 | Delete the Kubernetes cluster 51 | 52 | $ kops delete cluster example.cluster.k8s.local --yes 53 | 54 | Wait until all resources are deleted by kops 55 | 56 | ==== Delete Cloud9 Environment 57 | 58 | Go to the CloudFormation console, right-click the stack named 'k8s-workshop', and select 'Delete Stack' 59 | 60 | This should delete all the resources associated with this workshop. 61 | 62 | NOTE: You will incur charges as you go through these workshop guides as it will exceed the link:http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/free-tier-limits.html[limits of AWS free tier]. An estimate of charges (<$20/day) can be seen at this link:https://calculator.s3.amazonaws.com/index.html#r=FRA&s=EC2&key=calc-E6DBD6F1-C45D-4827-93F8-D9B18C5994B0[simple monthly calculator] 63 | -------------------------------------------------------------------------------- /readme.adoc: -------------------------------------------------------------------------------- 1 | = Kubernetes the AWSome Way! 2 | :icons: 3 | :linkattrs: 4 | :imagesdir: resources/images 5 | 6 | image:stop_sign01.png[align="left",width=120] 7 | 8 | **This content is outdated and is no longer maintained.
Please go to https://www.eksworkshop.com/ for newest EKS tutorials!** 9 | 10 | image:kubernetes-aws-smile.png[alt="kubernetes and aws logos", align="left",width=420] 11 | 12 | This is a self-paced workshop designed for Development and Operations teams who would like to leverage Kubernetes on Amazon Web Services (AWS). 13 | 14 | This workshop provides instructions to create, manage, and scale a Kubernetes cluster on AWS, as well as how to deploy applications, scale them, run stateless and stateful containers, perform service discovery between different microservices, and other similar concepts. 15 | 16 | It also shows deep integration with several AWS technologies. 17 | 18 | We recommend at least 2 hours to complete the workshop. 19 | 20 | Click the button below to start! 21 | 22 | image::button-start-standard.png[link=01-path-basics/101-start-here/] 23 | 24 | == Extended Paths 25 | 26 | The workshop also contains extended paths for Developers or Operations Engineers. 27 | The extended paths go into much greater detail regarding features and capabilities of Kubernetes specifically for those Teams. 28 | We recommend at least 4 hours for each of the extended paths. 29 | 30 | Click on one of the two buttons below to start! 31 | 32 | :frame: none 33 | :grid: none 34 | :valign: top 35 | 36 | [align="center", cols="2*", grid="none", frame="none"] 37 | |===== 38 | |image:button-start-developer.png[link=01-path-basics/101-start-here] 39 | |image:button-start-operations.png[link=01-path-basics/101-start-here] 40 | |===== 41 | 42 | 43 | NOTE: You will incur charges as you go through these workshop guides as it will exceed the link:http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/free-tier-limits.html[limits of AWS free tier]. An estimate of charges (<$20/day) can be seen at this link:https://calculator.s3.amazonaws.com/index.html#r=FRA&s=EC2&key=calc-E6DBD6F1-C45D-4827-93F8-D9B18C5994B0[simple monthly calculator] 44 | 45 | === Participation 46 | 47 | You can share this workshop via https://amzn.to/k8s-on-aws. We encourage participation; if you find anything, please submit an issue. However, if you want to help raise the bar, **submit a PR**! 48 | -------------------------------------------------------------------------------- /resources/abstract.adoc: -------------------------------------------------------------------------------- 1 | = Workshop Abstract 2 | 3 | == Title 4 | 5 | Kubernetes the AWSome Way! 6 | 7 | == Abstract 8 | 9 | Kubernetes is a popular cloud-native open-source orchestration platform for container management, scaling and automated deployment. It includes a rich set of features such as service discovery, multi-tenancy, stateful containers, resource usage monitoring, and rolling updates. This workshop will get you started with operating a Kubernetes cluster on AWS. In addition, it also explains how to deploy applications to this cluster. 10 | 11 | Some of the questions that will be covered in this workshop are: 12 | 13 | . How do we create and manage a Kubernetes cluster? 14 | . How an application is mapped to Kubernetes abstractions? 15 | . How does service discovery work between different applications? 16 | . How to scale, generate logs and monitor an application? 17 | . How to create a CI/CD pipeline? 18 | . How do we integrate with tools such as Maven? 19 | . How applications can store configuration data and secrets? 20 | . How to use IAM for authentication and authorization? 
21 | 22 | In this code-driven workshop, you will learn how to package, deploy, scale and monitor your application using Kubernetes and the AWS cloud. 23 | 24 | -------------------------------------------------------------------------------- /resources/images/autoscalingdash.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/autoscalingdash.png -------------------------------------------------------------------------------- /resources/images/aws-kms-create-key.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/aws-kms-create-key.png -------------------------------------------------------------------------------- /resources/images/aws-kms-key-admins.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/aws-kms-key-admins.png -------------------------------------------------------------------------------- /resources/images/aws-kms-key-usage-perms.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/aws-kms-key-usage-perms.png -------------------------------------------------------------------------------- /resources/images/button-continue-developer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/button-continue-developer.png -------------------------------------------------------------------------------- /resources/images/button-continue-operations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/button-continue-operations.png -------------------------------------------------------------------------------- /resources/images/button-continue-standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/button-continue-standard.png -------------------------------------------------------------------------------- /resources/images/button-start-developer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/button-start-developer.png -------------------------------------------------------------------------------- /resources/images/button-start-operations.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/button-start-operations.png -------------------------------------------------------------------------------- 
/resources/images/button-start-standard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/button-start-standard.png -------------------------------------------------------------------------------- /resources/images/caching-demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/caching-demo.png -------------------------------------------------------------------------------- /resources/images/cicd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/cicd.png -------------------------------------------------------------------------------- /resources/images/cloud9-development-environment-welcome.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/cloud9-development-environment-welcome.png -------------------------------------------------------------------------------- /resources/images/cloud9-development-welcome-screen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/cloud9-development-welcome-screen.png -------------------------------------------------------------------------------- /resources/images/cloud9-disable-temp-credentials.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/cloud9-disable-temp-credentials.png -------------------------------------------------------------------------------- /resources/images/cloud9-run-script.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/cloud9-run-script.png -------------------------------------------------------------------------------- /resources/images/cloudformation-output-tab.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/cloudformation-output-tab.png -------------------------------------------------------------------------------- /resources/images/coffeehouse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/coffeehouse.png -------------------------------------------------------------------------------- /resources/images/container-map.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/container-map.png -------------------------------------------------------------------------------- /resources/images/container-view.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/container-view.png -------------------------------------------------------------------------------- /resources/images/datadog-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/datadog-logo.png -------------------------------------------------------------------------------- /resources/images/datadogdashboards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/datadogdashboards.png -------------------------------------------------------------------------------- /resources/images/deploy-to-aws.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/deploy-to-aws.png -------------------------------------------------------------------------------- /resources/images/full-trace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/full-trace.png -------------------------------------------------------------------------------- /resources/images/go-to-redis-traces.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/go-to-redis-traces.png -------------------------------------------------------------------------------- /resources/images/hostmap.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/hostmap.png -------------------------------------------------------------------------------- /resources/images/infinite-demo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/infinite-demo.png -------------------------------------------------------------------------------- /resources/images/istio-sample-app-product-page.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/istio-sample-app-product-page.png -------------------------------------------------------------------------------- /resources/images/istio-trace.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/istio-trace.png -------------------------------------------------------------------------------- /resources/images/k8s-services.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/k8s-services.png -------------------------------------------------------------------------------- /resources/images/kubernetes-aws-smile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/kubernetes-aws-smile.png -------------------------------------------------------------------------------- /resources/images/kubernetes-dashboard-default.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/kubernetes-dashboard-default.png -------------------------------------------------------------------------------- /resources/images/linkerd-default-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/linkerd-default-dashboard.png -------------------------------------------------------------------------------- /resources/images/linkerd-viz.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/linkerd-viz.png -------------------------------------------------------------------------------- /resources/images/linkerd.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/linkerd.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-es-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-es-cluster.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-es-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-es-overview.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-es-subscribe-confirmation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-es-subscribe-confirmation.png -------------------------------------------------------------------------------- 
/resources/images/logging-cloudwatch-es-subscribe-filter-created.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-es-subscribe-filter-created.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-es-subscribe-iam.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-es-subscribe-iam.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-es-subscribe-log-format.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-es-subscribe-log-format.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-es-subscribe-start-streaming.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-es-subscribe-start-streaming.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-es-subscribe.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-es-subscribe.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-fluentd-stream.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-fluentd-stream.png -------------------------------------------------------------------------------- /resources/images/logging-cloudwatch-kibana-default.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logging-cloudwatch-kibana-default.png -------------------------------------------------------------------------------- /resources/images/logmonitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/logmonitor.png -------------------------------------------------------------------------------- /resources/images/minikube-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/minikube-dashboard.png -------------------------------------------------------------------------------- 
/resources/images/monitoring-grafana-dashboards-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-dashboards-cluster.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-dashboards-pods.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-dashboards-pods.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-dashboards.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-dashboards.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-prometheus-dashboard-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-prometheus-dashboard-1.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-prometheus-dashboard-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-prometheus-dashboard-2.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-prometheus-dashboard-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-prometheus-dashboard-3.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-prometheus-dashboard-capacity-planning.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-prometheus-dashboard-capacity-planning.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-prometheus-dashboard-cluster-status.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-prometheus-dashboard-cluster-status.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-prometheus-dashboard-control-plane-status.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-prometheus-dashboard-control-plane-status.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-prometheus-dashboard-dashboard-home.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-prometheus-dashboard-dashboard-home.png -------------------------------------------------------------------------------- /resources/images/monitoring-grafana-prometheus-dashboard-nodes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-grafana-prometheus-dashboard-nodes.png -------------------------------------------------------------------------------- /resources/images/monitoring-nodes-after.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-nodes-after.png -------------------------------------------------------------------------------- /resources/images/monitoring-nodes-before.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-nodes-before.png -------------------------------------------------------------------------------- /resources/images/monitoring-pods-after.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-pods-after.png -------------------------------------------------------------------------------- /resources/images/monitoring-pods-before.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/monitoring-pods-before.png -------------------------------------------------------------------------------- /resources/images/next-step-arrow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/next-step-arrow.png -------------------------------------------------------------------------------- /resources/images/nginx-pod-default-page.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/nginx-pod-default-page.png -------------------------------------------------------------------------------- /resources/images/nginx-welcome-page.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/nginx-welcome-page.png -------------------------------------------------------------------------------- /resources/images/redis-apm-monitor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/redis-apm-monitor.png -------------------------------------------------------------------------------- /resources/images/redis-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/redis-dashboard.png -------------------------------------------------------------------------------- /resources/images/redis-logs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/redis-logs.png -------------------------------------------------------------------------------- /resources/images/redis-traces.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/redis-traces.png -------------------------------------------------------------------------------- /resources/images/services.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/services.png -------------------------------------------------------------------------------- /resources/images/stop_sign01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/stop_sign01.png -------------------------------------------------------------------------------- /resources/images/traces.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/traces.png -------------------------------------------------------------------------------- /resources/images/webapp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/images/webapp.png -------------------------------------------------------------------------------- /resources/slides/kubecon-2017-austin.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/slides/kubecon-2017-austin.pptx -------------------------------------------------------------------------------- /resources/slides/slides-old.pptx: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/slides/slides-old.pptx -------------------------------------------------------------------------------- /resources/slides/slides-workshop.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-workshop-for-kubernetes/f9ee072711197d70f3b4a27a3d4ee8e089831168/resources/slides/slides-workshop.pptx -------------------------------------------------------------------------------- /resources/workshop-prereqs.adoc: -------------------------------------------------------------------------------- 1 | :linkattrs: 2 | 3 | = Workshop Prereqs 4 | 5 | Each attendee needs an https://aws.amazon.com/resources/create-account/[AWS Account] to get 6 | started with the workshop. We recommend to create an Account at least few days before the event. 7 | Here is the info on creating and activating your AWS Account: https://aws.amazon.com/premiumsupport/knowledge-center/create-and-activate-aws-account/ 8 | 9 | As an additional step, we recommend to go through 101 documentation and create your Cloud9 10 | environment so that you can get started quickly with your learning paths. Here is the info on 11 | creating Cloud9 for this workshop: link:../01-path-basics/101-start-here[101 start here] 12 | 13 | ProTip! If you are already familiar with Kubernetes cluster concepts, follow 102 and link:../01-path-basics/102-your-first-cluster[create your first cluster] 14 | 15 | Note: you will incur charges as you go through the workshop guides because it will exceed the 16 | limits of http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/free-tier-limits.html[AWS free tier]. 17 | An estimate of charges (<$20/day) can be seen at this https://calculator.s3.amazonaws.com/index.html#r=FRA&s=EC2&key=calc-E6DBD6F1-C45D-4827-93F8-D9B18C5994B0[simple monthly calculator] 18 | -------------------------------------------------------------------------------- /standard-path.adoc: -------------------------------------------------------------------------------- 1 | = Kubernetes the AWSome Way! 
2 | :icons: 3 | :linkattrs: 4 | :imagesdir: resources/images 5 | 6 | image:kubernetes-aws-smile.png[alt="kubernetes and aws logos", align="left",width=420] 7 | 8 | == Standard Path Index 9 | 10 | :frame: none 11 | :grid: none 12 | :valign: top 13 | :halign: center 14 | 15 | [cols="1*^",grid="cols",options="header"] 16 | |===== 17 | |anchor:standard[Standard Path]Standard Path 18 | |link:01-path-basics/101-start-here[101: Start Here] 19 | |link:01-path-basics/102-your-first-cluster[102: Create a Kubernetes cluster using kops] 20 | |link:01-path-basics/103-kubernetes-concepts[103: Introduction to the Kubernetes CLI] 21 | |link:02-path-working-with-clusters/201-cluster-monitoring[201: Monitoring a Kubernetes Cluster] 22 | |link:02-path-working-with-clusters/202-service-mesh[202: Leveraging a Service Mesh] 23 | |link:02-path-working-with-clusters/203-cluster-upgrades[203: Upgrading a Kubernetes Cluster] 24 | |link:03-path-application-development/302-app-discovery[302: Service Discovery for Microservices] 25 | |link:03-path-application-development/303-app-update[303: Updating Applications on Kubernetes] 26 | |link:03-path-application-development/304-app-scaling[304: Scaling Applications on Kubernetes] 27 | |link:05-path-next-steps/502-for-further-reading[502: For Further Reading] 28 | |===== 29 | 30 | 31 | == Workshop Cleanup 32 | 33 | Once you have finished with the workshop, please don't forget to spin down your cluster or you will incur additional charges. 34 | (We will also remind you at the end!) 35 | 36 | ==== Delete Kubernetes cluster resources 37 | 38 | In your Cloud9 IDE, check if there are any running Kubernetes clusters 39 | 40 | $ kops get cluster 41 | 42 | Delete the Kubernetes cluster 43 | 44 | $ kops delete cluster example.cluster.k8s.local --yes 45 | 46 | Wait until all resources are deleted by kops 47 | 48 | ==== Delete Cloud9 Environment 49 | 50 | Go to the CloudFormation console, right-click the stack named 'k8s-workshop', and select 'Delete Stack' 51 | 52 | This should delete all the resources associated with this workshop. 53 | 54 | NOTE: You will incur charges as you go through these workshop guides as it will exceed the link:http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/free-tier-limits.html[limits of AWS free tier]. An estimate of charges (<$20/day) can be seen at this link:https://calculator.s3.amazonaws.com/index.html#r=FRA&s=EC2&key=calc-E6DBD6F1-C45D-4827-93F8-D9B18C5994B0[simple monthly calculator] 55 | --------------------------------------------------------------------------------
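The Workshop Cleanup steps in the path index pages above can also be scripted instead of performed in the console. This is a minimal sketch, assuming the cluster name (example.cluster.k8s.local) and CloudFormation stack name ('k8s-workshop') used in those cleanup sections, and an AWS CLI configured with permission to delete that stack:

    # Delete the kops-managed cluster and all the AWS resources it created
    $ kops delete cluster example.cluster.k8s.local --yes

    # Delete the CloudFormation stack that created the Cloud9 environment
    $ aws cloudformation delete-stack --stack-name k8s-workshop

    # Block until CloudFormation reports the stack deletion as complete
    $ aws cloudformation wait stack-delete-complete --stack-name k8s-workshop

The wait command simply polls until the stack is gone, which is a reasonable signal that the workshop resources have been removed.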