├── .github └── FUNDING.yml ├── Birth_of_Containerization └── README.md ├── CNAME ├── Calico ├── Operator │ ├── Calico-lab-07.md │ ├── Calico-lab-6.md │ ├── Lab-Environment.md │ ├── Prerequisites.md │ ├── Q1.md │ ├── Validating-Installation.md │ ├── calcio-lab-5.md │ ├── calico-install-way.md │ ├── calico-lab-1.md │ ├── calico-lab-2.md │ ├── calico-lab-3.md │ ├── calico-lab-4.md │ ├── install-multipass.md │ └── readme.md └── readme.md ├── Containerd └── Readme.md ├── DCA └── readme.md ├── Docker ├── Dev │ ├── Basic-Concepts.md │ ├── Dependency-conflict.md │ ├── DevopsDocker.md │ ├── README.md │ ├── Run-first-container.md │ ├── Scaling-up.md │ ├── SeamlessUpgrades.md │ └── Various-Products-Need.md ├── Overview │ ├── Docker-Networking.md │ ├── Docker-Stacks.md │ ├── Docker-Volumes.md │ ├── Docker-swarm.md │ ├── Installing_Docker.md │ ├── README.md │ ├── Understanding-DockerFile-Deep-Drive.md │ ├── Understanding_Docker.md │ └── docker-compose.md └── workshop │ └── dockerfile │ ├── add1-demo │ └── Dockerfile │ ├── add2-demo │ ├── Dockerfile │ └── app-folder.zip │ ├── add3-demo │ └── Dockerfile │ ├── arg-demo │ └── Dockerfile │ ├── arg3-demo │ ├── Dockerfile │ └── hello │ ├── copy-demo │ ├── Dockerfile │ ├── file1 │ ├── file2 │ ├── file3 │ ├── folder1 │ │ ├── folderfile1 │ │ └── folderfile2 │ └── special1 │ ├── entrypoint-demo │ └── Dockerfile │ ├── entrypoint2-demo │ └── Dockerfile │ ├── expose-demo │ └── Dockerfile │ ├── from-demo │ ├── Dockerfile │ └── hello │ ├── healthcheck-demo │ └── Dockerfile │ ├── label-demo │ ├── .DS_Store │ ├── Dockerfile │ └── hello │ ├── onbuild-my-base-demo │ └── Dockerfile │ ├── run-demo │ └── Dockerfile │ ├── user-demo │ └── Dockerfile │ ├── volume-demo │ └── Dockerfile │ └── workdir-demo │ └── Dockerfile ├── Gemfile ├── Kubernetes ├── beginner │ ├── Canary-Deployment-Strategy.md │ ├── Deployment-process.md │ ├── History-Deployment-Processes.md │ ├── Monitoring-Health.md │ ├── README.md │ ├── Sequential-Breakdown-of-the-Process.md │ ├── Start-ReplicaSet.md │ ├── The-Schedulers.md │ ├── blue-Green-Release-Strategy.md │ ├── componets-stages-pod-scheduling.md │ ├── creating-service.md │ ├── demo.html │ ├── deploy-releases.md │ ├── deploying-releases.md │ ├── history-of-infra.md │ ├── kubectl.md │ ├── minikube-cluster.md │ ├── minikube-cmd.md │ ├── minikube.md │ ├── multi-container.md │ ├── playing-with-running-pod.md │ ├── pod-declarative-syntax.md │ ├── public │ │ ├── overrides.css │ │ └── styles.css │ ├── run-pods.md │ ├── service-type.md │ ├── start-with-ingress.md │ ├── start-with-pods.md │ ├── updating-deployment.md │ ├── whatk8s.md │ └── zero-downtime-rolling-update.md ├── fundamentals │ ├── Accessing-specific-logs.md │ ├── DaemonSet.md │ ├── Deployments.md │ ├── Introduction-to-kubernetes.md │ ├── Kubernetes-Scheduler.md │ ├── Kubernetes_Configmap.md │ ├── Kubernetes_Secret.md │ ├── Pod.md │ ├── PodSecurityPolicy.md │ ├── README.md │ ├── ReplicaSet.md │ ├── ReplicationController.md │ ├── StatefulSets.md │ ├── etcd-k8s.md │ ├── jobs-cronjobs.md │ ├── minikube_on_mac.md │ ├── play-with-k8s.md │ ├── pod-agent-sidecar-logging.md │ ├── pod-logging-sidecar.md │ ├── pod-logging-volum.md │ ├── pod-logs-counter.md │ ├── pod-security-1.md │ └── security-attri-pod.md ├── k3s │ ├── README.md │ └── civo │ │ └── installing-civo.md └── yml-sample │ ├── README.md │ ├── broken-init-container │ └── init-container.yaml │ ├── broken-liveness │ └── liveness.yaml │ ├── broken-pods │ ├── bad-command.yaml │ ├── default-shell-command.yaml │ ├── failed-command.yaml │ ├── 
misused-command.yaml │ ├── multi-container-no-command.yaml │ ├── no-command.yaml │ ├── oom-killed.yaml │ ├── private-repo.yaml │ └── too-much-mem.yaml │ ├── broken-readiness │ ├── readiness-broken │ │ └── readiness-broken.yaml │ └── readiness.yaml │ ├── broken-secrets │ └── simple-secret.yaml │ ├── cronjob │ └── simple.yaml │ ├── daemon-set │ └── simple-daemon-set.yaml │ ├── deployments │ └── simple-deployment.yaml │ ├── dns-config │ ├── dns-config.yaml │ └── policy.yaml │ ├── dns-debug │ └── dns-debug.yaml │ ├── headless-service │ └── headless-service.yaml │ ├── ingress │ ├── README.md │ ├── fanout.yaml │ ├── ingress-class.yaml │ ├── ingress.yaml │ ├── nohost.yaml │ ├── rewrite.yaml │ ├── tls.yaml │ └── virtualhosting.yaml │ ├── init-container │ ├── init-container-msg.yaml │ └── init-container.yaml │ ├── jobs │ ├── simple.yaml │ ├── timeout.yaml │ └── timetolive.yaml │ ├── lifecycle │ └── lifecycle.yaml │ ├── liveness │ ├── advanced-liveness.yaml │ └── liveness.yaml │ ├── memory-request │ └── memory-request-limit.yaml │ ├── namespace │ └── namespace.yaml │ ├── network-policy │ ├── README.md │ ├── default-allow-egress.yaml │ ├── default-allow-ingress.yaml │ ├── default-deny-all.yaml │ ├── default-deny-egress.yaml │ ├── default-deny-ingress.yaml │ └── policy.yaml │ ├── persistent-volumes │ └── README.md │ ├── pod-security-policy │ ├── README.md │ ├── privileged.yaml │ ├── psp.yaml │ └── restricted.yaml │ ├── pods │ ├── README.md │ ├── host-aliases.yaml │ ├── imagepullsecret.yaml │ ├── multi-container.yaml │ └── simple.yaml │ ├── privileged │ ├── README.md │ ├── namespace.yaml │ └── simple.yaml │ ├── rbac │ ├── README.md │ ├── cluster-role-binding.yaml │ ├── cluster-role.yaml │ ├── role-binding.yaml │ └── role.yaml │ ├── readiness │ └── readiness.yaml │ ├── resource-quotas │ └── quotas.yaml │ ├── resources │ ├── resource-limit.yaml │ └── resource-request.yaml │ ├── secrets │ └── simple-secret.yaml │ ├── service-topologies │ └── fallback.yaml │ ├── services │ ├── external-ips.yaml │ ├── external-name.yaml │ ├── load-balancer.yaml │ ├── multi-port-service.yaml │ ├── node-port.yaml │ ├── service-and-endpoint.yaml │ └── simple.yaml │ ├── statefulset │ └── simple-stateful-set.yaml │ ├── subdomain │ ├── README.md │ └── simple.yaml │ ├── topology-spread-constraints │ ├── topology-spread-constraints-with-node-affinity.yaml │ └── topology-spread-constraints.yaml │ ├── volumes │ ├── README.md │ ├── configmap.yaml │ ├── emptydir.yaml │ ├── file-or-create.yaml │ ├── hostdir.yaml │ ├── local.yaml │ ├── projected.yaml │ ├── sa-token.yaml │ ├── subpath.yaml │ └── subpathexpr.yaml │ └── webserver │ └── simple.yaml ├── LICENSE ├── LXC └── Linux Containers │ ├── Installing LXC-on-Ubuntu-from-source.md │ ├── Installing-and-Running-LXC-on-Linux-Systems.md │ ├── Limiting-IO-throughput.md │ ├── Limiting-memory-usage.md │ ├── Linux-namespaces–the-foundation-of-LXC.md │ ├── Managing_resources_with_systemd.md │ ├── Mount-namespaces.md │ ├── Network_namespaces.md │ ├── PID-namespace.md │ ├── README.md │ ├── Resource_management_with_cgroups.md │ ├── The-OS-kernel-its-early-limitations.md │ ├── The-case-for-Linux containers.md │ ├── The-cgroup-freezer-subsystem.md │ ├── The-cpu-and-cpuset-subsystems.md │ ├── UTS-namespaces.md │ ├── User_namespaces.md │ ├── Using-userspace-tools-to-manage-cgroups-persist-changes.md │ └── introduction-to-lxc.md ├── Okteto ├── README.md ├── first-pod.md └── intro-okteto.md ├── README.md ├── _config.yml ├── container-security └── readme.md ├── img ├── Cluster-IP.png ├── 
Container-to-Container.png ├── ContainerLabs-official.png ├── ContainerLabs.png ├── Contributor.png ├── Deploy-po-container.png ├── Deployement-Sequence.png ├── Deployement-containerlabs.png ├── Devops-chain.png ├── DockerAchitecture.png ├── Docker_Daemon.png ├── Ingress-traffic.png ├── NodePort-flow.png ├── Pod-scheduling-sequence.png ├── R-Bridge-Networking.png ├── README.md ├── Recreate-update.png ├── Rolling_update.png ├── UDP.png ├── Virtualization.png ├── WordpressDev.png ├── WordpressDev2.png ├── WordpressDockerReverse.png ├── WordpressDockerServer.png ├── WordpressReverse.png ├── WordpressupdatedContainer.png ├── akmsymlk8s.png ├── bare-metal-old.png ├── blkio1.jpg ├── blue-green.png ├── canary-dep.png ├── cgroup1.jpg ├── configmap-diagram.gif ├── connection.png ├── containerlabs-logo.png ├── containerlabs-replicaset.png ├── containerlabs_banner.jpg ├── containerlabs_logo.png ├── containers-simple.png ├── daemonsetvs.png ├── docker-driver-network.png ├── docker-page-traefik.jpg ├── docker-swarm.png ├── dockerfile-doc.png ├── everyserver.png ├── four-components-of-computer-system.png ├── health-page-traefik.jpg ├── host-gw.png ├── how-its-work-okteto.png ├── image-container.png ├── k8s-Xnet-pod.png ├── k8s-net-pod.png ├── k8s_arch_new.png ├── k8snet.png ├── kube-scheduler-blue.png ├── kube-scheduler-green-table.png ├── kube-scheduler-ocean.png ├── kube-scheduler-tainted.png ├── kube-scheduler-toleration.png ├── kube-scheduler-waiter.png ├── kube-scheduler.png ├── kubernetes-master-node.png ├── load-balancer.png ├── master-node-k8s.png ├── microservice.png ├── minikube-internal.png ├── monolith.png ├── mount-app1.jpg ├── node-cluster.png ├── node-container-scheduler.png ├── ovs1bridge.jpg ├── playwithk8s-login.png ├── pod-log-sidecar.png ├── pod-node.png ├── pod-single-container.png ├── pod-with-failed-container.png ├── pods-k8s.png ├── pwk-start.png ├── rancher_host_net.png ├── replica-cotroller.png ├── replicaset-controller-sequence.png ├── replicaset-service.png ├── replicasetvs.png ├── running-first-container.png ├── traefik-logo.png ├── virtualizationvscontainerlization.png └── worker-node.png ├── monthly-github └── awesome-github.md ├── rancher ├── Networking │ ├── Bridge_Networking.md │ ├── CNI-Intro.md │ ├── Custom-Bridge-Network.md │ ├── How_a_Network_Policy_Works.md │ ├── Interlude_Netfilter_iptables_rules.md │ ├── Introduction_to_Kubernetes_Networking.md │ ├── Introduction_to_Networking_with_Docker.md │ ├── Kubernetes_Service.md │ ├── Network_Policy.md │ ├── Networking-with-Calico.md │ ├── Networking-with-Flannel.md │ ├── README.md │ └── pod_networking.md └── beginner │ └── README.md ├── sponsorship ├── img │ ├── LO_Horizontal_Full Colour.png │ └── list.md └── readme.md └── traefik └── fundamentals ├── README.md ├── taefikv1-Intro.md └── traefikv2_intro.md /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: containerlabs 6 | ko_fi: sangam 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: 
['paypal.me/sangambiradar'] 13 | 14 | # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 15 | -------------------------------------------------------------------------------- /Birth_of_Containerization/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Birth Of Containerization ( For Software Industry ) 4 | nav_order: 2 5 | --- 6 | # Birth Of Containerization ( For Software Industry ) 7 | 8 | 9 | The word "container" itself is not new: when the shipping industry was booming, it was the idea of Malcom McLean, who deserves to be called the "Father of Containerization", and many companies started shipping their goods this way. [Check this Video](https://www.youtube.com/watch?v=Gn7IoT_WSRA&t=89s){: .btn .btn-outline } 10 | 11 | Imagine that you have a company in Europe and you want to transport your products to China by sea. You find a ship of an adequate size and load it with your cargo. 12 | 13 | 14 | 15 | 16 | # How Is It Similar To The Software Industry? 17 | 18 | Metaphor: In the software world, the ship is your server. 19 | 20 | Did you know that containers have been around for a very long time? 21 | 22 | First experiments 23 | {: .label .label-blue } 24 | 25 | - [IBM VM/370 (1972)](https://en.wikipedia.org/wiki/VM_%28operating_system%29){: .btn .btn-outline } 26 | - [FreeBSD jails (1999-2000)](https://www.freebsd.org/cgi/man.cgi?query=jail&sektion=8&manpath=FreeBSD+4.0-RELEASE){: .btn .btn-outline } 27 | - [Linux VServers (2001)](https://web.archive.org/web/20160411192058/http://www.solucorp.qc.ca/changes.hc?projet=vserver){: .btn .btn-outline } 28 | - [Solaris Containers (2004)](https://en.wikipedia.org/wiki/Solaris_Containers){: .btn .btn-outline } 29 | - [OpenVZ (Open Virtuozzo) (2005)](https://www.virtuozzo.com/about/open-source.html){: .btn .btn-outline } 30 | - [Process Containers (2006)](https://www.kernel.org/doc/ols/2007/ols2007v2-pages-45-58.pdf){: .btn .btn-outline } 31 | 32 | Stable implementations after the cgroups and namespaces kernel features 33 | {: .label .label-blue } 34 | 35 | - [LXC (2008) - First Linux container manager](https://github.com/lxc/lxc){: .btn .btn-outline } 36 | - [Warden (2011) / CloudFoundry](https://github.com/cloudfoundry-attic/warden){: .btn .btn-outline } 37 | - [LMCTFY (2013) / Currently part of the Open Container Initiative](https://github.com/opencontainers/runc/tree/master/libcontainer){: .btn .btn-outline } 38 | - [Docker (2013) - Currently Docker is the #1 most wanted platform](https://www.docker.com/){: .btn .btn-outline } 39 | - [The Importance of Container Security Is Revealed (2016)](https://blog.aquasec.com/dirty-cow-vulnerability-impact-on-containers){: .btn .btn-outline } 40 | - [Container Tools Become Mature (2017)](){: .btn .btn-outline } 41 | - [Adoption of rkt and containerd by CNCF (2017)](https://www.cncf.io/news/2017/03/30/new-stack-cncf-accepts-dockers-containerd-coreos-rkt-incubation-projects/){: .btn .btn-outline } 42 | - [Kubernetes Grows Up (2017)](https://blog.aquasec.com/dockercon-eu-2017){: .btn .btn-outline } 43 | - [The Gold Standard (2018)](https://github.com/kubernetes/kubernetes){: .btn .btn-outline } 44 | - [A Shifting Landscape (2019)](https://landscape.cncf.io/){: .btn .btn-outline } 45 | - [Container Predictions (2020)](https://www.devopsdigest.com/2020-devops-containers-kubernetes-predictions-2){: .btn .btn-outline } 46 | 47 | 48 | 49 | -------------------------------------------------------------------------------- /CNAME: 
-------------------------------------------------------------------------------- 1 | www.containerlabs.kubedaily.com -------------------------------------------------------------------------------- /Calico/Operator/Calico-lab-07.md: -------------------------------------------------------------------------------- 1 | To simulate a compromise of the customer pod, we will exec into the pod and attempt to access the database directly from there. 2 | 3 | Enter the customer pod 4 | 5 | First, we will find the customer pod name and store it in an environment variable to simplify future commands. 6 | 7 | CUSTOMER_POD=$(kubectl get pods -n yaobank -l app=customer -o name) 8 | Note that the CUSTOMER_POD environment variable only exists within your current shell, so if you exit that shell you must set it again in your new shell using the same command as above. 9 | 10 | Now we will exec into the customer pod, and run bash, to give us a command prompt within the pod: 11 | 12 | kubectl exec -it $CUSTOMER_POD -n yaobank -c customer -- /bin/bash 13 | Access the database 14 | 15 | From within the customer pod, we will now attempt to access the database directly, simulating an attack. As the pod is not secured with NetworkPolicy, the attack will succeed and the balance of all users will be returned. 16 | 17 | curl http://database:2379/v2/keys?recursive=true | python -m json.tool 18 | Leaving the customer pod 19 | 20 | To return from the pod back to our original host command line shell, use the exit command: 21 | 22 | exit 23 | -------------------------------------------------------------------------------- /Calico/Operator/Calico-lab-6.md: -------------------------------------------------------------------------------- 1 | You should already have the "Yet Another Online Bank" (yaobank) application installed, as created in the “Installing the Sample Application” module in Week 1. 2 | 3 | If you don’t already have it installed then go back and do so now. 4 | 5 | For reference, this is the architecture of YAOBank: 6 | 7 | web UI (customer) ---> business logic (summary) ---> data (database) 8 | 9 | If you are not already on host1, you can enter host1 by using the multipass shell command. 10 | ``` 11 | multipass shell host1 12 | ``` 13 | You can validate the yaobank installation by using kubectl get pods. 14 | ``` 15 | kubectl get pods -n yaobank 16 | Example output: 17 | 18 | NAME READY STATUS RESTARTS AGE 19 | database-6c5db58d95-slsph 1/1 Running 0 26m 20 | summary-85c56b76d7-28chk 1/1 Running 0 26m 21 | summary-85c56b76d7-l7rsv 1/1 Running 0 26m 22 | customer-574bd6cc75-5cwnn 1/1 Running 0 26m 23 | ``` 24 | -------------------------------------------------------------------------------- /Calico/Operator/Prerequisites.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Local Workstation / Laptop 6 | 7 | If you would like to use a local workstation, the following platforms have been validated: 8 | 9 | Linux with Snapcraft: https://snapcraft.io/docs/installing-snapd. 10 | Mac OS X Catalina. 11 | Windows 10. 12 | The minimum hardware requirements for your local workstation are as follows: 13 | 14 | 4 Cores. 15 | 12GB of RAM. 16 | 40GB of free disk/storage on your root drive. 17 | Note: All lab materials will be obtained from the internet. As such, deployment times may vary depending on the speed of your connection. 18 | 19 | Cloud Provider 20 | 21 | If you would like to use a Cloud Provider, you must ensure that the cloud provider and VM chosen support nested virtualization. 
The following platforms have been validated: 22 | 23 | Microsoft Azure 24 | The minimum hardware requirements for your cloud provider instance are as follows: 25 | 26 | Nested Virtualization. 27 | 4 vCPU. 28 | 12GB of RAM. 29 | 128GB of storage. 30 | Microsoft Azure 31 | 32 | The following image / machine types have been validated to complete the certification program. 33 | 34 | Ubuntu Server 18.04 LTS 35 | D4s_v3 - 4vCPU / 16G. 36 | -------------------------------------------------------------------------------- /Calico/Operator/Q1.md: -------------------------------------------------------------------------------- 1 | 2 | 1. What are the key principles of the Kubernetes networking model? 3 | 4 | Select all that apply: 5 | 6 | 7 | - [x] Every pod gets its own IP address 8 | - [x] Containers within a pod share the pod IP address 9 | - [x] Containers within a pod can communicate freely with each other 10 | - Pods are in the same subnet 11 | - [x] Pods can communicate with each other directly without NAT 12 | - Pods are in an overlay network 13 | - [x] Network isolation is provided by network policy 14 | - Pods can communicate with workloads outside of the cluster without NAT 15 | 16 | 17 | 2. Kubernetes supports network plugins using which APIs? 18 | 19 | Select all that apply: 20 | 21 | - Kubenet 22 | - [x] CNI 23 | - IPAM 24 | - REST 25 | 26 | 27 | 3. Kubernetes Services: 28 | 29 | Select all that apply: 30 | 31 | 32 | - [x] Can be thought of as a virtual load balancer built into the pod network 33 | - [x] Normally use label selectors to define which pods belong to a Service 34 | - [x] Are discoverable by pods through DNS (kube-dns) 35 | - Allow pods to communicate with each other without NAT 36 | - [x] May include external load balancers 37 | 38 | 39 | 40 | 4. Calico can be installed: 41 | 42 | 43 | Select all that apply: 44 | 45 | - [x] As part of hosted Kubernetes platform (e.g. EKS, AKS, GKE, IKS) 46 | - [x] As part of a kubernetes distro or installer (e.g. kops, kubespray, microk8s, etc) 47 | - [x] Using YAML manifests 48 | - [x] Using the Tigera Calico operator 49 | -------------------------------------------------------------------------------- /Calico/Operator/Validating-Installation.md: -------------------------------------------------------------------------------- 1 | 3. Validating the Lab Orchestrator Installation 2 | 3 | After installing multipass and restarting your workstation, the installation can be validated by running the following commands: 4 | ``` 5 | multipass launch -n test1 6 | ``` 7 | Once the command is completed, the output should be the following: 8 | ``` 9 | Launched: test1 10 | ``` 11 | If your virtual machine did not launch, multipass allows you to inspect information about the instance. 12 | ``` 13 | multipass info test1 14 | ``` 15 | The info command for multipass can give information about the test1 instance. 16 | 17 | If the instance is still starting, on some platforms multipass has issues starting the VM itself as part of the launch. We can force-start the instance by using the start command. 18 | ``` 19 | multipass start --all 20 | ``` 21 | If the VM does not show as having an IP address then there may be an incompatibility between multipass and your host network. If your network is slow then sometimes the multipass launch command can timeout and report failure while downloading the VM image, even though the VM creation is still in progress and may yet succeed. 22 | 23 | Once the virtual machine has launched, try to execute a shell for test1. 
24 | ``` 25 | multipass shell test1 26 | ``` 27 | A “Message of the Day” should display, with a command prompt at the very bottom: 28 | ``` 29 | 30 | To run a command as administrator (user "root"), use "sudo <command>". 31 | See "man sudo_root" for details. 32 | ubuntu@test1:~$ 33 | ``` 34 | At this prompt, you can type exit to log out. 35 | ``` 36 | exit 37 | ``` 38 | After the successful launch of the test1 instance, delete and purge it. 39 | ``` 40 | multipass delete test1 41 | multipass purge 42 | ``` 43 | -------------------------------------------------------------------------------- /Calico/Operator/calcio-lab-5.md: -------------------------------------------------------------------------------- 1 | Tearing Down and Recreating Your Lab 2 | 3 | If you get your lab into a bad state, you might find the quickest way to get back on track is to tear down and recreate your lab. 4 | 5 | Tear Down 6 | 7 | Start by deleting all instances: 8 | ``` 9 | multipass delete --all 10 | ``` 11 | Deleted instances are still stored in multipass, just in a deleted state. This is an implementation detail of multipass: the instances are gone from the virtualization layer, but still tracked by multipass. To fully delete the instances (similar to a trash / recycle bin on desktop platforms), we must purge them: 12 | ``` 13 | multipass purge 14 | ``` 15 | -------------------------------------------------------------------------------- /Calico/Operator/calico-install-way.md: -------------------------------------------------------------------------------- 1 | There are many different ways to install Calico. The Calico docs site includes recommended install options across a range of environments, so you don’t need to be an expert to get started. 2 | 3 | Broadly, there are four different approaches to installation. 4 | 5 | Manifest 6 | 7 | This is the most basic method for installing Calico. The Calico docs include a range of manifests for different environments. If you are an advanced user, you can customize the manifests to give you ultimate flexibility and control over your installation. 8 | 9 | Operator 10 | 11 | Calico 3.15 introduces the option to install Calico using an open-source operator, created by Tigera. This offers simplification for installing and configuring Calico without needing to customize manifests. Additionally, the operator allows you to have a uniform, self-healing environment. Using the Tigera operator is highly recommended. 12 | 13 | Managed Kubernetes Services 14 | 15 | Support for Calico is included with many of the most popular managed Kubernetes services (e.g. EKS, AKS, GKE, IKS), either enabled by default, or optionally enabled using the cloud provider’s management consoles or command line tools, depending on the specific managed Kubernetes service. 16 | 17 | Kubernetes Distros and Installers 18 | 19 | Many Kubernetes distros and installers include support for installing Calico (e.g. kops, kubespray, microk8s, etc). Most of these currently use manifest-based installs under the covers. 20 | -------------------------------------------------------------------------------- /Calico/Operator/calico-lab-2.md: -------------------------------------------------------------------------------- 1 | Installing the Sample Application 2 | 3 | If you are not already on host1, you can enter host1 by using the multipass shell command. 
4 | ``` 5 | multipass shell host1 6 | 7 | ``` 8 | To install yaobank into your kubernetes cluster, apply the following manifest: 9 | ``` 10 | kubectl apply -f https://raw.githubusercontent.com/tigera/ccol1/main/yaobank.yaml 11 | ``` 12 | -------------------------------------------------------------------------------- /Calico/Operator/calico-lab-3.md: -------------------------------------------------------------------------------- 1 | Verify the Sample Application 2 | 3 | Check the Deployment Status 4 | 5 | To validate that the application has been deployed into your cluster, we will check the rollout status of each of the microservices. 6 | 7 | Check the customer microservice: 8 | ``` 9 | kubectl rollout status -n yaobank deployment/customer 10 | Example output: 11 | 12 | deployment "customer" successfully rolled out 13 | ``` 14 | Check the summary microservice: 15 | ``` 16 | 17 | kubectl rollout status -n yaobank deployment/summary 18 | Example output: 19 | 20 | deployment "summary" successfully rolled out 21 | ``` 22 | Check the database microservice: 23 | ``` 24 | kubectl rollout status -n yaobank deployment/database 25 | Example output: 26 | 27 | deployment "database" successfully rolled out 28 | ``` 29 | Access the Sample Application Web GUI 30 | 31 | Now we can browse to the service using the service’s NodePort. The NodePort exists on every node in the cluster. We’ll use the control node, but you get the exact same behavior connecting to any other node in the cluster. 32 | ``` 33 | curl 198.19.0.1:30180 34 | The resulting output should contain the following balance information: 35 | 36 | 37 |
Welcome to YAO Bank 38 | Name: Spike Curtis 39 | Balance: 2389.45 40 | Log Out >>
41 | 42 | ``` 43 | Congratulations! You're ready to proceed to the next module: Managing Your Lab. 44 | -------------------------------------------------------------------------------- /Calico/Operator/calico-lab-4.md: -------------------------------------------------------------------------------- 1 | Stopping and Resuming Your Lab 2 | 3 | The multipass utility offers stop and start functionality for safely freezing and resuming instances. You can use this if desired to free up resources from your lab host machine while you aren’t working on the course. 4 | 5 | If you haven’t already done so, you can exit the host1 instance by using the exit command. 6 | 7 | exit 8 | Stop 9 | 10 | Stopping instances is the recommended way to manage your lab. This allows you to save your progress, and later continue from where you left off. To do this, stop all instances with the following command: 11 | ``` 12 | multipass stop --all 13 | ``` 14 | On some platforms, the stop action will crash and not proceed with shutting the instances down. This is an issue with Multipass that we expect to be resolved. 15 | 16 | List 17 | 18 | To confirm the stop has successfully completed, you can view the state of the instances managed by multipass using the list command: 19 | ``` 20 | multipass list 21 | Example output: 22 | 23 | Name State IPv4 Image 24 | control Stopped -- Ubuntu 20.04 LTS 25 | host1 Stopped -- Ubuntu 20.04 LTS 26 | node1 Stopped -- Ubuntu 20.04 LTS 27 | node2 Stopped -- Ubuntu 20.04 LTS 28 | ``` 29 | Start 30 | When you are ready to resume the course, you can simply start the instances from a suspended or stopped state. The commands to start all instances can be found below. We recommend starting the instances in this order to cause minimum disruption to the Kubernetes control plane. 31 | ``` 32 | multipass start control 33 | multipass start node1 34 | multipass start node2 35 | multipass start host1 36 | ``` 37 | Special Note Regarding Windows VMware Interactions 38 | 39 | On Windows, if you're running an old version of VMware Workstation/Player you may find that VMware cannot start your VMs after using multipass due to its use of Hyper-V. This has been fixed in the latest version of VMware Workstation and Windows 10. To work around this issue, after stopping your multipass VMs, you can switch the Hyper-V feature off using the following command: 40 | 41 | bcdedit /set hypervisorlaunchtype off 42 | After rebooting your computer, VMware should now be able to launch your virtual machines once again. To toggle the feature back on again to resume your certification lab you can use the following command: 43 | 44 | bcdedit /set hypervisorlaunchtype auto 45 | After rebooting your computer once again, you can start the instances in multipass as normal and continue the certification program labs. 46 | -------------------------------------------------------------------------------- /Calico/Operator/install-multipass.md: -------------------------------------------------------------------------------- 1 | Installing the Lab Orchestrator 2 | 3 | To begin, we will be installing Multipass. Multipass is a utility from Canonical that allows you to create Ubuntu VMs across a range of platforms in a uniform fashion. We recommend using the latest stable version of Multipass (version 1.4.0 at the time of writing). If any difficulty is encountered deploying the labs, please try this version of Multipass and let us know in the #academy slack channel that you encountered issues with a newer version. 
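Once Multipass is installed, you can confirm exactly which version you are running (a quick sanity check; the exact output format may differ slightly between releases):
```
multipass version
```
On a 1.4.0 installation this prints the client version (and, on most platforms, the multipassd daemon version) as 1.4.0.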
4 | 5 | Multipass installation instructions can be found here: https://multipass.run 6 | 7 | Note: If you're running on Windows, you must use the default Hyper-V option. In addition, note that if you are running an old version of VMware Workstation/Player on Windows, you may find that VMware cannot start VMs after using multipass due to its use of Hyper-V. If you experience this, please see the workaround instructions later in the "Managing Your Lab" module. 8 | 9 | Restart your workstation after installing Multipass and before installing the lab. 10 | -------------------------------------------------------------------------------- /Calico/Operator/readme.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Calico/readme.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Containerd/Readme.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /DCA/readme.md: -------------------------------------------------------------------------------- 1 | # Docker Certified Associate (DCA) 2 | 3 | ## Question 1: What are the steps to perform to sign images in a way that UCP trusts them? 4 | - A) Approve image on UCP 5 | - B) Initialize trust metadata for the repository 6 | - C) Delegate signing to the keys in your UCP Client Bundle 7 | - D) Configure the Notary client 8 | -------------------------------------------------------------------------------- /Docker/Dev/Basic-Concepts.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Basic Concepts 4 | parent: Docker For Developer 5 | nav_order: 6 6 | --- 7 | 8 | # Basic Concepts 9 | 10 | There are three concepts I need you to grasp before we begin: containers, images, and registries. 11 | 12 | 13 | ## Containers 14 | 15 | - A container is what we eventually want to run and host in Docker. You can 16 | think of it as an isolated machine, or a virtual machine if you prefer. 17 | 18 | - From a conceptual point of view, a container runs inside the Docker host isolated 19 | from the other containers and even the host OS. It cannot see the other containers, physical storage, or 20 | get incoming connections unless you explicitly state that it can. It contains everything it needs to run: OS, packages, 21 | runtimes, files, environment variables, standard input, and output. 22 | - Your typical Docker server would look like this — a host for many containers: 23 | 24 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/containers-simple.png) 25 | 26 | 27 | The fact that there are two app2 containers in the schema above is normal; this is typically the case when a server hosts a 28 | release and a test version, which means you could host both versions on the same server. 29 | In fact, each container has its own ID, but let’s keep things simple for now. 30 | 31 | 32 | ## Images 33 | Any container that runs is created from an image. An image describes everything that is needed to 34 | create a container; it is a template for containers. You may create as many containers as needed from a single image. 
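For instance, here is a minimal sketch of that idea using the public nginx image (any image would do; the container names are arbitrary):
```
# Two independent containers created from the same image
docker run -d --name app2-release nginx
docker run -d --name app2-test nginx
```
Both containers start from identical content, then live their own lives.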
35 | The whole picture looks like: 36 | 37 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/image-container.png) 38 | 39 | ## Registries 40 | 41 | Images are stored in a registry. In the example above, the app2 42 | image is used to create two containers. Each container lives its own life, and they both share a common root: their image from the registry. 43 | -------------------------------------------------------------------------------- /Docker/Dev/Dependency-conflict.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Solves Dependency Conflicts 4 | parent: Docker For Developer 5 | nav_order: 2 6 | --- 7 | 8 | # Solves Dependency Conflicts 9 | 10 | - A typical web application looks something like the following: 11 | 12 | [A Web Application Model](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/WordpressDev.png) 13 | 14 | - The application is made of files served by an HTTP server (Apache here, but it could be Kestrel, IIS, NGINX, ...), a runtime (PHP 5.6 here) and a development 15 | framework (Wordpress 4.9 here). 16 | 17 | - Without containers, the dependencies and files are all placed together on a server. Since managing these dependencies is time-consuming, similar apps are typically grouped on the same server, 18 | sharing their dependencies: 19 | 20 | ![A model of two web applications sharing the same dependencies](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/WordpressDev2.png) 21 | 22 | - Now suppose you want to upgrade the PHP runtime from version 5.6 to 7.2. However, the version change induces breaking changes in the applications that 23 | therefore need to be updated. You need to update both App 1 and App 2 when proceeding with the upgrade. On a server that may host many apps of this type, 24 | this is going to be a daunting task, and you’ll need to delay the upgrade until all apps are ready. 25 | 26 | - Another similar problem is when you want to host App 3 on the same server, but App 3 uses the Node.JS runtime together with a package that, 27 | when installed, changes a dependency used by the PHP runtime. Conflicts between runtimes happen often, 28 | so you’ve probably faced that problem already. 29 | 30 | - Containers solve this problem since each app will run inside its own container with its own dependencies. Your typical server would look like: 31 | 32 | ![A model of two web applications on a Docker Server](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/WordpressDockerServer.png) 33 | 34 | 35 | Each container encapsulates its own dependencies. Which means you can migrate the PHP runtime from version 5.6 to 7.2 in a container without 36 | it affecting others. Any other container that would use, for instance, Node.JS would not interfere with any of the Wordpress containers. 37 | 38 | -------------------------------------------------------------------------------- /Docker/Dev/DevopsDocker.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: A DevOps Enabler Tool - Docker 4 | parent: Docker For Developer 5 | nav_order: 1 6 | --- 7 | 8 | 9 | 10 | # A DevOps Enabler Tool - Docker 11 | 12 | Docker is an engine that runs containers. As a tool, containers allow you to solve many challenges created in the growing DevOps trend. 
13 | 14 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/Devops-chain.png) 15 | 16 | In DevOps, the Dev and Ops teams have conflicting goals: 17 | 18 | | Dev Team Seeks | Ops Team Seeks | 19 | |- |- | 20 | | Frequent deployments and updates | Stability of production apps | 21 | | Easy creation of new resources | Manage infrastructure, not
applications | 22 | 23 | 24 | - As an agile developer, I want to frequently publish my applications so that deployment becomes a routine. The rationale behind this is that this agility makes the “go-to production” event a normal, frequent, completely mastered event instead of a dreaded disaster that may awake monsters who hit me one week later. On the other hand, it is the Ops team that has to face the user if anything goes wrong in deployment - so they naturally want stability. 25 | 26 | - Containers make deployment easy. Deploying is as simple as running a new container, routing users to the new one, and trashing the old one. It can even be automated by orchestration tools. Since it’s so easy, we can afford to have many containers serving a single application for increased stability during updates. 27 | 28 | - If you don’t use containers, Ops need to handle your hosting environment: runtimes, libraries, and OS needed by your application. On the other hand, when using containers, they need one single methodology that can handle the containers you provide no matter what’s inside them. You may as well use .NET Core, Java, Node.JS, PHP, Python, or another development tool: it doesn’t matter to them as long as your code is containerized. This is a considerable advantage for containers when it comes to DevOps. 29 | 30 | we’ll see how to create container images for specific development technologies. 31 | 32 | -------------------------------------------------------------------------------- /Docker/Dev/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Docker For Developer 4 | nav_order: 5 5 | has_children: true 6 | permalink: /Docker/Dev/ 7 | --- 8 | -------------------------------------------------------------------------------- /Docker/Dev/Run-first-container.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Running First hello-world Container 4 | parent: Docker For Developer 5 | nav_order: 7 6 | --- 7 | 8 | # Running First hello-world Container 9 | 10 | Run the following command on a command-line: 11 | 12 | ``` 13 | docker run hello-world 14 | 15 | ``` 16 | 17 | output 18 | 19 | 20 | ``` 21 | 22 | $ docker run hello-world 23 | Unable to find image 'hello-world:latest' locally 24 | latest: Pulling from library/hello-world 25 | 0e03bdcc26d7: Pull complete 26 | Digest: sha256:4cf9c47f86df71d48364001ede3a4fcd85ae80ce02ebad74156906caff5378bc 27 | Status: Downloaded newer image for hello-world:latest 28 | 29 | Hello from Docker! 30 | This message shows that your installation appears to be working correctly. 31 | 32 | To generate this message, Docker took the following steps: 33 | 1. The Docker client contacted the Docker daemon. 34 | 2. The Docker daemon pulled the "hello-world" image from the Docker Hub. 35 | (amd64) 36 | 3. The Docker daemon created a new container from that image which runs the 37 | executable that produces the output you are currently reading. 38 | 4. The Docker daemon streamed that output to the Docker client, which sent it 39 | to your terminal. 
40 | 41 | To try something more ambitious, you can run an Ubuntu container with: 42 | $ docker run -it ubuntu bash 43 | 44 | Share images, automate workflows, and more with a free Docker ID: 45 | https://hub.docker.com/ 46 | 47 | For more examples and ideas, visit: 48 | https://docs.docker.com/get-started/ 49 | 50 | 51 | ``` 52 | 53 | Congratulations, you just ran your first container! Here’s what just happened in detail: 54 | 1. Your command asks Docker to create and run a container based on the hello-world image.
55 | 2. Since the hello-world image wasn’t already present on your disk, Docker downloaded it from a default registry, the Docker Hub. More about that later.
56 | 3. Docker created a container based on the hello-world image.
57 | 4. The hello-world image states that, when started, it should output some 58 | text to the console, so this is the text you see as the container is running.
59 | 5. The container stopped.
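You can verify that last step yourself: docker ps lists only running containers, while docker ps -a also includes stopped ones, so the hello-world container shows up with an Exited status (the ID and the auto-generated name will differ on your machine):
```
docker ps -a
```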
60 | Here’s what you did, slightly simplified: 61 | 62 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/running-first-container.png) 63 | 64 | If you run the same command again, you’ll see that all the above steps are being repeated except for step 2; this is because the image 65 | does not need to be downloaded as it is already present on your machine from the first time you ran the command. This is a simple optimization, 66 | but you’ll see later that Docker optimizes many more steps. 67 | As such, Docker makes scarce use of a machine’s resources. 68 | -------------------------------------------------------------------------------- /Docker/Dev/Scaling-up.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Allows Easy Scaling Up 4 | parent: Docker For Developer 5 | nav_order: 3 6 | --- 7 | 8 | # Allows Easy Scaling Up 9 | 10 | When a server application needs to handle a higher usage than what a single server can handle, the solution is well-known, place a reverse proxy 11 | in front of it, and duplicate the server as many times as needed. In our previous Wordpress application example, this meant duplicating the server together 12 | with all of its dependencies: 13 | 14 | ![A model of using a reverse proxy with duplicate servers](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/WordpressReverse.png) 15 | 16 | That is only going to make things worse when upgrading: we’ll need to upgrade each server’s dependencies together with all of the conflicts that may induce 17 | Again, containers have a solution for this containers are based on images. You can run as many containers as you wish from a single image — all the containers 18 | will support the exact same dependencies. 19 | 20 | Better yet: when using an orchestrator, you merely need to state how many containers you want and the image name and the orchestrator 21 | creates that many containers on all of your Docker servers. We’ll see this in the orchestrators part of this course. This is how it looks: 22 | 23 | ![A model of creating duplicate containers using an orchestrator](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/WordpressDockerReverse.png) 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /Docker/Dev/SeamlessUpgrades.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Allows Seamless Upgrades 4 | parent: Docker For Developer 5 | nav_order: 4 6 | --- 7 | 8 | # Allows Seamless Upgrades 9 | 10 | Even in scaled-up scenarios, a container-based approach makes tricky concepts seem trivial. Without containers, 11 | your favorite admin will not be happy with you if he has to update every server, including the dependencies. 12 | 13 | ![Every server needs to be updated](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/everyserver.png) 14 | 15 | Of course, in such a case, the update process depends on the application and its dependencies. Don’t even try to tell your admins about DevOps if you want 16 | to remain alive. By using containers, it’s a simple matter of telling the orchestrator that you want to run a new image version, 17 | and it gradually replaces every container with another one running the new version. 
Whatever technology stack is running inside the containers, 18 | telling the orchestrator that you want to run a new image version is a single, simple command. 19 | 20 | The illustration below shows the process as it goes on: the orchestrator replaces one container, and then moves on to the other ones. 21 | While the new container is not ready, traffic is being routed to the old-version containers so that there is no interruption of service. 22 | 23 | ![Rerouting traffic to the updated container](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/WordpressupdatedContainer.png) 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /Docker/Dev/Various-Products-Need.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Various Products for Various Needs 4 | parent: Docker For Developer 5 | nav_order: 5 6 | --- 7 | 8 | # Various Products for Various Needs 9 | 10 | - In a production environment that runs containers hosting critical applications, you would rather have your favorite admins install Docker Enterprise. 11 | However, on your development machine or a continuous integration build machine, you can use the free Docker Engine Community or Docker Desktop depending 12 | on your machine type. 13 | 14 | In short: 15 | 16 | | Use | Product | 17 | |- |- | 18 | | Developer machine | Docker Engine Community
or
Docker Desktop | 19 | | Small server, small expectations | Docker Engine Community | 20 | | Serious stuff/Critical applications | Docker Engine Enterprise
or Kubernetes | 21 | 22 | 23 | ## Hello World Test 24 | 25 | Whatever the edition you install, you can check your installation by running the following command in a command-line (your terminal on Linux, 26 | or PowerShell on Windows): 27 | 28 | ``` 29 | docker run hello-world 30 | 31 | ``` 32 | -------------------------------------------------------------------------------- /Docker/Overview/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Docker Fundamental 4 | nav_order: 4 5 | has_children: true 6 | permalink: /Docker/Overview/ 7 | --- 8 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/add1-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # ADD instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | LABEL maintainer="sangam biradar " 4 | LABEL version=3.0 5 | ADD https://github.com/docker-library/hello-world/raw/master/amd64/hello-world/hello / 6 | RUN chmod +x /hello 7 | CMD ["/hello"] 8 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/add2-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # ADD instruction Dockerfile for Docker Quick Start 2 | FROM alpine:latest 3 | LABEL maintainer="Sangam Biradar " 4 | LABEL version=2.0 5 | WORKDIR theqsg 6 | ADD --chown=35:35 app-folder.tar special-files/ 7 | WORKDIR / 8 | CMD ["sh"] 9 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/add2-demo/app-folder.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/add2-demo/app-folder.zip -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/add3-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # ADD instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | LABEL maintainer="sangam biradar" 4 | LABEL version=3.0 5 | ADD https://github.com/docker-library/hello-world/raw/master/amd64/hello-world/hello / 6 | RUN chmod +x /hello 7 | CMD ["/hello"] 8 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/arg-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # ARG instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | LABEL maintainer="Sangam Biradar" 4 | 5 | ENV key1="ENV is stronger than an ARG" 6 | RUN echo ${key1} 7 | ARG key1="not going to matter" 8 | RUN echo ${key1} 9 | 10 | RUN echo ${key2} 11 | ARG key2="defaultValue" 12 | RUN echo ${key2} 13 | ENV key2="ENV value takes over" 14 | RUN echo ${key2} 15 | CMD ["sh"] 16 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/arg3-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # ARG instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | LABEL maintainer="sangam biradar" 4 | 5 | ENV lifecycle="production" 6 | RUN echo ${lifecycle} 7 | ARG username="35" 8 | RUN echo ${username} 9 | ARG appdir 10 | RUN echo ${appdir} 11 | 12 | ADD hello /${appdir}/ 13 | RUN chown -R 
${username}:${username} ${appdir} 14 | WORKDIR ${appdir} 15 | USER ${username} 16 | 17 | CMD ["./hello"] 18 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/arg3-demo/hello: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/arg3-demo/hello -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/copy-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # COPY instruction Dockerfile for Docker Quick Start 2 | FROM alpine:latest 3 | LABEL version=1.0 4 | COPY file* theqsg/files/ 5 | COPY folder1 theqsg/ 6 | WORKDIR theqsg 7 | COPY --chown=35:35 special1 special-files/ 8 | WORKDIR / 9 | CMD ["sh"] 10 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/copy-demo/file1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/copy-demo/file1 -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/copy-demo/file2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/copy-demo/file2 -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/copy-demo/file3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/copy-demo/file3 -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/copy-demo/folder1/folderfile1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/copy-demo/folder1/folderfile1 -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/copy-demo/folder1/folderfile2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/copy-demo/folder1/folderfile2 -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/copy-demo/special1: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/copy-demo/special1 -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/entrypoint-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # ENTRYPOINT instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | RUN apk add curl 4 | ENTRYPOINT ["curl"] 5 | CMD ["--help"] 6 | -------------------------------------------------------------------------------- 
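A quick way to see how this ENTRYPOINT/CMD pair behaves (the entrypoint-demo tag below is an arbitrary name): with no arguments the container runs curl --help, and any arguments you pass replace the CMD and are handed straight to curl:
```
docker build -t entrypoint-demo .
docker run --rm entrypoint-demo                         # runs: curl --help
docker run --rm entrypoint-demo -I https://example.com  # runs: curl -I https://example.com
```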
/Docker/workshop/dockerfile/entrypoint2-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # ENTRYPOINT instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | ENTRYPOINT ["/bin/ping", "-c", "4"] 4 | CMD ["localhost"] 5 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/expose-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # EXPOSE instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | EXPOSE 80/tcp 4 | EXPOSE 81/tcp 5 | EXPOSE 8080/udp 6 | CMD while true; do echo 'DQS Expose Demo' | nc -l -p 80; done 7 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/from-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # FROM instruction Dockerfile for Docker Quick Start guide 2 | # hello-world 3 | FROM scratch 4 | COPY hello / 5 | CMD ["/hello"] 6 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/from-demo/hello: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/from-demo/hello -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/healthcheck-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # HEALTHCHECK instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | RUN apk add curl 4 | EXPOSE 80/tcp 5 | HEALTHCHECK --interval=30s --timeout=3s \ 6 | CMD curl -f http://localhost/ || exit 1 7 | CMD while true; do echo 'DQS Expose Demo' | nc -l -p 80; done 8 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/label-demo/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/label-demo/.DS_Store -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/label-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # hello-world Dockerfile 2 | FROM scratch 3 | LABEL maintainer="sangam biradar" 4 | LABEL "description"="My development Ubuntu image" 5 | LABEL version="1.0" 6 | LABEL label1="value1" \ 7 | label2="value2" \ 8 | lable3="value3" 9 | LABEL my-multi-line-label="Labels can span \ 10 | more than one line in a Dockerfile." 
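# Note: a single LABEL instruction can also set several key=value pairs at once,
# as the next line shows; when the same key is set twice, the later value wins
# (so the final image reports version="2.0", not "1.0").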
11 | LABEL support-email="support@mycompany.com" support-phone="(123) 456-7890" 12 | LABEL version="2.0" 13 | COPY hello / 14 | CMD ["/hello"] 15 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/label-demo/hello: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/Docker/workshop/dockerfile/label-demo/hello -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/onbuild-my-base-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # my-base Dockerfile 2 | FROM alpine 3 | LABEL maintainer="sangam biradar" 4 | ONBUILD LABEL version="1.0" 5 | ONBUILD LABEL support-email="support@mycompany.com" support-phone="(123) 456-7890" 6 | CMD ["sh"] 7 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/run-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # RUN instruction Dockerfile for Docker Quick Start 2 | FROM ubuntu 3 | RUN id 4 | RUN useradd --create-home -m -s /bin/bash dev 5 | # Add a fun prompt for dev user of my-app 6 | # whale: "\xF0\x9F\x90\xB3" 7 | # alien:"\xF0\x9F\x91\xBD" 8 | # fish:"\xF0\x9F\x90\xA0" 9 | # elephant:"\xF0\x9F\x91\xBD" 10 | # moneybag:"\xF0\x9F\x92\xB0" 11 | RUN echo 'PS1="\[$(tput bold)$(tput setaf 4)\]my-app $(echo -e "\xF0\x9F\x90\xB3") \[$(tput sgr0)\] [\\u@\\h]:\\W \\$ "' >> /home/dev/.bashrc && \ 12 | echo 'alias ls="ls --color=auto"' >> /home/dev/.bashrc 13 | 14 | RUN mkdir /myvol 15 | RUN echo "hello DQS Guide" > /myvol/greeting 16 | RUN ["chmod", "664", "/myvol/greeting"] 17 | RUN ["chown", "dev:dev", "/myvol/greeting"] 18 | VOLUME /myvol 19 | 20 | USER dev 21 | RUN id 22 | 23 | CMD ["/bin/bash"] 24 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/user-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | LABEL maintainer="sangam biradar" 3 | RUN id 4 | USER games:games 5 | RUN id 6 | CMD ["sh"] 7 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/volume-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # VOLUME instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | RUN mkdir /myvol 4 | RUN echo "hello world" > /myvol/greeting 5 | VOLUME /myvol 6 | CMD ["sh"] 7 | -------------------------------------------------------------------------------- /Docker/workshop/dockerfile/workdir-demo/Dockerfile: -------------------------------------------------------------------------------- 1 | # WORKDIR instruction Dockerfile for Docker Quick Start 2 | FROM alpine 3 | # Absolute path... 4 | WORKDIR / 5 | # relative path, relative to previous WORKDIR instruction 6 | # creates new folder 7 | WORKDIR sub-folder-level-1 8 | RUN touch file1.txt 9 | # relative path, relative to previous WORKDIR instruction 10 | # creates new folder 11 | WORKDIR sub-folder-level-2 12 | RUN touch file2.txt 13 | # relative path, relative to previous WORKDIR instruction 14 | # creates new folder 15 | WORKDIR sub-folder-level-3 16 | RUN touch file3.txt 17 | # Absolute path, creates three sub folders... 
18 | WORKDIR /l1/l2/l3 19 | CMD ["sh"] 20 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | gemspec 3 | 4 | 5 | group :jekyll_plugins do 6 | gem "jekyll-youtube" 7 | gem 'jekyll-feed' 8 | end 9 | 10 | -------------------------------------------------------------------------------- /Kubernetes/beginner/Deployment-process.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Sequential breakdown of the Kubernetes Deployment process 4 | parent: Kubernetes For Beginner 5 | nav_order: 22 6 | --- 7 | 8 | # Sequential Breakdown of the Kubernetes Deployment Process 9 | 10 | Before we move on to Deployment updates, we’ll go through our usual ritual of seeing the process through a 11 | sequence diagram. We won’t repeat the explanation of the events that happened after the ReplicaSet object was created; those steps are described [here](https://containerlabs.kubedaily.com/Kubernetes/beginner/Sequential-Breakdown-of-the-Process.html). 12 | 13 | - Kubernetes client (kubectl) sent a request to the API server requesting the creation of a Deployment defined in the deploy.yml file. 14 | - The deployment controller is watching the API server for new events, and it detected that there is a new Deployment object. 15 | - The deployment controller creates a new ReplicaSet object. 16 | 17 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/Deployement-Sequence.png) 18 | 19 | The above illustration shows the sequence of events involved in the Deployment process. 20 | 21 | 22 | -------------------------------------------------------------------------------- /Kubernetes/beginner/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Kubernetes For Beginner 4 | nav_order: 6 5 | has_children: true 6 | permalink: 7 | --- 8 | -------------------------------------------------------------------------------- /Kubernetes/beginner/Sequential-Breakdown-of-the-Process.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Sequential Breakdown of the Process 4 | parent: Kubernetes For Beginner 5 | nav_order: 17 6 | --- 7 | 8 | # Sequential Breakdown of the Process 9 | 10 | The sequence of events that transpired with the `kubectl create -f replicaset.yaml` 11 | command is as follows. 12 | 13 | - 1.Kubernetes client (`kubectl`) sent a request to the API server requesting 14 | the creation of a ReplicaSet defined in the `replicaset.yaml` file. 15 | - 2.The controller is watching the API server for new events, and it detected that there is a new ReplicaSet object. 16 | - 3.The controller creates five new Pod definitions because we configured the replicas value as 5 in the file. 17 | - 4.Since the scheduler is watching the API server for new events, it detected that there are five unassigned Pods. 18 | - 5.The scheduler decided which node to assign each Pod to and sent that information to the API server. 19 | - 6.Kubelet is also watching the API server. It detected that the five Pods were assigned to the node it is running on. 20 | - 7.Kubelet sent requests to Docker requesting the creation of the containers that form the Pods.
21 | - 8.Finally, Kubelet sent a request to the API server notifying it that the Pods were created successfully. 22 | 23 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/replicaset-controller-sequence.png) 24 | 25 | The sequence we described is useful when we want to understand everything that happened in the cluster from the moment we requested the creation of a new ReplicaSet. However, it might be too confusing, so we’ll try 26 | to explain the same process through a diagram that more closely represents the cluster. 27 | 28 | 29 | -------------------------------------------------------------------------------- /Kubernetes/beginner/componets-stages-pod-scheduling.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Components and Stages Involved in a Pod's Scheduling 4 | parent: Kubernetes For Beginner 5 | nav_order: 12 6 | --- 7 | 8 | # Components and Stages Involved in a Pod's Scheduling 9 | 10 | Let’s discuss some of the details of Kubernetes components, and try to get an 11 | understanding of how Pod scheduling works. 12 | Three major components are involved in the process: 13 | 14 | 15 | # 1. API Server 16 | 17 | - The API server is the central component of a Kubernetes cluster and it runs on the master node. Since we are using Minikube, both master and worker nodes are 18 | baked into the same virtual machine. However, a more serious Kubernetes cluster should have the two separated on different hosts. 19 | - All other components interact with the API server and keep watch for changes. Most of the coordination in Kubernetes consists of a 20 | component writing to an API Server resource that another component is watching. The second component will then react to changes almost immediately. 21 | 22 | # 2. Scheduler 23 | 24 | - The scheduler is also running on the master node. Its job is to watch for unassigned pods and assign them to a node which has available resources 25 | (CPU and memory) matching Pod requirements. Since we are running a single-node cluster, specifying resources would not provide much insight into their 26 | usage, so we’ll leave them for later. 27 | 28 | # 3. Kubelet 29 | 30 | Kubelet runs on each node. Its primary function is to make sure that assigned pods are running on the node. It watches for any new Pod 31 | assignments for the node. If a Pod is assigned to the node Kubelet is running on, 32 | it will pull the Pod definition and use it to create containers through Docker or any other supported container engine. 33 | 34 | # Sequential Breakdown of Events 35 | 36 | The sequence of events that transpired with the `kubectl create -f pod/db.yml` 37 | command is as follows: 38 | 39 | - 1. Kubernetes client (kubectl) sent a request to the API server requesting 40 | creation of a Pod defined in the pod/db.yml file. 41 | - 2. Since the scheduler is watching the API server for new events, it detected 42 | that there is an unassigned Pod. 43 | - 3. The scheduler decided which node to assign the Pod to and sent that information to the API server. 44 | - 4. Kubelet is also watching the API server. It detected that the Pod was assigned to the node it is running on. 45 | - 5. Kubelet sent a request to Docker requesting the creation of the containers that form the Pod. In our case, the Pod defines a single container based on the mongo image. 46 | - 6. Finally, Kubelet sent a request to the API server notifying it that the Pod was created successfully (you can watch this sequence live, as shown right after this list).
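To make the sequence tangible, here is a minimal sketch of how to observe it yourself, assuming a running cluster and the pod/db.yml file from this lesson:

```
# Stream cluster events in the background...
kubectl get events --watch &

# ...then create the Pod and watch the events arrive
kubectl create -f pod/db.yml

# Expect events such as Scheduled (from the scheduler), followed by
# Pulling, Pulled, Created, and Started (from the kubelet)
```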
47 | The process might not make much sense right now since we are running a single-node cluster. If we had more VMs, scheduling might 48 | have happened somewhere else, and the complexity of the process would be easier to grasp. We’ll get there in due time. 49 | 50 | The following illustration shows a Pod’s scheduling sequence. 51 | 52 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/Pod-scheduling-sequence.png) 53 | 54 | The above illustration shows us the sequence of events associated with a Pod’s scheduling. 55 | -------------------------------------------------------------------------------- /Kubernetes/beginner/demo.html: -------------------------------------------------------------------------------- [HTML page embedding an interactive terminal; its markup did not survive extraction. The only recoverable text is the notice: "The screen is too narrow to interact with the Terminal, please use a desktop/tablet."]
-------------------------------------------------------------------------------- /Kubernetes/beginner/deploy-releases.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Deploying New Releases 4 | parent: Kubernetes For Beginner 5 | nav_order: 21 6 | --- 7 | 8 | # Deploying New Releases 9 | 10 | Just as we are not supposed to create Pods directly but through other controllers like ReplicaSets, we are not supposed to create ReplicaSets either. Kubernetes Deployments will create them for us. If you’re wondering why this is so, you’ll have to wait a little while longer to find out. 11 | First, we’ll create a few Deployments and, once we are familiar with the process and the outcomes, it’ll become obvious why they are better at managing ReplicaSets than we are. 12 | 13 | # Looking into the Definition 14 | 15 | 16 | ``` 17 | // deploy.yml 18 | kind: Deployment 19 | apiVersion: apps/v1 20 | metadata: 21 | name: nginx-deployment 22 | spec: 23 | # A deployment's specification really only 24 | # has a few useful options 25 | 26 | # 1. How many copies of each pod do we want? 27 | replicas: 3 28 | 29 | # 2. How do we want to update the pods? 30 | strategy: { type: Recreate } 31 | 32 | # 3. Which pods are managed by this deployment? 33 | selector: 34 | # This must match the labels we set on the pod! 35 | matchLabels: 36 | deploy: example 37 | 38 | # This template field is a regular pod configuration 39 | # nested inside the deployment spec 40 | template: 41 | metadata: 42 | # Set labels on the pod. 43 | # This is used in the deployment selector. 44 | labels: 45 | deploy: example 46 | spec: 47 | containers: 48 | - name: nginx 49 | image: nginx:1.7.9 50 | 51 | ``` 52 | 53 | 54 | We will regularly add `--record` to the kubectl create commands. This allows us to track each change to our resources, such as Deployments. 55 | 56 | ``` 57 | kubectl create \ 58 | -f deploy.yml \ 59 | --record 60 | kubectl get -f deploy.yml 61 | 62 | ``` 63 | 64 | ## Describing the Deployment 65 | 66 | ``` 67 | kubectl describe -f deploy.yml 68 | ``` 69 | 70 | From the Events section, we can observe that the Deployment created a ReplicaSet. Or, to be more precise, that it scaled it. That is interesting. 71 | It shows that Deployments control ReplicaSets. The Deployment created the ReplicaSet which, in turn, created Pods. 72 | Let’s confirm that by retrieving the list of all the objects. 73 | 74 | ``` 75 | kubectl get all 76 | 77 | ``` 78 | 79 | You might be wondering why we created the Deployment at all. You might think that we’d have the same result if we created a ReplicaSet directly. You’d be right. 80 | So far, from the functional point of view, there is no difference between a ReplicaSet created directly or using a Deployment. 81 | 82 | The following figure summarizes the cascading effect of Deployments, resulting in the creation of ReplicaSets, Pods, and containers.
83 | 84 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/Deployement-containerlabs.png) 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /Kubernetes/beginner/deploying-releases.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Getting Started with Deploying Releases 4 | parent: Kubernetes For Beginner 5 | nav_order: 20 6 | --- 7 | 8 | # Getting Started with Deploying Releases 9 | 10 | # Deploying Releases 11 | 12 | - In today’s competitive environment, we have to release features to production as soon as they are developed and tested. The need for 13 | frequent releases reinforces the need for zero-downtime deployments. 14 | - We learned how to deploy our applications packaged as Pods, how to scale them through ReplicaSets, and how to enable 15 | communication through Services. However, all that is useless if we cannot update those applications with new releases. That is where Kubernetes 16 | Deployments come in handy. 17 | - The desired state of our applications is changing all the time. The most common reasons for new states are new releases. The process is relatively 18 | simple. We make a change and commit it to a code repository. We build it, and we test it. Once we’re confident that it works as expected, 19 | we deploy it to a cluster. 20 | - It does not matter whether that deployment is to a development, test, staging, or production environment. We need to deploy a new release to a cluster, 21 | even when that is a single-node Kubernetes running on a laptop. No matter how many environments we have, the process should always be the same or, at 22 | least, as similar as possible. 23 | 24 | # Why Zero Downtime? 25 | 26 | - The deployment must produce no downtime. It does not matter whether it is performed on a testing or a production cluster. Interrupting consumers is disruptive, 27 | and that leads to loss of money and confidence in a product. 28 | - Gone are the days when users did not care if an application sometimes did not work. There are so many competitors out there that a single bad experience 29 | might lead users to another solution. With today’s scale, 0.1% of failed requests is considered disastrous. 30 | - While we might never be able to reach 100% availability, we should certainly not cause downtime ourselves and must minimize other factors that 31 | could cause downtime. 32 | 33 | # Kubernetes Deployments 34 | 35 | Kubernetes Deployments provide us with the tools we need to avoid such failures by allowing us to update our applications without downtime. 36 | Let’s explore how Kubernetes Deployments work and the benefits we gain by adopting them. 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /Kubernetes/beginner/kubectl.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Understanding kubectl 4 | parent: Kubernetes For Beginner 5 | nav_order: 5 6 | --- 7 | 8 | # Understanding kubectl 9 | Kubernetes’ command-line tool, kubectl, is used to manage a cluster and applications running inside it. We’ll use kubectl a lot throughout the labs, 10 | so we won’t go into details just yet. Instead, we’ll discuss its commands through examples that will 11 | follow shortly. For now, think of it as your interface to a Kubernetes cluster. 12 | 13 | # Installation 14 | Let’s install kubectl.
15 | Feel free to skip the installation steps if you already have kubectl. Just make 16 | sure that it is version 1.8 or above. 17 | 18 | ## MacOS 19 | 20 | If you are a MacOS user, please execute the commands that follow. 21 | 22 | ``` 23 | curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/darwin/amd64/kubectl 24 | 25 | chmod +x ./kubectl 26 | 27 | sudo mv ./kubectl /usr/local/bin/kubectl 28 | 29 | ``` 30 | 31 | If you already have the Homebrew package manager installed, you can “brew” it with the command that follows. 32 | 33 | ``` 34 | brew install kubectl 35 | 36 | ``` 37 | ## Linux 38 | 39 | If, on the other hand, you’re a Linux user, the commands that will install kubectl are as follows. 40 | 41 | ``` 42 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl 43 | 44 | chmod +x ./kubectl 45 | 46 | sudo mv ./kubectl /usr/local/bin/kubectl 47 | 48 | ``` 49 | ## Windows 50 | Finally, Windows users should download the binary through the command that follows. 51 | ``` 52 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/windows/amd64/kubectl 53 | 54 | ``` 55 | 56 | Feel free to copy the binary to any directory. The important thing is to add it to your PATH. 57 | 58 | ## Verification 59 | Let’s check the kubectl version and, at the same time, validate that it is working correctly. No matter which OS you’re using, the command is as follows. 60 | 61 | ``` 62 | kubectl version 63 | 64 | ``` 65 | Fortunately, kubectl can use a few different formats for its output. For example, we can tell it to output the result in YAML format. 66 | 67 | ``` 68 | kubectl version --output=yaml 69 | 70 | ``` 71 | 72 | -------------------------------------------------------------------------------- /Kubernetes/beginner/minikube.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Installing Minikube 4 | parent: Kubernetes For Beginner 5 | nav_order: 6 6 | --- 7 | 8 | # Installing Minikube 9 | 10 | 11 | # Understanding Minikube 12 | 13 | Minikube supports several virtualization technologies. 14 | We’ll use VirtualBox throughout the book since it is the only virtualization technology supported on all operating systems. If you do not have it already, please head to the Download VirtualBox page and get the version that matches your OS. 15 | 16 | ### Note :- Please keep in mind that for VirtualBox or Hyper-V to work, virtualization must be enabled in the BIOS. Most laptops should have it enabled by default. 17 | 18 | # Installation 19 | 20 | Finally, we can install Minikube. 21 | 22 | # MacOS 23 | 24 | If you’re using MacOS, please execute the command that follows. 25 | 26 | ``` 27 | brew install minikube 28 | 29 | ``` 30 | If the installation fails, check this [Link](https://osxdaily.com/2018/12/31/install-run-virtualbox-macos-install-kernel-fails/) 31 | 32 | # Linux 33 | 34 | If, on the other hand, you prefer Linux, the command is as follows. 35 | 36 | ``` 37 | curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ 38 | 39 | ``` 40 | # Windows 41 | 42 | Finally, there is no one-line installation command if you are a Windows user.
Instead, download the latest release of the minikube-windows-amd64.exe file, rename it to minikube.exe, and add it to your PATH. 43 | 44 | # Validation 45 | We’ll test whether Minikube works or not by checking its version. 46 | 47 | ``` 48 | minikube version 49 | 50 | ``` 51 | Now we’re ready to give the cluster a spin. 52 | 53 | 54 | -------------------------------------------------------------------------------- /Kubernetes/beginner/public/overrides.css: -------------------------------------------------------------------------------- 1 | #docsToc .push-menu-close-button, 2 | #docs .flyout-button { 3 | display: block; 4 | } 5 | 6 | #docsToc { 7 | position: fixed; 8 | background-color: #fff; 9 | top: 0; 10 | left: 0; 11 | width: 0; 12 | padding: 0; 13 | overflow: hidden; 14 | z-index: 999999; 15 | transition: 0.3s; 16 | } 17 | 18 | .open-toc #docsToc { 19 | padding: 50px 20px; 20 | width: 400px; 21 | max-width: 100vw; 22 | overflow-y: auto; 23 | } 24 | 25 | #docsContent { 26 | width: 100%; 27 | } 28 | -------------------------------------------------------------------------------- /Kubernetes/beginner/start-with-ingress.md: -------------------------------------------------------------------------------- 1 | # Getting Started with Ingress 2 | 3 | ## Why Use Ingress Objects? 4 | 5 | Applications that are not accessible to users are useless. Kubernetes Services provide accessibility at a usability cost. 6 | Each application can be reached through a different port. We cannot expect users to know the port of each service in our cluster. 7 | 8 | - Ingress objects manage external access to the applications running inside a Kubernetes cluster. 9 | 10 | While, at first glance, it might seem that we already accomplished that through Kubernetes Services, they do not make the applications truly accessible. 11 | We still need forwarding rules based on paths and domains, SSL termination, and a number of other features. 12 | 13 | In a more traditional setup, we’d probably use an external proxy and a load balancer. Ingress provides an API that allows us to accomplish these things, 14 | in addition to a few other features we expect from a dynamic cluster. 15 | 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /Kubernetes/beginner/start-with-pods.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Getting Started with Pods 4 | parent: Kubernetes For Beginner 5 | nav_order: 9 6 | --- 7 | 8 | # Getting Started with Pods 9 | 10 | ## Understanding Pods 11 | 12 | Pods are equivalent to bricks we use to build houses. Both are unremarkable and not much by themselves. Yet, they 13 | are fundamental building blocks without which we could not construct the solution we are set to build. 14 | If you have used Docker or Docker Swarm, you’re probably used to thinking that a container is the smallest unit 15 | and that more complex patterns are built on top of it. With Kubernetes, the smallest unit is a Pod. 16 | 17 | ## A Pod is a way to represent a running process in a cluster. 18 | 19 | - From Kubernetes’ perspective, there’s nothing smaller than a Pod. 20 | - A Pod encapsulates one or more containers. It provides a unique network IP, attaches storage resources, and also decides how containers should run. Everything in a Pod is tightly coupled. 21 | - We should clarify that containers in a Pod are not necessarily run by Docker. Other container runtimes are supported as well.
Still, at the time of this writing, Docker is the 22 | most commonly used container runtime, and all our examples will use it. 23 | - Since we cannot create Pods without a Kubernetes cluster, our first order of business is to create one. 24 | 25 | ## Creating a Cluster (Minikube) 26 | We’ll create a local Kubernetes cluster using Minikube. 27 | 28 | ``` 29 | minikube start --vm-driver=virtualbox 30 | kubectl get nodes 31 | 32 | ``` 33 | ## Other Free Kubernetes Cluster Alternatives 34 | 35 | - [Okteto](https://www.okteto.com) 36 | - [Play with Kubernetes](https://labs.play-with-k8s.com/) 37 | 38 | Hope everything is set up well at your end. Now we will run our first Pod! 39 | -------------------------------------------------------------------------------- /Kubernetes/beginner/updating-deployment.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Updating Deployments 4 | parent: Kubernetes For Beginner 5 | nav_order: 23 6 | --- 7 | 8 | 9 | # Updating Deployments 10 | 11 | 12 | ## Updating the nginx Image 13 | 14 | Let’s see what happens when we set a new image for the Pods. 15 | [ref](https://containerlabs.kubedaily.com/Kubernetes/beginner/deploy-releases.html) 16 | 17 | ``` 18 | 19 | kubectl set image \ 20 | -f deploy.yml \ 21 | nginx=nginx:1.8.0 \ 22 | --record 23 | kubectl get -f deploy.yml 24 | 25 | 26 | ``` 27 | 28 | It’ll take a while until the new image is pulled. 29 | 30 | ## Describing the Deployment 31 | Once it’s done, we can describe the Deployment by checking the events it 32 | created. 33 | ``` 34 | 35 | kubectl describe -f deploy.yml 36 | 37 | ``` 38 | 39 | ## Looking into the Cluster 40 | To be on the safe side, we might want to retrieve all the objects from the 41 | cluster. 42 | 43 | ``` 44 | 45 | kubectl get all 46 | 47 | ``` 48 | 49 | ## Exploring Ways to Update a Deployment 50 | 51 | ## Updating Using Commands 52 | 53 | The kubectl set image command is not the only way to update a Deployment. 54 | We could have used kubectl edit as well. 55 | 56 | The command would be as follows. 57 | 58 | ``` 59 | kubectl edit -f deploy.yml 60 | 61 | ``` 62 | 63 | Please do NOT execute it. If you do, you’ll need to type :q followed by the enter key to exit. 64 | 65 | The above `edit` command is not a good way to update the definition. It is impractical and undocumented. 66 | `kubectl set image` is more useful if we’d like to integrate Deployment updates with one of the CI/CD tools. 67 | 68 | 69 | ## Updating the YAML File 70 | 71 | Another alternative would be to update the YAML file and execute the `kubectl apply` command. While that is a good idea for applications that do not update 72 | frequently, it does not fit well with those that change weekly, daily, or even hourly. 73 | 74 | nginx is one of those that might get updated with a new release only a couple of times a year, so having an always up-to-date YAML file in your source code 75 | repository is an excellent practice. 76 | 77 | 78 | ## Finishing off 79 | 80 | 81 | We used `kubectl set image` just as a way to introduce you to what’s coming 82 | next when we explore frequent deployments without downtime. 83 | 84 | A simple update of Pod images is far from everything that Deployments offer. To see their real power, we should deploy the API. Since it can be scaled to multiple Pods, 85 | it’ll provide us with a much better playground.
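Before moving on, a few rollout companions to `kubectl set image` are worth knowing. A minimal sketch, assuming the nginx-deployment created from deploy.yml earlier:

```
# Follow the rollout until it completes (or fails)
kubectl rollout status deployment nginx-deployment

# List recorded revisions (populated because we used --record)
kubectl rollout history deployment nginx-deployment

# Roll back to the previous revision if the new image misbehaves
kubectl rollout undo deployment nginx-deployment
```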
86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | -------------------------------------------------------------------------------- /Kubernetes/fundamentals/Accessing-specific-logs.md: -------------------------------------------------------------------------------- 1 | 2 | # Accessing application-specific logs 3 | 4 | 5 | ``` 6 | // cm-postgres.yaml 7 | master $ cat cm-postgres.yaml 8 | apiVersion: v1 9 | kind: ConfigMap 10 | metadata: 11 | name: postgres-config 12 | labels: 13 | app: postgres 14 | data: 15 | POSTGRES_DB: postgresdb 16 | POSTGRES_USER: testuser 17 | POSTGRES_PASSWORD: testpassword123 18 | 19 | 20 | ``` 21 | 22 | ``` 23 | $ kubectl apply -f postgres.yaml 24 | $ kubectl get pods -l app=postgres 25 | $ kubectl exec -it postgres-0 -- /bin/bash 26 | 27 | ``` 28 | 29 | ``` 30 | CREATE TABLE test ( 31 | id int GENERATED BY DEFAULT AS IDENTITY PRIMARY KEY, 32 | a int NOT NULL, 33 | created_at timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP 34 | ); 35 | INSERT INTO test (a) SELECT * FROM generate_series(-1, -1000, -1); 36 | 37 | ``` 38 | 39 | Get the logging configuration details from postgresql.conf. You will see that the logs are stored in the /var/log/postgresql directory: 40 | 41 | ``` 42 | $ cat /var/lib/postgresql/data/postgresql.conf |grep log 43 | 44 | ``` 45 | List and access the logs in the /var/log/postgresql directory: 46 | 47 | ``` 48 | $ ls /var/log/postgresql 49 | 50 | 51 | ``` 52 | Optionally, while you're inside the container, you can create a backup of our example postgresdb database in the /tmp directory using the following command: 53 | 54 | ``` 55 | $ pg_dump --username testuser postgresdb > /tmp/backup.sql 56 | 57 | ``` 58 | ``` 59 | //postgres.yaml 60 | apiVersion: apps/v1 61 | kind: StatefulSet 62 | metadata: 63 | name: postgres 64 | spec: 65 | serviceName: "postgres" 66 | replicas: 2 67 | selector: 68 | matchLabels: 69 | app: postgres 70 | template: 71 | metadata: 72 | labels: 73 | app: postgres 74 | spec: 75 | containers: 76 | - name: postgres 77 | image: postgres:latest 78 | envFrom: 79 | - configMapRef: 80 | name: postgres-config 81 | ports: 82 | - containerPort: 5432 83 | name: postgredb 84 | volumeMounts: 85 | - name: postgredb 86 | mountPath: /var/lib/postgresql/data 87 | subPath: postgres 88 | volumeClaimTemplates: 89 | - metadata: 90 | name: postgredb 91 | spec: 92 | accessModes: [ "ReadWriteOnce" ] 93 | storageClassName: openebs-jiva-default 94 | resources: 95 | requests: 96 | storage: 5Gi 97 | 98 | ``` 99 | 100 | ``` 101 | cat pvc-postgres.yaml 102 | kind: PersistentVolumeClaim 103 | apiVersion: v1 104 | metadata: 105 | name: postgres-pv-claim 106 | labels: 107 | app: postgres 108 | spec: 109 | storageClassName: openebs-jiva-default 110 | accessModes: 111 | - ReadWriteOnce 112 | resources: 113 | requests: 114 | storage: 10G 115 | 116 | ``` 117 | 118 | ``` 119 | 120 | cat svc-postgres.yaml 121 | apiVersion: v1 122 | kind: Service 123 | metadata: 124 | name: postgres 125 | labels: 126 | app: postgres 127 | spec: 128 | type: NodePort 129 | ports: 130 | - port: 5432 131 | selector: 132 | app: postgres 133 | 134 | 135 | 136 | ``` 137 | 138 | -------------------------------------------------------------------------------- /Kubernetes/fundamentals/Kubernetes_Configmap.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Kubernetes ConfigMap 4 | parent: CKA / CKAD Certification Workshop Track 5 | nav_order: 19 6 | --- 7 | 8 | # What is a ConfigMap in Kubernetes?
9 | 10 | A ConfigMap is a dictionary of configuration settings. This dictionary consists of key-value pairs of strings. Kubernetes provides these values to your containers. 11 | Like with other dictionaries (maps, hashes, ...), the key lets you get and set the configuration value. 12 | 13 | # Why would you use a ConfigMap in Kubernetes? 14 | 15 | Use a ConfigMap to keep your application code separate from your configuration. 16 | It is an important part of creating a Twelve-Factor Application. 17 | This lets you easily change configuration depending on the environment (development, production, testing) and dynamically change configuration at runtime. 18 | 19 | # What is a ConfigMap used for? 20 | 21 | A ConfigMap stores configuration settings for your code. Store connection strings, public credentials, hostnames, and URLs in your ConfigMap. 22 | 23 | 24 | # How does a ConfigMap work? 25 | - Here's a quick animation I made showing how a ConfigMap works in Kubernetes. 26 | - First, you have multiple ConfigMaps, one for each environment. 27 | - Second, a ConfigMap is created and added to the Kubernetes cluster. 28 | - Third, containers in the Pod reference the ConfigMap and use its values. 29 | 30 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/configmap-diagram.gif) 31 | 32 | 33 | 34 | # Create the environment variables in the text file. 35 | 36 | ```shell 37 | $ echo -e "DB_URL=localhost:3306\nDB_USERNAME=postgres" > config.txt 38 | ``` 39 | 40 | Create the ConfigMap and point to the text file upon creation. 41 | 42 | ```shell 43 | $ kubectl create configmap db-config --from-env-file=config.txt 44 | configmap/db-config created 45 | $ kubectl run backend --image=nginx --restart=Never -o yaml --dry-run > pod.yaml 46 | ``` 47 | 48 | The final YAML file should look similar to the following code snippet. 49 | 50 | ```yaml 51 | apiVersion: v1 52 | kind: Pod 53 | metadata: 54 | creationTimestamp: null 55 | labels: 56 | run: backend 57 | name: backend 58 | spec: 59 | containers: 60 | - image: nginx 61 | name: backend 62 | envFrom: 63 | - configMapRef: 64 | name: db-config 65 | resources: {} 66 | dnsPolicy: ClusterFirst 67 | restartPolicy: Never 68 | status: {} 69 | ``` 70 | 71 | Create the Pod by pointing the `create` command to the YAML file. 72 | 73 | ```shell 74 | $ kubectl create -f pod.yaml 75 | ``` 76 | 77 | # Log into the Pod and run the `env` command. 78 | 79 | ```shell 80 | $ kubectl exec backend -it -- /bin/sh 81 | # env 82 | DB_URL=localhost:3306 83 | DB_USERNAME=postgres 84 | ... 85 | # exit 86 | ``` 87 | 88 | ## Optional 89 | 90 | > How would you approach hot reloading of values defined by a ConfigMap consumed by an application running in a Pod? 91 | 92 | Changes to environment variables are only reflected if the Pod is restarted. Alternatively, you can mount a ConfigMap as a file and periodically poll the mounted file for changes; however, this requires the application to build in that logic, as sketched below.
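For reference, a minimal sketch of that volume-mount approach, reusing the `db-config` ConfigMap created above (the Pod name and mount path are arbitrary choices for illustration):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: backend-cm-volume
spec:
  containers:
  - image: nginx
    name: backend
    volumeMounts:
    - name: config-volume
      mountPath: /etc/db-config   # DB_URL and DB_USERNAME appear as files here
  volumes:
  - name: config-volume
    configMap:
      name: db-config
```

Unlike environment variables, files projected this way are eventually refreshed by the kubelet after the ConfigMap changes (subPath mounts are an exception), which is what makes the polling approach possible.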
93 | -------------------------------------------------------------------------------- /Kubernetes/fundamentals/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: CKA / CKAD Certification Workshop Track 4 | nav_order: 4 5 | has_children: true 6 | permalink: /Kubernetes/ 7 | --- 8 | 9 | -------------------------------------------------------------------------------- /Kubernetes/fundamentals/play-with-k8s.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Interactive Playground To Learn Kubernetes 4 | parent: CKA / CKAD Certification Workshop Track 5 | nav_order: 3 6 | --- 7 | 8 | # Interactive Playground To Learn Kubernetes 9 | 10 | # Play With Kubernetes - Free ready-made Kubernetes platform 11 | 12 | visit :- https://labs.play-with-k8s.com/ 13 | - Play with Kubernetes is a labs site provided by Docker and created by Tutorius. 14 | Play with Kubernetes is a playground which allows users to run K8s clusters in a matter of seconds. 15 | It gives the experience of having a free Alpine Linux Virtual Machine in the browser. 16 | Under the hood, Docker-in-Docker (DinD) is used to give the effect of multiple VMs/PCs. 17 | 18 | - If you want to learn more about Kubernetes, consider the Play with Kubernetes Classroom, which provides more directed learning using an integrated 19 | Play with Kubernetes command line. 20 | 21 | - Log in using Docker Hub or GitHub credentials 22 | 23 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/playwithk8s-login.png) 24 | 25 | - Click on the start button 26 | 27 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/pwk-start.png) 28 | 29 | 30 | -------------------------------------------------------------------------------- /Kubernetes/fundamentals/pod-agent-sidecar-logging.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Lab 5 - Pod Agent Sidecar Logging 4 | parent: CKA / CKAD Certification Workshop Track 5 | nav_order: 8 6 | --- 7 | 8 | # Pod Agent Sidecar Logging 9 | -------------------------------------------------------------------------------- /Kubernetes/fundamentals/pod-logging-sidecar.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Lab 4 - Pod Logging Sidecar 4 | parent: CKA / CKAD Certification Workshop Track 5 | nav_order: 7 6 | --- 7 | 8 | 9 | # Lab 4 Pods : Pod Logging Sidecar 10 | 11 | Log-Shipping Sidecar :- 12 | 13 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/pod-log-sidecar.png) 14 | 15 | 16 | # Create `pod-logging-sidecar.yaml` File With Following Contents:- 17 | ``` 18 | --- 19 | apiVersion: v1 20 | kind: Pod 21 | metadata: 22 | name: counter-log-sidecar 23 | spec: 24 | containers: 25 | - name: count 26 | image: busybox 27 | args: 28 | - /bin/sh 29 | - -c 30 | - > 31 | i=0; 32 | while true; 33 | do 34 | echo "$i: $(date)" >> /var/log/1.log; 35 | echo "$(date) INFO $i" >> /var/log/2.log; 36 | i=$((i+1)); 37 | sleep 1; 38 | done 39 | volumeMounts: 40 | - name: varlog 41 | mountPath: /var/log 42 | - name: counter-log-1 43 | image: busybox 44 | args: [/bin/sh, -c, 'tail -n+1 -f /var/log/1.log'] 45 | volumeMounts: 46 | - name: varlog 47 | mountPath: /var/log 48 | - name: counter-log-2 49 | image: busybox 50 | args: [/bin/sh, -c, 'tail -n+1 -f /var/log/2.log'] 51 | volumeMounts: 52 | - name:
varlog 53 | mountPath: /var/log 54 | volumes: 55 | - name: varlog 56 | emptyDir: {} 57 | 58 | 59 | 60 | ``` 61 | 62 | # `kubectl create` Or `kubectl apply` 63 | 64 | ``` 65 | kubectl create -f pod-logging-sidecar.yaml 66 | pod/counter-log-sidecar created 67 | ``` 68 | 69 | # List All Running Pods 70 | 71 | ``` 72 | sangam:pods sangam$ kubectl get po 73 | NAME READY STATUS RESTARTS AGE 74 | counter 1/1 Running 0 2d2h 75 | counter-log-sidecar 3/3 Running 0 4m21s 76 | 77 | ``` 78 | # Check the Log Files Inside the Pod 79 | 80 | ``` 81 | sangam:pods sangam$ kubectl exec counter-log-sidecar -c count -it bin/sh 82 | / # ls 83 | bin dev etc home proc root sys tmp usr var 84 | / # cd var/log 85 | /var/log # ls 86 | 1.log 2.log 87 | /var/log # 88 | 89 | 90 | ``` 91 | -------------------------------------------------------------------------------- /Kubernetes/fundamentals/pod-logging-volum.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Lab 3 Pod - Logging Volume 4 | parent: CKA / CKAD Certification Workshop Track 5 | nav_order: 6 6 | --- 7 | 8 | 9 | # Lab 3 - Pod : Logging Volume 10 | 11 | - You can use a sidecar container in one of the following ways: 12 | 13 | - The sidecar container streams application logs to its own stdout. 14 | - The sidecar container runs a logging agent, which is configured to pick up logs from an application container. 15 | 16 | A pod runs a single container, and the container writes to two different log files, using two different formats. Here's a configuration file for the Pod: 17 | 18 | # Create `pod-logging-volum.yml` File With Following Contents 19 | 20 | ``` 21 | --- 22 | apiVersion: v1 23 | kind: Pod 24 | metadata: 25 | name: counter-log-vol 26 | spec: 27 | containers: 28 | - name: count 29 | image: busybox 30 | args: 31 | - /bin/sh 32 | - -c 33 | - > 34 | i=0; 35 | while true; 36 | do 37 | echo "$i: $(date)" >> /var/log/1.log; 38 | echo "$(date) INFO $i" >> /var/log/2.log; 39 | i=$((i+1)); 40 | sleep 1; 41 | done 42 | volumeMounts: 43 | - name: varlog 44 | mountPath: /var/log 45 | volumes: 46 | - name: varlog 47 | emptyDir: {} 48 | 49 | ``` 50 | # Use `kubectl create` OR `kubectl apply` 51 | 52 | ``` 53 | 54 | sangam:~ sangam$ kubectl create -f pod-logging-volum.yml 55 | pod/counter-log-vol created 56 | 57 | ``` 58 | # List Of All Running Pods 59 | 60 | ``` 61 | 62 | sangam:~ sangam$ kubectl get po 63 | NAME READY STATUS RESTARTS AGE 64 | counter 1/1 Running 0 3h39m 65 | counter-log-vol 1/1 Running 0 12m 66 | myapp-pod 1/1 Running 0 28h 67 | 68 | ``` 69 | 70 | 71 | 72 | # emptyDir 73 | 74 | An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. 75 | As the name says, it is initially empty. Containers in the Pod can all read and write the same files in the emptyDir volume, though that volume can be mounted 76 | at the same or different paths in each Container. 77 | When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever.
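As a side note (not used in this lab), an emptyDir can also be backed by RAM rather than node disk. A minimal sketch of just the volume stanza, with the caveat that tmpfs contents count against the container's memory limit:

```
volumes:
- name: varlog
  emptyDir:
    medium: Memory   # tmpfs-backed; still deleted when the Pod is removed
```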
78 | 79 | 80 | # Check the Volume from Inside the Pod 81 | 82 | ``` 83 | kubectl exec -it counter-log-vol -- bin/sh 84 | cd /var/log 85 | ls 86 | cat 1.log 2.log 87 | 88 | ``` 89 | -------------------------------------------------------------------------------- /Kubernetes/fundamentals/pod-logs-counter.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Lab2 - Pod - Args Instruction 4 | parent: CKA / CKAD Certification Workshop Track 5 | nav_order: 5 6 | --- 7 | 8 | ## Lab2 : Pod - Args Instruction 9 | 10 | 11 | 12 | ## Create `pod-logging.yml` File With Following Contents: 13 | 14 | ``` 15 | 16 | --- 17 | apiVersion: v1 18 | kind: Pod 19 | metadata: 20 | name: counter 21 | spec: 22 | containers: 23 | - name: count 24 | image: busybox 25 | args: ['/bin/sh', '-c', 'i=0; while true; do echo "$i: $(date)"; i=$((i+1)); sleep 1;done'] 26 | 27 | // If you supply a command and args, the default Entrypoint and the default Cmd defined in the Docker image are ignored. Your command is run with your args. 28 | 29 | ``` 30 | ## Use `kubectl create` or `kubectl apply` 31 | ``` 32 | kubectl create -f pod-logging.yml 33 | 34 | ``` 35 | 36 | ## List All Running Pods 37 | ``` 38 | sangam:~ sangam$ kubectl get po 39 | NAME READY STATUS RESTARTS AGE 40 | counter 1/1 Running 0 89s 41 | myapp-pod 1/1 Running 0 25h 42 | 43 | ``` 44 | ## Run The Args Specification To Get Logs 45 | ``` 46 | Syntax : kubectl logs <pod-name> -c <container-name> 47 | ``` 48 | It will show the output of the command specified via args: 49 | 50 | ``` 51 | kubectl logs counter -c count 52 | 0: Fri Jul 10 06:39:17 UTC 2020 53 | 1: Fri Jul 10 06:39:18 UTC 2020 54 | 2: Fri Jul 10 06:39:19 UTC 2020 55 | 3: Fri Jul 10 06:39:20 UTC 2020 56 | 4: Fri Jul 10 06:39:21 UTC 2020 57 | 5: Fri Jul 10 06:39:22 UTC 2020 58 | 6: Fri Jul 10 06:39:23 UTC 2020 59 | 7: Fri Jul 10 06:39:24 UTC 2020 60 | 61 | ``` 62 | -------------------------------------------------------------------------------- /Kubernetes/k3s/README.md: -------------------------------------------------------------------------------- 1 | Civo Cloud - True K3s Powered managed Kubernetes service 2 | 3 | -------------------------------------------------------------------------------- /Kubernetes/k3s/civo/installing-civo.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # Install Civo CLI 4 | 5 | ``` 6 | sangam:~ sangam$ curl -sL https://civo.com/get | sh 7 | /usr/bin/curl 8 | Finding latest version from GitHub 9 | 0.6.34 10 | Downloading package https://github.com/civo/cli/releases/download/v0.6.34/civo-0.6.34-darwin-amd64.tar.gz to /tmp/civo-0.6.34-darwin-amd64.tar.gz 11 | Download complete. 12 | 13 | Running with sufficient permissions to attempt to move civo to /usr/local/bin 14 | New version of civo installed to /usr/local/bin 15 | Civo CLI v0.6.34 16 | 17 | ``` 18 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-init-container/init-container.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-init-container-pod 6 | spec: 7 | containers: 8 | - name: broken-init-container-container 9 | image: busybox 10 | command: ['sh', '-c', 'echo The app is running!
&& sleep 3600'] 11 | initContainers: 12 | - name: broken-init-container-init-container 13 | image: busybox 14 | command: ['sh', '-c', "until nslookup pods-init-container-service-nonexistent.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for myservice; sleep 2; done"] 15 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-liveness/liveness.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-liveness-pod 6 | spec: 7 | containers: 8 | - args: 9 | - /bin/sh 10 | - -c 11 | - "sleep 3600" 12 | image: busybox 13 | livenessProbe: 14 | exec: 15 | command: 16 | - cat 17 | - /tmp/healthy 18 | initialDelaySeconds: 5 19 | periodSeconds: 5 20 | name: broken-liveness-container 21 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/bad-command.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-pods-bad-command-pod 6 | spec: 7 | containers: 8 | - command: 9 | - thiscommanddoesnotexist 10 | image: busybox 11 | name: broken-pods-bad-command-container 12 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/default-shell-command.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-pods-default-shell-command-pod 6 | spec: 7 | containers: 8 | - image: busybox 9 | name: broken-pods-default-shell-command-container 10 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/failed-command.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-pods-failed-command-pod 6 | spec: 7 | containers: 8 | - image: busybox 9 | command: 10 | - /bin/sh 11 | - -c 12 | - "exit 1" 13 | name: broken-pods-failed-command-container 14 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/misused-command.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-pods-misused-command-pod 6 | spec: 7 | containers: 8 | - image: busybox 9 | command: 10 | - /bin/sh 11 | - -c 12 | name: broken-pods-misused-command-container 13 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/multi-container-no-command.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-pod-multi-container-no-command-pod 6 | spec: 7 | containers: 8 | # this container has no command or entrypoint specified 9 | - image: mstormo/suse 10 | imagePullPolicy: IfNotPresent 11 | name: broken-pod-multi-container-no-command-1 12 | - image: busybox 13 | command: 14 | - sleep 15 | - "3600" 16 | name: broken-pod-multi-container-no-command-2 17 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/no-command.yaml:
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-pods-no-command-pod 6 | spec: 7 | containers: 8 | # this container has no command or entrypoint specified 9 | - image: mstormo/suse 10 | name: broken-pods-no-command-container 11 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/oom-killed.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/tasks/configure-pod-container/assign-memory-resource/ claims that this invoked the OOM killer, but it runs fine on MicroK8s? 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: broken-pods-oom-killed-pod 7 | spec: 8 | containers: 9 | - args: ["--vm", "1", "--vm-bytes", "250M", "--vm-hang", "1"] 10 | command: ["stress"] 11 | image: polinux/stress 12 | name: broken-pods-oom-killed-container 13 | resources: 14 | limits: 15 | memory: "100Mi" 16 | requests: 17 | memory: "50Mi" 18 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/private-repo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-pods-private-repo-pod 6 | spec: 7 | containers: 8 | # this image lives in a private repository, so pulling it should fail without credentials 9 | - image: imiell/bad-dockerfile-private 10 | name: broken-pods-private-repo-container 11 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-pods/too-much-mem.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This Pod requests more memory than any node can offer, so it should remain Pending (unschedulable)
3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: broken-pods-too-much-mem-pod 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "3600" 12 | image: busybox 13 | name: broken-pods-too-much-mem-container 14 | resources: 15 | requests: 16 | memory: "1000Gi" 17 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-readiness/readiness-broken/readiness-broken.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: readiness-broken-deployment 6 | labels: 7 | app: readiness-broken-app-label 8 | spec: 9 | replicas: 0 10 | selector: 11 | matchLabels: 12 | app: readiness-broken-app-label 13 | template: 14 | metadata: 15 | labels: 16 | app: readiness-broken-app-label 17 | tier: backend 18 | spec: 19 | containers: 20 | - name: readiness-broken-container 21 | image: eu.gcr.io/container-solutions-workshops//backend:3.14159 22 | ports: 23 | - containerPort: 8080 24 | env: 25 | - name: READINESS_PROBE_ENABLED 26 | value: "enabled" 27 | readinessProbe: 28 | httpGet: 29 | path: /neverready 30 | port: 8080 31 | scheme: HTTP 32 | initialDelaySeconds: 5 33 | timeoutSeconds: 5 34 | --- 35 | apiVersion: v1 36 | kind: Service 37 | metadata: 38 | name: readiness-broken-svc 39 | spec: 40 | type: ClusterIP 41 | ports: 42 | - port: 8080 43 | protocol: TCP 44 | targetPort: 8080 45 | selector: 46 | tier: frontend 47 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-readiness/readiness.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: broken-readiness-pod 6 | spec: 7 | containers: 8 | - args: 9 | - /bin/sh 10 | - -c 11 | - "sleep 3600" 12 | image: busybox 13 | readinessProbe: 14 | exec: 15 | command: 16 | - cat 17 | - /tmp/healthy 18 | initialDelaySeconds: 5 19 | periodSeconds: 5 20 | name: broken-readiness-container 21 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/broken-secrets/simple-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: secrets-simple-secret-pod 6 | spec: 7 | containers: 8 | - command: 9 | - sleep 10 | - "3600" 11 | image: busybox 12 | name: secrets-simple-secret-container 13 | volumeMounts: 14 | - name: secrets-simple-secret-volume 15 | mountPath: "/etc/simple-secret" 16 | volumes: 17 | - name: secrets-simple-secret-volume 18 | secret: 19 | secretName: secrets-simple-secret-secret-doesnotexist 20 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/cronjob/simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/ 3 | apiVersion: batch/v1beta1 4 | kind: CronJob 5 | metadata: 6 | name: cronjob-simple 7 | spec: 8 | schedule: "*/1 * * * *" 9 | jobTemplate: 10 | spec: 11 | template: 12 | spec: 13 | containers: 14 | - args: 15 | - /bin/sh 16 | - -c 17 | - date; echo Hello from the Kubernetes cluster cronjob 18 | image: busybox 19 | name: cronjob-simple-container 20 | restartPolicy: OnFailure 21 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/daemon-set/simple-daemon-set.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: fluentd-elasticsearch 6 | spec: 7 | selector: 8 | matchLabels: 9 | name: fluentd-elasticsearch 10 | template: 11 | metadata: 12 | labels: 13 | name: fluentd-elasticsearch 14 | spec: 15 | tolerations: 16 | # this toleration is to have the daemonset runnable on master nodes 17 | # remove it if your masters can't run pods 18 | - effect: NoSchedule 19 | key: node-role.kubernetes.io/master 20 | containers: 21 | - image: quay.io/fluentd_elasticsearch/fluentd:v2.5.2 22 | name: fluentd-elasticsearch 23 | terminationGracePeriodSeconds: 30 24 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/deployments/simple-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: deployments-simple-deployment-deployment 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: deployments-simple-deployment-app 11 | template: 12 | metadata: 13 | labels: 14 | app: deployments-simple-deployment-app 15 | spec: 16 | containers: 17 | - name: busybox 18 | image: busybox 19 | command: 20 | - sleep 21 | - "3600" 22 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/dns-config/dns-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: dns-config-dns-config-pod 6 | spec: 7 | containers: 8 | - name: test 9 | image: nginx 10 | dnsPolicy: "None" 11 | dnsConfig: 12 | nameservers: 13 | - 1.2.3.4 14 | searches: 15 | - ns1.svc.cluster-domain.example 16 | - my.dns.search.suffix 17 | options: 18 | - name: ndots 19 | value: "2" 20 | - name: edns0 21 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/dns-config/policy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Adapted from: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy 3 | # "Default": The Pod inherits the name resolution configuration from the node that the pods run on. See related discussion for more details. 4 | # "ClusterFirst": Any DNS query that does not match the configured cluster domain suffix, such as "www.kubernetes.io", is forwarded to the upstream nameserver inherited from the node. Cluster administrators may have extra stub-domain and upstream DNS servers configured. See related discussion for details on how DNS queries are handled in those cases. 5 | # "ClusterFirstWithHostNet": For Pods running with hostNetwork, you should explicitly set its DNS policy "ClusterFirstWithHostNet". 6 | # "None": It allows a Pod to ignore DNS settings from the Kubernetes environment. All DNS settings are supposed to be provided using the dnsConfig field in the Pod Spec. See Pod's DNS config subsection below. 
7 | 8 | apiVersion: v1 9 | kind: Pod 10 | metadata: 11 | name: dns-config-policy-pod 12 | spec: 13 | containers: 14 | - command: 15 | - sleep 16 | - "3600" 17 | image: busybox 18 | name: dns-config-policy-container 19 | hostNetwork: true 20 | dnsPolicy: ClusterFirstWithHostNet 21 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/dns-debug/dns-debug.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/ 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: dnsutils 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "3600" 12 | image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3 13 | name: dnsutils 14 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/headless-service/headless-service.yaml: -------------------------------------------------------------------------------- 1 | # Example of a headless service. 2 | # To see the difference, exec onto the headless service app, and do 3 | # 4 | # nslookup headless-service-normal-service 5 | # nslookup headless-service-headless-service 6 | # 7 | # from the dns-debug service (does not work from the deployed app itself - not sure why) 8 | --- 9 | apiVersion: v1 10 | kind: Service 11 | metadata: 12 | name: headless-service-normal-service 13 | spec: 14 | selector: 15 | app: headless-service-app 16 | ports: 17 | - protocol: TCP 18 | port: 80 19 | targetPort: 3000 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: headless-service-headless-service 25 | spec: 26 | clusterIP: None # This marks this service out as a headless service 27 | selector: 28 | app: headless-service-app 29 | ports: 30 | - protocol: TCP 31 | port: 80 32 | targetPort: 3000 33 | --- 34 | apiVersion: apps/v1 35 | kind: Deployment 36 | metadata: 37 | name: headless-service-deployment 38 | labels: 39 | app: headless-service-app 40 | spec: 41 | replicas: 2 42 | selector: 43 | matchLabels: 44 | app: headless-service-app 45 | template: 46 | metadata: 47 | labels: 48 | app: headless-service-app 49 | spec: 50 | containers: 51 | - command: 52 | - sleep 53 | - "3600" 54 | image: busybox 55 | name: headless-service-app 56 | ports: 57 | - containerPort: 3000 58 | --- 59 | # https://kubernetes.io/docs/tasks/administer-cluster/dns-debugging-resolution/ 60 | apiVersion: v1 61 | kind: Pod 62 | metadata: 63 | name: headless-service-dnsutils-pod 64 | spec: 65 | containers: 66 | - command: 67 | - sleep 68 | - "3600" 69 | image: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3 70 | name: dnsutils 71 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/ingress/README.md: -------------------------------------------------------------------------------- 1 | See: https://kubernetes.io/docs/concepts/services-networking/ingress/ 2 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/ingress/fanout.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: ingress-fanout 6 | annotations: 7 | nginx.ingress.kubernetes.io/rewrite-target: / 8 | spec: 9 | rules: 10 | - host: foo.bar.com 11 | http: 12 | paths: 13 | - path: /path1 14 | backend: 15 | serviceName: testsvc1 16 | servicePort: 4201 17 | - path: /path2 18 | backend: 19 | 
serviceName: testsvc2 20 | servicePort: 4202 21 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/ingress/ingress-class.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: IngressClass 4 | metadata: 5 | name: external-lb 6 | spec: 7 | controller: example.com/ingress-controller 8 | parameters: 9 | apiGroup: k8s.example.com/v1alpha 10 | kind: IngressParameters 11 | name: external-lb 12 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/ingress/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: test-ingress 6 | spec: 7 | backend: 8 | serviceName: testsvc 9 | servicePort: 80 10 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/ingress/nohost.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: name-virtual-host-ingress 6 | spec: 7 | rules: 8 | - host: first.bar.com 9 | http: 10 | paths: 11 | - backend: 12 | serviceName: testsvc1 13 | servicePort: 80 14 | - host: second.foo.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: testsvc2 19 | servicePort: 80 20 | # No host supplied here 21 | - http: 22 | paths: 23 | - backend: 24 | serviceName: testsvc3 25 | servicePort: 80 26 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/ingress/rewrite.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: ingress-rewrite 6 | annotations: 7 | nginx.ingress.kubernetes.io/rewrite-target: / 8 | spec: 9 | rules: 10 | - http: 11 | paths: 12 | - path: /rewritepath 13 | pathType: Prefix 14 | backend: 15 | serviceName: testsvc 16 | servicePort: 80 17 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/ingress/tls.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: ingress-tls-secret 6 | data: 7 | # Data here as a placeholder - it's just a base64-encoded 'a' 8 | tls.crt: YQo= 9 | tls.key: YQo= 10 | type: kubernetes.io/tls 11 | --- 12 | apiVersion: networking.k8s.io/v1beta1 13 | kind: Ingress 14 | metadata: 15 | name: ingress-tls 16 | spec: 17 | tls: 18 | - hosts: 19 | - sslexample.foo.com 20 | secretName: ingress-tls-secret 21 | rules: 22 | - host: sslexample.foo.com 23 | http: 24 | paths: 25 | - path: / 26 | backend: 27 | serviceName: testsvc1 28 | servicePort: 80 29 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/ingress/virtualhosting.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1beta1 3 | kind: Ingress 4 | metadata: 5 | name: name-virtual-host-ingress 6 | spec: 7 | rules: 8 | - host: foo.bar.com 9 | http: 10 | paths: 11 | - backend: 12 | serviceName: testsvc1 13 | servicePort: 80 14 | - host: bar.foo.com 15 | http: 16 | paths: 17 | - backend: 18 | serviceName: testsvc2 19 | servicePort: 80 20 | 
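# To exercise the name-based virtual hosting above, send requests with an explicit
# Host header; the controller address is a placeholder, not something this manifest creates:
#
#   curl -H "Host: foo.bar.com" http://<ingress-controller-ip>/   # routed to testsvc1
#   curl -H "Host: bar.foo.com" http://<ingress-controller-ip>/   # routed to testsvc2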
-------------------------------------------------------------------------------- /Kubernetes/yml-sample/init-container/init-container-msg.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: init-container-msg-deployment 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: init-container-msg-app 11 | template: 12 | metadata: 13 | labels: 14 | app: init-container-msg-app 15 | spec: 16 | initContainers: 17 | - command: 18 | - "/bin/sh" 19 | - "-c" 20 | - "echo 'message from init' > /init-container-msg-mount-path/this" 21 | image: busybox 22 | name: init-container-msg-container-init 23 | volumeMounts: 24 | - mountPath: /init-container-msg-mount-path 25 | name: init-container-msg-volume 26 | containers: 27 | - command: 28 | - "/bin/sh" 29 | - "-c" 30 | - "while true; do cat /init-container-msg-mount-path/this; sleep 5; done" 31 | image: busybox 32 | name: init-container-msg-container-main 33 | volumeMounts: 34 | - mountPath: /init-container-msg-mount-path 35 | name: init-container-msg-volume 36 | volumes: 37 | - emptyDir: {} 38 | name: init-container-msg-volume 39 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/init-container/init-container.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: init-container-pod 6 | spec: 7 | containers: 8 | - name: init-container-container 9 | image: busybox 10 | command: ['sh', '-c', 'echo The app is running! && sleep 3600'] 11 | initContainers: 12 | - name: init-container-init-container 13 | image: busybox 14 | command: ['sh', '-c', "until nslookup init-container-service.$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace).svc.cluster.local; do echo waiting for init-container-service; sleep 2; done"] 15 | --- 16 | apiVersion: v1 17 | kind: Service 18 | metadata: 19 | name: init-container-service 20 | spec: 21 | ports: 22 | - protocol: TCP 23 | port: 80 24 | targetPort: 12345 25 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/jobs/simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ 3 | apiVersion: batch/v1 4 | kind: Job 5 | metadata: 6 | name: jobs-simple-job 7 | spec: 8 | template: 9 | spec: 10 | containers: 11 | - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] 12 | image: perl 13 | name: jobs-simple-container 14 | restartPolicy: Never 15 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/jobs/timeout.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ 3 | apiVersion: batch/v1 4 | kind: Job 5 | metadata: 6 | name: jobs-timeout-job 7 | spec: 8 | activeDeadlineSeconds: 100 9 | template: 10 | spec: 11 | containers: 12 | - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] 13 | image: perl 14 | name: jobs-timeout-container 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/jobs/timetolive.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #
https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ 3 | apiVersion: batch/v1 4 | kind: Job 5 | metadata: 6 | name: jobs-timetolive-job 7 | spec: 8 | ttlSecondsAfterFinished: 100 9 | template: 10 | spec: 11 | containers: 12 | - command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"] 13 | image: perl 14 | name: jobs-timetolive-container 15 | restartPolicy: Never 16 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/lifecycle/lifecycle.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: lifecycle-pod 6 | spec: 7 | containers: 8 | - image: nginx 9 | lifecycle: 10 | postStart: 11 | exec: 12 | command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"] 13 | preStop: 14 | exec: 15 | command: ["/bin/sh", "-c", "nginx -s quit; while killall -0 nginx; do sleep 1; done"] 16 | name: lifecycle-container 17 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/liveness/advanced-liveness.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | labels: 6 | test: liveness 7 | name: liveness-http 8 | spec: 9 | containers: 10 | - args: 11 | - /server 12 | image: k8s.gcr.io/liveness 13 | livenessProbe: 14 | httpGet: 15 | httpHeaders: 16 | - name: X-Custom-Header 17 | value: Awesome 18 | # when "host" is not defined, "PodIP" will be used 19 | # host: my-host 20 | # when "scheme" is not defined, "HTTP" scheme will be used. Only "HTTP" and "HTTPS" are allowed 21 | # scheme: HTTPS 22 | path: /healthz 23 | port: 8080 24 | initialDelaySeconds: 15 25 | timeoutSeconds: 1 26 | name: liveness 27 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/liveness/liveness.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: pods-liveness-exec-pod 6 | spec: 7 | containers: 8 | - args: 9 | - /bin/sh 10 | - -c 11 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 12 | image: busybox 13 | livenessProbe: 14 | exec: 15 | command: 16 | - cat 17 | - /tmp/healthy 18 | initialDelaySeconds: 5 19 | periodSeconds: 5 20 | name: pods-liveness-exec-container 21 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/memory-request/memory-request-limit.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: memory-request-limit-pod 6 | spec: 7 | containers: 8 | - args: ["--vm", "1", "--vm-bytes", "150M", "--vm-hang", "1"] 9 | command: ["stress"] 10 | image: polinux/stress 11 | name: memory-request-limit-container 12 | resources: 13 | limits: 14 | memory: "200Mi" 15 | requests: 16 | memory: "100Mi" 17 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/namespace/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: namespace-namespace 6 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/network-policy/README.md:
-------------------------------------------------------------------------------- 1 | See: https://kubernetes.io/docs/concepts/services-networking/network-policies/ 2 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/network-policy/default-allow-egress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: network-policy-allow-egress 6 | spec: 7 | podSelector: {} 8 | egress: 9 | - {} 10 | policyTypes: 11 | - Egress 12 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/network-policy/default-allow-ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: network-policy-default-allow-ingress 6 | spec: 7 | podSelector: {} 8 | ingress: 9 | - {} 10 | policyTypes: 11 | - Ingress 12 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/network-policy/default-deny-all.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: network-policy-default-deny-all 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Ingress 10 | - Egress 11 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/network-policy/default-deny-egress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: network-policy-default-deny-egress 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Egress 10 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/network-policy/default-deny-ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: network-policy-default-deny-ingress 6 | spec: 7 | podSelector: {} 8 | policyTypes: 9 | - Ingress 10 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/network-policy/policy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: network-policy-policy 6 | spec: 7 | podSelector: 8 | matchLabels: 9 | role: db 10 | policyTypes: 11 | - Ingress 12 | - Egress 13 | ingress: 14 | - from: 15 | - ipBlock: 16 | cidr: 172.17.0.0/16 17 | except: 18 | - 172.17.1.0/24 19 | - namespaceSelector: 20 | matchLabels: 21 | project: myproject 22 | - podSelector: 23 | matchLabels: 24 | role: frontend 25 | ports: 26 | - protocol: TCP 27 | port: 6379 28 | egress: 29 | - to: 30 | - ipBlock: 31 | cidr: 10.0.0.0/24 32 | ports: 33 | - protocol: TCP 34 | port: 5978 35 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/persistent-volumes/README.md: -------------------------------------------------------------------------------- 1 | Persistent volumes are not covered here as they require third party dependencies to be set up. 
2 | 3 | https://kubernetes.io/docs/concepts/storage/persistent-volumes/ 4 | https://kubernetes.io/docs/concepts/storage/volume-snapshots/ 5 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pod-security-policy/README.md: -------------------------------------------------------------------------------- 1 | https://kubernetes.io/docs/concepts/policy/pod-security-policy/ 2 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pod-security-policy/privileged.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is the least restrictive policy you can create, equivalent to not using 3 | # the pod security policy admission controller 4 | # https://kubernetes.io/docs/concepts/policy/pod-security-policy/#example-policies 5 | apiVersion: policy/v1beta1 6 | kind: PodSecurityPolicy 7 | metadata: 8 | name: pod-security-policy-privileged-psp 9 | annotations: 10 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' 11 | spec: 12 | privileged: true 13 | allowPrivilegeEscalation: true 14 | allowedCapabilities: 15 | - '*' 16 | volumes: 17 | - '*' 18 | hostNetwork: true 19 | hostPorts: 20 | - min: 0 21 | max: 65535 22 | hostIPC: true 23 | hostPID: true 24 | runAsUser: 25 | rule: 'RunAsAny' 26 | seLinux: 27 | rule: 'RunAsAny' 28 | supplementalGroups: 29 | rule: 'RunAsAny' 30 | fsGroup: 31 | rule: 'RunAsAny' 32 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pod-security-policy/psp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: pod-security-policy-psp-namespace 6 | --- 7 | apiVersion: policy/v1beta1 8 | kind: PodSecurityPolicy 9 | metadata: 10 | name: pod-security-policy-psp 11 | spec: 12 | privileged: false # Don't allow privileged pods! 
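# Heads-up: PodSecurityPolicy was deprecated in Kubernetes v1.21 and removed in v1.25,
# so the manifests in this directory only apply to older clusters; Pod Security
# Admission (privileged/baseline/restricted levels) is the built-in replacement.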
13 | seLinux: 14 | rule: RunAsAny 15 | supplementalGroups: 16 | rule: RunAsAny 17 | runAsUser: 18 | rule: RunAsAny 19 | fsGroup: 20 | rule: RunAsAny 21 | volumes: 22 | - '*' 23 | --- 24 | apiVersion: v1 25 | kind: ServiceAccount 26 | metadata: 27 | name: pod-security-policy-user 28 | namespace: pod-security-policy-psp-namespace 29 | --- 30 | apiVersion: v1 31 | items: 32 | - apiVersion: rbac.authorization.k8s.io/v1 33 | kind: RoleBinding 34 | metadata: 35 | name: pod-security-policy-psp-user-editor 36 | namespace: pod-security-policy-psp-namespace 37 | roleRef: 38 | apiGroup: rbac.authorization.k8s.io 39 | kind: ClusterRole 40 | name: edit 41 | subjects: 42 | - kind: ServiceAccount 43 | name: pod-security-policy-user 44 | namespace: pod-security-policy-psp-namespace 45 | kind: List 46 | metadata: 47 | resourceVersion: "" 48 | selfLink: "" 49 | --- 50 | apiVersion: v1 51 | kind: Pod 52 | metadata: 53 | name: pause 54 | namespace: pod-security-policy-psp-namespace-unprivileged 55 | spec: 56 | containers: 57 | - name: pause 58 | image: k8s.gcr.io/pause 59 | --- 60 | apiVersion: v1 61 | kind: Pod 62 | metadata: 63 | name: pause 64 | namespace: pod-security-policy-psp-namespace-privileged 65 | spec: 66 | containers: 67 | - name: pause 68 | image: k8s.gcr.io/pause 69 | securityContext: 70 | privileged: true 71 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pod-security-policy/restricted.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is an example of a restrictive policy that requires users to run as an 3 | # unprivileged user, blocks possible escalations to root, and requires use of 4 | # several security mechanisms. 5 | apiVersion: policy/v1beta1 6 | kind: PodSecurityPolicy 7 | metadata: 8 | name: pod-security-policy-restricted-psp 9 | annotations: 10 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' 11 | apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' 12 | seccomp.security.alpha.kubernetes.io/defaultProfileName: 'runtime/default' 13 | apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' 14 | spec: 15 | allowedHostPaths: 16 | # This allows "/foo", "/foo/", "/foo/bar" etc., but 17 | # disallows "/fool", "/etc/foo" etc. 18 | # "/foo/../" is never valid. 19 | - pathPrefix: "/foo" 20 | readOnly: true # only allow read-only mounts 21 | allowPrivilegeEscalation: false 22 | # This is redundant with non-root + disallow privilege escalation, 23 | # but we can provide it for defense in depth. 24 | fsGroup: 25 | rule: 'MustRunAs' 26 | ranges: 27 | # Forbid adding the root group. 28 | - min: 1 29 | max: 65535 30 | hostIPC: false 31 | hostNetwork: false 32 | hostPID: false 33 | privileged: false 34 | readOnlyRootFilesystem: false 35 | # Required to prevent escalations to root. 36 | requiredDropCapabilities: 37 | - ALL 38 | runAsUser: 39 | # Require the container to run without root privileges. 40 | rule: 'MustRunAsNonRoot' 41 | seLinux: 42 | # This policy assumes the nodes are using AppArmor rather than SELinux. 43 | rule: 'RunAsAny' 44 | supplementalGroups: 45 | rule: 'MustRunAs' 46 | ranges: 47 | # Forbid adding the root group. 48 | - min: 1 49 | max: 65535 50 | # Allow core volume types. 51 | volumes: 52 | - 'configMap' 53 | - 'emptyDir' 54 | - 'projected' 55 | - 'secret' 56 | - 'downwardAPI' 57 | # Assume that persistentVolumes set up by the cluster admin are safe to use.
58 | - 'persistentVolumeClaim' 59 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pods/README.md: -------------------------------------------------------------------------------- 1 | See: https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/#adding-additional-entries-with-hostaliases 2 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pods/host-aliases.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/#adding-additional-entries-with-hostaliases 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: pods-host-aliases-pod 7 | spec: 8 | restartPolicy: Never 9 | hostAliases: 10 | - ip: "127.0.0.1" 11 | hostnames: 12 | - "foo.local" 13 | - "bar.local" 14 | - ip: "10.1.2.3" 15 | hostnames: 16 | - "foo.remote" 17 | - "bar.remote" 18 | containers: 19 | - name: cat-hosts 20 | image: busybox 21 | command: 22 | - cat 23 | args: 24 | - "/etc/hosts" 25 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pods/imagepullsecret.yaml: -------------------------------------------------------------------------------- 1 | # https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ 2 | --- 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: pods-imagepullsecret-pod 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "3600" 12 | image: busybox 13 | name: pods-simple-container 14 | imagePullSecrets: 15 | - name: regcred # does not exist, create with instructions above 16 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pods/multi-container.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: pods-multi-container-pod 6 | spec: 7 | containers: 8 | - image: busybox 9 | command: 10 | - sleep 11 | - "3600" 12 | name: pods-multi-container-container-1 13 | - image: busybox 14 | command: 15 | - sleep 16 | - "3601" 17 | name: pods-multi-container-container-2 18 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/pods/simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: pods-simple-pod 6 | spec: 7 | containers: 8 | - command: 9 | - sleep 10 | - "3600" 11 | image: busybox 12 | name: pods-simple-container 13 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/privileged/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/privileged/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Namespace here refers to the container namespaces, not Kubernetes 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: privileged-namespace-pod 7 | spec: 8 | hostPID: true 9 | hostIPC: true 10 | # (there is no hostUTS field in the Pod spec; the UTS namespace cannot be host-shared here) 11 | hostNetwork: true 12 | containers: 13 | - command: 14 | - sleep 15 | - "3600" 16 | image: busybox 17 | name: privileged-namespace-container 18 |
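# Worth spelling out: combining the host PID/IPC/network namespaces with the
# privileged security context below gives the container a root-equivalent view
# of the node; e.g. this illustrative command lists *host* processes:
#
#   kubectl exec privileged-namespace-pod -- ps aux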
securityContext: 19 | privileged: true 20 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/privileged/simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: privileged-simple-pod 6 | spec: 7 | containers: 8 | - command: 9 | - sleep 10 | - "3600" 11 | image: busybox 12 | name: privileged-simple-pod 13 | securityContext: 14 | privileged: true 15 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/rbac/README.md: -------------------------------------------------------------------------------- 1 | https://kubernetes.io/docs/reference/access-authn-authz/rbac/ 2 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/rbac/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | # This cluster role binding allows anyone in the "manager" group to 4 | # read secrets in any namespace. 5 | kind: ClusterRoleBinding 6 | metadata: 7 | name: rbac-cluster-role-binding-cluster-role-binding 8 | subjects: 9 | - kind: Group 10 | name: manager # Name is case sensitive 11 | apiGroup: rbac.authorization.k8s.io 12 | roleRef: 13 | kind: ClusterRole 14 | name: rbac-cluster-role-binding-cluster-role 15 | apiGroup: rbac.authorization.k8s.io 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: ClusterRole 19 | metadata: 20 | # "namespace" omitted since ClusterRoles are not namespaced 21 | name: rbac-cluster-role-binding-cluster-role 22 | rules: 23 | - apiGroups: [""] 24 | # 25 | # at the HTTP level, the name of the resource for accessing Secret 26 | # objects is "secrets" 27 | resources: ["secrets"] 28 | verbs: ["get", "watch", "list"] 29 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/rbac/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | # "namespace" omitted since ClusterRoles are not namespaced 6 | name: rbac-cluster-role 7 | rules: 8 | - apiGroups: [""] 9 | # at the HTTP level, the name of the resource for accessing Secret 10 | # objects is "secrets" 11 | resources: ["secrets"] 12 | verbs: ["get", "watch", "list"] 13 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/rbac/role-binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | # This role binding allows "jane" to read pods in the "default" namespace. 4 | # It binds "jane" to the Role "rbac-role-binding-role" defined below.
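# Once applied, impersonation is a quick way to confirm what the binding grants
# (illustrative command; requires impersonation rights on your cluster):
#
#   kubectl auth can-i list pods --as jane --namespace default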
5 | kind: RoleBinding 6 | metadata: 7 | name: rbac-role-binding-role-binding 8 | subjects: 9 | # You can specify more than one "subject" 10 | - kind: User 11 | name: jane # "name" is case sensitive 12 | apiGroup: rbac.authorization.k8s.io 13 | roleRef: 14 | # "roleRef" specifies the binding to a Role / ClusterRole 15 | kind: Role # this must be Role or ClusterRole 16 | # this must match the name of the Role or ClusterRole you wish to bind to 17 | name: rbac-role-binding-role 18 | apiGroup: rbac.authorization.k8s.io 19 | --- 20 | apiVersion: rbac.authorization.k8s.io/v1 21 | kind: Role 22 | metadata: 23 | name: rbac-role-binding-role 24 | rules: 25 | - apiGroups: [""] # "" indicates the core API group 26 | resources: ["pods"] 27 | verbs: ["get", "watch", "list"] 28 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: rbac-role-role 6 | rules: 7 | - apiGroups: [""] # "" indicates the core API group 8 | resources: ["pods"] 9 | verbs: ["get", "watch", "list"] 10 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/readiness/readiness.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: pods-readiness-exec-pod 6 | spec: 7 | containers: 8 | - args: 9 | - /bin/sh 10 | - -c 11 | - touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600 12 | image: busybox 13 | readinessProbe: 14 | exec: 15 | command: 16 | - cat 17 | - /tmp/healthy 18 | initialDelaySeconds: 5 19 | name: pods-readiness-exec-container 20 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/resource-quotas/quotas.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: List 4 | items: 5 | # https://kubernetes.io/docs/concepts/policy/resource-quotas/ 6 | - apiVersion: v1 7 | kind: ResourceQuota 8 | metadata: 9 | name: resource-quotas-quotas-pods-high 10 | spec: 11 | hard: 12 | cpu: "1000" 13 | memory: 200Gi 14 | pods: "10" 15 | scopeSelector: 16 | matchExpressions: 17 | - operator: In 18 | scopeName: PriorityClass 19 | values: ["high"] 20 | - apiVersion: v1 21 | kind: ResourceQuota 22 | metadata: 23 | name: resource-quotas-quotas-pods-medium 24 | spec: 25 | hard: 26 | cpu: "10" 27 | memory: 20Gi 28 | pods: "10" 29 | scopeSelector: 30 | matchExpressions: 31 | - operator: In 32 | scopeName: PriorityClass 33 | values: ["medium"] 34 | - apiVersion: v1 35 | kind: ResourceQuota 36 | metadata: 37 | name: resource-quotas-quotas-pods-low 38 | spec: 39 | hard: 40 | cpu: "5" 41 | memory: 10Gi 42 | pods: "10" 43 | scopeSelector: 44 | matchExpressions: 45 | - operator: In 46 | scopeName: PriorityClass 47 | values: ["low"] 48 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/resources/resource-limit.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: resource-limit-pod 6 | spec: 7 | containers: 8 | - name: resource-limit-container 9 | image: busybox 10 | args: 11 | - sleep 12 | - "600" 13 | livenessProbe: 14 | exec: 15 | command: 16 | - cat 17 | - /tmp/healthy 18 | initialDelaySeconds: 5 
19 | periodSeconds: 5 20 | resources: 21 | limits: 22 | cpu: "30m" 23 | memory: "200Mi" 24 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/resources/resource-request.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: resource-request-pod 6 | spec: 7 | containers: 8 | - name: resource-request-container 9 | image: busybox 10 | args: 11 | - sleep 12 | - "600" 13 | livenessProbe: 14 | exec: 15 | command: 16 | - cat 17 | - /tmp/healthy 18 | initialDelaySeconds: 5 19 | periodSeconds: 5 20 | resources: 21 | requests: 22 | memory: "20000Mi" 23 | cpu: "99999m" 24 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/secrets/simple-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: broken-secrets-simple-secret-secret 6 | type: Opaque 7 | stringData: 8 | config.yaml: |- 9 | password: apassword 10 | username: ausername 11 | --- 12 | apiVersion: v1 13 | kind: Pod 14 | metadata: 15 | name: broken-secrets-simple-secret-pod 16 | spec: 17 | containers: 18 | - command: 19 | - sleep 20 | - "3600" 21 | image: busybox 22 | name: broken-secrets-simple-secret-container 23 | volumeMounts: 24 | - name: broken-secrets-simple-secret-volume 25 | mountPath: "/etc/simple-secret" 26 | volumes: 27 | - name: broken-secrets-simple-secret-volume 28 | secret: 29 | secretName: broken-secrets-simple-secret-secret 30 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/service-topologies/fallback.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/service-topology/#prefer-node-local-zonal-then-regional-endpoints 3 | # A Service that prefers node local, zonal, then regional endpoints but falls back to cluster wide endpoints. 
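# Note: topologyKeys (the "Service Topology" feature used below) was deprecated in
# Kubernetes v1.21 and removed in v1.22; newer clusters achieve this with
# topology-aware routing instead, so treat this manifest as a historical sample.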
4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: service-topologies-fallback-service 8 | spec: 9 | selector: 10 | app: my-app 11 | ports: 12 | - protocol: TCP 13 | port: 80 14 | targetPort: 9376 15 | topologyKeys: 16 | - "kubernetes.io/hostname" 17 | - "topology.kubernetes.io/zone" 18 | - "topology.kubernetes.io/region" 19 | - "*" 20 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/services/external-ips.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/service/#external-ips 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: service-external-ips-service 7 | spec: 8 | selector: 9 | app: MyApp 10 | ports: 11 | - name: http 12 | protocol: TCP 13 | port: 80 14 | targetPort: 9376 15 | externalIPs: 16 | - 80.11.12.10 17 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/services/external-name.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/service/#externalname 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: service-external-name-service 7 | spec: 8 | type: ExternalName 9 | externalName: my.database.example.com 10 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/services/load-balancer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: service-load-balancer-service 7 | spec: 8 | selector: 9 | app: MyApp 10 | ports: 11 | - protocol: TCP 12 | port: 80 13 | targetPort: 9376 14 | clusterIP: 10.0.171.239 15 | type: LoadBalancer 16 | status: 17 | loadBalancer: 18 | ingress: 19 | - ip: 192.0.2.127 20 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/services/multi-port-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: multi-port-service-service 7 | spec: 8 | selector: 9 | app: MyApp 10 | ports: 11 | - name: http 12 | protocol: TCP 13 | port: 80 14 | targetPort: 8080 15 | - name: https 16 | protocol: TCP 17 | port: 443 18 | targetPort: 8443 19 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/services/node-port.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/service/#nodeport 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: service-node-port-service 7 | spec: 8 | type: NodePort 9 | selector: 10 | app: MyApp 11 | ports: 12 | # By default and for convenience, the `targetPort` is set to the same value as the `port` field.
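# Once applied, every node proxies the chosen nodePort, so the service is reachable
# from outside the cluster on any node address (illustrative):
#
#   curl http://<any-node-ip>:30007/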
13 | - port: 80 14 | targetPort: 80 15 | # Optional field 16 | # By default and for convenience, the Kubernetes control plane will allocate a port from a range (default: 30000-32767) 17 | nodePort: 30007 18 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/services/service-and-endpoint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/service/#services-without-selectors 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: service-and-endpoint-service 7 | spec: 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 9376 12 | --- 13 | apiVersion: v1 14 | kind: Endpoints 15 | metadata: 16 | name: service-and-endpoint-endpoint 17 | subsets: 18 | - addresses: 19 | - ip: 192.0.2.42 20 | ports: 21 | - port: 9376 22 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/services/simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/services-networking/service/ 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | name: service-simple-service 7 | spec: 8 | selector: 9 | app: service-simple-app 10 | ports: 11 | - protocol: TCP 12 | port: 80 13 | targetPort: 8080 14 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/statefulset/simple-stateful-set.yaml: -------------------------------------------------------------------------------- 1 | # https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ 2 | --- 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | labels: 7 | app: nginx 8 | name: nginx 9 | spec: 10 | clusterIP: None 11 | ports: 12 | - name: web 13 | port: 80 14 | selector: 15 | app: nginx 16 | --- 17 | apiVersion: apps/v1 18 | kind: StatefulSet 19 | metadata: 20 | name: simple-stateful-set 21 | spec: 22 | replicas: 3 # the default is 1 23 | selector: 24 | matchLabels: 25 | app: nginx # has to match .spec.template.metadata.labels 26 | serviceName: "nginx" 27 | template: 28 | metadata: 29 | labels: 30 | app: nginx # has to match .spec.selector.matchLabels 31 | spec: 32 | terminationGracePeriodSeconds: 10 33 | containers: 34 | - image: nginx 35 | name: nginx 36 | ports: 37 | - containerPort: 80 38 | name: web 39 | volumeMounts: 40 | - mountPath: /usr/share/nginx/html 41 | name: www 42 | volumeClaimTemplates: 43 | - metadata: 44 | name: www 45 | spec: 46 | accessModes: ["ReadWriteOnce"] 47 | resources: 48 | requests: 49 | storage: 1Gi 50 | storageClassName: "my-storage-class" 51 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/subdomain/README.md: -------------------------------------------------------------------------------- 1 | From: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-hostname-and-subdomain-fields 2 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/subdomain/simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Taken from: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-hostname-and-subdomain-fields 3 | # Currently when a pod is created, its hostname is the Pod's metadata.name value. 4 | # The Pod spec has an optional hostname field, which can be used to specify the Pod's hostname. 5 | # When specified, it takes precedence over the Pod's name as the hostname of the pod.
6 | # For example, given a Pod with hostname set to "my-host", the Pod will have its hostname set to "my-host". 7 | # The Pod spec also has an optional subdomain field which can be used to specify its subdomain. 8 | # For example, a Pod with hostname set to "foo", and subdomain set to "bar", in namespace 9 | # "default", will have the fully qualified domain name (FQDN) "foo.bar.default.svc.cluster-domain.example". 10 | # 11 | # If there exists a headless service in the same namespace as the pod and with the same name as the subdomain, 12 | # the cluster's DNS Server also returns an A or AAAA record for the Pod's fully qualified hostname. 13 | # For example, given a Pod with the hostname set to "subdomain-simple-hostname-1" and the subdomain 14 | # set to "subdomain-simple-subdomain-service", and a headless Service named "subdomain-simple-subdomain-service" 15 | # in the same namespace, the pod will see its own FQDN as 16 | # "subdomain-simple-hostname-1.subdomain-simple-subdomain-service.default.svc.cluster-domain.example". 17 | # DNS serves an A or AAAA record at that name, pointing to the Pod's IP. 18 | # Both pods "subdomain-simple-pod-1" and "subdomain-simple-pod-2" can have their distinct A or AAAA records. 19 | 20 | # Example: 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: subdomain-simple-subdomain-service 25 | spec: 26 | clusterIP: None # A headless service 27 | ports: 28 | - name: subdomain-simple-port-name # Actually, no port is needed. 29 | port: 1234 30 | targetPort: 1234 31 | selector: 32 | name: subdomain-simple-selector 33 | --- 34 | apiVersion: v1 35 | kind: Pod 36 | metadata: 37 | labels: 38 | name: subdomain-simple-selector 39 | name: subdomain-simple-pod-1 40 | spec: 41 | containers: 42 | - command: 43 | - sleep 44 | - "3600" 45 | image: busybox 46 | name: subdomain-simple-container-1 47 | hostname: subdomain-simple-hostname-1 48 | subdomain: subdomain-simple-subdomain-service 49 | --- 50 | apiVersion: v1 51 | kind: Pod 52 | metadata: 53 | name: subdomain-simple-pod-2 54 | labels: 55 | name: subdomain-simple-selector 56 | spec: 57 | containers: 58 | - command: 59 | - sleep 60 | - "3600" 61 | image: busybox 62 | name: subdomain-simple-container-2 63 | hostname: subdomain-simple-hostname-2 64 | subdomain: subdomain-simple-subdomain-service 65 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/topology-spread-constraints/topology-spread-constraints-with-node-affinity.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ 3 | kind: Pod 4 | apiVersion: v1 5 | metadata: 6 | name: topology-spread-constraints-with-node-affinity-pod 7 | labels: 8 | label1: value1 9 | spec: 10 | topologySpreadConstraints: 11 | - labelSelector: 12 | matchLabels: 13 | label1: value1 14 | maxSkew: 1 15 | topologyKey: zone 16 | whenUnsatisfiable: DoNotSchedule 17 | affinity: 18 | nodeAffinity: 19 | requiredDuringSchedulingIgnoredDuringExecution: 20 | nodeSelectorTerms: 21 | - matchExpressions: 22 | - key: zone 23 | operator: NotIn 24 | values: 25 | - zoneC 26 | containers: 27 | - name: pause 28 | image: k8s.gcr.io/pause:3.1 29 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/topology-spread-constraints/topology-spread-constraints.yaml: -------------------------------------------------------------------------------- 1 |
--- 2 | # https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ 3 | kind: Pod 4 | apiVersion: v1 5 | metadata: 6 | name: topology-spread-constraints-pod 7 | labels: 8 | label1: value1 9 | spec: 10 | topologySpreadConstraints: 11 | - maxSkew: 1 12 | topologyKey: zone 13 | whenUnsatisfiable: DoNotSchedule 14 | labelSelector: 15 | matchLabels: 16 | label1: value1 17 | containers: 18 | - name: pause 19 | image: k8s.gcr.io/pause:3.1 20 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/README.md: -------------------------------------------------------------------------------- 1 | https://kubernetes.io/docs/concepts/storage/volumes/ 2 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/storage/volumes/#configmap 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: volumes-configmap-pod 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "3600" 12 | image: busybox 13 | name: volumes-configmap-pod-container 14 | volumeMounts: 15 | - name: volumes-configmap-volume 16 | mountPath: /etc/config 17 | volumes: 18 | - name: volumes-configmap-volume 19 | configMap: 20 | name: volumes-configmap-configmap 21 | items: 22 | - key: game.properties 23 | path: configmap-volume-path 24 | --- 25 | apiVersion: v1 26 | kind: ConfigMap 27 | metadata: 28 | name: volumes-configmap-configmap 29 | data: 30 | game.properties: | 31 | enemies=aliens 32 | lives=3 33 | enemies.cheat=true 34 | enemies.cheat.level=noGoodRotten 35 | ui.properties: | 36 | color.good=purple 37 | color.bad=yellow 38 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/emptydir.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: volumes-emptydir-pod 6 | spec: 7 | containers: 8 | - command: 9 | - sleep 10 | - "3600" 11 | image: busybox 12 | name: volumes-emptydir-container 13 | volumeMounts: 14 | - mountPath: /volumes-emptydir-mount-path 15 | name: volumes-emptydir-volume 16 | volumes: 17 | - name: volumes-emptydir-volume 18 | emptyDir: {} 19 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/file-or-create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-fileorcreate 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: volumes-file-or-create-pod 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "3600" 12 | image: busybox 13 | name: busybox 14 | volumeMounts: 15 | - mountPath: /var/local/aaa 16 | name: volumes-file-or-create-dir 17 | - mountPath: /var/local/aaa/1.txt 18 | name: volumes-file-or-create-file 19 | volumes: 20 | - name: volumes-file-or-create-dir 21 | hostPath: 22 | # Ensure the file directory is created.
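# DirectoryOrCreate provisions an empty directory (mode 0755) when nothing exists at
# the path, while FileOrCreate provisions an empty file (mode 0644) but does not
# create parent directories - which is why the directory volume is declared first.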
23 | path: /var/local/aaa 24 | type: DirectoryOrCreate 25 | - name: volumes-file-or-create-file 26 | hostPath: 27 | path: /var/local/aaa/1.txt 28 | type: FileOrCreate 29 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/hostdir.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: volumes-hostdir-pod 6 | spec: 7 | containers: 8 | - command: 9 | - sleep 10 | - "3600" 11 | image: busybox 12 | name: volumes-hostdir-container 13 | volumeMounts: 14 | - mountPath: /volumes-hostdir-mount-path 15 | name: volumes-hostdir-volume 16 | volumes: 17 | - hostPath: 18 | # directory location on host 19 | path: /tmp 20 | name: volumes-hostdir-volume 21 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/local.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/storage/volumes/#local 3 | apiVersion: v1 4 | kind: PersistentVolume 5 | metadata: 6 | name: volumes-local-persistent-volume 7 | spec: 8 | capacity: 9 | storage: 100Gi 10 | volumeMode: Filesystem 11 | accessModes: 12 | - ReadWriteOnce 13 | persistentVolumeReclaimPolicy: Delete 14 | storageClassName: local-storage 15 | local: 16 | path: /mnt/disks/ssd1 17 | nodeAffinity: 18 | required: 19 | nodeSelectorTerms: 20 | - matchExpressions: 21 | - key: kubernetes.io/hostname 22 | operator: In 23 | values: 24 | - example-node 25 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/projected.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-with-a-secret-a-downward-api-and-a-configmap 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: volumes-projected-pod 7 | spec: 8 | containers: 9 | - command: 10 | - sleep 11 | - "3600" 12 | image: busybox 13 | name: volumes-projected-container 14 | volumeMounts: 15 | - name: volumes-projected-volume-mount 16 | mountPath: "/volumes-projected-volume-path" 17 | readOnly: true 18 | volumes: 19 | - name: volumes-projected-volume-mount 20 | projected: 21 | sources: 22 | - secret: 23 | items: 24 | - key: username 25 | path: my-group/my-username 26 | name: volumes-projected-secret 27 | mode: 511 28 | - downwardAPI: 29 | items: 30 | - path: "labels" 31 | fieldRef: 32 | fieldPath: metadata.labels 33 | - path: "cpu_limit" 34 | resourceFieldRef: 35 | containerName: volumes-projected-container 36 | resource: limits.cpu 37 | - configMap: 38 | items: 39 | - key: config 40 | path: my-group/my-config 41 | name: volumes-projected-configmap 42 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/sa-token.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/storage/volumes/#example-pod-with-a-secret-a-downward-api-and-a-configmap 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: volumes-sa-token-pod 7 | spec: 8 | containers: 9 | - name: container-test 10 | image: busybox 11 | volumeMounts: 12 | - mountPath: "/service-account" 13 | name: volumes-sa-token-volume 14 | readOnly: true 15 | volumes: 16 | - name: volumes-sa-token-volume 17 | projected: 18 | sources: 19 | - serviceAccountToken: 20 | audience: api 21 |
expirationSeconds: 3600 22 | path: token 23 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/subpath.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath 3 | # Sometimes, it is useful to share one volume for multiple uses in a single Pod. 4 | # The volumeMounts.subPath property can be used to specify a sub-path inside the 5 | # referenced volume instead of its root. 6 | apiVersion: v1 7 | kind: Pod 8 | metadata: 9 | name: volumes-subpath-pod 10 | spec: 11 | containers: 12 | - env: 13 | - name: MYSQL_ROOT_PASSWORD 14 | value: "rootpasswd" 15 | image: mysql 16 | name: mysql 17 | volumeMounts: 18 | - mountPath: /var/lib/mysql 19 | name: site-data 20 | subPath: mysql 21 | - image: php:7.0-apache 22 | name: php 23 | volumeMounts: 24 | - mountPath: /var/www/html 25 | name: site-data 26 | subPath: html 27 | volumes: 28 | - name: site-data 29 | persistentVolumeClaim: 30 | claimName: my-lamp-site-data 31 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/volumes/subpathexpr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath-with-expanded-environment-variables 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: volumes-subpathexpr-pod 7 | spec: 8 | containers: 9 | - command: ["sleep", "3600"] 10 | env: 11 | - name: POD_NAME 12 | valueFrom: 13 | fieldRef: 14 | apiVersion: v1 15 | fieldPath: metadata.name 16 | image: busybox 17 | name: volumes-subpathexpr-container 18 | volumeMounts: 19 | - name: volumes-subpathexpr-volume 20 | mountPath: /logs 21 | subPathExpr: $(POD_NAME) 22 | restartPolicy: Never 23 | volumes: 24 | - name: volumes-subpathexpr-volume 25 | hostPath: 26 | path: /var/log/pods 27 | -------------------------------------------------------------------------------- /Kubernetes/yml-sample/webserver/simple.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: webserver-simple-deployment 6 | spec: 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: webserver-simple-app 11 | template: 12 | metadata: 13 | labels: 14 | app: webserver-simple-app 15 | spec: 16 | containers: 17 | - name: webserver-simple-container 18 | image: python:3 19 | command: 20 | - python 21 | - -m 22 | - http.server 23 | --- 24 | # https://kubernetes.io/docs/concepts/services-networking/service/ 25 | apiVersion: v1 26 | kind: Service 27 | metadata: 28 | name: webserver-simple-service 29 | spec: 30 | selector: 31 | app: webserver-simple-app 32 | ports: 33 | - protocol: TCP 34 | port: 80 35 | targetPort: 8000 36 | -------------------------------------------------------------------------------- /LXC/Linux Containers/Installing LXC-on-Ubuntu-from-source.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Installing LXC on Ubuntu from source 4 | parent: LXC Hands-On Workshop 5 | nav_order: 18 6 | --- 7 | 8 | 9 | 10 | # Installing LXC on Ubuntu from source 11 | 12 | To use the latest version of LXC, you can download the source code from the upstream GitHub repository and compile it: 13 | First, let's install git and clone the repository: 14 | ``` 15 | root@ubuntu:~# apt-get install git 16 | 
root@ubuntu:~# cd /usr/src 17 | root@ubuntu:/usr/src# git clone https://github.com/lxc/lxc.git 18 | Cloning into 'lxc'... 19 | remote: Counting objects: 29252, done. 20 | remote: Compressing objects: 100% (156/156), done. 21 | remote: Total 29252 (delta 101), reused 0 (delta 0), 22 | pack-reused 29096 23 | Receiving objects: 100% (29252/29252), 11.96 MiB | 12.62 24 | MiB/s, done. 25 | Resolving deltas: 100% (21389/21389), done. 26 | root@ubuntu:/usr/src# 27 | 28 | ``` 29 | Next, let's install the build tools and various dependencies: 30 | 31 | ``` 32 | root@ubuntu:/usr/src# apt-get install -y dev-utils 33 | build-essential aclocal automake pkg-config git bridge-utils 34 | libcap-dev libcgmanager-dev cgmanager 35 | root@ubuntu:/usr/src# 36 | 37 | 38 | ``` 39 | Now, generate the configure shell script, which will attempt to guess correct values for different system-dependent variables used during compilation: 40 | 41 | ``` 42 | root@ubuntu:/usr/src# cd lxc 43 | root@ubuntu:/usr/src/lxc# ./autogen.sh 44 | 45 | ``` 46 | It's time now to run configure. In this example, I'll enable Linux capabilities and cgmanager, which will manage the cgroups for each container: 47 | 48 | ``` 49 | root@ubuntu:/usr/src/lxc# ./configure --enable-capabilities 50 | --enable-cgmanager 51 | ... 52 | ---------------------------- 53 | Environment: 54 | - compiler: gcc 55 | - distribution: ubuntu 56 | - init script type(s): upstart,systemd 57 | - rpath: no 58 | - GnuTLS: no 59 | - Bash integration: yes 60 | Security features: 61 | - Apparmor: no 62 | - Linux capabilities: yes 63 | - seccomp: no 64 | - SELinux: no 65 | - cgmanager: yes 66 | Bindings: 67 | - lua: no 68 | - python3: no 69 | Documentation: 70 | - examples: yes 71 | - API documentation: no 72 | - user documentation: no 73 | Debugging: 74 | - tests: no 75 | - mutex debugging: no 76 | Paths: 77 | Logs in configpath: no 78 | root@ubuntu:/usr/src/lxc# 79 | 80 | ``` 81 | From the preceding abbreviated output, we can see what options are going to be available after compilation. 82 | Notice that we are not enabling any of the security features for now, such as Apparmor. 83 | Next, compile with make: 84 | ``` 85 | root@ubuntu:/usr/src/lxc# make 86 | 87 | ``` 88 | Finally, install the binaries, libraries, and templates: 89 | 90 | ``` 91 | root@ubuntu:/usr/src/lxc# make install 92 | 93 | ``` 94 | As of this writing, the LXC binaries look for their libraries in a different path than where they were installed. To fix this, just copy them to the correct location: 95 | 96 | ``` 97 | root@ubuntu:/usr/src/lxc# cp /usr/local/lib/liblxc.so* 98 | /usr/lib/x86_64-linux-gnu/ 99 | 100 | 101 | ``` 102 | To check the version that was compiled and installed, execute the following code: 103 | 104 | ``` 105 | 106 | root@ubuntu:/usr/src/lxc# lxc-create --version 107 | 4.0.0 108 | root@ubuntu:/usr/src/lxc# 109 | 110 | ``` 111 | 112 | -------------------------------------------------------------------------------- /LXC/Linux Containers/Linux-namespaces–the-foundation-of-LXC.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Linux namespaces – the foundation of LXC 4 | parent: LXC Hands-On Workshop 5 | nav_order: 4 6 | --- 7 | 8 | 9 | # Linux namespaces – the foundation of LXC 10 | 11 | - Namespaces are the foundation of lightweight process virtualization. They enable a process and its children to have different views of the underlying system.
12 | This is achieved by the addition of the `unshare()` and `setns()` system calls, 13 | 14 | - and the inclusion of six new constant flags passed to the `clone()`, `unshare()`, and `setns()` system calls: 15 | 16 | - `clone()`: This creates a new process and attaches it to a new specified namespace 17 | 18 | - `unshare()`: This attaches the current process to a new specified namespace 19 | 20 | - `setns()`: This attaches a process to an already existing namespace 21 | 22 | - There are six namespaces currently in use by LXC, with more being developed: 23 | 24 | - Mount namespaces, specified by the `CLONE_NEWNS` flag 25 | 26 | - UTS namespaces, specified by the `CLONE_NEWUTS` flag 27 | 28 | - IPC namespaces, specified by the `CLONE_NEWIPC` flag 29 | 30 | - PID namespaces, specified by the `CLONE_NEWPID` flag 31 | 32 | - User namespaces, specified by the `CLONE_NEWUSER` flag 33 | 34 | - Network namespaces, specified by the `CLONE_NEWNET` flag 35 | 36 | - Let's have a look at each in more detail and see some userspace examples, to help us better understand what happens under the hood. 37 | -------------------------------------------------------------------------------- /LXC/Linux Containers/Managing_resources_with_systemd.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Managing resources with systemd 4 | parent: LXC Hands-On Workshop 5 | nav_order: 16 6 | --- 7 | 8 | 9 | # Managing resources with systemd 10 | 11 | - With the increased adoption of systemd as an init system, new ways of manipulating cgroups were introduced. For example, if the cpu controller is enabled in the kernel, systemd will create a cgroup for each service by default. This behavior can be changed by adding or removing cgroup subsystems in the configuration file of systemd, usually found at /etc/systemd/system.conf. 12 | 13 | - If multiple services are running on the server, the CPU resources will be shared equally among them by default, because systemd assigns equal weights to each. To change this behavior for an application, we can edit its service file and define the CPU shares, allocated memory, and I/O. 14 | 15 | The following example demonstrates how to change the CPU shares, memory, and I/O limits for the nginx process: 16 | 17 | ``` 18 | root@server:~# vim /etc/systemd/system/nginx.service 19 | .include /usr/lib/systemd/system/nginx.service 20 | [Service] 21 | CPUShares=2000 22 | MemoryLimit=1G 23 | BlockIOWeight=100 24 | 25 | 26 | ``` 27 | To apply the changes, first reload systemd, then restart nginx: 28 | ``` 29 | root@server:~# systemctl daemon-reload 30 | root@server:~# systemctl restart nginx.service 31 | root@server:~# 32 | ``` 33 | This will create and update the necessary control files in /sys/fs/cgroup/systemd and apply the limits. 34 | -------------------------------------------------------------------------------- /LXC/Linux Containers/PID-namespace.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: PID namespaces 4 | parent: LXC Hands-On Workshop 5 | nav_order: 7 6 | --- 7 | 8 | 9 | # PID namespaces 10 | 11 | - The Process ID (PID) namespaces provide the ability for a process to have an ID that already exists in the default namespace, 12 | for example an ID of `1`. This allows for an init system to run in a container with various other processes, without causing a collision with the rest of the PIDs on the same OS.
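A quick way to observe the same behavior without writing any C is the `unshare` utility from util-linux; the command below (illustrative, requires root) runs `ps` in a fresh PID namespace with its own `/proc`:

```
root@server:~# unshare --pid --fork --mount-proc ps aux
USER   PID ... COMMAND
root     1 ... ps aux
```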
13 | 14 | To demonstrate this concept in C, open up `pid_namespace.c`: 15 | 16 | ``` 17 | #define _GNU_SOURCE 18 | #include <sched.h> 19 | #include <signal.h> 20 | #include <stdio.h> 21 | #include <stdlib.h> 22 | #include <sys/wait.h> 23 | #include <unistd.h> 24 | 25 | static int childFunc(void *arg) 26 | { 27 | printf("Process ID in child = %ld\n", (long) getpid()); 28 | return 0; 29 | } 30 | 31 | ``` 32 | First, we include the headers and define the `childFunc` function that the `clone()` system call will use. 33 | The function prints out the child PID using the `getpid()` system call: 34 | ``` 35 | static char child_stack[1024*1024]; 36 | 37 | int main(int argc, char *argv[]) 38 | { 39 | pid_t child_pid; 40 | 41 | child_pid = clone(childFunc, child_stack + 42 | (1024*1024), 43 | CLONE_NEWPID | SIGCHLD, NULL); 44 | 45 | printf("PID of cloned process: %ld\n", (long) child_pid); 46 | waitpid(child_pid, NULL, 0); 47 | exit(EXIT_SUCCESS); 48 | } 49 | 50 | ``` 51 | 52 | In the `main()` function, we specify the stack size and call `clone()`, passing the child function `childFunc`, 53 | the stack pointer, the `CLONE_NEWPID` flag, and the `SIGCHLD` signal. The `CLONE_NEWPID` flag instructs `clone()` to create a new PID namespace 54 | and the `SIGCHLD` flag notifies the parent process when one of its children terminates. The parent process will block on `waitpid()` if the child process 55 | has not terminated. 56 | 57 | Compile and then run the program with the following: 58 | 59 | ``` 60 | root@server:~# gcc pid_namespace.c -o pid_namespace 61 | root@server:~# ./pid_namespace 62 | PID of cloned process: 17705 63 | Process ID in child = 1 64 | root@server:~# 65 | 66 | ``` 67 | 68 | From the output, we can see that the child process has a PID of 1 inside its namespace and 17705 in the parent namespace. 69 | 70 | # Note 71 | ``` 72 | Note that error handling has been omitted from the code examples for brevity. 73 | ``` 74 | -------------------------------------------------------------------------------- /LXC/Linux Containers/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: LXC Hands-On Workshop 4 | nav_order: 3 5 | has_children: true 6 | permalink: /LXC/ 7 | --- -------------------------------------------------------------------------------- /LXC/Linux Containers/Resource_management_with_cgroups.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Resource management with cgroups 4 | parent: LXC Hands-On Workshop 5 | nav_order: 10 6 | --- 7 | 8 | 9 | # Resource management with cgroups 10 | 11 | - Cgroups are kernel features that allow fine-grained control over resource allocation for a single process, or a group of processes, called tasks. In the context of LXC, this is quite important, because it makes it possible to assign limits to how much memory, CPU time, or I/O any given container can use.
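As a quick preview of what this looks like in practice, here is a minimal sketch that caps the memory of the current shell, assuming a cgroup v1 hierarchy with the memory controller mounted at `/sys/fs/cgroup/memory` (the `app1` group name is just an example):

```
root@server:~# mkdir /sys/fs/cgroup/memory/app1
root@server:~# echo 1073741824 > /sys/fs/cgroup/memory/app1/memory.limit_in_bytes
root@server:~# echo $$ > /sys/fs/cgroup/memory/app1/tasks
```

Every process started from this shell is now limited to 1 GB of memory; this is essentially what LXC arranges on our behalf for each container.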
12 | 13 | The cgroups we are most interested in are described in the following table: 14 | 15 | | Subsystem | Description | Defined in | 16 | |:-------------|:------------------|:------------------| 17 | | `cpu` | Allocates CPU time for tasks | `kernel/sched/core.c` | 18 | | `cpuacct` | Accounts for CPU usage | `kernel/sched/core.c` | 19 | | `cpuset` | Assigns CPU cores to tasks | `kernel/cpuset.c` | 20 | | `memory` | Allocates memory for tasks | `mm/memcontrol.c` | 21 | | `blkio` | Limits the I/O access to devices | `block/blk-cgroup.c` | 22 | | `devices` | Allows/denies access to devices | `security/device_cgroup.c` | 23 | | `freezer` | Suspends/resumes tasks | `kernel/cgroup_freezer.c` | 24 | | `net_cls` | Tags network packets | `net/sched/cls_cgroup.c` | 25 | | `net_prio` | Prioritizes network traffic | `net/core/netprio_cgroup.c` | 26 | | `hugetlb` | Limits the HugeTLB | `mm/hugetlb_cgroup.c` | 27 | 28 | - Cgroups are organized in hierarchies, represented as directories in a Virtual File System (VFS). Similar to process hierarchies, where every process is a descendent of the init or systemd process, cgroups inherit some of the properties of their parents. Multiple cgroup hierarchies can exist on the system, each one representing a single resource or a group of resources. It is possible to have hierarchies that combine two or more subsystems, for example, memory and I/O, and tasks assigned to a group will have limits applied on those resources. 29 | 30 | Note 31 | ``` 32 | If you are interested in how the different subsystems are implemented in the kernel, install the kernel source and have a look at the C files, 33 | shown in the third column of the table. 34 | ``` 35 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/cgroup1.jpg) 36 | 37 | Cgroups can be used in two ways: 38 | 39 | - By manually manipulating files and directories on a mounted VFS 40 | 41 | - Using userspace tools provided by various packages such as cgroup-bin on Debian/Ubuntu and libcgroup on RHEL/CentOS 42 | 43 | Let's have a look at a few practical examples of how to use cgroups to limit resources. This will help us get a better understanding of how containers work. 44 | -------------------------------------------------------------------------------- /LXC/Linux Containers/The-OS-kernel-its-early-limitations.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: The OS kernel and its early limitations 4 | parent: LXC Hands-On Workshop 5 | nav_order: 2 6 | --- 7 | 8 | # The OS kernel and its early limitations 9 | 10 | - The current state of Linux containers is a direct result of the problems that early OS designers were trying to solve – managing memory, I/O, and process scheduling in the most efficient way. 11 | 12 | - In the past, only a single process could be scheduled for work, wasting precious CPU cycles if blocked on an I/O operation. The solution to this problem was to develop better CPU schedulers, so more work could be allocated in a fair way for maximum CPU utilization. Even though modern schedulers, such as the Completely Fair Scheduler (CFS) in Linux, do a great job of allocating fair amounts of time to each process, there's still a strong case for being able to give higher or lower priority to a process and its subprocesses. Traditionally, this can be accomplished by the `nice()` system call or real-time scheduling policies; however, there are limitations to the level of granularity or control that can be achieved.
13 | 14 | - Similarly, before the advent of virtual memory, multiple processes would allocate memory from a shared pool of physical memory. Virtual memory provided some form of memory isolation per process, in the sense that processes would have their own address space, and extended the available memory by means of swap, but there still wasn't a good way of limiting how much memory each process and its children could use. 15 | 16 | - To further complicate the matter, running different workloads on the same physical server usually resulted in a negative impact on all running services. A memory leak or a kernel panic could cause one application to bring the entire operating system down. For example, a web server that is mostly memory bound and a database service that is I/O heavy running together became problematic. In an effort to avoid such scenarios, system administrators would separate the various applications between a pool of servers, leaving some machines underutilized, especially at certain times during the day, when there was not much work to be done. This is similar to the earlier problem, where a single running process blocked on an I/O operation wastes CPU and memory resources. 17 | 18 | - The solution to these problems is the use of hypervisor-based virtualization, containers, or a combination of both. 19 | -------------------------------------------------------------------------------- /LXC/Linux Containers/The-case-for-Linux containers.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: The case for Linux containers 4 | parent: LXC Hands-On Workshop 5 | nav_order: 3 6 | --- 7 | 8 | # The case for Linux containers 9 | 10 | - The hypervisor as part of the operating system is responsible for managing the life cycle of virtual machines, and has been around since the early days of mainframe machines in the late 1960s. Most modern virtualization implementations, such as Xen and KVM, can trace their origins back to that era. The main reason for the wide adoption of these virtualization technologies around 2005 was the need to better control and utilize the ever-growing clusters of compute resources. The inherent security of having an extra layer between the virtual machine and the host OS was a good selling point for the security-minded, though as with any other newly adopted technology there were security incidents. 11 | 12 | - Nevertheless, the adoption of full virtualization and paravirtualization significantly improved the way servers are utilized and applications provisioned. In fact, virtualization such as KVM and Xen is still widely used today, especially in multitenant clouds and cloud technologies such as OpenStack. 13 | 14 | - Hypervisors provide the following benefits, in the context of the problems outlined earlier: 15 | 16 | - Ability to run different operating systems on the same physical server 17 | 18 | - More granular control over resource allocation 19 | 20 | - Process isolation – a kernel panic on the virtual machine will not affect the host OS 21 | 22 | - Separate network stack and the ability to control traffic per virtual machine 23 | 24 | - Reduced capital and operating costs through simplified data center management and better utilization of available server resources 25 | 26 | - Arguably, the main reason against using any sort of virtualization technology today is the inherent overhead of running multiple kernels on the same host.
It would be much better, in terms of complexity, if the host OS could provide this level of isolation, without the need for hardware extensions in the CPU, or the use of emulation software such as QEMU, or even kernel modules such as KVM. Running an entire operating system on a virtual machine, just to achieve a level of confinement for a single web server, is not the most efficient allocation of resources. 27 | 28 | - Over the last decade, various improvements to the Linux kernel were made to allow for similar functionality, but with less overhead – most notably the kernel namespaces and cgroups. One of the first notable technologies to leverage those changes was LXC, available since kernel 2.6.24, around the 2008 time frame. Even though LXC is not the oldest container technology, it helped fuel the container revolution we see today. 29 | 30 | - The main benefits of using LXC include: 31 | 32 | - Lower overhead and complexity than running a hypervisor 33 | 34 | - Smaller footprint per container 35 | 36 | - Start times in the millisecond range 37 | 38 | - Native kernel support 39 | 40 | - It is worth mentioning that containers are not inherently as secure as having a hypervisor between the virtual machine and the host OS. However, in recent years, great progress has been made to narrow that gap using Mandatory Access Control (MAC) technologies such as SELinux and AppArmor, kernel capabilities, and cgroups, as demonstrated in later sections. 41 | -------------------------------------------------------------------------------- /LXC/Linux Containers/UTS-namespaces.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: UTS namespaces 4 | parent: LXC Hands-On Workshop 5 | nav_order: 6 6 | --- 7 | 8 | 9 | # UTS namespaces 10 | 11 | - Unix Timesharing (UTS) namespaces provide isolation for the hostname and domain name, so that each LXC container can maintain its own identifier as returned by the `hostname -f` command. 12 | This is needed for most applications that rely on a properly set hostname. 13 | 14 | - To create a bash session in a new UTS namespace, we can use the unshare utility again, which uses the `unshare()` system call to create the namespace and the `execve()` system call to 15 | execute bash in it: 16 | 17 | ``` 18 | root@server:~# hostname 19 | server 20 | root@server:~# unshare -u /bin/bash 21 | root@server:~# hostname uts-namespace 22 | root@server:~# hostname 23 | uts-namespace 24 | root@server:~# cat /proc/sys/kernel/hostname 25 | uts-namespace 26 | root@server:~# 27 | 28 | ``` 29 | 30 | As the preceding output shows, the hostname inside the namespace is now `uts-namespace`. 31 | 32 | # Next, from a different terminal, check the hostname again to make sure it has not changed: 33 | ``` 34 | 35 | root@server:~# hostname 36 | server 37 | root@server:~# 38 | 39 | ``` 40 | As expected, the hostname only changed in the new UTS namespace. 41 | 42 | # To see the actual system calls that the unshare command uses, we can run the strace utility: 43 | 44 | ``` 45 | root@server:~# strace -s 2000 -f unshare -u /bin/bash 46 | ... 47 | unshare(CLONE_NEWUTS) = 0 48 | getgid() = 0 49 | setgid(0) = 0 50 | getuid() = 0 51 | setuid(0) = 0 52 | execve("/bin/bash", ["/bin/bash"], [/* 15 vars */]) = 0 53 | ... 54 | 55 | ``` 56 | From the output we can see that the unshare command is indeed using the `unshare()` and `execve()` system calls 57 | and the `CLONE_NEWUTS` flag to specify a new UTS namespace.
58 | 59 | 60 | # IPC namespaces 61 | 62 | - The Interprocess Communication (IPC) namespaces provide isolation for a set of IPC and synchronization facilities. 63 | - These facilities provide a way of exchanging data and synchronizing the actions between threads and processes. 64 | - They provide primitives such as semaphores, file locks, and mutexes, among others, that are needed to have true process separation in a container. 65 | -------------------------------------------------------------------------------- /LXC/Linux Containers/User_namespaces.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: User namespaces 4 | parent: LXC Hands-On Workshop 5 | nav_order: 8 6 | --- 7 | 8 | # User namespaces 9 | 10 | - The user namespaces allow a process inside a namespace to have a different user and group ID than that in the default namespace. 11 | - In the context of LXC, this allows for a process to run as root inside the container, while having a non-privileged ID outside. 12 | This adds a thin layer of security, because breaking out of the container will result in a non-privileged user. This has been possible since kernel 3.8, 13 | which introduced the ability for non-privileged processes to create user namespaces. 14 | 15 | - To create a new user namespace as a non-privileged user and have `root` inside, we can use the unshare utility. Let's install the latest version from source: 16 | 17 | ``` 18 | root@ubuntu:~# cd /usr/src/ 19 | root@ubuntu:/usr/src# wget https://www.kernel.org/pub/linux/utils/util-linux/v2.28/util-linux-2.28.tar.gz 20 | root@ubuntu:/usr/src# tar zxfv util-linux-2.28.tar.gz 21 | root@ubuntu:/usr/src# cd util-linux-2.28/ 22 | root@ubuntu:/usr/src/util-linux-2.28# ./configure 23 | root@ubuntu:/usr/src/util-linux-2.28# make && make install 24 | root@ubuntu:/usr/src/util-linux-2.28# unshare --map-root-user --user sh -c whoami 25 | root 26 | root@ubuntu:/usr/src/util-linux-2.28# 27 | 28 | ``` 29 | We can also use the `clone()` system call with the `CLONE_NEWUSER` flag to create a process in a user namespace, as demonstrated by the following program: 30 | 31 | ``` 32 | #define _GNU_SOURCE 33 | #include <sched.h> 34 | #include <signal.h> 35 | #include <stdio.h> 36 | #include <stdlib.h> 37 | #include <sys/wait.h> 38 | #include <unistd.h> 39 | 40 | static int childFunc(void *arg) 41 | { 42 | printf("UID inside the namespace is %ld\n", (long) geteuid()); 43 | printf("GID inside the namespace is %ld\n", (long) getegid()); 44 | return 0; 45 | } 46 | 47 | static char child_stack[1024*1024]; 48 | 49 | int main(int argc, char *argv[]) 50 | { 51 | pid_t child_pid; 52 | 53 | child_pid = clone(childFunc, child_stack + (1024*1024), 54 | CLONE_NEWUSER | SIGCHLD, NULL); 55 | 56 | printf("UID outside the namespace is %ld\n", (long) geteuid()); 57 | printf("GID outside the namespace is %ld\n", (long) getegid()); 58 | waitpid(child_pid, NULL, 0); 59 | exit(EXIT_SUCCESS); 60 | } 61 | ``` 62 | 63 | After compilation and execution, the output looks similar to this when run as root (UID 0): 66 | 67 | ``` 68 | root@server:~# gcc user_namespace.c -o user_namespace 69 | root@server:~# ./user_namespace 70 | UID outside the namespace is 0 71 | GID outside the namespace is 0 72 | UID inside the namespace is 65534 73 | GID inside the namespace is 65534 74 | root@server:~# 75 | 76 | ``` 77 | 78 | 79 | -------------------------------------------------------------------------------- /LXC/Linux Containers/introduction-to-lxc.md: -------------------------------------------------------------------------------- 1 | --- 2 |
layout: default 3 | title: Introduction to Linux Containers 4 | parent: LXC Hands-On Workshop 5 | nav_order: 1 6 | --- 7 | 8 | # Introduction to Linux Containers 9 | 10 | Nowadays, deploying applications inside some sort of Linux container is a widely adopted practice, primarily due to the evolution of the tooling and the 11 | ease of use it presents. Even though Linux containers, or operating-system-level virtualization, in one form or another, have been around for more than a decade, 12 | it took some time for the technology to mature and enter mainstream operation. One of the reasons for this is the fact that hypervisor-based technologies such as 13 | KVM and Xen were able to solve most of the limitations of the Linux kernel during that period, and their overhead was not considered an issue. However, 14 | with the advent of kernel namespaces and control groups (cgroups), the notion of lightweight virtualization became possible through the use of containers. 15 | 16 | 17 | - I'll cover the following topics: 18 | 19 | - Evolution of the OS kernel and its early limitations 20 | 21 | - Differences between containers and platform virtualization 22 | 23 | - Concepts and terminology related to namespaces and cgroups 24 | 25 | - An example use of process resource isolation and management with network namespaces and cgroups 26 | -------------------------------------------------------------------------------- /Okteto/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Okteto 4 | nav_order: 8 5 | has_children: true 6 | permalink: /Okteto/ 7 | --- 8 | 9 | 10 | # Okteto Workshop 11 | -------------------------------------------------------------------------------- /Okteto/first-pod.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Deploy First Pod on Okteto Cloud Using Kubectl 4 | parent: Okteto 5 | nav_order: 2 6 | --- 7 | 8 | # Deploy First Pod on Okteto Cloud Using Kubectl 9 | -------------------------------------------------------------------------------- /Okteto/intro-okteto.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Introduction to Okteto 4 | parent: Okteto 5 | nav_order: 1 6 | --- 7 | 8 | 9 | # Introduction to Okteto 10 | 11 | 12 | - Okteto is a tool for Kubernetes developers to build, run, and debug their applications without putting a heavy load on the local machine, so you can still develop quickly. 13 | 14 | - You don't need anything installed on your machine other than the Okteto CLI. Remember that using Docker locally takes up a lot of memory and other resources, and rebuilding Docker containers again and again locally is a painful cycle. Okteto instead provides remote development using the power of Okteto Cloud. 15 | 16 | - Okteto provides a clean and easy CLI tool, without requiring a deep understanding of the complexity of Docker or of Kubernetes clustering and Swarm. 17 | 18 | # How Okteto Works Under the Hood 19 | 20 | ![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/how-its-work-okteto.png) 21 | 22 | - Okteto uses the concept of Syncthing, a continuous file synchronization program that works in real time to synchronize files between two or more computers.
23 | 24 | - How does this help? As discussed, you don't need any additional installation to run your Kubernetes application; because of Syncthing, running the application becomes much easier. 25 | 26 | - Okteto doesn't just synchronize files, though. Think of it this way: you have used commands like `docker build` or `docker container run` to run or rebuild Docker images; this is where Okteto does things more smartly. Okteto will detect the `okteto.yml` file when you enter `okteto up`, and you can deploy any kind of Kubernetes application easily. 27 | - Okteto Cloud is an application catalog where you can check logs, or you can simply copy a GitHub URL or Helm release link, and within two minutes your application is up and running. It even provides some pre-integrated applications that are easy to edit, redeploy, and monitor. 28 | 29 | # Why Okteto 30 | 31 | - Now you have a little overview of how Okteto works, but the real question is: why do we need Okteto? 32 | - Let's check out some of the advantages of using Okteto: 33 | - Fast inner loop development 34 | - Production-like development environment 35 | - Replicability 36 | - Unlimited resources 37 | - Deployment independent 38 | - Works anywhere 39 | 40 | # Let's learn Kubernetes with Okteto! 41 | 42 | - Let's [create an account](https://okteto.com/) first! It's free! 43 | - [Download the Okteto CLI](https://okteto.com/docs/getting-started/index.html#step-2-install-the-okteto-cli) 44 | 45 | Yes, you don't need Docker installed! 46 | 47 | 48 | Author: [Sangam Biradar](https://twitter.com/BiradarSangam) - [Join Okteto Community Bangalore](https://www.meetup.com/Okteto-Bangalore/) 49 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: ContainerLabs 4 | nav_order: 1 5 | description: " Welcome To ContainerLabs " 6 | permalink: / 7 | --- 8 | 9 | [Join Containerlabs Community](https://discord.gg/rEvr7vq){: .btn .btn-green .mr-4 } ![Hits](https://hitcounter.pythonanywhere.com/count/tag.svg?url=http%3A%2F%2Fcontainerlabs.kubedaily.com%2F) [![ko-fi](https://www.ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/K3K0E60M) 10 | 11 | ![img](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/ContainerLabs-official.png) 12 | 13 | 14 | 15 | 16 | The Ultimate Workshop Track Specially Designed For You - Select Learning Paths 17 | {: .label .label-blue } 18 | 19 | 20 | [Birth of Containerization](http://containerlabs.kubedaily.com/Birth_of_Containerization/README.html){: .btn .btn-purple .mr-2 } [LXC](http://containerlabs.kubedaily.com/LXC/){: .btn .btn-purple .mr-2 } [Docker Fundamental](http://containerlabs.kubedaily.com/Docker/Overview/){: .btn .btn-purple .mr-2 } [Docker For Developer](https://containerlabs.kubedaily.com/Docker/Dev/){: .btn .btn-purple .mr-2 } [Kubernetes 101](https://containerlabs.kubedaily.com/Kubernetes/beginner/README.html){: .btn .btn-purple .mr-2 } 21 | [Kubernetes](https://containerlabs.kubedaily.com/Kubernetes/){: .btn .btn-purple .mr-2 } [Rancher Networking](https://containerlabs.kubedaily.com/rancher/Networking/){: .btn .btn-purple .mr-2 } [litmuschaos](https://dev.to/t/litmuschaos){: .btn .btn-purple .mr-2 } [Okteto](https://containerlabs.kubedaily.com/Okteto/){: .btn .btn-purple .mr-2 } [Lightweight Kubernetes - K3s](){: .btn .btn-purple .mr-2 } 22 | [Traefik](https://containerlabs.kubedaily.com/traefik/){: .btn
.btn-purple .mr-2 } [OpenFaas](){: .btn .btn-purple .mr-2 } 23 | 24 | 25 | 26 | 27 | Containerlabs is an independent community project founded by [Sangam Biradar](https://twitter.com/BiradarSangam/), Developer Advocate at [Accurics](https://www.accurics.com), who is also a Docker Community Leader, Traefik Ambassador, and Okteto Community Lead from India. The community keeps growing, and he frequently mentors and educates free of cost! 28 | 29 | 30 | # ContainerLabs Episodes Series 31 | 32 | | title | videos | slides | 33 | |---------------------------------------------------------------------------------------|---------|---------| 34 | | Episode 01 :- Okteto The Kubernetes Development platform | [YouTube](https://www.youtube.com/watch?v=vZqT7UVP-6Y&t=2s) | - | 35 | | Episode 02 :- Building a DevOps HomeLab with k3s | [YouTube](https://www.youtube.com/watch?v=ziZbAw6Oxrg) | [slides](https://github.com/lunarops/presentation-slides/blob/main/02-build-a-devops-homelab-with-k3s/presentation.pdf) | 36 | 37 | 38 | Email kubedaily@gmail.com if you want to be part of any episode. 39 | 40 | 41 | 42 | 43 | 44 | # Awesome Essential Container Technology Tools & Library -> [Link](https://containerlabs.kubedaily.com/monthly-github/awesome-github.html) 45 | 46 | 47 | 48 | [![Twitter URL](https://img.shields.io/twitter/url/https/twitter.com/fold_left.svg?style=social&label=Follow%20%40KubeDaily)](https://twitter.com/KubeDaily) 49 | -------------------------------------------------------------------------------- /_config.yml: -------------------------------------------------------------------------------- 1 | remote_theme: pmarsceill/just-the-docs 2 | # Set a path/url to a logo that will be displayed instead of the title 3 | logo: "/img/containerlabs-logo.png" 4 | # Enable support for hyphenated search words: 5 | search_tokenizer_separator: /[\s/]+/ 6 | # Aux links for the upper right navigation 7 | aux_links: 8 | "ContainerLabs on GitHub": 9 | - "//github.com/sangam14/ContainerLabs/" 10 | 11 | search_enabled: true 12 | search: 13 | # Split pages into sections that can be searched individually 14 | # Supports 1 - 6, default: 2 15 | heading_level: 2 16 | # Maximum amount of previews per search result 17 | # Default: 3 18 | previews: 3 19 | # Maximum amount of words to display before a matched word in the preview 20 | # Default: 5 21 | preview_words_before: 5 22 | # Maximum amount of words to display after a matched word in the preview 23 | # Default: 10 24 | preview_words_after: 10 25 | # Set the search token separator 26 | # Default: /[\s\-/]+/ 27 | # Example: enable support for hyphenated search words 28 | tokenizer_separator: /[\s/]+/ 29 | # Display the relative url in search results 30 | # Supports true (default) or false 31 | rel_url: true 32 | # Enable or disable the search button that appears in the bottom right corner of every page 33 | # Supports true or false (default) 34 | button: false 35 | 36 | # Footer content 37 | # appears at the bottom of every page's main content 38 | # Footer last edited timestamp 39 | last_edit_timestamp: true # show or hide edit time - page must have `last_modified_date` defined in the frontmatter 40 | last_edit_time_format: "%b %e %Y at %I:%M %p" # uses ruby's time format: https://ruby-doc.org/stdlib-2.7.0/libdoc/time/rdoc/Time.html 41 | 42 | # Footer "Edit this page on GitHub" link text 43 | gh_edit_link: true # show or hide edit this page link 44 | gh_edit_link_text: "Edit this page on GitHub."
45 | gh_edit_repository: "https://github.com/sangam14/ContainerLabs/" # the github URL for your repo 46 | gh_edit_branch: "master" # the branch that your docs is served from 47 | gh_edit_view_mode: "tree" # "tree" or "edit" if you want the user to jump into the editor immediately 48 | 49 | # Google Analytics Tracking (optional) 50 | # e.g, UA-1234567-89 51 | ga_tracking: UA-0JG3VWYTBM 52 | ga_tracking_anonymize_ip: true # Use GDPR compliant Google Analytics settings (true/nil by default) 53 | 54 | plugins: 55 | - jekyll-seo-tag 56 | - jekyll-youtube 57 | - jekyll-feed 58 | 59 | kramdown: 60 | syntax_highlighter_opts: 61 | block: 62 | line_numbers: false 63 | 64 | 65 | compress_html: 66 | clippings: all 67 | comments: all 68 | endings: all 69 | startings: [] 70 | blanklines: false 71 | profile: false 72 | # ignore: 73 | # envs: all 74 | 75 | 76 | -------------------------------------------------------------------------------- /img/Cluster-IP.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Cluster-IP.png -------------------------------------------------------------------------------- /img/Container-to-Container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Container-to-Container.png -------------------------------------------------------------------------------- /img/ContainerLabs-official.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/ContainerLabs-official.png -------------------------------------------------------------------------------- /img/ContainerLabs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/ContainerLabs.png -------------------------------------------------------------------------------- /img/Contributor.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Contributor.png -------------------------------------------------------------------------------- /img/Deploy-po-container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Deploy-po-container.png -------------------------------------------------------------------------------- /img/Deployement-Sequence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Deployement-Sequence.png -------------------------------------------------------------------------------- /img/Deployement-containerlabs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Deployement-containerlabs.png -------------------------------------------------------------------------------- /img/Devops-chain.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Devops-chain.png -------------------------------------------------------------------------------- /img/DockerAchitecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/DockerAchitecture.png -------------------------------------------------------------------------------- /img/Docker_Daemon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Docker_Daemon.png -------------------------------------------------------------------------------- /img/Ingress-traffic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Ingress-traffic.png -------------------------------------------------------------------------------- /img/NodePort-flow.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/NodePort-flow.png -------------------------------------------------------------------------------- /img/Pod-scheduling-sequence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Pod-scheduling-sequence.png -------------------------------------------------------------------------------- /img/R-Bridge-Networking.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/R-Bridge-Networking.png -------------------------------------------------------------------------------- /img/README.md: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /img/Recreate-update.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Recreate-update.png -------------------------------------------------------------------------------- /img/Rolling_update.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Rolling_update.png -------------------------------------------------------------------------------- /img/UDP.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/UDP.png -------------------------------------------------------------------------------- /img/Virtualization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/Virtualization.png 
-------------------------------------------------------------------------------- /img/WordpressDev.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/WordpressDev.png -------------------------------------------------------------------------------- /img/WordpressDev2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/WordpressDev2.png -------------------------------------------------------------------------------- /img/WordpressDockerReverse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/WordpressDockerReverse.png -------------------------------------------------------------------------------- /img/WordpressDockerServer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/WordpressDockerServer.png -------------------------------------------------------------------------------- /img/WordpressReverse.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/WordpressReverse.png -------------------------------------------------------------------------------- /img/WordpressupdatedContainer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/WordpressupdatedContainer.png -------------------------------------------------------------------------------- /img/akmsymlk8s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/akmsymlk8s.png -------------------------------------------------------------------------------- /img/bare-metal-old.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/bare-metal-old.png -------------------------------------------------------------------------------- /img/blkio1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/blkio1.jpg -------------------------------------------------------------------------------- /img/blue-green.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/blue-green.png -------------------------------------------------------------------------------- /img/canary-dep.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/canary-dep.png -------------------------------------------------------------------------------- /img/cgroup1.jpg: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/cgroup1.jpg -------------------------------------------------------------------------------- /img/configmap-diagram.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/configmap-diagram.gif -------------------------------------------------------------------------------- /img/connection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/connection.png -------------------------------------------------------------------------------- /img/containerlabs-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/containerlabs-logo.png -------------------------------------------------------------------------------- /img/containerlabs-replicaset.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/containerlabs-replicaset.png -------------------------------------------------------------------------------- /img/containerlabs_banner.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/containerlabs_banner.jpg -------------------------------------------------------------------------------- /img/containerlabs_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/containerlabs_logo.png -------------------------------------------------------------------------------- /img/containers-simple.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/containers-simple.png -------------------------------------------------------------------------------- /img/daemonsetvs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/daemonsetvs.png -------------------------------------------------------------------------------- /img/docker-driver-network.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/docker-driver-network.png -------------------------------------------------------------------------------- /img/docker-page-traefik.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/docker-page-traefik.jpg -------------------------------------------------------------------------------- /img/docker-swarm.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/docker-swarm.png -------------------------------------------------------------------------------- /img/dockerfile-doc.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/dockerfile-doc.png -------------------------------------------------------------------------------- /img/everyserver.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/everyserver.png -------------------------------------------------------------------------------- /img/four-components-of-computer-system.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/four-components-of-computer-system.png -------------------------------------------------------------------------------- /img/health-page-traefik.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/health-page-traefik.jpg -------------------------------------------------------------------------------- /img/host-gw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/host-gw.png -------------------------------------------------------------------------------- /img/how-its-work-okteto.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/how-its-work-okteto.png -------------------------------------------------------------------------------- /img/image-container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/image-container.png -------------------------------------------------------------------------------- /img/k8s-Xnet-pod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/k8s-Xnet-pod.png -------------------------------------------------------------------------------- /img/k8s-net-pod.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/k8s-net-pod.png -------------------------------------------------------------------------------- /img/k8s_arch_new.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/k8s_arch_new.png -------------------------------------------------------------------------------- /img/k8snet.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/k8snet.png 
-------------------------------------------------------------------------------- /img/kube-scheduler-blue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/kube-scheduler-blue.png -------------------------------------------------------------------------------- /img/kube-scheduler-green-table.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/kube-scheduler-green-table.png -------------------------------------------------------------------------------- /img/kube-scheduler-ocean.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/kube-scheduler-ocean.png -------------------------------------------------------------------------------- /img/kube-scheduler-tainted.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/kube-scheduler-tainted.png -------------------------------------------------------------------------------- /img/kube-scheduler-toleration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/kube-scheduler-toleration.png -------------------------------------------------------------------------------- /img/kube-scheduler-waiter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/kube-scheduler-waiter.png -------------------------------------------------------------------------------- /img/kube-scheduler.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/kube-scheduler.png -------------------------------------------------------------------------------- /img/kubernetes-master-node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/kubernetes-master-node.png -------------------------------------------------------------------------------- /img/load-balancer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/load-balancer.png -------------------------------------------------------------------------------- /img/master-node-k8s.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/master-node-k8s.png -------------------------------------------------------------------------------- /img/microservice.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/microservice.png 
-------------------------------------------------------------------------------- /img/minikube-internal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/minikube-internal.png -------------------------------------------------------------------------------- /img/monolith.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/monolith.png -------------------------------------------------------------------------------- /img/mount-app1.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/mount-app1.jpg -------------------------------------------------------------------------------- /img/node-cluster.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/node-cluster.png -------------------------------------------------------------------------------- /img/node-container-scheduler.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/node-container-scheduler.png -------------------------------------------------------------------------------- /img/ovs1bridge.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/ovs1bridge.jpg -------------------------------------------------------------------------------- /img/playwithk8s-login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/playwithk8s-login.png -------------------------------------------------------------------------------- /img/pod-log-sidecar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/pod-log-sidecar.png -------------------------------------------------------------------------------- /img/pod-node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/pod-node.png -------------------------------------------------------------------------------- /img/pod-single-container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/pod-single-container.png -------------------------------------------------------------------------------- /img/pod-with-failed-container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/pod-with-failed-container.png -------------------------------------------------------------------------------- /img/pods-k8s.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/pods-k8s.png -------------------------------------------------------------------------------- /img/pwk-start.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/pwk-start.png -------------------------------------------------------------------------------- /img/rancher_host_net.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/rancher_host_net.png -------------------------------------------------------------------------------- /img/replica-cotroller.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/replica-cotroller.png -------------------------------------------------------------------------------- /img/replicaset-controller-sequence.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/replicaset-controller-sequence.png -------------------------------------------------------------------------------- /img/replicaset-service.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/replicaset-service.png -------------------------------------------------------------------------------- /img/replicasetvs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/replicasetvs.png -------------------------------------------------------------------------------- /img/running-first-container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/running-first-container.png -------------------------------------------------------------------------------- /img/traefik-logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/traefik-logo.png -------------------------------------------------------------------------------- /img/virtualizationvscontainerlization.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/virtualizationvscontainerlization.png -------------------------------------------------------------------------------- /img/worker-node.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/img/worker-node.png -------------------------------------------------------------------------------- /rancher/Networking/CNI-Intro.md: 
1 | --- 2 | layout: default 3 | title: CNI - Container Networking Interface 4 | parent: Rancher Networking 5 | nav_order: 10 6 | --- 7 | 8 | # CNI - Container Networking Interface 9 | 10 | 11 | - The Container Networking Interface (CNI) project is also under the governance of the CNCF. It provides a specification and a series of libraries for 12 | writing plugins to configure network interfaces in Linux containers. 13 | - The specification requires that providers implement their plugin as a binary executable that the container engine invokes. Kubernetes does this via the Kubelet process running on each node of the cluster. 14 | - The CNI specification expects the container runtime to create a new network namespace before invoking the CNI plugin. The plugin is then responsible for connecting the container’s network with that of the host. It does this by creating the virtual Ethernet devices that we discussed earlier. 15 | 16 | 17 | # Kubernetes and CNI 18 | - Kubernetes natively supports the CNI model. It gives its users the freedom to choose the network provider or product best suited for their needs. 19 | - To use the CNI plugin, pass `--network-plugin=cni` to the Kubelet when launching it. If your environment is not using the default configuration directory (`/etc/cni/net.d`), 20 | pass the correct configuration directory as a value to `--cni-conf-dir`. 21 | - The Kubelet looks for the CNI plugin binary at `/opt/cni/bin`, but you can specify an alternative location with `--cni-bin-dir`. 22 | 23 | The CNI plugin provides IP address management for the Pods and builds routes for the virtual interfaces. To do this, the plugin interfaces with an IPAM plugin that is also part of the CNI specification. The IPAM plugin must also be a single executable that the CNI plugin consumes. The role of the IPAM plugin is to provide to the CNI plugin the gateway, IP subnet, and routes for the Pod. 24 | -------------------------------------------------------------------------------- /rancher/Networking/Interlude_Netfilter_iptables_rules.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: Interlude - Netfilter and iptables rules 4 | parent: Rancher Networking 5 | nav_order: 4 6 | --- 7 | 8 | 9 | 10 | # Interlude: Netfilter and iptables rules 11 | 12 | - Previously, we looked at how Docker handles communication between containers. On a Linux host, the component which handles this is called Netfilter, or more commonly known by the command used to configure it: iptables. 13 | 14 | - Netfilter manages the rules that define network communication for the Linux kernel. 15 | These rules permit, deny, route, modify, and forward packets. It organizes these rules into tables according to their purpose. 16 | 17 | # The Filter Table 18 | 19 | 20 | - Rules in the Filter table control whether a packet is allowed or denied. 21 | Packets which are allowed are forwarded, whereas packets which are denied are either rejected or silently dropped. 22 | 23 | # The NAT Table 24 | 25 | - These rules control network address translation.
--------------------------------------------------------------------------------
/rancher/Networking/Interlude_Netfilter_iptables_rules.md:
--------------------------------------------------------------------------------
---
layout: default
title: Interlude - Netfilter and iptables rules
parent: Rancher Networking
nav_order: 4
---

# Interlude: Netfilter and iptables rules

- Previously, we looked at how Docker handles communication between containers. On a Linux host, the component that handles this is called Netfilter, though it is more commonly known by the command used to configure it: iptables.
- Netfilter manages the rules that define network communication for the Linux kernel. These rules permit, deny, route, modify, and forward packets, and Netfilter organizes them into tables according to their purpose.

# The Filter Table

- Rules in the Filter table control whether a packet is allowed or denied. Packets which are allowed are forwarded, whereas packets which are denied are either rejected or silently dropped.

# The NAT Table

- These rules control network address translation. They modify the source or destination address of the packet, changing how the kernel routes it. For example, Docker uses this table to masquerade container traffic leaving the host, as sketched below.
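As a concrete illustration, this is the kind of rule Docker installs in the NAT table so that containers on its default bridge can reach the outside world; the subnet and bridge name are Docker’s defaults and may differ on your host:

```
# Source-NAT (masquerade) traffic from the container subnet whenever it
# leaves through any interface other than the docker0 bridge
iptables -t nat -A POSTROUTING -s 172.17.0.0/16 ! -o docker0 -j MASQUERADE
```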
# The Mangle Table

- The headers of packets which go through this table are altered, changing the way the packet behaves. For example, Netfilter might shorten the TTL, redirect the packet to a different address, or change the number of network hops.

# The Raw Table

- This table marks packets to bypass iptables’ stateful connection tracking.

# The Security Table

- This table sets the SELinux security context marks on packets. Setting the marks affects how SELinux (or systems that can interpret SELinux security contexts) handles the packets. The rules in this table set marks on a per-packet or per-connection basis.
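A brief sketch of what rules in these tables can look like; the TTL value and port are illustrative, and the `TTL` target requires the corresponding kernel module:

```
# Mangle table: rewrite the TTL on packets leaving the host
iptables -t mangle -A POSTROUTING -j TTL --ttl-set 64
# Raw table: exempt inbound DNS queries from connection tracking
iptables -t raw -A PREROUTING -p udp --dport 53 -j NOTRACK
```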
Netfilter organizes the rules in a table into chains. Chains are the means by which Netfilter hooks in the kernel intercept packets as they move through processing. Packets flow through one or more chains and exit when they match a rule.

A rule defines a set of conditions, and if the packet matches those conditions, an action is taken. The universe of actions is diverse, but examples include the following (sketched as iptables rules after this list):

- Block all connections originating from a specific IP address.
- Block connections to a network interface.
- Allow all HTTP/HTTPS connections.
- Block connections to specific ports.
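Expressed as iptables rules, those four example actions might look like the following sketch; the address, interface, and ports are placeholders:

```
# Block all connections originating from a specific IP address
iptables -A INPUT -s 203.0.113.51 -j DROP
# Block connections arriving on a specific network interface
iptables -A INPUT -i eth1 -j DROP
# Allow all HTTP/HTTPS connections
iptables -A INPUT -p tcp -m multiport --dports 80,443 -j ACCEPT
# Block connections to a specific port (telnet here)
iptables -A INPUT -p tcp --dport 23 -j DROP
```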
The action that a rule takes is called a target, and represents the decision to accept, drop, or forward the packet. The system comes with five default chains that match different phases of a packet’s journey through processing: PREROUTING, INPUT, FORWARD, OUTPUT, and POSTROUTING. Users and programs may create additional chains and inject rules into the system chains to forward packets to a custom chain for continued processing (a sketch follows below).

- This architecture allows the Netfilter configuration to follow a logical structure, with chains representing groups of related rules. Docker creates several chains, and it is the actions of these chains that handle communication between containers, the host, and the outside world.
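To make the custom-chain mechanism concrete, here is a minimal sketch that creates a user-defined chain and routes forwarded traffic from an example subnet through it; the chain name, subnet, and port are hypothetical:

```
# Create a custom chain to group application-specific rules
iptables -N MYAPP-FIREWALL
# Inject a rule into the system FORWARD chain that hands matching
# packets to the custom chain
iptables -A FORWARD -s 10.1.0.0/16 -j MYAPP-FIREWALL
# Inside the custom chain: drop traffic to the database port and hand
# everything else back to the calling chain
iptables -A MYAPP-FIREWALL -p tcp --dport 5432 -j DROP
iptables -A MYAPP-FIREWALL -j RETURN
```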
--------------------------------------------------------------------------------
/rancher/Networking/Network_Policy.md:
--------------------------------------------------------------------------------
---
layout: default
title: Network Policy
parent: Rancher Networking
nav_order: 8
---

# Network Policy

- In an enterprise deployment of Kubernetes, the cluster often supports multiple projects with different goals. Each of these projects has different workloads, and each of these might require a different security policy.
- Pods, by default, do not filter incoming traffic. There are no firewall rules for inter-Pod communication. Instead, this responsibility falls to the NetworkPolicy resource, which uses a specification to define the network rules applied to a set of Pods.

Note: Network policies are defined in Kubernetes, but the CNI plugins that support network policy implementation do the actual configuration and processing. In a later section, we look at CNI plugins and how they work.

![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/k8s-net-pod.png)

- The image above shows a standard three-tier application with a UI, a backend service, and a database, all deployed within a Kubernetes cluster.
- Requests to the application arrive at the web Pods, which then initiate a request to the backend Pods for data. The backend Pods process the request and perform CRUD operations against the database Pods.
- If the cluster is not using a network policy, any Pod can talk to any other Pod. Nothing prevents the web Pods from communicating directly with the database Pods. If the security requirements of the cluster dictate a need for clear separation between tiers, a network policy enforces it.
- The policy defined below states that the database Pods can only receive traffic from Pods with the labels `app=myapp` and `role=backend`. It also defines that the backend Pods can only receive traffic from Pods with the labels `app=myapp` and `role=web`.

```
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: backend-access-ingress
spec:
  podSelector:
    matchLabels:
      app: myapp
      role: backend
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: myapp
          role: web
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: db-access-ingress
spec:
  podSelector:
    matchLabels:
      app: myapp
      role: db
  ingress:
  - from:
    - podSelector:
        matchLabels:
          app: myapp
          role: backend
```

![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/k8s-Xnet-pod.png)

With this network policy in place, Kubernetes blocks direct communication between the web and database tiers.
--------------------------------------------------------------------------------
/rancher/Networking/README.md:
--------------------------------------------------------------------------------
---
layout: default
title: Rancher Networking
nav_order: 6
has_children: true
permalink: /rancher/Networking/
---

# Table Of Contents
--------------------------------------------------------------------------------
/rancher/beginner/README.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/sponsorship/img/LO_Horizontal_Full Colour.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/sangam14/CloudNativeLab/df26ee9d41e12df945cb4314fffc07d8896c6411/sponsorship/img/LO_Horizontal_Full Colour.png
--------------------------------------------------------------------------------
/sponsorship/img/list.md:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/sponsorship/readme.md:
--------------------------------------------------------------------------------

# Kubedaily Community Sponsorship

![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/sponsorship/img/LO_Horizontal_Full%20Colour.png)
--------------------------------------------------------------------------------
/traefik/fundamentals/README.md:
--------------------------------------------------------------------------------
---
layout: default
title: Traefik
nav_order: 7
has_children: true
permalink: /traefik/
---

![](https://raw.githubusercontent.com/sangam14/ContainerLabs/master/img/traefik-logo.png)
--------------------------------------------------------------------------------