7 | 8 | ```bash 9 | # bash autocomplete for kubectl commands 10 | source <(kubectl completion bash) 11 | echo "source <(kubectl completion bash)" >> ~/.bashrc 12 | 13 | # zsh autocompletion for kubectl commands 14 | source <(kubectl completion zsh) 15 | echo "source <(kubectl completion zsh)" >> ~/.zshrc 16 | 17 | # use multiple kubeconfig files at the same time by joining their paths with ':' 18 | export KUBECONFIG=kubeconfig1:kubeconfig2:kubeconfig3:kubeconfig4 19 | 20 | # flatten kubeconfig and save to ~/.kube directory as a file named config 21 | kubectl config view --flatten > ~/.kube/config 22 | 23 | # list the kube config settings 24 | kubectl config view 25 | 26 | # list all the users in the cluster 27 | kubectl config view -o jsonpath='{.users[*].name}' 28 | 29 | # find where control plane is running 30 | kubectl cluster-info 31 | 32 | # get system health (controller manager, scheduler and etcd) 33 | kubectl get componentstatus 34 | 35 | # list all resources available to create (not currently created) 36 | kubectl api-resources 37 | 38 | # list namespaced resources in the kubernetes cluster 39 | kubectl api-resources --namespaced=true 40 | 41 | # list non-namespaced resources in the kubernetes cluster 42 | kubectl api-resources --namespaced=false 43 | 44 | # get the raw metrics for nodes 45 | kubectl get --raw /apis/metrics.k8s.io/v1beta1/nodes 46 | 47 | # get the raw metrics for pods 48 | kubectl get --raw /apis/metrics.k8s.io/v1beta1/pods 49 | 50 | # list all events in the default namespace 51 | kubectl get events 52 | 53 | # list all events in all namespaces 54 | kubectl get events --all-namespaces 55 | 56 | # list all events in the 'kube-system' namespace 57 | kubectl get events -n kube-system 58 | 59 | # watch as events occur in real time in the default namespace 60 | kubectl get events -w 61 | 62 | # in a multi-control-plane setup, find the elected leader (in annotations) 63 | kubectl get ep kube-scheduler -n kube-system -o yaml 64 | 65 | # verify 
version of kubeadm 66 | kubeadm version 67 | 68 | # view all default values that kubeadm uses to initialize the cluster (with kubeadm init) 69 | kubeadm config print init-defaults 70 | 71 | # print the join command to join more nodes to the kubeadm cluster 72 | sudo kubeadm token create --print-join-command 73 | 74 | # list the tokens that haven't expired yet for kubeadm clusters 75 | sudo kubeadm token list 76 | 77 | # generate a new token 78 | sudo kubeadm token generate 79 | 80 | # verify the cluster components can be upgraded via kubeadm 81 | kubeadm upgrade plan 82 | 83 | # upgrade the local kubelet configuration 84 | sudo kubeadm upgrade node 85 | 86 | # view enabled admission controllers 87 | kubectl exec kube-apiserver-kind-control-plane -n kube-system -- kube-apiserver -h | grep enable-admission-plugins 88 | 89 | # view enabled and disabled admission controllers 90 | ps -ef | grep kube-apiserver | grep admission-plugins 91 | ``` 92 | 93 |
94 |100 | 101 | ```bash 102 | # create namespace 'robot-shop' 103 | kubectl create ns robot-shop 104 | 105 | # list all namespaces in the cluster 106 | kubectl get ns 107 | 108 | # get the yaml config for all namespaces 109 | kubectl get ns -o yaml 110 | 111 | # list all kubernetes resources in all namespaces 112 | kubectl get all --all-namespaces 113 | 114 | -OR- 115 | 116 | kubectl get all -A 117 | 118 | # describe the namespace configuration 119 | kubectl describe ns 120 | 121 | # edit namespace 'robot-shop' 122 | kubectl edit ns robot-shop 123 | 124 | # delete namespace 'robot-shop' 125 | kubectl delete ns robot-shop 126 | 127 | # list all available contexts from kube config 128 | kubectl config get-contexts 129 | 130 | # get the current context for kubectl 131 | kubectl config current-context 132 | 133 | # create or modify a context named 'gkeCluster' (does not switch to it; use 'use-context' to switch) 134 | kubectl config set-context gkeCluster 135 | 136 | # set the default namespace of the current context to 'webapp' 137 | kubectl config set-context --current --namespace webapp 138 | 139 | # set the namespace of the context named 'gkeCluster' to 'robot-shop' 140 | kubectl config set-context gkeCluster --namespace robot-shop 141 | 142 | # set the user of the context named 'gkeCluster' to 'admin' 143 | kubectl config set-context gkeCluster --user=admin 144 | 145 | # switch to the context named 'gkeCluster' 146 | kubectl config use-context gkeCluster 147 | 148 | # delete a cluster named 'docker-desktop' from kubeconfig 149 | kubectl config delete-cluster docker-desktop 150 | ``` 151 | 152 | 
153 |159 | 160 | ```bash 161 | # list all nodes in the cluster (nodes are cluster-scoped, not namespaced) 162 | kubectl get no 163 | 164 | # same as previous, but with additional info (including IP address of nodes) 165 | kubectl get no -o wide 166 | 167 | # list nodes with the label kubernetes.io/control-plane (NOTE: You may have to show node labels, depending on the bootstrapper) 168 | kubectl get no -l node-role.kubernetes.io/control-plane 169 | 170 | # describe the configuration of all nodes in the cluster. Add the node name to the end in order to only get a specific node configuration (e.g. kubectl describe no node1) 171 | kubectl describe no 172 | 173 | # label node 'mynode1' with key 'disk' and value 'ssd' 174 | kubectl label no mynode1 disk=ssd 175 | 176 | # show labels for nodes in a cluster 177 | kubectl get no --show-labels 178 | 179 | # annotate node 'mynode1' with key 'azure', and value 'node' 180 | kubectl annotate no mynode1 azure=node 181 | 182 | # get the external IP addresses of all the nodes in the cluster 183 | kubectl get nodes -o jsonpath='{.items[*].status.addresses[?(@.type=="ExternalIP")].address}' 184 | 185 | # get the name of the first node ([0]) only, in the list of nodes, using jsonpath 186 | kubectl get no -o jsonpath='{.items[0].metadata.name}' 187 | 188 | # view the resource utilization (CPU, memory) of a node named 'mynode1' 189 | kubectl top node mynode1 190 | 191 | # view taints on all nodes 192 | kubectl get no -o json | jq '.items[].spec.taints' 193 | 194 | # view taints with node names 195 | kubectl get nodes -o jsonpath="{range .items[*]}{.metadata.name} {.spec.taints[?(@.effect=='NoSchedule')].effect}{\"\n\"}{end}" 196 | 197 | # taint node 'mynode1' with a key named 'node-role.kubernetes.io/master' and effect 'NoSchedule' 198 | kubectl taint no mynode1 node-role.kubernetes.io/master:NoSchedule 199 | 200 | # taint node 'mynode1' with a key named 'dedicated', a value of 'special-user' and an effect of 'NoSchedule' 201 | kubectl taint no mynode1 
dedicated=special-user:NoSchedule 202 | 203 | # Remove taint from node 'mynode1' with key 'dedicated' and effect 'NoSchedule' 204 | kubectl taint no mynode1 dedicated:NoSchedule- 205 | 206 | # Remove taints with key 'dedicated' from node 'mynode1' 207 | kubectl taint no mynode1 dedicated- 208 | 209 | # list the taints applied to all nodes 210 | kubectl describe no | grep Taint 211 | 212 | # list the taints on all nodes in the cluster 213 | kubectl get nodes -o=custom-columns=NODE:.metadata.name,KEY:.spec.taints[*].key,VALUE:.spec.taints[*].value,EFFECT:.spec.taints[*].effect 214 | 215 | # taint nodes with the label 'disk=ssd' with key 'dedicated' 216 | kubectl taint no -l disk=ssd dedicated=mynode1:PreferNoSchedule 217 | 218 | # taint node 'mynode1' with key 'bar' and no value 219 | kubectl taint no mynode1 bar:NoSchedule 220 | 221 | # drain node 'mynode1', in order to remove any scheduled pods while also ensuring no pods are scheduled to it 222 | kubectl drain mynode1 --ignore-daemonsets --force 223 | 224 | # cordon node 'mynode1', to ensure no pods are scheduled to it 225 | kubectl cordon mynode1 226 | 227 | # uncordon node 'mynode1', to resume scheduling pods to the node 228 | kubectl uncordon mynode1 229 | 230 | # delete node 'mynode1' from the cluster 231 | kubectl delete no mynode1 232 | 233 | # edit the configuration of 'mynode1' 234 | kubectl edit no mynode1 235 | ``` 236 | 237 |
238 |244 | 245 | ```bash 246 | # create pod 'nginx' using the 'nginx' image 247 | kubectl run nginx --image=nginx 248 | 249 | # create pod 'busybox', open a shell inside of the container, and delete pod when exit shell 250 | kubectl run busybox --image=busybox --rm -it -- sh 251 | 252 | # run a temporary pod (deleted when exit) and open a shell inside the container 253 | kubectl run curlpod --image=nicolaka/netshoot --rm -it -- sh 254 | 255 | # run a pod to troubleshoot dns and get a shell to it 256 | kubectl run dnstools --image infoblox/dnstools --rm -it -- bash 257 | 258 | # start a pod named 'debug-pod' and get a shell to the container (keeping it running after exit) 259 | kubectl run debug-pod --image=busybox -it 260 | 261 | # run a pod to test networking using curl 262 | kubectl run --rm -i --tty curl --image=curlimages/curl --restart=Never -- sh 263 | 264 | # create a pod yaml file named pod.yml 265 | kubectl run nginx --image=nginx --dry-run=client -o yaml > pod.yml 266 | 267 | # list all pods in the default namespace 268 | kubectl get po 269 | 270 | # continuously watch all pods in all namespaces (-w is the watch flag) 271 | kubectl get po -A -w 272 | 273 | # wait until pods with the label app=nginx are ready 274 | kubectl wait --for=condition=ready pod -l app=nginx 275 | 276 | # list all pods in all namespaces 277 | kubectl get po --all-namespaces 278 | 279 | # list all pods in all namespaces with a wide output (showing pod IP addresses) and selecting only one node by its name using field selectors 280 | kubectl get pods --all-namespaces -o wide --field-selector spec.nodeName=kind-control-plane 281 | 282 | # list all pods in the kube-system namespace and sort by node name 283 | kubectl get po -o custom-columns=POD:metadata.name,NODE:spec.nodeName --sort-by spec.nodeName -n kube-system 284 | 285 | # list pods by name only 286 | kubectl get po -o jsonpath='{ .items[*].metadata.name}' 287 | 288 | # list all pods with label 'app=mongo' by name only 289 | kubectl get po 
-l 'app=mongo' -o jsonpath='{ .items[*].metadata.name}' 290 | 291 | # list the first pod with label 'app=mongo' by name only 292 | kubectl get po -l 'app=mongo' -o jsonpath='{ .items[0].metadata.name}' 293 | 294 | # list the name of the first pod with label 'app=mongo' and store it in a variable named 'MONGOPOD' 295 | MONGOPOD=$(kubectl get po -l 'app=mongo' -o jsonpath='{ .items[0].metadata.name}') 296 | 297 | # get the documentation of pods and its fields 298 | kubectl explain pods 299 | 300 | # get the documentation for the spec field in pods 301 | kubectl explain pods.spec.containers 302 | 303 | # list all kubernetes resources in all namespaces 304 | kubectl get all --all-namespaces 305 | 306 | # list all pods, nodes and services in all namespaces 307 | kubectl get po,no,svc --all-namespaces 308 | 309 | # same as above but return additional info (e.g. node assignment) 310 | kubectl get po -o wide 311 | 312 | # describe the configuration of all pods in the default namespace 313 | kubectl describe po 314 | 315 | # give pod 'nginx' a label of 'app=prod' 316 | kubectl label po nginx app=prod 317 | 318 | # show the labels for all pods in the default namespace 319 | kubectl get po --show-labels 320 | 321 | # show pods with a label of 'app=nginx' 322 | kubectl get po -l app=nginx 323 | 324 | # annotate pod 'nginx' with key 'special', and value of 'app1' 325 | kubectl annotate po nginx special=app1 326 | 327 | # show the yaml output for the pod named nginx 328 | kubectl get po nginx -o yaml 329 | 330 | # export the yaml manifest of a pod named 'nginx' to a file named 'podconfig.yml' (the --export flag was removed in Kubernetes 1.18) 331 | kubectl get pod nginx -o yaml > podconfig.yml 332 | 333 | # list all the pods that are running 334 | kubectl get po --field-selector status.phase=Running 335 | 336 | # run 'mongo' command inside terminal in pod 'mongodb' 337 | kubectl exec -it mongodb -- mongo 338 | 339 | # list environment variables in pod 'nginx' 340 | kubectl exec nginx -- env 341 | 342 | # open shell to container 
'cart' in pod 'mypod' 343 | kubectl exec -it mypod -c cart -- /bin/bash 344 | 345 | # In a running pod named 'bux', issue a command inside the container that will echo hello in a loop 346 | kubectl exec -it bux -- sh -c "while true; do echo hello; sleep 2; done" 347 | 348 | # create new temporary pod (deletes upon exit) and get dns info 349 | kubectl run curlpod --image=nicolaka/netshoot --rm -it --restart=Never -- cat /etc/resolv.conf 350 | 351 | # create new pod named "netshoot" using image "nicolaka/netshoot" and inserts sleep command to keep it running 352 | kubectl run netshoot --image=nicolaka/netshoot --command -- sleep 3600 353 | 354 | # get dns info from a pod that's already running 355 | kubectl exec -t nginx -- cat /etc/resolv.conf 356 | 357 | # get the log output for a pod named 'nginx' in the default namespace 358 | kubectl logs nginx 359 | 360 | # get the log output for all pods with the label 'app=nginx' in the default namespace 361 | kubectl logs -l app=nginx 362 | 363 | # same as above but output to a file named 'pod.log' 364 | kubectl logs nginx > pod.log 365 | 366 | # get the last hour of log output for a pod named 'nginx' 367 | kubectl logs nginx --since=1h 368 | 369 | # get the last 20 lines of a log output for a pod named 'nginx' 370 | kubectl logs nginx --tail=20 371 | 372 | # get the streaming log output for a container named 'log' in a pod named 'nginx' 373 | kubectl logs -f nginx -c log 374 | 375 | # tail the logs from a pod named 'nginx' that has one container 376 | kubectl logs nginx -f 377 | 378 | # tail the logs from a pod named 'nginx' that has one container 379 | kubectl logs nginx --follow 380 | 381 | # delete pod 'nginx' 382 | kubectl delete po nginx 383 | 384 | # edit the configuration of pod 'nginx' 385 | kubectl edit po nginx 386 | 387 | # port forward from 80 on the container to 8080 on the host (your laptop) 388 | kubectl port-forward nginx 8080:80 389 | 390 | # port forward from 9200 on the container to 9200 on the host 
but run it in the background, so you can get your prompt back 391 | kubectl port-forward elasticsearch-pod 9200:9200 & 392 | 393 | # after port forwarding, you can curl the port on localhost 394 | curl --head http://localhost:9200 395 | ``` 396 | 397 |
398 |404 | 405 | ```bash 406 | # create a new deployment named 'nginx' using the image 'nginx' 407 | kubectl create deploy nginx --image nginx 408 | 409 | # create a deployment yaml file named deploy.yml 410 | kubectl create deploy nginx --image nginx --dry-run=client -o yaml > deploy.yml 411 | 412 | # create deployment from file 413 | kubectl create -f deploy.yml 414 | 415 | # create deployment and record history (useful for viewing rollout history later) 416 | kubectl create -f deploy.yml --record 417 | 418 | # apply the yaml configuration if resource already exists (will create resource if none exists) 419 | kubectl apply -f deploy.yml 420 | 421 | # apply the yaml configuration if resource already exists (will fail if no resource exists) 422 | kubectl replace -f deploy.yml 423 | 424 | # undo deployment rollout 425 | kubectl rollout undo deploy nginx 426 | 427 | # undo rollout to a specific version 428 | kubectl rollout undo deploy nginx --to-revision=3 429 | 430 | # pause deployment while rolling out (good for canary releases) 431 | kubectl rollout pause deploy nginx 432 | 433 | # resume rollout after pause 434 | kubectl rollout resume deploy nginx 435 | 436 | # get status of rollout 437 | kubectl rollout status deployment/nginx 438 | 439 | # restart pods in a deployment 440 | kubectl rollout restart deployment/nginx 441 | 442 | # get rollout history 443 | kubectl rollout history deploy nginx 444 | 445 | # scale deployment 'nginx' up to 5 replicas 446 | kubectl scale deploy nginx --replicas=5 447 | 448 | # scale deployment named 'nginx' down to 3 and record it into rollout history 449 | kubectl scale deploy nginx --replicas 3 --record 450 | 451 | # get rollout history of deployment nginx 452 | kubectl rollout history deploy nginx 453 | 454 | # set a new image for the deployment with verbose output 455 | kubectl set image deployments/nginx nginx=nginx:1.14.2 --v 6 456 | 457 | # edit deployment 'nginx' 458 | kubectl edit deploy nginx 459 | 460 | # list all 
deployments in the default namespace 461 | kubectl get deploy 462 | 463 | # list all deployments in all namespaces 464 | kubectl get deploy --all-namespaces 465 | 466 | # list all kubernetes resources in all namespaces 467 | kubectl get all --all-namespaces 468 | 469 | # list all pods, nodes and services in all namespaces 470 | kubectl get po,no,svc --all-namespaces 471 | 472 | # same as above but get additional information (e.g. labels) 473 | kubectl get deploy -o wide 474 | 475 | # show the yaml manifest for all deployments in the default namespace 476 | kubectl get deploy -o yaml 477 | 478 | # describe the configuration for all deployments in the default namespace 479 | kubectl describe deploy 480 | 481 | # get the documentation of the spec field, inside of the template field of a deployment 482 | kubectl explain deploy.spec.template.spec 483 | 484 | # get the documentation of the spec field of a deployment 485 | kubectl explain deploy.spec 486 | 487 | # delete deployment 'nginx' 488 | kubectl delete deploy nginx 489 | 490 | # list all replicasets in the default namespace 491 | kubectl get rs 492 | 493 | # same as above but output more information (e.g. selectors) 494 | kubectl get rs -o wide 495 | 496 | # output the yaml manifest for all replicasets in the default namespace 497 | kubectl get rs -o yaml 498 | 499 | # describe the configuration of all replicasets in the default namespace 500 | kubectl describe rs 501 | 502 | # get the documentation of the vsphere volume path for replicaSets 503 | kubectl explain rs.spec.template.spec.volumes.vsphereVolume.volumePath 504 | ``` 505 | 506 |
507 |513 | 514 | ```bash 515 | # create nodePort type service 'nodeport-svc' in default namespace, exposing port 8080 from the container port 80 516 | kubectl create svc nodeport nodeport-svc --tcp=8080:80 517 | 518 | # create a nodePort service 'app-service' from exposing deployment 'nginx' 519 | kubectl expose deploy nginx --name=app-service --port=80 --type=NodePort 520 | 521 | # create a load balancer type service named 'nginx' from a deployment 522 | kubectl expose deploy nginx --port 80 --target-port 80 --type LoadBalancer 523 | 524 | # Create a second service based on the above service, exposing the container port 8443 as port 443 with the name "nginx-https" 525 | kubectl expose svc nginx --name nginx-https --port 443 --target-port 8443 526 | 527 | # list all services in the default namespace 528 | kubectl get svc 529 | 530 | # same as above but get additional info (e.g. selectors) 531 | kubectl get svc -o wide 532 | 533 | # list all kubernetes resources in all namespaces 534 | kubectl get all --all-namespaces 535 | 536 | # list all pods, nodes and services in all namespaces 537 | kubectl get po,no,svc --all-namespaces 538 | 539 | # show the yaml manifest for all services in the default namespace 540 | kubectl get svc -o yaml 541 | 542 | # list configuration info and events for all services in the default namespace 543 | kubectl describe svc 544 | 545 | # show the labels on all services in the default namespace 546 | kubectl get svc --show-labels 547 | 548 | # get the documentation for the type, under spec for a service 549 | kubectl explain svc.spec.type 550 | 551 | # edit service 'app-service' in the default namespace 552 | kubectl edit svc app-service 553 | 554 | # delete service 'app-service' in the default namespace 555 | kubectl delete svc app-service 556 | 557 | # Create an ingress named 'cool-ing' that takes requests to mycoolwebapp.com/forums to our service named forums-svc on port 8080 with a tls secret "my-cert" 558 | kubectl create ingress 
cool-ing --rule="mycoolwebapp.com/forums=forums-svc:8080,tls=my-cert" 559 | 560 | # Create an ingress named 'one-ing' that takes all requests to a service named 'myweb-svc' on port 80 561 | kubectl create ingress one-ing --rule="/path=myweb-svc:80" 562 | 563 | # Create an ingress named 'appgw-ing' that adds an annotation for azure application gateways and forwards to 'azurewebapp.com/shop' to our service named 'web-svc' on port 8080 564 | kubectl create ingress appgw-ing --rule="azurewebapp.com/shop=web-svc:8080" --annotation kubernetes.io/ingress.class=azure/application-gateway 565 | 566 | # Create an ingress named 'rewire-ing' with an annotation to rewrite the path for nginx ingress controllers 567 | kubectl create ingress rewire-ing --rule="circuitweb.com/shop=web-svc:8080" --annotation "nginx.ingress.kubernetes.io/rewrite-target= /" 568 | 569 | # Create an ingress named 'moo-ing' where all requests going to service 'milk-svc' on port 80 but requests for 'moo.com/flavors' go to service 'flavor-svc' on port 8080 570 | kubectl create ingress moo-ing --rule="moo.com/=milk-svc:80" --rule="moo.com/flavors=flavor-svc:8080" 571 | 572 | # Create an ingress named 'rid-ing' where any requests with prefix 'bikepath.com/seats*' go to service 'seats-svc' on port 8080 and any requests with prefix 'bikepath.com/tires*' go to service 'tires-svc' on port 80 (http) 573 | kubectl create ingress rid-ing --rule="bikepath.com/seats*=seats-svc:8080" --rule="bikepath.com/tires*=tires-svc:http" 574 | 575 | # Create an ingress named 'soup-ing' with TLS enabled for all requests going to service 'soup-svc' on port 443 and any requests with a prefix 'mysoupwebsite.com/stew/carrot*' going to service 'carrots-svc' on port 8080 576 | kubectl create ingress soup-ing --rule="mysoupwebsite.com/=soup-svc:https,tls" --rule="mysoupwebsite.com/stew/carrot*=carrots-svc:8080" 577 | 578 | # Create an ingress named 'shh-ing' with TLS enabled where all path requests go to service 'shh-svc' on port 8080 and 
use a secret named 'shh-ertificate' 579 | kubectl create ingress shh-ing --rule="lookapassword.com/*=shh-svc:8080,tls=shh-ertificate" 580 | 581 | # Create an ingress named 'back-ing' with a default backend that goes to the service 'backdoor-svc' over port 80 and requests for any path at 'floorsdrawersdoors.com' go to service 'front-svc' on port 8080 and use secret named 'knock-knock' 582 | kubectl create ingress back-ing --default-backend=backdoor-svc:http --rule="floorsdrawersdoors.com/*=front-svc:8080,tls=knock-knock" 583 | ``` 584 | 585 | 
586 |592 | 593 | ```bash 594 | # list all roles in the 'kube-system' namespace 595 | kubectl get roles -n kube-system 596 | 597 | # output the yaml manifests for all roles in the 'kube-system' namespace 598 | kubectl get roles -n kube-system -o yaml 599 | 600 | # list all cluster roles 601 | kubectl get clusterroles 602 | 603 | # create a role named 'pod-reader' that can get, watch and list pods in the default namespace 604 | kubectl create role pod-reader --verb=get --verb=list --verb=watch --resource=pods 605 | 606 | # create a cluster role named 'pod-reader' that can get, watch and list pods 607 | kubectl create clusterrole pod-reader --verb=get,list,watch --resource=pods 608 | 609 | # give the user 'bob' permission in the 'admin' cluster role in the namespace 'robot-shop' 610 | kubectl create rolebinding bob-admin-binding --clusterrole=admin --user=bob --namespace=robot-shop 611 | 612 | # Across the entire cluster, grant the permissions in the "admin" ClusterRole to a user named 'bob' 613 | kubectl create clusterrolebinding root-cluster-admin-binding --clusterrole=admin --user=bob 614 | 615 | # Get the clusterrole membership in system groups 616 | kubectl get clusterrolebindings -o json | jq -r '.items[] | select(.subjects[0].kind=="Group") | select(.subjects[0].name=="system:masters")' 617 | 618 | # Get the clusterrole membership by name only 619 | kubectl get clusterrolebindings -o json | jq -r '.items[] | select(.subjects[0].kind=="Group") | select(.subjects[0].name=="system:masters") | .metadata.name' 620 | 621 | # test authorization for user chad to view secrets in the default namespace 622 | kubectl auth can-i get secrets --as chad -n default 623 | 624 | # test if user chad has authorization to delete pods in the default namespace 625 | kubectl auth can-i delete pods --as chad -n default 626 | 627 | # list all service accounts in the default namespace 628 | kubectl get sa 629 | 630 | # view the yaml manifest for all service accounts in the default 
namespace 631 | kubectl get sa -o yaml 632 | 633 | # output the yaml manifest for a service account named 'default' to a file named 'sa.yml' 634 | kubectl get sa default -o yaml > sa.yml 635 | 636 | # replace the current service account named 'default' with a service account from a yaml manifest named 'sa.yml' 637 | kubectl replace sa default -f sa.yml 638 | 639 | # edit service account 'default' in the default namespace 640 | kubectl edit sa default 641 | 642 | # delete service account 'default' in the default namespace 643 | kubectl delete sa default 644 | ``` 645 | 646 |
647 |653 | 654 | ```bash 655 | # list all the configmaps in the default namespace 656 | kubectl get cm 657 | 658 | # list all the configmaps in all namespaces 659 | kubectl get cm --all-namespaces 660 | 661 | # output the yaml manifest for all configmaps in all namespaces 662 | kubectl get cm --all-namespaces -o yaml 663 | 664 | # create a secret named 'db-user-pass' from a file where the key name will be the name of the file 665 | kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt 666 | 667 | # create the username and password files used by the previous command 668 | echo -n 'admin' > ./username.txt 669 | echo -n '1f2budjslkj8' > ./password.txt 670 | 671 | # create a secret named 'db-user-pass' from a file and specify the name of the keys as 'username' and 'password' 672 | kubectl create secret generic db-user-pass --from-file=username=./username.txt --from-file=password=./password.txt 673 | 674 | # create a secret named 'vault-license' with the contents of an environment variable named 'secret' and set the key name to 'license' 675 | kubectl create secret generic vault-license --from-literal="license=${secret}" 676 | 677 | # create an environment variable named 'secret' and set it to the contents of a file named 'vault.hclic' 678 | secret=$(cat vault.hclic) 679 | 680 | # create a secret named 'vault-tls' that contains the certificate data for the CA (name of the file is ca) set to the key 'vault.ca', 681 | # private key (name of the file is key) set to the key 'vault.key', and 682 | # PEM (name of the file is vault.example.com.pem) and set to the key 'vault.crt' for a valid tls certificate 683 | kubectl create secret generic vault-tls --from-file=vault.key=key --from-file=vault.crt=vault.example.com.pem --from-file=vault.ca=ca 684 | 685 | # list all secrets in the default namespace 686 | kubectl get secrets 687 | 688 | # list all secrets in all namespaces 689 | kubectl get secrets --all-namespaces 690 | 691 | # output 
the yaml manifest for all secrets in all namespaces 692 | kubectl get secrets --all-namespaces -o yaml 693 | 694 | # view the contents of a secret named db-user-pass 695 | kubectl get secret db-user-pass -o jsonpath='{.data}' 696 | 697 | # decode the password output from the previous command 698 | echo 'MYyZDFUm2N2Rm' | base64 --decode 699 | 700 | ``` 701 | 702 |
703 |709 | 710 | ```bash 711 | # list all daemonsets in the default namespace 712 | kubectl get ds 713 | 714 | # list all daemonsets in all namespaces (including in the 'kube-system' namespace) 715 | kubectl get ds --all-namespaces 716 | 717 | # describe the configuration for a daemonset named 'kube-proxy' in the 'kube-system' namespace 718 | kubectl describe ds kube-proxy -n kube-system 719 | 720 | # output the yaml manifest for a daemonset named 'kube-proxy' in the 'kube-system' namespace 721 | kubectl get ds kube-proxy -n kube-system -o yaml 722 | 723 | # edit daemonset 'kube-proxy' in the kube-system namespace 724 | kubectl edit ds kube-proxy -n kube-system 725 | 726 | # edit daemonset 'kube-proxy' in the kube-system namespace 727 | kubectl edit ds kube-proxy -n kube-system 728 | 729 | # delete daemonset 'kube-proxy' in the kube-system namespace 730 | kubectl delete ds kube-proxy -n kube-system 731 | 732 | ``` 733 | 734 |
735 |
741 |
742 | ```bash
743 | # create hostpath persistent volume named 'pv-volume' using 1 gigabyte of storage from the host at '/mnt/data' on host
744 | cat <