├── .github ├── FUNDING.yml ├── ISSUE_TEMPLATE │ ├── bug-report.md │ ├── feature-request.md │ └── lab-proposal.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── LICENSE ├── README.md ├── app ├── .dockerignore ├── Dockerfile ├── init.sh ├── package.json ├── public │ ├── 50x.html │ ├── answers.html │ ├── assets │ │ └── bmc.svg │ ├── css │ │ ├── answers.css │ │ ├── exam.css │ │ ├── feedback.css │ │ ├── index.css │ │ ├── results.css │ │ └── styles.css │ ├── exam.html │ ├── index.html │ ├── js │ │ ├── answers.js │ │ ├── app.js │ │ ├── components │ │ │ ├── clipboard-service.js │ │ │ ├── exam-api.js │ │ │ ├── question-service.js │ │ │ ├── remote-desktop-service.js │ │ │ ├── terminal-service.js │ │ │ ├── timer-service.js │ │ │ ├── ui-utils.js │ │ │ └── wake-lock-service.js │ │ ├── exam.js │ │ ├── feedback.js │ │ ├── index.js │ │ ├── panel-resizer.js │ │ └── results.js │ └── results.html ├── server.js ├── services │ ├── public-service.js │ ├── route-service.js │ ├── ssh-terminal.js │ └── vnc-service.js └── utils │ └── staticFiles.js ├── compose-deploy.sh ├── docker-compose.yaml ├── docs ├── CONTRIBUTING.md ├── PRIVACY_POLICY.md ├── TERMS_OF_SERVICE.md ├── development-setup.md ├── how-to-add-new-labs.md ├── local-setup-guide.md └── webapp │ ├── exam-functionality.md │ ├── index-functionality.md │ └── results-functionality.md ├── facilitator ├── .dockerignore ├── .gitignore ├── Dockerfile ├── README.md ├── assets │ └── exams │ │ ├── cka │ │ ├── 001 │ │ │ ├── answers.md │ │ │ ├── assessment.json │ │ │ ├── config.json │ │ │ └── scripts │ │ │ │ ├── setup │ │ │ │ ├── q10_setup.sh │ │ │ │ ├── q1_setup.sh │ │ │ │ ├── q2_setup.sh │ │ │ │ ├── q3_setup.sh │ │ │ │ ├── q4_setup.sh │ │ │ │ ├── q5_setup.sh │ │ │ │ ├── q6_setup.sh │ │ │ │ ├── q7_setup.sh │ │ │ │ ├── q8_setup.sh │ │ │ │ └── q9_setup.sh │ │ │ │ └── validation │ │ │ │ ├── q10_s1_validate_pod.sh │ │ │ │ ├── q10_s2_validate_health_check_effectiveness.sh │ │ │ │ ├── q1_s1_validate_namespace.sh │ │ │ │ ├── q1_s2_validate_pod.sh │ │ │ │ ├── q1_s3_validate_pod_status.sh │ │ │ │ ├── q2_s1_validate_static_pod.sh │ │ │ │ ├── q2_s2_validate_static_pod_config.sh │ │ │ │ ├── q3_s1_validate_storageclass.sh │ │ │ │ ├── q3_s2_validate_pvc.sh │ │ │ │ ├── q3_s3_validate_storage_access.sh │ │ │ │ ├── q4_s1_validate_pod.sh │ │ │ │ ├── q4_s2_validate_logging.sh │ │ │ │ ├── q5_s1_validate_sa.sh │ │ │ │ ├── q5_s2_validate_rbac.sh │ │ │ │ ├── q5_s3_validate_rbac_permissions.sh │ │ │ │ ├── q6_s1_validate_networkpolicy.sh │ │ │ │ ├── q6_s2_validate_network_policy_effect.sh │ │ │ │ ├── q7_s1_validate_deployment.sh │ │ │ │ ├── q7_s2_validate_service.sh │ │ │ │ ├── q8_s1_validate_pod.sh │ │ │ │ ├── q8_s2_validate_resource_usage.sh │ │ │ │ ├── q9_s1_validate_configmap.sh │ │ │ │ ├── q9_s2_validate_pod.sh │ │ │ │ └── q9_s3_validate_configmap_usage.sh │ │ └── 002 │ │ │ ├── answers.md │ │ │ ├── answers.sh │ │ │ ├── assessment.json │ │ │ ├── config.json │ │ │ └── scripts │ │ │ ├── setup │ │ │ ├── q10_setup.sh │ │ │ ├── q11_setup.sh │ │ │ ├── q12_setup.sh │ │ │ ├── q13_setup.sh │ │ │ ├── q14_setup.sh │ │ │ ├── q15_setup.sh │ │ │ ├── q16_setup.sh │ │ │ ├── q17_setup.sh │ │ │ ├── q18_setup.sh │ │ │ ├── q19_setup.sh │ │ │ ├── q1_setup.sh │ │ │ ├── q20_setup.sh │ │ │ ├── q2_setup.sh │ │ │ ├── q3_setup.sh │ │ │ ├── q4_setup.sh │ │ │ ├── q5_setup.sh │ │ │ ├── q6_setup.sh │ │ │ ├── q7_setup.sh │ │ │ ├── q8_setup.sh │ │ │ └── q9_setup.sh │ │ │ └── validation │ │ │ ├── q10_s1_validate_deployment.sh │ │ │ ├── q10_s2_validate_dns.sh │ │ │ ├── q10_s3_validate_results.sh │ │ │ ├── 
q11_s1_validate_repo.sh │ │ │ ├── q11_s2_validate_release.sh │ │ │ ├── q11_s3_validate_deployment.sh │ │ │ ├── q12_s1_validate_files.sh │ │ │ ├── q12_s2_validate_resources.sh │ │ │ ├── q12_s3_validate_configmap.sh │ │ │ ├── q13_s1_validate_gateway.sh │ │ │ ├── q13_s2_validate_httproute.sh │ │ │ ├── q13_s3_validate_backends.sh │ │ │ ├── q14_s1_validate_limitrange.sh │ │ │ ├── q14_s2_validate_quota.sh │ │ │ ├── q14_s3_validate_deployment.sh │ │ │ ├── q15_s1_validate_deployment.sh │ │ │ ├── q15_s2_validate_hpa.sh │ │ │ ├── q15_s3_validate_pods.sh │ │ │ ├── q16_s1_validate_sa.sh │ │ │ ├── q16_s2_validate_rbac.sh │ │ │ ├── q16_s3_validate_pod.sh │ │ │ ├── q17_s1_validate_deployments.sh │ │ │ ├── q17_s2_validate_policies.sh │ │ │ ├── q17_s3_validate_enforcement.sh │ │ │ ├── q18_s1_validate_initial.sh │ │ │ ├── q18_s2_validate_update.sh │ │ │ ├── q18_s3_validate_rollback.sh │ │ │ ├── q19_s1_validate_priority.sh │ │ │ ├── q19_s2_validate_pods.sh │ │ │ ├── q19_s3_validate_antiaffinity.sh │ │ │ ├── q1_s1_validate_pvc.sh │ │ │ ├── q1_s2_validate_pod.sh │ │ │ ├── q1_s3_validate_mount.sh │ │ │ ├── q20_s1_validate_port.sh │ │ │ ├── q20_s2_validate_memory.sh │ │ │ ├── q20_s3_validate_probe.sh │ │ │ ├── q20_s4_validate_pods.sh │ │ │ ├── q2_s1_validate_sc.sh │ │ │ ├── q2_s2_validate_default.sh │ │ │ ├── q2_s3_validate_no_other_default.sh │ │ │ ├── q3_s1_validate_pv.sh │ │ │ ├── q3_s2_validate_pvc.sh │ │ │ ├── q3_s3_validate_pod.sh │ │ │ ├── q4_s1_validate_deployment.sh │ │ │ ├── q4_s2_validate_hpa.sh │ │ │ ├── q4_s3_validate_resources.sh │ │ │ ├── q5_s1_validate_node_label.sh │ │ │ ├── q5_s2_validate_affinity.sh │ │ │ ├── q5_s3_validate_pod_placement.sh │ │ │ ├── q6_s1_validate_psp.sh │ │ │ ├── q6_s2_validate_pod.sh │ │ │ ├── q7_s1_validate_taint.sh │ │ │ ├── q7_s2_validate_toleration_deploy.sh │ │ │ ├── q7_s3_validate_normal_deploy.sh │ │ │ ├── q8_s1_validate_statefulset.sh │ │ │ ├── q8_s2_validate_service.sh │ │ │ ├── q8_s3_validate_storage.sh │ │ │ ├── q9_s1_validate_deployment_svc.sh │ │ │ ├── q9_s2_validate_dns_resolution.sh │ │ │ └── q9_s3_validate_dns_config.sh │ │ ├── ckad │ │ ├── 001 │ │ │ ├── answers.md │ │ │ ├── assessment.json │ │ │ ├── config.json │ │ │ └── scripts │ │ │ │ ├── setup │ │ │ │ ├── q10_setup.sh │ │ │ │ ├── q11_setup.sh │ │ │ │ ├── q12_setup.sh │ │ │ │ ├── q14_setup.sh │ │ │ │ ├── q16_setup.sh │ │ │ │ ├── q17_setup.sh │ │ │ │ ├── q18_setup.sh │ │ │ │ ├── q19_setup.sh │ │ │ │ ├── q1_setup.sh │ │ │ │ ├── q20_setup.sh │ │ │ │ ├── q21_setup.sh │ │ │ │ ├── q2_setup.sh │ │ │ │ ├── q3_setup.sh │ │ │ │ ├── q4_setup.sh │ │ │ │ ├── q5_setup.sh │ │ │ │ ├── q6_setup.sh │ │ │ │ ├── q7_setup.sh │ │ │ │ ├── q8_setup.sh │ │ │ │ └── q9_setup.sh │ │ │ │ └── validation │ │ │ │ ├── q10_s1_validate_secret.sh │ │ │ │ ├── q10_s2_validate_pod_running.sh │ │ │ │ ├── q10_s3_validate_pod_env_vars.sh │ │ │ │ ├── q11_s1_validate_cronjob_exists.sh │ │ │ │ ├── q11_s2_validate_cronjob_schedule.sh │ │ │ │ ├── q11_s3_validate_cronjob_command.sh │ │ │ │ ├── q11_s4_validate_cronjob_policy.sh │ │ │ │ ├── q12_s1_validate_pod_running.sh │ │ │ │ ├── q12_s2_validate_liveness_probe.sh │ │ │ │ ├── q12_s3_validate_readiness_probe.sh │ │ │ │ ├── q13_s1_validate_clusterrole.sh │ │ │ │ ├── q13_s2_validate_clusterrolebinding.sh │ │ │ │ ├── q14_s1_validate_helm_installed.sh │ │ │ │ ├── q14_s1_validate_helm_repo.sh │ │ │ │ ├── q14_s2_validate_nginx_deployed.sh │ │ │ │ ├── q15_s1_validate_crd_api.sh │ │ │ │ ├── q15_s2_validate_crd_schema.sh │ │ │ │ ├── q16_s1_validate_policy_created.sh │ │ │ │ ├── q16_s2_validate_pod_selector.sh │ 
│ │ │ ├── q16_s3_validate_policy_rules.sh │ │ │ │ ├── q17_s1_validate_service_type.sh │ │ │ │ ├── q17_s2_validate_service_selector.sh │ │ │ │ ├── q17_s3_validate_service_ports.sh │ │ │ │ ├── q18_s1_validate_service_type.sh │ │ │ │ ├── q18_s2_validate_service_selector.sh │ │ │ │ ├── q18_s3_validate_service_ports.sh │ │ │ │ ├── q19_s1_validate_ingress_created.sh │ │ │ │ ├── q19_s2_validate_ingress_host.sh │ │ │ │ ├── q19_s3_validate_ingress_backend.sh │ │ │ │ ├── q1_s1_validate_namespace.sh │ │ │ │ ├── q1_s2_validate_deployment.sh │ │ │ │ ├── q1_s3_validate_deployment_running.sh │ │ │ │ ├── q1_s4_validate_deployment_replicas.sh │ │ │ │ ├── q20_s1_validate_job_completed.sh │ │ │ │ ├── q20_s1_validate_job_created.sh │ │ │ │ ├── q20_s2_validate_job_config.sh │ │ │ │ ├── q20_s3_validate_job_completed.sh │ │ │ │ ├── q21_s1_validate_oci_dir_exists.sh │ │ │ │ ├── q21_s2_validate_nginx_image.sh │ │ │ │ ├── q2_s1_validate_pv_created.sh │ │ │ │ ├── q2_s2_validate_pv_capacity.sh │ │ │ │ ├── q2_s3_validate_pv_access_mode.sh │ │ │ │ ├── q2_s4_validate_pv_reclaim_policy.sh │ │ │ │ ├── q3_s1_validate_storageclass_created.sh │ │ │ │ ├── q3_s2_validate_storageclass_provisioner.sh │ │ │ │ ├── q3_s3_validate_storageclass_binding_mode.sh │ │ │ │ ├── q4_s1_validate_pvc_created.sh │ │ │ │ ├── q4_s2_validate_pvc_size.sh │ │ │ │ ├── q4_s3_validate_pvc_access_mode.sh │ │ │ │ ├── q4_s4_validate_pvc_storageclass.sh │ │ │ │ ├── q5_s1_validate_pods_running.sh │ │ │ │ ├── q5_s2_validate_container_image.sh │ │ │ │ ├── q6_s1_validate_multicontainer_pod.sh │ │ │ │ ├── q6_s2_validate_shared_volume.sh │ │ │ │ ├── q7_s1_validate_service_selector.sh │ │ │ │ ├── q7_s2_validate_service_ports.sh │ │ │ │ ├── q8_s1_validate_cpu_limits.sh │ │ │ │ ├── q8_s2_validate_pod_running.sh │ │ │ │ ├── q9_s1_validate_configmap.sh │ │ │ │ ├── q9_s2_validate_pod_running.sh │ │ │ │ ├── q9_s3_validate_pod_env_vars.sh │ │ │ │ └── q9_s4_validate_resources.sh │ │ └── 002 │ │ │ ├── answers.md │ │ │ ├── assessment.json │ │ │ ├── config.json │ │ │ └── scripts │ │ │ ├── setup │ │ │ ├── q10_setup.sh │ │ │ ├── q11_setup.sh │ │ │ ├── q12_setup.sh │ │ │ ├── q13_setup.sh │ │ │ ├── q14_setup.sh │ │ │ ├── q15_setup.sh │ │ │ ├── q16_setup.sh │ │ │ ├── q17_setup.sh │ │ │ ├── q18_setup.sh │ │ │ ├── q19_setup.sh │ │ │ ├── q1_setup.sh │ │ │ ├── q20_setup.sh │ │ │ ├── q2_setup.sh │ │ │ ├── q3_setup.sh │ │ │ ├── q4_setup.sh │ │ │ ├── q5_setup.sh │ │ │ ├── q6_setup.sh │ │ │ ├── q7_setup.sh │ │ │ ├── q8_setup.sh │ │ │ └── q9_setup.sh │ │ │ └── validation │ │ │ ├── q10_s1_validate_namespace.sh │ │ │ ├── q10_s2_validate_pods.sh │ │ │ ├── q10_s3_validate_networkpolicy_exists.sh │ │ │ ├── q10_s4_validate_networkpolicy_ingress.sh │ │ │ ├── q10_s5_validate_networkpolicy_egress.sh │ │ │ ├── q11_s1_validate_namespace.sh │ │ │ ├── q11_s2_validate_pod.sh │ │ │ ├── q11_s3_validate_user.sh │ │ │ ├── q11_s4_validate_security_context.sh │ │ │ ├── q12_s1_validate_dockerfile.sh │ │ │ ├── q12_s2_validate_html.sh │ │ │ ├── q12_s3_validate_image.sh │ │ │ ├── q12_s4_validate_container.sh │ │ │ ├── q13_s1_validate_namespace.sh │ │ │ ├── q13_s2_validate_job.sh │ │ │ ├── q13_s3_validate_job_policy.sh │ │ │ ├── q13_s4_validate_job_deadline.sh │ │ │ ├── q14_s1_validate_namespace.sh │ │ │ ├── q14_s2_validate_pod.sh │ │ │ ├── q14_s3_validate_service.sh │ │ │ ├── q14_s4_validate_shared_volume.sh │ │ │ ├── q15_s1_validate_namespace.sh │ │ │ ├── q15_s2_validate_chart_install.sh │ │ │ ├── q15_s2_validate_resource_quota.sh │ │ │ ├── q15_s2_validate_values.sh │ │ │ ├── q15_s3_validate_chart.sh │ │ │ ├── 
q15_s3_validate_limit_range.sh │ │ │ ├── q15_s3_validate_release_notes.sh │ │ │ ├── q16_s1_validate_namespace.sh │ │ │ ├── q16_s2_validate_pod.sh │ │ │ ├── q16_s3_validate_startup_probe.sh │ │ │ ├── q16_s4_validate_liveness_probe.sh │ │ │ ├── q16_s5_validate_readiness_probe.sh │ │ │ ├── q17_s1_validate_namespace.sh │ │ │ ├── q17_s2_validate_pod.sh │ │ │ ├── q17_s3_validate_post_start.sh │ │ │ ├── q17_s4_validate_pre_stop.sh │ │ │ ├── q17_s5_validate_grace_period.sh │ │ │ ├── q18_s1_validate_namespace.sh │ │ │ ├── q18_s2_validate_crd.sh │ │ │ ├── q18_s2_validate_priority_class.sh │ │ │ ├── q18_s3_validate_cr_name.sh │ │ │ ├── q18_s3_validate_pod.sh │ │ │ ├── q18_s4_validate_cr_image.sh │ │ │ ├── q18_s4_validate_node_selector.sh │ │ │ ├── q18_s5_validate_cr_replicas.sh │ │ │ ├── q18_s5_validate_toleration.sh │ │ │ ├── q19_s1_validate_namespace.sh │ │ │ ├── q19_s2_validate_basic_output.sh │ │ │ ├── q19_s2_validate_pod.sh │ │ │ ├── q19_s3_validate_advanced_output.sh │ │ │ ├── q19_s3_validate_hostname.sh │ │ │ ├── q19_s4_validate_dns_policy.sh │ │ │ ├── q19_s5_validate_dns_config.sh │ │ │ ├── q1_s1_validate_namespace.sh │ │ │ ├── q1_s2_validate_pod.sh │ │ │ ├── q1_s3_validate_pod_image.sh │ │ │ ├── q1_s4_validate_pod_labels.sh │ │ │ ├── q20_s1_validate_namespace.sh │ │ │ ├── q20_s2_validate_app_pods.sh │ │ │ ├── q20_s2_validate_configmap.sh │ │ │ ├── q20_s3_validate_db_network_policy.sh │ │ │ ├── q20_s3_validate_secret.sh │ │ │ ├── q20_s4_validate_cache_network_policy.sh │ │ │ ├── q20_s4_validate_env_vars.sh │ │ │ ├── q20_s5_validate_configmap_mount.sh │ │ │ ├── q20_s5_validate_default_deny_policy.sh │ │ │ ├── q2_s1_validate_namespace.sh │ │ │ ├── q2_s2_validate_pod_containers.sh │ │ │ ├── q2_s3_validate_container_images.sh │ │ │ ├── q2_s4_validate_shared_volume.sh │ │ │ ├── q3_s1_validate_namespace.sh │ │ │ ├── q3_s2_validate_deployment.sh │ │ │ ├── q3_s3_validate_replicas.sh │ │ │ ├── q3_s4_validate_service.sh │ │ │ ├── q4_s1_validate_namespace.sh │ │ │ ├── q4_s2_validate_configmap.sh │ │ │ ├── q4_s3_validate_secret.sh │ │ │ ├── q4_s4_validate_pod_configmap.sh │ │ │ ├── q4_s5_validate_pod_secret.sh │ │ │ ├── q5_s1_validate_namespace.sh │ │ │ ├── q5_s2_validate_pod.sh │ │ │ ├── q5_s3_validate_liveness.sh │ │ │ ├── q5_s4_validate_readiness.sh │ │ │ ├── q5_s5_validate_resources.sh │ │ │ ├── q6_s1_validate_namespace.sh │ │ │ ├── q6_s2_validate_deployment.sh │ │ │ ├── q6_s3_validate_clusterip.sh │ │ │ ├── q6_s4_validate_nodeport.sh │ │ │ ├── q6_s5_validate_loadbalancer.sh │ │ │ ├── q7_s1_validate_namespace.sh │ │ │ ├── q7_s2_validate_pv.sh │ │ │ ├── q7_s3_validate_pvc.sh │ │ │ ├── q7_s4_validate_pod.sh │ │ │ ├── q7_s5_validate_pod_mount.sh │ │ │ ├── q7_s6_validate_pod_env.sh │ │ │ ├── q8_s1_validate_namespace.sh │ │ │ ├── q8_s2_validate_cronjob.sh │ │ │ ├── q8_s3_validate_cronjob_container.sh │ │ │ ├── q8_s4_validate_cronjob_policy.sh │ │ │ ├── q9_s1_validate_deployment_exists.sh │ │ │ ├── q9_s2_validate_deployment_replicas.sh │ │ │ ├── q9_s3_validate_pods_running.sh │ │ │ └── q9_s4_validate_pods_image.sh │ │ ├── cks │ │ └── 001 │ │ │ ├── answers.md │ │ │ ├── assessment.json │ │ │ ├── config.json │ │ │ └── scripts │ │ │ ├── setup │ │ │ ├── q10_setup.sh │ │ │ ├── q11_setup.sh │ │ │ ├── q12_setup.sh │ │ │ ├── q13_setup.sh │ │ │ ├── q14_setup.sh │ │ │ ├── q15_setup.sh │ │ │ ├── q16_setup.sh │ │ │ ├── q17_setup.sh │ │ │ ├── q18_setup.sh │ │ │ ├── q19_setup.sh │ │ │ ├── q1_setup.sh │ │ │ ├── q20_setup.sh │ │ │ ├── q2_setup.sh │ │ │ ├── q3_setup.sh │ │ │ ├── q4_setup.sh │ │ │ ├── q5_setup.sh │ │ │ ├── 
q6_setup.sh │ │ │ ├── q7_setup.sh │ │ │ ├── q8_setup.sh │ │ │ ├── q9_setup.sh │ │ │ └── setup.sh │ │ │ └── validation │ │ │ ├── q10_s1_validate_pod.sh │ │ │ ├── q10_s2_validate_configmap.sh │ │ │ ├── q10_s3_validate_syscalls.sh │ │ │ ├── q11_s1_validate_image_policy.sh │ │ │ ├── q11_s1_validate_namespace_labels.sh │ │ │ ├── q11_s2_validate_compliant_pod.sh │ │ │ ├── q11_s2_validate_configuration.sh │ │ │ ├── q11_s3_validate_non_compliant_pod.sh │ │ │ ├── q12_s1_validate_audit_policy.sh │ │ │ ├── q12_s1_validate_secret.sh │ │ │ ├── q12_s2_validate_audit_rules.sh │ │ │ ├── q12_s2_validate_secure_app.sh │ │ │ ├── q12_s3_validate_env_app.sh │ │ │ ├── q13_s1_validate_namespaces.sh │ │ │ ├── q13_s1_validate_pss_namespace.sh │ │ │ ├── q13_s2_validate_compliant_pod.sh │ │ │ ├── q13_s2_validate_quotas.sh │ │ │ ├── q13_s3_validate_network_policies.sh │ │ │ ├── q13_s4_validate_pods.sh │ │ │ ├── q14_s1_validate_apparmor_profile.sh │ │ │ ├── q14_s1_validate_config_specs.sh │ │ │ ├── q14_s2_validate_dockerfile.sh │ │ │ ├── q14_s2_validate_pod_apparmor.sh │ │ │ ├── q14_s3_validate_pod.sh │ │ │ ├── q15_s1_validate_trivy_installation.sh │ │ │ ├── q15_s1_validate_trusted_registries.sh │ │ │ ├── q15_s2_validate_scan_script.sh │ │ │ ├── q15_s2_validate_verification_pod.sh │ │ │ ├── q15_s3_validate_script.sh │ │ │ ├── q16_s1_validate_analysis.sh │ │ │ ├── q16_s1_validate_cis_benchmark.sh │ │ │ ├── q16_s2_validate_remediation.sh │ │ │ ├── q16_s2_validate_secure_deployment.sh │ │ │ ├── q17_s1_validate_immutable.sh │ │ │ ├── q17_s1_validate_runtime_security.sh │ │ │ ├── q17_s2_validate_custom_rules.sh │ │ │ ├── q17_s2_validate_falco_rules.sh │ │ │ ├── q17_s3_validate_daemonset.sh │ │ │ ├── q18_s1_validate_audit_policy.sh │ │ │ ├── q18_s1_validate_container_immutability.sh │ │ │ ├── q18_s2_validate_audit_viewer.sh │ │ │ ├── q18_s2_validate_volume_mounts.sh │ │ │ ├── q19_s1_validate_cluster_upgrade.sh │ │ │ ├── q19_s1_validate_threat_rules.sh │ │ │ ├── q19_s2_validate_detector.sh │ │ │ ├── q19_s3_validate_isolation_policy.sh │ │ │ ├── q1_s1_validate_policy_exists.sh │ │ │ ├── q1_s2_validate_ingress.sh │ │ │ ├── q1_s3_validate_egress.sh │ │ │ ├── q20_s1_validate_encryption_config.sh │ │ │ ├── q20_s1_validate_secure_ingress.sh │ │ │ ├── q20_s2_validate_secure_pods.sh │ │ │ ├── q20_s2_validate_security_headers.sh │ │ │ ├── q2_s1_validate_ingress_exists.sh │ │ │ ├── q2_s2_validate_hostname.sh │ │ │ ├── q2_s3_validate_tls.sh │ │ │ ├── q3_s1_validate_namespace.sh │ │ │ ├── q3_s2_validate_pod.sh │ │ │ ├── q3_s3_validate_rbac.sh │ │ │ ├── q4_s1_validate_policy_exists.sh │ │ │ ├── q4_s2_validate_blocks_metadata.sh │ │ │ ├── q4_s3_validate_test_pod.sh │ │ │ ├── q5_s1_validate_pod_exists.sh │ │ │ ├── q5_s2_validate_volume_mount.sh │ │ │ ├── q5_s3_validate_hash_calculation.sh │ │ │ ├── q6_s1_validate_role_exists.sh │ │ │ ├── q6_s2_validate_role_permissions.sh │ │ │ ├── q6_s3_validate_role_binding.sh │ │ │ ├── q6_s3_validate_rolebinding.sh │ │ │ ├── q7_s1_validate_immutable_filesystem.sh │ │ │ ├── q7_s1_validate_sa_exists.sh │ │ │ ├── q7_s2_validate_non_root.sh │ │ │ ├── q7_s2_validate_sa_automounting.sh │ │ │ ├── q7_s3_validate_capabilities.sh │ │ │ ├── q7_s3_validate_deployment.sh │ │ │ ├── q7_s4_validate_pod_automounting.sh │ │ │ ├── q8_s1_validate_policy_exists.sh │ │ │ ├── q8_s2_validate_policy_selectors.sh │ │ │ ├── q8_s3_validate_admin_access.sh │ │ │ ├── q8_s3_validate_allowed_syscalls.sh │ │ │ ├── q8_s4_validate_restricted_access.sh │ │ │ ├── q9_s1_validate_enc_secret.sh │ │ │ ├── q9_s1_validate_pod_exists.sh │ │ │ ├── 
q9_s2_validate_capabilities.sh │ │ │ ├── q9_s2_validate_config_file.sh │ │ │ ├── q9_s3_validate_readonly_fs.sh │ │ │ └── q9_s4_validate_user_group.sh │ │ ├── labs.json │ │ └── other │ │ ├── 001 │ │ ├── answers.md │ │ ├── assessment.json │ │ ├── config.json │ │ └── scripts │ │ │ ├── setup │ │ │ ├── q10_setup.sh │ │ │ ├── q11_setup.sh │ │ │ ├── q12_setup.sh │ │ │ ├── q13_setup.sh │ │ │ ├── q14_setup.sh │ │ │ ├── q15_setup.sh │ │ │ ├── q16_setup.sh │ │ │ ├── q1_setup.sh │ │ │ ├── q2_setup.sh │ │ │ ├── q3_setup.sh │ │ │ ├── q4_setup.sh │ │ │ ├── q5_setup.sh │ │ │ ├── q6_setup.sh │ │ │ ├── q7_setup.sh │ │ │ ├── q8_setup.sh │ │ │ └── q9_setup.sh │ │ │ └── validation │ │ │ ├── q10_s1_validate_running.sh │ │ │ ├── q10_s2_validate_cpu.sh │ │ │ ├── q10_s3_validate_memory.sh │ │ │ ├── q11_s1_validate_compose_file.sh │ │ │ ├── q11_s2_validate_services.sh │ │ │ ├── q11_s3_validate_network_volume.sh │ │ │ ├── q12_s1_validate_file.sh │ │ │ ├── q12_s2_validate_base_image.sh │ │ │ ├── q12_s3_validate_details.sh │ │ │ ├── q13_s1_validate_diagnosis.sh │ │ │ ├── q13_s2_validate_container_fixed.sh │ │ │ ├── q14_s1_validate_dockerfile.sh │ │ │ ├── q14_s2_validate_container_user.sh │ │ │ ├── q1_s1_validate_image_v1.sh │ │ │ ├── q1_s2_validate_image_latest.sh │ │ │ ├── q2_s1_validate_container_running.sh │ │ │ ├── q2_s2_validate_image.sh │ │ │ ├── q2_s3_validate_port.sh │ │ │ ├── q2_s4_validate_env.sh │ │ │ ├── q3_s1_validate_volume.sh │ │ │ ├── q3_s2_validate_container_mount.sh │ │ │ ├── q3_s3_validate_file_content.sh │ │ │ ├── q4_s1_validate_dockerfile.sh │ │ │ ├── q4_s2_validate_image_built.sh │ │ │ ├── q4_s3_validate_image_size.sh │ │ │ ├── q5_s1_validate_daemon_config.sh │ │ │ ├── q5_s2_validate_cgroup_driver.sh │ │ │ ├── q6_s1_validate_container.sh │ │ │ ├── q6_s2_validate_log_driver.sh │ │ │ ├── q6_s3_validate_log_rotation.sh │ │ │ ├── q7_s1_validate_network.sh │ │ │ ├── q7_s2_validate_app1.sh │ │ │ ├── q7_s3_validate_ping.sh │ │ │ ├── q8_s1_validate_dockerfile.sh │ │ │ ├── q8_s2_validate_container.sh │ │ │ ├── q8_s3_validate_health_params.sh │ │ │ ├── q9_s1_validate_manifest.sh │ │ │ └── q9_s2_validate_platforms.sh │ │ └── 002 │ │ ├── answers.md │ │ ├── assessment.json │ │ ├── config.json │ │ └── scripts │ │ ├── setup │ │ ├── create_directories.sh │ │ ├── q10_setup.sh │ │ ├── q11_setup.sh │ │ ├── q12_setup_buggy_release.sh │ │ ├── q1_setup.sh │ │ ├── q2_setup.sh │ │ ├── q3_setup.sh │ │ ├── q4_setup.sh │ │ ├── q5_setup.sh │ │ ├── q6_setup.sh │ │ ├── q7_setup.sh │ │ ├── q8_setup.sh │ │ └── q9_setup.sh │ │ └── validation │ │ ├── q10_s1_validate_chart_packaged.sh │ │ ├── q10_s2_validate_local_repo.sh │ │ ├── q11_s1_validate_rollback.sh │ │ ├── q11_s2_validate_revision.sh │ │ ├── q12_s1_validate_diagnosis.sh │ │ ├── q12_s2_validate_fixed.sh │ │ ├── q1_s1_validate_helm_installed.sh │ │ ├── q2_s1_validate_repo_added.sh │ │ ├── q3_s1_validate_search_results.sh │ │ ├── q4_s1_validate_nginx_installed.sh │ │ ├── q4_s2_validate_service_config.sh │ │ ├── q5_s1_validate_releases_list.sh │ │ ├── q6_s1_validate_status.sh │ │ ├── q6_s2_validate_manifests.sh │ │ ├── q7_s1_validate_upgrade.sh │ │ ├── q7_s2_validate_replicas.sh │ │ ├── q8_s1_validate_values_file.sh │ │ ├── q8_s2_validate_redis_install.sh │ │ ├── q9_s1_validate_chart_created.sh │ │ └── q9_s2_validate_chart_metadata.sh ├── entrypoint.sh ├── package.json ├── src │ ├── app.js │ ├── config │ │ └── index.js │ ├── controllers │ │ ├── assessmentController.js │ │ ├── examController.js │ │ ├── remoteDesktopController.js │ │ └── sshController.js │ ├── middleware │ │ 
└── validators.js │ ├── routes │ │ ├── assessmentRoutes.js │ │ ├── examRoutes.js │ │ ├── remoteDesktopRoutes.js │ │ └── sshRoutes.js │ ├── services │ │ ├── examService.js │ │ ├── jumphostService.js │ │ ├── metricService.js │ │ ├── remoteDesktopService.js │ │ └── sshService.js │ └── utils │ │ ├── logger.js │ │ └── redisClient.js └── tests │ └── jumphostService.test.js ├── jumphost ├── .dockerignore ├── Dockerfile └── scripts │ ├── cleanup-exam-env.sh │ └── prepare-exam-env.sh ├── kind-cluster ├── .dockerignore ├── Dockerfile ├── entrypoint.sh └── scripts │ ├── env-cleanup │ ├── env-setup │ └── k3d-install.sh ├── nginx ├── .dockerignore ├── Dockerfile └── default.conf ├── remote-desktop ├── .dockerignore ├── Dockerfile ├── agent.py └── startup.sh ├── remote-terminal ├── Dockerfile └── motd └── scripts ├── COMPOSE-DEPLOY.md ├── house-keeping ├── BUILD-AND-PUSH.md └── build-and-push.sh ├── install.ps1 └── install.sh /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry 12 | polar: # Replace with a single Polar username 13 | buy_me_a_coffee: nishan.b 14 | thanks_dev: # Replace with a single thanks.dev username 15 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 16 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Node.js dependencies and logs 2 | node_modules/ 3 | npm-debug.log 4 | yarn-debug.log 5 | yarn-error.log 6 | package-lock.json 7 | yarn.lock 8 | .npm/ 9 | 10 | # Environment variables 11 | .env 12 | .env.* 13 | !.env.example 14 | 15 | # Docker related 16 | .docker/ 17 | docker-compose.override.yml 18 | 19 | # Kubernetes secrets (in production, consider using sealed secrets or other secure methods) 20 | # k8s/secrets/ 21 | 22 | # Build outputs 23 | dist/ 24 | build/ 25 | tmp/ 26 | out/ 27 | logs/ 28 | *.log 29 | 30 | # IDE and editors 31 | .idea/ 32 | .vscode/ 33 | *.swp 34 | *.swo 35 | .DS_Store 36 | .history/ 37 | *.sublime* 38 | .project 39 | .classpath 40 | .settings/ 41 | *.code-workspace 42 | 43 | # OS generated files 44 | Thumbs.db 45 | ehthumbs.db 46 | Desktop.ini 47 | 48 | # Temporary files 49 | *.tmp 50 | *.bak 51 | .temp/ 52 | .cache/ 53 | 54 | # Testing 55 | coverage/ 56 | .nyc_output/ 57 | 58 | # Miscellaneous 59 | .vagrant/ 60 | .terraform/ 61 | terraform.tfstate 62 | terraform.tfstate.backup 63 | .terragrunt-cache/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation 
files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /app/.dockerignore: -------------------------------------------------------------------------------- 1 | # Dependency directories 2 | node_modules/ 3 | npm-debug.log 4 | yarn-debug.log 5 | yarn-error.log 6 | 7 | # Build artifacts 8 | dist/ 9 | build/ 10 | *.tar.gz 11 | *.tgz 12 | 13 | # Environment files 14 | .env 15 | .env.local 16 | .env.development 17 | .env.test 18 | .env.production 19 | 20 | # Docker files 21 | Dockerfile 22 | docker-compose*.yml 23 | compose*.yaml 24 | .dockerignore 25 | 26 | # Logs 27 | logs/ 28 | *.log 29 | 30 | # Testing 31 | coverage/ 32 | .nyc_output/ 33 | .coverage 34 | .pytest_cache/ 35 | __pycache__/ 36 | *.pyc 37 | *.pyo 38 | *.pyd 39 | 40 | # OS specific 41 | .DS_Store 42 | Thumbs.db 43 | 44 | # Editor directories and files 45 | .idea/ 46 | .vscode/ 47 | *.swp 48 | *.swo 49 | *~ 50 | 51 | # Other unnecessary files 52 | *.gz 53 | *.zip 54 | *.tar 55 | *.rar 56 | tmp/ 57 | temp/ -------------------------------------------------------------------------------- /app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:16-alpine 2 | 3 | WORKDIR /app 4 | 5 | # Copy package.json and package-lock.json 6 | COPY package*.json ./ 7 | 8 | # Install dependencies 9 | RUN npm install --production 10 | 11 | # Copy the application files 12 | COPY server.js ./ 13 | COPY services/ ./services/ 14 | COPY public/ ./public/ 15 | 16 | # Ensure the public directory exists 17 | RUN mkdir -p /app/public 18 | 19 | # Expose the port 20 | EXPOSE 3000 21 | 22 | # Start the application 23 | CMD ["node", "server.js"] -------------------------------------------------------------------------------- /app/init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Copy the HTML file to Nginx's directory 4 | cp /app/public/* /usr/share/nginx/html/ 5 | 6 | # Keep the container running 7 | tail -f /dev/null -------------------------------------------------------------------------------- /app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "killer-sh-clone-webapp", 3 | "version": "1.0.0", 4 | "description": "Web application for killer.sh clone with VNC access", 5 | "main": "server.js", 6 | "scripts": { 7 | "start": "node server.js", 8 | "dev": "nodemon server.js" 9 | }, 10 | "dependencies": { 11 | "cors": "^2.8.5", 12 | "express": "^4.21.2", 13 | "http-proxy-middleware": "^2.0.7", 14 | "socket.io": 
"4.7.2", 15 | "ssh2": "^1.14.0", 16 | "xterm": "^5.3.0" 17 | }, 18 | "devDependencies": { 19 | "nodemon": "^2.0.15" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /app/public/js/app.js: -------------------------------------------------------------------------------- 1 | document.addEventListener('DOMContentLoaded', function() { 2 | const vncFrame = document.getElementById('vnc-frame'); 3 | const connectBtn = document.getElementById('connect-btn'); 4 | const fullscreenBtn = document.getElementById('fullscreen-btn'); 5 | 6 | connectBtn.addEventListener('click', function() { 7 | // Connect to the VNC server through the service 8 | vncFrame.src = `http://${window.location.hostname}:${window.location.port}/vnc-proxy/`; 9 | }); 10 | 11 | fullscreenBtn.addEventListener('click', function() { 12 | if (vncFrame.requestFullscreen) { 13 | vncFrame.requestFullscreen(); 14 | } else if (vncFrame.webkitRequestFullscreen) { 15 | vncFrame.webkitRequestFullscreen(); 16 | } else if (vncFrame.msRequestFullscreen) { 17 | vncFrame.msRequestFullscreen(); 18 | } 19 | }); 20 | }); -------------------------------------------------------------------------------- /app/services/public-service.js: -------------------------------------------------------------------------------- 1 | const fs = require('fs'); 2 | const path = require('path'); 3 | 4 | class PublicService { 5 | constructor(publicDir) { 6 | this.publicDir = publicDir; 7 | this.indexHtmlSrc = path.join(__dirname, '..', 'index.html'); 8 | this.indexHtmlDest = path.join(publicDir, 'index.html'); 9 | } 10 | 11 | initialize() { 12 | this.createPublicDirectory(); 13 | this.copyIndexHtml(); 14 | } 15 | 16 | createPublicDirectory() { 17 | if (!fs.existsSync(this.publicDir)) { 18 | fs.mkdirSync(this.publicDir, { recursive: true }); 19 | console.log('Created public directory'); 20 | } 21 | } 22 | 23 | copyIndexHtml() { 24 | if (fs.existsSync(this.indexHtmlSrc) && !fs.existsSync(this.indexHtmlDest)) { 25 | fs.copyFileSync(this.indexHtmlSrc, this.indexHtmlDest); 26 | console.log('Copied index.html to public directory'); 27 | } 28 | } 29 | 30 | getPublicDir() { 31 | return this.publicDir; 32 | } 33 | } 34 | 35 | module.exports = PublicService; -------------------------------------------------------------------------------- /app/utils/staticFiles.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Static files utility 3 | * Handles the setup of necessary directories and files 4 | */ 5 | 6 | const fs = require('fs'); 7 | const path = require('path'); 8 | 9 | /** 10 | * Sets up the necessary static file structure 11 | * Creates public directory if it doesn't exist 12 | * Copies index.html to public directory if needed 13 | */ 14 | function setupStaticFiles() { 15 | // Create the public directory if it doesn't exist 16 | const publicDir = path.join(__dirname, '..', 'public'); 17 | if (!fs.existsSync(publicDir)) { 18 | fs.mkdirSync(publicDir, { recursive: true }); 19 | console.log('Created public directory'); 20 | } 21 | 22 | // Copy index.html to public directory if it doesn't exist 23 | const indexHtmlSrc = path.join(__dirname, '..', 'index.html'); 24 | const indexHtmlDest = path.join(publicDir, 'index.html'); 25 | if (fs.existsSync(indexHtmlSrc) && !fs.existsSync(indexHtmlDest)) { 26 | fs.copyFileSync(indexHtmlSrc, indexHtmlDest); 27 | console.log('Copied index.html to public directory'); 28 | } 29 | } 30 | 31 | module.exports = setupStaticFiles; 
-------------------------------------------------------------------------------- /docs/local-setup-guide.md: -------------------------------------------------------------------------------- 1 | # Local Setup Guide for CK-X Simulator 2 | 3 | ## Quick Setup 4 | 5 | 1. Clone the repository: 6 | ```bash 7 | git clone https://github.com/nishanb/CK-X.git 8 | cd CK-X 9 | ``` 10 | 11 | 2. Run the deployment script: 12 | ```bash 13 | ./scripts/compose-deploy.sh 14 | ``` 15 | 16 | The script will deploy all services locally and open the application in your browser. 17 | 18 | Alternatively, skip the script: run `docker compose up` and navigate to `http://localhost:30080` in your browser manually. 19 | 20 | After changing any code, rebuild and redeploy with: 21 | ```bash 22 | docker compose up -d --build 23 | ``` 24 | 25 | This setup has been tested on Mac and Linux environments. -------------------------------------------------------------------------------- /facilitator/.dockerignore: -------------------------------------------------------------------------------- 1 | # Dependency directories 2 | node_modules/ 3 | npm-debug.log 4 | yarn-debug.log 5 | yarn-error.log 6 | 7 | # Build artifacts 8 | dist/ 9 | build/ 10 | *.tar.gz 11 | *.tgz 12 | 13 | # Environment files 14 | .env 15 | .env.local 16 | .env.development 17 | .env.test 18 | .env.production 19 | 20 | # Docker files 21 | Dockerfile 22 | docker-compose*.yml 23 | compose*.yaml 24 | .dockerignore 25 | 26 | # Logs 27 | logs/ 28 | *.log 29 | 30 | # Testing 31 | coverage/ 32 | .nyc_output/ 33 | .coverage 34 | .pytest_cache/ 35 | __pycache__/ 36 | *.pyc 37 | *.pyo 38 | *.pyd 39 | 40 | # OS specific 41 | .DS_Store 42 | Thumbs.db 43 | 44 | # Editor directories and files 45 | .idea/ 46 | .vscode/ 47 | *.swp 48 | *.swo 49 | *~ 50 | 51 | # Other unnecessary files 52 | *.gz 53 | *.zip 54 | *.tar 55 | *.rar 56 | tmp/ 57 | temp/ -------------------------------------------------------------------------------- /facilitator/.gitignore: -------------------------------------------------------------------------------- 1 | # Dependencies 2 | node_modules/ 3 | npm-debug.log 4 | yarn-debug.log 5 | yarn-error.log 6 | 7 | # Environment variables 8 | .env 9 | .env.local 10 | .env.development.local 11 | .env.test.local 12 | .env.production.local 13 | 14 | # Logs 15 | logs/ 16 | *.log 17 | 18 | # Runtime data 19 | pids/ 20 | *.pid 21 | *.seed 22 | *.pid.lock 23 | 24 | # Coverage 25 | coverage/ 26 | 27 | # Misc 28 | .DS_Store 29 | .idea/ 30 | .vscode/ 31 | *.swp 32 | *.swo -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "lab": "cka-001", 3 | "workerNodes": 1, 4 | "answers": "assets/exams/cka/001/answers.md", 5 | "questions": "assessment.json", 6 | "totalMarks": 100, 7 | "lowScore": 40, 8 | "mediumScore": 60, 9 | "highScore": 90 10 | }
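The score fields in config.json read as grade thresholds against `totalMarks`. The facilitator's actual scoring code is not shown here, so the sketch below is a hedged illustration of how a consumer might bucket a result; the field meanings are inferred from the JSON above and `jq` is assumed to be available:

```bash
#!/bin/bash
# Hedged sketch only: bucket a score using the thresholds from config.json.
CONFIG="facilitator/assets/exams/cka/001/config.json"
SCORE="${1:-72}"   # a candidate's score out of totalMarks

LOW=$(jq -r '.lowScore' "$CONFIG")
MEDIUM=$(jq -r '.mediumScore' "$CONFIG")
HIGH=$(jq -r '.highScore' "$CONFIG")

if   [ "$SCORE" -ge "$HIGH" ];   then echo "high"
elif [ "$SCORE" -ge "$MEDIUM" ]; then echo "medium"
elif [ "$SCORE" -ge "$LOW" ];    then echo "low"
else echo "below lowScore"
fi
```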
-------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q10_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 10: Health Check setup 3 | 4 | # Remove any existing pod with the same name 5 | kubectl delete pod health-check --ignore-not-found=true 6 | 7 | # Pre-pull the nginx image 8 | kubectl run prefetch-nginx --image=nginx --restart=Never --dry-run=client -o yaml | kubectl apply -f - 9 | 10 | # Create a ConfigMap with a custom nginx configuration that includes /healthz endpoint 11 | kubectl create configmap nginx-health-config --from-literal=nginx.conf=' 12 | events { 13 | worker_connections 1024; 14 | } 15 | http { 16 | server { 17 | listen 80; 18 | location /healthz { 19 | access_log off; 20 | return 200 "healthy\n"; 21 | } 22 | } 23 | }' --dry-run=client -o yaml | kubectl apply -f - 24 | 25 | sleep 5 26 | kubectl delete pod prefetch-nginx --ignore-not-found=true 27 | 28 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q1_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 1: Create namespace and pod 3 | 4 | # No specific setup needed as this is a creation task 5 | # Just ensure the namespace doesn't exist already 6 | kubectl delete namespace app-team1 --ignore-not-found=true 7 | 8 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q2_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 2: Static Pod setup 3 | 4 | # Ensure the static pod directory exists 5 | mkdir -p /etc/kubernetes/manifests/ 6 | 7 | # Remove any existing static pod with the same name 8 | rm -f /etc/kubernetes/manifests/static-web.yaml 9 | 10 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q3_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 3: Storage setup 3 | 4 | # Create storage namespace if it doesn't exist 5 | kubectl create namespace storage --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Remove any existing storage class and PVC with the same names 8 | kubectl delete storageclass fast-storage --ignore-not-found=true 9 | kubectl delete pvc -n storage data-pvc --ignore-not-found=true 10 | 11 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q4_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 4: Logging setup 3 | 4 | # Create monitoring namespace if it doesn't exist 5 | kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Remove any existing pod with the same name 8 | kubectl delete pod -n monitoring logger --ignore-not-found=true 9 | 10 | # Pull required images in advance to speed up pod creation 11 | kubectl run prefetch-busybox --image=busybox --restart=Never -n monitoring --dry-run=client -o yaml | kubectl apply -f - 12 | kubectl run prefetch-fluentd --image=fluentd:v1.14 --restart=Never -n monitoring --dry-run=client -o yaml | kubectl apply -f - 13 | 14 | # Wait for prefetch pods to be created 15 | sleep 5 16 | 17 | # Clean up prefetch pods 18 | kubectl delete pod -n monitoring prefetch-busybox --ignore-not-found=true 19 | kubectl delete pod -n monitoring prefetch-fluentd --ignore-not-found=true 20 | 21 | exit 0
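The setup scripts above repeat one idiom: pre-pull an image by creating a throwaway pod, wait briefly, then delete it, so the node has the image cached before the candidate's real pod needs it. A hedged sketch consolidating that pattern — the `prefetch_image` helper is illustrative, not part of the repository:

```bash
# Illustrative helper for the pre-pull idiom used in the q4/q7/q8/q9/q10 setup scripts.
prefetch_image() {
  local image=$1 ns=${2:-default}
  local name="prefetch-$(echo "$image" | tr ':/.' '-')"
  kubectl run "$name" --image="$image" --restart=Never -n "$ns" \
    --dry-run=client -o yaml | kubectl apply -f -
  # Waiting for Ready is sturdier than the fixed "sleep 5" the scripts use,
  # but both achieve the same image-caching effect.
  kubectl wait --for=condition=Ready "pod/$name" -n "$ns" --timeout=60s || true
  kubectl delete pod "$name" -n "$ns" --ignore-not-found=true
}

prefetch_image nginx:1.20
prefetch_image fluentd:v1.14 monitoring
```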
-------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q5_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 5: RBAC setup 3 | 4 | # Remove any existing resources with the same names 5 | kubectl delete serviceaccount app-sa --ignore-not-found=true 6 | kubectl delete role pod-reader --ignore-not-found=true 7 | kubectl delete rolebinding read-pods --ignore-not-found=true 8 | 9 | # Create a test pod to verify RBAC permissions later 10 | kubectl run test-pod --image=nginx --restart=Never --dry-run=client -o yaml | kubectl apply -f - 11 | 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q6_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 6: Network Policy setup 3 | 4 | # Create networking namespace if it doesn't exist 5 | kubectl create namespace networking --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Remove any existing network policy 8 | kubectl delete networkpolicy -n networking db-policy --ignore-not-found=true 9 | 10 | # Create test pods with appropriate labels 11 | kubectl run frontend --image=nginx --labels=role=frontend -n networking --dry-run=client -o yaml | kubectl apply -f - 12 | kubectl run db --image=mysql --labels=role=db -n networking --env=MYSQL_ROOT_PASSWORD=password --dry-run=client -o yaml | kubectl apply -f - 13 | 14 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q7_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 7: Deployment and Service setup 3 | 4 | # Remove any existing deployment and service with the same names 5 | kubectl delete deployment web-app --ignore-not-found=true 6 | kubectl delete service web-service --ignore-not-found=true 7 | 8 | # Pre-pull the nginx image to speed up deployment creation 9 | kubectl run prefetch-nginx --image=nginx:1.20 --restart=Never --dry-run=client -o yaml | kubectl apply -f - 10 | sleep 5 11 | kubectl delete pod prefetch-nginx --ignore-not-found=true 12 | 13 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q8_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 8: Resource Management setup 3 | 4 | # Ensure monitoring namespace exists 5 | kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Remove any existing pod with the same name 8 | kubectl delete pod -n monitoring resource-pod --ignore-not-found=true 9 | 10 | # Pre-pull the nginx image 11 | kubectl run prefetch-nginx --image=nginx --restart=Never -n monitoring --dry-run=client -o yaml | kubectl apply -f - 12 | sleep 5 13 | kubectl delete pod -n monitoring prefetch-nginx --ignore-not-found=true 14 | 15 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/setup/q9_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 9: ConfigMap setup 3 | 4 | # Remove any existing configmap and pod with the same names 5 | kubectl delete configmap app-config --ignore-not-found=true 6 | kubectl delete pod config-pod --ignore-not-found=true 7 | 8 | # Pre-pull the nginx image 9 | kubectl run prefetch-nginx --image=nginx --restart=Never --dry-run=client -o yaml | kubectl apply -f - 10 | sleep 5 11 | kubectl delete pod prefetch-nginx
--ignore-not-found=true 12 | 13 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/validation/q1_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate if namespace app-team1 exists 3 | 4 | NAMESPACE="app-team1" 5 | 6 | if kubectl get namespace $NAMESPACE &> /dev/null; then 7 | echo "✅ Namespace '$NAMESPACE' exists" 8 | exit 0 9 | else 10 | echo "❌ Namespace '$NAMESPACE' not found" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/validation/q1_s2_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate if nginx pod exists and is running in app-team1 namespace 3 | 4 | NAMESPACE="app-team1" 5 | POD_NAME="nginx-pod" 6 | EXPECTED_IMAGE="nginx:1.19" 7 | 8 | # Check if pod exists 9 | if ! kubectl get pod $POD_NAME -n $NAMESPACE &> /dev/null; then 10 | echo "❌ Pod '$POD_NAME' not found in namespace '$NAMESPACE'" 11 | exit 1 12 | fi 13 | 14 | # Check if pod is running 15 | POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}') 16 | if [ "$POD_STATUS" != "Running" ]; then 17 | echo "❌ Pod '$POD_NAME' exists but is not running (status: $POD_STATUS)" 18 | exit 1 19 | fi 20 | 21 | # Check if correct image is used 22 | POD_IMAGE=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].image}') 23 | if [ "$POD_IMAGE" != "$EXPECTED_IMAGE" ]; then 24 | echo "❌ Pod '$POD_NAME' is using incorrect image: $POD_IMAGE (expected: $EXPECTED_IMAGE)" 25 | exit 1 26 | fi 27 | 28 | echo "✅ Pod '$POD_NAME' exists, is running, and using correct image in namespace '$NAMESPACE'" 29 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/validation/q3_s1_validate_storageclass.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate if StorageClass exists with correct configuration 3 | 4 | SC_NAME="fast-storage" 5 | EXPECTED_PROVISIONER="kubernetes.io/no-provisioner" 6 | 7 | # Check if StorageClass exists 8 | if ! kubectl get storageclass $SC_NAME &> /dev/null; then 9 | echo "❌ StorageClass '$SC_NAME' not found" 10 | exit 1 11 | fi 12 | 13 | # Check if correct provisioner is used 14 | PROVISIONER=$(kubectl get storageclass $SC_NAME -o jsonpath='{.provisioner}') 15 | if [ "$PROVISIONER" != "$EXPECTED_PROVISIONER" ]; then 16 | echo "❌ StorageClass '$SC_NAME' using incorrect provisioner: $PROVISIONER (expected: $EXPECTED_PROVISIONER)" 17 | exit 1 18 | fi 19 | 20 | echo "✅ StorageClass '$SC_NAME' exists with correct provisioner" 21 | exit 0
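These validation scripts all follow the same contract: read one field via `-o jsonpath`, compare it to an expected value, print a ❌/✅ line, and signal the result through the exit code. A hedged generic form of that contract — `expect_field` is illustrative, not a helper the repository ships:

```bash
# Illustrative generic validator mirroring the scripts' ❌/✅ + exit-code contract.
expect_field() {
  local kind=$1 name=$2 ns=$3 path=$4 expected=$5 actual
  actual=$(kubectl get "$kind" "$name" -n "$ns" -o jsonpath="$path" 2>/dev/null)
  if [ "$actual" != "$expected" ]; then
    echo "❌ $kind/$name: $path is '$actual' (expected: '$expected')"
    return 1
  fi
  echo "✅ $kind/$name: $path matches '$expected'"
}

# Equivalent to the core checks of q3_s2_validate_pvc.sh, which follows:
expect_field pvc data-pvc storage '{.spec.storageClassName}' fast-storage
expect_field pvc data-pvc storage '{.spec.resources.requests.storage}' 1Gi
```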
-------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/validation/q3_s2_validate_pvc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate if PVC exists with correct configuration 3 | 4 | NAMESPACE="storage" 5 | PVC_NAME="data-pvc" 6 | SC_NAME="fast-storage" 7 | EXPECTED_SIZE="1Gi" 8 | 9 | # Check if PVC exists 10 | if ! kubectl get pvc $PVC_NAME -n $NAMESPACE &> /dev/null; then 11 | echo "❌ PVC '$PVC_NAME' not found in namespace '$NAMESPACE'" 12 | exit 1 13 | fi 14 | 15 | # Check if correct storage class is used 16 | STORAGE_CLASS=$(kubectl get pvc $PVC_NAME -n $NAMESPACE -o jsonpath='{.spec.storageClassName}') 17 | if [ "$STORAGE_CLASS" != "$SC_NAME" ]; then 18 | echo "❌ PVC '$PVC_NAME' using incorrect storage class: $STORAGE_CLASS (expected: $SC_NAME)" 19 | exit 1 20 | fi 21 | 22 | # Check if correct size is requested 23 | SIZE=$(kubectl get pvc $PVC_NAME -n $NAMESPACE -o jsonpath='{.spec.resources.requests.storage}') 24 | if [ "$SIZE" != "$EXPECTED_SIZE" ]; then 25 | echo "❌ PVC '$PVC_NAME' requesting incorrect size: $SIZE (expected: $EXPECTED_SIZE)" 26 | exit 1 27 | fi 28 | 29 | echo "✅ PVC '$PVC_NAME' exists with correct storage class and size" 30 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/validation/q5_s1_validate_sa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate if ServiceAccount exists 3 | 4 | SA_NAME="app-sa" 5 | NAMESPACE="default" 6 | 7 | # Check if ServiceAccount exists 8 | if ! kubectl get serviceaccount $SA_NAME -n $NAMESPACE &> /dev/null; then 9 | echo "❌ ServiceAccount '$SA_NAME' not found in namespace '$NAMESPACE'" 10 | exit 1 11 | fi 12 | 13 | echo "✅ ServiceAccount '$SA_NAME' exists in namespace '$NAMESPACE'" 14 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/001/scripts/validation/q9_s1_validate_configmap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate if ConfigMap exists with correct configuration 3 | 4 | CONFIGMAP_NAME="app-config" 5 | EXPECTED_KEY="APP_COLOR" 6 | EXPECTED_VALUE="blue" 7 | 8 | # Check if ConfigMap exists 9 | if ! kubectl get configmap $CONFIGMAP_NAME &> /dev/null; then 10 | echo "❌ ConfigMap '$CONFIGMAP_NAME' not found" 11 | exit 1 12 | fi 13 | 14 | # Check if ConfigMap has the required key 15 | if !
kubectl get configmap $CONFIGMAP_NAME -o jsonpath='{.data.APP_COLOR}' &> /dev/null; then 16 | echo "❌ ConfigMap '$CONFIGMAP_NAME' missing required key '$EXPECTED_KEY'" 17 | exit 1 18 | fi 19 | 20 | # Check if ConfigMap has the correct value 21 | CONFIG_VALUE=$(kubectl get configmap $CONFIGMAP_NAME -o jsonpath='{.data.APP_COLOR}') 22 | if [ "$CONFIG_VALUE" != "$EXPECTED_VALUE" ]; then 23 | echo "❌ ConfigMap '$CONFIGMAP_NAME' has incorrect value for '$EXPECTED_KEY': $CONFIG_VALUE (expected: $EXPECTED_VALUE)" 24 | exit 1 25 | fi 26 | 27 | echo "✅ ConfigMap '$CONFIGMAP_NAME' exists with correct configuration" 28 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "lab": "cka-002", 3 | "workerNodes": 2, 4 | "answers": "assets/exams/cka/002/answers.md", 5 | "questions": "assessment.json", 6 | "totalMarks": 100, 7 | "lowScore": 60, 8 | "mediumScore": 75, 9 | "highScore": 85 10 | } -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/setup/q10_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Create namespace 5 | kubectl create namespace dns-config --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Create directory for test results if it doesn't exist 8 | mkdir -p /tmp/dns-test 9 | 10 | echo "Setup completed for Question 10" -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/setup/q11_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Create namespace 5 | kubectl create namespace helm-test --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Ensure helm is installed 8 | helm version || { 9 | echo "Helm is not installed" 10 | exit 1 11 | } 12 | 13 | # Remove bitnami repo if it exists (to test adding it) 14 | helm repo remove bitnami 2>/dev/null || true 15 | 16 | echo "Setup completed for Question 11" -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/setup/q12_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Create namespace 5 | kubectl create namespace kustomize --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Create directory structure for kustomize 8 | mkdir -p /tmp/exam/kustomize/{base,overlays/production} 9 | 10 | # Create initial base files 11 | cat > /tmp/exam/kustomize/base/kustomization.yaml </dev/null || true 9 | kubectl delete resourcequota --all -n limits 2>/dev/null || true 10 | 11 | echo "Setup completed for Question 14" -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/setup/q15_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Create namespace 5 | kubectl create namespace monitoring --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Pre-pull the resource consumer image to speed up deployment 8 | kubectl run pull-resource-consumer --image=gcr.io/kubernetes-e2e-test-images/resource-consumer:1.5 -n monitoring --dry-run=client -o yaml | kubectl apply -f - 9 | 10 | # Wait for the pull pod to complete 11 | 
sleep 10 12 | 13 | # Clean up the pull pod 14 | kubectl delete pod pull-resource-consumer -n monitoring 2>/dev/null || true 15 | 16 | echo "Setup completed for Question 15" -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/setup/q16_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Create namespace 5 | kubectl create namespace cluster-admin --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | echo "Setup completed for Question 16" -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/setup/q17_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Create namespace 5 | kubectl create namespace network --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Ensure NetworkPolicy API is enabled 8 | kubectl get networkpolicies -n network || { 9 | echo "NetworkPolicy API is not enabled" 10 | exit 1 11 | } 12 | 13 | # Delete any existing network policies 14 | kubectl delete networkpolicy --all -n network 2>/dev/null || true 15 | 16 | echo "Setup completed for Question 17" -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/setup/q18_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Create namespace 5 | kubectl create namespace upgrade --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Create directory for rollout history 8 | mkdir -p /tmp/exam 9 | 10 | # Clean up any leftover image pre-pull pods (they may not exist, so don't fail under set -e) 11 | kubectl delete pod pull-nginx-1-19 pull-nginx-1-20 -n upgrade 2>/dev/null || true 12 | 13 | echo "Setup completed for Question 18" -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/setup/q19_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Create namespace (reusing scheduling namespace) 5 | kubectl create namespace scheduling --dry-run=client -o yaml | kubectl apply -f - 6 | 7 | # Delete any existing PriorityClasses 8 | kubectl delete priorityclass high-priority low-priority 2>/dev/null || true 9 | 10 | # # Create the PriorityClasses 11 | # kubectl create -f - </dev/null || echo "not found") 6 | if [ "$DEPLOY_STATUS" = "not found" ]; then 7 | echo "Deployment dns-app not found" 8 | exit 1 9 | fi 10 | 11 | REPLICAS=$(echo $DEPLOY_STATUS | cut -d',' -f1) 12 | AVAILABLE=$(echo $DEPLOY_STATUS | cut -d',' -f2) 13 | 14 | if [ "$REPLICAS" != "2" ] || [ "$AVAILABLE" != "2" ]; then 15 | echo "Deployment does not have correct number of replicas" 16 | exit 1 17 | fi 18 | 19 | # Check if service exists and has correct port 20 | SVC_PORT=$(kubectl get svc dns-svc -n dns-config -o jsonpath='{.spec.ports[0].port}' 2>/dev/null || echo "not found") 21 | if [ "$SVC_PORT" = "not found" ]; then 22 | echo "Service dns-svc not found" 23 | exit 1 24 | fi 25 | 26 | if [ "$SVC_PORT" != "80" ]; then 27 | echo "Service port is not configured correctly" 28 | exit 1 29 | fi 30 | 31 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q10_s2_validate_dns.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if tester
pod exists and is running 5 | POD_STATUS=$(kubectl get pod dns-tester -n dns-config -o jsonpath='{.status.phase}' 2>/dev/null || echo "not found") 6 | if [ "$POD_STATUS" = "not found" ]; then 7 | echo "DNS tester pod not found" 8 | exit 1 9 | fi 10 | 11 | if [ "$POD_STATUS" != "Running" ]; then 12 | echo "DNS tester pod is not running" 13 | exit 1 14 | fi 15 | 16 | # Test DNS resolution 17 | TEST_RESULT=$(kubectl exec -n dns-config dns-tester -- nslookup dns-svc 2>/dev/null || echo "failed") 18 | if echo "$TEST_RESULT" | grep -q "failed"; then 19 | echo "DNS resolution test failed" 20 | exit 1 21 | fi 22 | 23 | # Test FQDN resolution 24 | TEST_RESULT=$(kubectl exec -n dns-config dns-tester -- nslookup dns-svc.dns-config.svc.cluster.local 2>/dev/null || echo "failed") 25 | if echo "$TEST_RESULT" | grep -q "failed"; then 26 | echo "FQDN DNS resolution test failed" 27 | exit 1 28 | fi 29 | 30 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q10_s3_validate_results.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if results file exists in the pod 5 | FILE_CHECK=$(kubectl exec -n dns-config dns-tester -- test -f /tmp/dns-test.txt && echo "exists" || echo "not found") 6 | if [ "$FILE_CHECK" = "not found" ]; then 7 | echo "DNS test results file not found" 8 | exit 1 9 | fi 10 | 11 | # Check if file has content 12 | CONTENT=$(kubectl exec -n dns-config dns-tester -- cat /tmp/dns-test.txt 2>/dev/null || echo "") 13 | if [ -z "$CONTENT" ]; then 14 | echo "DNS test results file is empty" 15 | exit 1 16 | fi 17 | 18 | # Verify file contains required information 19 | if ! echo "$CONTENT" | grep -q "dns-svc.dns-config.svc.cluster.local"; then 20 | echo "FQDN resolution results not found in file" 21 | exit 1 22 | fi 23 | 24 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q11_s1_validate_repo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if bitnami repo is added 5 | helm repo list | grep bitnami || { 6 | echo "Bitnami repository not found in helm repo list" 7 | exit 1 8 | } 9 | 10 | # Check if repo URL is correct 11 | REPO_URL=$(helm repo list | grep bitnami | awk '{print $2}') 12 | if [[ "$REPO_URL" != "https://charts.bitnami.com/bitnami" ]]; then 13 | echo "Incorrect repository URL. Expected https://charts.bitnami.com/bitnami, got $REPO_URL" 14 | exit 1 15 | fi 16 | 17 | # Check if repo is up to date 18 | helm repo update bitnami || { 19 | echo "Failed to update bitnami repository" 20 | exit 1 21 | } 22 | 23 | echo "Helm repository validation successful" 24 | exit 0
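Read in reverse, the q11_s2 script that follows spells out the command a candidate would need. A hedged example that should satisfy those checks — `service.type` and `replicaCount` are standard value names in Bitnami's nginx chart, but verify them against the chart version actually in use:

```bash
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
# A NodePort service and 2 replicas are what q11_s2_validate_release.sh asserts.
helm install web-release bitnami/nginx \
  --namespace helm-test \
  --set service.type=NodePort \
  --set replicaCount=2
```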
"$CHART" =~ "nginx" ]]; then 13 | echo "Release is not using nginx chart" 14 | exit 1 15 | fi 16 | 17 | # Check if service type is NodePort 18 | SERVICE_TYPE=$(kubectl get service web-release-nginx -n helm-test -o jsonpath='{.spec.type}') 19 | if [[ "$SERVICE_TYPE" != "NodePort" ]]; then 20 | echo "Service type is not NodePort. Current type: $SERVICE_TYPE" 21 | exit 1 22 | fi 23 | 24 | # Check replica count 25 | REPLICAS=$(kubectl get deployment web-release-nginx -n helm-test -o jsonpath='{.spec.replicas}') 26 | if [[ "$REPLICAS" != "2" ]]; then 27 | echo "Incorrect number of replicas. Expected 2, got $REPLICAS" 28 | exit 1 29 | fi 30 | 31 | echo "Helm release validation successful" 32 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q16_s1_validate_sa.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if ServiceAccount exists 5 | kubectl get serviceaccount app-admin -n cluster-admin || { 6 | echo "ServiceAccount app-admin not found in namespace cluster-admin" 7 | exit 1 8 | } 9 | 10 | # Check if token is automatically mounted 11 | AUTO_MOUNT=$(kubectl get serviceaccount app-admin -n cluster-admin -o jsonpath='{.automountServiceAccountToken}') 12 | if [[ "$AUTO_MOUNT" == "false" ]]; then 13 | echo "ServiceAccount token automounting is disabled" 14 | exit 1 15 | fi 16 | 17 | # Check if secret is created for the ServiceAccount 18 | # SECRET_NAME=$(kubectl get serviceaccount app-admin -n cluster-admin -o jsonpath='{.secrets[0].name}') 19 | # if [[ -z "$SECRET_NAME" ]]; then 20 | # echo "No token secret found for ServiceAccount" 21 | # exit 1 22 | # fi 23 | 24 | # # Verify secret exists 25 | # kubectl get secret $SECRET_NAME -n cluster-admin || { 26 | # echo "Token secret $SECRET_NAME not found" 27 | # exit 1 28 | # } 29 | 30 | echo "ServiceAccount validation successful" 31 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q17_s2_validate_policies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Helper to check if a policy exists 5 | check_policy_exists() { 6 | local name=$1 7 | if ! 
kubectl get networkpolicy "$name" -n network >/dev/null 2>&1; then 8 | echo "❌ $name not found" 9 | exit 1 10 | fi 11 | } 12 | 13 | # Helper to check if a policy egress allows traffic to a given app 14 | check_egress_to() { 15 | local policy=$1 16 | local app=$2 17 | local found=$(kubectl get networkpolicy "$policy" -n network -o jsonpath="{.spec.egress[*].to[*].podSelector.matchLabels.app}" | grep -w "$app" || true) 18 | if [[ -z "$found" ]]; then 19 | echo "❌ $policy does not allow egress to $app" 20 | exit 1 21 | fi 22 | } 23 | 24 | # Check required policies 25 | check_policy_exists web-policy 26 | check_policy_exists api-policy 27 | 28 | # Check egress rules 29 | check_egress_to web-policy api 30 | check_egress_to api-policy db 31 | 32 | echo "✅ NetworkPolicies validation successful" 33 | exit 0 34 | -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q1_s1_validate_pvc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if PVC exists 5 | kubectl get pvc data-pvc -n storage-task || { 6 | echo "PVC data-pvc not found in namespace storage-task" 7 | exit 1 8 | } 9 | 10 | # Validate storage class 11 | STORAGE_CLASS=$(kubectl get pvc data-pvc -n storage-task -o jsonpath='{.spec.storageClassName}') 12 | if [[ "$STORAGE_CLASS" != "standard" ]]; then 13 | echo "Incorrect storage class. Expected 'standard', got '$STORAGE_CLASS'" 14 | exit 1 15 | fi 16 | 17 | # Validate access mode 18 | ACCESS_MODE=$(kubectl get pvc data-pvc -n storage-task -o jsonpath='{.spec.accessModes[0]}') 19 | if [[ "$ACCESS_MODE" != "ReadWriteOnce" ]]; then 20 | echo "Incorrect access mode. Expected 'ReadWriteOnce', got '$ACCESS_MODE'" 21 | exit 1 22 | fi 23 | 24 | # Validate storage size 25 | STORAGE_SIZE=$(kubectl get pvc data-pvc -n storage-task -o jsonpath='{.spec.resources.requests.storage}') 26 | if [[ "$STORAGE_SIZE" != "2Gi" ]]; then 27 | echo "Incorrect storage size. Expected '2Gi', got '$STORAGE_SIZE'" 28 | exit 1 29 | fi 30 | 31 | echo "PVC validation successful" 32 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q1_s2_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if pod exists 5 | kubectl get pod data-pod -n storage-task || { 6 | echo "Pod data-pod not found in namespace storage-task" 7 | exit 1 8 | } 9 | 10 | # Check if pod is running 11 | POD_STATUS=$(kubectl get pod data-pod -n storage-task -o jsonpath='{.status.phase}') 12 | if [[ "$POD_STATUS" != "Running" ]]; then 13 | echo "Pod is not in Running state. Current state: $POD_STATUS" 14 | exit 1 15 | fi 16 | 17 | # Check if pod is using nginx image 18 | POD_IMAGE=$(kubectl get pod data-pod -n storage-task -o jsonpath='{.spec.containers[0].image}') 19 | if [[ "$POD_IMAGE" != *"nginx"* ]]; then 20 | echo "Pod is not using nginx image. 
Current image: $POD_IMAGE" 21 | exit 1 22 | fi 23 | 24 | echo "Pod validation successful" 25 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q1_s3_validate_mount.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if volume mount exists 5 | MOUNT_PATH=$(kubectl get pod data-pod -n storage-task -o jsonpath='{.spec.containers[0].volumeMounts[?(@.name=="data")].mountPath}') 6 | if [[ "$MOUNT_PATH" != "/usr/share/nginx/html" ]]; then 7 | echo "Volume not mounted at correct path. Expected '/usr/share/nginx/html', got '$MOUNT_PATH'" 8 | exit 1 9 | fi 10 | 11 | # Check if volume is using the PVC 12 | VOLUME_PVC=$(kubectl get pod data-pod -n storage-task -o jsonpath='{.spec.volumes[?(@.name=="data")].persistentVolumeClaim.claimName}') 13 | if [[ "$VOLUME_PVC" != "data-pvc" ]]; then 14 | echo "Pod is not using the correct PVC. Expected 'data-pvc', got '$VOLUME_PVC'" 15 | exit 1 16 | fi 17 | 18 | echo "Volume mount validation successful" 19 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q20_s1_validate_port.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if deployment exists 5 | kubectl get deployment failing-app -n troubleshoot || { 6 | echo "Deployment failing-app not found in namespace troubleshoot" 7 | exit 1 8 | } 9 | 10 | # Check container port configuration 11 | PORT=$(kubectl get deployment failing-app -n troubleshoot -o jsonpath='{.spec.template.spec.containers[0].ports[0].containerPort}') 12 | if [[ "$PORT" != "80" ]]; then 13 | echo "Incorrect container port. Expected 80, got $PORT" 14 | exit 1 15 | fi 16 | 17 | # Check if port is correctly configured in pods 18 | PODS=$(kubectl get pods -n troubleshoot -l app=failing-app -o jsonpath='{.items[*].metadata.name}') 19 | for POD in $PODS; do 20 | POD_PORT=$(kubectl get pod $POD -n troubleshoot -o jsonpath='{.spec.containers[0].ports[0].containerPort}') 21 | if [[ "$POD_PORT" != "80" ]]; then 22 | echo "Pod $POD has incorrect port configuration. Expected 80, got $POD_PORT" 23 | exit 1 24 | fi 25 | done 26 | 27 | echo "Container port validation successful" 28 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q2_s1_validate_sc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if StorageClass exists 5 | kubectl get storageclass fast-local || { 6 | echo "StorageClass fast-local not found" 7 | exit 1 8 | } 9 | 10 | # Validate provisioner 11 | PROVISIONER=$(kubectl get storageclass fast-local -o jsonpath='{.provisioner}') 12 | if [[ "$PROVISIONER" != "rancher.io/local-path" ]]; then 13 | echo "Incorrect provisioner. Expected 'rancher.io/local-path', got '$PROVISIONER'" 14 | exit 1 15 | fi 16 | 17 | # Validate volumeBindingMode 18 | BINDING_MODE=$(kubectl get storageclass fast-local -o jsonpath='{.volumeBindingMode}') 19 | if [[ "$BINDING_MODE" != "WaitForFirstConsumer" ]]; then 20 | echo "Incorrect volumeBindingMode. 
Expected 'WaitForFirstConsumer', got '$BINDING_MODE'" 21 | exit 1 22 | fi 23 | 24 | echo "StorageClass validation successful" 25 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q2_s2_validate_default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if StorageClass is marked as default 5 | IS_DEFAULT=$(kubectl get storageclass fast-local -o jsonpath='{.metadata.annotations.storageclass\.kubernetes\.io/is-default-class}') 6 | if [[ "$IS_DEFAULT" != "true" ]]; then 7 | echo "StorageClass fast-local is not marked as default" 8 | exit 1 9 | fi 10 | 11 | echo "Default StorageClass validation successful" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q2_s3_validate_no_other_default.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Count number of default StorageClasses 5 | DEFAULT_COUNT=$(kubectl get storageclass -o jsonpath='{.items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")].metadata.name}' | wc -w) 6 | 7 | if [[ "$DEFAULT_COUNT" -ne 1 ]]; then 8 | echo "Found $DEFAULT_COUNT default StorageClasses. Expected exactly 1" 9 | exit 1 10 | fi 11 | 12 | # Verify the only default is our StorageClass 13 | DEFAULT_SC=$(kubectl get storageclass -o jsonpath='{.items[?(@.metadata.annotations.storageclass\.kubernetes\.io/is-default-class=="true")].metadata.name}') 14 | if [[ "$DEFAULT_SC" != "fast-local" ]]; then 15 | echo "Wrong StorageClass is default. Expected 'fast-local', got '$DEFAULT_SC'" 16 | exit 1 17 | fi 18 | 19 | echo "No other default StorageClass validation successful" 20 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q5_s1_validate_node_label.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if node exists 5 | kubectl get node k3d-cluster-agent-1 || { 6 | echo "Node k3d-cluster-agent-1 not found" 7 | exit 1 8 | } 9 | 10 | # Check if node has the required label 11 | LABEL_VALUE=$(kubectl get node k3d-cluster-agent-1 -o jsonpath='{.metadata.labels.disk}') 12 | if [[ "$LABEL_VALUE" != "ssd" ]]; then 13 | echo "Node k3d-cluster-agent-1 does not have the correct label. Expected disk=ssd, got disk=$LABEL_VALUE" 14 | exit 1 15 | fi 16 | 17 | echo "Node label validation successful" 18 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q5_s3_validate_pod_placement.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if all pods are running 5 | READY_PODS=$(kubectl get deployment app-scheduling -n scheduling -o jsonpath='{.status.readyReplicas}') 6 | if [[ "$READY_PODS" != "3" ]]; then 7 | echo "Not all pods are ready. 
Expected 3, got $READY_PODS" 8 | exit 1 9 | fi 10 | 11 | # Get all pods 12 | PODS=$(kubectl get pods -n scheduling -l app=app-scheduling -o jsonpath='{.items[*].metadata.name}') 13 | 14 | # Check each pod's node placement 15 | for POD in $PODS; do 16 | NODE=$(kubectl get pod $POD -n scheduling -o jsonpath='{.spec.nodeName}') 17 | if [[ "$NODE" != "k3d-cluster-agent-1" ]]; then 18 | echo "Pod $POD is running on wrong node. Expected k3d-cluster-agent-1, got $NODE" 19 | exit 1 20 | fi 21 | done 22 | 23 | echo "Pod placement validation successful" 24 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q6_s1_validate_psp.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | NAMESPACE="security" 5 | 6 | # Check if namespace exists 7 | kubectl get namespace "$NAMESPACE" > /dev/null || { 8 | echo "Namespace '$NAMESPACE' not found" 9 | exit 1 10 | } 11 | 12 | # Check enforce label 13 | ENFORCE=$(kubectl get namespace "$NAMESPACE" -o jsonpath='{.metadata.labels.pod-security\.kubernetes\.io/enforce}') 14 | if [[ "$ENFORCE" != "restricted" ]]; then 15 | echo "Namespace '$NAMESPACE' does not have enforce=restricted label" 16 | exit 1 17 | fi 18 | 19 | # Check enforce-version label 20 | VERSION=$(kubectl get namespace "$NAMESPACE" -o jsonpath='{.metadata.labels.pod-security\.kubernetes\.io/enforce-version}') 21 | if [[ "$VERSION" != "latest" ]]; then 22 | echo "Namespace '$NAMESPACE' does not have enforce-version=latest label" 23 | exit 1 24 | fi 25 | 26 | echo "PSA label validation successful" 27 | exit 0 28 | -------------------------------------------------------------------------------- /facilitator/assets/exams/cka/002/scripts/validation/q7_s1_validate_taint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # Check if node exists 5 | kubectl get node k3d-cluster-agent-1 || { 6 | echo "Node k3d-cluster-agent-1 not found" 7 | exit 1 8 | } 9 | 10 | # Check if taint exists 11 | TAINT=$(kubectl get node k3d-cluster-agent-1 -o jsonpath='{.spec.taints[?(@.key=="special-workload")]}') 12 | if [[ -z "$TAINT" ]]; then 13 | echo "Taint special-workload not found on node k3d-cluster-agent-1" 14 | exit 1 15 | fi 16 | 17 | # Check taint value 18 | TAINT_VALUE=$(kubectl get node k3d-cluster-agent-1 -o jsonpath='{.spec.taints[?(@.key=="special-workload")].value}') 19 | if [[ "$TAINT_VALUE" != "true" ]]; then 20 | echo "Incorrect taint value. Expected 'true', got '$TAINT_VALUE'" 21 | exit 1 22 | fi 23 | 24 | # Check taint effect 25 | TAINT_EFFECT=$(kubectl get node k3d-cluster-agent-1 -o jsonpath='{.spec.taints[?(@.key=="special-workload")].effect}') 26 | if [[ "$TAINT_EFFECT" != "NoSchedule" ]]; then 27 | echo "Incorrect taint effect. 
Expected 'NoSchedule', got '$TAINT_EFFECT'" 28 | exit 1 29 | fi 30 | 31 | echo "Node taint validation successful" 32 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/config.json:
--------------------------------------------------------------------------------
1 | { 2 | "lab": "ckad-001", 3 | "workerNodes": 1, 4 | "answers": "assets/exams/ckad/001/answers.md", 5 | "questions": "assessment.json", 6 | "totalMarks": 100, 7 | "lowScore": 40, 8 | "mediumScore": 60, 9 | "highScore": 90 10 | } 11 |
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q10_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 10: Create a Secret and use it in a Pod 4 | 5 | # Create the workloads namespace if it doesn't exist already 6 | if ! kubectl get namespace workloads &> /dev/null; then 7 | kubectl create namespace workloads 8 | fi 9 | 10 | # Delete any existing Secret and Pod with the same names 11 | kubectl delete secret db-credentials -n workloads --ignore-not-found=true 12 | kubectl delete pod secure-pod -n workloads --ignore-not-found=true 13 | 14 | echo "Setup complete for Question 10: Environment ready for creating Secret 'db-credentials' and Pod 'secure-pod'" 15 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q11_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 11: Create a CronJob for log cleaning 4 | 5 | # Create the workloads namespace if it doesn't exist already 6 | if ! kubectl get namespace workloads &> /dev/null; then 7 | kubectl create namespace workloads 8 | fi 9 | 10 | # Delete any existing CronJob with the same name 11 | kubectl delete cronjob log-cleaner -n workloads --ignore-not-found=true 12 | 13 | # Create a directory with some sample log files for demonstration 14 | mkdir -p /tmp/var/log 15 | touch /tmp/var/log/test1.log 16 | touch /tmp/var/log/test2.log 17 | touch /tmp/var/log/app.log 18 | touch /tmp/var/log/system.log 19 | 20 | echo "Setup complete for Question 11: Environment ready for creating CronJob 'log-cleaner'" 21 | echo "Note: In a real environment, log files would be on the host system. These sample files" 22 | echo " are for demonstration only and won't actually be accessible from the CronJob." 23 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q12_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 12: Create a Pod with liveness and readiness probes 4 | 5 | # Create the workloads namespace if it doesn't exist already 6 | if ! kubectl get namespace workloads &> /dev/null; then 7 | kubectl create namespace workloads 8 | fi 9 | 10 | # Delete any existing Pod with the same name 11 | kubectl delete pod health-pod -n workloads --ignore-not-found=true 12 | 13 | # Create an index.html and healthz endpoint for testing the probes 14 | cat <<EOF > /tmp/index.html 15 | <!DOCTYPE html> 16 | <html> 17 | <head> 18 | <title>CKAD Exam</title> 19 | </head> 20 | <body> 21 | <h1>Welcome to the CKAD Practice Exam!</h1> 22 | </body> 23 | </html> 24 | EOF 25 | 26 | cat <<EOF > /tmp/healthz 27 | OK 28 | EOF 29 | 30 | echo "Setup complete for Question 12: Environment ready for creating Pod 'health-pod' with liveness and readiness probes" 31 | echo "Note: In a real environment, you would need to set up files at /healthz in the container." 32 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q14_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 14: Install Helm and deploy Bitnami Nginx 4 | 5 | # Create the web namespace if it doesn't exist already 6 | if ! kubectl get namespace web &> /dev/null; then 7 | kubectl create namespace web 8 | fi 9 | 10 | # Delete any existing helm installations of nginx 11 | if command -v helm &> /dev/null; then 12 | helm uninstall nginx -n web --ignore-not-found 13 | fi 14 | 15 | echo "Setup complete for Question 14: Environment ready for installing Helm and deploying Bitnami Nginx" 16 | echo "Note: The candidate should add the Bitnami repo if not already present: helm repo add bitnami https://charts.bitnami.com/bitnami" 17 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q1_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 1: Create a deployment called nginx-deployment in namespace dev 4 | 5 | # Delete the namespace if it exists so the candidate starts from a clean state 6 | if kubectl get namespace dev &> /dev/null; then 7 | kubectl delete namespace dev --ignore-not-found=true 8 | fi 9 | 10 | # Also delete any leftover deployment (a no-op when the namespace was just removed) 11 | kubectl delete deployment nginx-deployment -n dev --ignore-not-found=true 12 | 13 | echo "Setup complete for Question 1: Environment ready for creating nginx deployment in namespace 'dev'" 14 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q21_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 21: Pull and store nginx image in OCI format 4 | 5 | # Create the directory for storing OCI images 6 | mkdir -p /root/oci-images 7 | 8 | # Remove any existing content to ensure clean state 9 | rm -rf /root/oci-images/* 10 | 11 | # Make sure required tools are installed 12 | 13 | if ! command -v docker &> /dev/null; then 14 | echo "Installing docker for image pulling..." 15 | apt-get update 16 | apt-get install -y docker.io 17 | systemctl start docker 18 | fi 19 | 20 | echo "Setup complete for Question 21: Environment ready for pulling and storing the nginx image in OCI format" 21 | echo "Task: Pull the nginx:latest image and store it in OCI format in the directory /root/oci-images" 22 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q2_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 2: Create a PersistentVolume named 'pv-storage' 4 | 5 | # Create the storage-test namespace if it doesn't exist already
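# (Aside, not part of this script: the q2 validation scripts later in this exam
# check PersistentVolume 'pv-storage' for capacity 1Gi, access mode
# ReadWriteOnce and reclaim policy Retain, and the note below points at
# /mnt/data; a manifest sketch assembled from those checks, where using a
# hostPath volume source is the one assumption:
#   apiVersion: v1
#   kind: PersistentVolume
#   metadata:
#     name: pv-storage
#   spec:
#     capacity:
#       storage: 1Gi
#     accessModes: [ReadWriteOnce]
#     persistentVolumeReclaimPolicy: Retain
#     hostPath:
#       path: /mnt/data )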
6 | if ! kubectl get namespace storage-test &> /dev/null; then 7 | kubectl create namespace storage-test 8 | fi 9 | 10 | # Delete any existing PV with the same name to ensure a clean state 11 | kubectl delete pv pv-storage --ignore-not-found=true 12 | 13 | # Create the /mnt/data directory on the host if possible (this may require privileged access) 14 | # In a real environment, this would need to be handled by the cluster admin 15 | echo "Note: Ensure /mnt/data directory exists on the node for the hostPath volume" 16 | 17 | echo "Setup complete for Question 2: Environment ready for creating PersistentVolume 'pv-storage'" 18 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q3_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 3: Create a StorageClass named 'fast-storage' 4 | 5 | # Delete any leftover StorageClasses from previous runs to ensure a clean state 6 | kubectl delete storageclass slow-storage fast-storage --ignore-not-found=true 7 | 8 | echo "Setup complete for Question 3: Environment ready for creating StorageClass 'fast-storage'" 9 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q4_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 4: Create a PersistentVolumeClaim named 'pvc-app' 4 | 5 | # Create the storage-test namespace if it doesn't exist already 6 | if ! kubectl get namespace storage-test &> /dev/null; then 7 | kubectl create namespace storage-test 8 | fi 9 | 10 | # Delete any existing PVC with the same name to ensure a clean state 11 | kubectl delete pvc pvc-app -n storage-test --ignore-not-found=true 12 | 13 | # Create the StorageClass if it doesn't exist (dependency for this question; 14 | # left commented out so the candidate creates it in Question 3) 15 | # if ! kubectl get storageclass fast-storage &> /dev/null; then 16 | # cat <<EOF | kubectl apply -f - 17 | # apiVersion: storage.k8s.io/v1 18 | # kind: StorageClass 19 | # metadata: 20 | #   name: fast-storage 21 | # provisioner: kubernetes.io/no-provisioner 22 | # volumeBindingMode: WaitForFirstConsumer 23 | # EOF 24 | # fi 25 | 26 | echo "Setup complete for Question 4: Environment ready for creating PersistentVolumeClaim 'pvc-app'" 27 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q6_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 6: Create a multi-container pod with a shared volume 4 | 5 | # Create the troubleshooting namespace if it doesn't exist already 6 | if ! kubectl get namespace troubleshooting &> /dev/null; then 7 | kubectl create namespace troubleshooting 8 | fi 9 | 10 | # Delete any existing pod with the same name 11 | kubectl delete pod sidecar-pod -n troubleshooting --ignore-not-found=true 12 | 13 | echo "Setup complete for Question 6: Environment ready for creating a multi-container pod with shared volume" 14 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/setup/q9_setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Setup for Question 9: Create a ConfigMap and use it in a Pod 4 | 5 | # Create the workloads namespace if it doesn't exist already
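# (Aside, not part of this script: validations q9_s1..q9_s3 expect ConfigMap
# 'app-config' carrying APP_ENV=production and LOG_LEVEL=info; a one-liner
# that would satisfy the ConfigMap check:
#   kubectl create configmap app-config -n workloads \
#     --from-literal=APP_ENV=production --from-literal=LOG_LEVEL=info )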
6 | if ! kubectl get namespace workloads &> /dev/null; then 7 | kubectl create namespace workloads 8 | fi 9 | 10 | # Delete any existing ConfigMap and Pod with the same names 11 | kubectl delete configmap app-config -n workloads --ignore-not-found=true 12 | kubectl delete pod config-pod -n workloads --ignore-not-found=true 13 | 14 | echo "Setup complete for Question 9: Environment ready for creating ConfigMap 'app-config' and Pod 'config-pod'" 15 | exit 0
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q10_s1_validate_secret.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the Secret 'db-credentials' exists in the 'workloads' namespace with correct data 4 | USERNAME=$(kubectl get secret db-credentials -n workloads -o jsonpath='{.data.username}' 2>/dev/null | base64 --decode) 5 | PASSWORD=$(kubectl get secret db-credentials -n workloads -o jsonpath='{.data.password}' 2>/dev/null | base64 --decode) 6 | 7 | if [ "$USERNAME" = "admin" ] && [ "$PASSWORD" = "securepass" ]; then 8 | echo "Success: Secret 'db-credentials' exists with correct data" 9 | exit 0 10 | else 11 | echo "Error: Secret 'db-credentials' does not have the correct data." 12 | echo "Expected: username=admin, password=securepass" 13 | echo "Found: username=$USERNAME, password=$PASSWORD" 14 | exit 1 15 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q10_s2_validate_pod_running.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the pod 'secure-pod' is running in the 'workloads' namespace 4 | POD_NAME="secure-pod" 5 | NAMESPACE="workloads" 6 | 7 | # Check if the pod is running 8 | POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}' 2>/dev/null) 9 | 10 | if [ "$POD_STATUS" = "Running" ]; then 11 | echo "Success: Pod '$POD_NAME' is running in namespace '$NAMESPACE'" 12 | exit 0 13 | else 14 | echo "Error: Pod '$POD_NAME' is not running in namespace '$NAMESPACE'" 15 | exit 1 16 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q11_s1_validate_cronjob_exists.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the CronJob 'log-cleaner' exists in the 'workloads' namespace 4 | if kubectl get cronjob log-cleaner -n workloads &> /dev/null; then 5 | echo "Success: CronJob 'log-cleaner' exists in namespace 'workloads'" 6 | exit 0 7 | else 8 | echo "Error: CronJob 'log-cleaner' does not exist in namespace 'workloads'" 9 | exit 1 10 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q11_s2_validate_cronjob_schedule.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the CronJob 'log-cleaner' has the correct schedule (every hour) 4 | SCHEDULE=$(kubectl get cronjob log-cleaner -n workloads -o jsonpath='{.spec.schedule}' 2>/dev/null) 5 | 6 | if [ "$SCHEDULE" = "0 * * * *" ] || [ "$SCHEDULE" = "@hourly" ]; then 7 | echo "Success: CronJob 'log-cleaner' has the correct schedule (every hour): $SCHEDULE" 8 | exit 0 9 | else 10 | echo "Error: CronJob 'log-cleaner' does not have the correct
schedule" 11 | echo "Expected: '0 * * * *' or '@hourly', Found: '$SCHEDULE'" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q11_s3_validate_cronjob_command.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CRONJOB_NAME="log-cleaner" 4 | NAMESPACE="workloads" 5 | 6 | # Expected values 7 | EXPECTED_COMMAND='["/bin/sh","-c"]' 8 | EXPECTED_ARGS='find /var/log -type f -name "*.log" -mtime +7 -delete' 9 | 10 | # Fetch actual values from the CronJob 11 | ACTUAL_COMMAND=$(kubectl get cronjob "$CRONJOB_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.jobTemplate.spec.template.spec.containers[0].command}") 12 | ACTUAL_ARGS=$(kubectl get cronjob "$CRONJOB_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.jobTemplate.spec.template.spec.containers[0].args[0]}") 13 | 14 | # Compare 15 | if [[ "$ACTUAL_COMMAND" == "$EXPECTED_COMMAND" && "$ACTUAL_ARGS" == "$EXPECTED_ARGS" ]]; then 16 | echo "✅ Success: CronJob '$CRONJOB_NAME' has the correct command and args" 17 | exit 0 18 | else 19 | echo "❌ Error: CronJob '$CRONJOB_NAME' does not have the correct command/args" 20 | echo "Actual command: $ACTUAL_COMMAND" 21 | echo "Actual args: $ACTUAL_ARGS" 22 | exit 1 23 | fi 24 | -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q14_s1_validate_helm_repo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the Bitnami repository is added to Helm 3 | 4 | # Check if the Bitnami repository is added 5 | BITNAMI_REPO=$(helm repo list 2>/dev/null | grep -i bitnami) 6 | if [ $? -ne 0 ] || [ -z "$BITNAMI_REPO" ]; then 7 | echo "❌ Bitnami repository is not added to Helm" 8 | exit 1 9 | fi 10 | 11 | # Extract the URL from the repo list 12 | REPO_URL=$(echo "$BITNAMI_REPO" | awk '{print $2}') 13 | 14 | # Check if the URL is correct 15 | if [[ ! 
"$REPO_URL" =~ "charts.bitnami.com/bitnami" ]]; then 16 | echo "❌ Bitnami repository URL is incorrect: $REPO_URL" 17 | exit 1 18 | fi 19 | 20 | echo "✅ Bitnami repository is properly configured with URL: $REPO_URL" 21 | 22 | # Check if repo is up to date 23 | LAST_UPDATE=$(helm repo list 2>/dev/null | grep -i bitnami | awk '{print $3}') 24 | if [ -n "$LAST_UPDATE" ]; then 25 | echo "ℹ️ Last repository update: $LAST_UPDATE" 26 | fi 27 | 28 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q16_s1_validate_policy_created.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the NetworkPolicy 'allow-traffic' exists in the 'networking' namespace 4 | if kubectl get networkpolicy allow-traffic -n networking &> /dev/null; then 5 | echo "Success: NetworkPolicy 'allow-traffic' exists in namespace 'networking'" 6 | exit 0 7 | else 8 | echo "Error: NetworkPolicy 'allow-traffic' does not exist in namespace 'networking'" 9 | exit 1 10 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q17_s1_validate_service_type.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the Service 'internal-app' is of type ClusterIP in the 'networking' namespace 4 | SERVICE_TYPE=$(kubectl get service internal-app -n networking -o jsonpath='{.spec.type}' 2>/dev/null) 5 | 6 | if [ "$SERVICE_TYPE" = "ClusterIP" ]; then 7 | echo "Success: Service 'internal-app' is of correct type (ClusterIP)" 8 | exit 0 9 | else 10 | echo "Error: Service 'internal-app' is not of the correct type. Found: '$SERVICE_TYPE', Expected: 'ClusterIP'" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q17_s2_validate_service_selector.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the 'internal-app' ClusterIP service has selector app=backend 3 | 4 | SERVICE_NAME="internal-app" 5 | NAMESPACE="networking" 6 | EXPECTED_KEY="app" 7 | EXPECTED_VALUE="backend" 8 | 9 | # Check if the service exists 10 | if ! kubectl get svc "$SERVICE_NAME" -n "$NAMESPACE" > /dev/null 2>&1; then 11 | echo "❌ Service '$SERVICE_NAME' not found in namespace '$NAMESPACE'" 12 | exit 1 13 | fi 14 | 15 | # Fetch selector key and value 16 | ACTUAL_SELECTOR_VALUE=$(kubectl get svc "$SERVICE_NAME" -n "$NAMESPACE" -o jsonpath="{.spec.selector.$EXPECTED_KEY}") 17 | 18 | if [ "$ACTUAL_SELECTOR_VALUE" != "$EXPECTED_VALUE" ]; then 19 | echo "❌ Service selector mismatch" 20 | echo "Expected: $EXPECTED_KEY=$EXPECTED_VALUE" 21 | echo "Found: $EXPECTED_KEY=$ACTUAL_SELECTOR_VALUE" 22 | exit 1 23 | fi 24 | 25 | # Success 26 | echo "✅ Service '$SERVICE_NAME' in namespace '$NAMESPACE' has correct selector: $EXPECTED_KEY=$EXPECTED_VALUE" 27 | exit 0 28 | -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q1_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #bin/bash 2 | 3 | # Validate namespace if present then return 0 else return 1 4 | kubectl get namespace dev 5 | if [ $? 
-eq 0 ]; then 6 | echo "Namespace dev is present" 7 | exit 0 8 | else 9 | echo "Namespace dev is not present" 10 | exit 1 11 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q1_s2_validate_deployment.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the deployment 'nginx-deployment' exists in the 'dev' namespace 4 | if kubectl get deployment nginx-deployment -n dev &> /dev/null; then 5 | echo "Success: Deployment 'nginx-deployment' exists in namespace 'dev'" 6 | exit 0 7 | else 8 | echo "Error: Deployment 'nginx-deployment' does not exist in namespace 'dev'" 9 | exit 1 10 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q1_s3_validate_deployment_running.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the deployment 'nginx-deployment' in namespace 'dev' is using the correct image (nginx:latest) 4 | IMAGE=$(kubectl get deployment nginx-deployment -n dev -o jsonpath='{.spec.template.spec.containers[0].image}' 2>/dev/null) 5 | 6 | if [ "$IMAGE" = "nginx:latest" ]; then 7 | echo "Success: Deployment 'nginx-deployment' is using the correct image 'nginx:latest'" 8 | exit 0 9 | else 10 | echo "Error: Deployment 'nginx-deployment' is not using the correct image. Found: '$IMAGE', Expected: 'nginx:latest'" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q1_s4_validate_deployment_replicas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the deployment 'nginx-deployment' in namespace 'dev' has 3 replicas 4 | REPLICAS=$(kubectl get deployment nginx-deployment -n dev -o jsonpath='{.spec.replicas}' 2>/dev/null) 5 | 6 | if [ "$REPLICAS" = "3" ]; then 7 | echo "Success: Deployment 'nginx-deployment' has the correct number of replicas (3)" 8 | exit 0 9 | else 10 | echo "Error: Deployment 'nginx-deployment' does not have the correct number of replicas. 
Found: '$REPLICAS', Expected: '3'" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q20_s1_validate_job_created.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Check if the Job is created with the correct name in the networking namespace 3 | 4 | JOB_NAME="hello-job" 5 | NAMESPACE="networking" 6 | 7 | # Check if the job exists 8 | if kubectl get job ${JOB_NAME} -n ${NAMESPACE} &> /dev/null; then 9 | echo "✅ Job '${JOB_NAME}' exists in namespace '${NAMESPACE}'" 10 | exit 0 11 | else 12 | echo "❌ Job '${JOB_NAME}' does not exist in namespace '${NAMESPACE}'" 13 | exit 1 14 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q21_s1_validate_oci_dir_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that an OCI directory exists in the specified path 4 | 5 | OCI_DIR="/root/oci-images" 6 | 7 | if [ -d "$OCI_DIR" ]; then 8 | # Check if the directory has some content (not empty) 9 | if [ "$(ls -A $OCI_DIR)" ]; then 10 | echo "SUCCESS: OCI directory exists at $OCI_DIR and contains files." 11 | exit 0 12 | else 13 | echo "ERROR: OCI directory exists at $OCI_DIR but is empty. Did you store the image?" 14 | exit 1 15 | fi 16 | else 17 | echo "ERROR: OCI directory does not exist at $OCI_DIR." 18 | exit 1 19 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q2_s1_validate_pv_created.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the PersistentVolume 'pv-storage' exists 4 | if kubectl get pv pv-storage &> /dev/null; then 5 | echo "Success: PersistentVolume 'pv-storage' exists" 6 | exit 0 7 | else 8 | echo "Error: PersistentVolume 'pv-storage' does not exist" 9 | exit 1 10 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q2_s2_validate_pv_capacity.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the PersistentVolume 'pv-storage' has the correct capacity (1Gi) 4 | CAPACITY=$(kubectl get pv pv-storage -o jsonpath='{.spec.capacity.storage}' 2>/dev/null) 5 | 6 | if [ "$CAPACITY" = "1Gi" ]; then 7 | echo "Success: PersistentVolume 'pv-storage' has the correct capacity (1Gi)" 8 | exit 0 9 | else 10 | echo "Error: PersistentVolume 'pv-storage' does not have the correct capacity. Found: '$CAPACITY', Expected: '1Gi'" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q2_s3_validate_pv_access_mode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the PersistentVolume 'pv-storage' has the correct access mode (ReadWriteOnce) 4 | ACCESS_MODE=$(kubectl get pv pv-storage -o jsonpath='{.spec.accessModes[0]}' 2>/dev/null) 5 | 6 | if [ "$ACCESS_MODE" = "ReadWriteOnce" ]; then 7 | echo "Success: PersistentVolume 'pv-storage' has the correct access mode (ReadWriteOnce)" 8 | exit 0 9 | else 10 | echo "Error: PersistentVolume 'pv-storage' does not have the correct access mode. 
Found: '$ACCESS_MODE', Expected: 'ReadWriteOnce'" 11 | exit 1 12 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q2_s4_validate_pv_reclaim_policy.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the PersistentVolume 'pv-storage' has the correct reclaim policy (Retain) 4 | RECLAIM_POLICY=$(kubectl get pv pv-storage -o jsonpath='{.spec.persistentVolumeReclaimPolicy}' 2>/dev/null) 5 | 6 | if [ "$RECLAIM_POLICY" = "Retain" ]; then 7 | echo "Success: PersistentVolume 'pv-storage' has the correct reclaim policy (Retain)" 8 | exit 0 9 | else 10 | echo "Error: PersistentVolume 'pv-storage' does not have the correct reclaim policy. Found: '$RECLAIM_POLICY', Expected: 'Retain'" 11 | exit 1 12 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q3_s1_validate_storageclass_created.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the StorageClass 'fast-storage' exists 4 | if kubectl get storageclass fast-storage &> /dev/null; then 5 | echo "Success: StorageClass 'fast-storage' exists" 6 | exit 0 7 | else 8 | echo "Error: StorageClass 'fast-storage' does not exist" 9 | exit 1 10 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q3_s2_validate_storageclass_provisioner.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the StorageClass 'fast-storage' has the correct provisioner 4 | PROVISIONER=$(kubectl get storageclass fast-storage -o jsonpath='{.provisioner}' 2>/dev/null) 5 | 6 | if [ "$PROVISIONER" = "kubernetes.io/no-provisioner" ]; then 7 | echo "Success: StorageClass 'fast-storage' has the correct provisioner (kubernetes.io/no-provisioner)" 8 | exit 0 9 | else 10 | echo "Error: StorageClass 'fast-storage' does not have the correct provisioner. Found: '$PROVISIONER', Expected: 'kubernetes.io/no-provisioner'" 11 | exit 1 12 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q3_s3_validate_storageclass_binding_mode.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the StorageClass 'fast-storage' has the correct volumeBindingMode 4 | BINDING_MODE=$(kubectl get storageclass fast-storage -o jsonpath='{.volumeBindingMode}' 2>/dev/null) 5 | 6 | if [ "$BINDING_MODE" = "WaitForFirstConsumer" ]; then 7 | echo "Success: StorageClass 'fast-storage' has the correct volumeBindingMode (WaitForFirstConsumer)" 8 | exit 0 9 | else 10 | echo "Error: StorageClass 'fast-storage' does not have the correct volumeBindingMode.
Found: '$BINDING_MODE', Expected: 'WaitForFirstConsumer'" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q4_s1_validate_pvc_created.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the PersistentVolumeClaim 'pvc-app' exists in the 'storage-test' namespace 4 | if kubectl get pvc pvc-app -n storage-test &> /dev/null; then 5 | echo "Success: PersistentVolumeClaim 'pvc-app' exists in namespace 'storage-test'" 6 | exit 0 7 | else 8 | echo "Error: PersistentVolumeClaim 'pvc-app' does not exist in namespace 'storage-test'" 9 | exit 1 10 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q4_s2_validate_pvc_size.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the PersistentVolumeClaim 'pvc-app' in namespace 'storage-test' has the correct storage size (500Mi) 4 | STORAGE_SIZE=$(kubectl get pvc pvc-app -n storage-test -o jsonpath='{.spec.resources.requests.storage}' 2>/dev/null) 5 | 6 | if [ "$STORAGE_SIZE" = "500Mi" ]; then 7 | echo "Success: PersistentVolumeClaim 'pvc-app' has the correct storage size (500Mi)" 8 | exit 0 9 | else 10 | echo "Error: PersistentVolumeClaim 'pvc-app' does not have the correct storage size. Found: '$STORAGE_SIZE', Expected: '500Mi'" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q4_s3_validate_pvc_access_mode.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if PersistentVolumeClaim has correct access mode 4 | ACCESS_MODE=$(kubectl get pvc pvc-app -n storage-test -o jsonpath='{.spec.accessModes[0]}' 2>/dev/null) 5 | 6 | if [ "$ACCESS_MODE" = "ReadWriteOnce" ]; then 7 | echo "Success: PersistentVolumeClaim 'pvc-app' has the correct access mode (ReadWriteOnce)" 8 | exit 0 9 | else 10 | echo "Error: PersistentVolumeClaim 'pvc-app' does not have the correct access mode. Found: '$ACCESS_MODE', Expected: 'ReadWriteOnce'" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q4_s4_validate_pvc_storageclass.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if PersistentVolumeClaim uses correct StorageClass 4 | STORAGE_CLASS=$(kubectl get pvc pvc-app -n storage-test -o jsonpath='{.spec.storageClassName}' 2>/dev/null) 5 | 6 | if [ "$STORAGE_CLASS" = "fast-storage" ]; then 7 | echo "Success: PersistentVolumeClaim 'pvc-app' uses correct StorageClass (fast-storage)" 8 | exit 0 9 | else 10 | echo "Error: PersistentVolumeClaim 'pvc-app' does not have the correct storage class. 
Found: '$STORAGE_CLASS', Expected: 'fast-storage'" 11 | exit 1 12 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q5_s1_validate_pods_running.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the pods of deployment 'broken-app' are running in the 'troubleshooting' namespace 4 | READY_PODS=$(kubectl get deployment broken-app -n troubleshooting -o jsonpath='{.status.readyReplicas}' 2>/dev/null) 5 | TOTAL_PODS=$(kubectl get deployment broken-app -n troubleshooting -o jsonpath='{.status.replicas}' 2>/dev/null) 6 | 7 | if [ -z "$READY_PODS" ]; then 8 | READY_PODS=0 9 | fi 10 | 11 | if [ -z "$TOTAL_PODS" ]; then 12 | echo "Error: Deployment 'broken-app' does not exist in namespace 'troubleshooting'" 13 | exit 1 14 | fi 15 | 16 | if [ "$READY_PODS" -eq "$TOTAL_PODS" ] && [ "$READY_PODS" -gt 0 ]; then 17 | echo "Success: All pods in deployment 'broken-app' are running ($READY_PODS/$TOTAL_PODS)" 18 | exit 0 19 | else 20 | echo "Error: Not all pods in deployment 'broken-app' are running ($READY_PODS/$TOTAL_PODS)" 21 | exit 1 22 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q5_s2_validate_container_image.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate that deployment 'broken-app' in the 'troubleshooting' namespace is available 4 | # and that its container image is nginx 5 | DEPLOYMENT_STATUS=$(kubectl get deployment broken-app -n troubleshooting -o jsonpath='{.status.conditions[?(@.type=="Available")].status}') 6 | if [ "$DEPLOYMENT_STATUS" != "True" ]; then 7 | echo "Error: The deployment 'broken-app' is not running" 8 | exit 1 9 | fi 10 | 11 | IMAGE=$(kubectl get deployment broken-app -n troubleshooting -o jsonpath='{.spec.template.spec.containers[0].image}' | cut -d':' -f1) 12 | 13 | if [ "$IMAGE" == "nginx" ]; then 14 | echo "Success: The container image is correct" 15 | exit 0 16 | else 17 | echo "Error: The container image is not correct" 18 | exit 1 19 | fi
--------------------------------------------------------------------------------
/facilitator/assets/exams/ckad/001/scripts/validation/q9_s1_validate_configmap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash 2 | 3 | # Validate if the ConfigMap 'app-config' exists in the 'workloads' namespace with correct data 4 | APP_ENV=$(kubectl get configmap app-config -n workloads -o jsonpath='{.data.APP_ENV}' 2>/dev/null) 5 | LOG_LEVEL=$(kubectl get configmap app-config -n workloads -o jsonpath='{.data.LOG_LEVEL}' 2>/dev/null) 6 | 7 | if [ "$APP_ENV" = "production" ] && [ "$LOG_LEVEL" = "info" ]; then 8 | echo "Success: ConfigMap 'app-config' exists with correct key-value pairs" 9 | exit 0 10 | else 11 | echo "Error: ConfigMap 'app-config' does not have the correct data."
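# (Aside, not part of this script: q9_s2 and q9_s3 below additionally expect a
# Running pod 'config-pod' with APP_ENV and LOG_LEVEL injected from this
# ConfigMap; a minimal pod sketch, with container name and image assumed:
#   apiVersion: v1
#   kind: Pod
#   metadata:
#     name: config-pod
#     namespace: workloads
#   spec:
#     containers:
#     - name: app
#       image: nginx
#       envFrom:
#       - configMapRef:
#           name: app-config )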
12 | echo "Expected: APP_ENV=production, LOG_LEVEL=info" 13 | echo "Found: APP_ENV=$APP_ENV, LOG_LEVEL=$LOG_LEVEL" 14 | exit 1 15 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q9_s2_validate_pod_running.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the Pod 'config-pod' exists and is running in the 'workloads' namespace 4 | POD_STATUS=$(kubectl get pod config-pod -n workloads -o jsonpath='{.status.phase}' 2>/dev/null) 5 | 6 | if [ "$POD_STATUS" = "Running" ]; then 7 | echo "Success: Pod 'config-pod' is running in namespace 'workloads'" 8 | exit 0 9 | else 10 | echo "Error: Pod 'config-pod' is not running in namespace 'workloads'. Current status: $POD_STATUS" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/001/scripts/validation/q9_s3_validate_pod_env_vars.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate if the Pod 'config-pod' has the environment variables from the ConfigMap 4 | POD_APP_ENV=$(kubectl exec config-pod -n workloads -- env | grep APP_ENV | cut -d '=' -f 2 2>/dev/null) 5 | POD_LOG_LEVEL=$(kubectl exec config-pod -n workloads -- env | grep LOG_LEVEL | cut -d '=' -f 2 2>/dev/null) 6 | 7 | if [ "$POD_APP_ENV" = "production" ] && [ "$POD_LOG_LEVEL" = "info" ]; then 8 | echo "Success: Pod 'config-pod' has the correct environment variables from ConfigMap" 9 | exit 0 10 | else 11 | echo "Error: Pod 'config-pod' does not have the correct environment variables." 12 | echo "Expected: APP_ENV=production, LOG_LEVEL=info" 13 | echo "Found: APP_ENV=$POD_APP_ENV, LOG_LEVEL=$POD_LOG_LEVEL" 14 | exit 1 15 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "lab": "ckad-002", 3 | "workerNodes": 1, 4 | "answers": "assets/exams/ckad/002/answers.md", 5 | "questions": "assessment.json", 6 | "totalMarks": 100, 7 | "lowScore": 50, 8 | "mediumScore": 70, 9 | "highScore": 90 10 | } -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q10_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the network-policy namespace if it exists 4 | echo "Setting up environment for Question 10 (Network Policies)..." 5 | kubectl delete namespace network-policy --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 10" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q11_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the security namespace if it exists 4 | echo "Setting up environment for Question 11 (Security Context)..." 
5 | kubectl delete namespace security --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 11" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q12_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Make sure Docker is available 4 | which docker > /dev/null 2>&1 5 | if [[ $? -ne 0 ]]; then 6 | echo "Docker is not available on this system" 7 | exit 1 8 | fi 9 | 10 | # Create directory for working files if it doesn't exist 11 | mkdir -p /tmp 12 | 13 | # Clean up any existing resources that might conflict 14 | docker stop my-web > /dev/null 2>&1 15 | docker rm my-web > /dev/null 2>&1 16 | docker rmi my-nginx:v1 > /dev/null 2>&1 17 | rm -f /tmp/Dockerfile /tmp/index.html > /dev/null 2>&1 18 | 19 | echo "Setup complete for Question 12" 20 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q13_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the jobs namespace if it exists 4 | echo "Setting up environment for Question 13 (Jobs)..." 5 | kubectl delete namespace jobs --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 13" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q14_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the init-containers namespace if it exists 4 | echo "Setting up environment for Question 14 (Init Containers)..." 5 | kubectl delete namespace init-containers --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 14" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q15_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if Helm is installed 4 | if ! command -v helm &> /dev/null; then 5 | echo "Helm is not available, skipping setup" 6 | exit 0 7 | fi 8 | 9 | # Clean up any existing resources 10 | kubectl delete namespace helm-basics --ignore-not-found=true 11 | 12 | 13 | echo "Setup complete for Question 15" 14 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q16_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the health-checks namespace if it exists 4 | echo "Setting up environment for Question 16 (Health Checks)..." 
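# (Aside, not part of this script: this question exercises liveness and
# readiness probes in the 'health-checks' namespace; a generic container
# snippet for orientation, where every path, port and number is an assumption:
#   livenessProbe:
#     httpGet:
#       path: /healthz
#       port: 80
#     initialDelaySeconds: 5
#     periodSeconds: 10
#   readinessProbe:
#     httpGet:
#       path: /
#       port: 80 )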
5 | kubectl delete namespace health-checks --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 16" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q17_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the pod-lifecycle namespace if it exists 4 | echo "Setting up environment for Question 17 (Pod Lifecycle)..." 5 | kubectl delete namespace pod-lifecycle --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 17" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q18_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Setup complete for Question 18" 3 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q1_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Clean up any previous resources 4 | kubectl delete namespace core-concepts --ignore-not-found=true 5 | 6 | # Wait for namespace to be deleted 7 | echo "Setting up environment for Question 1..." 8 | sleep 2 9 | 10 | echo "Environment ready for Question 1." -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q20_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the pod-configuration namespace if it exists 4 | echo "Setting up environment for Question 20 (Pod Configuration)..." 5 | kubectl delete namespace pod-configuration --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 20" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q2_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the pod-concepts namespace if it exists 4 | echo "Setting up environment for Question 2 (Multi-Container Pod)..." 5 | kubectl delete namespace pod-concepts --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 2" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q3_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the app-namespace namespace if it exists 4 | echo "Setting up environment for Question 3 (Deployment)..." 
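# (Aside, not part of this script: a typical imperative shape for a deployment
# task in this namespace, where only 'app-namespace' comes from this script and
# the deployment name, image and replica count are assumptions:
#   kubectl create namespace app-namespace
#   kubectl create deployment web-app --image=nginx --replicas=3 -n app-namespace )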
5 | kubectl delete namespace app-namespace --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 3" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q4_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the config-management namespace if it exists 4 | echo "Setting up environment for Question 4 (ConfigMap)..." 5 | kubectl delete namespace config-management --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 4" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q5_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the resources namespace if it exists 4 | echo "Setting up environment for Question 5 (Resource Limits)..." 5 | kubectl delete namespace resources --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 5" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q6_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the services namespace if it exists 4 | echo "Setting up environment for Question 6 (Services)..." 5 | kubectl delete namespace services --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 6" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q7_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the persistence namespace if it exists 4 | echo "Setting up environment for Question 7 (Persistent Volume Claims)..." 5 | kubectl delete namespace persistence --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 7" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q8_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Delete the batch namespace if it exists 4 | echo "Setting up environment for Question 8 (CronJob)..." 
5 | kubectl delete namespace batch --ignore-not-found=true 6 | 7 | # Wait for deletion to complete 8 | sleep 2 9 | 10 | # Confirm environment is ready 11 | echo "Environment ready for Question 8" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/setup/q9_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Create the troubleshooting namespace 4 | kubectl create namespace troubleshooting 5 | 6 | # Create a broken deployment - using an invalid image name 7 | cat <<EOF | kubectl apply -f -
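
The heredoc body is missing from this dump. Judging from the q9 validation scripts below (deployment 'broken-deployment' in 'troubleshooting', 3 replicas, pods labelled app=nginx, image meant to be fixed to nginx), it plausibly resembled the following sketch; the exact broken tag ('nginx:lates' here) is an assumption:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: broken-deployment
  namespace: troubleshooting
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:lates   # deliberately invalid image tag (assumed placeholder)
EOF
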
-------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q10_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the networking namespace exists 4 | NS=$(kubectl get namespace networking -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "networking" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'networking' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q11_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the security namespace exists 4 | NS=$(kubectl get namespace security -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "security" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'security' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q11_s2_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the secure-app pod exists 4 | POD=$(kubectl get pod secure-app -n security -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "secure-app" ]]; then 7 | # Pod exists, now check the image 8 | IMAGE=$(kubectl get pod secure-app -n security -o jsonpath='{.spec.containers[0].image}' 2>/dev/null) 9 | if [[ "$IMAGE" == "nginx:alpine" ]]; then 10 | # Image is correct 11 | exit 0 12 | else 13 | echo "Pod 'secure-app' does not use the 'nginx:alpine' image. Found: $IMAGE" 14 | exit 1 15 | fi 16 | else 17 | # Pod does not exist 18 | echo "Pod 'secure-app' does not exist in the 'security' namespace" 19 | exit 1 20 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q11_s3_validate_user.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the secure-app pod runs as non-root user (UID 1000) 4 | POD_SECURITY_CONTEXT=$(kubectl get pod secure-app -n security -o jsonpath='{.spec.securityContext.runAsUser}' 2>/dev/null) 5 | 6 | if [[ "$POD_SECURITY_CONTEXT" == "1000" ]]; then 7 | # Pod runs with UID 1000 8 | exit 0 9 | else 10 | # Not running as UID 1000 at pod level, check container level 11 | CONTAINER_SECURITY_CONTEXT=$(kubectl get pod secure-app -n security -o jsonpath='{.spec.containers[0].securityContext.runAsUser}' 2>/dev/null) 12 | 13 | if [[ "$CONTAINER_SECURITY_CONTEXT" == "1000" ]]; then 14 | # Container runs with UID 1000 15 | exit 0 16 | else 17 | echo "Pod 'secure-app' does not run as UID 1000. Found pod: $POD_SECURITY_CONTEXT, container: $CONTAINER_SECURITY_CONTEXT" 18 | exit 1 19 | fi 20 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q11_s4_validate_security_context.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that security context is configured correctly 4 | # Check readOnlyRootFilesystem 5 | READONLY_ROOT=$(kubectl get pod secure-app -n security -o jsonpath='{.spec.containers[0].securityContext.readOnlyRootFilesystem}' 2>/dev/null) 6 | 7 | # Check if capabilities are dropped 8 | DROP_ALL=$(kubectl get pod secure-app -n security -o jsonpath='{.spec.containers[0].securityContext.capabilities.drop}' 2>/dev/null) 9 | 10 | if [[ "$READONLY_ROOT" == "true" ]]; then 11 | # readOnlyRootFilesystem is configured correctly 12 | if [[ "$DROP_ALL" == *"ALL"* ]]; then 13 | # Capabilities are dropped correctly 14 | exit 0 15 | else 16 | echo "Pod 'secure-app' does not drop all capabilities. Found: $DROP_ALL" 17 | exit 1 18 | fi 19 | else 20 | echo "Pod 'secure-app' does not have readOnlyRootFilesystem set to true. Found: $READONLY_ROOT" 21 | exit 1 22 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q12_s1_validate_dockerfile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the Dockerfile exists 4 | if [[ ! -f "/tmp/Dockerfile" ]]; then 5 | echo "❌ File '/tmp/Dockerfile' not found" 6 | exit 1 7 | fi 8 | 9 | # Check if the Dockerfile contains necessary elements 10 | if ! grep -q "FROM.*nginx:alpine" /tmp/Dockerfile; then 11 | echo "❌ Dockerfile should use 'nginx:alpine' as base image" 12 | exit 1 13 | fi 14 | 15 | if ! grep -q "COPY.*index.html" /tmp/Dockerfile; then 16 | echo "❌ Dockerfile should copy 'index.html' file" 17 | exit 1 18 | fi 19 | 20 | if ! grep -q "EXPOSE.*80" /tmp/Dockerfile; then 21 | echo "❌ Dockerfile should expose port 80" 22 | exit 1 23 | fi 24 | 25 | echo "✅ Dockerfile exists with correct content" 26 | exit 0
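
The q12 validators (Dockerfile above, HTML and image below) pin down most of the expected artifacts. A minimal solution sketch that would satisfy them; the COPY destination path and any HTML beyond the checked strings are assumptions:

# Hypothetical candidate solution for Question 12
cat > /tmp/Dockerfile <<'DOCKERFILE'
FROM nginx:alpine
COPY index.html /usr/share/nginx/html/index.html
EXPOSE 80
DOCKERFILE

cat > /tmp/index.html <<'HTML'
<!DOCTYPE html>
<html>
<body><h1>Hello from CKAD Docker Question</h1></body>
</html>
HTML

docker build -t my-nginx:v1 /tmp   # q12_s3 expects image 'my-nginx:v1'
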
grep -q "" /tmp/index.html; then 27 | echo "❌ HTML file should contain tag" 28 | exit 1 29 | fi 30 | 31 | echo "✅ index.html exists with correct content" 32 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q12_s3_validate_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the Docker image exists 4 | docker image inspect my-nginx:v1 &> /dev/null 5 | if [[ $? -ne 0 ]]; then 6 | echo "❌ Docker image 'my-nginx:v1' not found" 7 | exit 1 8 | fi 9 | 10 | #Docker image is built correctly 11 | echo "✅ Docker image 'my-nginx:v1' has been built correctly with all required elements" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q13_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the jobs namespace exists 4 | NS=$(kubectl get namespace jobs -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "jobs" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'jobs' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q13_s3_validate_job_policy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the job has correct restart policy and backoff limit 4 | # Check restart policy 5 | RESTART_POLICY=$(kubectl get job data-processor -n jobs -o jsonpath='{.spec.template.spec.restartPolicy}' 2>/dev/null) 6 | 7 | # Check backoff limit 8 | BACKOFF_LIMIT=$(kubectl get job data-processor -n jobs -o jsonpath='{.spec.backoffLimit}' 2>/dev/null) 9 | 10 | if [[ "$RESTART_POLICY" == "Never" ]]; then 11 | # Restart policy is correct 12 | if [[ "$BACKOFF_LIMIT" == "4" ]]; then 13 | # Backoff limit is correct 14 | exit 0 15 | else 16 | echo "Job 'data-processor' does not have correct backoff limit. Found: $BACKOFF_LIMIT (expected: 4)" 17 | exit 1 18 | fi 19 | else 20 | echo "Job 'data-processor' does not have correct restart policy. Found: $RESTART_POLICY (expected: Never)" 21 | exit 1 22 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q13_s4_validate_job_deadline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the job has correct active deadline seconds 4 | ACTIVE_DEADLINE_SECONDS=$(kubectl get job data-processor -n jobs -o jsonpath='{.spec.activeDeadlineSeconds}' 2>/dev/null) 5 | 6 | if [[ "$ACTIVE_DEADLINE_SECONDS" == "30" ]]; then 7 | # Active deadline seconds is correct 8 | exit 0 9 | else 10 | echo "Job 'data-processor' does not have correct active deadline seconds. 
-------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q13_s4_validate_job_deadline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the job has correct active deadline seconds 4 | ACTIVE_DEADLINE_SECONDS=$(kubectl get job data-processor -n jobs -o jsonpath='{.spec.activeDeadlineSeconds}' 2>/dev/null) 5 | 6 | if [[ "$ACTIVE_DEADLINE_SECONDS" == "30" ]]; then 7 | # Active deadline seconds is correct 8 | exit 0 9 | else 10 | echo "Job 'data-processor' does not have correct active deadline seconds. Found: $ACTIVE_DEADLINE_SECONDS (expected: 30)" 11 | exit 1 12 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q14_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the init-containers namespace exists 4 | NS=$(kubectl get namespace init-containers -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "init-containers" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'init-containers' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q15_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the namespace exists 4 | kubectl get namespace helm-basics &> /dev/null 5 | if [[ $? -eq 0 ]]; then 6 | echo "✅ Namespace 'helm-basics' exists" 7 | exit 0 8 | else 9 | echo "❌ Namespace 'helm-basics' not found" 10 | exit 1 11 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q15_s2_validate_chart_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if Helm is installed 4 | if ! command -v helm &> /dev/null; then 5 | echo "❌ Helm is not available on this system" 6 | exit 1 7 | fi 8 | 9 | # Check if Bitnami repository is added 10 | REPO_EXISTS=$(helm repo list | grep bitnami | wc -l) 11 | if [[ "$REPO_EXISTS" -eq 0 ]]; then 12 | echo "❌ Bitnami repository is not added to Helm" 13 | exit 1 14 | fi 15 | 16 | # Check if the nginx chart is installed 17 | RELEASE_EXISTS=$(helm list -n helm-basics | grep nginx-release | wc -l) 18 | if [[ "$RELEASE_EXISTS" -eq 0 ]]; then 19 | echo "❌ nginx chart is not installed in the 'helm-basics' namespace" 20 | exit 1 21 | fi 22 | 23 | # Check if pods related to the release are running 24 | PODS_RUNNING=$(kubectl get pods -n helm-basics -l app.kubernetes.io/instance=nginx-release | grep Running | wc -l) 25 | if [[ "$PODS_RUNNING" -eq 0 ]]; then 26 | echo "❌ No pods from the nginx release are running" 27 | exit 1 28 | fi 29 | 30 | echo "✅ Helm chart 'nginx' is installed correctly in the 'helm-basics' namespace" 31 | exit 0
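
A command sequence that would satisfy the q15 validators above (Bitnami repo present, release 'nginx-release' running in 'helm-basics') and q15_s3 below (notes saved to /tmp/release-notes.txt); the chart repository URL is the standard Bitnami one and is assumed here:

helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
kubectl create namespace helm-basics
helm install nginx-release bitnami/nginx --namespace helm-basics
helm get notes nginx-release --namespace helm-basics > /tmp/release-notes.txt
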
grep -q "nginx" /tmp/release-notes.txt; then 17 | echo "❌ File does not appear to contain Helm release notes" 18 | exit 1 19 | fi 20 | 21 | echo "✅ Release notes are saved correctly to /tmp/release-notes.txt" 22 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q16_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the health-checks namespace exists 4 | NS=$(kubectl get namespace health-checks -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "health-checks" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'health-checks' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q16_s2_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod exists with correct name and image 4 | POD=$(kubectl get pod health-check-pod -n health-checks -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "health-check-pod" ]]; then 7 | # Pod exists, now check image 8 | IMAGE=$(kubectl get pod health-check-pod -n health-checks -o jsonpath='{.spec.containers[0].image}' 2>/dev/null) 9 | 10 | if [[ "$IMAGE" == "nginx" ]]; then 11 | # Pod has correct image 12 | exit 0 13 | else 14 | echo "Pod 'health-check-pod' does not use the correct image. Found: $IMAGE (expected: nginx)" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'health-check-pod' does not exist in the 'health-checks' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q17_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod-lifecycle namespace exists 4 | NS=$(kubectl get namespace pod-lifecycle -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "pod-lifecycle" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'pod-lifecycle' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q17_s2_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod exists with correct name and image 4 | POD=$(kubectl get pod lifecycle-pod -n pod-lifecycle -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "lifecycle-pod" ]]; then 7 | # Pod exists, now check image 8 | IMAGE=$(kubectl get pod lifecycle-pod -n pod-lifecycle -o jsonpath='{.spec.containers[0].image}' 2>/dev/null) 9 | 10 | if [[ "$IMAGE" == "nginx" ]]; then 11 | # Pod has correct image 12 | exit 0 13 | else 14 | echo "Pod 'lifecycle-pod' does not use the correct image. 
Found: $IMAGE (expected: nginx)" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'lifecycle-pod' does not exist in the 'pod-lifecycle' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q17_s5_validate_grace_period.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod has the correct termination grace period 4 | POD=$(kubectl get pod lifecycle-pod -n pod-lifecycle -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "lifecycle-pod" ]]; then 7 | # Pod exists, now check if it has a termination grace period 8 | GRACE_PERIOD=$(kubectl get pod lifecycle-pod -n pod-lifecycle -o jsonpath='{.spec.terminationGracePeriodSeconds}' 2>/dev/null) 9 | 10 | if [[ "$GRACE_PERIOD" == "30" ]]; then 11 | # Grace period is correctly set to 30 seconds 12 | exit 0 13 | elif [[ "$GRACE_PERIOD" == "" ]]; then 14 | echo "Pod 'lifecycle-pod' does not have a termination grace period specified (using default of 30 seconds)" 15 | exit 1 16 | else 17 | echo "Pod 'lifecycle-pod' has incorrect termination grace period. Found: $GRACE_PERIOD (expected: 45)" 18 | exit 1 19 | fi 20 | else 21 | # Pod does not exist 22 | echo "Pod 'lifecycle-pod' does not exist in the 'pod-lifecycle' namespace" 23 | exit 1 24 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the namespace exists 4 | kubectl get namespace crd-demo &> /dev/null 5 | if [[ $? -eq 0 ]]; then 6 | echo "✅ Namespace 'crd-demo' exists" 7 | exit 0 8 | else 9 | echo "❌ Namespace 'crd-demo' not found" 10 | exit 1 11 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s2_validate_priority_class.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the high-priority PriorityClass exists 4 | PC=$(kubectl get priorityclass high-priority -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$PC" == "high-priority" ]]; then 7 | # PriorityClass exists, check the value 8 | VALUE=$(kubectl get priorityclass high-priority -o jsonpath='{.value}' 2>/dev/null) 9 | 10 | if [[ "$VALUE" == "1000" ]]; then 11 | # PriorityClass has correct value 12 | exit 0 13 | else 14 | echo "PriorityClass 'high-priority' exists but has incorrect priority value. Found: $VALUE (expected: 1000)" 15 | exit 1 16 | fi 17 | else 18 | # PriorityClass does not exist 19 | echo "PriorityClass 'high-priority' does not exist" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s3_validate_cr_name.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the CRD exists first (prerequisite) 4 | kubectl get crd applications.training.ckad.io &> /dev/null 5 | if [[ $? -ne 0 ]]; then 6 | echo "❌ CRD 'applications.training.ckad.io' not found. Cannot validate custom resources." 7 | exit 1 8 | fi 9 | 10 | # Check if the custom resource exists 11 | kubectl get application my-app -n crd-demo &> /dev/null 12 | if [[ $? 
-------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the namespace exists 4 | kubectl get namespace crd-demo &> /dev/null 5 | if [[ $? -eq 0 ]]; then 6 | echo "✅ Namespace 'crd-demo' exists" 7 | exit 0 8 | else 9 | echo "❌ Namespace 'crd-demo' not found" 10 | exit 1 11 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s2_validate_priority_class.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the high-priority PriorityClass exists 4 | PC=$(kubectl get priorityclass high-priority -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$PC" == "high-priority" ]]; then 7 | # PriorityClass exists, check the value 8 | VALUE=$(kubectl get priorityclass high-priority -o jsonpath='{.value}' 2>/dev/null) 9 | 10 | if [[ "$VALUE" == "1000" ]]; then 11 | # PriorityClass has correct value 12 | exit 0 13 | else 14 | echo "PriorityClass 'high-priority' exists but has incorrect priority value. Found: $VALUE (expected: 1000)" 15 | exit 1 16 | fi 17 | else 18 | # PriorityClass does not exist 19 | echo "PriorityClass 'high-priority' does not exist" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s3_validate_cr_name.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the CRD exists first (prerequisite) 4 | kubectl get crd applications.training.ckad.io &> /dev/null 5 | if [[ $? -ne 0 ]]; then 6 | echo "❌ CRD 'applications.training.ckad.io' not found. Cannot validate custom resources." 7 | exit 1 8 | fi 9 | 10 | # Check if the custom resource exists 11 | kubectl get application my-app -n crd-demo &> /dev/null 12 | if [[ $? -ne 0 ]]; then 13 | echo "❌ Custom resource 'my-app' not found in namespace 'crd-demo'" 14 | exit 1 15 | fi 16 | 17 | echo "✅ Custom resource 'my-app' exists with correct name" 18 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s3_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod exists and uses the high-priority priority class 4 | POD=$(kubectl get pod scheduled-pod -n pod-scheduling -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "scheduled-pod" ]]; then 7 | # Pod exists, check if it uses the priority class 8 | PRIORITY_CLASS=$(kubectl get pod scheduled-pod -n pod-scheduling -o jsonpath='{.spec.priorityClassName}' 2>/dev/null) 9 | 10 | if [[ "$PRIORITY_CLASS" == "high-priority" ]]; then 11 | # Pod uses correct priority class 12 | exit 0 13 | else 14 | echo "Pod 'scheduled-pod' does not use the 'high-priority' priority class. Found: $PRIORITY_CLASS" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'scheduled-pod' does not exist in the 'pod-scheduling' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s4_validate_cr_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the custom resource exists first 4 | kubectl get application my-app -n crd-demo &> /dev/null 5 | if [[ $? -ne 0 ]]; then 6 | echo "❌ Custom resource 'my-app' not found in namespace 'crd-demo'" 7 | exit 1 8 | fi 9 | 10 | # Check if the image field is set correctly 11 | IMAGE=$(kubectl get application my-app -n crd-demo -o jsonpath='{.spec.image}' 2>/dev/null) 12 | if [[ "$IMAGE" != "nginx:1.19.0" ]]; then 13 | echo "❌ Custom resource should have spec.image='nginx:1.19.0'. Current value: '$IMAGE'" 14 | exit 1 15 | fi 16 | 17 | echo "✅ Custom resource has correct image field" 18 | exit 0
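
Pulling the q18 my-app checks together (name from q18_s3, image from q18_s4, replicas from q18_s5 below), a custom resource sketch that would pass them; the version suffix in the apiVersion (v1) is an assumption:

kubectl apply -f - <<'EOF'
apiVersion: training.ckad.io/v1   # group taken from the CRD name; version is assumed
kind: Application
metadata:
  name: my-app
  namespace: crd-demo
spec:
  image: nginx:1.19.0
  replicas: 3
EOF
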
-------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s4_validate_node_selector.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod has the correct node selector 4 | POD=$(kubectl get pod scheduled-pod -n pod-scheduling -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "scheduled-pod" ]]; then 7 | # Pod exists, check if it has a node selector 8 | NODE_SELECTOR=$(kubectl get pod scheduled-pod -n pod-scheduling -o jsonpath='{.spec.nodeSelector.disk}' 2>/dev/null) 9 | 10 | if [[ "$NODE_SELECTOR" == "ssd" ]]; then 11 | # Pod has correct node selector 12 | exit 0 13 | else 14 | echo "Pod 'scheduled-pod' does not have the correct node selector. Expected nodeSelector.disk=ssd, found: $NODE_SELECTOR" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'scheduled-pod' does not exist in the 'pod-scheduling' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q18_s5_validate_cr_replicas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the custom resource exists first 4 | kubectl get application my-app -n crd-demo &> /dev/null 5 | if [[ $? -ne 0 ]]; then 6 | echo "❌ Custom resource 'my-app' not found in namespace 'crd-demo'" 7 | exit 1 8 | fi 9 | 10 | # Check if the replicas field is set correctly 11 | REPLICAS=$(kubectl get application my-app -n crd-demo -o jsonpath='{.spec.replicas}' 2>/dev/null) 12 | if [[ "$REPLICAS" != "3" ]]; then 13 | echo "❌ Custom resource should have spec.replicas=3. Current value: '$REPLICAS'" 14 | exit 1 15 | fi 16 | 17 | echo "✅ Custom resource has correct replicas field" 18 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q19_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the namespace exists 4 | kubectl get namespace custom-columns-demo &> /dev/null 5 | if [[ $? -eq 0 ]]; then 6 | echo "✅ Namespace 'custom-columns-demo' exists" 7 | exit 0 8 | else 9 | echo "❌ Namespace 'custom-columns-demo' not found" 10 | exit 1 11 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q19_s2_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the network-pod exists 4 | POD=$(kubectl get pod network-pod -n pod-networking -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "network-pod" ]]; then 7 | # Pod exists, check image 8 | IMAGE=$(kubectl get pod network-pod -n pod-networking -o jsonpath='{.spec.containers[0].image}' 2>/dev/null) 9 | 10 | if [[ "$IMAGE" == "nginx:alpine" ]]; then 11 | # Pod has correct image 12 | exit 0 13 | else 14 | echo "Pod 'network-pod' has incorrect image. Found: $IMAGE (expected: nginx:alpine)" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'network-pod' does not exist in the 'pod-networking' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q19_s4_validate_dns_policy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod has the correct DNS policy 4 | POD=$(kubectl get pod network-pod -n pod-networking -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "network-pod" ]]; then 7 | # Pod exists, check DNS policy 8 | DNS_POLICY=$(kubectl get pod network-pod -n pod-networking -o jsonpath='{.spec.dnsPolicy}' 2>/dev/null) 9 | 10 | if [[ "$DNS_POLICY" == "ClusterFirstWithHostNet" ]]; then 11 | # Pod has correct DNS policy 12 | exit 0 13 | else 14 | echo "Pod 'network-pod' has incorrect DNS policy.
Found: $DNS_POLICY (expected: ClusterFirstWithHostNet)" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'network-pod' does not exist in the 'pod-networking' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q1_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the core-concepts namespace exists 4 | NS=$(kubectl get namespace core-concepts -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "core-concepts" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'core-concepts' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q1_s2_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the nginx-pod exists in core-concepts namespace 4 | POD=$(kubectl get pod nginx-pod -n core-concepts -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "nginx-pod" ]]; then 7 | # Pod exists with correct name 8 | exit 0 9 | else 10 | # Pod does not exist or has a different name 11 | echo "Pod 'nginx-pod' does not exist in namespace 'core-concepts'" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q1_s3_validate_pod_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the nginx-pod is using the nginx image 4 | IMAGE=$(kubectl get pod nginx-pod -n core-concepts -o jsonpath='{.spec.containers[0].image}' 2>/dev/null) 5 | 6 | if [[ "$IMAGE" == *"nginx"* ]]; then 7 | # Pod is using the correct image 8 | exit 0 9 | else 10 | # Pod is using a different image 11 | echo "Pod 'nginx-pod' is not using the nginx image. Found: $IMAGE" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q1_s4_validate_pod_labels.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the nginx-pod has the correct labels 4 | APP_LABEL=$(kubectl get pod nginx-pod -n core-concepts -o jsonpath='{.metadata.labels.app}' 2>/dev/null) 5 | ENV_LABEL=$(kubectl get pod nginx-pod -n core-concepts -o jsonpath='{.metadata.labels.env}' 2>/dev/null) 6 | 7 | if [[ "$APP_LABEL" == "web" && "$ENV_LABEL" == "prod" ]]; then 8 | # Pod has the correct labels 9 | exit 0 10 | else 11 | # Pod has incorrect or missing labels 12 | echo "Pod 'nginx-pod' does not have the correct labels. Found app=$APP_LABEL, env=$ENV_LABEL. 
Expected app=web, env=prod" 13 | exit 1 14 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q20_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if namespace exists 4 | NS_EXISTS=$(kubectl get namespace pod-configuration --no-headers --output=name 2>/dev/null | wc -l) 5 | if [[ "$NS_EXISTS" -eq 1 ]]; then 6 | echo "✅ Namespace 'pod-configuration' exists" 7 | exit 0 8 | else 9 | echo "❌ Namespace 'pod-configuration' not found" 10 | exit 1 11 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q20_s2_validate_configmap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the ConfigMap exists 4 | CM_EXISTS=$(kubectl get configmap app-config -n pod-configuration --no-headers --output=name 2>/dev/null | wc -l) 5 | if [[ "$CM_EXISTS" -eq 0 ]]; then 6 | echo "❌ ConfigMap 'app-config' not found in namespace 'pod-configuration'" 7 | exit 1 8 | fi 9 | 10 | # Check for required keys 11 | DB_HOST=$(kubectl get configmap app-config -n pod-configuration -o jsonpath='{.data.DB_HOST}' 2>/dev/null) 12 | if [[ -z "$DB_HOST" ]]; then 13 | echo "❌ ConfigMap 'app-config' is missing key 'DB_HOST'" 14 | exit 1 15 | fi 16 | 17 | DB_PORT=$(kubectl get configmap app-config -n pod-configuration -o jsonpath='{.data.DB_PORT}' 2>/dev/null) 18 | if [[ -z "$DB_PORT" ]]; then 19 | echo "❌ ConfigMap 'app-config' is missing key 'DB_PORT'" 20 | exit 1 21 | fi 22 | 23 | echo "✅ ConfigMap 'app-config' exists with required keys in namespace 'pod-configuration'" 24 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q20_s3_validate_secret.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Check if the Secret exists 4 | SECRET_EXISTS=$(kubectl get secret app-secret -n pod-configuration --no-headers --output=name 2>/dev/null | wc -l) 5 | if [[ "$SECRET_EXISTS" -eq 0 ]]; then 6 | echo "❌ Secret 'app-secret' not found in namespace 'pod-configuration'" 7 | exit 1 8 | fi 9 | 10 | # Check for required keys 11 | API_KEY=$(kubectl get secret app-secret -n pod-configuration -o jsonpath='{.data.API_KEY}' 2>/dev/null) 12 | if [[ -z "$API_KEY" ]]; then 13 | echo "❌ Secret 'app-secret' is missing key 'API_KEY'" 14 | exit 1 15 | fi 16 | 17 | API_SECRET=$(kubectl get secret app-secret -n pod-configuration -o jsonpath='{.data.API_SECRET}' 2>/dev/null) 18 | if [[ -z "$API_SECRET" ]]; then 19 | echo "❌ Secret 'app-secret' is missing key 'API_SECRET'" 20 | exit 1 21 | fi 22 | 23 | echo "✅ Secret 'app-secret' exists with required keys in namespace 'pod-configuration'" 24 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q2_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the multi-container namespace exists 4 | NS=$(kubectl get namespace multi-container -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "multi-container" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'multi-container' does not exist" 12 | exit 
-------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q2_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the multi-container namespace exists 4 | NS=$(kubectl get namespace multi-container -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "multi-container" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'multi-container' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q2_s2_validate_pod_containers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the multi-container-pod exists 4 | POD=$(kubectl get pod multi-container-pod -n multi-container -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "multi-container-pod" ]]; then 7 | # Pod exists, now check if it has two containers 8 | CONTAINER_COUNT=$(kubectl get pod multi-container-pod -n multi-container -o jsonpath='{.spec.containers}' 2>/dev/null | jq '. | length') 9 | 10 | if [[ "$CONTAINER_COUNT" == "2" ]]; then 11 | # Pod has two containers 12 | exit 0 13 | else 14 | echo "Pod 'multi-container-pod' does not have two containers. Found: $CONTAINER_COUNT" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'multi-container-pod' does not exist in the 'multi-container' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q2_s3_validate_container_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the container images are correct 4 | MAIN_CONTAINER_IMAGE=$(kubectl get pod multi-container-pod -n multi-container -o jsonpath='{.spec.containers[?(@.name=="main-container")].image}' 2>/dev/null) 5 | SIDECAR_CONTAINER_IMAGE=$(kubectl get pod multi-container-pod -n multi-container -o jsonpath='{.spec.containers[?(@.name=="sidecar-container")].image}' 2>/dev/null) 6 | 7 | if [[ "$MAIN_CONTAINER_IMAGE" == "nginx" ]]; then 8 | # Main container image is correct 9 | if [[ "$SIDECAR_CONTAINER_IMAGE" == "busybox" ]]; then 10 | # Sidecar container image is correct 11 | exit 0 12 | else 13 | echo "Sidecar container image is incorrect. Found: $SIDECAR_CONTAINER_IMAGE (expected: busybox)" 14 | exit 1 15 | fi 16 | else 17 | echo "Main container image is incorrect. Found: $MAIN_CONTAINER_IMAGE (expected: nginx)" 18 | exit 1 19 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q3_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod-design namespace exists 4 | NS=$(kubectl get namespace pod-design -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "pod-design" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'pod-design' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q3_s3_validate_replicas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the deployment has the correct number of replicas 4 | REPLICAS=$(kubectl get deployment frontend -n pod-design -o jsonpath='{.spec.replicas}' 2>/dev/null) 5 | 6 | if [[ "$REPLICAS" == "3" ]]; then 7 | # Deployment has correct number of replicas 8 | exit 0 9 | else 10 | # Deployment has incorrect number of replicas 11 | echo "Deployment 'frontend' does not have correct number of replicas.
Found: $REPLICAS (expected: 3)" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q4_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the configuration namespace exists 4 | NS=$(kubectl get namespace configuration -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "configuration" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'configuration' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q4_s4_validate_pod_configmap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod uses ConfigMap as environment variables 4 | POD=$(kubectl get pod app-pod -n configuration -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "app-pod" ]]; then 7 | # Pod exists, now check if it uses ConfigMap as environment variables 8 | CONFIG_MAP_ENV=$(kubectl get pod app-pod -n configuration -o jsonpath='{.spec.containers[0].envFrom[?(@.configMapRef.name=="app-config")].configMapRef.name}' 2>/dev/null) 9 | 10 | if [[ "$CONFIG_MAP_ENV" == "app-config" ]]; then 11 | # Pod uses ConfigMap as environment variables 12 | exit 0 13 | else 14 | echo "Pod 'app-pod' does not use ConfigMap 'app-config' as environment variables" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'app-pod' does not exist in the 'configuration' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q5_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the observability namespace exists 4 | NS=$(kubectl get namespace observability -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "observability" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'observability' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q5_s2_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the probes-pod exists with correct name and image 4 | POD=$(kubectl get pod probes-pod -n observability -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "probes-pod" ]]; then 7 | # Pod exists, now check image 8 | IMAGE=$(kubectl get pod probes-pod -n observability -o jsonpath='{.spec.containers[0].image}' 2>/dev/null) 9 | 10 | if [[ "$IMAGE" == "nginx" ]]; then 11 | # Pod has correct image 12 | exit 0 13 | else 14 | echo "Pod 'probes-pod' does not use the correct image. 
Found: $IMAGE (expected: nginx)" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'probes-pod' does not exist in the 'observability' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q6_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the services namespace exists 4 | NS=$(kubectl get namespace services -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "services" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'services' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q7_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the state namespace exists 4 | NS=$(kubectl get namespace state -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "state" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'state' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q7_s3_validate_pvc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the db-pvc PersistentVolumeClaim exists 4 | PVC=$(kubectl get pvc db-pvc -n state -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$PVC" == "db-pvc" ]]; then 7 | # PVC exists, now check specs 8 | 9 | # Check access mode 10 | ACCESS_MODE=$(kubectl get pvc db-pvc -n state -o jsonpath='{.spec.accessModes[0]}' 2>/dev/null) 11 | 12 | # Check requested storage 13 | STORAGE=$(kubectl get pvc db-pvc -n state -o jsonpath='{.spec.resources.requests.storage}' 2>/dev/null) 14 | 15 | if [[ "$ACCESS_MODE" == "ReadWriteOnce" && "$STORAGE" == "500Mi" ]]; then 16 | # PVC is configured correctly 17 | exit 0 18 | else 19 | echo "PersistentVolumeClaim 'db-pvc' is not configured correctly." 20 | echo "Found access mode: $ACCESS_MODE (expected: ReadWriteOnce)" 21 | echo "Found requested storage: $STORAGE (expected: 500Mi)" 22 | exit 1 23 | fi 24 | else 25 | # PVC does not exist 26 | echo "PersistentVolumeClaim 'db-pvc' does not exist in the 'state' namespace" 27 | exit 1 28 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q7_s4_validate_pod.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the db-pod exists with correct image 4 | POD=$(kubectl get pod db-pod -n state -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$POD" == "db-pod" ]]; then 7 | # Pod exists, now check image 8 | IMAGE=$(kubectl get pod db-pod -n state -o jsonpath='{.spec.containers[0].image}' 2>/dev/null) 9 | 10 | if [[ "$IMAGE" == "mysql:5.7" ]]; then 11 | # Pod has correct image 12 | exit 0 13 | else 14 | echo "Pod 'db-pod' does not use the correct image. 
Found: $IMAGE (expected: mysql:5.7)" 15 | exit 1 16 | fi 17 | else 18 | # Pod does not exist 19 | echo "Pod 'db-pod' does not exist in the 'state' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q8_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the pod-design namespace exists 4 | NS=$(kubectl get namespace pod-design -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$NS" == "pod-design" ]]; then 7 | # Namespace exists 8 | exit 0 9 | else 10 | # Namespace does not exist 11 | echo "Namespace 'pod-design' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q8_s2_validate_cronjob.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the backup-job CronJob exists with correct name and schedule 4 | CRONJOB=$(kubectl get cronjob backup-job -n pod-design -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$CRONJOB" == "backup-job" ]]; then 7 | # CronJob exists, now check schedule 8 | SCHEDULE=$(kubectl get cronjob backup-job -n pod-design -o jsonpath='{.spec.schedule}' 2>/dev/null) 9 | 10 | if [[ "$SCHEDULE" == "*/5 * * * *" ]]; then 11 | # CronJob has correct schedule 12 | exit 0 13 | else 14 | echo "CronJob 'backup-job' does not have correct schedule. Found: $SCHEDULE (expected: */5 * * * *)" 15 | exit 1 16 | fi 17 | else 18 | # CronJob does not exist 19 | echo "CronJob 'backup-job' does not exist in the 'pod-design' namespace" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q9_s1_validate_deployment_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the broken-deployment exists in troubleshooting namespace 4 | DEPLOYMENT=$(kubectl get deployment broken-deployment -n troubleshooting -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$DEPLOYMENT" == "broken-deployment" ]]; then 7 | # Deployment exists with correct name 8 | exit 0 9 | else 10 | # Deployment does not exist or has a different name 11 | echo "Deployment 'broken-deployment' does not exist in namespace 'troubleshooting'" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q9_s2_validate_deployment_replicas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the broken-deployment exists in troubleshooting namespace 4 | DEPLOYMENT=$(kubectl get deployment broken-deployment -n troubleshooting -o jsonpath='{.spec.replicas}' 2>/dev/null) 5 | 6 | if [[ "$DEPLOYMENT" == "3" ]]; then 7 | # Deployment exists with correct name 8 | exit 0 9 | else 10 | # Deployment does not exist or has a different name 11 | echo "Deployment 'broken-deployment' does not have 3 replicas" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q9_s3_validate_pods_running.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Get the 
-------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q9_s1_validate_deployment_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the broken-deployment exists in troubleshooting namespace 4 | DEPLOYMENT=$(kubectl get deployment broken-deployment -n troubleshooting -o jsonpath='{.metadata.name}' 2>/dev/null) 5 | 6 | if [[ "$DEPLOYMENT" == "broken-deployment" ]]; then 7 | # Deployment exists with correct name 8 | exit 0 9 | else 10 | # Deployment does not exist or has a different name 11 | echo "Deployment 'broken-deployment' does not exist in namespace 'troubleshooting'" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q9_s2_validate_deployment_replicas.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the broken-deployment has the correct number of replicas 4 | DEPLOYMENT=$(kubectl get deployment broken-deployment -n troubleshooting -o jsonpath='{.spec.replicas}' 2>/dev/null) 5 | 6 | if [[ "$DEPLOYMENT" == "3" ]]; then 7 | # Deployment has 3 replicas 8 | exit 0 9 | else 10 | # Deployment has a different replica count 11 | echo "Deployment 'broken-deployment' does not have 3 replicas" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q9_s3_validate_pods_running.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Get the total number of pods for the deployment 4 | TOTAL_PODS=$(kubectl get deployment broken-deployment -n troubleshooting -o jsonpath='{.spec.replicas}' 2>/dev/null) 5 | 6 | # Get the number of running pods for the deployment 7 | RUNNING_PODS=$(kubectl get pods -n troubleshooting -l app=nginx --field-selector=status.phase=Running -o name 2>/dev/null | wc -l) 8 | 9 | if [[ "$RUNNING_PODS" -eq "$TOTAL_PODS" && "$TOTAL_PODS" -gt 0 ]]; then 10 | # All pods are running 11 | exit 0 12 | else 13 | # Not all pods are running 14 | echo "Not all pods are running for 'broken-deployment'. Running pods: $RUNNING_PODS, Expected: $TOTAL_PODS" 15 | exit 1 16 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/ckad/002/scripts/validation/q9_s4_validate_pods_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Validate that the container image of deployment 'broken-deployment' is nginx 4 | # and that the deployment is available in the 'troubleshooting' namespace 5 | 6 | # Check whether the deployment reports an Available status 7 | DEPLOYMENT_STATUS=$(kubectl get deployment broken-deployment -n troubleshooting -o jsonpath='{.status.conditions[?(@.type=="Available")].status}') 8 | if [ "$DEPLOYMENT_STATUS" != "True" ]; then 9 | echo "Error: The deployment 'broken-deployment' is not running" 10 | exit 1 11 | fi 12 | 13 | IMAGE=$(kubectl get deployment broken-deployment -n troubleshooting -o jsonpath='{.spec.template.spec.containers[0].image}' | cut -d':' -f1) 14 | 15 | if [ "$IMAGE" == "nginx" ]; then 16 | echo "Success: The container image is correct" 17 | exit 0 18 | else 19 | echo "Error: The container image is not correct" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "lab": "cks-001", 3 | "workerNodes": 1, 4 | "answers": "assets/exams/cks/001/answers.md", 5 | "questions": "assessment.json", 6 | "totalMarks": 100, 7 | "lowScore": 40, 8 | "mediumScore": 60, 9 | "highScore": 90 10 | } -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/setup/q3_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 3: API Security with Pod Security Standards 3 | 4 | # Create namespace if it doesn't exist 5 | kubectl create namespace api-security 2>/dev/null || true 6 | 7 | # Create the PSS viewer ServiceAccount 8 | kubectl apply -f - <<EOF -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/setup/q4_setup.sh: -------------------------------------------------------------------------------- 5 | kubectl create namespace metadata-protect 2>/dev/null || true 6 | 7 | # Create a test pod that can access the metadata endpoint 8 | kubectl apply -f - <<EOF -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/setup/q5_setup.sh: -------------------------------------------------------------------------------- 5 | kubectl create namespace binary-verify 2>/dev/null || true 6 | 7 | # We'll just need to create the namespace for this question 8 | # The student will need to create a pod that mounts the host binaries 9 | 10 | echo "Setup completed for Question 5" 11 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/setup/q7_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup for Question 7: Service Account Caution 3 | 4 | # Create namespace if it doesn't exist 5 | kubectl create namespace service-account-caution 2>/dev/null || true 6 | 7 | # Create a default deployment without service account settings for comparison 8 | kubectl apply -f - <<EOF -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/setup/q8_setup.sh: -------------------------------------------------------------------------------- 5 | kubectl create namespace api-restrict 2>/dev/null || true 6 | 7 | # Create a
ConfigMap containing API server IP for reference 8 | # Note: In a Kind setup, the API server is typically available at 10.96.0.1 9 | kubectl apply -f - <<EOF -------------------------------------------------------------------------------- 8 | kubectl get namespace $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Namespace '$NAMESPACE' not found" 11 | exit 1 12 | fi 13 | 14 | # Check if namespace has PSS enforce label set to baseline 15 | ENFORCE_LABEL=$(kubectl get namespace $NAMESPACE -o jsonpath='{.metadata.labels.pod-security\.kubernetes\.io/enforce}') 16 | if [ "$ENFORCE_LABEL" != "$EXPECTED_ENFORCE" ]; then 17 | echo "❌ Namespace doesn't have the correct pod-security.kubernetes.io/enforce label. Expected: $EXPECTED_ENFORCE, Got: $ENFORCE_LABEL" 18 | exit 1 19 | fi 20 | 21 | echo "✅ Namespace has correct Pod Security Standard labels" 22 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q1_s1_validate_policy_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the NetworkPolicy exists 3 | 4 | POLICY_NAME="secure-backend" 5 | NAMESPACE="network-security" 6 | 7 | kubectl get networkpolicy $POLICY_NAME -n $NAMESPACE &> /dev/null 8 | if [ $? -eq 0 ]; then 9 | echo "✅ NetworkPolicy '$POLICY_NAME' exists in namespace '$NAMESPACE'" 10 | exit 0 11 | else 12 | echo "❌ NetworkPolicy '$POLICY_NAME' not found in namespace '$NAMESPACE'" 13 | exit 1 14 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q2_s1_validate_ingress_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the Ingress exists 3 | 4 | INGRESS_NAME="secure-app" 5 | NAMESPACE="secure-ingress" 6 | 7 | kubectl get ingress $INGRESS_NAME -n $NAMESPACE &> /dev/null 8 | if [ $? -eq 0 ]; then 9 | echo "✅ Success: Ingress exists" 10 | exit 0 11 | else 12 | echo "❌ Failure: Ingress not found" 13 | exit 1 14 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q2_s2_validate_hostname.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the Ingress has correct hostname configuration 3 | 4 | INGRESS_NAME="secure-app" 5 | NAMESPACE="secure-ingress" 6 | EXPECTED_HOSTNAME="secure-app.example.com" 7 | 8 | # Check if ingress exists 9 | kubectl get ingress $INGRESS_NAME -n $NAMESPACE &> /dev/null 10 | if [ $? -ne 0 ]; then 11 | echo "❌ Ingress '$INGRESS_NAME' not found in namespace '$NAMESPACE'" 12 | exit 1 13 | fi 14 | 15 | # Check for correct hostname 16 | HOSTNAME=$(kubectl get ingress $INGRESS_NAME -n $NAMESPACE -o jsonpath='{.spec.rules[0].host}') 17 | if [ "$HOSTNAME" = "$EXPECTED_HOSTNAME" ]; then 18 | echo "✅ Ingress has correct hostname: $EXPECTED_HOSTNAME" 19 | exit 0 20 | else 21 | echo "❌ Ingress has incorrect hostname: $HOSTNAME (expected: $EXPECTED_HOSTNAME)" 22 | exit 1 23 | fi
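
The q3_s1 validator that follows expects the 'api-security' namespace to carry the baseline Pod Security Standard enforce label; one way a candidate could set it:

kubectl label namespace api-security pod-security.kubernetes.io/enforce=baseline --overwrite
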
-------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q3_s1_validate_namespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the namespace exists with correct PSS label 3 | 4 | NAMESPACE="api-security" 5 | EXPECTED_PSS_LABEL="baseline" 6 | 7 | # Check if namespace exists 8 | kubectl get namespace $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Namespace '$NAMESPACE' not found" 11 | exit 1 12 | fi 13 | 14 | # Check for correct Pod Security Standard label 15 | PSS_LABEL=$(kubectl get namespace $NAMESPACE -o jsonpath='{.metadata.labels.pod-security\.kubernetes\.io/enforce}') 16 | if [ "$PSS_LABEL" = "$EXPECTED_PSS_LABEL" ]; then 17 | echo "✅ Namespace has correct Pod Security Standard label: $EXPECTED_PSS_LABEL" 18 | exit 0 19 | else 20 | echo "❌ Namespace does not have correct Pod Security Standard label (expected: $EXPECTED_PSS_LABEL, got: $PSS_LABEL)" 21 | exit 1 22 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q4_s1_validate_policy_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the NetworkPolicy exists 3 | 4 | POLICY_NAME="block-metadata" 5 | NAMESPACE="metadata-protect" 6 | 7 | kubectl get networkpolicy $POLICY_NAME -n $NAMESPACE &> /dev/null 8 | if [ $? -eq 0 ]; then 9 | echo "✅ NetworkPolicy '$POLICY_NAME' exists in namespace '$NAMESPACE'" 10 | exit 0 11 | else 12 | echo "❌ NetworkPolicy '$POLICY_NAME' not found in namespace '$NAMESPACE'" 13 | exit 1 14 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q5_s1_validate_pod_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the binary verification pod exists 3 | 4 | POD_NAME="verify-bin" 5 | NAMESPACE="binary-verify" 6 | 7 | # Check if pod exists 8 | kubectl get pod $POD_NAME -n $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Pod '$POD_NAME' not found in namespace '$NAMESPACE'" 11 | exit 1 12 | fi 13 | 14 | # Check if pod is running 15 | POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}') 16 | if [ "$POD_STATUS" != "Running" ]; then 17 | echo "❌ Pod '$POD_NAME' is not in Running state (current state: $POD_STATUS)" 18 | exit 1 19 | fi 20 | 21 | # Check if pod uses the busybox image as required 22 | POD_IMAGE=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].image}') 23 | if [[ "$POD_IMAGE" != *"busybox"* ]]; then 24 | echo "❌ Pod '$POD_NAME' is not using the busybox image (current image: $POD_IMAGE)" 25 | exit 1 26 | fi 27 | 28 | echo "✅ Binary verification pod exists" 29 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q6_s1_validate_role_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the Role exists 3 | 4 | ROLE_NAME="app-reader-role" 5 | NAMESPACE="rbac-minimize" 6 | 7 | kubectl get role $ROLE_NAME -n $NAMESPACE &> /dev/null 8 | if [ $?
-eq 0 ]; then 9 | echo "✅ Role '$ROLE_NAME' exists in namespace '$NAMESPACE'" 10 | exit 0 11 | else 12 | echo "❌ Role '$ROLE_NAME' not found in namespace '$NAMESPACE'" 13 | exit 1 14 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q7_s1_validate_immutable_filesystem.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the pod has an immutable filesystem 3 | 4 | POD_NAME="secure-pod" 5 | NAMESPACE="pod-security" 6 | 7 | # Check if the pod exists 8 | kubectl get pod $POD_NAME -n $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Pod '$POD_NAME' not found in namespace '$NAMESPACE'" 11 | exit 1 12 | fi 13 | 14 | # Check if the pod has readOnlyRootFilesystem set to true 15 | READONLY_ROOT=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.spec.containers[0].securityContext.readOnlyRootFilesystem}') 16 | if [ "$READONLY_ROOT" != "true" ]; then 17 | echo "❌ Pod does not have readOnlyRootFilesystem set to true" 18 | exit 1 19 | fi 20 | 21 | echo "✅ Pod has immutable filesystem (readOnlyRootFilesystem: true)" 22 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q7_s1_validate_sa_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that ServiceAccount exists 3 | 4 | SA_NAME="minimal-sa" 5 | NAMESPACE="service-account-caution" 6 | 7 | # Check if namespace exists 8 | kubectl get namespace $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Namespace '$NAMESPACE' not found" 11 | exit 1 12 | fi 13 | 14 | # Check if the service account exists 15 | kubectl get serviceaccount $SA_NAME -n $NAMESPACE &> /dev/null 16 | if [ $? -ne 0 ]; then 17 | echo "❌ ServiceAccount '$SA_NAME' not found in namespace '$NAMESPACE'" 18 | exit 1 19 | fi 20 | 21 | echo "✅ ServiceAccount exists" 22 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q7_s2_validate_sa_automounting.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that ServiceAccount has automounting disabled 3 | 4 | SA_NAME="minimal-sa" 5 | NAMESPACE="service-account-caution" 6 | 7 | # Check if namespace exists 8 | kubectl get namespace $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Namespace '$NAMESPACE' not found" 11 | exit 1 12 | fi 13 | 14 | # Check if the service account exists 15 | kubectl get serviceaccount $SA_NAME -n $NAMESPACE &> /dev/null 16 | if [ $? 
-ne 0 ]; then 17 | echo "❌ ServiceAccount '$SA_NAME' not found in namespace '$NAMESPACE'" 18 | exit 1 19 | fi 20 | 21 | # Check if automountServiceAccountToken is set to false 22 | AUTOMOUNT=$(kubectl get serviceaccount $SA_NAME -n $NAMESPACE -o jsonpath='{.automountServiceAccountToken}') 23 | if [ "$AUTOMOUNT" != "false" ]; then 24 | echo "❌ ServiceAccount automountServiceAccountToken is not set to false" 25 | exit 1 26 | fi 27 | 28 | echo "✅ ServiceAccount has automounting disabled" 29 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q7_s4_validate_pod_automounting.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that pod automounting is explicitly disabled 3 | 4 | DEPLOYMENT_NAME="secure-app" 5 | NAMESPACE="service-account-caution" 6 | 7 | # Check if namespace exists 8 | kubectl get namespace $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Namespace '$NAMESPACE' not found" 11 | exit 1 12 | fi 13 | 14 | # Check if the deployment exists 15 | kubectl get deployment $DEPLOYMENT_NAME -n $NAMESPACE &> /dev/null 16 | if [ $? -ne 0 ]; then 17 | echo "❌ Deployment '$DEPLOYMENT_NAME' not found in namespace '$NAMESPACE'" 18 | exit 1 19 | fi 20 | 21 | # Check if automounting is explicitly disabled in pod spec 22 | POD_AUTOMOUNT=$(kubectl get deployment $DEPLOYMENT_NAME -n $NAMESPACE -o jsonpath='{.spec.template.spec.automountServiceAccountToken}') 23 | if [ "$POD_AUTOMOUNT" != "false" ]; then 24 | echo "❌ Pod spec does not have automountServiceAccountToken explicitly set to false" 25 | exit 1 26 | fi 27 | 28 | echo "✅ Pod automounting is explicitly disabled" 29 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q8_s1_validate_policy_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that the NetworkPolicy exists 3 | 4 | POLICY_NAME="api-server-policy" 5 | NAMESPACE="api-restrict" 6 | 7 | # Check if namespace exists 8 | kubectl get namespace $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Namespace '$NAMESPACE' not found" 11 | exit 1 12 | fi 13 | 14 | # Check if NetworkPolicy exists 15 | kubectl get networkpolicy $POLICY_NAME -n $NAMESPACE &> /dev/null 16 | if [ $? -ne 0 ]; then 17 | echo "❌ NetworkPolicy '$POLICY_NAME' not found in namespace '$NAMESPACE'" 18 | exit 1 19 | fi 20 | 21 | echo "✅ NetworkPolicy '$POLICY_NAME' exists in namespace '$NAMESPACE'" 22 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/cks/001/scripts/validation/q9_s1_validate_pod_exists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate that pod exists 3 | 4 | POD_NAME="secure-container" 5 | NAMESPACE="os-hardening" 6 | 7 | # Check if namespace exists 8 | kubectl get namespace $NAMESPACE &> /dev/null 9 | if [ $? -ne 0 ]; then 10 | echo "❌ Namespace '$NAMESPACE' not found" 11 | exit 1 12 | fi 13 | 14 | # Check if pod exists 15 | kubectl get pod $POD_NAME -n $NAMESPACE &> /dev/null 16 | if [ $? 
-ne 0 ]; then 17 | echo "❌ Pod '$POD_NAME' not found in namespace '$NAMESPACE'" 18 | exit 1 19 | fi 20 | 21 | # Check if pod is running 22 | POD_STATUS=$(kubectl get pod $POD_NAME -n $NAMESPACE -o jsonpath='{.status.phase}') 23 | if [ "$POD_STATUS" != "Running" ]; then 24 | echo "❌ Pod '$POD_NAME' is not in Running state (current state: $POD_STATUS)" 25 | exit 1 26 | fi 27 | 28 | echo "✅ Pod '$POD_NAME' exists and is running in namespace '$NAMESPACE'" 29 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "lab": "docker-001", 3 | "workerNodes": 1, 4 | "answers": "assets/exams/other/001/answers.md", 5 | "questions": "assessment.json", 6 | "totalMarks": 100, 7 | "lowScore": 40, 8 | "mediumScore": 60, 9 | "highScore": 90 10 | } -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q10_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 10: Container resource limits 3 | 4 | # Remove any existing container with the same name 5 | docker rm -f limited-resources &> /dev/null 6 | 7 | # Pull the stress image 8 | docker pull progrium/stress:latest &> /dev/null || docker pull polinux/stress:latest &> /dev/null 9 | 10 | # If neither of the stress images are available, create a simple stress image 11 | if [ $? -ne 0 ]; then 12 | mkdir -p /tmp/exam/q10 13 | cat > /tmp/exam/q10/Dockerfile << EOF 14 | FROM alpine:latest 15 | RUN apk add --no-cache stress-ng 16 | ENTRYPOINT ["stress-ng"] 17 | EOF 18 | docker build -t stress:latest /tmp/exam/q10 19 | fi 20 | 21 | echo "Setup for Question 10 complete." 22 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q11_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 11: Docker Compose 3 | 4 | # Create directory for docker-compose file 5 | mkdir -p /tmp/exam/q11 6 | 7 | # Ensure docker-compose is installed 8 | if ! command -v docker-compose &> /dev/null; then 9 | echo "Installing docker-compose..." 10 | curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 11 | chmod +x /usr/local/bin/docker-compose 12 | fi 13 | 14 | # Pull required images 15 | docker pull nginx:alpine &> /dev/null 16 | docker pull postgres:13 &> /dev/null 17 | 18 | # Stop any existing containers from previous runs 19 | docker-compose -f /tmp/exam/q11/docker-compose.yml down &> /dev/null 20 | 21 | echo "Setup for Question 11 complete." 22 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q16_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 16: Docker Content Trust 3 | 4 | # Create directory for command file 5 | mkdir -p /tmp/exam/q16 6 | 7 | # Set up a local registry if not already running 8 | if ! docker ps | grep -q registry; then 9 | echo "Setting up local registry..." 
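# Note: registry:2 provides only the Docker Distribution registry and does not
# include a Notary service, so fully signed pushes with DOCKER_CONTENT_TRUST=1
# against localhost:5000 would also need a separate notary server to be reachable.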
10 | docker run -d -p 5000:5000 --name registry registry:2 11 | fi 12 | 13 | # Ensure any old DCT settings are cleared 14 | export DOCKER_CONTENT_TRUST=0 15 | 16 | # Ensure directory for notary exists 17 | mkdir -p ~/.docker/trust 18 | 19 | # Create a simple app for signing 20 | mkdir -p /tmp/exam/q16/secure-app 21 | cat > /tmp/exam/q16/secure-app/Dockerfile << EOF 22 | FROM alpine:latest 23 | CMD ["echo", "This is a signed secure image"] 24 | EOF 25 | 26 | # Remove any existing files 27 | rm -f /tmp/exam/q16/dct-commands.sh 28 | 29 | echo "Setup for Question 16 complete." 30 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q1_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 1: Docker image creation 3 | 4 | # Create necessary directories 5 | mkdir -p /tmp/exam/q1 6 | 7 | # Create a simple Dockerfile 8 | cat > /tmp/exam/q1/Dockerfile << EOF 9 | FROM alpine:latest 10 | WORKDIR /app 11 | COPY hello.sh . 12 | RUN chmod +x hello.sh 13 | CMD ["./hello.sh"] 14 | EOF 15 | 16 | # Create the hello script 17 | cat > /tmp/exam/q1/hello.sh << EOF 18 | #!/bin/sh 19 | echo "Hello from Docker Speed Run!" 20 | EOF 21 | 22 | # Make it executable 23 | chmod +x /tmp/exam/q1/hello.sh 24 | 25 | echo "Setup for Question 1 complete." 26 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q2_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 2: Docker container with port mapping and environment variables 3 | 4 | # Ensure nginx:alpine image is available 5 | docker pull nginx:alpine &> /dev/null 6 | 7 | # Remove any existing container with the same name 8 | docker rm -f web-server &> /dev/null 9 | 10 | echo "Setup for Question 2 complete." 11 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q3_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 3: Docker volumes 3 | 4 | # Remove any existing volume with the same name 5 | docker volume rm data-volume &> /dev/null 6 | 7 | # Remove any existing container with the same name 8 | docker rm -f volume-test &> /dev/null 9 | 10 | # Pull the alpine image 11 | docker pull alpine:latest &> /dev/null 12 | 13 | echo "Setup for Question 3 complete." 
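# The student needs to create a volume named 'data-volume', start a 'volume-test'
# container that mounts it at /app/data, and write "Docker volumes test" to
# /app/data/test.txt (these are the names and the content the validation scripts
# check for). The expected commands would be something like:
# docker volume create data-volume
# docker run --name volume-test -v data-volume:/app/data alpine:latest \
#   sh -c 'echo "Docker volumes test" > /app/data/test.txt'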
14 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q4_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 4: Multi-stage Docker build 3 | 4 | # Create necessary directories 5 | mkdir -p /tmp/exam/q4 6 | 7 | # Create a simple Go application 8 | cat > /tmp/exam/q4/main.go << EOF 9 | package main 10 | 11 | import ( 12 | "fmt" 13 | "net/http" 14 | ) 15 | 16 | func main() { 17 | http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { 18 | fmt.Fprintf(w, "Hello from Multi-Stage Docker Build!") 19 | }) 20 | 21 | fmt.Println("Server starting on port 8080...") 22 | http.ListenAndServe(":8080", nil) 23 | } 24 | EOF 25 | 26 | # Remove any existing image with the same name 27 | docker rmi multi-stage:latest &> /dev/null 28 | 29 | echo "Setup for Question 4 complete." 30 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q5_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 5: Docker daemon configuration 3 | 4 | # Backup original daemon.json if it exists 5 | if [ -f /etc/docker/daemon.json ]; then 6 | cp /etc/docker/daemon.json /etc/docker/daemon.json.backup 7 | else 8 | # Create empty daemon.json 9 | mkdir -p /etc/docker 10 | echo "{}" > /etc/docker/daemon.json 11 | fi 12 | 13 | # Create a reference file with expected content 14 | mkdir -p /tmp/exam/q5 15 | cat > /tmp/exam/q5/reference.json << EOF 16 | { 17 | "exec-opts": ["native.cgroupdriver=systemd"], 18 | "log-driver": "json-file", 19 | "log-opts": { 20 | "max-size": "100m" 21 | }, 22 | "storage-driver": "overlay2" 23 | } 24 | EOF 25 | 26 | echo "Setup for Question 5 complete." 27 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q6_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 6: Docker logging configuration 3 | 4 | # Ensure nginx:alpine image is available 5 | docker pull nginx:alpine &> /dev/null 6 | 7 | # Remove any existing container with the same name 8 | docker rm -f logging-test &> /dev/null 9 | 10 | echo "Setup for Question 6 complete." 11 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q7_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 7: Docker networking 3 | 4 | # Remove any existing network with the same name 5 | docker network rm app-network &> /dev/null 6 | 7 | # Remove any existing containers 8 | docker rm -f app1 app2 &> /dev/null 9 | 10 | # Ensure alpine image is available 11 | docker pull alpine:latest &> /dev/null 12 | 13 | echo "Setup for Question 7 complete." 
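# The student needs to create a network named 'app-network' with subnet
# 172.18.0.0/16 and run containers 'app1' and 'app2' attached to it (these are
# the names the validation scripts check for). The expected commands would be
# something like:
# docker network create --subnet=172.18.0.0/16 app-network
# docker run -d --name app1 --network app-network alpine:latest sleep 3600
# docker run -d --name app2 --network app-network alpine:latest sleep 3600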
14 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q8_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 8: Docker healthchecks 3 | 4 | # Create directory for Dockerfile 5 | mkdir -p /tmp/exam/q8 6 | 7 | # Remove any existing container and image 8 | docker rm -f healthy-app &> /dev/null 9 | docker rmi healthy-nginx &> /dev/null 10 | 11 | # Ensure nginx:alpine image is available 12 | docker pull nginx:alpine &> /dev/null 13 | 14 | echo "Setup for Question 8 complete." 15 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/setup/q9_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script for Question 9: Docker image manifests 3 | 4 | # Create directory for output 5 | mkdir -p /tmp/exam/q9 6 | 7 | # Ensure we have access to the manifest command 8 | export DOCKER_CLI_EXPERIMENTAL=enabled 9 | 10 | # Clean up any existing files 11 | rm -f /tmp/exam/q9/manifest.json /tmp/exam/q9/platforms.txt 12 | 13 | echo "Setup for Question 9 complete." 14 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q10_s1_validate_running.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 10, Step 1: Check if limited-resources container is running 3 | 4 | # Check if the container exists and is running 5 | docker inspect --format='{{.State.Running}}' limited-resources &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | running=$(docker inspect --format='{{.State.Running}}' limited-resources) 9 | if [ "$running" == "true" ]; then 10 | echo "✅ Container 'limited-resources' is running" 11 | exit 0 12 | else 13 | echo "❌ Container 'limited-resources' exists but is not running" 14 | exit 1 15 | fi 16 | else 17 | echo "❌ Container 'limited-resources' does not exist" 18 | exit 1 19 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q10_s2_validate_cpu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 10, Step 2: Check if CPU limit is set correctly 3 | 4 | # Check if the container exists 5 | docker inspect limited-resources &> /dev/null 6 | 7 | if [ $? 
-ne 0 ]; then 8 | echo "❌ Container 'limited-resources' does not exist" 9 | exit 1 10 | fi 11 | 12 | # Get the CPU limit (NanoCPUs is in billionths of a CPU, so 0.5 CPU = 500000000) 13 | nano_cpus=$(docker inspect --format='{{.HostConfig.NanoCpus}}' limited-resources) 14 | cpus=$(echo "scale=2; $nano_cpus / 1000000000" | bc) 15 | 16 | # Check if it's set to 0.5 CPU 17 | if (( $(echo "$cpus >= 0.49 && $cpus <= 0.51" | bc -l) )); then 18 | echo "✅ Container CPU limit is set correctly: $cpus CPUs" 19 | exit 0 20 | else 21 | echo "❌ Container CPU limit is not set to 0.5 CPUs: $cpus CPUs" 22 | exit 1 23 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q10_s3_validate_memory.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 10, Step 3: Check if memory limit is set correctly 3 | 4 | # Check if the container exists 5 | docker inspect limited-resources &> /dev/null 6 | 7 | if [ $? -ne 0 ]; then 8 | echo "❌ Container 'limited-resources' does not exist" 9 | exit 1 10 | fi 11 | 12 | # Get the memory limit in bytes 13 | memory_bytes=$(docker inspect --format='{{.HostConfig.Memory}}' limited-resources) 14 | memory_mb=$(( memory_bytes / 1024 / 1024 )) 15 | 16 | # Check if it's set to 256MB (allow a small tolerance) 17 | if [[ "$memory_mb" -ge 255 && "$memory_mb" -le 257 ]]; then 18 | echo "✅ Container memory limit is set correctly: ${memory_mb}MB" 19 | exit 0 20 | else 21 | echo "❌ Container memory limit is not set to 256MB: ${memory_mb}MB" 22 | exit 1 23 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q12_s1_validate_file.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 12, Step 1: Check if report file exists 3 | 4 | # Check if the report file exists 5 | if [ ! -f /tmp/exam/q12/image-report.txt ]; then 6 | echo "❌ Image report file does not exist at /tmp/exam/q12/image-report.txt" 7 | exit 1 8 | fi 9 | 10 | # Check if the report file has content 11 | if [ ! -s /tmp/exam/q12/image-report.txt ]; then 12 | echo "❌ Image report file exists but is empty" 13 | exit 1 14 | fi 15 | 16 | echo "✅ Image report file exists and has content" 17 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q12_s2_validate_base_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 12, Step 2: Check if report contains correct base image information 3 | 4 | # Check if the report file exists 5 | if [ ! -f /tmp/exam/q12/image-report.txt ]; then 6 | echo "❌ Image report file does not exist at /tmp/exam/q12/image-report.txt" 7 | exit 1 8 | fi 9 | 10 | # Check if the report contains base image information 11 | grep -i "base image\|from\|parent" /tmp/exam/q12/image-report.txt 12 | 13 | if [ $? 
-eq 0 ]; then 14 | # Get the actual base image for comparison 15 | base_image=$(docker inspect webapp:latest --format='{{.Config.Image}}' 2>/dev/null || docker history webapp:latest | tail -1 | awk '{print $1}') 16 | 17 | if [[ -n "$base_image" ]]; then 18 | echo "✅ Report contains base image information" 19 | exit 0 20 | else 21 | echo "✅ Report contains some base image information but we couldn't verify it" 22 | exit 0 23 | fi 24 | else 25 | echo "❌ Report does not contain base image information" 26 | echo "Expected information about 'Base Image' or similar" 27 | exit 1 28 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q13_s1_validate_diagnosis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 13, Step 1: Check if diagnosis file exists 3 | 4 | # Check if the diagnosis file exists 5 | if [ ! -f /tmp/exam/q13/diagnosis.txt ]; then 6 | echo "❌ Diagnosis file does not exist at /tmp/exam/q13/diagnosis.txt" 7 | exit 1 8 | fi 9 | 10 | # Check if the diagnosis file has content 11 | if [ ! -s /tmp/exam/q13/diagnosis.txt ]; then 12 | echo "❌ Diagnosis file exists but is empty" 13 | exit 1 14 | fi 15 | 16 | # Check if the diagnosis file mentions the config.json file that's missing 17 | if grep -q "config.json\|configuration" /tmp/exam/q13/diagnosis.txt; then 18 | echo "✅ Diagnosis file mentions the missing configuration file" 19 | exit 0 20 | else 21 | echo "❌ Diagnosis file does not mention the missing config.json file" 22 | echo "Content:" 23 | cat /tmp/exam/q13/diagnosis.txt 24 | exit 1 25 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q14_s1_validate_dockerfile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 14, Step 1: Check if Dockerfile exists with non-root user configuration 3 | 4 | # Check if the Dockerfile exists 5 | if [ ! -f /tmp/exam/q14/Dockerfile ]; then 6 | echo "❌ Dockerfile does not exist at /tmp/exam/q14/Dockerfile" 7 | exit 1 8 | fi 9 | 10 | # Check if the Dockerfile has USER instruction 11 | grep -q "USER" /tmp/exam/q14/Dockerfile 12 | 13 | if [ $? -ne 0 ]; then 14 | echo "❌ Dockerfile does not contain USER instruction" 15 | exit 1 16 | fi 17 | 18 | # Check if the USER is set to appuser 19 | grep -q "USER.*appuser" /tmp/exam/q14/Dockerfile 20 | 21 | if [ $? -ne 0 ]; then 22 | echo "❌ Dockerfile does not set USER to 'appuser'" 23 | exit 1 24 | fi 25 | 26 | # Check for user creation with UID 1001 27 | grep -q "useradd.*1001" /tmp/exam/q14/Dockerfile || grep -q "adduser.*1001" /tmp/exam/q14/Dockerfile 28 | 29 | if [ $? -ne 0 ]; then 30 | echo "❌ Dockerfile does not create a user with UID 1001" 31 | exit 1 32 | fi 33 | 34 | echo "✅ Dockerfile exists with non-root user configuration" 35 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q1_s1_validate_image_v1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 1, Step 1: Check if docker-speedrun:v1 image exists 3 | 4 | # Check if the image exists 5 | docker image inspect docker-speedrun:v1 &> /dev/null 6 | 7 | if [ $? 
-eq 0 ]; then 8 | echo "✅ Image 'docker-speedrun:v1' exists" 9 | exit 0 10 | else 11 | echo "❌ Image 'docker-speedrun:v1' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q1_s2_validate_image_latest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 1, Step 2: Check if docker-speedrun:latest image exists 3 | 4 | # Check if the image exists 5 | docker image inspect docker-speedrun:latest &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | echo "✅ Image 'docker-speedrun:latest' exists" 9 | exit 0 10 | else 11 | echo "❌ Image 'docker-speedrun:latest' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q2_s1_validate_container_running.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 2, Step 1: Check if container is running 3 | 4 | # Check if the container exists and is running 5 | docker inspect --format='{{.State.Running}}' web-server &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | running=$(docker inspect --format='{{.State.Running}}' web-server) 9 | if [ "$running" == "true" ]; then 10 | echo "✅ Container 'web-server' is running" 11 | exit 0 12 | else 13 | echo "❌ Container 'web-server' exists but is not running" 14 | exit 1 15 | fi 16 | else 17 | echo "❌ Container 'web-server' does not exist" 18 | exit 1 19 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q2_s2_validate_image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 2, Step 2: Check if container uses the correct image 3 | 4 | # Check if the container exists 5 | docker inspect web-server &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | # Get the image name 9 | image=$(docker inspect --format='{{.Config.Image}}' web-server) 10 | 11 | # Check if it's using nginx:alpine 12 | if [[ "$image" == "nginx:alpine" ]]; then 13 | echo "✅ Container 'web-server' is using the correct image: $image" 14 | exit 0 15 | else 16 | echo "❌ Container 'web-server' is using incorrect image: $image (expected: nginx:alpine)" 17 | exit 1 18 | fi 19 | else 20 | echo "❌ Container 'web-server' does not exist" 21 | exit 1 22 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q2_s3_validate_port.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 2, Step 3: Check if port mapping is correct 3 | 4 | # Check if the container exists 5 | docker inspect web-server &> /dev/null 6 | 7 | if [ $? 
-eq 0 ]; then 8 | # Get the port mappings 9 | port_mappings=$(docker inspect --format='{{json .HostConfig.PortBindings}}' web-server) 10 | 11 | # Check if port 8080 is mapped to 80 12 | if [[ $port_mappings == *"8080"* && $port_mappings == *"80"* ]]; then 13 | echo "✅ Container 'web-server' has correct port mapping (8080->80)" 14 | exit 0 15 | else 16 | echo "❌ Container 'web-server' does not have correct port mapping (expected: 8080->80)" 17 | echo "Current port mappings: $port_mappings" 18 | exit 1 19 | fi 20 | else 21 | echo "❌ Container 'web-server' does not exist" 22 | exit 1 23 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q2_s4_validate_env.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 2, Step 4: Check if environment variable is set correctly 3 | 4 | # Check if the container exists 5 | docker inspect web-server &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | # Check if the environment variable is set 9 | env_var=$(docker exec web-server env | grep NGINX_HOST) 10 | 11 | if [[ "$env_var" == "NGINX_HOST=localhost" ]]; then 12 | echo "✅ Container 'web-server' has correct environment variable set: $env_var" 13 | exit 0 14 | else 15 | echo "❌ Container 'web-server' does not have correct environment variable" 16 | echo "Expected: NGINX_HOST=localhost" 17 | echo "Found: $env_var" 18 | exit 1 19 | fi 20 | else 21 | echo "❌ Container 'web-server' does not exist" 22 | exit 1 23 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q3_s1_validate_volume.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 3, Step 1: Check if volume exists 3 | 4 | # Check if the volume exists 5 | docker volume inspect data-volume &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | echo "✅ Volume 'data-volume' exists" 9 | exit 0 10 | else 11 | echo "❌ Volume 'data-volume' does not exist" 12 | exit 1 13 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q3_s2_validate_container_mount.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 3, Step 2: Check if container was created with correct mount 3 | 4 | # Check if the container exists or existed 5 | docker inspect volume-test &> /dev/null 6 | 7 | if [ $? 
-eq 0 ]; then 8 | # Get mount information 9 | mount_info=$(docker inspect --format='{{json .Mounts}}' volume-test) 10 | 11 | # Check if the volume is mounted at the correct path 12 | if [[ $mount_info == *"data-volume"* && $mount_info == *"/app/data"* ]]; then 13 | echo "✅ Container 'volume-test' was created with the correct mount" 14 | exit 0 15 | else 16 | echo "❌ Container 'volume-test' does not have correct volume mount" 17 | echo "Current mounts: $mount_info" 18 | exit 1 19 | fi 20 | else 21 | echo "❌ Container 'volume-test' does not exist" 22 | exit 1 23 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q3_s3_validate_file_content.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 3, Step 3: Check if file exists in the volume with correct content 3 | 4 | # Check if the volume exists 5 | docker volume inspect data-volume &> /dev/null 6 | 7 | if [ $? -ne 0 ]; then 8 | echo "❌ Volume 'data-volume' does not exist" 9 | exit 1 10 | fi 11 | 12 | # Create a temporary container to check the volume content 13 | file_content=$(docker run --rm -v data-volume:/app/data alpine:latest cat /app/data/test.txt 2>/dev/null) 14 | 15 | if [ $? -eq 0 ]; then 16 | # Check if the file content is correct 17 | if [[ "$file_content" == "Docker volumes test" ]]; then 18 | echo "✅ File '/app/data/test.txt' exists in the volume with correct content" 19 | exit 0 20 | else 21 | echo "❌ File exists but has incorrect content" 22 | echo "Expected: 'Docker volumes test'" 23 | echo "Found: '$file_content'" 24 | exit 1 25 | fi 26 | else 27 | echo "❌ File '/app/data/test.txt' does not exist in the volume" 28 | exit 1 29 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q4_s1_validate_dockerfile.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 4, Step 1: Check if Dockerfile exists and has multiple stages 3 | 4 | # Check if the Dockerfile exists 5 | if [ ! -f /tmp/exam/q4/Dockerfile ]; then 6 | echo "❌ Dockerfile does not exist at /tmp/exam/q4/Dockerfile" 7 | exit 1 8 | fi 9 | 10 | # Check if the Dockerfile has multiple stages 11 | stages=$(grep -c "FROM" /tmp/exam/q4/Dockerfile) 12 | 13 | if [ "$stages" -ge 2 ]; then 14 | echo "✅ Dockerfile exists and has multiple stages ($stages stages detected)" 15 | exit 0 16 | else 17 | echo "❌ Dockerfile does not have multiple stages (found $stages FROM statements, need at least 2)" 18 | exit 1 19 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q4_s2_validate_image_built.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 4, Step 2: Check if image was built successfully 3 | 4 | # Check if the image exists 5 | docker image inspect multi-stage:latest &> /dev/null 6 | 7 | if [ $? 
-eq 0 ]; then 8 | echo "✅ Image 'multi-stage:latest' exists" 9 | exit 0 10 | else 11 | echo "❌ Image 'multi-stage:latest' does not exist" 12 | exit 1 13 | fi 14 | -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q4_s3_validate_image_size.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 4, Step 3: Check if image size is optimized 3 | 4 | # Check if the image exists 5 | docker image inspect multi-stage:latest &> /dev/null 6 | 7 | if [ $? -ne 0 ]; then 8 | echo "❌ Image 'multi-stage:latest' does not exist" 9 | exit 1 10 | fi 11 | 12 | # Get the image size in MB 13 | image_size=$(docker image inspect multi-stage:latest --format='{{.Size}}') 14 | image_size_mb=$(echo "scale=2; $image_size / 1024 / 1024" | bc) 15 | 16 | # Check if the image size is less than 20MB 17 | if (( $(echo "$image_size_mb < 20" | bc -l) )); then 18 | echo "✅ Image size is optimized: ${image_size_mb}MB (less than 20MB)" 19 | exit 0 20 | else 21 | echo "❌ Image size is not optimized: ${image_size_mb}MB (expected: less than 20MB)" 22 | exit 1 23 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q5_s1_validate_daemon_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 5, Step 1: Check if Docker daemon configuration has correct settings 3 | 4 | # Check if daemon.json exists 5 | if [ ! -f /etc/docker/daemon.json ]; then 6 | echo "❌ Docker daemon configuration file does not exist at /etc/docker/daemon.json" 7 | exit 1 8 | fi 9 | 10 | # Check for systemd cgroup driver in the configuration 11 | grep -q "native.cgroupdriver=systemd" /etc/docker/daemon.json 12 | 13 | if [ $? -eq 0 ]; then 14 | echo "✅ Docker daemon is configured with systemd cgroup driver" 15 | exit 0 16 | else 17 | echo "❌ Docker daemon is not configured with systemd cgroup driver" 18 | echo "Current configuration:" 19 | cat /etc/docker/daemon.json 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q5_s2_validate_cgroup_driver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 5, Step 2: Check if Docker service is running with systemd cgroup driver 3 | 4 | # Check if Docker service is running 5 | systemctl is-active docker &> /dev/null 6 | 7 | if [ $? -ne 0 ]; then 8 | echo "❌ Docker service is not running" 9 | exit 1 10 | fi 11 | 12 | # Check for systemd cgroup driver in Docker info 13 | docker info | grep -q "Cgroup Driver: systemd" 14 | 15 | if [ $? 
-eq 0 ]; then 16 | echo "✅ Docker service is running with systemd cgroup driver" 17 | exit 0 18 | else 19 | echo "❌ Docker service is not using systemd cgroup driver" 20 | echo "Current configuration:" 21 | docker info | grep "Cgroup Driver" 22 | exit 1 23 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q6_s1_validate_container.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 6, Step 1: Check if logging-test container is running 3 | 4 | # Check if the container exists and is running 5 | docker inspect --format='{{.State.Running}}' logging-test &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | running=$(docker inspect --format='{{.State.Running}}' logging-test) 9 | if [ "$running" == "true" ]; then 10 | echo "✅ Container 'logging-test' is running" 11 | exit 0 12 | else 13 | echo "❌ Container 'logging-test' exists but is not running" 14 | exit 1 15 | fi 16 | else 17 | echo "❌ Container 'logging-test' does not exist" 18 | exit 1 19 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q6_s2_validate_log_driver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 6, Step 2: Check if container uses json-file logging driver 3 | 4 | # Check if the container exists 5 | docker inspect logging-test &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | # Get the logging driver 9 | log_driver=$(docker inspect --format='{{.HostConfig.LogConfig.Type}}' logging-test) 10 | 11 | if [[ "$log_driver" == "json-file" ]]; then 12 | echo "✅ Container 'logging-test' is using the json-file logging driver" 13 | exit 0 14 | else 15 | echo "❌ Container 'logging-test' is using incorrect logging driver: $log_driver (expected: json-file)" 16 | exit 1 17 | fi 18 | else 19 | echo "❌ Container 'logging-test' does not exist" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q6_s3_validate_log_rotation.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 6, Step 3: Check if log rotation settings are correct 3 | 4 | # Check if the container exists 5 | docker inspect logging-test &> /dev/null 6 | 7 | if [ $? 
-eq 0 ]; then 8 | # Get the log config options 9 | log_opts=$(docker inspect --format='{{json .HostConfig.LogConfig.Config}}' logging-test) 10 | 11 | # Check for log rotation settings 12 | max_size=$(echo $log_opts | grep -o '"max-size":"[^"]*"' | cut -d':' -f2 | tr -d '"') 13 | max_file=$(echo $log_opts | grep -o '"max-file":"[^"]*"' | cut -d':' -f2 | tr -d '"') 14 | 15 | if [[ "$max_size" == "10m" && "$max_file" == "3" ]]; then 16 | echo "✅ Container 'logging-test' has correct log rotation settings: max-size=$max_size, max-file=$max_file" 17 | exit 0 18 | else 19 | echo "❌ Container 'logging-test' does not have correct log rotation settings" 20 | echo "Expected: max-size=10m, max-file=3" 21 | echo "Found: max-size=$max_size, max-file=$max_file" 22 | exit 1 23 | fi 24 | else 25 | echo "❌ Container 'logging-test' does not exist" 26 | exit 1 27 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q7_s1_validate_network.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 7, Step 1: Check if custom network exists with correct subnet 3 | 4 | # Check if the network exists 5 | docker network inspect app-network &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | # Get subnet information 9 | subnet=$(docker network inspect --format='{{range .IPAM.Config}}{{.Subnet}}{{end}}' app-network) 10 | 11 | if [[ "$subnet" == "172.18.0.0/16" ]]; then 12 | echo "✅ Network 'app-network' exists with correct subnet: $subnet" 13 | exit 0 14 | else 15 | echo "❌ Network 'app-network' exists but has incorrect subnet: $subnet (expected: 172.18.0.0/16)" 16 | exit 1 17 | fi 18 | else 19 | echo "❌ Network 'app-network' does not exist" 20 | exit 1 21 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q7_s2_validate_app1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 7, Step 2: Check if app1 container is running on app-network 3 | 4 | # Check if the container exists 5 | docker inspect app1 &> /dev/null 6 | 7 | if [ $? -eq 0 ]; then 8 | # Check if it's running 9 | running=$(docker inspect --format='{{.State.Running}}' app1) 10 | 11 | if [ "$running" != "true" ]; then 12 | echo "❌ Container 'app1' exists but is not running" 13 | exit 1 14 | fi 15 | 16 | # Check if it's on the app-network 17 | network=$(docker inspect --format='{{json .NetworkSettings.Networks}}' app1) 18 | 19 | if [[ $network == *"app-network"* ]]; then 20 | echo "✅ Container 'app1' is running on 'app-network'" 21 | exit 0 22 | else 23 | echo "❌ Container 'app1' is not connected to 'app-network'" 24 | echo "Current networks: $network" 25 | exit 1 26 | fi 27 | else 28 | echo "❌ Container 'app1' does not exist" 29 | exit 1 30 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q8_s2_validate_container.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 8, Step 2: Check if container is running with health check 3 | 4 | # Check if the container exists and is running 5 | docker inspect healthy-app &> /dev/null 6 | 7 | if [ $? 
-ne 0 ]; then 8 | echo "❌ Container 'healthy-app' does not exist" 9 | exit 1 10 | fi 11 | 12 | # Check if it's running 13 | running=$(docker inspect --format='{{.State.Running}}' healthy-app) 14 | 15 | if [ "$running" != "true" ]; then 16 | echo "❌ Container 'healthy-app' exists but is not running" 17 | exit 1 18 | fi 19 | 20 | # Check if health check is configured (the Go template renders "<nil>", not an empty string, when none is set) 21 | health_check=$(docker inspect --format='{{.Config.Healthcheck}}' healthy-app) 22 | 23 | if [ -z "$health_check" ] || [ "$health_check" = "<nil>" ]; then 24 | echo "❌ Container 'healthy-app' is running but has no health check configured" 25 | exit 1 26 | else 27 | echo "✅ Container 'healthy-app' is running with health check: $health_check" 28 | exit 0 29 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q9_s1_validate_manifest.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 9, Step 1: Check if manifest file exists 3 | 4 | # Check if the manifest file exists 5 | if [ ! -f /tmp/exam/q9/manifest.json ]; then 6 | echo "❌ Manifest file does not exist at /tmp/exam/q9/manifest.json" 7 | exit 1 8 | fi 9 | 10 | # Check if the manifest file has valid content 11 | grep -q "\"mediaType\"" /tmp/exam/q9/manifest.json 12 | 13 | if [ $? -eq 0 ]; then 14 | # Verify it's for nginx:1.21.0 15 | if grep -q "nginx:1.21.0" /tmp/exam/q9/manifest.json || grep -q "\"name\":\"nginx\"" /tmp/exam/q9/manifest.json; then 16 | echo "✅ Manifest file exists with valid content for nginx:1.21.0" 17 | exit 0 18 | else 19 | echo "❌ Manifest file exists but does not appear to be for nginx:1.21.0" 20 | exit 1 21 | fi 22 | else 23 | echo "❌ Manifest file exists but does not have valid manifest content" 24 | exit 1 25 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/001/scripts/validation/q9_s2_validate_platforms.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 9, Step 2: Check if platforms file exists with correct data 3 | 4 | # Check if the platforms file exists 5 | if [ ! -f /tmp/exam/q9/platforms.txt ]; then 6 | echo "❌ Platforms file does not exist at /tmp/exam/q9/platforms.txt" 7 | exit 1 8 | fi 9 | 10 | # Check if the platforms file has valid content 11 | grep -q "architecture\|os" /tmp/exam/q9/platforms.txt 12 | 13 | if [ $?
-eq 0 ]; then 14 | # Check that it has at least basic platform info 15 | if grep -q "amd64\|arm64\|linux" /tmp/exam/q9/platforms.txt; then 16 | echo "✅ Platforms file exists with valid content" 17 | exit 0 18 | else 19 | echo "❌ Platforms file exists but does not have expected platform information" 20 | echo "Expected to see architecture and OS information like 'amd64', 'arm64', or 'linux'" 21 | echo "Content:" 22 | cat /tmp/exam/q9/platforms.txt 23 | exit 1 24 | fi 25 | else 26 | echo "❌ Platforms file exists but does not have valid platform content" 27 | echo "Content:" 28 | cat /tmp/exam/q9/platforms.txt 29 | exit 1 30 | fi -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "lab": "helm-001", 3 | "workerNodes": 1, 4 | "answers": "assets/exams/other/002/answers.md", 5 | "questions": "assessment.json", 6 | "totalMarks": 100, 7 | "lowScore": 40, 8 | "mediumScore": 60, 9 | "highScore": 90 10 | } -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/create_directories.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup script to create directories for all questions 3 | 4 | # Create main exam directory 5 | mkdir -p /tmp/exam 6 | 7 | # Create directories for each question 8 | for i in {1..12}; do 9 | mkdir -p /tmp/exam/q$i 10 | echo "Created directory for Question $i" 11 | done 12 | 13 | echo "All exam directories have been created successfully" 14 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q10_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 10 - Package Chart and Create Local Repo 3 | 4 | # Create necessary directories 5 | mkdir -p /tmp/exam/q10/charts 6 | 7 | # No other specific setup needed for this question 8 | # The student needs to package the webapp chart into a .tgz file 9 | # Create a local chart repository at /tmp/exam/q10/charts 10 | # Create an index file and add the local repo to Helm 11 | 12 | echo "Environment setup complete for Question 10" 13 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q11_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 11 - Roll Back Release 3 | 4 | # No specific setup needed for this question 5 | # The student needs to roll back the web-server release to its first revision 6 | # and verify the rollback was successful by checking revision number and replica count 7 | 8 | echo "Environment setup complete for Question 11" 9 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q1_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 1 - Verify Helm Installation 3 | 4 | # Create necessary directories 5 | mkdir -p /tmp/exam/q1 6 | 7 | # Ensure Helm is installed on the system 8 | # This is likely already set up as part of the environment initialization 9 | # but we'll check and report status 10 | 11 | if ! 
command -v helm &> /dev/null; then 12 | echo "❌ Helm is not installed on the system. This is a prerequisite for the helm-001 lab." 13 | exit 1 14 | else 15 | echo "✅ Helm is installed and available for the exam." 16 | fi 17 | 18 | # No other setup needed for this question as it only requires checking the helm version 19 | 20 | echo "Environment setup complete for Question 1" 21 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q2_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 2 - Adding Bitnami Helm Repository 3 | 4 | # No specific setup needed for this question 5 | # This is a task for the student to add the Bitnami repo 6 | # The student needs to run the following commands: 7 | # helm repo add bitnami https://charts.bitnami.com/bitnami 8 | # helm repo update 9 | # helm repo list 10 | 11 | echo "Environment setup complete for Question 2" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q3_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 3 - Search for Nginx Chart 3 | 4 | # Create necessary directories 5 | mkdir -p /tmp/exam/q3 6 | 7 | # No other setup needed for this question 8 | # The student will need to search for the nginx chart in the Bitnami repository 9 | # and save the results to /tmp/exam/q3/nginx-charts.txt 10 | 11 | echo "Environment setup complete for Question 3" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q4_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 4 - Install Nginx Chart 3 | 4 | # No specific setup needed for this question 5 | # The student needs to install the Bitnami nginx chart with release name web-server 6 | # They should set the service type to NodePort and port to 30080 7 | # The expected commands would be something like: 8 | # helm install web-server bitnami/nginx --set service.type=NodePort --set service.nodePorts.http=30080 9 | 10 | echo "Environment setup complete for Question 4" 11 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q5_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 5 - List Helm Releases 3 | 4 | # Create necessary directories 5 | mkdir -p /tmp/exam/q5 6 | 7 | # No other setup needed for this question 8 | # The student needs to list all Helm releases across all namespaces 9 | # and save the output to /tmp/exam/q5/releases.txt 10 | 11 | echo "Environment setup complete for Question 5" 12 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q6_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 6 - Get Release Status 3 | 4 | # Create necessary directories 5 | mkdir -p /tmp/exam/q6 6 | 7 | # No other specific setup needed for this question 8 | # The student needs to get the status of the web-server release 9 | # and save the 
output to /tmp/exam/q6/web-server-status.txt 10 | # They also need to get the manifest summary and save it to /tmp/exam/q6/web-server-manifests.txt 11 | 12 | echo "Environment setup complete for Question 6" 13 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q7_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 7 - Upgrade Release 3 | 4 | # No specific setup needed for this question 5 | # The student needs to upgrade the web-server release to set replica count to 3 6 | # The expected command would be something like: 7 | # helm upgrade web-server bitnami/nginx --set replicaCount=3 8 | 9 | echo "Environment setup complete for Question 7" 10 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q8_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 8 - Create Values File for Redis 3 | 4 | # Create necessary directories 5 | mkdir -p /tmp/exam/q8 6 | 7 | # No other specific setup needed for this question 8 | # The student needs to create a values file at /tmp/exam/q8/redis-values.yaml 9 | # with specific configuration for Redis installation 10 | # Then install the Bitnami Redis chart with release name cache-db 11 | 12 | echo "Environment setup complete for Question 8" 13 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/setup/q9_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Setup environment for Question 9 - Create New Helm Chart 3 | 4 | # No specific setup needed for this question 5 | # The student needs to create a new Helm chart named webapp 6 | # using the helm create command and modify Chart.yaml 7 | 8 | echo "Environment setup complete for Question 9" 9 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/validation/q10_s1_validate_chart_packaged.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 10, Step 1: Check if chart is packaged 3 | 4 | # Check if the packaged chart is in the current directory 5 | if ! ls webapp-*.tgz &>/dev/null; then 6 | # Check if it was moved to the repository directory already 7 | if ! 
ls /tmp/exam/q10/charts/webapp-*.tgz &>/dev/null; then 8 | echo "❌ Packaged chart (webapp-*.tgz) not found in current directory or repository directory" 9 | echo "Current directory contents:" 10 | ls -la 11 | echo "Repository directory contents (if exists):" 12 | ls -la /tmp/exam/q10/charts 2>/dev/null || echo "Repository directory does not exist yet" 13 | exit 1 14 | else 15 | echo "✅ Chart is packaged and moved to the repository directory" 16 | echo "Repository directory contents:" 17 | ls -la /tmp/exam/q10/charts 18 | exit 0 19 | fi 20 | fi 21 | 22 | echo "✅ Chart is packaged (but not yet moved to repository directory)" 23 | echo "Current directory contents:" 24 | ls -la webapp-*.tgz 25 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/validation/q4_s1_validate_nginx_installed.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 4, Step 1: Check if nginx chart is installed 3 | 4 | # Check if helm command is available 5 | if ! command -v helm &> /dev/null; then 6 | echo "❌ Helm command not found in PATH" 7 | exit 1 8 | fi 9 | 10 | # Check if the release exists 11 | release_info=$(helm list -n default -f "web-server" 2>&1) 12 | 13 | if [ $? -ne 0 ] || ! echo "$release_info" | grep -q "web-server"; then 14 | echo "❌ Release 'web-server' not found in namespace 'default'" 15 | echo "Current releases:" 16 | helm list -n default 17 | exit 1 18 | fi 19 | 20 | # Check if it's using the Bitnami nginx chart 21 | if ! helm get values web-server -n default | grep -q "nginx"; then 22 | echo "❌ Release 'web-server' does not appear to be using the nginx chart" 23 | echo "Release information:" 24 | echo "$release_info" 25 | exit 1 26 | fi 27 | 28 | echo "✅ Nginx chart is installed with release name 'web-server'" 29 | echo "Release information:" 30 | echo "$release_info" 31 | exit 0 -------------------------------------------------------------------------------- /facilitator/assets/exams/other/002/scripts/validation/q7_s1_validate_upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Validate script for Question 7, Step 1: Check if release was upgraded 3 | 4 | # Check if helm command is available 5 | if ! command -v helm &> /dev/null; then 6 | echo "❌ Helm command not found in PATH" 7 | exit 1 8 | fi 9 | 10 | # Check if the release exists 11 | release_info=$(helm list -n default -f "web-server" 2>&1) 12 | 13 | if [ $? -ne 0 ] || ! 
echo "$release_info" | grep -q "web-server"; then 14 | echo "❌ Release 'web-server' not found in namespace 'default'" 15 | echo "Current releases:" 16 | helm list -n default 17 | exit 1 18 | fi 19 | 20 | # Check if it's been upgraded (revision > 1) 21 | revision=$(helm list -n default -f "web-server" -o json | jq -r '.[0].revision') 22 | 23 | if [ -z "$revision" ] || [ "$revision" = "null" ]; then 24 | echo "❌ Could not determine revision for release 'web-server'" 25 | exit 1 26 | fi 27 | 28 | if [ "$revision" -le 1 ]; then 29 | echo "❌ Release 'web-server' has not been upgraded (revision: $revision)" 30 | exit 1 31 | fi 32 | 33 | echo "✅ Release 'web-server' has been upgraded to revision $revision" 34 | exit 0 -------------------------------------------------------------------------------- /facilitator/entrypoint.sh: -------------------------------------------------------------------------------- 1 | 2 | find /usr/src/app/assets/exams -type d -path "*/*/scripts" | while read scripts_dir; do \ 3 | exam_dir=$(dirname "$scripts_dir"); \ 4 | cd "$exam_dir"; \ 5 | echo "Creating tar archive of scripts in $exam_dir"; \ 6 | tar -czf assets.tar.gz scripts/; \ 7 | rm -rf scripts/; \ 8 | cd - > /dev/null; \ 9 | done 10 | 11 | echo "Assets created" 12 | 13 | # Start the application 14 | node src/app.js -------------------------------------------------------------------------------- /facilitator/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "facilitator", 3 | "version": "1.0.0", 4 | "main": "src/app.js", 5 | "scripts": { 6 | "start": "node src/app.js", 7 | "dev": "nodemon src/app.js", 8 | "test": "echo \"Error: no test specified\" && exit 1" 9 | }, 10 | "keywords": [], 11 | "author": "", 12 | "license": "ISC", 13 | "description": "A service that acts as an SSH jumphost and provides exam management API endpoints", 14 | "dependencies": { 15 | "axios": "^1.8.4", 16 | "cors": "^2.8.5", 17 | "dotenv": "^16.3.1", 18 | "express": "^4.18.2", 19 | "helmet": "^7.1.0", 20 | "joi": "^17.11.0", 21 | "morgan": "^1.10.0", 22 | "redis": "^4.6.13", 23 | "ssh2": "^1.15.0", 24 | "uuid": "^11.1.0", 25 | "winston": "^3.11.0" 26 | }, 27 | "devDependencies": { 28 | "eslint": "^8.56.0", 29 | "nodemon": "^3.0.2" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /facilitator/src/config/index.js: -------------------------------------------------------------------------------- 1 | require('dotenv').config(); 2 | 3 | const config = { 4 | port: process.env.PORT || 3000, 5 | env: process.env.NODE_ENV || 'development', 6 | 7 | ssh: { 8 | host: process.env.SSH_HOST || 'jumphost', 9 | port: parseInt(process.env.SSH_PORT || '22', 10), 10 | username: process.env.SSH_USERNAME || 'candidate', 11 | // Password is optional as jumphost allows passwordless authentication 12 | password: process.env.SSH_PASSWORD, 13 | privateKeyPath: process.env.SSH_PRIVATE_KEY_PATH, 14 | }, 15 | 16 | logging: { 17 | level: process.env.LOG_LEVEL || 'info', 18 | }, 19 | 20 | remoteDesktop: { 21 | host: process.env.REMOTE_DESKTOP_HOST || 'remote-desktop', 22 | port: process.env.REMOTE_DESKTOP_PORT || 5000 23 | }, 24 | }; 25 | 26 | module.exports = config; -------------------------------------------------------------------------------- /facilitator/src/controllers/remoteDesktopController.js: -------------------------------------------------------------------------------- 1 | const remoteDesktopService = require('../services/remoteDesktopService'); 
2 | const logger = require('../utils/logger');
3 | 
4 | /**
5 |  * Controller for handling remote desktop operations
6 |  */
7 | class RemoteDesktopController {
8 |   /**
9 |    * Copy content to the remote desktop clipboard
10 |    * @param {Object} req - Express request object
11 |    * @param {Object} res - Express response object
12 |    */
13 |   async copyToClipboard(req, res) {
14 |     try {
15 |       const { content } = req.body;
16 | 
17 |       if (!content) {
18 |         return res.sendStatus(400);
19 |       }
20 | 
21 |       await remoteDesktopService.copyToClipboard(content);
22 |       res.sendStatus(204);
23 | 
24 |     } catch (error) {
25 |       logger.error('Error in copyToClipboard controller', {
26 |         error: error.message
27 |       });
28 |       res.sendStatus(500);
29 |     }
30 |   }
31 | }
32 | 
33 | module.exports = new RemoteDesktopController();
--------------------------------------------------------------------------------
/facilitator/src/routes/assessmentRoutes.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const assessmentController = require('../controllers/assessmentController');
3 | 
4 | const router = express.Router();
5 | 
6 | /**
7 |  * @route GET /api/v1/assessments
8 |  * @desc Get all assessments
9 |  * @access Public
10 |  */
11 | router.get('/', assessmentController.getAssessments);
12 | 
13 | module.exports = router;
--------------------------------------------------------------------------------
/facilitator/src/routes/remoteDesktopRoutes.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const router = express.Router();
3 | const remoteDesktopController = require('../controllers/remoteDesktopController');
4 | 
5 | /**
6 |  * @route POST /api/remote-desktop/clipboard
7 |  * @desc Copy content to the remote desktop clipboard
8 |  * @access Private
9 |  */
10 | router.post('/clipboard', remoteDesktopController.copyToClipboard);
11 | 
12 | module.exports = router;
--------------------------------------------------------------------------------
/facilitator/src/routes/sshRoutes.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const sshController = require('../controllers/sshController');
3 | const { validateExecuteCommand } = require('../middleware/validators');
4 | 
5 | const router = express.Router();
6 | 
7 | /**
8 |  * @route POST /api/v1/execute
9 |  * @desc Execute a command on the SSH jumphost
10 |  * @access Public
11 |  */
12 | router.post('/execute', validateExecuteCommand, sshController.executeCommand);
13 | 
14 | module.exports = router;
--------------------------------------------------------------------------------
/jumphost/.dockerignore:
--------------------------------------------------------------------------------
1 | # Version control
2 | .git/
3 | .gitignore
4 | 
5 | # Temporary files
6 | *.tmp
7 | *.temp
8 | tmp/
9 | temp/
10 | 
11 | # Build artifacts
12 | *.tar.gz
13 | *.tgz
14 | 
15 | # Docker files
16 | Dockerfile
17 | docker-compose*.yml
18 | compose*.yaml
19 | .dockerignore
20 | 
21 | # Logs
22 | logs/
23 | *.log
24 | 
25 | # OS specific
26 | .DS_Store
27 | Thumbs.db
28 | 
29 | # Editor directories and files
30 | .idea/
31 | .vscode/
32 | *.swp
33 | *.swo
34 | *~
35 | 
36 | # Other unnecessary files
37 | *.gz
38 | *.zip
39 | *.tar
40 | *.rar
--------------------------------------------------------------------------------
/jumphost/scripts/cleanup-exam-env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | exec >> /proc/1/fd/1 2>&1
3 | 
4 | # cleanup-exam-env.sh
5 | #
6 | # Cleans up the exam environment on the jumphost, removing all resources
7 | # created during the exam so a new exam can be provisioned.
8 | #
9 | # Usage: cleanup-exam-env.sh
10 | #
11 | # Takes no arguments; the target cluster is read from $CLUSTER_NAME.
12 | 
13 | # Log function with timestamp
14 | log() {
15 |   echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
16 | }
17 | 
18 | log "Starting exam environment cleanup"
19 | log "Cleaning up cluster $CLUSTER_NAME"
20 | ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null candidate@k8s-api-server "env-cleanup $CLUSTER_NAME"
21 | 
22 | # Clean up the Docker environment ("system prune -a" already covers unused
23 | # networks and images; the prunes that follow are an extra safety pass)
24 | log "Cleaning up docker environment"
25 | docker system prune -af --volumes
26 | docker network prune -f
27 | docker image prune -af
28 | 
29 | # Remove the exam environment directory
30 | log "Removing exam environment directory"
31 | rm -rf /tmp/exam-env
32 | rm -rf /tmp/exam
33 | 
34 | # Remove the exam assets directory
35 | log "Removing exam assets directory"
36 | rm -rf /tmp/exam-assets
37 | 
38 | log "Exam environment cleanup completed successfully"
39 | exit 0
--------------------------------------------------------------------------------
/kind-cluster/.dockerignore:
--------------------------------------------------------------------------------
1 | # Version control
2 | .git/
3 | .gitignore
4 | 
5 | # Temporary files
6 | *.tmp
7 | *.temp
8 | tmp/
9 | temp/
10 | 
11 | # Build artifacts
12 | *.tar.gz
13 | *.tgz
14 | 
15 | # Docker files
16 | Dockerfile
17 | docker-compose*.yml
18 | compose*.yaml
19 | .dockerignore
20 | 
21 | # Logs
22 | logs/
23 | *.log
24 | 
25 | # OS specific
26 | .DS_Store
27 | Thumbs.db
28 | 
29 | # Editor directories and files
30 | .idea/
31 | .vscode/
32 | *.swp
33 | *.swo
34 | *~
35 | 
36 | # Other unnecessary files
37 | *.gz
38 | *.zip
39 | *.tar
40 | *.rar
--------------------------------------------------------------------------------
/nginx/.dockerignore:
--------------------------------------------------------------------------------
1 | # Version control
2 | .git/
3 | .gitignore
4 | 
5 | # Temporary files
6 | *.tmp
7 | *.temp
8 | tmp/
9 | temp/
10 | 
11 | # Build artifacts
12 | *.tar.gz
13 | *.tgz
14 | 
15 | # Docker files
16 | Dockerfile
17 | docker-compose*.yml
18 | compose*.yaml
19 | .dockerignore
20 | 
21 | # Logs
22 | logs/
23 | *.log
24 | 
25 | # OS specific
26 | .DS_Store
27 | Thumbs.db
28 | 
29 | # Editor directories and files
30 | .idea/
31 | .vscode/
32 | *.swp
33 | *.swo
34 | *~
35 | 
36 | # Other unnecessary files
37 | *.gz
38 | *.zip
39 | *.tar
40 | *.rar
--------------------------------------------------------------------------------
/nginx/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx:1.21-alpine
2 | 
3 | # Remove the default Nginx configuration
4 | RUN rm /etc/nginx/conf.d/default.conf
5 | 
6 | # Copy our custom configuration
7 | COPY default.conf /etc/nginx/conf.d/
8 | 
9 | # Expose port 80
10 | EXPOSE 80
11 | 
12 | # Start Nginx
13 | CMD ["nginx", "-g", "daemon off;"]
--------------------------------------------------------------------------------
/remote-desktop/.dockerignore:
--------------------------------------------------------------------------------
1 | # Version control
2 | .git/
3 | .gitignore
4 | 
5 | # Temporary files
6 | *.tmp
7 | *.temp
8 | tmp/
9 | temp/
10 | 
11 | # Build artifacts
12 | *.tar.gz
13 | *.tgz
14 | 
15 | # Docker files
16 | Dockerfile
17 | docker-compose*.yml
18 | compose*.yaml
19 | .dockerignore
20 | 
21 | # Logs
22 | logs/
23 | *.log
24 | 
25 | # OS specific
26 | .DS_Store
27 | Thumbs.db
28 | 
29 | # Editor directories and files
30 | .idea/
31 | .vscode/
32 | *.swp
33 | *.swo
34 | *~
35 | 
36 | # Other unnecessary files
37 | *.gz
38 | *.zip
39 | *.tar
40 | *.rar
--------------------------------------------------------------------------------
/remote-desktop/startup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | # Log startup
4 | echo "Starting CK-X VNC service at $(date)"
5 | 
6 | # Print a clipboard hint at every login for the candidate user
7 | echo "echo 'Use Ctrl + Shift + C for copying and Ctrl + Shift + V for pasting'" >> /home/candidate/.bashrc
8 | 
9 | # Run the agent in the background so it doesn't block the main container startup
10 | python3 /tmp/agent.py &
11 | 
12 | exit 0
--------------------------------------------------------------------------------
/remote-terminal/Dockerfile:
--------------------------------------------------------------------------------
1 | # Use a minimal Alpine Linux base image
2 | FROM alpine:latest
3 | 
4 | # Install OpenSSH server
5 | RUN apk add --no-cache openssh openrc
6 | 
7 | # Create the candidate user and set its password
8 | RUN adduser -D candidate && echo "candidate:password" | chpasswd
9 | 
10 | # Configure SSH for password authentication
11 | RUN echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
12 | RUN echo "StrictHostKeyChecking no" >> /etc/ssh/ssh_config
13 | RUN echo "UserKnownHostsFile=/dev/null" >> /etc/ssh/ssh_config
14 | 
15 | # Copy the MOTD file
16 | COPY motd /etc/motd
17 | 
18 | # Generate SSH host keys (required for sshd to start)
19 | RUN ssh-keygen -A
20 | 
21 | # Expose SSH port
22 | EXPOSE 22
23 | 
24 | # Start SSH daemon when the container runs
25 | CMD ["/usr/sbin/sshd", "-D"]
26 | 
--------------------------------------------------------------------------------
/remote-terminal/motd:
--------------------------------------------------------------------------------
1 | ==========================================================================
2 |                       Welcome to CK-X Exam Terminal
3 | ==========================================================================
4 | 
5 | You are now connected to the exam environment terminal.
6 | 
7 | - Use this terminal to complete the exam tasks
8 | - All commands are executed as the 'candidate' user
9 | - Your work is automatically saved and evaluated
10 | 
11 | IMPORTANT: Do not attempt to modify system files or bypass restrictions.
12 | Such actions violate the terms of the exam.
13 | 
14 | ==========================================================================
--------------------------------------------------------------------------------
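
For reference, the facilitator API defined by the route files above can be smoke-tested with curl. The sketch below is a minimal example, not part of the repository: it assumes the routers are mounted at the paths their JSDoc comments declare, that the service listens on port 3000 (the default in /facilitator/src/config/index.js), and that the execute endpoint expects a JSON body with a "command" field -- the validator and sshController are not shown in this listing, so that field name is an assumption.

#!/bin/sh
# Hypothetical smoke test for the facilitator API (not a repo file).
# Paths follow the @route JSDoc comments; the "command" payload field
# is an assumption, since middleware/validators.js is not shown above.

BASE=http://localhost:3000

# List all assessments (assessmentRoutes.js)
curl -s "$BASE/api/v1/assessments"

# Execute a command on the SSH jumphost (sshRoutes.js)
curl -s -X POST "$BASE/api/v1/execute" \
  -H 'Content-Type: application/json' \
  -d '{"command": "kubectl get nodes"}'

# Copy text to the remote desktop clipboard (remoteDesktopRoutes.js).
# Per remoteDesktopController.js, this prints 204 on success and 400
# when the "content" field is missing from the request body.
curl -s -o /dev/null -w '%{http_code}\n' \
  -X POST "$BASE/api/remote-desktop/clipboard" \
  -H 'Content-Type: application/json' \
  -d '{"content": "copied from the exam UI"}'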