├── .github └── workflows │ └── docker-multiarch-build.yaml ├── .gitignore ├── AIOps ├── ChatGPT │ └── rubra.ai │ │ ├── README.md │ │ ├── Vagrantfile │ │ ├── custom-git.sh │ │ ├── docker_build.sh │ │ └── run_rubra.ai.sh └── K8sGPT │ ├── Vagrantfile-LocalAI │ ├── Vagrantfile │ ├── controlplane_node.sh │ ├── extra-k8s-pkgs.sh │ ├── extra-k8s-pkgs │ │ ├── get-helm-3.9.1.sh │ │ ├── helm-repo-add.sh │ │ ├── metallb-iprange.yaml │ │ ├── metallb-l2mode.yaml │ │ ├── metallb-native-v0.13.7.yaml │ │ ├── metrics-server-0.6.1.yaml │ │ ├── nfs-exporter.sh │ │ ├── nfs-provisioner.yaml │ │ └── storageclass.yaml │ ├── k8s_env_build.sh │ ├── k8s_pkg_cfg.sh │ ├── resize2fs.sh │ ├── run_models.sh │ ├── single_darwin_ollama_w_k8sgpt.sh │ └── worker_nodes.sh │ └── run_ollama_n_k8sgpt │ ├── README.md │ ├── modelfile │ └── run_ollama_n_k8sgpt.sh ├── Argo └── argo-cd │ └── app-gitops.yaml ├── Bento ├── README.md ├── bento │ ├── AMZ_README_FIRST.md │ ├── CHANGELOG.md │ ├── CONTRIBUTING.md │ ├── Gemfile │ ├── LICENSE │ ├── NOTICE.md │ ├── README.md │ ├── Rakefile │ ├── TESTING.md │ ├── amazonlinux-2023-aarch64-virtualbox-build.sh │ ├── amazonlinux-2023-x86_64-virtualbox-build.sh │ ├── bento.gemspec │ ├── bin │ │ └── bento │ ├── builds.yml │ ├── builds │ │ ├── build_files │ │ │ └── .gitkeep │ │ ├── failed_testing │ │ │ └── .gitkeep │ │ ├── ubuntu-22.04-aarch64._metadata.json │ │ └── uploaded │ │ │ └── .gitkeep │ ├── lib │ │ ├── bento.rb │ │ └── bento │ │ │ ├── buildmetadata.rb │ │ │ ├── cli.rb │ │ │ ├── common.rb │ │ │ ├── normalize.rb │ │ │ ├── packerexec.rb │ │ │ ├── providermetadata.rb │ │ │ ├── runner.rb │ │ │ ├── test.rb │ │ │ ├── test_templates │ │ │ ├── bootstrap.sh.erb │ │ │ └── kitchen.yml.erb │ │ │ ├── upload.rb │ │ │ └── version.rb │ ├── os_pkrvars │ │ ├── almalinux │ │ │ ├── almalinux-8-aarch64.pkrvars.hcl │ │ │ ├── almalinux-8-x86_64.pkrvars.hcl │ │ │ ├── almalinux-9-aarch64.pkrvars.hcl │ │ │ └── almalinux-9-x86_64.pkrvars.hcl │ │ ├── amazonlinux │ │ │ ├── 
amazonlinux-2023-aarch64.pkrvars.hcl │ │ │ └── amazonlinux-2023-x86_64.pkrvars.hcl │ │ ├── centos │ │ │ ├── centos-stream-10-aarch64.pkrvars.hcl │ │ │ ├── centos-stream-10-x86_64.pkrvars.hcl │ │ │ ├── centos-stream-9-aarch64.pkrvars.hcl │ │ │ └── centos-stream-9-x86_64.pkrvars.hcl │ │ ├── debian │ │ │ ├── debian-11-aarch64.pkrvars.hcl │ │ │ ├── debian-11-x86_64.pkrvars.hcl │ │ │ ├── debian-12-aarch64.pkrvars.hcl │ │ │ └── debian-12-x86_64.pkrvars.hcl │ │ ├── fedora │ │ │ ├── fedora-40-aarch64.pkrvars.hcl │ │ │ ├── fedora-40-x86_64.pkrvars.hcl │ │ │ ├── fedora-41-aarch64.pkrvars.hcl │ │ │ ├── fedora-41-x86_64.pkrvars.hcl │ │ │ ├── fedora-42-aarch64.pkrvars.hcl │ │ │ └── fedora-42-x86_64.pkrvars.hcl │ │ ├── freebsd │ │ │ ├── freebsd-13-aarch64.pkrvars.hcl │ │ │ ├── freebsd-13-x86_64.pkrvars.hcl │ │ │ ├── freebsd-14-aarch64.pkrvars.hcl │ │ │ └── freebsd-14-x86_64.pkrvars.hcl │ │ ├── macos │ │ │ ├── macos-14-aarch64.pkrvars.hcl │ │ │ └── macos-15-aarch64.pkrvars.hcl │ │ ├── opensuse │ │ │ ├── opensuse-leap-15-aarch64.pkrvars.hcl │ │ │ └── opensuse-leap-15-x86_64.pkrvars.hcl │ │ ├── oraclelinux │ │ │ ├── oraclelinux-8-aarch64.pkrvars.hcl │ │ │ ├── oraclelinux-8-x86_64.pkrvars.hcl │ │ │ ├── oraclelinux-9-aarch64.pkrvars.hcl │ │ │ └── oraclelinux-9-x86_64.pkrvars.hcl │ │ ├── rhel │ │ │ ├── rhel-8-aarch64.pkrvars.hcl │ │ │ ├── rhel-8-x86_64.pkrvars.hcl │ │ │ ├── rhel-9-aarch64.pkrvars.hcl │ │ │ └── rhel-9-x86_64.pkrvars.hcl │ │ ├── rockylinux │ │ │ ├── rockylinux-8-aarch64.pkrvars.hcl │ │ │ ├── rockylinux-8-x86_64.pkrvars.hcl │ │ │ ├── rockylinux-9-aarch64.pkrvars.hcl │ │ │ └── rockylinux-9-x86_64.pkrvars.hcl │ │ ├── sles │ │ │ ├── sles-15-aarch64.pkrvars.hcl │ │ │ └── sles-15-x86_64.pkrvars.hcl │ │ ├── solaris │ │ │ └── solaris-11-x86_64.pkrvars.hcl │ │ ├── ubuntu │ │ │ ├── ubuntu-22.04-aarch64.pkrvars.hcl │ │ │ ├── ubuntu-22.04-x86_64.pkrvars.hcl │ │ │ ├── ubuntu-24.04-aarch64.pkrvars.hcl │ │ │ └── ubuntu-24.04-x86_64.pkrvars.hcl │ │ └── windows │ │ │ ├── 
windows-10-x86_64.pkrvars.hcl │ │ │ ├── windows-11-aarch64.pkrvars.hcl │ │ │ ├── windows-11-x86_64.pkrvars.hcl │ │ │ ├── windows-2016-x86_64.pkrvars.hcl │ │ │ ├── windows-2019-x86_64.pkrvars.hcl │ │ │ ├── windows-2022-x86_64.pkrvars.hcl │ │ │ └── windows-2025-x86_64.pkrvars.hcl │ └── packer_templates │ │ ├── http │ │ ├── amazon │ │ │ ├── meta-data │ │ │ └── user-data │ │ ├── debian │ │ │ └── preseed.cfg │ │ ├── fedora │ │ │ └── ks.cfg │ │ ├── freebsd │ │ │ └── installerconfig │ │ ├── opensuse │ │ │ ├── autoinst-uefi.xml │ │ │ └── autoinst.xml │ │ ├── rhel │ │ │ ├── 10ks.cfg │ │ │ ├── 8ks.cfg │ │ │ └── 9ks.cfg │ │ ├── sles │ │ │ ├── 12-autoinst.xml │ │ │ └── 15-autoinst.xml │ │ ├── solaris │ │ │ ├── default.xml │ │ │ └── profile.xml │ │ └── ubuntu │ │ │ ├── meta-data │ │ │ └── user-data │ │ ├── pkr-builder.pkr.hcl │ │ ├── pkr-sources.pkr.hcl │ │ ├── pkr-variables.pkr.hcl │ │ ├── scripts │ │ ├── _common │ │ │ ├── metadata.sh │ │ │ ├── minimize.sh │ │ │ ├── motd.sh │ │ │ ├── parallels.sh │ │ │ ├── parallels_post_cleanup_debian_ubuntu.sh │ │ │ ├── sshd.sh │ │ │ ├── vagrant.sh │ │ │ ├── virtualbox.sh │ │ │ └── vmware.sh │ │ ├── custom_post_hoon.sh │ │ ├── custom_pre_hoon.sh │ │ ├── debian │ │ │ ├── cleanup_debian.sh │ │ │ ├── hyperv_debian.sh │ │ │ ├── networking_debian.sh │ │ │ ├── sudoers_debian.sh │ │ │ ├── systemd_debian.sh │ │ │ └── update_debian.sh │ │ ├── fedora │ │ │ ├── build-tools_fedora.sh │ │ │ ├── cleanup_dnf.sh │ │ │ ├── install-supporting-packages_fedora.sh │ │ │ ├── networking_fedora.sh │ │ │ ├── real-tmp_fedora.sh │ │ │ └── update_dnf.sh │ │ ├── freebsd │ │ │ ├── cleanup_freebsd.sh │ │ │ ├── minimize_freebsd.sh │ │ │ ├── postinstall_freebsd.sh │ │ │ ├── sudoers_freebsd.sh │ │ │ ├── update_freebsd.sh │ │ │ └── vmtools_freebsd.sh │ │ ├── macos │ │ │ ├── disable_auto_update.sh │ │ │ ├── parallels-tools.sh │ │ │ ├── shrink.sh │ │ │ ├── system-default.sh │ │ │ ├── system-update-complete.sh │ │ │ ├── system-update.sh │ │ │ ├── vagrant.sh │ │ │ └── 
vmware-tools.sh │ │ ├── rhel │ │ │ ├── cleanup_dnf.sh │ │ │ └── update_dnf.sh │ │ ├── solaris │ │ │ ├── minimize_solaris.sh │ │ │ ├── update_solaris.sh │ │ │ └── vmtools_solaris.sh │ │ ├── suse │ │ │ ├── cleanup_suse.sh │ │ │ ├── remove-dvd-source_suse.sh │ │ │ ├── repositories_suse.sh │ │ │ ├── sudoers_suse.sh │ │ │ ├── unsupported-modules_suse.sh │ │ │ ├── update_suse.sh │ │ │ ├── vagrant_group_suse.sh │ │ │ └── zypper-locks_suse.sh │ │ ├── ubuntu │ │ │ ├── cleanup_ubuntu.sh │ │ │ ├── hyperv_ubuntu.sh │ │ │ ├── networking_ubuntu.sh │ │ │ ├── sudoers_ubuntu.sh │ │ │ ├── systemd_ubuntu.sh │ │ │ └── update_ubuntu.sh │ │ └── windows │ │ │ ├── cleanup.ps1 │ │ │ ├── configure-power.ps1 │ │ │ ├── disable-screensaver.ps1 │ │ │ ├── disable-system-restore.ps1 │ │ │ ├── disable-windows-defender.ps1 │ │ │ ├── disable-windows-uac.ps1 │ │ │ ├── disable-windows-updates.ps1 │ │ │ ├── eject-media.ps1 │ │ │ ├── enable-file-sharing.ps1 │ │ │ ├── enable-remote-desktop.ps1 │ │ │ ├── optimize.ps1 │ │ │ ├── provision.ps1 │ │ │ ├── remove-apps.ps1 │ │ │ ├── remove-capabilities.ps1 │ │ │ ├── remove-features.ps1 │ │ │ ├── remove-one-drive-and-teams.ps1 │ │ │ └── ui-tweaks.ps1 │ │ ├── vagrantfile-freebsd.template │ │ ├── vagrantfile-windows.template │ │ └── win_answer_files │ │ ├── 10 │ │ ├── Autounattend.xml │ │ └── hyperv-gen2 │ │ │ └── Autounattend.xml │ │ ├── 11 │ │ ├── Autounattend.xml │ │ ├── arm64 │ │ │ └── Autounattend.xml │ │ └── hyperv-gen2 │ │ │ └── Autounattend.xml │ │ ├── 2016 │ │ ├── Autounattend.xml │ │ └── hyperv-gen2 │ │ │ └── Autounattend.xml │ │ ├── 2019 │ │ ├── Autounattend.xml │ │ └── hyperv-gen2 │ │ │ └── Autounattend.xml │ │ ├── 2022 │ │ ├── Autounattend.xml │ │ └── hyperv-gen2 │ │ │ └── Autounattend.xml │ │ └── 2025 │ │ ├── Autounattend.xml │ │ └── hyperv-gen2 │ │ └── Autounattend.xml └── update_bento_repo_w_custom.sh ├── Cloud-Native └── Samples │ ├── 1.cp-istioctl-v1.16.1.sh │ ├── 2.ins-istio-demo.sh │ ├── 3.ins-online-boutique-app.sh │ ├── 
deploy-chk-info-w-LB.yaml │ └── deploy-nginx-w-LB.yaml ├── Docker ├── Dockerfiles │ ├── audit-trail(chk-log) │ │ ├── Dockerfile │ │ └── nginx.conf │ ├── chk-info │ │ ├── Dockerfile │ │ ├── app.conf │ │ ├── cert.crt │ │ ├── cert.key │ │ └── index.html │ ├── dashboard │ │ └── Dockerfile │ ├── echo-hname(chk-hn) │ │ ├── Dockerfile │ │ ├── cert.crt │ │ ├── cert.key │ │ └── nginx.conf │ ├── echo-ip(chk-ip) │ │ ├── Dockerfile │ │ ├── cert.crt │ │ ├── cert.key │ │ └── nginx.conf │ ├── healthz-nginx │ │ ├── Dockerfile │ │ ├── app.conf │ │ ├── cert.crt │ │ ├── cert.key │ │ └── healthz │ ├── hello │ │ ├── v1 │ │ │ ├── .app.conf.swp │ │ │ ├── Dockerfile │ │ │ ├── app.conf │ │ │ ├── cert.crt │ │ │ ├── cert.key │ │ │ └── index.html │ │ └── v2 │ │ │ ├── Dockerfile │ │ │ ├── app.conf │ │ │ ├── cert.crt │ │ │ ├── cert.key │ │ │ └── index.html │ ├── hpa-cpu-memory │ │ ├── Dockerfile │ │ ├── index.html │ │ ├── memory.py │ │ ├── nginx.conf │ │ └── run.sh │ ├── kubecon-eu │ │ ├── latest │ │ │ ├── .app.conf.swp │ │ │ ├── Dockerfile │ │ │ ├── app.conf │ │ │ ├── cert.crt │ │ │ ├── cert.key │ │ │ └── index.html │ │ └── swp-img │ │ │ ├── .app.conf.swp │ │ │ ├── Dockerfile │ │ │ ├── app.conf │ │ │ ├── cert.crt │ │ │ └── cert.key │ ├── kustomize │ │ └── Dockerfile │ ├── multi-proc │ │ ├── Dockerfile │ │ └── supervisord.conf │ ├── mysql-auth │ │ ├── Dockerfile │ │ ├── docker-entrypoint.sh │ │ └── old │ │ │ ├── Dockerfile │ │ │ ├── config │ │ │ ├── conf.d │ │ │ │ └── docker.cnf │ │ │ └── my.cnf │ │ │ └── docker-entrypoint.sh │ ├── net-tools-ifn │ │ ├── Dockerfile │ │ ├── bashrc │ │ └── curlchk │ ├── net-tools │ │ ├── Dockerfile │ │ ├── bashrc │ │ └── curlchk │ ├── ollama-gemma2:2b │ │ └── Dockerfile │ ├── ollama-llama3.2:1b │ │ └── Dockerfile │ ├── ollama-qwen2.5:1.5b │ │ └── Dockerfile │ ├── sleepy │ │ ├── Dockerfile │ │ └── bashrc │ ├── ssh-root │ │ ├── Dockerfile │ │ └── bashrc │ ├── ssh │ │ ├── Dockerfile │ │ └── bashrc │ └── tardy-nginx │ │ ├── Dockerfile │ │ └── startup.sh ├── Registry │ 
├── create-registry.sh │ ├── remover.sh │ └── tls.csr ├── build-kind │ ├── Basic │ │ ├── .mvn │ │ │ └── wrapper │ │ │ │ ├── MavenWrapperDownloader.java │ │ │ │ ├── maven-wrapper.jar │ │ │ │ └── maven-wrapper.properties │ │ ├── Dockerfile │ │ ├── mvnw │ │ ├── pom.xml │ │ └── src │ │ │ └── main │ │ │ ├── java │ │ │ └── com │ │ │ │ └── stark │ │ │ │ └── Industries │ │ │ │ ├── UltronPRJApplication.java │ │ │ │ └── UltronPRJController.java │ │ │ └── resources │ │ │ └── application.properties │ ├── MultiStage │ │ └── Dockerfile │ ├── NoHost │ │ └── Dockerfile │ └── Optimal │ │ ├── .mvn │ │ └── wrapper │ │ │ ├── MavenWrapperDownloader.java │ │ │ ├── maven-wrapper.jar │ │ │ └── maven-wrapper.properties │ │ ├── Dockerfile │ │ ├── build-in-host.sh │ │ ├── mvnw │ │ ├── pom.xml │ │ └── src │ │ └── main │ │ ├── java │ │ └── com │ │ │ └── stark │ │ │ └── Industries │ │ │ ├── UltronPRJApplication.java │ │ │ └── UltronPRJController.java │ │ └── resources │ │ └── application.properties ├── docker-builder │ ├── README.md │ ├── Vagrantfile │ ├── custom-git.sh │ ├── docker_build.sh │ ├── env_build.sh │ └── minikube-build.sh ├── index-BindMount.html └── index-Volume.html ├── GCP ├── gcloud-helper.sh └── gcloud_conv ├── GitOps └── deploy-chk-info.yaml ├── Jenkins └── dev-to-prod │ ├── deploy-dev-qa-101.freestyle │ ├── deploy-qa-passed-prod.freestyle │ └── namespaces.yaml ├── Keycloak ├── README.md ├── oncloud-1.site │ ├── .cmd │ ├── Deploy-infra │ │ ├── .cmd │ │ ├── 1-1.deploy-gke-env-w-keycloak.sh │ │ ├── 1-2.deploy-gke-ingress-4-https-keycloak.yaml │ │ ├── 1-3.clientconfig-gke-keycloak-w-oidc.yaml │ │ ├── 2-1.deploy-eks-env-only.sh │ │ ├── eksctl-config │ │ │ └── keycloak-w-oidc.yaml │ │ └── planB-keycloak-by-helm.yaml │ ├── EKS │ │ ├── 1.clusterrolebinding-4-devops-group-as-admin.yaml │ │ ├── 2-oncloud-1.swtich-ctx-hoon-to-soojin.sh │ │ └── 3.set-cred-4-oidc-user.sh │ ├── GKE │ │ ├── 1.clusterrolebinding-4-devops-group-as-admin-NOTWORK.yaml │ │ ├── 
2-hoon.clusterrolebinding-4-devops-user-as-admin.yaml │ │ └── 2-soojin.clusterrolebinding-4-devops-user-as-admin.yaml │ └── kubelogin-installer.sh ├── oncloud-2.site │ ├── Deploy-infra │ │ ├── .cmd │ │ ├── 1-1.deploy-gke-env-w-keycloak.sh │ │ ├── 1-2.deploy-gke-ingress-4-https-keycloak.yaml │ │ ├── 1-3.clientconfig-gke-keycloak-w-oidc.yaml │ │ ├── 2-1.deploy-eks-env-only.sh │ │ ├── eksctl-config │ │ │ └── keycloak-w-oidc.yaml │ │ └── planB-keycloak-by-helm.yaml │ ├── EKS │ │ ├── 1.clusterrolebinding-4-devops-group-as-admin.yaml │ │ ├── 2-oncloud-2.swtich-ctx-hoon-to-soojin.sh │ │ └── 3.set-cred-4-oidc-user.sh │ ├── GKE │ │ ├── 1.clusterrolebinding-4-devops-group-as-admin-NOTWORK.yaml │ │ ├── 2-hoon.clusterrolebinding-4-devops-user-as-admin.yaml │ │ └── 2-soojin.clusterrolebinding-4-devops-user-as-admin.yaml │ └── kubelogin-installer.sh └── prerequisite-install-tools.sh ├── NXOSv ├── mac │ └── Vagrantfile └── win │ └── Vagrantfile ├── PaC └── CEL │ ├── GCP-Organization-Policy │ └── README.md │ ├── MutatingAdmissionPolicy │ └── README.md │ ├── README.md │ └── ValidatingAdmissionPolicy │ ├── README.md │ ├── gatekeeper-{{Rego,CEL}} │ ├── 0.gatekeeper_installer.yaml │ ├── CEL │ │ ├── 1-1.CEL-ConstraintTemplate-NoHostNetwork.yaml │ │ ├── 1-2.CEL-Constraint-NoHostNetwork.yaml │ │ ├── 2-1.CEL-ConstraintTemplate-NoHostPath.yaml │ │ ├── 2-2.CEL-Constraint-NoHostPath.yaml │ │ ├── 3-1.CEL-ConstraintTemplate-NoReplicasLess3.yaml │ │ └── 3-2.CEL-Constraint-NoReplicasLess3.yaml │ └── Rego │ │ ├── 1-1.Rego-ConstraintTemplate-NoHostNetwork.yaml │ │ ├── 1-2.Rego-Constraint-NoHostNetwork.yaml │ │ ├── 2-1.Rego-ConstraintTemplate-NoHostPath.yaml │ │ ├── 2-2.Rego-Constraint-NoHostPath.yaml │ │ ├── 3-1.Rego-ConstraintTemplate-NoReplicasLess3.yaml │ │ └── 3-2.Rego-Constraint-NoReplicasLess3.yaml │ ├── k8s_native-{{CEL}} │ ├── 1.CEL-ValidatingAdmissionPolicy-NoHostNetwork.yaml │ ├── 2.CEL-ValidatingAdmissionPolicy-NoHostPath.yaml │ └── 3.CEL-ValidatingAdmissionPolicy-NoReplicasLess3.yaml │ 
├── kyverno-{{Yaml,CEL}} │ ├── 0.kyverno_installer.yaml │ ├── CEL │ │ ├── 1.CEL-ClusterPolicy-NoHostNetwork.yaml │ │ ├── 2.CEL-ClusterPolicy-NoHostPath.yaml │ │ └── 3.CEL-ClusterPolicy-NoReplicasLess3.yaml │ └── Yaml │ │ ├── 1.Yaml-ClusterPolicy-NoHostNetwork.yaml │ │ ├── 2.Yaml-ClusterPolicy-NoHostPath.yaml │ │ └── 3.Yaml-ClusterPolicy-NoReplicasLess3.yaml │ └── sample-apps │ ├── hostNetwork │ ├── no-hostNetwork.yaml │ └── yes-hostNetwork.yaml │ ├── hostPath │ ├── no-hostPath.yaml │ ├── no-matched-replicas.yaml │ ├── yes-hostPath.yaml │ └── yes-matched-deployment.yaml │ └── replicas │ ├── 1-replicas.yaml │ └── 3-replicas.yaml ├── Prometheus ├── gke │ ├── gke-dcgm-exporter.yaml │ └── gke-node-exporter.yaml └── mk-demo-prom-darksite │ ├── README.md │ ├── Vagrantfile │ ├── demo promlens.url │ ├── minikube-build.sh │ ├── prom.sh │ └── startup-prom.sh ├── README.md ├── Terraform └── t7m-Console │ ├── README.md │ ├── Vagrantfile │ └── t7m_pkg_cfg.sh ├── k8s ├── C │ ├── k8s-MultipleMaster_by_kubespray │ │ ├── Readme │ │ ├── Vagrantfile │ │ ├── auto_pass.sh │ │ ├── config.sh │ │ ├── install_pkg.sh │ │ └── pre-kubespray.sh │ ├── k8s-SingleMaster-1.13.1 │ │ ├── Vagrantfile │ │ ├── config.sh │ │ ├── install_pkg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-SingleMaster-18.9_9_w_auto-compl │ │ ├── Vagrantfile │ │ ├── config.sh │ │ ├── install_pkg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-min-1.16.15-iprange16 │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-min-5GB-1.16.15-iprange1 │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-min-5GiB-1.20.1-iprange1 │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-min-5GiB-1.25.0 │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ ├── 
vagrantup-v1.20.output │ │ ├── vagrantup-v1.25.output │ │ └── work_nodes.sh │ ├── k8s-min-containerD-only-MST │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-min-containerD │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-min-post-systemd │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-min-systemd │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-multicontext │ │ ├── Vagrantfile │ │ ├── console.sh │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-natvie-lab │ │ ├── Readme.txt │ │ ├── Vagrantfile │ │ ├── config.sh │ │ └── install_pkg.sh │ └── k8s-rook-ceph │ │ ├── README.md │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ ├── pkg_by_helm.sh │ │ ├── rook-ceph.sh │ │ └── work_nodes.sh ├── CNI │ ├── 172.16_net_calico_v3.17.1.yaml │ ├── 172.16_net_calico_v3.26.0.yaml │ ├── calico-quay-v3.29.2.yaml │ ├── calico-quay-v3.30.1.yaml │ └── cilium-v1.17.4-w-hubble.yaml ├── U │ ├── k8s-multicontext-vagrant-user │ │ ├── Vagrantfile │ │ ├── console.sh │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ ├── k8s-multicontext │ │ ├── Vagrantfile │ │ ├── console.sh │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ └── work_nodes.sh │ └── k8s-v1.27.0 │ │ ├── README.md │ │ ├── Vagrantfile │ │ ├── k8s_env_build.sh │ │ ├── k8s_pkg_cfg.sh │ │ ├── master_node.sh │ │ ├── pkg_by_helm.sh │ │ └── work_nodes.sh ├── extra-pkgs │ ├── v1.24.4 │ │ ├── get-helm-3.9.1.sh │ │ ├── helm-repo-add.sh │ │ ├── metallb-iprange.yaml │ │ ├── metallb-l2mode.yaml │ │ ├── metallb-native-v0.13.3.yaml │ │ ├── metrics-server-0.6.1.yaml │ │ ├── nfs-exporter.sh │ 
│ ├── nfs-provisioner.yaml │ │ └── storageclass.yaml │ ├── v1.26.1 │ │ ├── get-helm-3.9.1.sh │ │ ├── helm-repo-add.sh │ │ ├── metallb-iprange.yaml │ │ ├── metallb-l2mode.yaml │ │ ├── metallb-native-v0.13.7.yaml │ │ ├── metrics-server-0.6.1.yaml │ │ ├── nfs-exporter.sh │ │ ├── nfs-provisioner.yaml │ │ └── storageclass.yaml │ ├── v1.27.2 │ │ ├── README.md │ │ ├── get-helm-3.12.0.sh │ │ ├── metallb-iprange.yaml │ │ ├── metallb-l2mode.yaml │ │ ├── metallb-native-v0.13.10.yaml │ │ ├── metrics-server-notls-0.6.3.yaml │ │ ├── nfs-exporter.sh │ │ ├── nfs-provisioner-4.0.2.yaml │ │ └── storageclass.yaml │ ├── v1.30 │ │ ├── README.md │ │ ├── get_helm_v3.14.0.sh │ │ ├── ingress-ctrl-loadbalancer-v1.10.1.yaml │ │ ├── ingress-ctrl-nodeport-v1.10.1.yaml │ │ ├── metallb-iprange.yaml │ │ ├── metallb-l2mode.yaml │ │ ├── metallb-native-v0.14.4.yaml │ │ ├── metrics-server-notls-v0.7.1.yaml │ │ ├── nfs-provisioner-v4.0.2.yaml │ │ ├── nfs_exporter.sh │ │ └── storageclass.yaml │ └── v1.32 │ │ ├── README.md │ │ ├── cilium-iprange.yaml │ │ ├── cilium-l2mode.yaml │ │ ├── get_helm_v3.17.1.sh │ │ ├── ingress-ctrl-loadbalancer-v1.10.1.yaml │ │ ├── ingress-ctrl-nodeport-v1.10.1.yaml │ │ ├── metallb-iprange.yaml │ │ ├── metallb-l2mode.yaml │ │ ├── metallb-native-v0.14.9.yaml │ │ ├── metrics-server-notls-v0.7.1.yaml │ │ ├── nfs-provisioner-v4.0.2.yaml │ │ ├── nfs_exporter.sh │ │ └── storageclass.yaml ├── k8s-console │ ├── README.md │ ├── Vagrantfile │ ├── grap_kubeconfig.sh │ └── k8s_pkg_cfg.sh └── kwok │ ├── README.md │ ├── Vagrantfile │ └── kwok-build.sh ├── manifests ├── 172.16_net_calico.yaml ├── 172.16_net_calico_v1.yaml ├── CNI │ ├── 172.16_eht1_net_calico_v3.25.0.yaml │ └── 172.16_net_calico_v3.24.5.yaml ├── bash-completion.sh ├── busybox.yaml ├── echo-hname.yaml ├── echo-ip.yaml ├── ingress │ ├── 0.4.6 │ │ ├── cmd │ │ ├── deploy-hn.yaml │ │ ├── deploy-ip.yaml │ │ ├── deploy-nginx.yaml │ │ ├── ingress.yaml │ │ ├── ingress_ctrl_loadbalancer.yaml │ │ └── ingress_ctrl_nodeport.yaml │ ├── 
1.5.1 │ │ ├── .cmd │ │ ├── deploy-hn.yaml │ │ ├── deploy-ip.yaml │ │ ├── deploy-nginx.yaml │ │ ├── ingress.yaml │ │ └── ingress_ctrl_loadbalancer.yaml │ └── README.md ├── metrics-server.yaml ├── nginx-pod.yaml ├── req_page.ps1 ├── rollout-nginx.yaml ├── svc │ ├── ingress-config.yaml │ ├── ingress-nginx.yaml │ ├── ingress.yaml │ ├── metallb-0.10.2.yaml │ ├── metallb-0.9.6.yaml │ ├── metallb-l2config-xd.yaml │ ├── metallb-l2config.yaml │ ├── metallb-svc.yaml │ ├── metallb.yaml │ ├── nodeport.yaml │ ├── porter-l2config.yaml │ └── porter-svc.yaml └── vol │ ├── dynamic-pvc-deploy.yaml │ ├── dynamic-pvc.yaml │ ├── limits-pvc.yaml │ ├── nfs-client-provisioner │ ├── 0.Builder_nfs_server.sh │ ├── 1.nfs-client+.yaml │ ├── 2-1.claim-pvc1Gi.yaml │ ├── 2-2.use-pvc.yaml │ ├── 3.sts-claim-vct1Gi.yaml │ └── Readme.md │ ├── nfs-ip.yaml │ ├── nfs-provisioner │ ├── 1.nfs-provisioner+.yaml │ ├── 2.claim-pvc1Gi.yaml │ ├── 3.sts-claim-vct1Gi.yaml │ └── Readme.md │ ├── nfs-pv.yaml │ ├── nfs-pvc-deploy.yaml │ ├── nfs-pvc-sts-svc.yaml │ ├── nfs-pvc-sts.yaml │ ├── nfs-pvc.yaml │ ├── quota-pvc.yaml │ └── spare_local-storage │ ├── local-pv-set.yaml │ ├── local-pvc-sts.yaml │ └── local-sc.yaml ├── nGrinder └── k8s │ ├── agents.yaml │ └── controller.yaml └── tools ├── convert-img-vmdk-qcow2 ├── convert-img-vmx-qcow2 ├── k8s_rc.sh └── kubetail.sh /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant/ 2 | .DS_Store 3 | *.vmdk 4 | -------------------------------------------------------------------------------- /AIOps/ChatGPT/rubra.ai/README.md: -------------------------------------------------------------------------------- 1 | # testing rubra.ai 2 | 3 | install scripts 4 | ``` 5 | curl -sfL https://get.rubra.ai | bash -s -- start 6 | ``` 7 | 8 | 9 | -------------------------------------------------------------------------------- /AIOps/ChatGPT/rubra.ai/docker_build.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # avoid 'dpkg-reconfigure: unable to re-open stdin: No file or directory' 4 | export DEBIAN_FRONTEND=noninteractive 5 | 6 | # add docker-ce repo with containerd 7 | apt-get update && apt-get install -y gnupg lsb-release 8 | curl -fsSL \ 9 | https://download.docker.com/linux/ubuntu/gpg \ 10 | | gpg --dearmor -o /etc/apt/keyrings/docker-archive-keyring.gpg 11 | echo \ 12 | "deb [arch=$(dpkg --print-architecture) \ 13 | signed-by=/etc/apt/keyrings/docker-archive-keyring.gpg] \ 14 | https://download.docker.com/linux/ubuntu \ 15 | $(lsb_release -cs) stable" \ 16 | | tee /etc/apt/sources.list.d/docker.list > /dev/null 17 | 18 | # install & enable docker 19 | apt-get update 20 | apt-get install docker-ce=$1 -y 21 | 22 | # install docker-compose 23 | curl -L https://github.com/docker/compose/releases/download/v2.24.4/docker-compose-linux-x86_64 -o /usr/local/bin/docker-compose 24 | chmod 755 /usr/local/bin/docker-compose 25 | 26 | -------------------------------------------------------------------------------- /AIOps/ChatGPT/rubra.ai/run_rubra.ai.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # avoid 'dpkg-reconfigure: unable to re-open stdin: No file or directory' 4 | export DEBIAN_FRONTEND=noninteractive 5 | 6 | # unknown issue for python 7 | mv /usr/local/lib/python3.10/dist-packages/ /usr/local/lib/python3.10/site-packages 8 | 9 | # get rubra.ai platform 10 | curl -sfL https://get.rubra.ai | bash -s -- start 11 | 12 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/Vagrantfile-LocalAI/extra-k8s-pkgs/helm-repo-add.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | helm repo add edu https://k8s-edu.github.io/helm-charts/graf 4 | helm repo update 5 | 6 | # helm auto-completion 7 | 
helm completion bash >/etc/bash_completion.d/helm 8 | # reload bash shell 9 | exec bash 10 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/Vagrantfile-LocalAI/extra-k8s-pkgs/metallb-iprange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: k8s-svc-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 192.168.1.11-192.168.1.99 9 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/Vagrantfile-LocalAI/extra-k8s-pkgs/metallb-l2mode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: layer2-mode 5 | namespace: metallb-system 6 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/Vagrantfile-LocalAI/extra-k8s-pkgs/nfs-exporter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | nfsdir=/nfs_shared/$1 3 | 4 | if [ $# -eq 0 ]; then 5 | echo "usage: nfs-exporter.sh "; exit 1 6 | fi 7 | 8 | if [[ ! -d /nfs_shared ]]; then 9 | mkdir /nfs_shared 10 | fi 11 | 12 | if [[ ! 
-d $nfsdir ]]; then 13 | mkdir -p $nfsdir 14 | echo "$nfsdir 192.168.1.0/24(rw,sync,no_root_squash)" >> /etc/exports 15 | if [[ $(systemctl is-enabled nfs-server) == "disabled" ]]; then 16 | systemctl enable nfs-server 17 | fi 18 | systemctl restart nfs-server 19 | fi 20 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/Vagrantfile-LocalAI/extra-k8s-pkgs/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: managed-nfs-storage 5 | # or choose another name, must match deployment's env PROVISIONER_NAME' 6 | provisioner: k8s-sigs.io/nfs-subdir-external-provisioner 7 | parameters: 8 | # waits for nfs.io/storage-path annotation, if not specified will accept as empty string. 9 | pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" 10 | onDelete: delete 11 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/Vagrantfile-LocalAI/single_darwin_ollama_w_k8sgpt.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | curl -LO https://ollama.com/download/Ollama-darwin.zip 4 | unzip Ollama-darwin.zip 5 | mv Ollama.app /Applications 6 | open /Applications/Ollama.app 7 | 8 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/Vagrantfile-LocalAI/worker_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for worker nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 6 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/run_ollama_n_k8sgpt/README.md: -------------------------------------------------------------------------------- 1 | ## KubeCon 
China 2024's docs 2 | 3 | [KubeCon China 2024] #1 run_ollama_n_k8sgpt.sh per model 4 | - Original: https://docs.google.com/document/d/1Ppj50GDRfH9MLHlCJaHu0VTcWpiP7j-ATatf2x7QyWQ/edit?usp=sharing 5 | - ShortURL: https://url.kr/l5uk81 6 | 7 | [KubeCon China 2024] #2 safetensor_2_gguf for ollama 8 | - Original: https://docs.google.com/document/d/1lTJBhJczab6E34f5JICVx1TYmc65Vrf6urVcWeTSHig/edit?usp=sharing 9 | - ShortURL: https://url.kr/msvd2g 10 | 11 | -------------------------------------------------------------------------------- /AIOps/K8sGPT/run_ollama_n_k8sgpt/modelfile: -------------------------------------------------------------------------------- 1 | FROM llama3-chinese:8b.gguf 2 | 3 | TEMPLATE "{{ if .System }}<|start_header_id|>system<|end_header_id|> 4 | 5 | {{ .System }}<|eot_id|>{{ end }}{{ if .Prompt }}<|start_header_id|>user<|end_header_id|> 6 | 7 | {{ .Prompt }}<|eot_id|>{{ end }}<|start_header_id|>assistant<|end_header_id|> 8 | 9 | {{ .Response }}<|eot_id|>" 10 | SYSTEM """A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. 
好奇的用户与人工智能助手聊天。对于用户提出的问题,人工智能助手会给出详细而礼貌的回答。""" 11 | PARAMETER num_keep 24 12 | PARAMETER stop <|start_header_id|> 13 | PARAMETER stop <|end_header_id|> 14 | PARAMETER stop <|eot_id|> 15 | -------------------------------------------------------------------------------- /Argo/argo-cd/app-gitops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: gitops 5 | namespace: argocd 6 | spec: 7 | project: default 8 | source: 9 | repoURL: https://github.com/sysnet4admin/IaC.git 10 | targetRevision: HEAD 11 | path: GitOps 12 | destination: 13 | server: https://kubernetes.default.svc 14 | namespace: default 15 | syncPolicy: 16 | automated: 17 | # selfHeal: false 18 | # prune: false 19 | selfHeal: true 20 | 21 | -------------------------------------------------------------------------------- /Bento/README.md: -------------------------------------------------------------------------------- 1 | # Bento's source 2 | https://github.com/chef/bento 3 | 4 | # Run 5 | ```bash 6 | cd bento 7 | bento build --cpus 4 --only=vmware-iso.vm os_pkrvars/ubuntu/ubuntu-22.04-aarch64.pkrvars.hcl 8 | ``` 9 | 10 | # Run & Move 11 | ```bash 12 | cd bento 13 | bento build --cpus 4 --only=vmware-iso.vm os_pkrvars/ubuntu/ubuntu-22.04-aarch64.pkrvars.hcl \ 14 | ; mv builds/ubuntu-22.04-aarch64.virtualbox.box ~/Documents/vagrant_cloud 15 | ``` 16 | 17 | # Dry-run 18 | ```bash 19 | cd bento 20 | bento build --cpus 4 --dry-run --only=vmware-iso.vm os_pkrvars/ubuntu/ubuntu-22.04-aarch64.pkrvars.hcl 21 | ``` 22 | 23 | # Modified 24 | - ubuntu-22.04-aarch64.pkrvars.hcl in local iso 25 | + pkr-builder.pkr.hcl 26 | - custom_pre_hoon.sh 27 | - custom_post_hoon.sh 28 | 29 | -------------------------------------------------------------------------------- /Bento/bento/AMZ_README_FIRST.md: -------------------------------------------------------------------------------- 1 | This is not your normal Bento 
box. Instead of building a system from an ISO we're building a system from an Amazon provided vm hdd image files. This means the process is a bit different than usual. 2 | 3 | # Building this box 4 | 5 | Simply run one of the amazonlinxu-2*-build.sh scripts 6 | 7 | These scripts will: 8 | 9 | 1. Download the vm image file for Amazon Linux 2 or 2023 and place it in the amz_working_files directory. Amazon hosts these at and . 10 | 1. It will prepare the VDI file for packer and export it as a OVF file 11 | 1. It will run the packer build 12 | 1. Lastly it will clean up the leftover files in the working directory on successful completion 13 | -------------------------------------------------------------------------------- /Bento/bento/CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | Please refer to 2 | -------------------------------------------------------------------------------- /Bento/bento/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gemspec 4 | 5 | group :development do 6 | gem 'cookstyle' 7 | gem 'rake', '>= 12' 8 | end 9 | -------------------------------------------------------------------------------- /Bento/bento/NOTICE.md: -------------------------------------------------------------------------------- 1 | # Bento NOTICE 2 | 3 | Chef Bento 4 | Copyright 2019-2023, Progress Software Corporation 5 | Copyright 2012-2019, Chef Software, Inc. 6 | 7 | The baseboxes in the "definitions" directory is from Tim Dysinger's 8 | "basebox" project. 
9 | 10 | Author: Tim Dysinger () 11 | Copyright 2011-2012, Tim Dysinger () 12 | -------------------------------------------------------------------------------- /Bento/bento/TESTING.md: -------------------------------------------------------------------------------- 1 | Please refer to 2 | -------------------------------------------------------------------------------- /Bento/bento/bento.gemspec: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | require_relative 'lib/bento/version' 3 | 4 | Gem::Specification.new do |s| 5 | s.name = 'bento' 6 | s.version = Bento::VERSION 7 | s.summary = 'Bento builds generic Vagrant boxes ' 8 | s.description = s.summary 9 | s.license = 'Apache-2.0' 10 | s.author = 'Many many Chef employees over the years' 11 | s.email = 'oss@chef.io' 12 | s.homepage = 'https://github.com/chef/bento/' 13 | 14 | s.required_ruby_version = '>= 3.0' 15 | 16 | s.add_dependency 'mixlib-shellout', '>= 2.3.2' 17 | 18 | s.bindir = 'bin' 19 | s.executables = %w(bento) 20 | 21 | s.require_path = 'lib' 22 | s.files = %w(LICENSE Gemfile) + Dir.glob('*.gemspec') + Dir.glob('lib/**/*') 23 | end 24 | -------------------------------------------------------------------------------- /Bento/bento/bin/bento: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | Signal.trap('INT') { exit 1 } 3 | 4 | $stdout.sync = true 5 | $stderr.sync = true 6 | 7 | require_relative '../lib/bento/cli' 8 | 9 | begin 10 | Runner.new(Options.parse(ARGV)).start 11 | rescue => ex 12 | warn ">>> #{ex.message}" 13 | exit(($CHILD_STATUS && $CHILD_STATUS.exitstatus) || 99) 14 | end 15 | -------------------------------------------------------------------------------- /Bento/bento/builds/build_files/.gitkeep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Bento/bento/builds/build_files/.gitkeep -------------------------------------------------------------------------------- /Bento/bento/builds/failed_testing/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Bento/bento/builds/failed_testing/.gitkeep -------------------------------------------------------------------------------- /Bento/bento/builds/ubuntu-22.04-aarch64._metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ubuntu-22.04", 3 | "version": "202506.20.0", 4 | "arch": "aarch64", 5 | "build_timestamp": "20250620062228", 6 | "packer_command": "packer build -timestamp-ui -force -var cpus=4 -only=virtualbox-iso.vm -var-file=ubuntu-22.04-aarch64.pkrvars.hcl ../../packer_templates", 7 | "git_revision": "5ed0555296ac1173bcba3589a11f61f9caf50165", 8 | "git_status": "dirty", 9 | "box_basename": "ubuntu-22.04", 10 | "template": "ubuntu-22.04-aarch64", 11 | "packer": "Packer v1.13.1", 12 | "providers": [ 13 | { 14 | "name": "virtualbox", 15 | "version": "7.1.10", 16 | "file": "ubuntu-22.04-aarch64.virtualbox.box", 17 | "checksum_type": "sha256", 18 | "checksum": "cf68428a9623ae3fdb41227752a6dbc3a11226fac34a35196eb33fcb5ab3e9d1", 19 | "size": "835 MB", 20 | "build_time": 553, 21 | "build_cpus": "4" 22 | } 23 | ] 24 | } -------------------------------------------------------------------------------- /Bento/bento/builds/uploaded/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Bento/bento/builds/uploaded/.gitkeep -------------------------------------------------------------------------------- /Bento/bento/lib/bento.rb: 
-------------------------------------------------------------------------------- 1 | require 'bento/common' 2 | require 'bento/version' 3 | -------------------------------------------------------------------------------- /Bento/bento/lib/bento/test_templates/bootstrap.sh.erb: -------------------------------------------------------------------------------- 1 | echo "Shell Provisioner Says What" 2 | -------------------------------------------------------------------------------- /Bento/bento/lib/bento/test_templates/kitchen.yml.erb: -------------------------------------------------------------------------------- 1 | --- 2 | provisioner: 3 | name: <%= @provisioner %> 4 | 5 | platforms: 6 | <% @providers.each do |k,v| -%> 7 | - name: <%= "#{@boxname}-#{@arch}-#{k}" %> 8 | driver: 9 | name: vagrant 10 | provider: <%= k %> 11 | vm_hostname: bento-test 12 | box: bento-<%= @boxname %> 13 | box_url: file://<%= Dir.pwd %>/builds/<%= v['file'] %> 14 | box_arch: <%= @arch %> 15 | synced_folders: 16 | <% if k =~ /hyperv/ -%> 17 | - [".", "/vagrant", "disabled: true"] 18 | <% else -%> 19 | - [".", "/vagrant", "disabled: <%= @share_disabled %>"] 20 | <% end -%> 21 | <% end -%> 22 | suites: 23 | - name: default 24 | -------------------------------------------------------------------------------- /Bento/bento/lib/bento/version.rb: -------------------------------------------------------------------------------- 1 | module Bento 2 | VERSION = '4.1.3'.freeze 3 | end 4 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/almalinux/almalinux-8-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "almalinux" 2 | os_version = "8.10" 3 | os_arch = "aarch64" 4 | iso_url = "https://repo.almalinux.org/almalinux/8/isos/aarch64/AlmaLinux-8.10-aarch64-boot.iso" 5 | iso_checksum = "file:https://repo.almalinux.org/almalinux/8/isos/aarch64/CHECKSUM" 6 | parallels_guest_os_type = 
"centos" 7 | vbox_guest_os_type = "Oracle_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/8ks.cfg inst.repo=https://repo.almalinux.org/almalinux/8/BaseOS/aarch64/os/ x"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/almalinux/almalinux-8-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "almalinux" 2 | os_version = "8.10" 3 | os_arch = "x86_64" 4 | iso_url = "https://repo.almalinux.org/almalinux/8/isos/x86_64/AlmaLinux-8.10-x86_64-boot.iso" 5 | iso_checksum = "file:https://repo.almalinux.org/almalinux/8/isos/x86_64/CHECKSUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "RedHat_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/8ks.cfg inst.repo=https://repo.almalinux.org/almalinux/8/BaseOS/x86_64/os/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/almalinux/almalinux-9-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "almalinux" 2 | os_version = "9.6" 3 | os_arch = "aarch64" 4 | iso_url = "https://repo.almalinux.org/almalinux/9/isos/aarch64/AlmaLinux-9.6-aarch64-boot.iso" 5 | iso_checksum = "file:https://repo.almalinux.org/almalinux/9/isos/aarch64/CHECKSUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle9_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg inst.repo=https://repo.almalinux.org/almalinux/9/BaseOS/aarch64/os/ x"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/almalinux/almalinux-9-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 
1 | os_name = "almalinux" 2 | os_version = "9.6" 3 | os_arch = "x86_64" 4 | iso_url = "https://repo.almalinux.org/almalinux/9/isos/x86_64/AlmaLinux-9.6-x86_64-boot.iso" 5 | iso_checksum = "file:https://repo.almalinux.org/almalinux/9/isos/x86_64/CHECKSUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "RedHat_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg inst.repo=https://repo.almalinux.org/almalinux/9/BaseOS/x86_64/os/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/amazonlinux/amazonlinux-2023-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "amazonlinux" 2 | os_version = "2023" 3 | os_arch = "x86_64" 4 | vbox_source_path = "https://cdn.amazonlinux.com/al2023/os-images/latest/vmware/al2023-vmware_esx-2023.3.20240312.0-kernel-6.1-x86_64.xfs.gpt.ova" 5 | vbox_checksum = "file:https://cdn.amazonlinux.com/al2023/os-images/latest/vmware/SHA256SUMS" 6 | parallels_guest_os_type = "fedora-core" 7 | vbox_guest_os_type = "Fedora_64" 8 | vmware_guest_os_type = "fedora-64" 9 | sources_enabled = [ 10 | "source.virtualbox-ovf.vm" 11 | ] 12 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/centos/centos-stream-10-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "centos-stream" 2 | os_version = "10" 3 | os_arch = "aarch64" 4 | iso_url = "https://mirror.stream.centos.org/10-stream/BaseOS/aarch64/iso/CentOS-Stream-10-latest-aarch64-boot.iso" 5 | iso_checksum = "file:https://mirror.stream.centos.org/10-stream/BaseOS/aarch64/iso/CentOS-Stream-10-latest-aarch64-boot.iso.SHA256SUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle9_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = ["e inst.ks=http://{{ 
.HTTPIP }}:{{ .HTTPPort }}/rhel/10ks.cfg inst.repo=https://mirror.stream.centos.org/10-stream/BaseOS/aarch64/os/ x"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/centos/centos-stream-10-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "centos-stream" 2 | os_version = "10" 3 | os_arch = "x86_64" 4 | iso_url = "https://mirror.stream.centos.org/10-stream/BaseOS/x86_64/iso/CentOS-Stream-10-latest-x86_64-boot.iso" 5 | iso_checksum = "file:https://mirror.stream.centos.org/10-stream/BaseOS/x86_64/iso/CentOS-Stream-10-latest-x86_64-boot.iso.SHA256SUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "RedHat_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/10ks.cfg inst.repo=https://mirror.stream.centos.org/10-stream/BaseOS/x86_64/os/ x"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/centos/centos-stream-9-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "centos-stream" 2 | os_version = "9" 3 | os_arch = "aarch64" 4 | iso_url = "https://mirror.stream.centos.org/9-stream/BaseOS/aarch64/iso/CentOS-Stream-9-latest-aarch64-boot.iso" 5 | iso_checksum = "file:https://mirror.stream.centos.org/9-stream/BaseOS/aarch64/iso/CentOS-Stream-9-latest-aarch64-boot.iso.SHA256SUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle9_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg inst.repo=https://mirror.stream.centos.org/9-stream/BaseOS/aarch64/os/ x"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/centos/centos-stream-9-x86_64.pkrvars.hcl: 
-------------------------------------------------------------------------------- 1 | os_name = "centos-stream" 2 | os_version = "9" 3 | os_arch = "x86_64" 4 | iso_url = "https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/iso/CentOS-Stream-9-latest-x86_64-boot.iso" 5 | iso_checksum = "file:https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/iso/CentOS-Stream-9-latest-x86_64-boot.iso.SHA256SUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "RedHat_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg inst.repo=https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/debian/debian-11-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "debian" 2 | os_version = "11.11" 3 | os_arch = "x86_64" 4 | iso_url = "https://cdimage.debian.org/cdimage/archive/latest-oldstable/amd64/iso-cd/debian-11.11.0-amd64-netinst.iso" 5 | iso_checksum = "file:https://cdimage.debian.org/cdimage/archive/latest-oldstable/amd64/iso-cd/SHA256SUMS" 6 | parallels_guest_os_type = "debian" 7 | vbox_guest_os_type = "Debian11_64" 8 | vmware_guest_os_type = "debian-64" 9 | boot_command = ["auto preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/debian/preseed.cfg netcfg/get_hostname={{ .Name }}"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/debian/debian-12-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "debian" 2 | os_version = "12.10" 3 | os_arch = "x86_64" 4 | iso_url = "https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/debian-12.10.0-amd64-netinst.iso" 5 | iso_checksum = "file:https://cdimage.debian.org/debian-cd/current/amd64/iso-cd/SHA256SUMS" 6 | parallels_guest_os_type = "debian" 7 | 
vbox_guest_os_type = "Debian12_64" 8 | vmware_guest_os_type = "debian-64" 9 | boot_command = ["auto preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/debian/preseed.cfg netcfg/get_hostname={{ .Name }}"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/fedora/fedora-40-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "fedora" 2 | os_version = "40" 3 | os_arch = "aarch64" 4 | iso_url = "https://download.fedoraproject.org/pub/fedora/linux/releases/40/Server/aarch64/iso/Fedora-Server-netinst-aarch64-40-1.14.iso" 5 | iso_checksum = "file:https://download.fedoraproject.org/pub/fedora/linux/releases/40/Server/aarch64/iso/Fedora-Server-40-1.14-aarch64-CHECKSUM" 6 | parallels_guest_os_type = "fedora-core" 7 | vbox_guest_os_type = "Fedora_arm64" 8 | vmware_guest_os_type = "arm-fedora-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/fedora/ks.cfg inst.repo=https://download.fedoraproject.org/pub/fedora/linux/releases/40/Server/aarch64/os/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/fedora/fedora-40-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "fedora" 2 | os_version = "40" 3 | os_arch = "x86_64" 4 | iso_url = "https://download.fedoraproject.org/pub/fedora/linux/releases/40/Server/x86_64/iso/Fedora-Server-netinst-x86_64-40-1.14.iso" 5 | iso_checksum = "file:https://download.fedoraproject.org/pub/fedora/linux/releases/40/Server/x86_64/iso/Fedora-Server-40-1.14-x86_64-CHECKSUM" 6 | parallels_guest_os_type = "fedora-core" 7 | vbox_guest_os_type = "Fedora_64" 8 | vmware_guest_os_type = "fedora-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/fedora/ks.cfg inst.repo=https://download.fedoraproject.org/pub/fedora/linux/releases/40/Server/x86_64/os/ "] 10 | 
-------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/fedora/fedora-41-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "fedora" 2 | os_version = "41" 3 | os_arch = "aarch64" 4 | iso_url = "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Server/aarch64/iso/Fedora-Server-netinst-aarch64-41-1.4.iso" 5 | iso_checksum = "file:https://download.fedoraproject.org/pub/fedora/linux/releases/41/Server/aarch64/iso/Fedora-Server-41-1.4-aarch64-CHECKSUM" 6 | parallels_guest_os_type = "fedora-core" 7 | vbox_guest_os_type = "Fedora_arm64" 8 | vmware_guest_os_type = "arm-fedora-64" 9 | parallels_boot_wait = "0s" 10 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/fedora/ks.cfg inst.repo=https://download.fedoraproject.org/pub/fedora/linux/releases/41/Server/aarch64/os/ "] 11 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/fedora/fedora-41-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "fedora" 2 | os_version = "41" 3 | os_arch = "x86_64" 4 | iso_url = "https://download.fedoraproject.org/pub/fedora/linux/releases/41/Server/x86_64/iso/Fedora-Server-netinst-x86_64-41-1.4.iso" 5 | iso_checksum = "file:https://download.fedoraproject.org/pub/fedora/linux/releases/41/Server/x86_64/iso/Fedora-Server-41-1.4-x86_64-CHECKSUM" 6 | parallels_guest_os_type = "fedora-core" 7 | vbox_guest_os_type = "Fedora_64" 8 | vmware_guest_os_type = "fedora-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/fedora/ks.cfg inst.repo=https://download.fedoraproject.org/pub/fedora/linux/releases/41/Server/x86_64/os/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/fedora/fedora-42-aarch64.pkrvars.hcl: 
-------------------------------------------------------------------------------- 1 | os_name = "fedora" 2 | os_version = "42" 3 | os_arch = "aarch64" 4 | iso_url = "https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/aarch64/iso/Fedora-Server-netinst-aarch64-42-1.1.iso" 5 | iso_checksum = "file:https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/aarch64/iso/Fedora-Server-42-1.1-aarch64-CHECKSUM" 6 | parallels_guest_os_type = "fedora-core" 7 | vbox_guest_os_type = "Fedora_arm64" 8 | vmware_guest_os_type = "arm-fedora-64" 9 | parallels_boot_wait = "0s" 10 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/fedora/ks.cfg inst.repo=https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/aarch64/os/ "] 11 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/fedora/fedora-42-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "fedora" 2 | os_version = "42" 3 | os_arch = "x86_64" 4 | iso_url = "https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/x86_64/iso/Fedora-Server-netinst-x86_64-42-1.1.iso" 5 | iso_checksum = "file:https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/x86_64/iso/Fedora-Server-42-1.1-x86_64-CHECKSUM" 6 | parallels_guest_os_type = "fedora-core" 7 | vbox_guest_os_type = "Fedora_64" 8 | vmware_guest_os_type = "fedora-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/fedora/ks.cfg inst.repo=https://download.fedoraproject.org/pub/fedora/linux/releases/42/Server/x86_64/os/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/opensuse/opensuse-leap-15-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "opensuse-leap" 2 | os_version = "15.6" 3 | os_arch = "aarch64" 4 | iso_url = 
"https://download.opensuse.org/distribution/leap/15.6/iso/openSUSE-Leap-15.6-NET-aarch64-Media.iso" 5 | iso_checksum = "file:https://download.opensuse.org/distribution/leap/15.6/iso/openSUSE-Leap-15.6-NET-aarch64-Media.iso.sha256" 6 | parallels_guest_os_type = "opensuse" 7 | vbox_guest_os_type = "OpenSUSE_Leap_arm64" 8 | vmware_guest_os_type = "arm-other-64" 9 | boot_command = ["e biosdevname=0 net.ifnames=0 netdevice=eth0 netsetup=dhcp lang=en_US textmode=1 modprobe.blacklist=vmwgfx autoyast=http://{{ .HTTPIP }}:{{ .HTTPPort }}/opensuse/autoinst-uefi.xml"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/opensuse/opensuse-leap-15-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "opensuse-leap" 2 | os_version = "15.6" 3 | os_arch = "x86_64" 4 | iso_url = "https://download.opensuse.org/distribution/leap/15.6/iso/openSUSE-Leap-15.6-NET-x86_64-Media.iso" 5 | iso_checksum = "file:https://download.opensuse.org/distribution/leap/15.6/iso/openSUSE-Leap-15.6-NET-x86_64-Media.iso.sha256" 6 | parallels_guest_os_type = "opensuse" 7 | vbox_guest_os_type = "OpenSUSE_Leap_64" 8 | vmware_guest_os_type = "opensuse-64" 9 | boot_command = ["linux biosdevname=0 net.ifnames=0 netdevice=eth0 netsetup=dhcp lang=en_US textmode=1 autoyast=http://{{ .HTTPIP }}:{{ .HTTPPort }}/opensuse/autoinst.xml"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/oraclelinux/oraclelinux-8-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "oraclelinux" 2 | os_version = "8.10" 3 | os_arch = "aarch64" 4 | iso_url = "https://yum.oracle.com/ISOS/OracleLinux/OL8/u10/aarch64/OracleLinux-R8-U10-aarch64-dvd.iso" 5 | iso_checksum = "file:https://linux.oracle.com/security/gpg/checksum/OracleLinux-R8-U10-Server-aarch64.checksum" 6 | parallels_guest_os_type = "centos" 
7 | vbox_guest_os_type = "Oracle9_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | parallels_boot_wait = "0s" 10 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/8ks.cfg inst.repo=https://yum.oracle.com/repo/OracleLinux/OL8/baseos/latest/aarch64/ x"] 11 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/oraclelinux/oraclelinux-8-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "oraclelinux" 2 | os_version = "8.10" 3 | os_arch = "x86_64" 4 | iso_url = "https://yum.oracle.com/ISOS/OracleLinux/OL8/u10/x86_64/OracleLinux-R8-U10-x86_64-boot.iso" 5 | iso_checksum = "file:https://linux.oracle.com/security/gpg/checksum/OracleLinux-R8-U10-Server-x86_64.checksum" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/8ks.cfg inst.repo=https://yum.oracle.com/repo/OracleLinux/OL8/baseos/latest/x86_64/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/oraclelinux/oraclelinux-9-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "oraclelinux" 2 | os_version = "9.5" 3 | os_arch = "aarch64" 4 | iso_url = "https://yum.oracle.com/ISOS/OracleLinux/OL9/u5/aarch64/OracleLinux-R9-U5-aarch64-boot-uek.iso" 5 | iso_checksum = "file:https://linux.oracle.com/security/gpg/checksum/OracleLinux-R9-U5-Server-aarch64.checksum" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle9_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg inst.repo=https://yum.oracle.com/repo/OracleLinux/OL9/baseos/latest/aarch64/ x"] 10 | -------------------------------------------------------------------------------- 
/Bento/bento/os_pkrvars/oraclelinux/oraclelinux-9-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "oraclelinux" 2 | os_version = "9.5" 3 | os_arch = "x86_64" 4 | iso_url = "https://yum.oracle.com/ISOS/OracleLinux/OL9/u5/x86_64/OracleLinux-R9-U5-x86_64-boot.iso" 5 | iso_checksum = "file:https://linux.oracle.com/security/gpg/checksum/OracleLinux-R9-U5-Server-x86_64.checksum" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg inst.repo=https://yum.oracle.com/repo/OracleLinux/OL9/baseos/latest/x86_64/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/rhel/rhel-8-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "rhel" 2 | os_version = "8.10" 3 | os_arch = "aarch64" 4 | iso_url = "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux" 5 | iso_checksum = "none" 6 | parallels_guest_os_type = "rhel" 7 | vbox_guest_os_type = "Oracle_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = [" inst.text inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/8ks.cfg"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/rhel/rhel-8-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "rhel" 2 | os_version = "8.10" 3 | os_arch = "x86_64" 4 | iso_url = "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux" 5 | iso_checksum = "none" 6 | parallels_guest_os_type = "rhel" 7 | vbox_guest_os_type = "RedHat_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.text inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/8ks.cfg"] 10 | 
-------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/rhel/rhel-9-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "rhel" 2 | os_version = "9.5" 3 | os_arch = "aarch64" 4 | iso_url = "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux" 5 | iso_checksum = "none" 6 | parallels_guest_os_type = "rhel" 7 | vbox_guest_os_type = "Oracle9_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = [" inst.text inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/rhel/rhel-9-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "rhel" 2 | os_version = "9.5" 3 | os_arch = "x86_64" 4 | iso_url = "https://www.redhat.com/en/technologies/linux-platforms/enterprise-linux" 5 | iso_checksum = "none" 6 | parallels_guest_os_type = "rhel" 7 | vbox_guest_os_type = "RedHat_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.text inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/rockylinux/rockylinux-8-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "rockylinux" 2 | os_version = "8.10" 3 | os_arch = "aarch64" 4 | iso_url = "https://download.rockylinux.org/pub/rocky/8/isos/aarch64/Rocky-8.10-aarch64-boot.iso" 5 | iso_checksum = "file:https://download.rockylinux.org/pub/rocky/8/isos/aarch64/CHECKSUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/8ks.cfg 
inst.repo=https://download.rockylinux.org/pub/rocky/8/BaseOS/aarch64/os/ x"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/rockylinux/rockylinux-8-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "rockylinux" 2 | os_version = "8.10" 3 | os_arch = "x86_64" 4 | iso_url = "https://download.rockylinux.org/pub/rocky/8/isos/x86_64/Rocky-8.10-x86_64-boot.iso" 5 | iso_checksum = "file:https://download.rockylinux.org/pub/rocky/8/isos/x86_64/CHECKSUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/8ks.cfg inst.repo=https://download.rockylinux.org/pub/rocky/8/BaseOS/x86_64/os/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/rockylinux/rockylinux-9-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "rockylinux" 2 | os_version = "9.6" 3 | os_arch = "aarch64" 4 | iso_url = "https://download.rockylinux.org/pub/rocky/9/isos/aarch64/Rocky-9.6-aarch64-boot.iso" 5 | iso_checksum = "file:https://download.rockylinux.org/pub/rocky/9/isos/aarch64/CHECKSUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle9_arm64" 8 | vmware_guest_os_type = "arm-rhel9-64" 9 | boot_command = ["e inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg inst.repo=https://download.rockylinux.org/pub/rocky/9/BaseOS/aarch64/os/ x"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/rockylinux/rockylinux-9-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "rockylinux" 2 | os_version = "9.6" 3 | os_arch = "x86_64" 4 | iso_url = 
"https://download.rockylinux.org/pub/rocky/9/isos/x86_64/Rocky-9.6-x86_64-boot.iso" 5 | iso_checksum = "file:https://download.rockylinux.org/pub/rocky/9/isos/x86_64/CHECKSUM" 6 | parallels_guest_os_type = "centos" 7 | vbox_guest_os_type = "Oracle_64" 8 | vmware_guest_os_type = "centos-64" 9 | boot_command = [" inst.ks=http://{{ .HTTPIP }}:{{ .HTTPPort }}/rhel/9ks.cfg inst.repo=https://download.rockylinux.org/pub/rocky/9/BaseOS/x86_64/os/ "] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/sles/sles-15-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "suse" 2 | os_version = "15.6" 3 | os_arch = "aarch64" 4 | iso_url = "https://updates.suse.com/SUSE/Products/SLE-Product-SLES/15-SP6/aarch64/iso/SLE-15-SP6-Online-aarch64-GM-Media1.iso" 5 | iso_checksum = "56f67a6d10a502901cc2c291231089e375dd7c2b51fc951d5deaa57439c2686e" 6 | parallels_guest_os_type = "opensuse" 7 | vbox_guest_os_type = "OpenSUSE_Leap_arm64" 8 | vmware_guest_os_type = "arm-other-64" 9 | boot_command = ["linux netdevice=eth0 netsetup=dhcp install=cd:/ lang=en_US autoyast=http://{{ .HTTPIP }}:{{ .HTTPPort }}/sles/15-autoinst.xml textmode=1"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/sles/sles-15-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "suse" 2 | os_version = "15.6" 3 | os_arch = "x86_64" 4 | iso_url = "https://updates.suse.com/SUSE/Products/SLE-Product-SLES/15-SP6/x86_64/iso/SLE-15-SP6-Online-x86_64-GM-Media1.iso" 5 | iso_checksum = "c816a46b76de157e49d6b931284b9827b3781260f0cdd2a365296246313dd7ce" 6 | parallels_guest_os_type = "suse" 7 | vbox_guest_os_type = "SUSE_LE_64" 8 | vmware_guest_os_type = "sles15-64" 9 | boot_command = ["linux netdevice=eth0 netsetup=dhcp install=cd:/ lang=en_US autoyast=http://{{ .HTTPIP }}:{{ .HTTPPort 
}}/sles/15-autoinst.xml textmode=1"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/ubuntu/ubuntu-22.04-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "ubuntu" 2 | os_version = "22.04" 3 | os_arch = "aarch64" 4 | iso_url = "/Users/hj/Documents/vagrant_cloud/iso/ubuntu-22.04.5-live-server-arm64.iso" 5 | iso_checksum = "eafec62cfe760c30cac43f446463e628fada468c2de2f14e0e2bc27295187505" 6 | parallels_guest_os_type = "ubuntu" 7 | vbox_guest_os_type = "Ubuntu_arm64" 8 | vmware_guest_os_type = "arm-ubuntu-64" 9 | boot_command = ["e autoinstall ds=nocloud-net\\;s=http://{{.HTTPIP}}:{{.HTTPPort}}/ubuntu/"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/ubuntu/ubuntu-22.04-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "ubuntu" 2 | os_version = "22.04" 3 | os_arch = "x86_64" 4 | iso_url = "https://releases.ubuntu.com/jammy/ubuntu-22.04.5-live-server-amd64.iso" 5 | iso_checksum = "file:https://releases.ubuntu.com/jammy/SHA256SUMS" 6 | parallels_guest_os_type = "ubuntu" 7 | vbox_guest_os_type = "Ubuntu_64" 8 | vmware_guest_os_type = "ubuntu-64" 9 | boot_command = ["cset gfxpayload=keeplinux /casper/vmlinuz quiet autoinstall ds=nocloud-net\\;s=http://{{.HTTPIP}}:{{.HTTPPort}}/ubuntu/ ---initrd /casper/initrdboot"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/ubuntu/ubuntu-24.04-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "ubuntu" 2 | os_version = "24.04" 3 | os_arch = "aarch64" 4 | iso_url = "https://cdimage.ubuntu.com/releases/noble/release/ubuntu-24.04.2-live-server-arm64.iso" 5 | iso_checksum = "file:https://cdimage.ubuntu.com/releases/noble/release/SHA256SUMS" 6 | 
parallels_guest_os_type = "ubuntu" 7 | vbox_guest_os_type = "Ubuntu24_LTS_arm64" 8 | vmware_guest_os_type = "arm-ubuntu-64" 9 | boot_command = ["e autoinstall ds=nocloud-net\\;s=http://{{.HTTPIP}}:{{.HTTPPort}}/ubuntu/"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/ubuntu/ubuntu-24.04-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "ubuntu" 2 | os_version = "24.04" 3 | os_arch = "x86_64" 4 | iso_url = "https://releases.ubuntu.com/noble/ubuntu-24.04.2-live-server-amd64.iso" 5 | iso_checksum = "file:https://releases.ubuntu.com/noble/SHA256SUMS" 6 | parallels_guest_os_type = "ubuntu" 7 | vbox_guest_os_type = "Ubuntu_64" 8 | vmware_guest_os_type = "ubuntu-64" 9 | boot_command = ["e autoinstall ds=nocloud-net\\;s=http://{{.HTTPIP}}:{{.HTTPPort}}/ubuntu/"] 10 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/windows/windows-10-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "windows" 2 | os_version = "10" 3 | os_arch = "x86_64" 4 | is_windows = true 5 | # Download url's found at https://www.microsoft.com/en-us/evalcenter/download-windows-10-enterprise 6 | iso_url = "https://software-static.download.prss.microsoft.com/dbazure/988969d5-f34g-4e03-ac9d-1f9786c66750/19045.2006.220908-0225.22h2_release_svc_refresh_CLIENTENTERPRISEEVAL_OEMRET_x64FRE_en-us.iso" 7 | iso_checksum = "EF7312733A9F5D7D51CFA04AC497671995674CA5E1058D5164D6028F0938D668" 8 | parallels_guest_os_type = "win-10" 9 | vbox_guest_os_type = "Windows10_64" 10 | vmware_guest_os_type = "windows9-64" 11 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/windows/windows-11-aarch64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "windows" 2 | 
os_version = "11" 3 | os_arch = "aarch64" 4 | is_windows = true 5 | hyperv_generation = 2 6 | # Download url's found at https://www.microsoft.com/en-us/evalcenter/download-windows-11-enterprise 7 | iso_url = "https://software-static.download.prss.microsoft.com/dbazure/888969d5-f34g-4e03-ac9d-1f9786c66749/26100.1.240331-1435.ge_release_CLIENTENTERPRISEEVAL_OEMRET_A64FRE_en-us.iso" 8 | iso_checksum = "DAD633276073F14F3E0373EF7E787569E216D54942CE522B39451C8F2D38AD43" 9 | sources_enabled = ["source.parallels-iso.vm", "source.qemu.vm", "source.vmware-iso.vm"] 10 | parallels_guest_os_type = "win-11" 11 | vbox_guest_os_type = "Windows11_arm64" 12 | vmware_guest_os_type = "arm-windows11-64" 13 | vmware_firmware = "efi-secure" 14 | parallels_boot_wait = "180s" 15 | boot_command = ["", ] 16 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/windows/windows-11-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "windows" 2 | os_version = "11" 3 | os_arch = "x86_64" 4 | is_windows = true 5 | # Download url's found at https://www.microsoft.com/en-us/evalcenter/download-windows-11-enterprise 6 | iso_url = "https://software-static.download.prss.microsoft.com/dbazure/888969d5-f34g-4e03-ac9d-1f9786c66749/26100.1742.240906-0331.ge_release_svc_refresh_CLIENTENTERPRISEEVAL_OEMRET_x64FRE_en-us.iso" 7 | iso_checksum = "755A90D43E826A74B9E1932A34788B898E028272439B777E5593DEE8D53622AE" 8 | parallels_guest_os_type = "win-11" 9 | vbox_guest_os_type = "Windows11_64" 10 | vmware_guest_os_type = "windows9srv-64" 11 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/windows/windows-2016-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "windows" 2 | os_version = "2016" 3 | os_arch = "x86_64" 4 | is_windows = true 5 | # Download url's found at 
https://www.microsoft.com/en-us/evalcenter/download-windows-server-2016 6 | iso_url = "https://software-static.download.prss.microsoft.com/pr/download/Windows_Server_2016_Datacenter_EVAL_en-us_14393_refresh.ISO" 7 | iso_checksum = "1ce702a578a3cb1ac3d14873980838590f06d5b7101c5daaccbac9d73f1fb50f" 8 | parallels_guest_os_type = "win-2016" 9 | vbox_guest_os_type = "Windows2016_64" 10 | vmware_guest_os_type = "windows9srv-64" 11 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/windows/windows-2019-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "windows" 2 | os_version = "2019" 3 | os_arch = "x86_64" 4 | is_windows = true 5 | # Download url's found at https://www.microsoft.com/en-us/evalcenter/download-windows-server-2019 6 | iso_url = "https://software-static.download.prss.microsoft.com/dbazure/988969d5-f34g-4e03-ac9d-1f9786c66749/17763.3650.221105-1748.rs5_release_svc_refresh_SERVER_EVAL_x64FRE_en-us.iso" 7 | iso_checksum = "6dae072e7f78f4ccab74a45341de0d6e2d45c39be25f1f5920a2ab4f51d7bcbb" 8 | parallels_guest_os_type = "win-2019" 9 | vbox_guest_os_type = "Windows2019_64" 10 | vmware_guest_os_type = "windows9srv-64" 11 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/windows/windows-2022-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "windows" 2 | os_version = "2022" 3 | os_arch = "x86_64" 4 | is_windows = true 5 | # Download url's found at https://www.microsoft.com/en-us/evalcenter/download-windows-server-2022 6 | iso_url = "https://software-static.download.prss.microsoft.com/sg/download/888969d5-f34g-4e03-ac9d-1f9786c66749/SERVER_EVAL_x64FRE_en-us.iso" 7 | iso_checksum = "3e4fa6d8507b554856fc9ca6079cc402df11a8b79344871669f0251535255325" 8 | parallels_guest_os_type = "win-2022" 9 | vbox_guest_os_type = "Windows2022_64" 
10 | vmware_guest_os_type = "windows9srv-64" 11 | -------------------------------------------------------------------------------- /Bento/bento/os_pkrvars/windows/windows-2025-x86_64.pkrvars.hcl: -------------------------------------------------------------------------------- 1 | os_name = "windows" 2 | os_version = "2025" 3 | os_arch = "x86_64" 4 | is_windows = true 5 | # Download url's found at https://www.microsoft.com/en-us/evalcenter/download-windows-server-2025 6 | iso_url = "https://software-static.download.prss.microsoft.com/dbazure/888969d5-f34g-4e03-ac9d-1f9786c66749/26100.1742.240906-0331.ge_release_svc_refresh_SERVER_EVAL_x64FRE_en-us.iso" 7 | iso_checksum = "d0ef4502e350e3c6c53c15b1b3020d38a5ded011bf04998e950720ac8579b23d" 8 | parallels_guest_os_type = "win-2022" 9 | vbox_guest_os_type = "Windows2022_64" 10 | vmware_guest_os_type = "windows9srv-64" 11 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/http/amazon/meta-data: -------------------------------------------------------------------------------- 1 | local-hostname: vagrant 2 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/http/amazon/user-data: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | #vim:syntax=yaml 3 | users: 4 | - name: vagrant 5 | lock_passwd: false 6 | passwd: $6$TbOu26d1hYO4EC/D$MymO7cTo/tID7tkH8TtAGyAdaK9nHwGQfDD9KwQmql3xP1BPPc67YCmoR8UO3Dw5jacOx3GvnBLqvAmHLHC4H0 7 | sudo: ALL=(ALL) NOPASSWD:ALL 8 | 9 | # Enable password authentication for SSH 10 | write_files: 11 | - path: /etc/cloud/cloud.cfg.d/90_enable_ssh_pwauth.cfg 12 | content: | 13 | runcmd: 14 | - sed -i "s/^PasswordAuthentication.*/PasswordAuthentication yes/" /etc/ssh/sshd_config 15 | - systemctl restart sshd 16 | -------------------------------------------------------------------------------- 
/Bento/bento/packer_templates/http/ubuntu/meta-data: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Bento/bento/packer_templates/http/ubuntu/meta-data -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/_common/metadata.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | mkdir -p /etc; 4 | cp /tmp/bento-metadata.json /etc/bento-metadata.json; 5 | chmod 0444 /etc/bento-metadata.json; 6 | rm -f /tmp/bento-metadata.json; 7 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/_common/motd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | bento=' 4 | This system is built by the Bento project by Chef Software 5 | More information can be found at https://github.com/chef/bento 6 | 7 | Use of this system is acceptance of the OS vendor EULA and License Agreements.' 8 | 9 | if [ -d /etc/update-motd.d ]; then 10 | MOTD_CONFIG='/etc/update-motd.d/99-bento' 11 | 12 | cat >> "$MOTD_CONFIG" <> /etc/motd 25 | chmod 0755 /etc/motd 26 | fi 27 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/_common/parallels_post_cleanup_debian_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | case "$PACKER_BUILDER_TYPE" in 4 | parallels-iso|parallels-pvm) 5 | # This runs in a special script after cleanup_{ubuntu|debian}.sh, which cleans up the kernel headers 6 | # in general (we want to keep them for Parallels Desktop only). 
7 | 8 | echo "Installing linux-kernel-headers for the current kernel version, to allow re-compilation of Parallels Tools upon boot" 9 | 10 | if [ -f "/usr/bin/apt-get" ]; then 11 | apt-get install -y linux-headers-"$(uname -r)" 12 | fi 13 | ;; 14 | esac 15 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/_common/sshd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | SSHD_CONFIG="/etc/ssh/sshd_config" 4 | 5 | # ensure that there is a trailing newline before attempting to concatenate 6 | # shellcheck disable=SC1003 7 | sed -i -e '$a\' "$SSHD_CONFIG" 8 | 9 | USEDNS="UseDNS no" 10 | if grep -q -E "^[[:space:]]*UseDNS" "$SSHD_CONFIG"; then 11 | sed -i "s/^\s*UseDNS.*/${USEDNS}/" "$SSHD_CONFIG" 12 | else 13 | echo "$USEDNS" >>"$SSHD_CONFIG" 14 | fi 15 | 16 | GSSAPI="GSSAPIAuthentication no" 17 | if grep -q -E "^[[:space:]]*GSSAPIAuthentication" "$SSHD_CONFIG"; then 18 | sed -i "s/^\s*GSSAPIAuthentication.*/${GSSAPI}/" "$SSHD_CONFIG" 19 | else 20 | echo "$GSSAPI" >>"$SSHD_CONFIG" 21 | fi 22 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/custom_pre_hoon.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # add google dns server to avoid dns query error 4 | cat </etc/resolv.conf 5 | nameserver 8.8.8.8 6 | EOF 7 | 8 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/debian/hyperv_debian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | case "$PACKER_BUILDER_TYPE" in 4 | hyperv-iso) 5 | echo "installing packaging for hyper-v" 6 | apt-get -y install linux-image-virtual linux-tools-virtual linux-cloud-tools-virtual; 7 | esac 8 | 
-------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/debian/networking_debian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # Disable Predictable Network Interface names and use eth0 4 | sed -i 's/en[[:alnum:]]*/eth0/g' /etc/network/interfaces; 5 | sed -i 's/GRUB_CMDLINE_LINUX="\(.*\)"/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0 \1"/g' /etc/default/grub; 6 | update-grub; 7 | 8 | # Adding a 2 sec delay to the interface up, to make the dhclient happy 9 | echo "pre-up sleep 2" >> /etc/network/interfaces 10 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/debian/sudoers_debian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # Only add the secure path line if it is not already present 4 | grep -q 'secure_path' /etc/sudoers \ 5 | || sed -i -e '/Defaults\s\+env_reset/a Defaults\tsecure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"' /etc/sudoers; 6 | 7 | # Set up password-less sudo for the vagrant user 8 | echo 'vagrant ALL=(ALL) NOPASSWD:ALL' >/etc/sudoers.d/99_vagrant; 9 | chmod 440 /etc/sudoers.d/99_vagrant; 10 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/debian/systemd_debian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=751636 4 | apt-get install libpam-systemd 5 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/debian/update_debian.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | arch="$(uname -r | sed 
's/^.*[0-9]\{1,\}\.[0-9]\{1,\}\.[0-9]\{1,\}\(-[0-9]\{1,2\}\)-//')" 4 | 5 | # Disable systemd apt timers/services 6 | systemctl stop apt-daily.timer; 7 | systemctl stop apt-daily-upgrade.timer; 8 | systemctl disable apt-daily.timer; 9 | systemctl disable apt-daily-upgrade.timer; 10 | systemctl mask apt-daily.service; 11 | systemctl mask apt-daily-upgrade.service; 12 | systemctl daemon-reload; 13 | 14 | # Disable periodic activities of apt 15 | cat </etc/apt/apt.conf.d/10periodic; 16 | APT::Periodic::Enable "0"; 17 | APT::Periodic::Update-Package-Lists "0"; 18 | APT::Periodic::Download-Upgradeable-Packages "0"; 19 | APT::Periodic::AutocleanInterval "0"; 20 | APT::Periodic::Unattended-Upgrade "0"; 21 | EOF 22 | 23 | apt-get update; 24 | 25 | apt-get -y upgrade linux-image-"$arch"; 26 | apt-get -y install linux-headers-"$(uname -r)"; 27 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/fedora/build-tools_fedora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | # Installing build tools here because Fedora 22+ will not do so during kickstart 3 | dnf -y install kernel-headers kernel-devel-"$(uname -r)" elfutils-libelf-devel gcc make perl 4 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/fedora/install-supporting-packages_fedora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | # Chef with Fedora >= 30 requires libxcrypt-compat to be installed 3 | dnf -y install libxcrypt-compat 4 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/fedora/networking_fedora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | if [[ "$PACKER_BUILDER_TYPE" == virtualbox* ]]; then 4 | 5 | ## 
https://access.redhat.com/site/solutions/58625 (subscription required) 6 | # add 'single-request-reopen' so it is included when /etc/resolv.conf is generated 7 | cat >> /etc/NetworkManager/dispatcher.d/fix-slow-dns <> /etc/resolv.conf 10 | EOF 11 | chmod +x /etc/NetworkManager/dispatcher.d/fix-slow-dns 12 | systemctl restart NetworkManager.service 13 | echo 'Slow DNS fix applied (single-request-reopen)' 14 | else 15 | echo 'Slow DNS fix not required for this platform, skipping' 16 | fi 17 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/fedora/real-tmp_fedora.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | echo "Don't use the tmpfs based /tmp dir that is limited to 50% of RAM" 3 | systemctl mask tmp.mount 4 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/fedora/update_dnf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # update all packages 4 | dnf -y upgrade 5 | 6 | reboot; 7 | sleep 60; 8 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/freebsd/cleanup_freebsd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # cleanup orphaned packages and cache 4 | pkg autoremove --yes 5 | pkg clean --yes --all 6 | rm -f /var/db/pkg/repo-FreeBSD.sqlite 7 | 8 | # Purge files we don't need any longer 9 | rm -rf /var/db/freebsd-update/files; 10 | mkdir -p /var/db/freebsd-update/files; 11 | rm -f /var/db/freebsd-update/*-rollback; 12 | rm -rf /var/db/freebsd-update/install.*; 13 | rm -rf /boot/kernel.old; 14 | rm -f /boot/kernel*/*.symbols; 15 | rm -f /*.core; 16 | rm -rf /var/cache/pkg; 17 | rm -f /usr/home/vagrant/*.iso; 18 | 
-------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/freebsd/minimize_freebsd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | case "$PACKER_BUILDER_TYPE" in 4 | qemu) exit 0 ;; 5 | esac 6 | 7 | ZROOT="zroot/ROOT/default" 8 | 9 | zfs set compression=off $ZROOT; 10 | dd if=/dev/zero of=/EMPTY bs=1m || echo "dd(1) exits after taking over all the space" 11 | sync 12 | rm -f /EMPTY; 13 | # Block until the empty file has been removed, otherwise, Packer 14 | # will try to kill the box while the disk is still full and that's bad 15 | sync; 16 | zfs set compression=lz4 $ZROOT; 17 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/freebsd/sudoers_freebsd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | pkg install -y sudo; 4 | echo "vagrant ALL=(ALL) NOPASSWD: ALL" >>/usr/local/etc/sudoers; 5 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/freebsd/update_freebsd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | freebsd_update="/usr/sbin/freebsd-update --not-running-from-cron"; 4 | 5 | # Update FreeBSD 6 | # NOTE: the install action fails if there are no updates so || true it 7 | env PAGER=/bin/cat "$freebsd_update" fetch || true; 8 | env PAGER=/bin/cat "$freebsd_update" install || true; 9 | 10 | # shellcheck disable=SC2154 11 | if [ "$pkg_branch" != "quarterly" ]; then 12 | sed -i.bak -e "s,pkg+http://pkg.FreeBSD.org/\${ABI}/quarterly,pkg+http://pkg.FreeBSD.org/\${ABI}/${pkg_branch}," /etc/pkg/FreeBSD.conf 13 | rm -f /etc/pkg/FreeBSD.conf.bak 14 | fi 15 | 16 | env ASSUME_ALWAYS_YES=true pkg update; 17 | -------------------------------------------------------------------------------- 
/Bento/bento/packer_templates/scripts/macos/disable_auto_update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | sleep 10 4 | 5 | echo 'Disable automatic updates' 6 | # TOGGLE ALL OFF (auto checking is on to show other prefs are toggled off) 7 | # before setting values quit system preferences & stop software update - stops defaults cache breaking 'AutomaticCheckEnabled' 8 | osascript -e "tell application \"System Preferences\" to quit" 9 | softwareupdate --schedule off 10 | defaults write /Library/Preferences/com.apple.SoftwareUpdate.plist AutomaticCheckEnabled -bool No 11 | defaults write /Library/Preferences/com.apple.SoftwareUpdate.plist AutomaticDownload -bool NO 12 | defaults write /Library/Preferences/com.apple.SoftwareUpdate.plist ConfigDataInstall -bool NO 13 | defaults write /Library/Preferences/com.apple.SoftwareUpdate.plist CriticalUpdateInstall -bool NO 14 | defaults write /Library/Preferences/com.apple.commerce.plist AutoUpdateRestartRequired -bool NO 15 | defaults write /Library/Preferences/com.apple.commerce.plist AutoUpdate -bool NO 16 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/macos/parallels-tools.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -e .PACKER_BUILDER_TYPE ] || echo "$PACKER_BUILDER_TYPE" | grep -q '^parallels'; then 4 | echo "Installing Parallels Tools..." 5 | installer -pkg /Volumes/Parallels\ Tools/Install.app/Contents/Resources/Install.mpkg -target / 6 | 7 | # This usually works but gives a failed to eject error 8 | hdiutil detach /Volumes/Parallels\ Tools || echo "exit code $? 
is suppressed"; 9 | 10 | # Reboot is needed for tools install 11 | reboot 12 | fi 13 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/macos/system-update-complete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -euo 3 | IFS="$(printf ' \n\t')" 4 | 5 | # wait for the update process to complete 6 | if (grep "Action.*restart" ~/Library/Logs/packer_softwareupdate.log); then 7 | tail -f /var/log/install.log | sed '/.*Setup Assistant.*ISAP.*Done.*/ q' | grep ISAP || true 8 | sleep 180 9 | fi 10 | 11 | echo "Software update completed" 12 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/macos/system-update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Downloading and installing system updates..." 4 | softwareupdate -i -r -R 5 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/rhel/update_dnf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # update all packages 4 | dnf -y upgrade --skip-broken 5 | 6 | reboot; 7 | sleep 60; 8 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/solaris/minimize_solaris.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -ux 2 | 3 | dd if=/dev/zero of=/EMPTY bs=1048576 4 | rm -f /EMPTY 5 | # Block until the empty file has been removed, otherwise, Packer 6 | # will try to kill the box while the disk is still full and that's bad 7 | sync 8 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/solaris/update_solaris.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | pkg update pkg:/package/pkg || true 4 | pkg update --accept || true 5 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/solaris/vmtools_solaris.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | # Add pkgadd auto-answer file 4 | sudo mkdir -p /tmp 5 | sudo chmod 777 /tmp 6 | { 7 | echo "mail=" 8 | echo "instance=overwrite" 9 | echo "partial=nocheck" 10 | echo "runlevel=nocheck" 11 | echo "idepend=nocheck" 12 | echo "rdepend=nocheck" 13 | echo "space=nocheck" 14 | echo "setuid=nocheck" 15 | echo "conflict=nocheck" 16 | echo "action=nocheck" 17 | echo "basedir=default" 18 | } > /tmp/nocheck 19 | 20 | if [ -f /home/vagrant/.vbox_version ]; then 21 | mkdir /tmp/vbox 22 | ls 23 | echo "all" | sudo -i pkgadd -a /tmp/nocheck -d /media/VBOXADDITIONS_*/VBoxSolarisAdditions.pkg 24 | fi 25 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/suse/remove-dvd-source_suse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | zypper removerepo "$(zypper repos | grep 'SLES' | awk '{ print $3 }' | grep "^SLES")"; 4 | exit 0 5 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/suse/repositories_suse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | version=$(grep VERSION= /etc/os-release | cut -f2 -d\" | cut -f1 -d\ ) 4 | 5 | zypper removerepo "openSUSE-${version}-0" 6 | 7 | zypper ar http://download.opensuse.org/distribution/leap/"${version}"/repo/oss/ openSUSE-Leap-"${version}"-Oss 8 | zypper ar http://download.opensuse.org/distribution/leap/"${version}"/repo/non-oss/ 
openSUSE-Leap-"${version}"-Non-Oss 9 | zypper ar http://download.opensuse.org/update/leap/"${version}"/oss/ openSUSE-Leap-"${version}"-Update 10 | zypper ar http://download.opensuse.org/update/leap/"${version}"/non-oss/ openSUSE-Leap-"${version}"-Update-Non-Oss 11 | 12 | zypper refresh 13 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/suse/sudoers_suse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # update sudoers - can't do this in autoinst.xml 4 | printf "\nupdate sudoers ..." 5 | printf "vagrant ALL=(ALL) NOPASSWD: ALL\n" >> /etc/sudoers 6 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/suse/unsupported-modules_suse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Enable unsupported kernel modules, so vboxguest can install 4 | echo 'allow_unsupported_modules 1' > /etc/modprobe.d/10-unsupported-modules.conf 5 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/suse/update_suse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | echo "updating all packages" 4 | zypper update -y 5 | 6 | reboot 7 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/suse/vagrant_group_suse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # User 'vagrant' belogs to the 'users' group by default so we need to 4 | # create a new group 'vagrant' and put our user there. 
5 | 6 | groupadd -f vagrant 7 | gpasswd -a vagrant vagrant 8 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/suse/zypper-locks_suse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # remove zypper locks on removed packages to avoid later dependency problems 4 | zypper --non-interactive rl \* 5 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/ubuntu/hyperv_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | case "$PACKER_BUILDER_TYPE" in 4 | hyperv-iso) 5 | echo "installing packaging for hyper-v" 6 | apt-get -y install linux-image-virtual linux-tools-virtual linux-cloud-tools-virtual; 7 | esac 8 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/ubuntu/networking_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | ubuntu_version="$(lsb_release -r | awk '{print $2}')"; 4 | major_version="$(echo "$ubuntu_version" | awk -F. 
'{print $1}')"; 5 | 6 | if [ "$major_version" -ge "18" ]; then 7 | echo "Create netplan config for eth0" 8 | cat </etc/netplan/01-netcfg.yaml; 9 | network: 10 | version: 2 11 | ethernets: 12 | eth0: 13 | dhcp4: true 14 | EOF 15 | else 16 | # Adding a 2 sec delay to the interface up, to make the dhclient happy 17 | echo "pre-up sleep 2" >> /etc/network/interfaces; 18 | fi 19 | 20 | # Disable Predictable Network Interface names and use eth0 21 | [ -e /etc/network/interfaces ] && sed -i 's/en[[:alnum:]]*/eth0/g' /etc/network/interfaces; 22 | sed -i 's/GRUB_CMDLINE_LINUX="\(.*\)"/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0 \1"/g' /etc/default/grub; 23 | update-grub; 24 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/ubuntu/sudoers_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | sed -i -e '/Defaults\s\+env_reset/a Defaults\texempt_group=sudo' /etc/sudoers; 4 | 5 | # Set up password-less sudo for the vagrant user 6 | echo 'vagrant ALL=(ALL) NOPASSWD:ALL' >/etc/sudoers.d/99_vagrant; 7 | chmod 440 /etc/sudoers.d/99_vagrant; 8 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/ubuntu/systemd_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=751636 4 | apt-get install libpam-systemd 5 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/windows/disable-screensaver.ps1: -------------------------------------------------------------------------------- 1 | Set-StrictMode -Version Latest 2 | $ProgressPreference = 'SilentlyContinue' 3 | $ErrorActionPreference = 'Stop' 4 | 5 | trap { 6 | Write-Host 7 | Write-Host "ERROR: $_" 8 | ($_.ScriptStackTrace -split '\r?\n') -replace 
'^(.*)$','ERROR: $1' | Write-Host 9 | ($_.Exception.ToString() -split '\r?\n') -replace '^(.*)$','ERROR EXCEPTION: $1' | Write-Host 10 | Write-Host 11 | Write-Host 'Sleeping for 60m to give you time to look around the virtual machine before self-destruction...' 12 | Start-Sleep -Seconds (60*60) 13 | Exit 1 14 | } 15 | 16 | Write-Host 'Disable the screensaver' 17 | Set-ItemProperty -Path 'HKCU:\Control Panel\Desktop' -Name ScreenSaveActive -Type DWORD -Value 0 18 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/windows/disable-system-restore.ps1: -------------------------------------------------------------------------------- 1 | Set-StrictMode -Version Latest 2 | $ProgressPreference = 'SilentlyContinue' 3 | $ErrorActionPreference = 'Stop' 4 | 5 | trap { 6 | Write-Host 7 | Write-Host "ERROR: $_" 8 | ($_.ScriptStackTrace -split '\r?\n') -replace '^(.*)$','ERROR: $1' | Write-Host 9 | ($_.Exception.ToString() -split '\r?\n') -replace '^(.*)$','ERROR EXCEPTION: $1' | Write-Host 10 | Write-Host 11 | Write-Host 'Sleeping for 60m to give you time to look around the virtual machine before self-destruction...' 
12 | Start-Sleep -Seconds (60*60) 13 | Exit 1 14 | } 15 | 16 | $osInfo = Get-CimInstance -ClassName Win32_OperatingSystem 17 | if ($osInfo.ProductType -eq 1) 18 | { 19 | # system-restore isn't on servers 20 | Write-Host 'Disabling System Restore' 21 | Disable-ComputerRestore -Drive "C:\" 22 | } 23 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/scripts/windows/enable-file-sharing.ps1: -------------------------------------------------------------------------------- 1 | Set-StrictMode -Version Latest 2 | $ProgressPreference = 'SilentlyContinue' 3 | $ErrorActionPreference = 'Stop' 4 | 5 | trap { 6 | Write-Host 7 | Write-Host "ERROR: $_" 8 | ($_.ScriptStackTrace -split '\r?\n') -replace '^(.*)$','ERROR: $1' | Write-Host 9 | ($_.Exception.ToString() -split '\r?\n') -replace '^(.*)$','ERROR EXCEPTION: $1' | Write-Host 10 | Write-Host 11 | Write-Host 'Sleeping for 60m to give you time to look around the virtual machine before self-destruction...' 12 | Start-Sleep -Seconds (60*60) 13 | Exit 1 14 | } 15 | 16 | Write-Host 'Enable filesharing' 17 | Enable-NetFirewallRule -DisplayGroup "File and Printer Sharing" 18 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/vagrantfile-freebsd.template: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.configure(2) do |config| 5 | config.ssh.shell = "sh" 6 | 7 | # Disable the base shared folder, Guest Tools supporting this feature are 8 | # unavailable for all providers. 9 | config.vm.synced_folder ".", "/vagrant", disabled: true 10 | 11 | config.vm.provider :parallels do |prl, override| 12 | # Guest Tools are unavailable. 
13 | prl.check_guest_tools = false 14 | prl.functional_psf = false 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /Bento/bento/packer_templates/vagrantfile-windows.template: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | Vagrant.configure(2) do |config| 5 | config.vm.guest = :windows 6 | config.vm.communicator = "winrm" 7 | config.vm.boot_timeout = 300 8 | config.vm.network :forwarded_port, guest: 3389, host: 3389, id: 'rdp', auto_correct: true 9 | config.vm.network :forwarded_port, guest: 5985, host: 5985, id: "winrm", auto_correct: true 10 | 11 | config.vm.provider "virtualbox" do |vb| 12 | vb.cpus = 2 13 | vb.memory = 4096 14 | end 15 | 16 | config.vm.provider 'hyperv' do |hv| 17 | hv.ip_address_timeout = 240 18 | hv.memory = 4096 19 | hv.cpus = 2 20 | hv.enable_virtualization_extensions = true 21 | end 22 | 23 | config.vm.provider :libvirt do |domain| 24 | domain.memory = 4096 25 | domain.cpus = 2 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /Cloud-Native/Samples/1.cp-istioctl-v1.16.1.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cd ~ 4 | curl -L https://istio.io/downloadIstio | ISTIO_VERSION=1.16.1 TARGET_ARCH=x86_64 sh - 5 | cd istio-1.16.1 6 | cp bin/istioctl /usr/local/bin 7 | 8 | -------------------------------------------------------------------------------- /Cloud-Native/Samples/2.ins-istio-demo.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | istioctl install --set profile=demo -y 4 | kubectl create ns online-boutique 5 | kubectl label namespace online-boutique istio-injection=enabled 6 | 7 | -------------------------------------------------------------------------------- 
/Cloud-Native/Samples/3.ins-online-boutique-app.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | mkdir -p ~/istio-1.16.1/MSA 4 | cd ~/istio-1.16.1/MSA 5 | git clone https://github.com/GoogleCloudPlatform/microservices-demo.git online-boutique 6 | kubectl apply -f ./online-boutique/release/kubernetes-manifests.yaml -n online-boutique # manifests live inside the cloned dir, not the cwd 7 | 8 | -------------------------------------------------------------------------------- /Cloud-Native/Samples/deploy-chk-info-w-LB.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: deploy-chk-info 5 | labels: 6 | app: deploy-chk-info 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: deploy-chk-info 12 | template: 13 | metadata: 14 | labels: 15 | app: deploy-chk-info 16 | spec: 17 | containers: 18 | - name: chk-info 19 | image: sysnet4admin/chk-info 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: lb-chk-info 25 | spec: 26 | selector: 27 | app: deploy-chk-info 28 | ports: 29 | - name: http 30 | port: 80 31 | targetPort: 80 32 | type: LoadBalancer 33 | 34 | 
-------------------------------------------------------------------------------- /Docker/Dockerfiles/audit-trail(chk-log)/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.26.0-alpine-slim 2 | 3 | LABEL Name=audit-trail(chk-log) Version=1.26.0 4 | 5 | # default alias from /bin/bash to /bin/ash 6 | RUN apk add --no-cache bash \ 7 | && ln -sf /usr/share/zoneinfo/Asia/Seoul /etc/localtime 8 | 9 | COPY nginx.conf /etc/nginx/nginx.conf 10 | RUN ln -sf /dev/stdout /var/log/nginx/access.log \ 11 | && ln -sf /dev/stderr /var/log/nginx/error.log \ 12 | && mkdir -p /audit 13 | 14 | EXPOSE 80 15 | 16 | STOPSIGNAL SIGTERM 17 | 18 | CMD ["nginx", "-g", "daemon off;"] 19 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/audit-trail(chk-log)/nginx.conf: -------------------------------------------------------------------------------- 1 | #user nobody; 2 | worker_processes 1; 3 | #error_log logs/error.log; 4 | #error_log logs/error.log notice; 5 | #error_log logs/error.log info; 6 | #pid logs/nginx.pid; 7 | user root; 8 | events { 9 | worker_connections 1024; 10 | } 11 | http { 12 | log_format audit '$time_local $server_addr $request_method'; 13 | server { 14 | listen 80; 15 | server_name localhost; 16 | access_log /audit/audit_$hostname.log audit; 17 | location / { 18 | root /tmp; 19 | default_type text/html; 20 | return 200 'pod_n: $hostname | ip_dest: $server_addr\n'; 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/chk-info/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.26.0-alpine-slim 2 | 3 | LABEL Name=chk-info Version=1.26.0 4 | 5 | RUN apk add --no-cache bash \ 6 | && rm -rf /etc/nginx/conf.d/default.conf 7 | COPY app.conf /etc/nginx/conf.d/ 8 | COPY index.html /usr/share/nginx/html 9 | 10 | COPY cert.crt 
/etc/nginx/conf.d/cert.crt 11 | COPY cert.key /etc/nginx/conf.d/cert.key 12 | 13 | CMD ["nginx", "-g", "daemon off;"] 14 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/chk-info/app.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80 default_server; 3 | server_name app_server; 4 | 5 | root /usr/share/nginx/html; 6 | error_log /var/log/nginx/app-server-error.log notice; 7 | index index.html; 8 | expires -1; 9 | 10 | sub_filter_once off; 11 | sub_filter 'server_hostname' '$hostname'; 12 | sub_filter 'server_address' '$server_addr:$server_port'; 13 | sub_filter 'server_url' '$request_uri'; 14 | sub_filter 'remote_addr' '$remote_addr:$remote_port'; 15 | sub_filter 'server_date' '$time_local'; 16 | sub_filter 'client_browser' '$http_user_agent'; 17 | sub_filter 'request_id' '$request_id'; 18 | sub_filter 'nginx_version' '$nginx_version'; 19 | sub_filter 'document_root' '$document_root'; 20 | sub_filter 'proxied_for_ip' '$http_x_forwarded_for'; 21 | } 22 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/dashboard/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14.14.0-alpine as builder 2 | COPY . . 
3 | RUN npm ci && npm run build 4 | 5 | FROM nginx:1.26.0-alpine-slim 6 | COPY --from=builder /public /public 7 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/echo-hname(chk-hn)/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.26.0-alpine-slim 2 | 3 | LABEL Name=echo-hname(chk-hn) Version=1.26.0 4 | 5 | RUN apk add --no-cache bash 6 | 7 | COPY nginx.conf /etc/nginx/nginx.conf 8 | COPY cert.crt /etc/nginx/conf.d/cert.crt 9 | COPY cert.key /etc/nginx/conf.d/cert.key 10 | 11 | CMD ["nginx", "-g", "daemon off;"] 12 | 13 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/echo-ip(chk-ip)/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.26.0-alpine-slim 2 | 3 | LABEL Name=echo-ip(chk-ip) Version=1.26.0 4 | 5 | RUN apk add --no-cache bash 6 | 7 | COPY nginx.conf /etc/nginx/nginx.conf 8 | COPY cert.crt /etc/nginx/conf.d/cert.crt 9 | COPY cert.key /etc/nginx/conf.d/cert.key 10 | 11 | CMD ["nginx", "-g", "daemon off;"] 12 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/healthz-nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.26.0-alpine-slim 2 | 3 | LABEL Name=healthz-nginx Version=1.26.0 4 | 5 | RUN apk add --no-cache bash \ 6 | && rm -rf /etc/nginx/conf.d/default.conf 7 | 8 | COPY app.conf /etc/nginx/conf.d/ 9 | COPY healthz /usr/share/nginx/html 10 | 11 | COPY cert.crt /etc/nginx/conf.d/cert.crt 12 | COPY cert.key /etc/nginx/conf.d/cert.key 13 | 14 | CMD ["nginx", "-g", "daemon off;"] 15 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/healthz-nginx/app.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80 
default_server; 3 | server_name app_server; 4 | 5 | location /healthz { 6 | add_header purpose health-check; 7 | } 8 | 9 | root /usr/share/nginx/html; 10 | error_log /var/log/nginx/app-server-error.log notice; 11 | index healthz index.html; 12 | expires -1; 13 | 14 | sub_filter_once off; 15 | sub_filter 'server_hostname' '$hostname'; 16 | sub_filter 'server_address' '$server_addr:$server_port'; 17 | sub_filter 'server_url' '$request_uri'; 18 | sub_filter 'remote_addr' '$remote_addr:$remote_port'; 19 | sub_filter 'server_date' '$time_local'; 20 | sub_filter 'client_browser' '$http_user_agent'; 21 | sub_filter 'request_id' '$request_id'; 22 | sub_filter 'nginx_version' '$nginx_version'; 23 | sub_filter 'document_root' '$document_root'; 24 | sub_filter 'proxied_for_ip' '$http_x_forwarded_for'; 25 | } 26 | 27 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/healthz-nginx/healthz: -------------------------------------------------------------------------------- 1 |
2 | System Status 3 |

It is looking good. 4 | HTTP ready to service.

5 |
6 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/hello/v1/.app.conf.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Docker/Dockerfiles/hello/v1/.app.conf.swp -------------------------------------------------------------------------------- /Docker/Dockerfiles/hello/v1/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.24.0-alpine-slim 2 | 3 | LABEL Name=hello Version=v1 4 | 5 | RUN apk add --no-cache bash \ 6 | && rm -rf /etc/nginx/conf.d/default.conf 7 | 8 | COPY app.conf /etc/nginx/conf.d/ 9 | COPY index.html /usr/share/nginx/html 10 | 11 | COPY cert.crt /etc/nginx/conf.d/cert.crt 12 | COPY cert.key /etc/nginx/conf.d/cert.key 13 | 14 | CMD ["nginx", "-g", "daemon off;"] 15 | 16 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/hello/v1/index.html: -------------------------------------------------------------------------------- 1 | hello-v1 2 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/hello/v2/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.24.0-alpine-slim 2 | 3 | LABEL Name=hello Version=v2 4 | 5 | RUN apk add --no-cache bash \ 6 | && rm -rf /etc/nginx/conf.d/default.conf 7 | 8 | COPY app.conf /etc/nginx/conf.d/ 9 | COPY index.html /usr/share/nginx/html 10 | 11 | COPY cert.crt /etc/nginx/conf.d/cert.crt 12 | COPY cert.key /etc/nginx/conf.d/cert.key 13 | 14 | CMD ["nginx", "-g", "daemon off;"] 15 | 16 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/hello/v2/index.html: -------------------------------------------------------------------------------- 1 | hello-v2 2 | 
-------------------------------------------------------------------------------- /Docker/Dockerfiles/hpa-cpu-memory/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.26.0-alpine-slim 2 | 3 | LABEL NAME=hpa-cpu-memory Version=1.26.0 4 | 5 | RUN apk add --no-cache fcgiwrap spawn-fcgi python3 bash 6 | 7 | COPY nginx.conf /etc/nginx/nginx.conf 8 | COPY index.html /usr/share/nginx/html/index.html 9 | COPY memory.py /tmp/memory.py 10 | COPY run.sh /run.sh 11 | 12 | EXPOSE 80 13 | 14 | ENTRYPOINT ["/run.sh"] 15 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/hpa-cpu-memory/memory.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import time 3 | 4 | tmp_array = [0]*1000000 5 | 6 | with open("/proc/1/fd/1", "w") as f: 7 | print("Memory test started, wait for 5minutes", file=f) 8 | f.flush() 9 | 10 | print("Content-Type: text/html") 11 | print() 12 | time.sleep(300) 13 | 14 | with open("/proc/1/fd/1", "w") as f: 15 | print("Memory test finished", file=f) 16 | f.flush() 17 | 18 | print("memory test finished") 19 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/hpa-cpu-memory/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | spawn-fcgi -p 9001 -P /run/spawn-fcgi.pid -- /usr/bin/fcgiwrap -c 3; 3 | ./docker-entrypoint.sh nginx; 4 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/kubecon-eu/latest/.app.conf.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Docker/Dockerfiles/kubecon-eu/latest/.app.conf.swp -------------------------------------------------------------------------------- 
/Docker/Dockerfiles/kubecon-eu/latest/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.27.3-alpine-slim 2 | 3 | LABEL Name=kubecon-eu Version=latest 4 | 5 | RUN apk add --no-cache bash \ 6 | && rm -rf /etc/nginx/conf.d/default.conf 7 | 8 | COPY app.conf /etc/nginx/conf.d/ 9 | COPY index.html /usr/share/nginx/html 10 | 11 | COPY cert.crt /etc/nginx/conf.d/cert.crt 12 | COPY cert.key /etc/nginx/conf.d/cert.key 13 | 14 | CMD ["nginx", "-g", "daemon off;"] 15 | 16 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/kubecon-eu/latest/index.html: -------------------------------------------------------------------------------- 1 | kubecon-eu-latest-new 2 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/kubecon-eu/swp-img/.app.conf.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Docker/Dockerfiles/kubecon-eu/swp-img/.app.conf.swp -------------------------------------------------------------------------------- /Docker/Dockerfiles/kubecon-eu/swp-img/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | # Switch base-img 3 | ARG BASE_IMAGE=nginx:1.27.3-alpine-slim 4 | #ARG BASE_IMAGE=nginx:1.26.0-alpine-slim 5 | 6 | FROM ${BASE_IMAGE} 7 | 8 | LABEL Name=kubecon-eu Version=${NGINX_VERSION} 9 | 10 | RUN apk add --no-cache bash \ 11 | && rm -rf /etc/nginx/conf.d/default.conf \ 12 | && echo "Nginx Version is ${NGINX_VERSION}" > /usr/share/nginx/html/index.html 13 | 14 | COPY app.conf /etc/nginx/conf.d/ 15 | COPY cert.crt /etc/nginx/conf.d/cert.crt 16 | COPY cert.key /etc/nginx/conf.d/cert.key 17 | 18 | CMD ["nginx", "-g", "daemon off;"] 19 | 20 | -------------------------------------------------------------------------------- 
/Docker/Dockerfiles/kustomize/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20 2 | 3 | LABEL Name=kustomize Version=5.4.1 4 | 5 | WORKDIR /tmp 6 | RUN set -ex; wget "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv5.4.1/kustomize_v5.4.1_linux_amd64.tar.gz" \ 7 | && tar -xvf kustomize_v5.4.1_linux_amd64.tar.gz \ 8 | && chmod 744 kustomize \ 9 | && cp kustomize /bin/kustomize 10 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/multi-proc/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:21.10 2 | RUN apt-get update && apt-get install -y \ 3 | supervisor \ 4 | nginx \ 5 | && rm -rf /var/lib/apt/lists/* 6 | 7 | RUN mkdir -p /var/log/supervisor 8 | COPY supervisord.conf /etc/supervisor/conf.d/supervisord.conf 9 | CMD ["/usr/bin/supervisord"] 10 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/multi-proc/supervisord.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | 4 | [program:startup] 5 | priority=1 6 | command=/usr/bin/sleep 60 7 | stdout_logfile=/var/log/supervisor/%(program_name)s.log 8 | stderr_logfile=/var/log/supervisor/%(program_name)s.log 9 | autorestart=false 10 | startsecs=0 11 | 12 | [program:nginx] 13 | priority=10 14 | command=nginx -g "daemon off;" 15 | stdout_logfile=/var/log/supervisor/nginx.log 16 | stderr_logfile=/var/log/supervisor/nginx.log 17 | autorestart=true 18 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/mysql-auth/old/config/conf.d/docker.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | skip-host-cache 3 | skip-name-resolve 4 | 
-------------------------------------------------------------------------------- /Docker/Dockerfiles/net-tools-ifn/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.13.5 2 | 3 | LABEL Name=net-tools Version=3.13.5 4 | 5 | USER root 6 | RUN apk add --no-cache bash tzdata openntpd bind-tools curl 7 | RUN ln -sf /usr/share/zoneinfo/Asia/Seoul /etc/localtime 8 | 9 | ENV HOSTNAME net-tools 10 | COPY bashrc /root/.bashrc 11 | COPY curlchk /usr/local/bin/curlchk 12 | RUN chmod 700 /usr/local/bin/curlchk 13 | 14 | # forward request and error for curl logs to docker log collector 15 | RUN ln -sf /proc/1/fd/1 /var/log/curl.log 16 | 17 | ENTRYPOINT ["sleep", "infinity"] 18 | 19 | 20 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/net-tools-ifn/bashrc: -------------------------------------------------------------------------------- 1 | alias update='apk update && apk upgrade' 2 | export HISTTIMEFORMAT="%d/%m/%y %T " 3 | export PS1='\u@\h:\W \$ ' 4 | alias l='ls -CF' 5 | alias la='ls -A' 6 | alias ll='ls -alF' 7 | alias ls='ls --color=auto' 8 | export PS1="\[\e[31m\][\[\e[m\]\[\e[38;5;172m\]\u\[\e[m\]@\[\e[38;5;153m\]\h\[\e[m\] \[\e[38;5;214m\]\W\[\e[m\]\[\e[31m\]]\[\e[m\]\\$ " 9 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/net-tools-ifn/curlchk: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" $1) 3 | 4 | if [ "$HTTP_CODE" = "200" ]; then 5 | echo "`date` Okay [$1] is working properly" >> /var/log/curl.log 6 | else 7 | echo "`date` NOOO [$1] is not working on" >> /var/log/curl.log 8 | fi 9 | 10 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/net-tools/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM 
alpine:3.13.5 2 | 3 | LABEL Name=net-tools Version=3.13.5 4 | 5 | USER root 6 | RUN apk add --no-cache bash tzdata openntpd bind-tools curl 7 | RUN ln -sf /usr/share/zoneinfo/Asia/Seoul /etc/localtime 8 | 9 | ENV HOSTNAME net-tools 10 | COPY bashrc /root/.bashrc 11 | COPY curlchk /usr/local/bin/curlchk 12 | RUN chmod 700 /usr/local/bin/curlchk 13 | 14 | # forward request and error for curl logs to docker log collector 15 | RUN ln -sf /proc/1/fd/1 /var/log/curl.log 16 | 17 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/net-tools/bashrc: -------------------------------------------------------------------------------- 1 | alias update='apk update && apk upgrade' 2 | export HISTTIMEFORMAT="%d/%m/%y %T " 3 | export PS1='\u@\h:\W \$ ' 4 | alias l='ls -CF' 5 | alias la='ls -A' 6 | alias ll='ls -alF' 7 | alias ls='ls --color=auto' 8 | export PS1="\[\e[31m\][\[\e[m\]\[\e[38;5;172m\]\u\[\e[m\]@\[\e[38;5;153m\]\h\[\e[m\] \[\e[38;5;214m\]\W\[\e[m\]\[\e[31m\]]\[\e[m\]\\$ " 9 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/net-tools/curlchk: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | HTTP_CODE=$(curl -s -o /dev/null -w "%{http_code}" $1) 3 | 4 | if [ "$HTTP_CODE" = "200" ]; then 5 | echo "`date` Okay [$1] is working properly" >> /var/log/curl.log 6 | else 7 | echo "`date` NOOO [$1] is not working on" >> /var/log/curl.log 8 | fi 9 | 10 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/ollama-gemma2:2b/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ollama/ollama:0.3.14 2 | RUN ollama serve & sleep 1 && ollama pull gemma2:2b 3 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/ollama-llama3.2:1b/Dockerfile: 
-------------------------------------------------------------------------------- 1 | FROM ollama/ollama:0.3.14 2 | RUN ollama serve & sleep 1 && ollama pull llama3.2:1b 3 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/ollama-qwen2.5:1.5b/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ollama/ollama:0.3.14 2 | RUN ollama serve & sleep 1 && ollama pull qwen2.5:1.5b 3 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/sleepy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.13.5 2 | 3 | LABEL Name=sleepy Version=3.13.5 4 | 5 | USER root 6 | RUN apk add --no-cache bash tzdata openntpd 7 | RUN ln -sf /usr/share/zoneinfo/Asia/Seoul /etc/localtime 8 | 9 | ENV HOSTNAME sleepy 10 | COPY bashrc /root/.bashrc 11 | 12 | ENTRYPOINT ["sleep", "infinity"] 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/sleepy/bashrc: -------------------------------------------------------------------------------- 1 | alias update='apk update && apk upgrade' 2 | export HISTTIMEFORMAT="%d/%m/%y %T " 3 | export PS1='\u@\h:\W \$ ' 4 | alias l='ls -CF' 5 | alias la='ls -A' 6 | alias ll='ls -alF' 7 | alias ls='ls --color=auto' 8 | export PS1="\[\e[31m\][\[\e[m\]\[\e[38;5;172m\]\u\[\e[m\]@\[\e[38;5;153m\]\h\[\e[m\] \[\e[38;5;214m\]\W\[\e[m\]\[\e[31m\]]\[\e[m\]\\$ " 9 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/ssh-root/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.13.5 2 | 3 | LABEL Name=ssh-root Version=3.13.5 4 | 5 | RUN set -x \ 6 | && apk add --no-cache openssh bash tzdata openntpd bind-tools curl \ 7 | && sed 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' -i 
/etc/ssh/sshd_config \ 8 | && echo 'root:vagrant' | chpasswd \ 9 | && ssh-keygen -f /etc/ssh/ssh_host_rsa_key -N '' -t rsa \ 10 | && ssh-keygen -f /etc/ssh/ssh_host_dsa_key -N '' -t dsa \ 11 | && mkdir -p /var/run/sshd \ 12 | && ln -sf /usr/share/zoneinfo/Asia/Seoul /etc/localtime 13 | 14 | ENV HOSTNAME ssh-root 15 | COPY bashrc /root/.bashrc 16 | 17 | EXPOSE 22 18 | CMD ["/usr/sbin/sshd","-D"] 19 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/ssh-root/bashrc: -------------------------------------------------------------------------------- 1 | alias update='apk update && apk upgrade' 2 | export HISTTIMEFORMAT="%d/%m/%y %T " 3 | export PS1='\u@\h:\W \$ ' 4 | alias l='ls -CF' 5 | alias la='ls -A' 6 | alias ll='ls -alF' 7 | alias ls='ls --color=auto' 8 | export PS1="\[\e[31m\][\[\e[m\]\[\e[38;5;172m\]\u\[\e[m\]@\[\e[38;5;153m\]\h\[\e[m\] \[\e[38;5;214m\]\W\[\e[m\]\[\e[31m\]]\[\e[m\]\\$ " 9 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/ssh/bashrc: -------------------------------------------------------------------------------- 1 | alias update='apk update && apk upgrade' 2 | export HISTTIMEFORMAT="%d/%m/%y %T " 3 | export PS1='\u@\h:\W \$ ' 4 | alias l='ls -CF' 5 | alias la='ls -A' 6 | alias ll='ls -alF' 7 | alias ls='ls --color=auto' 8 | export PS1="\[\e[31m\][\[\e[m\]\[\e[38;5;172m\]\u\[\e[m\]@\[\e[38;5;153m\]\h\[\e[m\] \[\e[38;5;214m\]\W\[\e[m\]\[\e[31m\]]\[\e[m\]\\$ " 9 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/tardy-nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.26.0-alpine-slim 2 | 3 | LABEL Name=tardy-nginx Version=1.26.0 4 | 5 | COPY startup.sh / 6 | RUN apk add --no-cache bash \ 7 | && chmod +x /startup.sh 8 | CMD ["/startup.sh"] 9 | RUN echo "tardy again" > /usr/share/nginx/html/index.html 10 | 11 | RUN ln -sf 
/proc/1/fd/1 /var/log/sleep.log 12 | 13 | -------------------------------------------------------------------------------- /Docker/Dockerfiles/tardy-nginx/startup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | echo "deep sleep" > /var/log/sleep.log && sleep 60 4 | touch /tmp/healthy-on && nginx -g "daemon off;" 5 | -------------------------------------------------------------------------------- /Docker/Registry/create-registry.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | certdir=/etc/docker/certs.d/192.168.1.10:8443 3 | mkdir -p /data 4 | mkdir -p $certdir 5 | openssl req -x509 -nodes -newkey rsa:4096 -keyout tls.key -out tls.crt -days 365 \ 6 | -config tls.csr -extensions v3_req 7 | cp tls.crt $certdir 8 | yum install sshpass -y 9 | for i in {1..3} 10 | do 11 | sshpass -p vagrant ssh -o StrictHostKeyChecking=no root@192.168.1.10$i mkdir -p $certdir 12 | sshpass -p vagrant scp tls.crt root@192.168.1.10$i:$certdir # root@ must match the ssh above or the copy runs as the invoking user 13 | done 14 | 15 | docker run -d \ 16 | --restart=always \ 17 | --name registry \ 18 | -v /root/IaC/Docker/Registry:/certs:ro \ 19 | -v /data:/var/lib/registry \ 20 | -e REGISTRY_HTTP_ADDR=0.0.0.0:443 \ 21 | -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/tls.crt \ 22 | -e REGISTRY_HTTP_TLS_KEY=/certs/tls.key \ 23 | -p 8443:443 \ 24 | registry:2 -------------------------------------------------------------------------------- /Docker/Registry/remover.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | certdir=/etc/docker/certs.d/192.168.1.10:8443 3 | rm -f tls.key tls.crt 4 | rm -rf /data 5 | rm -rf $certdir 6 | 7 | yum -y install sshpass 8 | for i in {1..3} 9 | do 10 | sshpass -p vagrant ssh -o StrictHostKeyChecking=no root@192.168.1.10$i rm -rf $certdir 11 | done 12 | 13 | yum remove sshpass -y 14 | docker rm -f registry 15 | docker rmi registry:2 
-------------------------------------------------------------------------------- /Docker/Registry/tls.csr: -------------------------------------------------------------------------------- 1 | [req] 2 | distinguished_name = private_registry_cert_req 3 | x509_extensions = v3_req 4 | prompt = no 5 | 6 | [private_registry_cert_req] 7 | C = KR 8 | ST = SEOUL 9 | L = SEOUL 10 | O = gilbut 11 | OU = Book_k8sInfra 12 | CN = 192.168.1.10 13 | 14 | [v3_req] 15 | keyUsage = keyEncipherment, dataEncipherment 16 | extendedKeyUsage = serverAuth 17 | subjectAltName = @alt_names 18 | 19 | [alt_names] 20 | DNS.0 = m-k8s 21 | IP.0 = 192.168.1.10 22 | -------------------------------------------------------------------------------- /Docker/build-kind/Basic/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Docker/build-kind/Basic/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /Docker/build-kind/Basic/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar 3 | -------------------------------------------------------------------------------- /Docker/build-kind/Basic/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8 2 | LABEL description="Echo IP Java Application" 3 | EXPOSE 60431 4 | COPY ./target/app-in-host.jar /opt/app-in-image.jar 5 | WORKDIR /opt 6 | ENTRYPOINT [ "java", "-jar", "app-in-image.jar" ] -------------------------------------------------------------------------------- 
/Docker/build-kind/Basic/src/main/java/com/stark/Industries/UltronPRJApplication.java: -------------------------------------------------------------------------------- 1 | package com.stark.Industries; 2 | 3 | import org.springframework.boot.SpringApplication; 4 | import org.springframework.boot.autoconfigure.SpringBootApplication; 5 | 6 | @SpringBootApplication 7 | public class UltronPRJApplication { 8 | 9 | public static void main(String[] args) { 10 | SpringApplication.run(UltronPRJApplication.class, args); 11 | } 12 | 13 | } 14 | -------------------------------------------------------------------------------- /Docker/build-kind/Basic/src/main/java/com/stark/Industries/UltronPRJController.java: -------------------------------------------------------------------------------- 1 | package com.stark.Industries; 2 | 3 | import org.springframework.web.bind.annotation.RequestMapping; 4 | import org.springframework.web.bind.annotation.RestController; 5 | 6 | import javax.servlet.http.HttpServletRequest; 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | 10 | @RestController 11 | public class UltronPRJController { 12 | 13 | @RequestMapping("/") 14 | public String hello(HttpServletRequest request){ 15 | String result = "src: "+request.getRemoteAddr()+" / dest: "+request.getServerName()+"\n"; 16 | return result; 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /Docker/build-kind/Basic/src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | server.port=80 -------------------------------------------------------------------------------- /Docker/build-kind/MultiStage/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8 AS int-build 2 | LABEL description="Java Application builder" 3 | RUN git clone https://github.com/iac-source/inbuilder.git 4 | WORKDIR inbuilder 5 | RUN chmod 755 mvnw 6 | 
RUN ./mvnw clean package 7 | 8 | FROM gcr.io/distroless/java:8 9 | LABEL description="Echo IP Java Application" 10 | EXPOSE 60434 11 | COPY --from=int-build inbuilder/target/app-in-host.jar /opt/app-in-image.jar 12 | WORKDIR /opt 13 | ENTRYPOINT [ "java", "-jar", "app-in-image.jar" ] -------------------------------------------------------------------------------- /Docker/build-kind/NoHost/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM openjdk:8 2 | LABEL description="Echo IP Java Application" 3 | EXPOSE 60433 4 | RUN git clone https://github.com/iac-source/inbuilder.git 5 | WORKDIR inbuilder 6 | RUN chmod 755 mvnw 7 | RUN ./mvnw clean package 8 | RUN mv target/app-in-host.jar /opt/app-in-image.jar 9 | WORKDIR /opt 10 | ENTRYPOINT [ "java", "-jar", "app-in-image.jar" ] -------------------------------------------------------------------------------- /Docker/build-kind/Optimal/.mvn/wrapper/maven-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sysnet4admin/IaC/5a41fd9f73f6c5943dd9f4fe55cd416192a59468/Docker/build-kind/Optimal/.mvn/wrapper/maven-wrapper.jar -------------------------------------------------------------------------------- /Docker/build-kind/Optimal/.mvn/wrapper/maven-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip 2 | wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar 3 | -------------------------------------------------------------------------------- /Docker/build-kind/Optimal/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gcr.io/distroless/java:8 2 | LABEL description="Echo IP Java Application" 3 | EXPOSE 60432 4 | COPY ./target/app-in-host.jar 
/opt/app-in-image.jar 5 | WORKDIR /opt 6 | ENTRYPOINT [ "java", "-jar", "app-in-image.jar" ] -------------------------------------------------------------------------------- /Docker/build-kind/Optimal/build-in-host.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | yum -y install java-1.8.0-openjdk-devel 3 | ./mvnw clean package 4 | docker build -t optimal-img . -------------------------------------------------------------------------------- /Docker/build-kind/Optimal/src/main/java/com/stark/Industries/UltronPRJApplication.java: -------------------------------------------------------------------------------- 1 | package com.stark.Industries; 2 | 3 | import org.springframework.boot.SpringApplication; 4 | import org.springframework.boot.autoconfigure.SpringBootApplication; 5 | 6 | @SpringBootApplication 7 | public class UltronPRJApplication { 8 | 9 | public static void main(String[] args) { 10 | SpringApplication.run(UltronPRJApplication.class, args); 11 | } 12 | 13 | } 14 | -------------------------------------------------------------------------------- /Docker/build-kind/Optimal/src/main/java/com/stark/Industries/UltronPRJController.java: -------------------------------------------------------------------------------- 1 | package com.stark.Industries; 2 | 3 | import org.springframework.web.bind.annotation.RequestMapping; 4 | import org.springframework.web.bind.annotation.RestController; 5 | 6 | import javax.servlet.http.HttpServletRequest; 7 | import java.util.HashMap; 8 | import java.util.Map; 9 | 10 | @RestController 11 | public class UltronPRJController { 12 | 13 | @RequestMapping("/") 14 | public String hello(HttpServletRequest request){ 15 | String result = "src: "+request.getRemoteAddr()+" / dest: "+request.getServerName()+"\n"; 16 | return result; 17 | } 18 | } 19 | -------------------------------------------------------------------------------- 
/Docker/build-kind/Optimal/src/main/resources/application.properties: -------------------------------------------------------------------------------- 1 | server.port=80 -------------------------------------------------------------------------------- /Docker/docker-builder/README.md: -------------------------------------------------------------------------------- 1 | # docker-builder 2 | 3 | 도커 빌드하고, 쿠버 테스트하고 하는 환경을 통합하기 위해서 베이그런트 스크립트를 만들었습니다. 4 | - docker 5 | - minikube 6 | - custom-git 7 | 8 | ## 사용법 9 | ```bash 10 | vagrant up 11 | ``` 12 | 끝나면, VM(cube-bldr)에 접속(`127.0.0.1:60084`)해서 아래의 명령을 실행하고 나오는 결과를 13 | ```bash 14 | cat ~/.ssh/id_rsa.pub 15 | ``` 16 | 깃허브 SSH Key로 등록하면 됩니다 17 | 18 | custom-git은 자유롭게 개인 수정해서 쓰면 됩니다. :) 19 | (현재는github에 제 계정으로 되어 있습니다.) 20 | 21 | -------------------------------------------------------------------------------- /Docker/docker-builder/custom-git.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # git clone k8s-code 4 | git clone https://github.com/sysnet4admin/_Lecture_k8s_learning.kit.git $HOME/_Lecture_k8s_learning.kit 5 | find $HOME/_Lecture_k8s_learning.kit -regex ".*\.\(sh\)" -exec chmod 700 {} \; 6 | # remote add 7 | cd $HOME/_Lecture_k8s_learning.kit 8 | git remote set-url origin git+ssh://git@github.com/sysnet4admin/_Lecture_k8s_learning.kit.git 9 | 10 | # git clone IaC 11 | git clone https://github.com/sysnet4admin/IaC.git $HOME/IaC 12 | # remote add 13 | cd $HOME/IaC 14 | git remote set-url origin git+ssh://git@github.com/sysnet4admin/IaC.git 15 | 16 | git config --global user.name "Hoon Jo" 17 | git config --global user.email pagaia@hotmail.com 18 | 19 | # ssh key gen; here-string answers the overwrite prompt, output silenced 20 | ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa <<< y >/dev/null 2>&1 21 | echo "***********************" 22 | echo "cat ~/.ssh/id_rsa.pub and put your SSH Keys in github" 23 | --------------------------------------------------------------------------------
/Docker/docker-builder/docker_build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # add docker-ce repo 4 | yum install yum-utils -y 5 | yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 6 | 7 | # install docker 8 | yum install docker-ce-$1 docker-ce-cli-$1 containerd.io-$2 -y 9 | 10 | # Ready to install for k8s 11 | systemctl enable --now docker 12 | -------------------------------------------------------------------------------- /Docker/docker-builder/env_build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # vim configuration 4 | echo 'alias vi=vim' >> /etc/profile 5 | 6 | # Set SELinux in permissive mode (effectively disabling it) 7 | setenforce 0 8 | sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config 9 | 10 | # RHEL/CentOS 7 have reported traffic issues being routed incorrectly due to iptables bypassed 11 | cat <<EOF > /etc/sysctl.d/k8s.conf 12 | net.bridge.bridge-nf-call-ip6tables = 1 13 | net.bridge.bridge-nf-call-iptables = 1 14 | EOF 15 | modprobe br_netfilter 16 | 17 | # config DNS 18 | cat <<EOF > /etc/resolv.conf 19 | nameserver 1.1.1.1 #cloudflare DNS 20 | nameserver 8.8.8.8 #Google DNS 21 | EOF 22 | 23 | # install util packages 24 | yum install epel-release -y 25 | yum install vim-enhanced -y 26 | yum install git -y 27 | -------------------------------------------------------------------------------- /Docker/index-BindMount.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Nginx Web Server 6 | 7 | 8 |

Running Bind Mount

9 | 10 | -------------------------------------------------------------------------------- /Docker/index-Volume.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Nginx Web Server 6 | 7 | 8 |

Running Volume

9 | 10 | -------------------------------------------------------------------------------- /GitOps/deploy-chk-info.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: gitops-chk-info 6 | name: gitops-chk-info 7 | spec: 8 | replicas: 5 9 | selector: 10 | matchLabels: 11 | app: gitops-chk-info 12 | template: 13 | metadata: 14 | labels: 15 | app: gitops-chk-info 16 | spec: 17 | containers: 18 | - image: sysnet4admin/chk-info 19 | name: gitops-chk-info 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: lb-gitops-chk-info 25 | spec: 26 | selector: 27 | app: gitops-chk-info 28 | ports: 29 | - name: http 30 | port: 80 31 | targetPort: 80 32 | type: LoadBalancer 33 | -------------------------------------------------------------------------------- /Jenkins/dev-to-prod/deploy-dev-qa-101.freestyle: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | docker build -t 192.168.1.10:8443/echo-ip . 
3 | docker push 192.168.1.10:8443/echo-ip 4 | kubectl apply -f https://raw.githubusercontent.com/IaC-Source/dev-prod/main/echo-ip-dev.yaml 5 | for try in {1..30} 6 | do 7 | export ready=$(kubectl get deployment --selector=app=fs-echo-ip-dev -n dev -o jsonpath --template="{.items[0].status.readyReplicas}") 8 | echo "trying $try: ready $ready"; 9 | if [ "$ready" == "1" ]; then 10 | exit 0 11 | fi 12 | sleep 1 13 | done 14 | exit 1 15 | -------------------------------------------------------------------------------- /Jenkins/dev-to-prod/deploy-qa-passed-prod.freestyle: -------------------------------------------------------------------------------- 1 | kubectl apply -f https://raw.githubusercontent.com/IaC-Source/dev-prod/main/echo-ip-prod.yaml 2 | -------------------------------------------------------------------------------- /Jenkins/dev-to-prod/namespaces.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev 5 | --- 6 | apiVersion: v1 7 | kind: Namespace 8 | metadata: 9 | name: prod -------------------------------------------------------------------------------- /Keycloak/README.md: -------------------------------------------------------------------------------- 1 | # Reference List 2 | 3 | ### Demo Source Code: 4 | https://github.com/sysnet4admin/IaC/tree/main/Keycloak 5 | 6 | ### Demo Reference Topology: 7 | https://developer.okta.com/blog/2021/11/08/k8s-api-server-oidc 8 | 9 | ### GKE Ingress Configuration: 10 | https://cloud.google.com/kubernetes-engine/docs/concepts/ingress 11 | https://cloud.google.com/kubernetes-engine/docs/tutorials/http-balancer 12 | https://cloud.google.com/kubernetes-engine/docs/how-to/managed-certs 13 | 14 | ### kubelogin Binary: 15 | https://github.com/int128/kubelogin 16 | 17 | ### GKE OIDC Configuration Guide: 18 | https://cloud.google.com/kubernetes-engine/docs/how-to/oidc 19 | 20 | ### AWS eksctl Configuration Guide (w/ OIDC 
Configuration): 21 | https://eksctl.io/usage/schema/ 22 | 23 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/.cmd: -------------------------------------------------------------------------------- 1 | 2 | run 1-1 3 | run 1-2 4 | 5 | # create cloud dns and input A record and select static IP 6 | # And several minutes later. 7 | # check certifcate 8 | 9 | openssl s_client -connect oncloud-1.site:443 10 | CONNECTED(00000005) 11 | depth=3 C = BE, O = GlobalSign nv-sa, OU = Root CA, CN = GlobalSign Root CA 12 | verify return:1 13 | depth=2 C = US, O = Google Trust Services LLC, CN = GTS Root R1 14 | verify return:1 15 | depth=1 C = US, O = Google Trust Services LLC, CN = GTS CA 1D4 16 | verify return:1 17 | depth=0 CN = oncloud-1.site 18 | verify return:1 19 | 20 | 21 | # few minutes later. 22 | # and then open browser. input "oncloud-2.site" 23 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/Deploy-infra/.cmd: -------------------------------------------------------------------------------- 1 | 2 | run 1-1 3 | run 1-2 4 | 5 | # create cloud dns and input A record and select static IP 6 | # And several minutes later. 7 | # check certifcate 8 | 9 | openssl s_client -connect oncloud-1.site:443 10 | CONNECTED(00000005) 11 | depth=3 C = BE, O = GlobalSign nv-sa, OU = Root CA, CN = GlobalSign Root CA 12 | verify return:1 13 | depth=2 C = US, O = Google Trust Services LLC, CN = GTS Root R1 14 | verify return:1 15 | depth=1 C = US, O = Google Trust Services LLC, CN = GTS CA 1D4 16 | verify return:1 17 | depth=0 CN = oncloud-1.site 18 | verify return:1 19 | 20 | 21 | # few minutes later. 22 | # and then open browser. 
input "oncloud-2.site" 23 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/Deploy-infra/1-2.deploy-gke-ingress-4-https-keycloak.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: keycloak-ingress 5 | annotations: 6 | spec.ingressClassName: "gce" 7 | kubernetes.io/ingress.global-static-ip-name: "hj-keycloak-oncloud-1-static-ip" 8 | networking.gke.io/managed-certificates: "keycloak-managed-cert" 9 | networking.gke.io/v1beta1.FrontendConfig: "keycloak-config" 10 | spec: 11 | defaultBackend: 12 | service: 13 | name: keycloak 14 | port: 15 | number: 80 16 | 17 | --- 18 | apiVersion: networking.gke.io/v1beta1 19 | kind: FrontendConfig 20 | metadata: 21 | name: keycloak-config 22 | spec: 23 | redirectToHttps: 24 | enabled: true 25 | 26 | --- 27 | apiVersion: networking.gke.io/v1 28 | kind: ManagedCertificate 29 | metadata: 30 | name: keycloak-managed-cert 31 | spec: 32 | domains: 33 | - "oncloud-1.site" 34 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/Deploy-infra/1-3.clientconfig-gke-keycloak-w-oidc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: authentication.gke.io/v2alpha1 2 | kind: ClientConfig 3 | metadata: 4 | name: default 5 | namespace: kube-public 6 | spec: 7 | authentication: 8 | - name: oidc 9 | oidc: 10 | # should change clientSecret after applying keycloak 11 | clientSecret: 6qfWVLJ91PrGABqqXD613ScVk6j2Qw1d 12 | clientID: k8s-auth 13 | cloudConsoleRedirectURI: https://console.cloud.google.com/kubernetes/oidc 14 | extraParams: resource=token-groups-claim 15 | groupPrefix: '-' 16 | groupsClaim: groups 17 | issuerURI: https://oncloud-1.site/realms/kubernetes 18 | kubectlRedirectURI: http://localhost:8000 19 | scopes: openid 20 | userClaim: preferred_username 21 | userPrefix: 
'-' 22 | internalServer: "" 23 | name: hj-keycloak-oncloud-1-gke 24 | # server: manually input after applying clientconfig or searching and add 25 | status: {} 26 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/Deploy-infra/2-1.deploy-eks-env-only.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | eksctl create cluster -f eksctl-config/keycloak-w-oidc.yaml 4 | 5 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/Deploy-infra/eksctl-config/keycloak-w-oidc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: hj-keycloak-oncloud-1-eks 6 | region: us-east-2 7 | version: "1.27" 8 | 9 | nodeGroups: 10 | - name: compute-group 11 | instanceType: t3a.small 12 | desiredCapacity: 3 13 | 14 | identityProviders: 15 | - name: keycloak 16 | type: oidc 17 | issuerUrl: https://oncloud-1.site/realms/kubernetes 18 | clientId: k8s-auth 19 | usernameClaim: preferred_username 20 | usernamePrefix: 21 | groupsClaim: groups 22 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/EKS/1.clusterrolebinding-4-devops-group-as-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: oidc-group-cluster-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: Group 12 | name: devops 13 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/EKS/2-oncloud-1.swtich-ctx-hoon-to-soojin.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | aws eks update-kubeconfig --name hj-keycloak-oncloud-1-eks --region us-east-2 --profile=soojin --alias=soojin-ctx-as-user-oncloud-1 4 | 5 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/EKS/3.set-cred-4-oidc-user.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | kubectl config set-credentials oidc-user \ 4 | --exec-api-version=client.authentication.k8s.io/v1beta1 \ 5 | --exec-command=kubelogin 6 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/GKE/1.clusterrolebinding-4-devops-group-as-admin-NOTWORK.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: oidc-group-cluster-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: Group 12 | name: devops 13 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/GKE/2-hoon.clusterrolebinding-4-devops-user-as-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: oidc-group-cluster-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: User 12 | name: hj@mz.co.kr 13 | -------------------------------------------------------------------------------- /Keycloak/oncloud-1.site/GKE/2-soojin.clusterrolebinding-4-devops-user-as-admin.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: oidc-group-cluster-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: User 12 | name: soojin@mz.co.kr 13 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/Deploy-infra/.cmd: -------------------------------------------------------------------------------- 1 | 2 | run 1-1 3 | run 1-2 4 | 5 | # create cloud dns and input A record and select static IP 6 | # And several minutes later. 7 | # check certifcate 8 | 9 | openssl s_client -connect oncloud-2.site:443 10 | CONNECTED(00000005) 11 | depth=3 C = BE, O = GlobalSign nv-sa, OU = Root CA, CN = GlobalSign Root CA 12 | verify return:1 13 | depth=2 C = US, O = Google Trust Services LLC, CN = GTS Root R1 14 | verify return:1 15 | depth=1 C = US, O = Google Trust Services LLC, CN = GTS CA 1D4 16 | verify return:1 17 | depth=0 CN = oncloud-2.site 18 | verify return:1 19 | 20 | 21 | # few minutes later. 22 | # and then open browser. 
input "oncloud-2.site" 23 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/Deploy-infra/1-2.deploy-gke-ingress-4-https-keycloak.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: keycloak-ingress 5 | annotations: 6 | spec.ingressClassName: "gce" 7 | kubernetes.io/ingress.global-static-ip-name: "hj-keycloak-oncloud-2-static-ip" 8 | networking.gke.io/managed-certificates: "keycloak-managed-cert" 9 | networking.gke.io/v1beta1.FrontendConfig: "keycloak-config" 10 | spec: 11 | defaultBackend: 12 | service: 13 | name: keycloak 14 | port: 15 | number: 80 16 | 17 | --- 18 | apiVersion: networking.gke.io/v1beta1 19 | kind: FrontendConfig 20 | metadata: 21 | name: keycloak-config 22 | spec: 23 | redirectToHttps: 24 | enabled: true 25 | 26 | --- 27 | apiVersion: networking.gke.io/v1 28 | kind: ManagedCertificate 29 | metadata: 30 | name: keycloak-managed-cert 31 | spec: 32 | domains: 33 | - "oncloud-2.site" 34 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/Deploy-infra/1-3.clientconfig-gke-keycloak-w-oidc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: authentication.gke.io/v2alpha1 2 | kind: ClientConfig 3 | metadata: 4 | name: default 5 | namespace: kube-public 6 | spec: 7 | authentication: 8 | - name: oidc 9 | oidc: 10 | # should change clientSecret after deploying keycloak 11 | clientSecret: Ay640idvu4XFZSkWYFmLsLk3NdzuOfDI 12 | clientID: k8s-auth 13 | cloudConsoleRedirectURI: https://console.cloud.google.com/kubernetes/oidc 14 | extraParams: resource=token-groups-claim 15 | groupPrefix: '-' 16 | groupsClaim: groups 17 | issuerURI: https://oncloud-2.site/realms/kubernetes 18 | kubectlRedirectURI: http://localhost:8000 19 | scopes: openid 20 | userClaim: preferred_username 21 | 
userPrefix: '-' 22 | internalServer: "" 23 | name: hj-keycloak-oncloud-2-gke 24 | # server: manually input after applying clientconfig or searching and add 25 | status: {} 26 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/Deploy-infra/2-1.deploy-eks-env-only.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | eksctl create cluster -f eksctl-config/keycloak-w-oidc.yaml 4 | 5 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/Deploy-infra/eksctl-config/keycloak-w-oidc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: eksctl.io/v1alpha5 2 | kind: ClusterConfig 3 | 4 | metadata: 5 | name: hj-keycloak-oncloud-2-eks 6 | region: us-east-2 7 | version: "1.27" 8 | 9 | nodeGroups: 10 | - name: compute-group 11 | instanceType: t3a.small 12 | desiredCapacity: 3 13 | 14 | identityProviders: 15 | - name: keycloak 16 | type: oidc 17 | issuerUrl: https://oncloud-2.site/realms/kubernetes 18 | clientId: k8s-auth 19 | usernameClaim: preferred_username 20 | usernamePrefix: 21 | groupsClaim: groups 22 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/EKS/1.clusterrolebinding-4-devops-group-as-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: oidc-group-cluster-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: Group 12 | name: devops 13 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/EKS/2-oncloud-2.swtich-ctx-hoon-to-soojin.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | aws eks update-kubeconfig --name hj-keycloak-oncloud-2-eks --region us-east-2 --profile=soojin --alias=soojin-ctx-as-user-oncloud-2 4 | 5 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/EKS/3.set-cred-4-oidc-user.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | kubectl config set-credentials oidc-user \ 4 | --exec-api-version=client.authentication.k8s.io/v1beta1 \ 5 | --exec-command=kubelogin 6 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/GKE/1.clusterrolebinding-4-devops-group-as-admin-NOTWORK.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: oidc-group-cluster-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: Group 12 | name: devops 13 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/GKE/2-hoon.clusterrolebinding-4-devops-user-as-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: oidc-group-cluster-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: User 12 | name: hj@mz.co.kr 13 | -------------------------------------------------------------------------------- /Keycloak/oncloud-2.site/GKE/2-soojin.clusterrolebinding-4-devops-user-as-admin.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: oidc-group-cluster-admin 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: cluster-admin 9 | subjects: 10 | - apiGroup: rbac.authorization.k8s.io 11 | kind: User 12 | name: soojin@mz.co.kr 13 | -------------------------------------------------------------------------------- /Keycloak/prerequisite-install-tools.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # krew 4 | ( 5 | set -x; cd "$(mktemp -d)" && 6 | OS="$(uname | tr '[:upper:]' '[:lower:]')" && 7 | ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" && 8 | KREW="krew-${OS}_${ARCH}" && 9 | curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" && 10 | tar zxvf "${KREW}.tar.gz" && 11 | ./"${KREW}" install krew 12 | ) 13 | 14 | export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH" 15 | 16 | # eksctl 17 | brew install eksctl 18 | 19 | # kubelogin for GCP 20 | gcloud components install kubectl-oidc 21 | 22 | # kubelogin for AWS 23 | kubectl krew install oidc-login 24 | 25 | -------------------------------------------------------------------------------- /PaC/CEL/GCP-Organization-Policy/README.md: -------------------------------------------------------------------------------- 1 | ## Organization Policy 2 | https://cloud.google.com/resource-manager/docs/organization-policy/overview 3 | 4 | Control-Plane Access Control (only 32bit allowed) 5 | ``` 6 | Resource type: container.googleapis.com/Cluster 7 | Conditions: 8 | resource.masterAuthorizedNetworksConfig.enabled != true || 9 | resource.masterAuthorizedNetworksConfig.cidrBlocks.exists(value,!value.cidrBlock.endsWith("/32")) 10 | Action: Deny 11 | 12 | ``` 13 | 
-------------------------------------------------------------------------------- /PaC/CEL/MutatingAdmissionPolicy/README.md: -------------------------------------------------------------------------------- 1 | ## Mutating admission by CEL 2 | 3 | This repo provide **PaC Pratice for KubeCon** 4 | Attendee could run each of folder for your own testing purpose. 5 | 6 | (WIP) 7 | ``` 8 | $ ls -1 9 | README.md 10 | gatekeeper-{{Rego,CEL}} <<< OPA, it provided Rego & CEL 11 | k8s_native-{{CEL}} <<< k8s native, it provided CEL only 12 | kyverno-{{Yaml,CEL}} <<< kyverno , it provided Yaml & CEL 13 | sample-apps <<< After deploying each of PaC, you could use this sample app for testing purpose. 14 | ``` 15 | -------------------------------------------------------------------------------- /PaC/CEL/README.md: -------------------------------------------------------------------------------- 1 | ## Policy as Code 2 | (inspired by https://github.com/SeongJuMoon/policy-as-code-for-k8s) 3 | 4 | This repo provide PaC Pratice for each of KubeCon or others purpose 5 | 6 | Updated **19 Jan 2025** 7 | ``` 8 | $ ls -1 9 | GCP-Organization-Policy <<< GCP Oriented, it provided CEL 10 | MutatingAdmissionPolicy <<< `Alpha` stage 11 | README.md 12 | ValidatingAdmissionPolicy <<< `GA` stage, it provided CEL or DSL 13 | ``` 14 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/README.md: -------------------------------------------------------------------------------- 1 | ## Validating admission by CEL 2 | 3 | This repo provide **PaC Pratice for KubeCon** 4 | Attendee could run each of folder for your own testing purpose. 
5 | 6 | ``` 7 | $ ls -1 8 | README.md 9 | gatekeeper-{{Rego,CEL}} <<< OPA, it provided Rego & CEL 10 | k8s_native-{{CEL}} <<< k8s native, it provided CEL only 11 | kyverno-{{Yaml,CEL}} <<< kyverno , it provided Yaml & CEL 12 | sample-apps <<< After deploying each of PaC, you could use this sample app for testing purpose. 13 | ``` 14 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/CEL/1-1.CEL-ConstraintTemplate-NoHostNetwork.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: celconstrainttemplatenohostnetwork 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: CELConstraintTemplateNoHostNetwork 10 | targets: 11 | - target: admission.k8s.gatekeeper.sh 12 | code: 13 | - engine: K8sNativeValidation 14 | source: 15 | validations: 16 | - expression: "!has(object.spec.hostNetwork) || 17 | object.spec.hostNetwork != true" 18 | message: "HostNetwork is not allowed for the Pod" 19 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/CEL/1-2.CEL-Constraint-NoHostNetwork.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: CELConstraintTemplateNoHostNetwork 3 | metadata: 4 | name: cel-constraintnohostnetwork 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/CEL/2-1.CEL-ConstraintTemplate-NoHostPath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: 
celconstrainttemplatenohostpath 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: CELConstraintTemplateNoHostPath 10 | targets: 11 | - target: admission.k8s.gatekeeper.sh 12 | code: 13 | - engine: K8sNativeValidation 14 | source: 15 | validations: 16 | - expression: "!has(object.spec.template.spec.volumes) || 17 | object.spec.template.spec.volumes.all(volume, !has(volume.hostPath))" 18 | message: "HostPath is not allowed for template of Pod" 19 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/CEL/2-2.CEL-Constraint-NoHostPath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: CELConstraintTemplateNoHostPath 3 | metadata: 4 | name: cel-constraintnohostpath 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["apps"] 9 | # Replicas is not matched due to testing purpose 10 | kinds: ["DaemonSet", "Deployment"] 11 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/CEL/3-1.CEL-ConstraintTemplate-NoReplicasLess3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: celconstrainttemplatenoreplicasless3 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: CELConstraintTemplateNoReplicasLess3 10 | targets: 11 | - target: admission.k8s.gatekeeper.sh 12 | code: 13 | - engine: K8sNativeValidation 14 | source: 15 | validations: 16 | - expression: "!has(object.spec.replicas) || 17 | object.spec.replicas > 2" 18 | message: "Replicas is not allowed less than 3" 19 | 20 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/CEL/3-2.CEL-Constraint-NoReplicasLess3.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: CELConstraintTemplateNoReplicasLess3 3 | metadata: 4 | name: cel-constraintnoreplicasless3 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: ["apps"] 9 | kinds: ["Deployment"] 10 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/Rego/1-1.Rego-ConstraintTemplate-NoHostNetwork.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: regoconstrainttemplatenohostnetwork 5 | spec: 6 | crd: 7 | spec: 8 | names: 9 | kind: RegoConstraintTemplateNoHostNetwork 10 | targets: 11 | - target: admission.k8s.gatekeeper.sh 12 | rego: | 13 | package regoconstrainttemplatenohostnetwork 14 | 15 | violation[{"msg": msg}] { 16 | input.review.kind.kind == "Pod" 17 | input.review.object.spec.hostNetwork == true 18 | msg := "HostNetwork is not allowed for the Pod" 19 | } 20 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/Rego/1-2.Rego-Constraint-NoHostNetwork.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: constraints.gatekeeper.sh/v1beta1 2 | kind: RegoConstraintTemplateNoHostNetwork 3 | metadata: 4 | name: rego-constraintnohostnetwork 5 | spec: 6 | match: 7 | kinds: 8 | - apiGroups: [""] 9 | kinds: ["Pod"] 10 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/gatekeeper-{{Rego,CEL}}/Rego/2-1.Rego-ConstraintTemplate-NoHostPath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: templates.gatekeeper.sh/v1beta1 2 | kind: ConstraintTemplate 3 | metadata: 4 | name: 
apiVersion: templates.gatekeeper.sh/v1beta1
kind: ConstraintTemplate
metadata:
  name: regoconstrainttemplatenoreplicasless3
spec:
  crd:
    spec:
      names:
        kind: RegoConstraintTemplateNoReplicasLess3
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package regoconstrainttemplatenoreplicasless3

        violation[{"msg": msg}] {
          input.review.kind.kind == "Deployment"
          # FIX: was 'replicas < 2', which still admitted replicas == 2 even
          # though the template name, the message below, and the CEL twin
          # ('replicas > 2') all forbid anything below 3.
          input.review.object.spec.replicas < 3
          msg := "Replicas is not allowed less than 3"
        }

        # tidy calculation sample using parameters.min in violation
        # provided := input.review.object.spec.replicas
        # required := input.parameters.min
        # missing := required - provided
        # missing > 0
kind: ValidatingAdmissionPolicy 3 | metadata: 4 | name: celvalidatingadmissionpolicynohostpath 5 | spec: 6 | matchConstraints: 7 | resourceRules: 8 | - apiGroups: ["*"] 9 | apiVersions: ["*"] 10 | operations: ["CREATE","UPDATE"] 11 | resources: ["daemonsets","deployments"] 12 | validations: 13 | - expression: "!has(object.spec.template.spec.volumes) || 14 | object.spec.template.spec.volumes.all(volume, !has(volume.hostPath))" 15 | message: "HostPath is not allowed for template of Pod" 16 | --- 17 | apiVersion: admissionregistration.k8s.io/v1 18 | kind: ValidatingAdmissionPolicyBinding 19 | metadata: 20 | name: celvalidatingadmissionpolicynohostpath-binding 21 | spec: 22 | policyName: celvalidatingadmissionpolicynohostpath 23 | validationActions: [Deny] 24 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/k8s_native-{{CEL}}/3.CEL-ValidatingAdmissionPolicy-NoReplicasLess3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: admissionregistration.k8s.io/v1 2 | kind: ValidatingAdmissionPolicy 3 | metadata: 4 | name: celvalidatingadmissionpolicynoreplicasless3 5 | spec: 6 | matchConstraints: 7 | resourceRules: 8 | - apiGroups: ["apps"] 9 | apiVersions: ["v1"] 10 | operations: ["CREATE","UPDATE"] 11 | resources: ["deployments"] 12 | validations: 13 | - expression: "!has(object.spec.replicas) || 14 | object.spec.replicas > 2" 15 | message: "Replicas is not allowed less than 3" 16 | --- 17 | apiVersion: admissionregistration.k8s.io/v1 18 | kind: ValidatingAdmissionPolicyBinding 19 | metadata: 20 | name: celvalidatingadmissionpolicynoreplicasless3-binding 21 | spec: 22 | policyName: celvalidatingadmissionpolicynoreplicasless3 23 | validationActions: [Deny] 24 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/kyverno-{{Yaml,CEL}}/CEL/1.CEL-ClusterPolicy-NoHostNetwork.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: celclusterpolicynohostnetwork 5 | spec: 6 | validationFailureAction: Enforce 7 | rules: 8 | - name: CELClusterPolicyNoHostNetwork 9 | match: 10 | any: 11 | - resources: 12 | kinds: 13 | - Pod 14 | operations: 15 | - CREATE 16 | - UPDATE 17 | validate: 18 | cel: 19 | expressions: 20 | - expression: "!has(object.spec.hostNetwork) || 21 | object.spec.hostNetwork != true" 22 | message: "HostNetwork is not allowed for the Pod" 23 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/kyverno-{{Yaml,CEL}}/CEL/2.CEL-ClusterPolicy-NoHostPath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: celclusterpolicynohostpath 5 | spec: 6 | validationFailureAction: Enforce 7 | rules: 8 | - name: CELClusterPolicyNoHostPath 9 | match: 10 | any: 11 | - resources: 12 | kinds: 13 | - DaemonSet 14 | - Deployment 15 | operations: 16 | - CREATE 17 | - UPDATE 18 | validate: 19 | cel: 20 | expressions: 21 | - expression: "!has(object.spec.template.spec.volumes) || 22 | object.spec.template.spec.volumes.all(volume, !has(volume.hostPath))" 23 | message: "HostPath is not allowed for template of Pod" 24 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/kyverno-{{Yaml,CEL}}/CEL/3.CEL-ClusterPolicy-NoReplicasLess3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: celclusterpolicynoreplicasless3 5 | spec: 6 | validationFailureAction: Enforce 7 | rules: 8 | - name: CELClusterPolicyNoReplicasLess3 9 | match: 10 | any: 11 | - resources: 12 | kinds: 13 | - Deployment 14 | operations: 15 | - 
CREATE 16 | - UPDATE 17 | validate: 18 | cel: 19 | expressions: 20 | - expression: "!has(object.spec.replicas) || 21 | object.spec.replicas > 2" 22 | message: "Replicas is not allowed less than 3" 23 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/kyverno-{{Yaml,CEL}}/Yaml/1.Yaml-ClusterPolicy-NoHostNetwork.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: yamlclusterpolicynohostnetwork 5 | spec: 6 | validationFailureAction: Enforce 7 | rules: 8 | - name: YamlClusterPolicyNoHostNetwork 9 | match: 10 | any: 11 | - resources: 12 | kinds: 13 | - Pod 14 | validate: 15 | message: "HostNetwork is not allowed for the Pod" 16 | pattern: 17 | spec: 18 | =(hostNetwork): "false" 19 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/kyverno-{{Yaml,CEL}}/Yaml/2.Yaml-ClusterPolicy-NoHostPath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: yamlclusterpolicynohostpath 5 | spec: 6 | validationFailureAction: Enforce 7 | rules: 8 | - name: YamlClusterPolicyNoHostPath 9 | match: 10 | any: 11 | - resources: 12 | kinds: 13 | - DaemonSet 14 | - Deployment 15 | validate: 16 | message: "HostPath is not allowed for the template of Pod" 17 | pattern: 18 | spec: 19 | template: 20 | spec: 21 | =(volumes): 22 | - X(hostPath): "null" 23 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/kyverno-{{Yaml,CEL}}/Yaml/3.Yaml-ClusterPolicy-NoReplicasLess3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: yamlclusterpolicynoreplicasless3 5 | spec: 6 | 
validationFailureAction: Enforce 7 | rules: 8 | - name: YamlClusterPolicyNoReplicasLess3 9 | match: 10 | any: 11 | - resources: 12 | kinds: 13 | - Deployment 14 | validate: 15 | message: "Replicas is not allowed less than 3" 16 | pattern: 17 | spec: 18 | =(replicas): ">2" 19 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/sample-apps/hostNetwork/no-hostNetwork.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: no-hostnetwork 5 | spec: 6 | containers: 7 | - name: nginx 8 | image: nginx:1.27.2-alpine-slim 9 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/sample-apps/hostNetwork/yes-hostNetwork.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: yes-hostnetwork 5 | spec: 6 | hostNetwork: true 7 | containers: 8 | - name: nginx 9 | image: nginx:1.27.2-alpine-slim 10 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/sample-apps/hostPath/no-hostPath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: no-hostpath 5 | labels: 6 | app: no-hostpath 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: no-hostpath 11 | template: 12 | metadata: 13 | labels: 14 | app: no-hostpath 15 | spec: 16 | containers: 17 | - name: host-mon 18 | image: sysnet4admin/sleepy 19 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/sample-apps/hostPath/no-matched-replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | labels: 5 | app: 
no-matched-replicass 6 | name: no-matched-replicas 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: no-matched-replicas 12 | template: 13 | metadata: 14 | labels: 15 | app: no-matched-replicas 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.27.2-alpine-slim 20 | volumeMounts: 21 | - mountPath: /host-log 22 | name: hostpath-directory 23 | volumes: 24 | - name: hostpath-directory 25 | hostPath: 26 | path: /var/log 27 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/sample-apps/hostPath/yes-hostPath.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: yes-hostpath 5 | labels: 6 | app: yes-hostpath 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: yes-hostpath 11 | template: 12 | metadata: 13 | labels: 14 | app: yes-hostpath 15 | spec: 16 | containers: 17 | - name: host-mon 18 | image: sysnet4admin/sleepy 19 | volumeMounts: 20 | - mountPath: /host-log 21 | name: hostpath-directory 22 | volumes: 23 | - name: hostpath-directory 24 | hostPath: 25 | path: /var/log 26 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/sample-apps/hostPath/yes-matched-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: yes-matched-deployment 6 | name: yes-matched-deployment 7 | spec: 8 | replicas: 2 9 | selector: 10 | matchLabels: 11 | app: yes-matched-deployment 12 | template: 13 | metadata: 14 | labels: 15 | app: yes-matched-deployment 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.27.2-alpine-slim 20 | volumeMounts: 21 | - mountPath: /host-log 22 | name: hostpath-directory 23 | volumes: 24 | - name: hostpath-directory 25 | hostPath: 26 | path: /var/log 27 | 
-------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/sample-apps/replicas/1-replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: 1-replicas 6 | name: 1-replicas 7 | spec: 8 | replicas: 1 9 | selector: 10 | matchLabels: 11 | app: 1-replicas 12 | template: 13 | metadata: 14 | labels: 15 | app: 1-replicas 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.27.2-alpine-slim 20 | -------------------------------------------------------------------------------- /PaC/CEL/ValidatingAdmissionPolicy/sample-apps/replicas/3-replicas.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: 3-replicas 6 | name: 3-replicas 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: 3-replicas 12 | template: 13 | metadata: 14 | labels: 15 | app: 3-replicas 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.27.2-alpine-slim 20 | -------------------------------------------------------------------------------- /Prometheus/mk-demo-prom-darksite/demo promlens.url: -------------------------------------------------------------------------------- 1 | [InternetShortcut] 2 | URL=https://demo.promlens.com/ 3 | -------------------------------------------------------------------------------- /Prometheus/mk-demo-prom-darksite/prom.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # https://blog.marcnuri.com/prometheus-grafana-setup-minikube 4 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 5 | helm install prometheus prometheus-community/prometheus 6 | 7 | kubectl expose service prometheus-server --type=NodePort --target-port=9090 --name=prometheus-server-np 8 | 
#!/usr/bin/env bash
# Start a minikube cluster and deploy Prometheus via Helm, exposing the
# server UI on nodePort 30000.

# startup minikube
minikube start --driver=none
echo "Wait for minikube boot up properly...in few seconds"
echo "==================================================="
echo ""; sleep 10

# deploy prometheus
# BUG FIX: the original ran 'install prometheus prometheus-community/prometheus',
# which invokes /usr/bin/install (the coreutils file-copy tool), not Helm.
# The chart repo must also be registered before 'helm install' can resolve it
# (same two steps as the sibling prom.sh script).
helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
helm install prometheus prometheus-community/prometheus

kubectl expose service prometheus-server --type=NodePort --target-port=9090 --name=prometheus-server-np
minikube service prometheus-server-np
kubectl patch service prometheus-server-np --namespace=default \
  --type='json' \
  --patch='[{"op": "replace", "path": "/spec/ports/0/nodePort", "value":30000}]'

echo ""
# FIX: port typo 300000 -> 30000 (must match the nodePort patched above;
# 300000 is not a valid TCP port) and spelling of "prometheus".
echo "prometheus's graph: http://192.168.1.231:30000"
#!/usr/bin/env bash
# Auto_Pass: seed ~/.ssh/known_hosts and distribute this node's public key
# to every host listed in /etc/hosts, so kubespray/ansible can SSH without
# password or host-key prompts.
# if you want to filter only ip then [grep -o '[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}']

# make the ssh directory
# FIX: plain 'mkdir' errors out when the directory already exists; -p makes
# the script idempotent on re-runs.
mkdir -p ~/.ssh

# Read host entries from /etc/hosts
# FIX: the original 'readarray hosts < /etc/hosts' plus unquoted expansion
# word-split every token on every line, including '#' comment words, and fed
# them all to ssh-keyscan/ssh-copy-id. Skip comment/blank lines explicitly.
readarray -t hosts < <(awk '!/^[[:space:]]*#/ && NF { for (i = 1; i <= NF; i++) print $i }' /etc/hosts)

##1.known_hosts##
if [ ! -f ~/.ssh/known_hosts ]; then
  for host in "${hosts[@]}"; do
    ssh-keyscan -t ecdsa "${host}" >> ~/.ssh/known_hosts
  done
fi

##2.authorized_keys
if [ ! -f ~/.ssh/id_rsa.pub ]; then
  ssh-keygen -b 2048 -t rsa -f ~/.ssh/id_rsa -q -N ''
  for host in "${hosts[@]}"; do
    # NOTE(review): 'vagrant' is the stock Vagrant box password — lab use only.
    sshpass -p vagrant ssh-copy-id -f "${host}"
  done
fi
/k8s/C/k8s-SingleMaster-1.13.1/master_node.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # init kubernetes 4 | kubeadm init --token 123456.1234567890123456 --token-ttl 0 \ 5 | --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.1.10 6 | 7 | # config for master node only 8 | mkdir -p $HOME/.kube 9 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 10 | chown $(id -u):$(id -g) $HOME/.kube/config 11 | 12 | # config for kubernetes's network 13 | kubectl apply -f \ 14 | https://raw.githubusercontent.com/sysnet4admin/IaC/master/manifests/172.16_net_calico.yaml -------------------------------------------------------------------------------- /k8s/C/k8s-SingleMaster-1.13.1/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 -------------------------------------------------------------------------------- /k8s/C/k8s-SingleMaster-18.9_9_w_auto-compl/install_pkg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install packages 4 | yum install epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | # install docker 9 | yum install docker-ce-18.09.9-3.el7 docker-ce-cli-18.09.9-3.el7 \ 10 | containerd.io-1.2.6-3.3.el7 -y 11 | systemctl enable --now docker 12 | 13 | # install kubernetes and kubectl will install only master node 14 | if [ $2 = 'install_kubectl' ]; then 15 | yum install kubectl-$1 -y 16 | fi 17 | yum install kubelet-$1 kubeadm-$1 -y 18 | systemctl enable --now kubelet 19 | 20 | -------------------------------------------------------------------------------- /k8s/C/k8s-SingleMaster-18.9_9_w_auto-compl/master_node.sh: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # init kubernetes 4 | kubeadm init --token 123456.1234567890123456 --token-ttl 0 \ 5 | --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.1.10 6 | 7 | # config for master node only 8 | mkdir -p $HOME/.kube 9 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 10 | chown $(id -u):$(id -g) $HOME/.kube/config 11 | 12 | # raw_address for gitcontent 13 | raw_git="raw.githubusercontent.com/sysnet4admin/IaC/master/manifests" 14 | 15 | # config for kubernetes's network 16 | kubectl apply -f https://$raw_git/172.16_net_calico.yaml 17 | 18 | # install bash-completion for kubectl 19 | yum install bash-completion -y 20 | 21 | # kubectl completion on bash-completion dir 22 | kubectl completion bash >/etc/bash_completion.d/kubectl 23 | 24 | # alias kubectl to k 25 | echo 'alias k=kubectl' >> ~/.bashrc 26 | echo 'complete -F __start_kubectl k' >> ~/.bashrc -------------------------------------------------------------------------------- /k8s/C/k8s-SingleMaster-18.9_9_w_auto-compl/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 -------------------------------------------------------------------------------- /k8s/C/k8s-min-1.16.15-iprange16/k8s_pkg_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install util packages 4 | yum install epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | # install docker 9 | yum install docker-ce-$2 docker-ce-cli-$2 containerd.io-$3 -y 10 | 11 | # install kubernetes 12 | # both kubelet and kubectl will install by dependency 13 | # but aim to latest version. 
so fixed version by manually 14 | yum install kubelet-$1 kubectl-$1 kubeadm-$1 -y 15 | 16 | # Ready to install for k8s 17 | systemctl enable --now docker 18 | systemctl enable --now kubelet 19 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-1.16.15-iprange16/master_node.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # init kubernetes 4 | kubeadm init --token 123456.1234567890123456 --token-ttl 0 \ 5 | --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.16.10 6 | 7 | # config for master node only 8 | mkdir -p $HOME/.kube 9 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 10 | chown $(id -u):$(id -g) $HOME/.kube/config 11 | 12 | # raw_address for gitcontent 13 | raw_git="raw.githubusercontent.com/sysnet4admin/IaC/master/manifests" 14 | 15 | # config for kubernetes's network 16 | kubectl apply -f https://$raw_git/172.16_net_calico.yaml 17 | 18 | # install bash-completion for kubectl 19 | yum install bash-completion -y 20 | 21 | # kubectl completion on bash-completion dir 22 | kubectl completion bash >/etc/bash_completion.d/kubectl 23 | 24 | # alias kubectl to k 25 | echo 'alias k=kubectl' >> ~/.bashrc 26 | echo 'complete -F __start_kubectl k' >> ~/.bashrc 27 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-1.16.15-iprange16/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.16.10:6443 -------------------------------------------------------------------------------- /k8s/C/k8s-min-5GB-1.16.15-iprange1/k8s_pkg_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install util packages 4 | yum install 
epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | # install docker 9 | yum install docker-ce-$2 docker-ce-cli-$2 containerd.io-$3 -y 10 | 11 | # install kubernetes 12 | # both kubelet and kubectl will install by dependency 13 | # but aim to latest version. so fixed version by manually 14 | yum install kubelet-$1 kubectl-$1 kubeadm-$1 -y 15 | 16 | # Ready to install for k8s 17 | systemctl enable --now docker 18 | systemctl enable --now kubelet 19 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-5GB-1.16.15-iprange1/master_node.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # init kubernetes 4 | kubeadm init --token 123456.1234567890123456 --token-ttl 0 \ 5 | --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.1.10 6 | 7 | # config for master node only 8 | mkdir -p $HOME/.kube 9 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 10 | chown $(id -u):$(id -g) $HOME/.kube/config 11 | 12 | # raw_address for gitcontent 13 | raw_git="raw.githubusercontent.com/sysnet4admin/IaC/master/manifests" 14 | 15 | # config for kubernetes's network 16 | kubectl apply -f https://$raw_git/172.16_net_calico.yaml 17 | 18 | # install bash-completion for kubectl 19 | yum install bash-completion -y 20 | 21 | # kubectl completion on bash-completion dir 22 | kubectl completion bash >/etc/bash_completion.d/kubectl 23 | 24 | # alias kubectl to k 25 | echo 'alias k=kubectl' >> ~/.bashrc 26 | echo 'complete -F __start_kubectl k' >> ~/.bashrc 27 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-5GB-1.16.15-iprange1/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 
192.168.1.10:6443 -------------------------------------------------------------------------------- /k8s/C/k8s-min-5GiB-1.20.1-iprange1/k8s_pkg_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install util packages 4 | yum install epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | # install docker 9 | yum install docker-ce-$2 docker-ce-cli-$2 containerd.io-$3 -y 10 | 11 | # install kubernetes 12 | # both kubelet and kubectl will install by dependency 13 | # but aim to latest version. so fixed version by manually 14 | yum install kubelet-$1 kubectl-$1 kubeadm-$1 -y 15 | 16 | # Ready to install for k8s 17 | systemctl enable --now docker 18 | systemctl enable --now kubelet 19 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-5GiB-1.20.1-iprange1/master_node.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # init kubernetes 4 | kubeadm init --token 123456.1234567890123456 --token-ttl 0 \ 5 | --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.1.10 6 | 7 | # config for master node only 8 | mkdir -p $HOME/.kube 9 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 10 | chown $(id -u):$(id -g) $HOME/.kube/config 11 | 12 | # raw_address for gitcontent 13 | raw_git="raw.githubusercontent.com/sysnet4admin/IaC/master/manifests" 14 | 15 | # config for kubernetes's network 16 | kubectl apply -f https://$raw_git/172.16_net_calico.yaml 17 | 18 | # install bash-completion for kubectl 19 | yum install bash-completion -y 20 | 21 | # kubectl completion on bash-completion dir 22 | kubectl completion bash >/etc/bash_completion.d/kubectl 23 | 24 | # alias kubectl to k 25 | echo 'alias k=kubectl' >> ~/.bashrc 26 | echo 'complete -F __start_kubectl k' >> ~/.bashrc 27 | -------------------------------------------------------------------------------- 
/k8s/C/k8s-min-5GiB-1.20.1-iprange1/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 -------------------------------------------------------------------------------- /k8s/C/k8s-min-5GiB-1.25.0/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 \ 6 | --cri-socket=unix:///run/containerd/containerd.sock 7 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-containerD-only-MST/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only (w/ containerd) 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 \ 6 | --cri-socket=unix:///run/containerd/containerd.sock 7 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-containerD/master_node.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # init kubernetes (w/ containerd) 4 | kubeadm init --token 123456.1234567890123456 --token-ttl 0 \ 5 | --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.1.10 \ 6 | --cri-socket=unix:///run/containerd/containerd.sock 7 | 8 | # config for master node only 9 | mkdir -p $HOME/.kube 10 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 11 | chown $(id -u):$(id -g) $HOME/.kube/config 12 | 13 | # raw_address for gitcontent 14 | raw_git="raw.githubusercontent.com/sysnet4admin/IaC/master/manifests" 15 | 16 | # 
config for kubernetes's network 17 | kubectl apply -f https://$raw_git/172.16_net_calico_v1.yaml 18 | 19 | # install bash-completion for kubectl 20 | yum install bash-completion -y 21 | 22 | # kubectl completion on bash-completion dir 23 | kubectl completion bash >/etc/bash_completion.d/kubectl 24 | 25 | # alias kubectl to k 26 | echo 'alias k=kubectl' >> ~/.bashrc 27 | echo 'complete -F __start_kubectl k' >> ~/.bashrc 28 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-containerD/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only (w/ containerd) 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 \ 6 | --cri-socket=unix:///run/containerd/containerd.sock 7 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-post-systemd/k8s_pkg_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install packages 4 | yum install epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | # install docker 9 | yum install docker-ce-$2 docker-ce-cli-$2 containerd.io-$3 -y 10 | 11 | # install kubernetes 12 | # both kubelet and kubectl will install by dependency 13 | # but aim to latest version. 
so fixed version by manually 14 | yum install kubelet-$1 kubectl-$1 kubeadm-$1 -y 15 | 16 | # Ready to install for k8s 17 | systemctl enable --now docker 18 | systemctl enable --now kubelet -------------------------------------------------------------------------------- /k8s/C/k8s-min-post-systemd/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 6 | 7 | # cgroupdriver still cgroupfs -------------------------------------------------------------------------------- /k8s/C/k8s-min-systemd/k8s_pkg_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install util packages 4 | yum install epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | # install docker 9 | yum install docker-ce-$2 docker-ce-cli-$2 containerd.io-$3 -y 10 | 11 | # install kubernetes 12 | # both kubelet and kubectl will install by dependency 13 | # but aim to latest version. 
so fixed version by manually 14 | yum install kubelet-$1 kubectl-$1 kubeadm-$1 -y 15 | 16 | # Ready to install for k8s 17 | systemctl enable --now docker 18 | systemctl enable --now kubelet 19 | 20 | # docker daemon config for systemd from cgroupfs & restart 21 | cat <<EOF > /etc/docker/daemon.json 22 | { 23 | "exec-opts": ["native.cgroupdriver=systemd"] 24 | } 25 | EOF 26 | systemctl daemon-reload && systemctl restart docker -------------------------------------------------------------------------------- /k8s/C/k8s-min-systemd/master_node.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # init kubernetes 4 | kubeadm init --token 123456.1234567890123456 --token-ttl 0 \ 5 | --pod-network-cidr=172.16.0.0/16 --apiserver-advertise-address=192.168.1.10 6 | 7 | # config for master node only 8 | mkdir -p $HOME/.kube 9 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 10 | chown $(id -u):$(id -g) $HOME/.kube/config 11 | 12 | # raw_address for gitcontent 13 | raw_git="raw.githubusercontent.com/sysnet4admin/IaC/master/manifests" 14 | 15 | # config for kubernetes's network 16 | kubectl apply -f https://$raw_git/172.16_net_calico_v1.yaml 17 | 18 | # install bash-completion for kubectl 19 | yum install bash-completion -y 20 | 21 | # kubectl completion on bash-completion dir 22 | kubectl completion bash >/etc/bash_completion.d/kubectl 23 | 24 | # alias kubectl to k 25 | echo 'alias k=kubectl' >> ~/.bashrc 26 | echo 'complete -F __start_kubectl k' >> ~/.bashrc 27 | -------------------------------------------------------------------------------- /k8s/C/k8s-min-systemd/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443
-------------------------------------------------------------------------------- /k8s/C/k8s-multicontext/k8s_pkg_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install util packages 4 | yum install epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | # install docker 9 | yum install docker-ce-$2 docker-ce-cli-$2 containerd.io-$3 -y 10 | 11 | # install kubernetes 12 | # both kubelet and kubectl will install by dependency 13 | # but aim to latest version. so fixed version by manually 14 | yum install kubelet-$1 kubectl-$1 kubeadm-$1 -y 15 | 16 | # Ready to install for k8s 17 | systemctl enable --now docker 18 | systemctl enable --now kubelet 19 | -------------------------------------------------------------------------------- /k8s/C/k8s-multicontext/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.$1:6443 -------------------------------------------------------------------------------- /k8s/C/k8s-natvie-lab/Readme.txt: -------------------------------------------------------------------------------- 1 | #init limitation 2 | kubeadm init --apiserver-advertise-address=192.168.1.10 -------------------------------------------------------------------------------- /k8s/C/k8s-natvie-lab/install_pkg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install packages 4 | yum install epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | -------------------------------------------------------------------------------- /k8s/C/k8s-rook-ceph/k8s_pkg_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install 
util packages 4 | yum install epel-release -y 5 | yum install vim-enhanced -y 6 | yum install git -y 7 | 8 | # install docker 9 | yum install docker-ce-$2 docker-ce-cli-$2 containerd.io-$3 -y 10 | 11 | # install kubernetes 12 | # both kubelet and kubectl will install by dependency 13 | # but aim to latest version. so fixed version by manually 14 | yum install kubelet-$1 kubectl-$1 kubeadm-$1 -y 15 | 16 | # Ready to install for k8s 17 | systemctl enable --now docker 18 | systemctl enable --now kubelet 19 | 20 | # docker daemon config for systemd from cgroupfs & restart 21 | cat <<EOF > /etc/docker/daemon.json 22 | { 23 | "exec-opts": ["native.cgroupdriver=systemd"] 24 | } 25 | EOF 26 | systemctl daemon-reload && systemctl restart docker -------------------------------------------------------------------------------- /k8s/C/k8s-rook-ceph/pkg_by_helm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install additional packages 4 | helm repo add edu https://k8s-edu.github.io/helm-charts 5 | helm repo update 6 | helm completion bash >/etc/bash_completion.d/helm 7 | exec bash 8 | #sh $HOME/_Lecture_k8s_learning.kit/ch9/9.6/installer-by-helm/metallb-intaller-by-helm.sh 9 | #sh $HOME/_Lecture_k8s_learning.kit/ch5/5.6/nfs-exporter.sh dynamic-vol 10 | #sh $HOME/_Lecture_k8s_learning.kit/ch9/9.6/installer-by-helm/nfs-provisioner-installer-by-helm.sh 11 | -------------------------------------------------------------------------------- /k8s/C/k8s-rook-ceph/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 6 | -------------------------------------------------------------------------------- /k8s/U/k8s-multicontext-vagrant-user/k8s_pkg_cfg.sh:
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # update package list 4 | apt update 5 | 6 | # install docker 7 | apt install -y docker-ce=$2 docker-ce-cli=$2 containerd.io=$3 8 | 9 | # install kubernetes 10 | # both kubelet and kubectl will install by dependency 11 | # but aim to latest version. so fixed version by manually 12 | apt install -y kubelet=$1 kubectl=$1 kubeadm=$1 13 | 14 | # Ready to install for k8s 15 | systemctl enable --now docker 16 | systemctl enable --now kubelet 17 | 18 | # docker daemon config for systemd from cgroupfs & restart 19 | cat <<EOF > /etc/docker/daemon.json 20 | { 21 | "exec-opts": ["native.cgroupdriver=systemd"] 22 | } 23 | EOF 24 | systemctl daemon-reload && systemctl restart docker 25 | -------------------------------------------------------------------------------- /k8s/U/k8s-multicontext-vagrant-user/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Fixed Internal-IP 4 | cat <<EOF > /etc/default/kubelet 5 | KUBELET_EXTRA_ARGS=--node-ip=192.168.1.$2 6 | EOF 7 | 8 | # config for work_nodes only 9 | kubeadm join --token 123456.1234567890123456 \ 10 | --discovery-token-unsafe-skip-ca-verification 192.168.1.$1:6443 11 | 12 | -------------------------------------------------------------------------------- /k8s/U/k8s-multicontext/k8s_pkg_cfg.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # update package list 4 | apt update 5 | 6 | # install docker 7 | apt install -y docker-ce=$2 docker-ce-cli=$2 containerd.io=$3 8 | 9 | # install kubernetes 10 | # both kubelet and kubectl will install by dependency 11 | # but aim to latest version.
so fixed version by manually 12 | apt install -y kubelet=$1 kubectl=$1 kubeadm=$1 13 | 14 | # Ready to install for k8s 15 | systemctl enable --now docker 16 | systemctl enable --now kubelet 17 | 18 | # docker daemon config for systemd from cgroupfs & restart 19 | cat <<EOF > /etc/docker/daemon.json 20 | { 21 | "exec-opts": ["native.cgroupdriver=systemd"] 22 | } 23 | EOF 24 | systemctl daemon-reload && systemctl restart docker 25 | -------------------------------------------------------------------------------- /k8s/U/k8s-multicontext/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Fixed Internal-IP 4 | cat <<EOF > /etc/default/kubelet 5 | KUBELET_EXTRA_ARGS=--node-ip=192.168.1.$2 6 | EOF 7 | 8 | # config for work_nodes only 9 | kubeadm join --token 123456.1234567890123456 \ 10 | --discovery-token-unsafe-skip-ca-verification 192.168.1.$1:6443 11 | 12 | -------------------------------------------------------------------------------- /k8s/U/k8s-v1.27.0/README.md: -------------------------------------------------------------------------------- 1 | ### 실습 랩 All-in-one (v1.24.8) 2 | 3 | 이름 | 버전 | 빈칸 4 | ---- | ---- | ---- 5 | kubernetes | v1.24.8 | 6 | ContainerD | 1.6.8 | 7 | MetalLB | v0.10.2 | 8 | nfs-provisioner | 4.0.2 | 9 | Metrics Server | 0.5.0 | 10 | Kustomize | 4.2.0 | 11 | Helm | 3.6.3 | 12 | -------------------------------------------------------------------------------- /k8s/U/k8s-v1.27.0/pkg_by_helm.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # install additional packages 4 | helm repo add edu https://k8s-edu.github.io/helm-charts 5 | helm repo update 6 | helm completion bash >/etc/bash_completion.d/helm 7 | exec bash 8 | #sh $HOME/_Lecture_k8s_learning.kit/ch9/9.6/installer-by-helm/metallb-intaller-by-helm.sh 9 | #sh $HOME/_Lecture_k8s_learning.kit/ch5/5.6/nfs-exporter.sh dynamic-vol 10 | #sh
$HOME/_Lecture_k8s_learning.kit/ch9/9.6/installer-by-helm/nfs-provisioner-installer-by-helm.sh 11 | -------------------------------------------------------------------------------- /k8s/U/k8s-v1.27.0/work_nodes.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # config for work_nodes only (w/ containerd) 4 | kubeadm join --token 123456.1234567890123456 \ 5 | --discovery-token-unsafe-skip-ca-verification 192.168.1.10:6443 \ 6 | --cri-socket=unix:///run/containerd/containerd.sock 7 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.24.4/helm-repo-add.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | helm repo add edu https://k8s-edu.github.io/helm-charts/prom 4 | helm repo update 5 | 6 | # helm auto-completion 7 | helm completion bash >/etc/bash_completion.d/helm 8 | # reload bash shell 9 | exec bash 10 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.24.4/metallb-iprange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: k8s-svc-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 192.168.1.11-192.168.1.99 9 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.24.4/metallb-l2mode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: layer2-mode 5 | namespace: metallb-system 6 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.24.4/nfs-exporter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | nfsdir=/nfs_shared/$1 3 | 4 | if [ 
$# -eq 0 ]; then 5 | echo "usage: nfs-exporter.sh <directory-name>"; exit 0 6 | fi 7 | 8 | if [[ ! -d /nfs_shared ]]; then 9 | mkdir /nfs_shared 10 | fi 11 | 12 | if [[ ! -d $nfsdir ]]; then 13 | mkdir -p $nfsdir 14 | echo "$nfsdir 192.168.1.0/24(rw,sync,no_root_squash)" >> /etc/exports 15 | if [[ $(systemctl is-enabled nfs) == "disabled" ]]; then 16 | systemctl enable nfs 17 | fi 18 | systemctl restart nfs 19 | fi -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.24.4/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: managed-nfs-storage 5 | # or choose another name, must match deployment's env PROVISIONER_NAME' 6 | provisioner: k8s-sigs.io/nfs-subdir-external-provisioner 7 | parameters: 8 | # waits for nfs.io/storage-path annotation, if not specified will accept as empty string. 9 | pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" 10 | onDelete: delete 11 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.26.1/helm-repo-add.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | helm repo add edu https://k8s-edu.github.io/helm-charts/graf 4 | helm repo update 5 | 6 | # helm auto-completion 7 | helm completion bash >/etc/bash_completion.d/helm 8 | # reload bash shell 9 | exec bash 10 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.26.1/metallb-iprange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: k8s-svc-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 192.168.1.11-192.168.1.99 9 | --------------------------------------------------------------------------------
/k8s/extra-pkgs/v1.26.1/metallb-l2mode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: layer2-mode 5 | namespace: metallb-system 6 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.26.1/nfs-exporter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | nfsdir=/nfs_shared/$1 3 | 4 | if [ $# -eq 0 ]; then 5 | echo "usage: nfs-exporter.sh <directory-name>"; exit 0 6 | fi 7 | 8 | if [[ ! -d /nfs_shared ]]; then 9 | mkdir /nfs_shared 10 | fi 11 | 12 | if [[ ! -d $nfsdir ]]; then 13 | mkdir -p $nfsdir 14 | echo "$nfsdir 192.168.1.0/24(rw,sync,no_root_squash)" >> /etc/exports 15 | if [[ $(systemctl is-enabled nfs) == "disabled" ]]; then 16 | systemctl enable nfs 17 | fi 18 | systemctl restart nfs 19 | fi -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.26.1/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: managed-nfs-storage 5 | # or choose another name, must match deployment's env PROVISIONER_NAME' 6 | provisioner: k8s-sigs.io/nfs-subdir-external-provisioner 7 | parameters: 8 | # waits for nfs.io/storage-path annotation, if not specified will accept as empty string.
9 | pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" 10 | onDelete: delete 11 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.27.2/README.md: -------------------------------------------------------------------------------- 1 | ### k8s-extra-pkgs (v1.27.2) 2 | 3 | 이름 | 버전 | 빈칸 4 | ---- | ---- | ---- 5 | kubernetes | v1.27.2 | 6 | MetalLB | v0.13.10 | 7 | nfs-provisioner | 4.0.2 | 8 | Metrics Server | 0.6.3 | 9 | Helm | 3.12.0 | 10 | 11 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.27.2/metallb-iprange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: k8s-svc-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 192.168.1.11-192.168.1.99 9 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.27.2/metallb-l2mode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: layer2-mode 5 | namespace: metallb-system 6 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.27.2/nfs-exporter.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | nfsdir=/nfs_shared/$1 3 | 4 | if [ $# -eq 0 ]; then 5 | echo "usage: nfs-exporter.sh "; exit 0 6 | fi 7 | 8 | if [[ ! -d /nfs_shared ]]; then 9 | mkdir /nfs_shared 10 | fi 11 | 12 | if [[ ! 
-d $nfsdir ]]; then 13 | mkdir -p $nfsdir 14 | echo "$nfsdir 192.168.1.0/24(rw,sync,no_root_squash)" >> /etc/exports 15 | if [[ $(systemctl is-enabled nfs) == "disabled" ]]; then 16 | systemctl enable nfs 17 | fi 18 | systemctl restart nfs 19 | fi -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.27.2/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: managed-nfs-storage 5 | # or choose another name, must match deployment's env PROVISIONER_NAME' 6 | provisioner: k8s-sigs.io/nfs-subdir-external-provisioner 7 | parameters: 8 | # waits for nfs.io/storage-path annotation, if not specified will accept as empty string. 9 | pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" 10 | onDelete: delete 11 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.30/README.md: -------------------------------------------------------------------------------- 1 | ### k8s-extra-pkgs (v1.30) 2 | 3 | 이름 | 버전 | 빈칸 4 | ---- | ---- | ---- 5 | kubernetes | v1.30.x | 6 | MetalLB | v0.14.4 | 7 | nfs-provisioner | 4.0.2 | 8 | Metrics Server | 0.7.1 | 9 | Helm | 3.14.0 | 10 | 11 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.30/metallb-iprange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: k8s-svc-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 192.168.1.11-192.168.1.99 9 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.30/metallb-l2mode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name:
layer2-mode 5 | namespace: metallb-system 6 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.30/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: managed-nfs-storage 5 | # or choose another name, must match deployment's env PROVISIONER_NAME' 6 | provisioner: k8s-sigs.io/nfs-subdir-external-provisioner 7 | parameters: 8 | # waits for nfs.io/storage-path annotation, if not specified will accept as empty string. 9 | pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" 10 | onDelete: delete 11 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.32/README.md: -------------------------------------------------------------------------------- 1 | ### k8s-extra-pkgs (v1.32) 2 | 3 | 이름 | 버전 | 빈칸 4 | ---- | ---- | ---- 5 | kubernetes | 1.32.x | 6 | MetalLB | 0.14.9 | 7 | Ingress-ctrl | 1.10.1 | (Latest: 1.20.0) 8 | nfs-provisioner | 4.0.2 | 9 | Metrics Server | 0.7.1 | (Latest: 0.7.2) 10 | Helm | 3.17.1 | 11 | 12 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.32/cilium-iprange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cilium.io/v2alpha1 2 | kind: CiliumLoadBalancerIPPool 3 | metadata: 4 | name: k8s-svc-pool 5 | spec: 6 | blocks: 7 | - start: 192.168.1.11 8 | stop: 192.168.1.99 9 | 10 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.32/cilium-l2mode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cilium.io/v2alpha1 2 | kind: CiliumL2AnnouncementPolicy 3 | metadata: 4 | name: layer2-mode 5 | spec: 6 | interfaces: 7 | - ^eth[0-9]+ 8 | externalIPs: true 9 | loadBalancerIPs: true 10 | 11 | 
-------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.32/metallb-iprange.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: k8s-svc-pool 5 | namespace: metallb-system 6 | spec: 7 | addresses: 8 | - 192.168.1.11-192.168.1.99 9 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.32/metallb-l2mode.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: layer2-mode 5 | namespace: metallb-system 6 | -------------------------------------------------------------------------------- /k8s/extra-pkgs/v1.32/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: managed-nfs-storage 5 | # or choose another name, must match deployment's env PROVISIONER_NAME' 6 | provisioner: k8s-sigs.io/nfs-subdir-external-provisioner 7 | parameters: 8 | # waits for nfs.io/storage-path annotation, if not specified will accept as empty string. 
9 | pathPattern: "${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path}" 10 | onDelete: delete 11 | -------------------------------------------------------------------------------- /k8s/k8s-console/README.md: -------------------------------------------------------------------------------- 1 | # k8s(kubernetes)-console 2 | 3 | ## 설치된 패키지 4 | - kubectl 1.26.0 5 | - helm 3.10.3 6 | 7 | ## 설치된 플러그인 8 | kubectx 9 | kubens 10 | kube-ps1 11 | 12 | ## 정보 13 | - autocompletion 완료 14 | - alias는 kubectl은 k로 helm은 h로 완료 15 | 16 | # 추가 정보 17 | kubeconfig scrap preconfig가 vagrantfile에 포함되어 있으니 필요시 사용 18 | 19 | -------------------------------------------------------------------------------- /k8s/k8s-console/grap_kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # create .kube_config dir 4 | mkdir ~/.kube 5 | 6 | # copy kubeconfig by sshpass 7 | sshpass -p 'vagrant' scp -o StrictHostKeyChecking=no root@192.168.1.10:/etc/kubernetes/admin.conf ~/.kube/config 8 | 9 | # git clone k8s-code 10 | git clone https://github.com/sysnet4admin/_Lecture_k8s_learning.kit.git $HOME/_Lecture_k8s_learning.kit 11 | find $HOME/_Lecture_k8s_learning.kit -regex ".*\.\(sh\)" -exec chmod 700 {} \; 12 | 13 | # make rerepo-k8s-learning.kit and put permission 14 | cat <<EOF > /usr/local/bin/rerepo-k8s-learning.kit 15 | #!/usr/bin/env bash 16 | rm -rf $HOME/_Lecture_k8s_learning.kit 17 | git clone https://github.com/sysnet4admin/_Lecture_k8s_learning.kit.git $HOME/_Lecture_k8s_learning.kit 18 | find $HOME/_Lecture_k8s_learning.kit -regex ".*\.\(sh\)" -exec chmod 700 {} \; 19 | EOF 20 | chmod 700 /usr/local/bin/rerepo-k8s-learning.kit 21 | -------------------------------------------------------------------------------- /manifests/bash-completion.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | #Usage: 3 | #1.
bash <(curl -s https://raw.githubusercontent.com/sysnet4admin/IaC/master/manifests/bash-completion.sh) 4 | 5 | # install bash-completion for kubectl 6 | yum install bash-completion -y 7 | 8 | # kubectl completion on bash-completion dir 9 | kubectl completion bash >/etc/bash_completion.d/kubectl 10 | 11 | # alias kubectl to k 12 | echo 'alias k=kubectl' >> ~/.bashrc 13 | echo 'complete -F __start_kubectl k' >> ~/.bashrc 14 | 15 | #Reload rc 16 | su - 17 | 18 | echo "k8s bash-completion install successfully" -------------------------------------------------------------------------------- /manifests/busybox.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: busybox 5 | namespace: default 6 | spec: 7 | containers: 8 | - name: busybox 9 | image: busybox:1.28.4 10 | command: [ 'sh', '-c', 'sleep 3600' ] 11 | imagePullPolicy: IfNotPresent 12 | restartPolicy: Always 13 | -------------------------------------------------------------------------------- /manifests/echo-hname.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: echo-hname 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: echo-hname 19 | image: sysnet4admin/echo-hname -------------------------------------------------------------------------------- /manifests/echo-ip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: echo-ip 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: echo-ip 19 | image: sysnet4admin/echo-ip 
-------------------------------------------------------------------------------- /manifests/ingress/0.4.6/cmd: -------------------------------------------------------------------------------- 1 | kubectl apply $(ls deploy*.yaml | awk ' { print " -f " $1 } ') 2 | -------------------------------------------------------------------------------- /manifests/ingress/0.4.6/deploy-hn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: deploy-hn 5 | labels: 6 | app: deploy-hn 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: deploy-hn 12 | template: 13 | metadata: 14 | labels: 15 | app: deploy-hn 16 | spec: 17 | containers: 18 | - name: chk-hn 19 | image: sysnet4admin/chk-hn 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: ing-hn 25 | spec: 26 | selector: 27 | app: deploy-hn 28 | ports: 29 | - name: http 30 | port: 80 31 | targetPort: 80 32 | type: ClusterIP 33 | -------------------------------------------------------------------------------- /manifests/ingress/0.4.6/deploy-ip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: deploy-ip 5 | labels: 6 | app: deploy-ip 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: deploy-ip 12 | template: 13 | metadata: 14 | labels: 15 | app: deploy-ip 16 | spec: 17 | containers: 18 | - name: chk-ip 19 | image: sysnet4admin/chk-ip 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: ing-ip 25 | spec: 26 | selector: 27 | app: deploy-ip 28 | ports: 29 | - name: http 30 | port: 80 31 | targetPort: 80 32 | type: ClusterIP 33 | -------------------------------------------------------------------------------- /manifests/ingress/0.4.6/deploy-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | 
kind: Deployment 3 | metadata: 4 | name: deploy-nginx 5 | labels: 6 | app: deploy-nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: deploy-nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: deploy-nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: ing-default 25 | spec: 26 | selector: 27 | app: deploy-nginx 28 | ports: 29 | - name: http 30 | port: 80 31 | targetPort: 80 32 | type: ClusterIP 33 | -------------------------------------------------------------------------------- /manifests/ingress/0.4.6/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: nginx-ingress 5 | annotations: 6 | nginx.ingress.kubernetes.io/rewrite-target: / 7 | spec: 8 | rules: 9 | - http: 10 | paths: 11 | - path: / 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: ing-default 16 | port: 17 | number: 80 18 | - path: /hn 19 | pathType: Prefix 20 | backend: 21 | service: 22 | name: ing-hn 23 | port: 24 | number: 80 25 | - path: /ip 26 | pathType: Prefix 27 | backend: 28 | service: 29 | name: ing-ip 30 | port: 31 | number: 80 32 | -------------------------------------------------------------------------------- /manifests/ingress/1.5.1/.cmd: -------------------------------------------------------------------------------- 1 | kubectl apply $(ls deploy*.yaml | awk ' { print " -f " $1 } ') 2 | -------------------------------------------------------------------------------- /manifests/ingress/1.5.1/deploy-hn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: deploy-hn 5 | labels: 6 | app: deploy-hn 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: deploy-hn 12 | template: 13 | metadata: 14 | labels: 15 | app: deploy-hn 16 | 
spec: 17 | containers: 18 | - name: chk-hn 19 | image: sysnet4admin/chk-hn 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: ing-hn 25 | spec: 26 | selector: 27 | app: deploy-hn 28 | ports: 29 | - name: http 30 | port: 80 31 | targetPort: 80 32 | type: ClusterIP 33 | -------------------------------------------------------------------------------- /manifests/ingress/1.5.1/deploy-ip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: deploy-ip 5 | labels: 6 | app: deploy-ip 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: deploy-ip 12 | template: 13 | metadata: 14 | labels: 15 | app: deploy-ip 16 | spec: 17 | containers: 18 | - name: chk-ip 19 | image: sysnet4admin/chk-ip 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: ing-ip 25 | spec: 26 | selector: 27 | app: deploy-ip 28 | ports: 29 | - name: http 30 | port: 80 31 | targetPort: 80 32 | type: ClusterIP 33 | -------------------------------------------------------------------------------- /manifests/ingress/1.5.1/deploy-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: deploy-nginx 5 | labels: 6 | app: deploy-nginx 7 | spec: 8 | replicas: 3 9 | selector: 10 | matchLabels: 11 | app: deploy-nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: deploy-nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx 20 | --- 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: ing-default 25 | spec: 26 | selector: 27 | app: deploy-nginx 28 | ports: 29 | - name: http 30 | port: 80 31 | targetPort: 80 32 | type: ClusterIP 33 | -------------------------------------------------------------------------------- /manifests/ingress/1.5.1/ingress.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: nginx-ingress 5 | annotations: 6 | nginx.ingress.kubernetes.io/rewrite-target: / 7 | spec: 8 | rules: 9 | - http: 10 | paths: 11 | - path: / 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: ing-default 16 | port: 17 | number: 80 18 | - path: /hn 19 | pathType: Prefix 20 | backend: 21 | service: 22 | name: ing-hn 23 | port: 24 | number: 80 25 | - path: /ip 26 | pathType: Prefix 27 | backend: 28 | service: 29 | name: ing-ip 30 | port: 31 | number: 80 32 | -------------------------------------------------------------------------------- /manifests/nginx-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-pod 5 | spec: 6 | containers: 7 | - name: container-name 8 | image: nginx 9 | -------------------------------------------------------------------------------- /manifests/req_page.ps1: -------------------------------------------------------------------------------- 1 | #!/bin/powershell 2 | Param ( 3 | [Parameter(Mandatory=$true)] 4 | $IPwPort 5 | ) 6 | 7 | $i=0; while($true) 8 | { 9 | % { $i++; write-host -NoNewline "$i $_" } 10 | (Invoke-RestMethod "http://$IPwPort")-replace '\n', " " 11 | } -------------------------------------------------------------------------------- /manifests/rollout-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: rollout-nginx 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: nginx 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.15.12 18 | -------------------------------------------------------------------------------- /manifests/svc/ingress-config.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: ingress-nginx 5 | annotations: 6 | nginx.ingress.kubernetes.io/rewrite-target: / 7 | spec: 8 | rules: 9 | - http: 10 | paths: 11 | - path: 12 | backend: 13 | serviceName: hname-svc-default 14 | servicePort: 80 15 | - path: /ip 16 | backend: 17 | serviceName: ip-svc 18 | servicePort: 80 19 | - path: /your-directory 20 | backend: 21 | serviceName: your-svc 22 | servicePort: 80 23 | -------------------------------------------------------------------------------- /manifests/svc/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: ingress-nginx 6 | spec: 7 | ports: 8 | - name: http 9 | protocol: TCP 10 | port: 80 11 | targetPort: 80 12 | nodePort: 30100 13 | - name: https 14 | protocol: TCP 15 | port: 443 16 | targetPort: 443 17 | nodePort: 30101 18 | selector: 19 | app.kubernetes.io/name: ingress-nginx 20 | type: NodePort -------------------------------------------------------------------------------- /manifests/svc/metallb-l2config-xd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: metallb-system 5 | name: config 6 | data: 7 | config: | 8 | address-pools: 9 | - name: metallb-ip-range 10 | protocol: layer2 11 | addresses: 12 | - 192.168.1.11-192.168.1.49 -------------------------------------------------------------------------------- /manifests/svc/metallb-l2config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | namespace: metallb-system 5 | name: config 6 | data: 7 | config: | 8 | address-pools: 9 | - name: metallb-ip-range 10 | protocol: layer2 11 | addresses: 12 | 
- 192.168.1.11-192.168.1.19 -------------------------------------------------------------------------------- /manifests/svc/metallb-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: metallb-svc 5 | spec: 6 | ports: 7 | - port: 80 # access port 8 | targetPort: 80 9 | selector: 10 | app: lb-hname-pods # expose app label 11 | type: LoadBalancer -------------------------------------------------------------------------------- /manifests/svc/nodeport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: np-svc 5 | spec: 6 | selector: 7 | app: np-pods 8 | ports: 9 | - name: http 10 | protocol: TCP 11 | port: 80 12 | targetPort: 80 13 | nodePort: 30000 14 | type: NodePort -------------------------------------------------------------------------------- /manifests/svc/porter-l2config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: network.kubesphere.io/v1alpha1 2 | kind: Eip 3 | metadata: 4 | name: eip-sample-pool 5 | spec: 6 | address: 192.168.1.21-192.168.1.23 7 | protocol: layer2 8 | disable: false -------------------------------------------------------------------------------- /manifests/svc/porter-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: porter-svc 5 | annotations: 6 | lb.kubesphere.io/v1alpha1: porter 7 | protocol.porter.kubesphere.io/v1alpha1: layer2 8 | spec: 9 | ports: 10 | - port: 80 # access port 11 | targetPort: 80 12 | selector: 13 | app: lb-hname-pods # expose app label 14 | type: LoadBalancer -------------------------------------------------------------------------------- /manifests/vol/dynamic-pvc-deploy.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: dynamic-pvc-deploy 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: dynamic-pvc-deploy 10 | template: 11 | metadata: 12 | labels: 13 | app: dynamic-pvc-deploy 14 | spec: 15 | containers: 16 | - name: audit-trail 17 | image: sysnet4admin/audit-trail 18 | volumeMounts: 19 | - name: dynamic-vol # same name of volumes's name 20 | mountPath: /audit 21 | volumes: 22 | - name: dynamic-vol 23 | persistentVolumeClaim: 24 | claimName: dynamic-pvc -------------------------------------------------------------------------------- /manifests/vol/dynamic-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: dynamice-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 100Gi 11 | # storageClassName: -------------------------------------------------------------------------------- /manifests/vol/limits-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: LimitRange 3 | metadata: 4 | name: storagelimits 5 | spec: 6 | limits: 7 | - type: PersistentVolumeClaim 8 | max: 9 | storage: 5Mi 10 | min: 11 | storage: 1Mi -------------------------------------------------------------------------------- /manifests/vol/nfs-client-provisioner/0.Builder_nfs_server.sh: -------------------------------------------------------------------------------- 1 | mkdir /nfs_shared 2 | echo '/nfs_shared 192.168.1.0/24(rw,sync,no_root_squash)' >> /etc/exports 3 | systemctl enable --now nfs -------------------------------------------------------------------------------- /manifests/vol/nfs-client-provisioner/2-1.claim-pvc1Gi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: claim-pvc 5 | spec: 6 | accessModes: 
7 | - ReadWriteMany 8 | resources: 9 | requests: 10 | storage: 1Gi # changeable value as you want 11 | storageClassName: nfs-sc -------------------------------------------------------------------------------- /manifests/vol/nfs-client-provisioner/2-2.use-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: use-pvc 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: use-pvc 10 | template: 11 | metadata: 12 | labels: 13 | app: use-pvc 14 | spec: 15 | containers: 16 | - name: echo-nginx 17 | image: sysnet4admin/echo-nginx 18 | volumeMounts: 19 | - name: pvc-vol # same name of volumes's name 20 | mountPath: /pvc-vol 21 | volumes: 22 | - name: pvc-vol 23 | persistentVolumeClaim: 24 | claimName: claim-pvc # same name of pvc that was created -------------------------------------------------------------------------------- /manifests/vol/nfs-client-provisioner/3.sts-claim-vct1Gi.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: add-pvc 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: add-pvc 11 | serviceName: add-pvc 12 | template: 13 | metadata: 14 | labels: 15 | app: add-pvc 16 | spec: 17 | containers: 18 | - name: echo-nginx 19 | image: sysnet4admin/echo-nginx 20 | volumeMounts: 21 | - name: vct-vol # same name of volumes's name 22 | mountPath: /pvc-vol 23 | volumeClaimTemplates: 24 | - metadata: 25 | name: vct-vol 26 | spec: 27 | accessModes: [ "ReadWriteMany" ] 28 | storageClassName: "nfs-sc" 29 | resources: 30 | requests: 31 | storage: 1Gi -------------------------------------------------------------------------------- /manifests/vol/nfs-client-provisioner/Readme.md: -------------------------------------------------------------------------------- 1 | Kubernetes NFS-Client Provisioner 2 | 
================================= 3 | + **Source**: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client -------------------------------------------------------------------------------- /manifests/vol/nfs-ip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nfs-ip 5 | spec: 6 | replicas: 4 7 | selector: 8 | matchLabels: 9 | app: nfs-ip 10 | template: 11 | metadata: 12 | labels: 13 | app: nfs-ip 14 | spec: 15 | containers: 16 | - name: audit-trail 17 | image: sysnet4admin/audit-trail 18 | volumeMounts: 19 | - name: nfs-vol 20 | mountPath: /audit 21 | volumes: 22 | - name: nfs-vol 23 | nfs: 24 | server: 192.168.1.10 25 | path: /nfs_shared -------------------------------------------------------------------------------- /manifests/vol/nfs-provisioner/2.claim-pvc1Gi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: claim-svr-pvc 5 | annotations: 6 | volume.beta.kubernetes.io/storage-class: "nfs-svr-sc" 7 | spec: 8 | accessModes: 9 | - ReadWriteMany 10 | resources: 11 | requests: 12 | storage: 1Gi -------------------------------------------------------------------------------- /manifests/vol/nfs-provisioner/3.sts-claim-vct1Gi.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: StatefulSet 4 | metadata: 5 | name: add-svr-pvc 6 | spec: 7 | replicas: 1 8 | selector: 9 | matchLabels: 10 | app: add-svr-pvc 11 | serviceName: add-svr-pvc 12 | template: 13 | metadata: 14 | labels: 15 | app: add-svr-pvc 16 | spec: 17 | containers: 18 | - name: echo-nginx 19 | image: sysnet4admin/echo-nginx 20 | volumeMounts: 21 | - name: vct-svr-vol # same name of volumes's name 22 | mountPath: /pvc-vol 23 | volumeClaimTemplates: 24 | - metadata: 25 | name: vct-svr-vol 26 | spec: 
27 | accessModes: [ "ReadWriteMany" ] 28 | storageClassName: "nfs-svr-sc" 29 | resources: 30 | requests: 31 | storage: 1Gi -------------------------------------------------------------------------------- /manifests/vol/nfs-provisioner/Readme.md: -------------------------------------------------------------------------------- 1 | nfs-provisioner 2 | =============== 3 | + **Source**: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs 4 | + Helm: https://github.com/helm/charts/tree/master/stable/nfs-server-provisioner -------------------------------------------------------------------------------- /manifests/vol/nfs-pv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: nfs-pv 5 | spec: 6 | capacity: 7 | storage: 100Mi 8 | accessModes: 9 | - ReadWriteMany 10 | persistentVolumeReclaimPolicy: Retain 11 | nfs: 12 | server: 192.168.1.10 13 | path: /nfs_shared -------------------------------------------------------------------------------- /manifests/vol/nfs-pvc-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nfs-pvc-deploy 5 | spec: 6 | replicas: 4 7 | selector: 8 | matchLabels: 9 | app: nfs-pvc-deploy 10 | template: 11 | metadata: 12 | labels: 13 | app: nfs-pvc-deploy 14 | spec: 15 | containers: 16 | - name: audit-trail 17 | image: sysnet4admin/audit-trail 18 | volumeMounts: 19 | - name: nfs-vol 20 | mountPath: /audit 21 | volumes: 22 | - name: nfs-vol 23 | persistentVolumeClaim: 24 | claimName: nfs-pvc -------------------------------------------------------------------------------- /manifests/vol/nfs-pvc-sts-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nfs-pvc-sts-svc 5 | spec: 6 | selector: 7 | app: nfs-pvc-sts 8 | ports: 9 | 
- port: 80 10 | type: LoadBalancer -------------------------------------------------------------------------------- /manifests/vol/nfs-pvc-sts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: nfs-pvc-sts 5 | spec: 6 | replicas: 4 7 | serviceName: nfs-pvc-sts #statefulset need it 8 | selector: 9 | matchLabels: 10 | app: nfs-pvc-sts 11 | template: 12 | metadata: 13 | labels: 14 | app: nfs-pvc-sts 15 | spec: 16 | containers: 17 | - name: audit-trail 18 | image: sysnet4admin/audit-trail 19 | volumeMounts: 20 | - name: nfs-vol # same name of volumes's name 21 | mountPath: /audit 22 | volumes: 23 | - name: nfs-vol 24 | persistentVolumeClaim: 25 | claimName: nfs-pvc -------------------------------------------------------------------------------- /manifests/vol/nfs-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: nfs-pvc 5 | spec: 6 | accessModes: 7 | - ReadWriteMany 8 | resources: 9 | requests: 10 | storage: 10Mi -------------------------------------------------------------------------------- /manifests/vol/quota-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ResourceQuota 3 | metadata: 4 | name: storagequota 5 | spec: 6 | hard: 7 | persistentvolumeclaims: "5" 8 | requests.storage: "25Mi" -------------------------------------------------------------------------------- /manifests/vol/spare_local-storage/local-pvc-sts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: local-pvc-sts 5 | spec: 6 | replicas: 4 7 | serviceName: local-pvc-sts #statefulset need it 8 | selector: 9 | matchLabels: 10 | app: local-pvc-sts 11 | template: 12 | metadata: 13 | labels: 14 | app: local-pvc-sts 
15 | spec: 16 | containers: 17 | - name: audit-trail 18 | image: sysnet4admin/audit-trail 19 | volumeClaimTemplates: 20 | - metadata: 21 | name: local-pvc-sts 22 | spec: 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 10Gi 28 | storageClassName: local-sc -------------------------------------------------------------------------------- /manifests/vol/spare_local-storage/local-sc.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: local-sc 5 | provisioner: kubernetes.io/no-provisioner 6 | volumeBindingMode: WaitForFirstConsumer 7 | -------------------------------------------------------------------------------- /nGrinder/k8s/agents.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | labels: 5 | app: ngrinder-agent 6 | name: ngrinder-agent 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: ngrinder-agent 11 | template: 12 | metadata: 13 | labels: 14 | app: ngrinder-agent 15 | spec: 16 | containers: 17 | - name: ngrinder-agent 18 | image: ngrinder/agent:3.5.4 19 | # LB ip or domain name to connect the nGrinder controller 20 | # args: [ngrinder.test.com:80] 21 | args: [nnn.nnn.nnn.nnn:80] 22 | -------------------------------------------------------------------------------- /tools/convert-img-vmdk-qcow2: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # uncompress ova files 4 | echo "Uncompress ova files" 5 | for f in *.ova 6 | do 7 | tar -xvf "$f" 8 | done 9 | 10 | # converting from vmdk to qcow2 11 | echo "Converting from vmdk to qcow2" 12 | for f in *.vmdk 13 | do 14 | qemu-img convert -O qcow2 "$f" "$f".qcow2 15 | qemu-img convert -c -O qcow2 "$f".qcow2 "$f"-shrunk.qcow2 16 | mv -f "$f"-shrunk.qcow2 ../qcow2_files/x86-64/"${f:0:34}"-x86-64.qcow2 17 | done 18 | 
19 | # remove all temp-converting files 20 | echo "Remove all temp-converting files" 21 | rm -f *.ovf 22 | rm -f *.mf 23 | rm -f *.vmdk 24 | rm -f *.qcow2 25 | 26 | -------------------------------------------------------------------------------- /tools/kubetail.sh: -------------------------------------------------------------------------------- 1 | #Main_Source_From: 2 | # - https://github.com/johanhaleby/kubetail 3 | 4 | # usage: 5 | # 1. Create 6 | # - bash <(curl -s https://raw.githubusercontent.com/sysnet4admin/IaC/master/tools/kubetail.sh) 7 | 8 | curl -O https://raw.githubusercontent.com/johanhaleby/kubetail/master/kubetail 9 | chmod 755 kubetail 10 | mv ~/kubetail /usr/local/bin 11 | echo "kubetail install successfully" 12 | echo "" 13 | echo "Example: " 14 | echo "1. kubetail -l component=speaker -n metallb-system" 15 | echo "2. kubetail -l k8s-app=calico-node -n kube-system" 16 | echo "3. kubetail " 17 | --------------------------------------------------------------------------------