├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── backend ├── aks │ ├── Makefile │ ├── clean.sh │ ├── defaults.sh │ ├── deploy.sh │ ├── deps.sh │ ├── kubeconfig.sh │ └── tfsetup.sh ├── caasp4os │ ├── Makefile │ ├── defaults.sh │ ├── deploy.sh │ ├── destroy.sh │ ├── docker │ │ └── skuba │ │ │ ├── Dockerfile │ │ │ ├── Makefile │ │ │ └── build.sh │ ├── docker_skuba.sh │ ├── kubeconfig.sh │ ├── lib │ │ └── skuba.sh │ ├── prepare.sh │ └── terraform-os │ │ ├── cloud-init │ │ ├── common.tpl │ │ └── register-ibs.tpl │ │ ├── generate-cpi-conf.sh │ │ ├── lbaas-cap.tf │ │ ├── master-instance.tf │ │ ├── security-groups-cap.tf │ │ ├── terraform.tfvars.skel │ │ └── worker-instance.tf ├── check.sh ├── ekcp │ ├── Makefile │ ├── clean.sh │ ├── defaults.sh │ ├── kubeconfig.sh │ ├── prepare.sh │ └── up.sh ├── eks │ ├── Makefile │ ├── clean.sh │ ├── defaults.sh │ ├── deploy.sh │ ├── deps.sh │ ├── kubeconfig.sh │ └── tfsetup.sh ├── gke │ ├── Makefile │ ├── clean.sh │ ├── defaults.sh │ ├── deploy.sh │ ├── deps.sh │ ├── find-resources.sh │ ├── force-clean-cluster.sh │ ├── kubeconfig.sh │ ├── lib │ │ └── auth.sh │ └── tfsetup.sh ├── imported │ ├── Makefile │ ├── clean.sh │ ├── defaults.sh │ └── kubeconfig.sh ├── kind │ ├── Makefile │ ├── clean.sh │ ├── defaults.sh │ ├── deps.sh │ ├── docker_kubeconfig.sh │ ├── kubeconfig.sh │ ├── prepare.sh │ ├── restart.sh │ ├── start.sh │ ├── stop.sh │ ├── up.sh │ └── up_if_not_exists.sh └── minikube │ ├── Makefile │ ├── clean.sh │ ├── defaults.sh │ ├── deps.sh │ ├── kubeconfig.sh │ ├── start.sh │ ├── stop.sh │ └── up.sh ├── config └── config.toml ├── contrib ├── assets │ ├── .gitignore │ ├── scf_states.png │ ├── scf_states.tex │ ├── states.png │ └── states.tex └── samples │ ├── eirini-persi-test │ ├── main.go │ └── manifest.yml │ └── ticking_app │ ├── main.go │ └── manifest.yml ├── include ├── buildir.sh ├── colors.sh ├── common.sh ├── defaults_global.sh ├── defaults_global_private.sh ├── func.sh └── versioning.sh ├── kube ├── brats │ └── pod.yaml.erb ├── catapult-sync │ ├── Dockerfile │ ├── cronjobs │ └── sync.sh ├── catapult-web │ ├── Dockerfile │ └── main.go ├── catapult-wtty │ └── Dockerfile ├── cats │ └── pod.yaml.erb ├── cf-operator │ ├── boshdeployment.yaml │ ├── password.yaml │ └── qstatefulset_tolerations.yaml ├── dind.yaml ├── registry.yaml ├── smokes │ └── pod.yaml.erb ├── socks.yaml ├── socks │ ├── Dockerfile │ ├── LICENSE │ ├── Makefile │ ├── README.md │ └── main.go ├── storageclass.yaml └── task.yaml ├── modules ├── common │ ├── Makefile │ ├── defaults.sh │ └── deps.sh ├── experimental │ ├── Makefile │ ├── airgap_down.sh │ ├── airgap_up.sh │ ├── cilium-block-egress.yaml │ ├── defaults.sh │ ├── eirini_release.sh │ ├── eirinifs.sh │ ├── tf_auto_deploy.sh │ └── tf_force_clean.sh ├── extra │ ├── Makefile │ ├── concourse.sh │ ├── defaults.sh │ ├── drone.sh │ ├── fissile.sh │ ├── gitea.sh │ ├── ingress.sh │ ├── ingress_forward.sh │ ├── kwt.sh │ ├── kwt_connect.sh │ ├── log.sh │ ├── registry.sh │ ├── task.sh │ ├── terminal.sh │ ├── top.sh │ └── web.sh ├── kubecf │ ├── Makefile │ ├── brats_setup.sh │ ├── build.sh │ ├── chart.sh │ ├── clean.sh │ ├── defaults.sh │ ├── gen_config.sh │ ├── install.sh │ ├── klog.sh │ ├── login.sh │ ├── minibroker.sh │ ├── precheck.sh │ ├── purge.sh │ ├── stemcell_build.sh │ └── upgrade.sh ├── metrics │ ├── Makefile │ ├── chart.sh │ ├── clean.sh │ ├── defaults.sh │ ├── gen-config.sh │ ├── install.sh │ └── upgrade.sh ├── scf │ ├── Makefile │ ├── brats_setup.sh │ ├── build.sh │ ├── chart.sh │ ├── clean.sh │ 
├── defaults.sh │ ├── gen_config.sh │ ├── install.sh │ ├── klog.sh │ ├── login.sh │ ├── minibroker.sh │ ├── precheck.sh │ ├── purge.sh │ ├── stemcell_build.sh │ └── upgrade.sh ├── stratos │ ├── Makefile │ ├── chart.sh │ ├── clean.sh │ ├── defaults.sh │ ├── gen-config.sh │ ├── install.sh │ ├── reachable.sh │ └── upgrade.sh └── tests │ ├── Makefile │ ├── autoscaler.sh │ ├── brats.sh │ ├── cats.sh │ ├── cats_scf.sh │ ├── defaults.sh │ ├── eirini_persi.sh │ ├── kubecats.sh │ ├── kubecf-test.sh │ ├── kubesmokes.sh │ ├── sample-ticking.sh │ ├── sample.sh │ ├── smoke.sh │ ├── smoke_scf.sh │ └── stress-benchmark.sh ├── scripts └── image.sh └── tests ├── .gitignore ├── Makefile ├── integration_tests.sh ├── lint.sh ├── mocks ├── helm ├── kubectl ├── kubectl_output_get_configmap.json ├── kubectl_output_get_nodes.json ├── kubectl_output_get_pods ├── ssh └── terraform └── unit_tests.sh /.gitignore: -------------------------------------------------------------------------------- 1 | build*/ 2 | shunit2/ -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: bash 2 | 3 | services: 4 | - docker 5 | 6 | before_install: 7 | - | 8 | STABLE=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt) 9 | curl -LO https://storage.googleapis.com/kubernetes-release/release/$STABLE/bin/linux/amd64/kubectl 10 | - chmod +x ./kubectl 11 | - sudo mv ./kubectl /usr/local/bin/kubectl 12 | - curl -L "https://packages.cloudfoundry.org/stable?release=linux64-binary&source=github" | tar -zx 13 | - sudo mv cf /usr/local/bin 14 | - sudo chmod +x /usr/local/bin/cf 15 | - sudo pip install yamllint 16 | 17 | script: 18 | - make catapult-test 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.opensuse.org/opensuse/tumbleweed:latest 2 | # Catapult dependencies: 3 | RUN zypper ref && zypper in --no-recommends -y git zip wget docker ruby gzip make jq curl which unzip bazel1.2 direnv 4 | RUN echo 'eval $(direnv hook bash)' >> ~/.bashrc 5 | 6 | RUN wget "https://github.com/mikefarah/yq/releases/download/3.2.1/yq_linux_amd64" -O /usr/local/bin/yq && \ 7 | chmod +x /usr/local/bin/yq 8 | 9 | RUN wget "https://github.com/krishicks/yaml-patch/releases/download/v0.0.10/yaml_patch_linux" -O /usr/local/bin/yaml-patch && \ 10 | chmod +x /usr/local/bin/yaml-patch 11 | 12 | # Extras, mostly for the terminal image (that could be split in another image) 13 | RUN zypper in --no-recommends -y vim zsh tmux glibc-locale glibc-i18ndata python ruby python3 python3-pip cf-cli gnuplot 14 | 15 | RUN zypper ar --priority 100 https://download.opensuse.org/repositories/devel:/languages:/go/openSUSE_Factory/devel:languages:go.repo && \ 16 | zypper --gpg-auto-import-keys -n in --no-recommends -y --from=devel_languages_go go1.13 17 | 18 | RUN zypper ar --priority 100 https://download.opensuse.org/repositories/Cloud:Tools/openSUSE_Tumbleweed/Cloud:Tools.repo && \ 19 | zypper --gpg-auto-import-keys -n in --no-recommends -y Cloud_Tools:kubernetes-client 20 | 21 | RUN helm_version=v3.1.1 \ 22 | && wget https://get.helm.sh/helm-${helm_version}-linux-amd64.tar.gz -O - | tar xz -C /usr/bin --strip-components=1 linux-amd64/helm 23 | 24 | # k8s backends dependencies: 25 | RUN zypper in --no-recommends -y terraform 26 | 27 | RUN zypper in --no-recommends -y python-xml 28 | 
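# Cloud provider CLIs for the hosted k8s backends follow: aws and aws-iam-authenticator for EKS, the gcloud SDK for GKE, az for AKS.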
RUN curl "https://s3.amazonaws.com/aws-cli/awscli-bundle.zip" -o "awscli-bundle.zip" && \ 29 | unzip awscli-bundle.zip && rm awscli-bundle.zip && \ 30 | ./awscli-bundle/install --install-dir=/usr/lib/ --bin-location=/usr/local/bin/aws && \ 31 | rm -rf awscli-bundle* 32 | 33 | RUN curl -o aws-iam-authenticator https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/aws-iam-authenticator && \ 34 | chmod +x aws-iam-authenticator && mv aws-iam-authenticator /usr/local/bin/ 35 | 36 | RUN curl https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-292.0.0-linux-x86_64.tar.gz \ 37 | > /tmp/google-cloud-sdk.tar.gz \ 38 | && mkdir -p /usr/local/gcloud \ 39 | && tar -C /usr/local/gcloud -xvf /tmp/google-cloud-sdk.tar.gz && rm -rf /tmp/google-cloud-sdk.tar.gz \ 40 | && /usr/local/gcloud/google-cloud-sdk/install.sh --override-components gcloud --quiet 41 | ENV PATH $PATH:/usr/local/gcloud/google-cloud-sdk/bin 42 | 43 | RUN curl -o kubectl-aws https://amazon-eks.s3-us-west-2.amazonaws.com/1.14.6/2019-08-22/bin/linux/amd64/kubectl && \ 44 | mv kubectl-aws /usr/local/bin/ && chmod +x /usr/local/bin/kubectl-aws 45 | 46 | RUN zypper in --no-recommends -y gcc libffi-devel python3-devel libopenssl-devel 47 | RUN curl -o install.py https://azurecliprod.blob.core.windows.net/install.py && \ 48 | sed -i 's/python3-devel/python38-devel/g' install.py && \ 49 | printf "/usr/local/lib/azure-cli\n/usr/local/bin\n\n\n" | python3 ./install.py && \ 50 | rm ./install.py 51 | 52 | # KubeCF dependencies: 53 | RUN zypper in --no-recommends -y python3-yamllint ShellCheck 54 | 55 | RUN zypper rm -y glibc-locale && zypper clean --all 56 | 57 | ADD . /catapult 58 | WORKDIR /catapult 59 | ENTRYPOINT [ "/usr/bin/make" ] 60 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # [![Build Status](https://travis-ci.com/SUSE/catapult.svg?branch=master)](https://travis-ci.com/SUSE/catapult) Catapult 2 | 3 | $> git clone https://github.com/SUSE/catapult.git && cd catapult 4 | $> make all 5 | 6 | This will start a local [Kind](https://github.com/kubernetes-sigs/kind) cluster 7 | and deploy kubecf on top of it. Remove everything with `make clean`. 8 | 9 | Next, check the [First steps wiki page](https://github.com/SUSE/catapult/wiki/First-steps) 10 | or do: 11 | 12 | $> make help 13 | $> make help-all 14 | 15 | 16 | # Description 17 | 18 | Catapult is a CI implementation for [KubeCF](https://github.com/SUSE/kubecf), 19 | [SCF](https://github.com/SUSE/scf) & 20 | [Stratos](https://github.com/cloudfoundry/stratos), 21 | designed from the ground-up to work locally. This allows iterating and using it 22 | for manual tests and development of the products, in addition to running it in 23 | your favourite CI scheduler (Concourse, Gitlab…). 24 | 25 | Catapult supports several k8s backends: can create CaaSP4, GKE, EKS clusters on its 26 | own, and you can bring your own cluster with the "imported" backend. 27 | 28 | It is implemented as a little lifecycle manager (a finite state machine), written 29 | with Makefiles and Bash scripts. 30 | 31 | The deployments achieved with Catapult are not production ready; don't expect 32 | them to be in the future either. They are for developing and testing. 33 | 34 | It also contains some goodies to aid in development and testing deployments (see 35 | `make module-extra-*`). 
36 | 37 | To use it in a CI, like Travis, see for example: 38 | * [.travis.yml](https://github.com/SUSE/catapult/blob/master/.travis.yml) on this 39 | repository, to CI Catapult itself 40 | * [kubecf post-publish](https://github.com/SUSE/kubecf/tree/master/.concourse) 41 | 42 | # Documentation 43 | 44 | For now, all documentation is in the [project wiki](https://github.com/SUSE/catapult/wiki). 45 | 46 | # Contributing 47 | 48 | Please run catapult's linting, unit tests, integration tests, etc. for a full TDD 49 | experience, as PRs are gated through them (see "build status" label): 50 | 51 | $> make catapult-tests 52 | 53 | Debug catapult with `DEBUG_MODE=true`. 54 | 55 | You can get a local development environment for [SCF](https://github.com/SUSE/scf) 56 | or [KubeCF](https://github.com/SUSE/kubecf), with all needed catapult deps, with: 57 | 58 | $> docker run -v /var/run/docker.sock:/var/run/docker.sock -ti --rm splatform/catapult:latest dind 59 | 60 | Check out [Run in Docker](https://github.com/SUSE/catapult/wiki/Run-in-Docker) 61 | page on the wiki for more options. 62 | -------------------------------------------------------------------------------- /backend/aks/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | .PHONY: kubeconfig 4 | kubeconfig: 5 | ./kubeconfig.sh 6 | 7 | .PHONY: deps 8 | deps: 9 | ./deps.sh 10 | 11 | .PHONY: tfsetup 12 | tfsetup: 13 | ./tfsetup.sh 14 | 15 | .PHONY: deploy 16 | deploy: deps tfsetup 17 | ./deploy.sh 18 | 19 | .PHONY: clean 20 | clean: deps tfsetup 21 | ./clean.sh 22 | 23 | .PHONY: find-resources 24 | find-resources: 25 | echo "$@ not implemented yet for ${BACKEND}" 26 | exit 1 27 | 28 | .PHONY: force-clean-cluster 29 | force-clean-cluster: 30 | echo "$@ not implemented yet for ${BACKEND}" 31 | exit 1 32 | 33 | .PHONY: all 34 | all:: 35 | @echo 'WARNING: still WIP. Use it at your own risk.' 36 | @echo 'Kindly waiting for 5s…'; sleep 5 37 | all:: deploy 38 | -------------------------------------------------------------------------------- /backend/aks/clean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Requires: 4 | # - azure credentials present 5 | 6 | . ./defaults.sh 7 | . ../../include/common.sh 8 | 9 | if [ -d "$BUILD_DIR" ]; then 10 | . .envrc 11 | # Required env vars for deploying via Azure SP. 12 | # see: https://www.terraform.io/docs/providers/azurerm/guides/service_principal_client_secret.html#configuring-the-service-principal-in-terraform 13 | export ARM_CLIENT_ID="${AZURE_APP_ID}" 14 | export ARM_CLIENT_SECRET="${AZURE_PASSWORD}" 15 | export ARM_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID}" 16 | export ARM_TENANT_ID="${AZURE_TENANT_ID}" 17 | 18 | pushd cap-terraform/aks || exit 19 | if [[ -f "${KUBECONFIG}" && !
-f aksk8scfg ]]; then 20 | cp "${KUBECONFIG}" aksk8scfg 21 | fi 22 | terraform init 23 | terraform destroy -auto-approve 24 | popd || exit 25 | rm -rf "$BUILD_DIR" 26 | ok "AKS cluster deleted successfully" 27 | else 28 | warn "BUILD_DIR ${BUILD_DIR} not found" 29 | fi 30 | 31 | -------------------------------------------------------------------------------- /backend/aks/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # AKS options 4 | ################## 5 | 6 | KUBECTL_VERSION="${KUBECTL_VERSION:-v1.19.3}" 7 | 8 | AZURE_CLUSTER_NAME="${AZURE_CLUSTER_NAME:-${OWNER}-cap}" 9 | AZURE_NODE_COUNT="${AZURE_NODE_COUNT:-3}" 10 | AZURE_LOCATION="${AZURE_LOCATION:-westus}" 11 | AZURE_RESOURCE_GROUP="${AZURE_RESOURCE_GROUP:-}" 12 | 13 | AZURE_APP_ID="${AZURE_APP_ID:-}" 14 | AZURE_PASSWORD="${AZURE_PASSWORD:-}" 15 | AZURE_TENANT_ID="${AZURE_TENANT_ID:-}" 16 | AZURE_SUBSCRIPTION_ID="${AZURE_SUBSCRIPTION_ID:-}" 17 | 18 | AZURE_DNS_JSON="${AZURE_DNS_JSON:-}" 19 | AZURE_DNS_RESOURCE_GROUP="${AZURE_DNS_RESOURCE_GROUP:-susecap-domain}" 20 | AZURE_DNS_DOMAIN="${AZURE_DNS_DOMAIN:-${AZURE_CLUSTER_NAME}.susecap.net}" 21 | 22 | # Optional: SSH key file for Azure to use. If unset, take first in SSH agent. 23 | AZURE_SSH_KEY="${AZURE_SSH_KEY:-}" 24 | 25 | # Settings for terraform state save/restore 26 | # 27 | # Set to a non-empty key to trigger state save in deploy.sh. 28 | TF_KEY="${TF_KEY:-}" 29 | 30 | # 31 | # s3 bucket and bucket region to save state to. Ignored when 32 | # TF_KEY is empty (default, see above). 33 | TF_BUCKET="${TF_BUCKET:-cap-ci-tf}" 34 | TF_REGION="${TF_REGION:-us-west-2}" 35 | -------------------------------------------------------------------------------- /backend/aks/deps.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ../../include/common.sh 4 | [[ -d "${BUILD_DIR}" ]] || exit 0 5 | . .envrc 6 | 7 | if [[ "$DOWNLOAD_CATAPULT_DEPS" == "false" ]]; then 8 | ok "Skipping downloading AKS deps, using host binaries" 9 | exit 0 10 | fi 11 | 12 | azclipath=bin/az 13 | if [ ! -e "$azclipath" ]; then 14 | # needs gcc libffi-devel python3-devel libopenssl-devel 15 | curl -o install.py https://azurecliprod.blob.core.windows.net/install.py && \ 16 | printf "y\n$(pwd)/.lib/azure-cli\n$(pwd)/bin\nY\n$(pwd)/.envrc\n " | python3 ./install.py && \ 17 | rm ./install.py 18 | fi 19 | 20 | terraformpath=bin/terraform 21 | if [ ! -e "$terraformpath" ]; then 22 | if [[ "$OSTYPE" == "darwin"* ]]; then 23 | curl -o terraform.zip https://releases.hashicorp.com/terraform/0.12.29/terraform_0.12.29_darwin_amd64.zip 24 | else 25 | curl -o terraform.zip https://releases.hashicorp.com/terraform/0.12.29/terraform_0.12.29_linux_amd64.zip 26 | fi 27 | unzip terraform.zip && rm -rf terraform.zip 28 | chmod +x terraform && mv terraform bin/ 29 | fi 30 | -------------------------------------------------------------------------------- /backend/aks/kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | if [ ! -f "$KUBECFG" ]; then 8 | err "No KUBECFG given - you need to pass one!" 
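# Hint (illustrative, not exercised by this script): an admin kubeconfig for AKS can be exported with 'az aks get-credentials --resource-group <rg> --name <cluster> --file <path>'.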
9 | exit 1 10 | fi 11 | 12 | cp "$KUBECFG" kubeconfig 13 | kubectl get nodes > /dev/null 2>&1 || exit 14 | ok "Kubeconfig for $BACKEND correctly imported" 15 | -------------------------------------------------------------------------------- /backend/aks/tfsetup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | [[ -d "${BUILD_DIR}" ]] || exit 0 6 | . .envrc 7 | 8 | if ! [[ -d cap-terraform ]]; then 9 | git clone "${CAP_TERRAFORM_REPOSITORY}" -b "${CAP_TERRAFORM_BRANCH}" 10 | fi 11 | pushd cap-terraform/aks || exit 12 | git checkout "${CAP_TERRAFORM_BRANCH}" 13 | git pull 14 | 15 | # terraform needs helm client installed and configured: 16 | helm_init_client 17 | 18 | # Note: sshkey.pub is generated in deploy, and will be lost in CI deployments unless persisted somewhere 19 | cat <<HEREDOC > terraform.tfvars 20 | cluster_name = "${AZURE_CLUSTER_NAME}" 21 | az_resource_group = "${AZURE_RESOURCE_GROUP}" 22 | client_id = "${AZURE_APP_ID}" 23 | client_secret = "${AZURE_PASSWORD}" 24 | ssh_public_key = "./sshkey.pub" 25 | instance_count = "${AZURE_NODE_COUNT}" 26 | location = "${AZURE_LOCATION}" 27 | agent_admin = "cap-admin" 28 | cluster_labels = { 29 | "catapult-cluster" = "${AZURE_CLUSTER_NAME}", 30 | "owner" = "${OWNER}" 31 | } 32 | k8s_version = "${KUBECTL_VERSION#v}" 33 | azure_dns_json = "${AZURE_DNS_JSON}" 34 | dns_zone_rg = "${AZURE_DNS_RESOURCE_GROUP}" 35 | HEREDOC 36 | 37 | if [ -n "${TF_KEY}" ] ; then 38 | cat > backend.tf </dev/null | grep -qi persistent; then 18 | kubectl delete storageclass persistent 19 | wait 20 | fi 21 | 22 | if [ -d deployment ]; then 23 | pushd deployment || exit 24 | info "Destroying infrastructure with Terraform…" 25 | if [[ ! -v OS_PASSWORD ]]; then 26 | err "Missing openstack credentials" && exit 1 27 | fi 28 | skuba_container terraform init 29 | skuba_container terraform destroy -auto-approve 30 | info "Terraform infrastructure destroyed" 31 | # TODO see deployment/my-cluster/cloud/ 32 | # TODO external-dns will fail to delete DNS entries 33 | # https://github.com/kubernetes-sigs/external-dns/pull/1255 34 | popd || exit 35 | else 36 | info "No Terraform infrastructure present" 37 | fi 38 | 39 | popd || exit 40 | rm -rf "$BUILD_DIR" 41 | fi 42 | ok "CaaSP4 on Openstack successfully destroyed!"
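# Descriptive note: the 'skuba_container terraform init/destroy' calls above run inside the skuba/$CAASP_VER image built by docker_skuba.sh (see the skuba_container wrapper in lib/skuba.sh), so this backend does not need terraform installed on the host.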
43 | -------------------------------------------------------------------------------- /backend/caasp4os/docker/skuba/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.opensuse.org/opensuse/leap/15.2/images/totest/containers/opensuse/leap:15.2 2 | 3 | ARG VERSION 4 | ARG REPO_ENV 5 | ARG REPO 6 | 7 | ARG IBS="http://download.suse.de/ibs" 8 | # ARG IBS="http://ibs-mirror.prv.suse.net/ibs" 9 | RUN for repo in SLE-Product-SLES SLE-Module-Basesystem SLE-Module-Containers SLE-Module-Public-Cloud; do \ 10 | zypper ar $IBS/SUSE/Products/$repo/15-SP2/x86_64/product ${repo}_pool; \ 11 | zypper ar $IBS/SUSE/Updates/$repo/15-SP2/x86_64/update ${repo}_updates; \ 12 | done 13 | RUN zypper ar $IBS/SUSE/Products/SUSE-CAASP/4.5/x86_64/product CAASP_pool 14 | # RUN zypper ar $IBS/Updates/SUSE-CAASP/4.5/x86_64/update CAASP_updates 15 | RUN zypper ar --no-gpgcheck $IBS"${REPO}" "skuba-${REPO_ENV}" 16 | 17 | RUN zypper refresh; zypper -n dist-upgrade 18 | RUN zypper --gpg-auto-import-keys ref -s 19 | 20 | 21 | # RUN zypper ar --no-gpgcheck "http://download.suse.de/ibs/SUSE/Products/SUSE-CAASP/4.5/x86_64/product/" caasp-product 22 | RUN zypper ar --no-gpgcheck "$IBS/SUSE:/CA/SLE_15_SP2/SUSE:CA.repo" 23 | # RUN zypper ref 24 | RUN zypper in --auto-agree-with-licenses --no-confirm -t product caasp 25 | RUN zypper in --auto-agree-with-licenses --no-confirm ca-certificates-suse openssh 26 | 27 | RUN zypper -n in -t pattern SUSE-CaaSP-Management 28 | RUN zypper up 29 | RUN zypper clean -a 30 | 31 | VOLUME ["/app"] 32 | 33 | WORKDIR /app 34 | -------------------------------------------------------------------------------- /backend/caasp4os/docker/skuba/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: devel 2 | devel: 3 | ./build.sh devel /Devel:/CaaSP:/4.5/SLE_15_SP2/ 4 | 5 | .PHONY: staging 6 | staging: 7 | ./build.sh staging /SUSE:/SLE-15-SP2:/Update:/Products:/CASP45/staging/ 8 | 9 | .PHONY: product 10 | product: 11 | ./build.sh product /SUSE/Products/SUSE-CAASP/4.5/x86_64/product/ 12 | 13 | .PHONY: update 14 | update: 15 | ./build.sh update /SUSE/Updates/SUSE-CAASP/4.5/x86_64/update/ 16 | -------------------------------------------------------------------------------- /backend/caasp4os/docker/skuba/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | REPO_ENV="$1" 4 | REPO="$2" 5 | IMAGE_NAME="skuba/$REPO_ENV" 6 | 7 | if [[ "$(docker images -q "$IMAGE_NAME" 2> /dev/null)" == "" ]]; then 8 | echo ">>> INFO: Building $IMAGE_NAME" 9 | docker build --no-cache -t "$IMAGE_NAME" \ 10 | --build-arg VERSION="$(date -I)" \ 11 | --build-arg REPO_ENV="$REPO_ENV" \ 12 | --build-arg REPO="$REPO" . 13 | fi 14 | -------------------------------------------------------------------------------- /backend/caasp4os/docker_skuba.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ../../include/common.sh 4 | pushd "$ROOT_DIR"/backend/caasp4os || exit 5 | . defaults.sh 6 | 7 | set -Eeuo pipefail 8 | 9 | if [[ "$(docker images -q skuba/$CAASP_VER 2> /dev/null)" == "" ]]; then 10 | info "Creating skuba/$CAASP_VER container image…" 11 | make -C docker/skuba/ "$CAASP_VER" 12 | ok "skuba/$CAASP_VER container image created!"
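# Illustrative: the guard above only builds skuba/$CAASP_VER when it is absent; remove the image (e.g. 'docker rmi skuba/$CAASP_VER') to force a rebuild with fresh repos.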
13 | fi 14 | -------------------------------------------------------------------------------- /backend/caasp4os/kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | if [ ! -f "$KUBECFG" ]; then 8 | err "No KUBECFG given - you need to pass one!" 9 | exit 1 10 | fi 11 | 12 | cp "$KUBECFG" kubeconfig 13 | kubectl get nodes > /dev/null 2>&1 || exit 14 | ok "Kubeconfig for $BACKEND correctly imported" 15 | -------------------------------------------------------------------------------- /backend/caasp4os/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | # Takes a newly deployed Caasp4 cluster, provided by the kubeconfig, and prepares 5 | # it for CAP 6 | # 7 | # Requires: 8 | # kubectl & helm binaries 9 | 10 | . ./defaults.sh 11 | . ../../include/common.sh 12 | . .envrc 13 | 14 | create_rolebinding() { 15 | 16 | kubectl create clusterrolebinding admin --clusterrole=cluster-admin --user=system:serviceaccount:kube-system:default 17 | kubectl create clusterrolebinding uaaadmin --clusterrole=cluster-admin --user=system:serviceaccount:uaa:default 18 | kubectl create clusterrolebinding scfadmin --clusterrole=cluster-admin --user=system:serviceaccount:scf:default 19 | 20 | kubectl apply -f - </dev/null | grep -qi tiller; then 60 | # Tiller already present 61 | helm init --client-only 62 | else 63 | kubectl create serviceaccount tiller --namespace kube-system 64 | helm init --wait 65 | fi 66 | else 67 | helm_init 68 | fi 69 | } 70 | 71 | create_cpi_storageclass() { 72 | if ! kubectl get storageclass 2>/dev/null | grep -qi persistent; then 73 | kubectl apply -f - < /dev/null 2 | - zypper in -y -l --auto-agree-with-product-licenses -t product caasp 3 | - zypper in -y -l -t pattern SUSE-CaaSP-Node &> /dev/null 4 | - zypper up -y -l --auto-agree-with-product-licenses 2>&1>/dev/null 5 | -------------------------------------------------------------------------------- /backend/caasp4os/terraform-os/generate-cpi-conf.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #shellcheck disable=SC2145,SC2016 3 | log() { (>&1 echo -e "$@") ; } 4 | cmd() { log "[ CMD ] $@" ; } 5 | info() { log "[ INFO ] $@" ; } 6 | error() { (>&2 echo -e "[ ERROR ] $@") ;} 7 | 8 | if [ -z "${OS_AUTH_URL}" ] || [ -z "${OS_USERNAME}" ] || \ 9 | [ -z "${OS_PASSWORD}" ] || [ -z "${OS_PROJECT_ID}" ] || \ 10 | [ -z "${OS_PRIVATE_SUBNET_ID}" ] || [ -z "${OS_PUBLIC_NET_ID}" ]; then 11 | error '$OS_AUTH_URL $OS_USERNAME $OS_PASSWORD $OS_PROJECT_ID' 12 | error '$OS_PRIVATE_SUBNET_ID $OS_PUBLIC_NET_ID must be specified' 13 | error 'Please download and source your OpenStack RC file' 14 | exit 1 15 | fi 16 | 17 | OPENSTACK_CONF="openstack.conf" 18 | 19 | umask 077 20 | 21 | cat << EOF > "${OPENSTACK_CONF}" 22 | [Global] 23 | auth-url="${OS_AUTH_URL}" 24 | username="${OS_USERNAME}" 25 | password="${OS_PASSWORD}" 26 | tenant-id="${OS_PROJECT_ID}" 27 | tenant-name="${OS_PROJECT_NAME}" 28 | domain-id="${OS_USER_DOMAIN_ID}" 29 | domain-name="${OS_USER_DOMAIN_NAME}" 30 | region="${OS_REGION_NAME}" 31 | ca-file="${CA_FILE}" 32 | [LoadBalancer] 33 | lb-version=v2 34 | subnet-id="${OS_PRIVATE_SUBNET_ID}" 35 | floating-network-id="${OS_PUBLIC_NET_ID}" 36 | create-monitor=yes 37 | monitor-delay=1m 38 | monitor-timeout=30s 39 | monitor-max-retries=3 40 | manage-security-groups=true 41 | 
[BlockStorage] 42 | trust-device-path=false 43 | bs-version=v2 44 | ignore-volume-az=true 45 | EOF 46 | 47 | umask 022 48 | 49 | [ -z "$OS_PROJECT_NAME" ] && sed -i '/^tenant-name=/d' "${OPENSTACK_CONF}" 50 | [ -z "$OS_USER_DOMAIN_ID" ] && sed -i '/^domain-id=/d' "${OPENSTACK_CONF}" 51 | [ -z "$OS_USER_DOMAIN_NAME" ] && sed -i '/^domain-name=/d' "${OPENSTACK_CONF}" 52 | [ -z "$CA_FILE" ] && sed -i '/^ca-file=/d' "${OPENSTACK_CONF}" 53 | 54 | if [ -z "${TR_STACK}" ] || [ -z "${TR_LB_IP}" ] || \ 55 | [ -z "$TR_MASTER_IPS" ] || [ -z "$TR_WORKER_IPS" ] || \ 56 | [ -z "${TR_USERNAME}" ]; then 57 | error '$TR_STACK $TR_LB_IP $TR_MASTER_IPS $TR_WORKER_IPS must be specified' 58 | exit 1 59 | fi 60 | 61 | info "### Run following commands to bootstrap skuba cluster:\\n" 62 | cmd " skuba cluster init --control-plane ${TR_LB_IP} --cloud-provider openstack ${TR_STACK}-cluster" 63 | cmd " mv openstack.conf ${TR_STACK}-cluster/cloud/openstack/openstack.conf" 64 | cmd " cd ${TR_STACK}-cluster" 65 | 66 | i=0 67 | for MASTER in $TR_MASTER_IPS; do 68 | if [ $i -eq "0" ]; then 69 | cmd " skuba node bootstrap --target ${MASTER} --sudo --user ${TR_USERNAME} caasp-master-${TR_STACK}-0" 70 | else 71 | cmd " skuba node join --role master --target ${MASTER} --sudo --user ${TR_USERNAME} caasp-master-${TR_STACK}-${i}" 72 | fi 73 | ((++i)) 74 | done 75 | 76 | i=0 77 | for WORKER in $TR_WORKER_IPS; do 78 | cmd " skuba node join --role worker --target ${WORKER} --sudo --user ${TR_USERNAME} caasp-worker-${TR_STACK}-${i}" 79 | ((++i)) 80 | done 81 | -------------------------------------------------------------------------------- /backend/caasp4os/terraform-os/lbaas-cap.tf: -------------------------------------------------------------------------------- 1 | resource "openstack_lb_listener_v2" "uaa_listener" { 2 | protocol = "TCP" 3 | protocol_port = "2793" 4 | loadbalancer_id = openstack_lb_loadbalancer_v2.lb.id 5 | name = "${var.stack_name}-uaa-listener" 6 | 7 | depends_on = [ 8 | openstack_lb_loadbalancer_v2.lb 9 | ] 10 | } 11 | 12 | resource "openstack_lb_pool_v2" "uaa_pool" { 13 | name = "${var.stack_name}-uaa-pool" 14 | protocol = "TCP" 15 | lb_method = "ROUND_ROBIN" 16 | listener_id = openstack_lb_listener_v2.uaa_listener.id 17 | } 18 | 19 | resource "openstack_lb_member_v2" "uaa_member" { 20 | count = var.workers 21 | pool_id = openstack_lb_pool_v2.uaa_pool.id 22 | address = element(openstack_compute_instance_v2.worker.*.access_ip_v4, count.index) 23 | subnet_id = openstack_networking_subnet_v2.subnet.id 24 | protocol_port = 2793 25 | } 26 | 27 | resource "openstack_lb_monitor_v2" "uaa_monitor" { 28 | pool_id = openstack_lb_pool_v2.uaa_pool.id 29 | type = "TCP" 30 | url_path = "/healthz" 31 | expected_codes = 200 32 | delay = 10 33 | timeout = 5 34 | max_retries = 3 35 | } 36 | -------------------------------------------------------------------------------- /backend/caasp4os/terraform-os/security-groups-cap.tf: -------------------------------------------------------------------------------- 1 | resource "openstack_networking_secgroup_v2" "secgroup_cap" { 2 | name = "${var.stack_name}-cap_lb_secgroup" 3 | description = "CAP security group" 4 | } 5 | 6 | resource "openstack_networking_secgroup_rule_v2" "http" { 7 | direction = "ingress" 8 | ethertype = "IPv4" 9 | protocol = "tcp" 10 | port_range_min = 80 11 | port_range_max = 80 12 | remote_ip_prefix = "0.0.0.0/0" 13 | security_group_id = openstack_networking_secgroup_v2.secgroup_cap.id 14 | } 15 | 16 | resource "openstack_networking_secgroup_rule_v2" "https" { 17 | 
direction = "ingress" 18 | ethertype = "IPv4" 19 | protocol = "tcp" 20 | port_range_min = 443 21 | port_range_max = 443 22 | remote_ip_prefix = "0.0.0.0/0" 23 | security_group_id = openstack_networking_secgroup_v2.secgroup_cap.id 24 | } 25 | 26 | resource "openstack_networking_secgroup_rule_v2" "port_2222" { 27 | direction = "ingress" 28 | ethertype = "IPv4" 29 | protocol = "tcp" 30 | port_range_min = 2222 31 | port_range_max = 2222 32 | remote_ip_prefix = "0.0.0.0/0" 33 | security_group_id = openstack_networking_secgroup_v2.secgroup_cap.id 34 | } 35 | 36 | resource "openstack_networking_secgroup_rule_v2" "port_2793" { 37 | direction = "ingress" 38 | ethertype = "IPv4" 39 | protocol = "tcp" 40 | port_range_min = 2793 41 | port_range_max = 2793 42 | remote_ip_prefix = "0.0.0.0/0" 43 | security_group_id = openstack_networking_secgroup_v2.secgroup_cap.id 44 | } 45 | 46 | resource "openstack_networking_secgroup_rule_v2" "port_4443" { 47 | direction = "ingress" 48 | ethertype = "IPv4" 49 | protocol = "tcp" 50 | port_range_min = 4443 51 | port_range_max = 4443 52 | remote_ip_prefix = "0.0.0.0/0" 53 | security_group_id = openstack_networking_secgroup_v2.secgroup_cap.id 54 | } 55 | 56 | resource "openstack_networking_secgroup_rule_v2" "port_7443" { 57 | direction = "ingress" 58 | ethertype = "IPv4" 59 | protocol = "tcp" 60 | port_range_min = 7443 61 | port_range_max = 7443 62 | remote_ip_prefix = "0.0.0.0/0" 63 | security_group_id = openstack_networking_secgroup_v2.secgroup_cap.id 64 | } 65 | 66 | resource "openstack_networking_secgroup_rule_v2" "port_8443" { 67 | direction = "ingress" 68 | ethertype = "IPv4" 69 | protocol = "tcp" 70 | port_range_min = 8443 71 | port_range_max = 8443 72 | remote_ip_prefix = "0.0.0.0/0" 73 | security_group_id = openstack_networking_secgroup_v2.secgroup_cap.id 74 | } 75 | 76 | resource "openstack_networking_secgroup_rule_v2" "tcp_high" { 77 | direction = "ingress" 78 | ethertype = "IPv4" 79 | protocol = "tcp" 80 | port_range_min = 10000 81 | port_range_max = 65535 82 | remote_ip_prefix = "0.0.0.0/0" 83 | security_group_id = openstack_networking_secgroup_v2.secgroup_cap.id 84 | } 85 | -------------------------------------------------------------------------------- /backend/check.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . include/common.sh 4 | . .envrc 5 | 6 | curl -s -Lo kube-ready-state-check.sh https://raw.githubusercontent.com/cloudfoundry-incubator/kubecf/master/dev/kube/kube-ready-state-check.sh 7 | chmod +x kube-ready-state-check.sh 8 | mv kube-ready-state-check.sh bin/ 9 | 10 | info "Testing imported k8s cluster" 11 | 12 | kube-ready-state-check.sh kube || true 13 | 14 | info "Adding cap-values configmap if missing" 15 | if ! kubectl get configmap cap-values -n kube-system 2>/dev/null | grep -qi cap-values; then 16 | ROOTFS=overlay-xfs 17 | # take first worker node as public ip if DOMAIN is not explicitly set: 18 | PUBLIC_IP="$(kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type == "InternalIP").address' | head -n 1)" 19 | 20 | if [ -z "${DOMAIN}" ]; then 21 | DOMAIN="$PUBLIC_IP.$MAGICDNS" 22 | fi 23 | if ! 
kubectl get configmap -n kube-system 2>/dev/null | grep -qi cap-values; then 24 | kubectl create configmap -n kube-system cap-values \ 25 | --from-literal=garden-rootfs-driver="${ROOTFS}" \ 26 | --from-literal=public-ip="${PUBLIC_IP}" \ 27 | --from-literal=domain="${DOMAIN}" \ 28 | --from-literal=platform="${BACKEND}" 29 | fi 30 | fi 31 | 32 | info "Initializing helm client" 33 | helm_init 34 | 35 | ok "k8s cluster imported successfully" 36 | -------------------------------------------------------------------------------- /backend/ekcp/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | .PHONY: clean 4 | clean: 5 | ./clean.sh 6 | 7 | .PHONY: up 8 | up: 9 | ./up.sh 10 | 11 | .PHONY: kubeconfig 12 | kubeconfig: 13 | ./kubeconfig.sh 14 | 15 | .PHONY: deps 16 | deps: 17 | # no-op 18 | 19 | .PHONY: prepare 20 | prepare: 21 | ./prepare.sh 22 | 23 | #.PHONY: prepare 24 | #prepare: 25 | # make -C ../kind prepare 26 | 27 | .PHONY: find-resources 28 | find-resources: 29 | echo "$@ not implemented yet for ${BACKEND}" 30 | exit 1 31 | 32 | .PHONY: force-clean-cluster 33 | force-clean-cluster: 34 | echo "$@ not implemented yet for ${BACKEND}" 35 | exit 1 36 | 37 | .PHONY: all 38 | all: up kubeconfig prepare 39 | -------------------------------------------------------------------------------- /backend/ekcp/clean.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | 6 | if [ -d "$BUILD_DIR" ]; then 7 | . .envrc 8 | curl -X DELETE http://"$EKCP_HOST"/"${CLUSTER_NAME}" 9 | popd || exit 10 | rm -rf "$BUILD_DIR" 11 | fi 12 | 13 | if [ -n "$FORCE_DELETE" ]; then 14 | curl -X DELETE http://"$EKCP_HOST"/"${CLUSTER_NAME}" 15 | fi 16 | -------------------------------------------------------------------------------- /backend/ekcp/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # EKCP options 4 | ############## 5 | 6 | EKCP_HOST=${EKCP_HOST:-} 7 | FORCE_DELETE=${FORCE_DELETE:-"false"} 8 | HELM_VERSION="${HELM_VERSION:-v3.1.1}" 9 | -------------------------------------------------------------------------------- /backend/ekcp/kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | curl http://"$EKCP_HOST"/kubeconfig/"${CLUSTER_NAME}" > kubeconfig 8 | ok "Kubeconfig for $BACKEND correctly imported" 9 | -------------------------------------------------------------------------------- /backend/ekcp/up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . 
.envrc 6 | 7 | curl -d "name=${CLUSTER_NAME}" -X POST http://$EKCP_HOST/new 8 | -------------------------------------------------------------------------------- /backend/eks/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | .PHONY: kubeconfig 4 | kubeconfig: 5 | ./kubeconfig.sh 6 | 7 | .PHONY: deps 8 | deps: 9 | ./deps.sh 10 | 11 | .PHONY: tfsetup 12 | tfsetup: 13 | ./tfsetup.sh 14 | 15 | .PHONY: deploy 16 | deploy: deps tfsetup 17 | ./deploy.sh 18 | 19 | .PHONY: clean 20 | clean: deps tfsetup 21 | ./clean.sh 22 | 23 | .PHONY: find-resources 24 | find-resources: 25 | echo "$@ not implemented yet for ${BACKEND}" 26 | exit 1 27 | 28 | .PHONY: force-clean-cluster 29 | force-clean-cluster: 30 | echo "$@ not implemented yet for ${BACKEND}" 31 | exit 1 32 | 33 | .PHONY: all 34 | all: deploy 35 | -------------------------------------------------------------------------------- /backend/eks/clean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Requires: 4 | # - aws credentials present 5 | 6 | . ./defaults.sh 7 | . ../../include/common.sh 8 | 9 | if [ -d "${BUILD_DIR}" ]; then 10 | . .envrc 11 | if [ -d "cap-terraform/eks" ]; then 12 | pushd cap-terraform/eks || exit 13 | terraform init 14 | terraform destroy -auto-approve 15 | popd || exit 16 | rm -rf cap-terraform 17 | fi 18 | 19 | popd || exit 20 | rm -rf "${BUILD_DIR}" 21 | ok "EKS cluster deleted successfully" 22 | else 23 | warn "BUILD_DIR ${BUILD_DIR} not found" 24 | fi 25 | 26 | -------------------------------------------------------------------------------- /backend/eks/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # EKS options 4 | ############# 5 | 6 | EKS_CLUSTER_NAME="${EKS_CLUSTER_NAME:-${OWNER}-cap}" 7 | EKS_LOCATION="${EKS_LOCATION:-us-west-2}" 8 | EKS_KEYPAIR="${EKS_KEYPAIR:-${OWNER}-terraform}" 9 | EKS_CLUSTER_LABEL=${EKS_CLUSTER_LABEL:-\{key = \"${OWNER}-eks-cluster\"\}} 10 | 11 | EKS_HOSTED_ZONE_NAME="${EKS_HOSTED_ZONE_NAME:-qa.aws.howdoi.website}" 12 | EKS_DOMAIN="${EKS_DOMAIN:-${EKS_CLUSTER_NAME}.${EKS_HOSTED_ZONE_NAME}}" 13 | EKS_DEPLOYER_ROLE_ARN="${EKS_DEPLOYER_ROLE_ARN:-}" 14 | EKS_CLUSTER_ROLE_NAME="${EKS_CLUSTER_ROLE_NAME:-}" 15 | EKS_CLUSTER_ROLE_ARN="${EKS_CLUSTER_ROLE_ARN:-}" 16 | EKS_WORKER_NODE_ROLE_NAME="${EKS_WORKER_NODE_ROLE_NAME:-}" 17 | EKS_WORKER_NODE_ROLE_ARN="${EKS_WORKER_NODE_ROLE_ARN:-}" 18 | 19 | KUBE_AUTHORIZED_ROLE_ARN="${KUBE_AUTHORIZED_ROLE_ARN:-}" 20 | 21 | # Settings for terraform state save/restore 22 | # 23 | # Set to a non-empty key to trigger state save in deploy.sh. 24 | TF_KEY="${TF_KEY:-}" 25 | 26 | # s3 bucket and bucket region to save state to. Ignored when 27 | # TF_KEY is empty (default, see above). 28 | TF_BUCKET="${TF_BUCKET:-cap-ci-tf}" 29 | TF_REGION="${TF_REGION:-us-west-2}" 30 | 31 | AWS_ACCESS_KEY_ID="${AWS_ACCESS_KEY_ID:-}" 32 | AWS_SECRET_ACCESS_KEY="${AWS_SECRET_ACCESS_KEY:-}" 33 | -------------------------------------------------------------------------------- /backend/eks/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Requires: 4 | # - aws credentials present 5 | 6 | . ./defaults.sh 7 | . ../../include/common.sh 8 | . .envrc 9 | 10 | if ! 
aws sts get-caller-identity ; then 11 | err "Missing aws credentials, run aws configure" 12 | # Use predefined aws env vars 13 | # https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html 14 | exit 1 15 | fi 16 | 17 | pushd cap-terraform/eks || exit 18 | terraform init 19 | terraform plan -out=my-plan 20 | # Retry once due to intermittent issues we hit 21 | terraform apply -auto-approve my-plan || terraform apply -auto-approve my-plan 22 | # get kubectl for eks: 23 | # aws eks --region "$EKS_LOCATION" update-kubeconfig --name "$EKS_CLUSTER_NAME" 24 | # or: 25 | terraform output kubeconfig > "$KUBECONFIG" 26 | popd || exit 27 | 28 | # wait for cluster ready: 29 | wait_ns kube-system 30 | 31 | # test deployment: 32 | kubectl get svc 33 | 34 | ROOTFS=overlay-xfs 35 | # take first worker node as public ip: 36 | PUBLIC_IP="$(kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type == "InternalIP").address' | head -n 1)" 37 | if ! kubectl get configmap -n kube-system 2>/dev/null | grep -qi cap-values; then 38 | kubectl create configmap -n kube-system cap-values \ 39 | --from-literal=garden-rootfs-driver="${ROOTFS}" \ 40 | --from-literal=public-ip="${PUBLIC_IP}" \ 41 | --from-literal=services="lb" \ 42 | --from-literal=domain="${EKS_DOMAIN}" \ 43 | --from-literal=platform=eks 44 | fi 45 | 46 | create_rolebinding() { 47 | 48 | kubectl create clusterrolebinding admin --clusterrole=cluster-admin --user=system:serviceaccount:kube-system:default 49 | kubectl create clusterrolebinding uaaadmin --clusterrole=cluster-admin --user=system:serviceaccount:uaa:default 50 | kubectl create clusterrolebinding scfadmin --clusterrole=cluster-admin --user=system:serviceaccount:scf:default 51 | 52 | kubectl apply -f - < kubeconfig 35 | 36 | elif [[ $mtype == "text/plain" ]] ; then 37 | info "Using kubeconfig ..." 38 | 39 | cp "$KUBECFG" kubeconfig 40 | else 41 | err "Please check your KUBECFG" 42 | fi 43 | 44 | if ! aws sts get-caller-identity ; then 45 | err "Missing aws credentials, run aws configure" 46 | # Use predefined aws env vars 47 | # https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html 48 | exit 1 49 | fi 50 | 51 | kubectl get nodes > /dev/null 2>&1 || exit 52 | 53 | ok "Kubeconfig for $BACKEND correctly imported" 54 | -------------------------------------------------------------------------------- /backend/eks/tfsetup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Requires: 4 | # - aws credentials present 5 | 6 | . ./defaults.sh 7 | . ../../include/common.sh 8 | [[ -d "${BUILD_DIR}" ]] || exit 0 9 | . .envrc 10 | 11 | if ! 
[[ -d cap-terraform ]]; then 12 | git clone "${CAP_TERRAFORM_REPOSITORY}" -b "${CAP_TERRAFORM_BRANCH}" 13 | fi 14 | pushd cap-terraform/eks || exit 15 | git checkout "${CAP_TERRAFORM_BRANCH}" 16 | git pull 17 | 18 | # terraform needs helm client installed and configured: 19 | helm_init_client 20 | 21 | cat < terraform.tfvars 22 | cluster_name = "${EKS_CLUSTER_NAME}" 23 | region = "${EKS_LOCATION}" 24 | keypair_name = "${EKS_KEYPAIR}" 25 | cluster_labels = ${EKS_CLUSTER_LABEL} 26 | hosted_zone_name = "${EKS_HOSTED_ZONE_NAME}" 27 | external_dns_aws_access_key = "${AWS_ACCESS_KEY_ID}" 28 | external_dns_aws_secret_key = "${AWS_SECRET_ACCESS_KEY}" 29 | deployer_role_arn = "${EKS_DEPLOYER_ROLE_ARN}" 30 | cluster_role_name = "${EKS_CLUSTER_ROLE_NAME}" 31 | cluster_role_arn = "${EKS_CLUSTER_ROLE_ARN}" 32 | worker_node_role_name = "${EKS_WORKER_NODE_ROLE_NAME}" 33 | worker_node_role_arn = "${EKS_WORKER_NODE_ROLE_ARN}" 34 | kube_authorized_role_arn = "${KUBE_AUTHORIZED_ROLE_ARN}" 35 | HEREDOC 36 | 37 | if [ -n "${TF_KEY}" ] ; then 38 | cat > backend.tf < kubeclusterreference 27 | --- 28 | kind: ClusterReference 29 | platform: gke 30 | cluster-name: ${GKE_CLUSTER_NAME} 31 | cluster-zone: ${GKE_LOCATION} 32 | project: ${GKE_PROJECT} 33 | EOF 34 | 35 | # wait for cluster ready: 36 | wait_ns kube-system 37 | 38 | info "Configuring deployed GKE cluster…" 39 | 40 | ROOTFS=overlay-xfs 41 | # take first worker node as public ip: 42 | PUBLIC_IP="$(kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type == "InternalIP").address' | head -n 1)" 43 | if ! kubectl get configmap -n kube-system 2>/dev/null | grep -qi cap-values; then 44 | kubectl create configmap -n kube-system cap-values \ 45 | --from-literal=garden-rootfs-driver="${ROOTFS}" \ 46 | --from-literal=public-ip="${PUBLIC_IP}" \ 47 | --from-literal=services="lb" \ 48 | --from-literal=domain="${GKE_DNSDOMAIN}" \ 49 | --from-literal=platform=gke 50 | fi 51 | 52 | create_rolebinding() { 53 | 54 | kubectl create clusterrolebinding admin --clusterrole=cluster-admin --user=system:serviceaccount:kube-system:default 55 | kubectl create clusterrolebinding uaaadmin --clusterrole=cluster-admin --user=system:serviceaccount:uaa:default 56 | kubectl create clusterrolebinding scfadmin --clusterrole=cluster-admin --user=system:serviceaccount:scf:default 57 | 58 | kubectl apply -f - <> .envrc 21 | fi 22 | 23 | terraformpath=bin/terraform 24 | if [ ! -e "$terraformpath" ]; then 25 | if [[ "$OSTYPE" == "darwin"* ]]; then 26 | curl -o terraform.zip https://releases.hashicorp.com/terraform/0.12.29/terraform_0.12.29_darwin_amd64.zip 27 | else 28 | curl -o terraform.zip https://releases.hashicorp.com/terraform/0.12.29/terraform_0.12.29_linux_amd64.zip 29 | fi 30 | unzip terraform.zip && rm -rf terraform.zip 31 | chmod +x terraform && mv terraform bin/ 32 | fi 33 | -------------------------------------------------------------------------------- /backend/gke/force-clean-cluster.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Requires: 4 | # - gcloud credentials present 5 | # - jq 6 | 7 | # OPTIONS: 8 | # GKE_CRED_JSON gcloud credentials. 9 | # GKE_LOCATION The AZ the cluster is deployed into. 10 | # GKE_PROJECT gcloud project. 11 | # OWNER The owner of the clusters to filter on 12 | # RESOURCE_LIST Resources to remove, as generated by `make find-resources`. 13 | # CLUSTER_SELECTOR JQ expression for filtering clusters, defaults to all. 
14 | # ADDRESS_SELECTOR JQ expression for filtering addresses, defaults to all. 15 | # DISK_SELECTOR JQ expression for filtering disks, defaults to all. 16 | # ACTUALLY_DELETE Delete will only be executed if this is set to `true`. 17 | 18 | . ./defaults.sh 19 | . ../../include/common.sh 20 | . .envrc 21 | . "${ROOT_DIR}/backend/gke/lib/auth.sh" 22 | 23 | # Check that the resource list is given 24 | if [[ ! -r "${RESOURCE_LIST:-}" ]]; then 25 | err "Could not read resource list ${RESOURCE_LIST:-(RESOURCE_LIST not set)}" 26 | exit 1 27 | fi 28 | 29 | if [[ "${ACTUALLY_DELETE:-}" = "true" ]]; then 30 | PREFIX="" 31 | else 32 | PREFIX="echo" 33 | # shellcheck disable=SC2016 34 | warn 'ACTUALLY_DELETE not set to `true`, doing dry-run' 35 | fi 36 | 37 | CLUSTER_JQ_EXPR=" 38 | .clusters[] 39 | | select(.labels.owner == \"${OWNER}\") 40 | | select(${CLUSTER_SELECTOR:-true}) 41 | | .name 42 | " 43 | for cluster in $(jq -r "${CLUSTER_JQ_EXPR}" "${RESOURCE_LIST}"); do 44 | info "Removing cluster ${cluster}" 45 | ${PREFIX} gcloud container clusters delete "${cluster}" \ 46 | --quiet --project="${GKE_PROJECT}" --zone="${GKE_LOCATION}" 47 | done 48 | 49 | for address in $(jq -r ".addresses[] | select(${ADDRESS_SELECTOR:-true}) | .name" "${RESOURCE_LIST}"); do 50 | info "Removing address ${address}" 51 | ${PREFIX} gcloud compute addresses delete "${address}" --quiet --region="${GKE_LOCATION%-[abcdef]}" 52 | done 53 | 54 | for disk in $(jq -r ".disks[] | select(${DISK_SELECTOR:-true}) | .name" "${RESOURCE_LIST}"); do 55 | info "Removing disk ${disk}" 56 | ${PREFIX} gcloud compute disks delete "${disk}" \ 57 | --quiet --project="${GKE_PROJECT}" --zone="${GKE_LOCATION}" 58 | done 59 | 60 | dns_zone="$(jq -r '.dns.zone' "${RESOURCE_LIST}")" 61 | mapfile -t recordsets < <( 62 | jq -r --arg zone "${dns_zone}" ' 63 | . as $root 64 | | .dns.entries[] 65 | | select([.info["external-dns/owner"]] | inside($root.clusters | map(.name)) | not) 66 | | .recordsets[] 67 | | ["--name=\(.name)", "--type=\(.type)", "--zone=\($zone)", "--ttl=\(.ttl)"] + .rrdatas 68 | | @sh 69 | ' "${RESOURCE_LIST}" 70 | ) 71 | 72 | if [[ "${#recordsets[@]}" -gt 0 ]]; then 73 | ${PREFIX} gcloud dns record-sets transaction start --zone="${dns_zone}" 74 | for recordset in "${recordsets[@]}"; do 75 | record_name="$(grep --perl-regexp --only-matching -- "--name='?\K[^']+(?=')" <<<"${recordset}")" 76 | record_type="$(grep --perl-regexp --only-matching -- "--type='?\K[^']+(?=')" <<<"${recordset}")" 77 | info "Removing DNS record set ${record_name} (type ${record_type})" 78 | eval "${PREFIX} gcloud dns record-sets transaction remove ${recordset}" 79 | done 80 | info "Committing DNS changes…" 81 | ${PREFIX} gcloud dns record-sets transaction execute --zone="${dns_zone}" 82 | fi 83 | -------------------------------------------------------------------------------- /backend/gke/kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # KUBECFG can be a kubeconfig file or a cluster reference file for local deployments 4 | # KUBECFG has to be a kubeclusterreference for CI usage, check gke/deploy.sh for format 5 | if [ ! -f "$KUBECFG" ]; then 6 | err "No KUBECFG given - you need to pass one!" 7 | exit 1 8 | fi 9 | 10 | if [[ $(yq r ${KUBECFG} kind) == "ClusterReference" ]]; then 11 | # Process kubeclusterreference file 12 | echo "Using kubeclusterreference ..."
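# Descriptive note: the yq reads below recover the fields (cluster-name, cluster-zone, project) that gke/deploy.sh wrote into the kubeclusterreference file; they feed the 'gcloud container clusters get-credentials' call further down.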
13 | GKE_CLUSTER_NAME="$(yq r ${KUBECFG} cluster-name)" 14 | GKE_CLUSTER_ZONE="$(yq r ${KUBECFG} cluster-zone)" 15 | GKE_PROJECT="$(yq r ${KUBECFG} project)" 16 | export GKE_CLUSTER_NAME GKE_CLUSTER_ZONE GKE_PROJECT 17 | elif [[ $(yq r ${KUBECFG} kind) == "Config" ]]; then 18 | echo "Using kubeconfig ..." 19 | else 20 | echo "Please check your KUBECFG" 21 | exit 1 22 | fi 23 | 24 | . ./defaults.sh 25 | . ../../include/common.sh 26 | . .envrc 27 | . "${ROOT_DIR}/backend/gke/lib/auth.sh" 28 | 29 | gcloud container clusters get-credentials ${GKE_CLUSTER_NAME} --zone ${GKE_CLUSTER_ZONE} 30 | 31 | # kubeconfig gets hardcoded paths for gcloud bin, reevaluate them: 32 | gcloud_path="$(which gcloud)" 33 | gcloud_path_esc=$(echo "$gcloud_path" | sed 's_/_\\/_g') 34 | sed -e "s/\(cmd-path\: \).*/\1$gcloud_path_esc/" kubeconfig > kubeconfig.bkp 35 | mv kubeconfig.bkp kubeconfig 36 | 37 | kubectl get nodes 1> /dev/null || exit 38 | 39 | ok "Kubeconfig for $BACKEND correctly imported" 40 | -------------------------------------------------------------------------------- /backend/gke/lib/auth.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This file is expected to be sourced from the other scripts in the GKE directory; 4 | # it is just the standard block to ensure we have authenticated correctly. 5 | 6 | # check gcloud credentials: 7 | info "Using creds from GKE_CRED_JSON…" 8 | gcloud auth revoke 2>/dev/null || true 9 | gcloud auth activate-service-account --project "$GKE_PROJECT" --key-file "$GKE_CRED_JSON" 10 | if [[ $(gcloud auth list --format="value(account)" | wc -l ) -le 0 ]]; then 11 | err "GKE_CRED_JSON creds don't authenticate, aborting" && exit 1 12 | fi 13 | -------------------------------------------------------------------------------- /backend/gke/tfsetup.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Requires: 4 | # - gcloud credentials present 5 | 6 | . ./defaults.sh 7 | . ../../include/common.sh 8 | [[ -d "${BUILD_DIR}" ]] || exit 0 9 | . .envrc 10 | 11 | 12 | if ! 
[[ -d cap-terraform ]]; then 13 | git clone "${CAP_TERRAFORM_REPOSITORY}" -b "${CAP_TERRAFORM_BRANCH}" 14 | fi 15 | pushd cap-terraform/gke || exit 16 | git checkout "${CAP_TERRAFORM_BRANCH}" 17 | git pull 18 | 19 | # Clear out any existing variables file 20 | cat > terraform.tfvars.json < terraform.tfvars.temp.json 42 | mv terraform.tfvars.temp.json terraform.tfvars.json 43 | fi 44 | 45 | if [ -n "${GKE_INSTANCE_TYPE}" ] ; then 46 | jq --monochrome-output --arg type "${GKE_INSTANCE_TYPE}" \ 47 | '.instance_type = $type' terraform.tfvars.json > terraform.tfvars.temp.json 48 | mv terraform.tfvars.temp.json terraform.tfvars.json 49 | fi 50 | 51 | if [ -n "${TF_KEY}" ] ; then 52 | cat > backend.tf < kind-config.yaml < kind-config.yaml </dev/null | grep -qi cap-values; then 59 | kubectl create configmap -n kube-system cap-values \ 60 | --from-literal=public-ip="${container_ip}" \ 61 | --from-literal=domain="$domain" \ 62 | --from-literal=services="$services" \ 63 | --from-literal=platform="minikube" 64 | fi 65 | 66 | ok "Minikube is ready" 67 | -------------------------------------------------------------------------------- /config/config.toml: -------------------------------------------------------------------------------- 1 | disabled_plugins = ["aufs", "btrfs", "zfs"] 2 | root = "/var/lib/containerd" 3 | state = "/run/containerd" 4 | oom_score = 0 5 | 6 | [grpc] 7 | address = "/run/containerd/containerd.sock" 8 | uid = 0 9 | gid = 0 10 | max_recv_message_size = 16777216 11 | max_send_message_size = 16777216 12 | 13 | [debug] 14 | address = "" 15 | uid = 0 16 | gid = 0 17 | level = "" 18 | 19 | [metrics] 20 | address = "" 21 | grpc_histogram = false 22 | 23 | [cgroup] 24 | path = "" 25 | 26 | [plugins] 27 | [plugins.cgroups] 28 | no_prometheus = false 29 | [plugins.cri] 30 | stream_server_address = "127.0.0.1" 31 | stream_server_port = "0" 32 | enable_selinux = false 33 | sandbox_image = "k8s.gcr.io/pause:3.1" 34 | stats_collect_period = 10 35 | systemd_cgroup = false 36 | enable_tls_streaming = false 37 | max_container_log_line_size = 16384 38 | [plugins.cri.containerd] 39 | snapshotter = "overlayfs" 40 | no_pivot = false 41 | [plugins.cri.containerd.default_runtime] 42 | runtime_type = "io.containerd.runtime.v1.linux" 43 | runtime_engine = "" 44 | runtime_root = "" 45 | [plugins.cri.containerd.untrusted_workload_runtime] 46 | runtime_type = "" 47 | runtime_engine = "" 48 | runtime_root = "" 49 | [plugins.cri.cni] 50 | bin_dir = "/opt/cni/bin" 51 | conf_dir = "/etc/cni/net.d" 52 | conf_template = "" 53 | [plugins.cri.registry] 54 | [plugins.cri.registry.mirrors] 55 | [plugins.cri.registry.mirrors."local.insecure-registry.io"] 56 | endpoint = ["http://localhost:32001"] 57 | [plugins.cri.registry.mirrors."docker.io"] 58 | endpoint = ["https://registry-1.docker.io"] 59 | [plugins.cri.x509_key_pair_streaming] 60 | tls_cert_file = "" 61 | tls_key_file = "" 62 | [plugins.diff-service] 63 | default = ["walking"] 64 | [plugins.linux] 65 | shim = "containerd-shim" 66 | runtime = "runc" 67 | runtime_root = "" 68 | no_shim = false 69 | shim_debug = false 70 | [plugins.opt] 71 | path = "/opt/containerd" 72 | [plugins.restart] 73 | interval = "10s" 74 | [plugins.scheduler] 75 | pause_threshold = 0.02 76 | deletion_threshold = 0 77 | mutation_threshold = 100 78 | schedule_delay = "0s" 79 | startup_delay = "100ms" -------------------------------------------------------------------------------- /contrib/assets/scf_states.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/SUSE/catapult/f2c1a1b31db60769da5e8d3f8f73bb7d18856179/contrib/assets/scf_states.png -------------------------------------------------------------------------------- /contrib/assets/scf_states.tex: -------------------------------------------------------------------------------- 1 | % create a png with latexmk scf_states.tex -shell-escape 2 | 3 | % \documentclass[12pt]{article} % use this one for pdf output 4 | \documentclass[preview,border=4mm,convert={density=600,outext=.png}]{standalone} % use this one for png output 5 | 6 | \usepackage[english]{babel} 7 | \usepackage[utf8x]{inputenc} 8 | \usepackage{amsmath} 9 | \usepackage{tikz} 10 | \usetikzlibrary{arrows,automata} 11 | \begin{document} 12 | 13 | \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=3.5cm, 14 | scale = 1,transform shape, thick] 15 | 16 | \node[state] (k8s) {$k8s$}; 17 | \node[state] (scf-chart) [below of=k8s] {$scf_0$}; 18 | \node[state] (scf-gen-config) [below of=scf-chart] {$scf_1$}; 19 | \node[state] (scf-install) [below of=scf-gen-config] {$scf_2$}; 20 | \node[state] (scf-login) [below of=scf-install] {$scf$}; 21 | \node (all) [below right of=k8s] {$all$}; 22 | 23 | \path 24 | (k8s) edge[bend right, left] node {make\ scf-chart} (scf-chart) 25 | (scf-chart) edge[bend right, left] node {make\ scf-gen-config} (scf-gen-config) 26 | (scf-gen-config) edge[bend right, left] node {make\ scf-install} (scf-install) 27 | (scf-install) edge[bend right, left] node {make\ scf-login} (scf-login) 28 | (all) edge[bend right, right] node {make\ scf-clean} (k8s); 29 | 30 | \end{tikzpicture} 31 | \end{document} -------------------------------------------------------------------------------- /contrib/assets/states.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SUSE/catapult/f2c1a1b31db60769da5e8d3f8f73bb7d18856179/contrib/assets/states.png -------------------------------------------------------------------------------- /contrib/assets/states.tex: -------------------------------------------------------------------------------- 1 | % create a png with latexmk states.tex -shell-escape 2 | 3 | % \documentclass[12pt]{article} % use this one for pdf output 4 | \documentclass[preview,border=4mm,convert={density=600,outext=.png}]{standalone} % use this one for png output 5 | 6 | \usepackage[english]{babel} 7 | \usepackage[utf8x]{inputenc} 8 | \usepackage{amsmath} 9 | \usepackage{tikz} 10 | \usetikzlibrary{arrows,automata} 11 | \begin{document} 12 | 13 | \begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=3.5cm, 14 | scale = 1,transform shape, thick] 15 | 16 | \node[state,initial] (zero) {$\emptyset$}; 17 | \node[state] (k8s) [below of=zero] {$k8s$}; 18 | \node[state] (scf) [below of=k8s] {$scf$}; 19 | \node[state] (stratos) [below of=scf,above=2cm, right=5cm] {$stratos$}; 20 | \node[state,accepting] (tested) [below of=scf] {$tested$}; 21 | \node (all) [right of=zero, right=0.5cm] {}; 22 | 23 | \path 24 | (zero) edge[bend right, left] node {make\ k8s} (k8s) 25 | (k8s) edge[bend right,right] node {make\ clean} (zero) 26 | (all) edge[above] node {make\ clean} (zero) 27 | (k8s) edge[bend right, left] node {make\ scf} (scf) 28 | (scf) edge[bend right,right] node {make\ scf-clean} (k8s) 29 | (scf) edge[bend right, left] node {make\ stratos} (stratos) 30 | (scf) edge[loop left, left] node {make\ scf-upgrade} (scf) 31 | (stratos) edge[bend right,right] node {make\ stratos-clean} (scf) 32 | (stratos) edge[loop right, 
right] node {make\ stratos-upgrade} (stratos) 33 | (scf) edge[bend right, left] node {make\ test\_*} (tested) 34 | (stratos) edge[bend left, right] node {make\ test\_*} (tested); 35 | 36 | \end{tikzpicture} 37 | \end{document} -------------------------------------------------------------------------------- /contrib/samples/eirini-persi-test/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io/ioutil" 7 | "net/http" 8 | "os" 9 | "path" 10 | ) 11 | 12 | type Volume struct { 13 | Dir string `json:"container_dir"` 14 | } 15 | 16 | type Persi struct { 17 | VolumeMounts []Volume `json:"volume_mounts"` 18 | } 19 | 20 | var Created bool 21 | 22 | func main() { 23 | fmt.Println("Starting eirini persi app") 24 | Created = false 25 | http.HandleFunc("/", handler) 26 | 27 | Services := os.Getenv("VCAP_SERVICES") // Get JSON from Env 28 | var Persi map[string][]Persi // This is our structure 29 | err := json.Unmarshal([]byte(Services), &Persi) 30 | if err != nil { 31 | panic(err) 32 | } 33 | PersiDir := Persi["eirini-persi"][0].VolumeMounts[0].Dir 34 | PersiTestFile := path.Join(PersiDir, "persitest") 35 | if !fileExists(PersiTestFile) { 36 | fmt.Println("Creating persistence file in ", PersiTestFile) 37 | d1 := []byte("persitest\n") 38 | err := ioutil.WriteFile(PersiTestFile, d1, os.ModePerm) 39 | if err != nil { 40 | panic(err) 41 | } 42 | Created = true 43 | } else { 44 | fmt.Println("Persistence file already exists") 45 | } 46 | 47 | http.ListenAndServe(":"+os.Getenv("PORT"), nil) 48 | } 49 | 50 | func fileExists(filename string) bool { 51 | info, err := os.Stat(filename) 52 | if os.IsNotExist(err) { 53 | return false 54 | } 55 | return !info.IsDir() 56 | } 57 | func handler(w http.ResponseWriter, r *http.Request) { 58 | fmt.Println("Serving request", r) 59 | if Created { 60 | 61 | fmt.Fprintf(w, "1") 62 | return 63 | } 64 | 65 | fmt.Fprintf(w, "0") 66 | } 67 | -------------------------------------------------------------------------------- /contrib/samples/eirini-persi-test/manifest.yml: -------------------------------------------------------------------------------- 1 | applications: 2 | - name: persitest 3 | stack: cflinuxfs3 4 | command: ./persi-test 5 | buildpack: binary_buildpack 6 | memory: 20M 7 | disk_quota: 20M 8 | -------------------------------------------------------------------------------- /contrib/samples/ticking_app/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | import ( 3 | "fmt" 4 | "net/http" 5 | "os" 6 | "time" 7 | ) 8 | 9 | func main() { 10 | http.HandleFunc("/", handler) 11 | 12 | ticker := time.NewTicker(2 * time.Second) 13 | instanceGuid := os.Getenv("CF_INSTANCE_GUID") + ":" + os.Getenv("CF_INSTANCE_INDEX") 14 | 15 | go func() { 16 | for t := range ticker.C { 17 | 18 | fmt.Printf("[%s] Ticking %s\n", instanceGuid, t.Format("2006-01-02 15:04:05")) 19 | } 20 | }() 21 | 22 | http.ListenAndServe(":"+os.Getenv("PORT"), nil) 23 | } 24 | 25 | func handler(w http.ResponseWriter, r *http.Request) { 26 | fmt.Fprintf(w, "Leave me alone, I'm ticking in logs!") 27 | } 28 | -------------------------------------------------------------------------------- /contrib/samples/ticking_app/manifest.yml: -------------------------------------------------------------------------------- 1 | applications: 2 | - name: ticking 3 | stack: cflinuxfs3 4 | command: ./log_producing_app 5 | buildpack: binary_buildpack 6 | memory: 20M 7 | 
disk_quota: 20M 8 | -------------------------------------------------------------------------------- /include/buildir.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # duplicated in include/common.sh, needed for bootstrapping: 4 | . $ROOT_DIR/include/common.sh 5 | set +Eeuo pipefail # unset options as we will call include/common.sh again 6 | 7 | info "Creating $BUILD_DIR" 8 | 9 | if [ ! -d "$BUILD_DIR" ]; then 10 | mkdir "$BUILD_DIR" 11 | fi 12 | 13 | . $ROOT_DIR/include/common.sh # Reload, as we just created BUILD_DIR 14 | 15 | if [ ! -d "bin" ]; then 16 | mkdir bin 17 | fi 18 | 19 | info "Generating .envrc" 20 | cat <<HEREDOC > .envrc 21 | export CLUSTER_NAME=${CLUSTER_NAME} 22 | export BACKEND=${BACKEND} 23 | HEREDOC 24 | 25 | cat <<HEREDOC_APPEND >> .envrc 26 | export KUBECONFIG="$(pwd)"/kubeconfig 27 | 28 | export HELM_HOME="$(pwd)"/.helm # for helm 2 29 | # The following are needed for helm 3: 30 | export XDG_CACHE_HOME="$(pwd)/.cache" 31 | export XDG_CONFIG_HOME="$(pwd)/.config" 32 | export XDG_DATA_HOME="$(pwd)/.local/share" 33 | 34 | export CF_HOME="$(pwd)" 35 | export PATH="$(pwd)"/bin:"$PATH" 36 | export MINIKUBE_HOME="$(pwd)"/.minikube 37 | export CLOUDSDK_CONFIG="$(pwd)/.config/gcloud" 38 | HEREDOC_APPEND 39 | 40 | info "Generating default options file" 41 | rm -rf defaults.sh 42 | echo '#!/usr/bin/env bash' >> defaults.sh 43 | echo >> defaults.sh 44 | echo '# DISCLAIMER!!!!!!!!' >> defaults.sh 45 | echo '# DISCLAIMER!!!!!!!! CHANGING THIS FILE HAS NO EFFECT ANYWHERE for now' >> defaults.sh 46 | echo '# DISCLAIMER!!!!!!!! It is a concat of all possible options,' >> defaults.sh 47 | echo '# DISCLAIMER!!!!!!!! only for your viewing pleasure' >> defaults.sh 48 | echo '# DISCLAIMER!!!!!!!!' >> defaults.sh 49 | sed '1d' "$ROOT_DIR"/include/defaults_global.sh >> defaults.sh 50 | set +x 51 | sed '1d' "$ROOT_DIR"/include/defaults_global_private.sh >> defaults.sh 52 | debug_mode 53 | 54 | for d in "$ROOT_DIR"/backend/*/ ; do 55 | if [ -f "$d"/defaults.sh ]; then 56 | sed '1d' "$d"/defaults.sh >> defaults.sh 57 | fi 58 | done 59 | for d in "$ROOT_DIR"/modules/*/ ; do 60 | if [ -f "$d"/defaults.sh ]; then 61 | sed '1d' "$d"/defaults.sh >> defaults.sh 62 | fi 63 | done 64 | 65 | popd || exit 66 | -------------------------------------------------------------------------------- /include/colors.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function red() { printf '\e[31m%b\e[0m' "$1" ; } 4 | function green() { printf '\e[32m%b\e[0m' "$1" ; } 5 | function blue() { printf '\e[34m%b\e[0m' "$1" ; } 6 | function cyan() { printf '\e[36m%b\e[0m' "$1" ; } 7 |
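A generated .envrc is meant to be sourced from inside the per-cluster build directory, either manually or via direnv; a minimal sketch of typical usage, with the directory name derived from the kind defaults above:

# Enter the build dir created by include/buildir.sh (CLUSTER_NAME=kind => buildkind)
cd buildkind
source .envrc          # direnv users get this automatically on cd
kubectl get nodes      # talks to this cluster through the exported KUBECONFIG
cf api                 # cf state is isolated here too, via CF_HOME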
-------------------------------------------------------------------------------- /include/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export DEBUG_MODE="${DEBUG_MODE:-false}" 4 | source "$ROOT_DIR"/include/func.sh 5 | source "$ROOT_DIR"/include/colors.sh 6 | 7 | debug_mode 8 | 9 | if [ -n "${CONFIG:-}" ]; then 10 | load_env_from_json "$CONFIG" 11 | fi 12 | 13 | export VALUES_OVERRIDE="${VALUES_OVERRIDE:-}" 14 | OVERRIDE= 15 | if [ -n "$VALUES_OVERRIDE" ] && [ -f "$VALUES_OVERRIDE" ]; then 16 | OVERRIDE=$(cat "$VALUES_OVERRIDE") 17 | export OVERRIDE 18 | fi 19 | 20 | export BACKEND="${BACKEND:-kind}" 21 | export CLUSTER_NAME=${CLUSTER_NAME:-$BACKEND} 22 | #export ROOT_DIR="$(git rev-parse --show-toplevel)" 23 | export BUILD_DIR="$ROOT_DIR"/build${CLUSTER_NAME} 24 | 25 | # Forces our build context 26 | [ -d "$BUILD_DIR" ] && pushd "$BUILD_DIR" || true 27 | 28 | . "$ROOT_DIR"/include/defaults_global.sh 29 | set +x 30 | . "$ROOT_DIR"/include/defaults_global_private.sh 31 | debug_mode 32 | 33 | info "Loading" 34 | 35 | # Set the most restrictive bash options possible for the following scripts. 36 | # If needed, relax options in the specific script. 37 | set -Eeuo pipefail 38 | -------------------------------------------------------------------------------- /include/defaults_global.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Global options 4 | ################ 5 | 6 | export DOCKER_REGISTRY="${DOCKER_REGISTRY:-registry.suse.com}" 7 | export DOCKER_ORG="${DOCKER_ORG:-cap}" 8 | 9 | export DEFAULT_STACK="${DEFAULT_STACK:-from_chart}" # from_chart, sle15, sle12, cflinuxfs2, cflinuxfs3 10 | export MAGICDNS="${MAGICDNS:-omg.howdoi.website}" 11 | export ENABLE_EIRINI="${ENABLE_EIRINI:-false}" 12 | export EKCP_PROXY="${EKCP_PROXY:-}" 13 | export KUBEPROXY_PORT="${KUBEPROXY_PORT:-2224}" 14 | export QUIET_OUTPUT="${QUIET_OUTPUT:-false}" 15 | 16 | # Download binaries of helm, kubectl, cf, etc 17 | export DOWNLOAD_BINS="${DOWNLOAD_BINS:-true}" 18 | 19 | # Download binaries of catapult dependencies 20 | export DOWNLOAD_CATAPULT_DEPS="${DOWNLOAD_CATAPULT_DEPS:-true}" 21 | export CAP_TERRAFORM_REPOSITORY="${CAP_TERRAFORM_REPOSITORY:-https://github.com/SUSE/cap-terraform.git}" 22 | export CAP_TERRAFORM_BRANCH="${CAP_TERRAFORM_BRANCH:-cap-ci}" 23 | -------------------------------------------------------------------------------- /include/defaults_global_private.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Global private defaults (not shown on output) 4 | ############################################### 5 | 6 | export DOCKER_USERNAME="${DOCKER_USERNAME:-}" 7 | export DOCKER_PASSWORD="${DOCKER_PASSWORD:-}" 8 | 9 | # Only for scf, for kubecf we let it be generated, and read it 10 | export CLUSTER_PASSWORD="${CLUSTER_PASSWORD:-password}" 11 | -------------------------------------------------------------------------------- /include/versioning.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -Eeo pipefail 4 | 5 | GIT_ROOT=${GIT_ROOT:-$(git rev-parse --show-toplevel)} 6 | GIT_DESCRIBE=${GIT_DESCRIBE:-$(git describe --tags --long)} 7 | GIT_BRANCH=${GIT_BRANCH:-$(git name-rev --name-only HEAD)} 8 | if [ -n "$GIT_DESCRIBE" ]; then 9 | 10 | GIT_TAG=${GIT_TAG:-$(echo ${GIT_DESCRIBE} | gawk -F - '{ print $1 }' )} 11 | GIT_COMMITS=${GIT_COMMITS:-$(echo ${GIT_DESCRIBE} | gawk -F - '{ print $2 }' )} 12 | GIT_SHA=${GIT_SHA:-$(echo ${GIT_DESCRIBE} | gawk -F - '{ print $3 }' )} 13 | 14 | ARTIFACT_NAME=${ARTIFACT_NAME:-$(basename "$(git config --get remote.origin.url)" .git | sed s/^scf-//)} 15 | ARTIFACT_VERSION=${GIT_TAG}.${GIT_COMMITS}.${GIT_SHA} 16 | else 17 | # shellcheck disable=SC2034 18 | ARTIFACT_VERSION=latest 19 | fi 20 |
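versioning.sh splits the `git describe --tags --long` output on dashes; a small illustration of the same derivation in plain parameter expansion (the describe string is an example, not a real tag):

# Example input shape: <tag>-<commits-since-tag>-g<sha>
describe="2.18.0-14-gabc1234"
tag="${describe%%-*}"             # 2.18.0
rest="${describe#*-}"
commits="${rest%%-*}"             # 14
sha="${rest#*-}"                  # gabc1234
echo "${tag}.${commits}.${sha}"   # -> 2.18.0.14.gabc1234, i.e. ARTIFACT_VERSION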
"s/sh/EKCP_HOST=$EKCP_HOST sh/" /etc/crontabs/root 10 | 11 | # start crond with log level 8 in foreground, output to stderr 12 | CMD ["crond", "-f", "-d", "8"] 13 | -------------------------------------------------------------------------------- /kube/catapult-sync/cronjobs: -------------------------------------------------------------------------------- 1 | */5 * * * * flock -x /tmp/cron -c "sh /usr/local/bin/sync.sh 2>&1" 2 | -------------------------------------------------------------------------------- /kube/catapult-sync/sync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | CLUSTERS="$(curl $EKCP_HOST| jq -rc '.Clusters[].Name')" 5 | TTY_IMAGE="${TTY_IMAGE:-catapult-wtty}" 6 | ACTIVE_TTYS="$(docker ps --format '{{.Names}}' --filter "name=catapult-wtty")" 7 | 8 | echo "Remove dead wttys" 9 | for i in $ACTIVE_TTYS; do 10 | c=$(echo $i | sed 's/catapult-wtty-//') 11 | if echo $CLUSTERS | grep -q -v $c; 12 | then 13 | docker rm --force $i 14 | fi 15 | done 16 | 17 | echo "Creating new ttys" 18 | v=$(($(echo $ACTIVE_TTYS | wc -w)+1)) 19 | for i in $CLUSTERS; do 20 | if echo $ACTIVE_TTYS | grep -q -v $i; 21 | then 22 | port=$((70+$v)) 23 | echo "Creating tty for $i at $port" 24 | docker run --name catapult-wtty-$i -d --rm -p 70$port:8080 -e EKCP_HOST=$EKCP_HOST -e CLUSTER_NAME=$i "$TTY_IMAGE" 25 | fi 26 | v=$(($v+1)) 27 | done -------------------------------------------------------------------------------- /kube/catapult-web/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM splatform/catapult 2 | #FROM golang:alpine 3 | #RUN apk update && apk add docker 4 | ENV TTY_IMAGE=catapult-wtty 5 | COPY main.go /app/ 6 | 7 | ENTRYPOINT ["go", "run", "/app/main.go"] -------------------------------------------------------------------------------- /kube/catapult-wtty/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM splatform/catapult 2 | 3 | ARG TTYD_VERSION=1.5.2 4 | ARG TTYD_OS_TYPE=linux.x86_64 5 | RUN wget https://github.com/tsl0922/ttyd/releases/download/$TTYD_VERSION/ttyd_$TTYD_OS_TYPE -O /usr/bin/ttyd 6 | RUN chmod +x /usr/bin/ttyd 7 | RUN zypper install -y tmux 8 | ENV BACKEND=ekcp 9 | 10 | EXPOSE 8080 11 | WORKDIR /catapult 12 | 13 | ENTRYPOINT [ "/usr/bin/ttyd", "-p" ,"8080", "/usr/bin/make" ] 14 | # Spawn a tmux inside this container: 15 | #ENTRYPOINT [ "/usr/bin/ttyd", "-p" ,"8080", "tmux", "new", "-A", "-s", "catapult", "/usr/bin/make" ] 16 | CMD [ "recover", "module-extra-terminal" ] 17 | -------------------------------------------------------------------------------- /kube/cf-operator/boshdeployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: ops-scale 6 | data: 7 | ops: | 8 | - type: replace 9 | path: /instance_groups/name=nats?/instances 10 | value: 2 11 | --- 12 | apiVersion: v1 13 | kind: ConfigMap 14 | metadata: 15 | name: nats-manifest 16 | data: 17 | manifest: | 18 | --- 19 | name: nats-deployment 20 | releases: 21 | - name: nats 22 | version: "26" 23 | url: docker.io/cfcontainerization 24 | stemcell: 25 | os: opensuse-42.3 26 | version: 30.g9c91e77-30.80-7.0.0_257.gb97ced55 27 | instance_groups: 28 | - name: nats 29 | instances: 1 30 | jobs: 31 | - name: nats 32 | release: nats 33 | properties: 34 | nats: 35 | user: admin 36 | password: ((nats_password)) 37 | quarks: 38 | ports: 39 | - name: 
"nats" 40 | protocol: "TCP" 41 | internal: 4222 42 | - name: "nats-routes" 43 | protocol: TCP 44 | internal: 4223 45 | variables: 46 | - name: nats_password 47 | type: password 48 | --- 49 | apiVersion: quarks.cloudfoundry.org/v1alpha1 50 | kind: BOSHDeployment 51 | metadata: 52 | name: nats-deployment 53 | spec: 54 | manifest: 55 | name: nats-manifest 56 | type: configmap 57 | ops: 58 | - name: ops-scale 59 | type: configmap 60 | -------------------------------------------------------------------------------- /kube/cf-operator/password.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: quarks.cloudfoundry.org/v1alpha1 2 | kind: QuarksSecret 3 | metadata: 4 | name: generate-password 5 | spec: 6 | type: password 7 | secretName: gen-secret1 8 | -------------------------------------------------------------------------------- /kube/cf-operator/qstatefulset_tolerations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: quarks.cloudfoundry.org/v1alpha1 2 | kind: QuarksStatefulSet 3 | metadata: 4 | name: example-quarks-statefulset 5 | spec: 6 | template: 7 | metadata: 8 | labels: 9 | app: example-statefulset 10 | spec: 11 | replicas: 1 12 | template: 13 | metadata: 14 | labels: 15 | app: example-statefulset 16 | spec: 17 | containers: 18 | - name: busybox 19 | image: busybox 20 | command: 21 | - sleep 22 | - "3600" 23 | tolerations: 24 | - key: "key" 25 | operator: "Equal" 26 | value: "value" 27 | effect: "NoSchedule" 28 | -------------------------------------------------------------------------------- /kube/dind.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: dind 6 | namespace: catapult 7 | labels: 8 | app: catapult-dind 9 | spec: 10 | containers: 11 | - name: dind-daemon 12 | image: docker:1.12.6-dind 13 | resources: 14 | requests: 15 | cpu: 20m 16 | memory: 512Mi 17 | securityContext: 18 | privileged: true 19 | volumeMounts: 20 | - name: docker-graph-storage 21 | mountPath: /var/lib/docker 22 | volumes: 23 | - name: docker-graph-storage 24 | emptyDir: {} 25 | --- 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | name: dind 30 | namespace: catapult 31 | spec: 32 | selector: 33 | app: catapult-dind 34 | ports: 35 | - protocol: TCP 36 | port: 2375 37 | targetPort: 2375 38 | -------------------------------------------------------------------------------- /kube/registry.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: container-registry 6 | --- 7 | kind: PersistentVolumeClaim 8 | apiVersion: v1 9 | metadata: 10 | name: registry-claim 11 | namespace: container-registry 12 | spec: 13 | accessModes: 14 | - ReadWriteMany 15 | volumeMode: Filesystem 16 | resources: 17 | requests: 18 | storage: 5Gi 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | labels: 24 | app: registry 25 | name: registry 26 | namespace: container-registry 27 | spec: 28 | replicas: 1 29 | selector: 30 | matchLabels: 31 | app: registry 32 | template: 33 | metadata: 34 | labels: 35 | app: registry 36 | spec: 37 | containers: 38 | - name: registry 39 | image: cdkbot/registry-amd64:2.6 40 | env: 41 | - name: REGISTRY_HTTP_ADDR 42 | value: :5000 43 | - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY 44 | value: /var/lib/registry 45 | - name: REGISTRY_STORAGE_DELETE_ENABLED 46 | value: "yes" 
47 | ports: 48 | - containerPort: 5000 49 | name: registry 50 | protocol: TCP 51 | volumeMounts: 52 | - mountPath: /var/lib/registry 53 | name: registry-data 54 | volumes: 55 | - name: registry-data 56 | persistentVolumeClaim: 57 | claimName: registry-claim 58 | --- 59 | apiVersion: v1 60 | kind: Service 61 | metadata: 62 | labels: 63 | app: registry 64 | name: registry 65 | namespace: container-registry 66 | spec: 67 | type: NodePort 68 | selector: 69 | app: registry 70 | ports: 71 | - name: "registry" 72 | port: 5000 73 | targetPort: 5000 74 | nodePort: 32001 75 | -------------------------------------------------------------------------------- /kube/smokes/pod.yaml.erb: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Pod 4 | metadata: 5 | name: smokes 6 | spec: 7 | restartPolicy: Never 8 | containers: 9 | - name: smokes 10 | image: splatform/concourse-brats 11 | command: 12 | - /bin/sh 13 | - -ec 14 | - | 15 | #!/bin/bash 16 | 17 | set -ex 18 | 19 | git clone $SMOKES_REPO 20 | 21 | pushd cf-smoke-tests || exit 22 | cat > config.json <" 57 | - name: CLUSTER_PASSWORD 58 | value: "<%= ENV["CLUSTER_PASSWORD"] %>" 59 | - name: SMOKES_REPO 60 | value: "<%= ENV["SMOKES_REPO"] %>" 61 | -------------------------------------------------------------------------------- /kube/socks.yaml: -------------------------------------------------------------------------------- 1 | #https://github.com/weaveworks/build-tools/blob/master/socks/connect.sh 2 | --- 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: socksproxy 7 | namespace: default 8 | spec: 9 | containers: 10 | - name: socksproxy 11 | image: weaveworks/socksproxy 12 | ports: 13 | - name: socks 14 | containerPort: 8000 15 | - name: http 16 | containerPort: 8080 17 | -------------------------------------------------------------------------------- /kube/socks/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gliderlabs/alpine 2 | WORKDIR / 3 | COPY proxy / 4 | EXPOSE 8000 5 | EXPOSE 8080 6 | ENTRYPOINT ["/proxy"] 7 | 8 | ARG revision 9 | LABEL maintainer="Weaveworks " \ 10 | org.opencontainers.image.title="socks" \ 11 | org.opencontainers.image.source="https://github.com/weaveworks/build-tools/tree/master/socks" \ 12 | org.opencontainers.image.revision="${revision}" \ 13 | org.opencontainers.image.vendor="Weaveworks" 14 | -------------------------------------------------------------------------------- /kube/socks/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2018 Weaveworks. All rights reserved. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
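The socksproxy pod (kube/socks.yaml above) is normally reached through a port-forward; a minimal sketch, with the pod name and ports taken from that manifest and the target host purely illustrative:

# Forward the SOCKS (8000) and pacfile (8080) ports locally
kubectl port-forward pod/socksproxy 8000:8000 8080:8080 &
# Browsers consume the generated proxy auto-config:
curl -s http://127.0.0.1:8080/proxy.pac
# Ad-hoc CLI access through the tunnel (curl understands socks5:// proxies):
https_proxy=socks5://127.0.0.1:8000 curl -k https://api.cf.example.internal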
14 | -------------------------------------------------------------------------------- /kube/socks/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all clean 2 | 3 | IMAGE_TAR=image.tar 4 | IMAGE_NAME=weaveworks/socksproxy 5 | GIT_REVISION := $(shell git rev-parse HEAD) 6 | PROXY_EXE=proxy 7 | NETGO_CHECK=@strings $@ | grep cgo_stub\\\.go >/dev/null || { \ 8 | rm $@; \ 9 | echo "\nYour go standard library was built without the 'netgo' build tag."; \ 10 | echo "To fix that, run"; \ 11 | echo " sudo go clean -i net"; \ 12 | echo " sudo go install -tags netgo std"; \ 13 | false; \ 14 | } 15 | 16 | all: $(IMAGE_TAR) 17 | 18 | $(IMAGE_TAR): Dockerfile $(PROXY_EXE) 19 | docker build --build-arg=revision=$(GIT_REVISION) -t $(IMAGE_NAME) . 20 | docker save $(IMAGE_NAME):latest > $@ 21 | 22 | $(PROXY_EXE): *.go 23 | go get -tags netgo ./$(@D) 24 | go build -ldflags "-extldflags \"-static\" -linkmode=external" -tags netgo -o $@ ./$(@D) 25 | $(NETGO_CHECK) 26 | 27 | clean: 28 | -docker rmi $(IMAGE_NAME) 29 | rm -rf $(PROXY_EXE) $(IMAGE_TAR) 30 | go clean ./... 31 | -------------------------------------------------------------------------------- /kube/socks/README.md: -------------------------------------------------------------------------------- 1 | The sock server implementation in this directory is forked from 2 | https://github.com/weaveworks/build-tools/tree/master/socks. 3 | 4 | -------------------------------------------------------------------------------- /kube/socks/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "net" 7 | "net/http" 8 | "os" 9 | "strings" 10 | "text/template" 11 | 12 | socks5 "github.com/armon/go-socks5" 13 | "github.com/weaveworks/common/mflag" 14 | "github.com/weaveworks/common/mflagext" 15 | ) 16 | 17 | type pacFileParameters struct { 18 | HostMatch string 19 | SocksDestination string 20 | Aliases map[string]string 21 | } 22 | 23 | const ( 24 | pacfile = ` 25 | function FindProxyForURL(url, host) { 26 | if(shExpMatch(host, "{{.HostMatch}}")) { 27 | return "SOCKS5 {{.SocksDestination}}"; 28 | } 29 | {{range $key, $value := .Aliases}} 30 | if (host == "{{$key}}") { 31 | return "SOCKS5 {{.SocksDestination}}"; 32 | } 33 | {{end}} 34 | return "DIRECT"; 35 | } 36 | ` 37 | ) 38 | 39 | func main() { 40 | var ( 41 | as []string 42 | hostMatch string 43 | socksDestination string 44 | ) 45 | mflagext.ListVar(&as, []string{"a", "-alias"}, []string{}, "Specify hostname aliases in the form alias:hostname. 
Can be repeated.") 46 | mflag.StringVar(&hostMatch, []string{"h", "-host-match"}, "*.weave.local", "Specify main host shExpMatch expression in pacfile") 47 | mflag.StringVar(&socksDestination, []string{"d", "-socks-destination"}, "localhost:8000", "Specify destination host:port in pacfile") 48 | mflag.Parse() 49 | 50 | var aliases = map[string]string{} 51 | for _, a := range as { 52 | parts := strings.SplitN(a, ":", 2) 53 | if len(parts) != 2 { 54 | fmt.Printf("'%s' is not a valid alias.\n", a) 55 | mflag.Usage() 56 | os.Exit(1) 57 | } 58 | aliases[parts[0]] = parts[1] 59 | } 60 | 61 | go socksProxy(aliases) 62 | 63 | t := template.Must(template.New("pacfile").Parse(pacfile)) 64 | http.HandleFunc("/proxy.pac", func(w http.ResponseWriter, r *http.Request) { 65 | w.Header().Set("Content-Type", "application/x-ns-proxy-autoconfig") 66 | t.Execute(w, pacFileParameters{hostMatch, socksDestination, aliases}) 67 | }) 68 | 69 | if err := http.ListenAndServe(":8080", nil); err != nil { 70 | panic(err) 71 | } 72 | } 73 | 74 | type aliasingResolver struct { 75 | aliases map[string]string 76 | socks5.NameResolver 77 | } 78 | 79 | func (r aliasingResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) { 80 | if alias, ok := r.aliases[name]; ok { 81 | return r.NameResolver.Resolve(ctx, alias) 82 | } 83 | return r.NameResolver.Resolve(ctx, name) 84 | } 85 | 86 | func socksProxy(aliases map[string]string) { 87 | conf := &socks5.Config{ 88 | Resolver: aliasingResolver{ 89 | aliases: aliases, 90 | NameResolver: socks5.DNSResolver{}, 91 | }, 92 | } 93 | server, err := socks5.New(conf) 94 | if err != nil { 95 | panic(err) 96 | } 97 | if err := server.ListenAndServe("tcp", ":8000"); err != nil { 98 | panic(err) 99 | } 100 | } 101 | -------------------------------------------------------------------------------- /kube/storageclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: hostpath-provisioner 5 | namespace: kube-system 6 | --- 7 | 8 | kind: ClusterRole 9 | apiVersion: rbac.authorization.k8s.io/v1beta1 10 | metadata: 11 | name: hostpath-provisioner 12 | namespace: kube-system 13 | rules: 14 | - apiGroups: [""] 15 | resources: ["persistentvolumes"] 16 | verbs: ["get", "list", "watch", "create", "delete"] 17 | - apiGroups: [""] 18 | resources: ["persistentvolumeclaims"] 19 | verbs: ["get", "list", "watch", "update"] 20 | - apiGroups: ["storage.k8s.io"] 21 | resources: ["storageclasses"] 22 | verbs: ["get", "list", "watch"] 23 | - apiGroups: [""] 24 | resources: ["events"] 25 | verbs: ["list", "watch", "create", "update", "patch"] 26 | --- 27 | 28 | kind: ClusterRoleBinding 29 | apiVersion: rbac.authorization.k8s.io/v1beta1 30 | metadata: 31 | name: hostpath-provisioner 32 | namespace: kube-system 33 | subjects: 34 | - kind: ServiceAccount 35 | name: hostpath-provisioner 36 | namespace: kube-system 37 | roleRef: 38 | kind: ClusterRole 39 | name: hostpath-provisioner 40 | apiGroup: rbac.authorization.k8s.io 41 | --- 42 | 43 | apiVersion: rbac.authorization.k8s.io/v1beta1 44 | kind: Role 45 | metadata: 46 | name: hostpath-provisioner 47 | namespace: kube-system 48 | rules: 49 | - apiGroups: [""] 50 | resources: ["secrets"] 51 | verbs: ["create", "get", "delete"] 52 | --- 53 | 54 | apiVersion: rbac.authorization.k8s.io/v1beta1 55 | kind: RoleBinding 56 | metadata: 57 | name: hostpath-provisioner 58 | namespace: kube-system 59 | roleRef: 60 | apiGroup: rbac.authorization.k8s.io 61 
| kind: Role 62 | name: hostpath-provisioner 63 | subjects: 64 | - kind: ServiceAccount 65 | name: hostpath-provisioner 66 | --- 67 | 68 | # -- Create a pod in the kube-system namespace to run the host path provisioner 69 | apiVersion: v1 70 | kind: Pod 71 | metadata: 72 | namespace: kube-system 73 | name: hostpath-provisioner 74 | spec: 75 | serviceAccountName: hostpath-provisioner 76 | containers: 77 | - name: hostpath-provisioner 78 | image: mazdermind/hostpath-provisioner:latest 79 | imagePullPolicy: "IfNotPresent" 80 | env: 81 | - name: NODE_NAME 82 | valueFrom: 83 | fieldRef: 84 | fieldPath: spec.nodeName 85 | - name: PV_DIR 86 | value: /mnt/kubernetes-pv-manual 87 | 88 | volumeMounts: 89 | - name: pv-volume 90 | mountPath: /mnt/kubernetes-pv-manual 91 | volumes: 92 | - name: pv-volume 93 | hostPath: 94 | path: /mnt/kubernetes-pv-manual 95 | --- 96 | 97 | # -- Create the standard storage class for running on-node hostpath storage 98 | apiVersion: storage.k8s.io/v1 99 | kind: StorageClass 100 | metadata: 101 | namespace: kube-system 102 | name: persistent 103 | annotations: 104 | storageclass.beta.kubernetes.io/is-default-class: "true" 105 | labels: 106 | kubernetes.io/cluster-service: "true" 107 | addonmanager.kubernetes.io/mode: EnsureExists 108 | provisioner: hostpath 109 | -------------------------------------------------------------------------------- /kube/task.yaml: -------------------------------------------------------------------------------- 1 | #https://github.com/weaveworks/build-tools/blob/master/socks/connect.sh 2 | --- 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: task 7 | namespace: catapult 8 | spec: 9 | containers: 10 | - name: task 11 | image: splatform/catapult 12 | command: ["/bin/bash"] 13 | imagePullPolicy: Always 14 | args: ["-c", "while true; do sleep 50000;done"] 15 | env: 16 | - name: DOCKER_HOST 17 | value: tcp://dind:2375 18 | -------------------------------------------------------------------------------- /modules/common/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | .PHONY: deps 3 | deps: 4 | ./deps.sh 5 | 6 | .PHONY: all 7 | all: deps -------------------------------------------------------------------------------- /modules/common/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Common options 4 | ################## 5 | 6 | # Cluster owner (for metadata) 7 | OWNER="${OWNER:-$(whoami)}" 8 | 9 | # Dependencies options 10 | ###################### 11 | 12 | # Default versions in case the are undefined in the backend: 13 | KUBECTL_VERSION="${KUBECTL_VERSION:-v1.17.0}" 14 | HELM_VERSION="${HELM_VERSION:-v3.2.4}" 15 | -------------------------------------------------------------------------------- /modules/common/deps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . "$ROOT_DIR/backend/$BACKEND/defaults.sh" 4 | . ./defaults.sh 5 | . ../../include/common.sh 6 | . 
.envrc 7 | 8 | if [[ "$OSTYPE" == "darwin"* ]]; then 9 | export HELM_OS_TYPE="${HELM_OS_TYPE:-darwin-amd64}" 10 | export KUBECTL_OS_TYPE="${KUBECTL_OS_TYPE:-darwin}" 11 | export CFCLI_OS_TYPE="${CFCLI_OS_TYPE:-macosx64}" 12 | export YAMLPATCH_OS_TYPE="${YAMLPATCH_OS_TYPE:-darwin}" 13 | export YQ_OS_TYPE="${YQ_OS_TYPE:-darwin_amd64}" 14 | else 15 | export HELM_OS_TYPE="${HELM_OS_TYPE:-linux-amd64}" 16 | export KUBECTL_OS_TYPE="${KUBECTL_OS_TYPE:-linux}" 17 | export CFCLI_OS_TYPE="${CFCLI_OS_TYPE:-linux64}" 18 | export YAMLPATCH_OS_TYPE="${YAMLPATCH_OS_TYPE:-linux}" 19 | export YQ_OS_TYPE="${YQ_OS_TYPE:-linux_amd64}" 20 | fi 21 | 22 | 23 | if [[ "$DOWNLOAD_BINS" == "false" ]]; then 24 | ok "Skipping downloading bins, using host binaries" 25 | else 26 | info "Downloading specific helm, kubectl, cf versions…" 27 | if [ ! -e "bin/helm" ]; then 28 | curl -sSL https://get.helm.sh/helm-${HELM_VERSION}-${HELM_OS_TYPE}.tar.gz | tar zxf - 29 | mv $HELM_OS_TYPE/helm bin/ 30 | if [ -e "$HELM_OS_TYPE/tiller" ]; then 31 | mv $HELM_OS_TYPE/tiller bin/ 32 | fi 33 | rm -rf "$HELM_OS_TYPE" 34 | info "Helm version:" 35 | helm_info 36 | fi 37 | 38 | if [ ! -e "bin/kubectl" ]; then 39 | curl -sSLO https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/${KUBECTL_OS_TYPE}/amd64/kubectl 40 | mv kubectl bin/ 41 | chmod +x bin/kubectl 42 | info "Kubectl version:" 43 | kubectl version 2>&1 | grep Client || true 44 | fi 45 | 46 | if [ ! -e "bin/cf" ]; then 47 | curl -sSL "https://packages.cloudfoundry.org/stable?release=${CFCLI_OS_TYPE}-binary&version=7.1.0&source=github" | tar -zx 48 | rm -rf "$CFCLI_OS_TYPE" cf LICENSE NOTICE 49 | mv cf7 bin/cf 50 | chmod +x bin/cf 51 | info "CF cli version:" 52 | cf version 53 | fi 54 | fi 55 | 56 | 57 | if [[ "$DOWNLOAD_CATAPULT_DEPS" == "false" ]]; then 58 | ok "Skipping downloading catapult dependencies, using host binaries" 59 | else 60 | yamlpatchpath=bin/yaml-patch 61 | if [ ! -e "$yamlpatchpath" ]; then 62 | wget "https://github.com/krishicks/yaml-patch/releases/download/v0.0.10/yaml_patch_${YAMLPATCH_OS_TYPE}" -O $yamlpatchpath 63 | chmod +x $yamlpatchpath 64 | fi 65 | 66 | yqpath=bin/yq 67 | if [ ! -e "$yqpath" ]; then 68 | wget "https://github.com/mikefarah/yq/releases/download/3.2.1/yq_${YQ_OS_TYPE}" -O $yqpath 69 | chmod +x $yqpath 70 | fi 71 | fi 72 | 73 | 74 | ok "Deps correctly downloaded" 75 | -------------------------------------------------------------------------------- /modules/experimental/Makefile: -------------------------------------------------------------------------------- 1 | # no default goal 2 | # .DEFAULT_GOAL := 3 | 4 | .PHONY: eirini_release 5 | eirini_release: 6 | ./eirini_release.sh 7 | 8 | .PHONY: eirinifs 9 | eirinifs: 10 | ./eirinifs.sh 11 | 12 | .PHONY: airgap-up 13 | airgap-up: airgap-down 14 | ./airgap_up.sh 15 | 16 | PHONY: airgap-down 17 | airgap-down: 18 | ./airgap_down.sh 19 | 20 | .PHONY: tf_force_clean 21 | tf_force_clean: 22 | ./tf_force_clean.sh 23 | 24 | .PHONY: tf_auto_deploy 25 | tf_auto_deploy: 26 | ./tf_auto_deploy.sh 27 | -------------------------------------------------------------------------------- /modules/experimental/airgap_down.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc || exit 0 6 | 7 | if [[ ${BACKEND} != "caasp4os" ]]; then 8 | info "airgap simulation only works with caasp4os type backends. 
No airgap rules to clean" 9 | exit 0 10 | fi 11 | 12 | airgap_down_node() { 13 | local kube_node host_ip 14 | kube_node=$1 15 | info "Removing airgap iptables rules for ${kube_node}" 16 | host_ip=$(ssh sles@$kube_node 'echo $SSH_CONNECTION' | awk '{ print $1 }') 17 | if ! grep -qE "([0-9]{1,3}\.){3}[0-9]{1,3}" <<< $host_ip; then 18 | err "Couldn't get catapult host IP from CaaSP node for iptables whitelist" 19 | exit 1 20 | fi 21 | # shellcheck disable=SC2087 22 | ssh -T sles@${kube_node} << EOF 23 | sudo -s << 'EOS' 24 | if iptables -D OUTPUT -j DROP -d 0.0.0.0/0 2>/dev/null; then 25 | iptables -D OUTPUT -j ACCEPT -d ${host_ip} 2>/dev/null || true 26 | iptables -D OUTPUT -j ACCEPT -d 0.0.0.0 2>/dev/null || true 27 | iptables -D OUTPUT -j ACCEPT -d 10.0.0.0/8 2>/dev/null || true 28 | iptables -D OUTPUT -j ACCEPT -d 100.64.0.0/10 2>/dev/null || true 29 | iptables -D OUTPUT -j ACCEPT -d 127.0.0.0/8 2>/dev/null || true 30 | iptables -D OUTPUT -j ACCEPT -d 172.16.0.0/12 2>/dev/null || true 31 | iptables -D OUTPUT -j ACCEPT -d 192.168.0.0/16 2>/dev/null || true 32 | else 33 | echo "Could not remove DROP 0.0.0.0/0 in OUTPUT chain. Skipping other iptable deletions" 34 | fi 35 | EOS 36 | EOF 37 | } 38 | 39 | kube_nodes=$(kubectl get nodes -o json | jq -r '.items[] | .status.addresses[] | select(.type=="InternalIP").address') 40 | kube_nodes_unreachable=$(kubectl get nodes -o json | jq -C -r '[.items[] | select((.spec.taints // [])[] | .key == "node.kubernetes.io/unreachable") | .status.addresses[] | select(.type=="InternalIP").address] | unique[]') 41 | kube_nodes_reachable=$(comm -23 <(echo "${kube_nodes}") <(echo "${kube_nodes_unreachable}")) 42 | for kube_node in ${kube_nodes_reachable}; do 43 | airgap_down_node ${kube_node} 44 | done 45 | kubectl delete --ignore-not-found -n cf-operator -f ../modules/experimental/cilium-block-egress.yaml 46 | kubectl delete --ignore-not-found -n scf -f ../modules/experimental/cilium-block-egress.yaml 47 | -------------------------------------------------------------------------------- /modules/experimental/airgap_up.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | if [[ ${BACKEND} != "caasp4os" ]]; then 8 | err "airgap simulation only works with caasp4os type backends" 9 | exit 1 10 | fi 11 | 12 | airgap_up_node() { 13 | local kube_node host_ip 14 | kube_node=$1 15 | info "Adding airgap iptables rules for ${kube_node}" 16 | host_ip=$(ssh sles@$kube_node 'echo $SSH_CONNECTION' | awk '{ print $1 }') 17 | if ! 
grep -qE "([0-9]{1,3}\.){3}[0-9]{1,3}" <<< $host_ip; then 18 | err "Couldn't get catapult host IP from CaaSP node for iptables whitelist" 19 | exit 1 20 | fi 21 | # shellcheck disable=SC2087 22 | ssh -T sles@${kube_node} << EOF 23 | sudo -s << 'EOS' 24 | iptables -A OUTPUT -j ACCEPT -d ${host_ip} 25 | iptables -A OUTPUT -j ACCEPT -d 0.0.0.0 26 | iptables -A OUTPUT -j ACCEPT -d 10.0.0.0/8 27 | iptables -A OUTPUT -j ACCEPT -d 100.64.0.0/10 28 | iptables -A OUTPUT -j ACCEPT -d 127.0.0.0/8 29 | iptables -A OUTPUT -j ACCEPT -d 172.16.0.0/12 30 | iptables -A OUTPUT -j ACCEPT -d 192.168.0.0/16 31 | iptables -A OUTPUT -j DROP -d 0.0.0.0/0 32 | EOS 33 | EOF 34 | } 35 | 36 | kube_nodes=$(kubectl get nodes -o json | jq -r '.items[] | .status.addresses[] | select(.type=="InternalIP").address') 37 | kube_nodes_unreachable=$(kubectl get nodes -o json | jq -C -r '[.items[] | select((.spec.taints // [])[] | .key == "node.kubernetes.io/unreachable") | .status.addresses[] | select(.type=="InternalIP").address] | unique[]') 38 | kube_nodes_reachable=$(comm -23 <(echo "${kube_nodes}") <(echo "${kube_nodes_unreachable}")) 39 | for kube_node in ${kube_nodes_reachable}; do 40 | airgap_up_node ${kube_node} 41 | done 42 | kubectl create namespace cf-operator 2>/dev/null|| true 43 | kubectl create namespace scf 2>/dev/null || true 44 | kubectl create -n cf-operator -f ../modules/experimental/cilium-block-egress.yaml 45 | kubectl create -n scf -f ../modules/experimental/cilium-block-egress.yaml 46 | 47 | info "Cluster ${CLUSTER_NAME} is now running a simulated airgapped setup. Run \`make module-experimental-airgap-down\` to restore internet access" 48 | 49 | -------------------------------------------------------------------------------- /modules/experimental/cilium-block-egress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "cilium.io/v2" 2 | kind: CiliumNetworkPolicy 3 | metadata: 4 | name: "cilium-airgap-simulation" 5 | spec: 6 | - endpointSelector: 7 | matchLabels: {} 8 | egress: 9 | - toEndpoints: 10 | - matchLabels: 11 | "k8s:io.kubernetes.pod.namespace": cf-operator 12 | ingress: 13 | - fromEndpoints: 14 | - matchLabels: 15 | "k8s:io.kubernetes.pod.namespace": cf-operator 16 | - endpointSelector: 17 | matchLabels: {} 18 | egress: 19 | - toEndpoints: 20 | - matchLabels: 21 | "k8s:io.kubernetes.pod.namespace": scf 22 | - endpointSelector: 23 | matchLabels: {} 24 | ingress: 25 | - fromEndpoints: 26 | - matchLabels: 27 | "k8s:io.kubernetes.pod.namespace": scf 28 | - endpointSelector: 29 | matchLabels: 30 | {} 31 | egress: 32 | - toCIDR: 33 | - 0.0.0.0/32 34 | - 10.0.0.0/8 35 | - 100.64.0.0/10 36 | - 127.0.0.0/8 37 | - 172.16.0.0/12 38 | - 192.168.0.0/16 39 | -------------------------------------------------------------------------------- /modules/experimental/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Experimental module options 4 | ############################# 5 | 6 | EIRINI_RELEASE_REPO="${EIRINI_RELEASE_REPO:-https://github.com/mudler/eirini-release}" 7 | EIRINI_RELEASE_CHECKOUT="${EIRINI_RELEASE_CHECKOUT:-eirini_logging}" 8 | 9 | EIRINIFS=${EIRINIFS:-https://github.com/os-fun/eirinifs.git} 10 | EIRINISSH=${EIRINISSH:-https://github.com/SUSE/eirini-ssh} 11 | -------------------------------------------------------------------------------- /modules/experimental/eirinifs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 
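# Context: eirinifs.tar is the root filesystem that Eirini-staged apps run on.
# The steps below bake the diego-ssh sshd into it, rebuild the tarball inside
# the eirini/ci image, and hot-swap it into the running bits-service pod.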
3 | # Builds and patches eirinifs in a live cluster 4 | 5 | . ./defaults.sh 6 | . ../../include/common.sh 7 | . .envrc 8 | 9 | [ ! -d "eirinifs" ] && git clone --recurse-submodules "${EIRINIFS}" 10 | pushd eirinifs || exit 11 | git pull 12 | popd || exit 13 | [ ! -d "diego-ssh" ] && git clone --recurse-submodules "${EIRINISSH}" 14 | pushd diego-ssh || exit 15 | git pull 16 | popd || exit 17 | 18 | pushd diego-ssh/cmd/sshd || exit 19 | go build 20 | popd || exit 21 | 22 | cp -rfv diego-ssh/cmd/sshd/sshd eirinifs/image 23 | pushd eirinifs || exit 24 | 25 | docker run --rm --privileged -it --workdir / -v $PWD:/eirinifs eirini/ci /bin/bash -c "/eirinifs/ci/build-eirinifs/task.sh && mv /go/src/github.com/cloudfoundry-incubator/eirinifs/image/eirinifs.tar /eirinifs/image" 26 | 27 | sudo chmod 777 image/eirinifs.tar && kubectl cp image/eirinifs.tar scf/bits-0:/var/vcap/store/bits-service/assets/eirinifs.tar 28 | 29 | popd || exit 30 | kubectl exec -it -n scf bits-0 -- bash -c -l "monit restart bits-service" 31 | -------------------------------------------------------------------------------- /modules/extra/Makefile: -------------------------------------------------------------------------------- 1 | # no default goal 2 | # .DEFAULT_GOAL := 3 | 4 | .PHONY: kwt 5 | kwt: 6 | ./kwt.sh 7 | 8 | .PHONY: kwt-connect 9 | kwt-connect: 10 | ./kwt_connect.sh 11 | 12 | .PHONY: task 13 | task: 14 | ./task.sh 15 | 16 | .PHONY: terminal 17 | terminal: 18 | ./terminal.sh 19 | 20 | .PHONY: web 21 | web: 22 | ./web.sh 23 | 24 | .PHONY: ingress 25 | ingress: 26 | ./ingress.sh 27 | 28 | .PHONY: ingress-forward 29 | ingress-forward: 30 | ./ingress_forward.sh 31 | 32 | .PHONY: registry 33 | registry: 34 | ./registry.sh 35 | 36 | .PHONY: concourse 37 | concourse: 38 | ./concourse.sh 39 | 40 | .PHONY: log 41 | log: 42 | ./log.sh 43 | 44 | .PHONY: fissile 45 | fissile: 46 | ./fissile.sh 47 | 48 | .PHONY: top 49 | top: 50 | ./top.sh 51 | 52 | .PHONY: drone 53 | drone: 54 | ./drone.sh 55 | 56 | 57 | .PHONY: gitea 58 | gitea: 59 | ./gitea.sh
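Each target above is a thin wrapper around the script of the same name; from a checkout they are normally driven through the top-level Makefile, which prefixes module targets. A short sketch, assuming the module-extra-* naming used elsewhere in this document and an existing cluster:

# Bring up a cluster, then layer extras on top of it
BACKEND=kind CLUSTER_NAME=demo make k8s
make module-extra-ingress           # deploy an ingress controller
make module-extra-ingress-forward   # keep a local forward to it open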
-------------------------------------------------------------------------------- /modules/extra/concourse.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | LOCAL_ACCESS=${LOCAL_ACCESS:-true} 7 | CONCOURSE_PASSWORD="${CONCOURSE_PASSWORD:-password}" 8 | CONCOURSE_USER="${CONCOURSE_USER:-admin}" 9 | CONCOURSE_DRIVER="${CONCOURSE_DRIVER:-btrfs}" 10 | 11 | info "Deploying concourse from the helm charts" 12 | domain=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 13 | public_ip=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["public-ip"]') 14 | aux_external_ips=($(kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type == "InternalIP").address')) 15 | external_ips+="\"$public_ip\"" 16 | for (( i=0; i < ${#aux_external_ips[@]}; i++ )); do 17 | external_ips+=", \"${aux_external_ips[$i]}\"" 18 | done 19 | 20 | if [ "${LOCAL_ACCESS}" == "true" ]; then 21 | domain="127.0.0.1:8080" 22 | fi 23 | 24 | helm_delete catapult-concourse || true 25 | kubectl delete pvc -l app=catapult-concourse-worker || true 26 | kubectl delete namespace catapult-concourse-main || true 27 | 28 | cat > concourse-values.yml < […lost in extraction: the heredoc body, the rest of concourse.sh, and the remaining modules/extra scripts — only stray heredoc openers survive ("cat > gitea-config-values.yaml <", "cat > nginx_proxy_deployment.yaml <", "cat > nginx_proxy_service.yaml <", "cat > securitygroup.json <") — plus the head of modules/kubecf; the dump resumes inside the clean script…] -------------------------------------------------------------------------------- /modules/kubecf/clean.sh: -------------------------------------------------------------------------------- […file lines 1-15 (shebang and sourcing) lost in extraction…] 16 | if helm_ls 2>/dev/null | grep -qi minibroker ; then 17 | # minibroker testsuite may leave leftovers, 18 | # https://github.com/SUSE/minibroker-integration-tests/issues/24 19 | helm ls -n minibroker --short | xargs -L1 helm delete -n minibroker 20 | fi 21 | if kubectl get namespaces 2>/dev/null | grep -qi minibroker ; then 22 | kubectl delete --ignore-not-found namespace minibroker 23 | fi 24 | 25 | if helm_ls 2>/dev/null | grep -qi susecf-scf ; then 26 | helm_delete susecf-scf --namespace scf 27 | fi 28 | if kubectl get namespaces 2>/dev/null | grep -qi scf ; then 29 | kubectl delete --ignore-not-found namespace scf 30 | fi 31 | 32 | if kubectl get psp 2>/dev/null | grep -qi susecf-scf ; then 33 | kubectl delete --ignore-not-found psp susecf-scf-default 34 | fi 35 | 36 | if helm_ls 2>/dev/null | grep -qi cf-operator ; then 37 | helm_delete cf-operator --namespace cf-operator 38 | fi 39 | if kubectl get namespaces 2>/dev/null | grep -qi cf-operator ; then 40 | kubectl delete --ignore-not-found namespace cf-operator 41 | fi 42 | 43 | for webhook in $(kubectl get validatingwebhookconfigurations.admissionregistration.k8s.io \ 44 | --no-headers -o custom-columns=":metadata.name" | grep cf-operator); 45 | do 46 | kubectl delete validatingwebhookconfigurations.admissionregistration.k8s.io "$webhook" 47 | done 48 | 49 | for webhook in $(kubectl get mutatingwebhookconfigurations.admissionregistration.k8s.io \ 50 | --no-headers -o custom-columns=":metadata.name" | grep cf-operator); 51 | do 52 | kubectl delete mutatingwebhookconfigurations.admissionregistration.k8s.io "$webhook" 53 | done 54 | 55 | for crd in $(kubectl get crd \ 56 | --no-headers -o custom-columns=":metadata.name" | grep quark); 57 | do 58 | kubectl delete crd "$crd" 59 | done 60 | 61 | if kubectl get namespaces 2>/dev/null | grep -qi eirini ; then 62 | kubectl delete --ignore-not-found namespace eirini 63 | fi 64 | if helm_ls 2>/dev/null | grep -qi metrics-server ; then 65 | helm_delete metrics-server 66 | fi 67 | 68 | rm -rf scf-config-values.yaml chart helm kube "$CF_HOME"/.cf 69 | 70 | rm -rf cf-operator* kubecf* assets templates Chart.yaml values.yaml Metadata.yaml \ 71 | imagelist.txt requirements.lock requirements.yaml 72 | 73 | ok "Cleaned up KubeCF from the k8s cluster" 74 | -------------------------------------------------------------------------------- /modules/kubecf/defaults.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # KUBECF options 4 | ################ 5 | 6 | # kubecf-chart revelant: 7 | 8 | CHART_URL="${CHART_URL:-}" # FIXME deprecated, used in SCF_CHART 9 | SCF_CHART="${SCF_CHART:-$CHART_URL}" # set to empty to download from GH, "from_repo" to download from repo, or abs path to file 10 | 11 | SCF_HELM_VERSION="${SCF_HELM_VERSION:-}" 12 | OPERATOR_CHART_URL="${OPERATOR_CHART_URL:-latest}" 13 | 14 | # kubecf-gen-config relevant: 15 | 16 | KUBECF_SERVICES="${KUBECF_SERVICES:-}" # empty, lb, ingress, hardcoded. If not empty, overwrites the cluster's cap-values "services" 17 | GARDEN_ROOTFS_DRIVER="${GARDEN_ROOTFS_DRIVER:-overlay-xfs}" 18 | STORAGECLASS="${STORAGECLASS:-persistent}" 19 | AUTOSCALER="${AUTOSCALER:-false}" 20 | HA="${HA:-false}" 21 | 22 | OVERRIDE="${OVERRIDE:-}" 23 | CONFIG_OVERRIDE="${CONFIG_OVERRIDE:-$OVERRIDE}" 24 | 25 | BRAIN_VERBOSE="${BRAIN_VERBOSE:-false}" 26 | BRAIN_INORDER="${BRAIN_INORDER:-false}" 27 | BRAIN_INCLUDE="${BRAIN_INCLUDE:-}" 28 | BRAIN_EXCLUDE="${BRAIN_EXCLUDE:-}" 29 | 30 | CATS_NODES="${CATS_NODES:-1}" 31 | GINKGO_EXTRA_FLAGS="${GINKGO_EXTRA_FLAGS:-}" 32 | CATS_FLAKE_ATTEMPTS="${CATS_FLAKE_ATTEMPTS:-5}" 33 | CATS_TIMEOUT_SCALE="${CATS_TIMEOUT_SCALE:-3.0}" 34 | 35 | 36 | # kubecf-build relevant: 37 | 38 | SCF_LOCAL="${SCF_LOCAL:-}" 39 | 40 | # relevant to several: 41 | 42 | HELM_VERSION="${HELM_VERSION:-v3.1.1}" 43 | 44 | SCF_REPO="${SCF_REPO:-https://github.com/cloudfoundry-incubator/kubecf}" 45 | SCF_BRANCH="${SCF_BRANCH:-master}" 46 | 47 | # klog relevant: 48 | 49 | KUBECF_NAMESPACE="${KUBECF_NAMESPACE:-scf}" 50 | KUBECF_VERSION="${KUBECF_VERSION:-}" 51 | CF_OPERATOR_VERSION="${CF_OPERATOR_VERSION:-}" 52 | -------------------------------------------------------------------------------- /modules/kubecf/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | if [[ $ENABLE_EIRINI == true ]] ; then 9 | # [ ! -f "helm/cf/templates/eirini-namespace.yaml" ] && kubectl create namespace eirini 10 | if ! kubectl get clusterrole system:metrics-server &> /dev/null; then 11 | helm_install metrics-server stable/metrics-server\ 12 | --set args[0]="--kubelet-preferred-address-types=InternalIP" \ 13 | --set args[1]="--kubelet-insecure-tls" || true 14 | 15 | echo "Waiting for metrics server to come up..." 
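# (assumption) wait_ns -- presumably defined in include/func.sh, which
# common.sh sources -- blocks until the pods in the given namespace are Ready;
# the sleep afterwards papers over the metrics APIService registration lag.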
16 | wait_ns default 17 | sleep 10 18 | fi 19 | fi 20 | 21 | SCF_CHART="kubecf" 22 | if [ -d "deploy/helm/scf" ]; then 23 | SCF_CHART="deploy/helm/scf" 24 | fi 25 | 26 | if [ "$OPERATOR_CHART_URL" = latest ]; then 27 | info "Sourcing operator from kubecf charts" 28 | info "Getting latest cf-operator chart (override with OPERATOR_CHART_URL)" 29 | OPERATOR_CHART_URL=$(yq r $SCF_CHART/Metadata.yaml operatorChartUrl) 30 | 31 | # If still empty, grab latest one 32 | if [ "$OPERATOR_CHART_URL" = latest ]; then 33 | info "Fallback to use latest GH release of cf-operator" 34 | OPERATOR_CHART_URL=$(curl -s https://api.github.com/repos/cloudfoundry-incubator/cf-operator/releases/latest | grep "browser_download_url.*tgz" | cut -d : -f 2,3 | tr -d \" | tr -d " ") 35 | fi 36 | fi 37 | 38 | info "Installing cf-operator" 39 | 40 | # Detect the chart version to handle different install parameters 41 | operator_version="$(helm_chart_app_version "${OPERATOR_CHART_URL}")" 42 | operator_install_args=( 43 | --set "operator-webhook-use-service-reference=true" 44 | --set "customResources.enableInstallation=true" 45 | ) 46 | info "operator_version: ${operator_version}" 47 | if [[ "${operator_version%%.*}" -ge 5 ]]; then 48 | info "operator_version is greater than 5" 49 | info "setting param global.singleNamespace.name" 50 | operator_install_args+=(--set "global.singleNamespace.name=scf") 51 | else 52 | # quarks-operator 4.x uses a different key to target namespace to watch 53 | info "operator_version is less than 5" 54 | info "setting param global.operator.watchNamespace" 55 | operator_install_args+=(--set "global.operator.watchNamespace=scf") 56 | fi 57 | 58 | if [[ "${DOCKER_REGISTRY}" != "registry.suse.com" ]]; then 59 | operator_install_args+=(--set "image.org=${DOCKER_REGISTRY}/${DOCKER_ORG}") 60 | operator_install_args+=(--set "quarks-job.image.org=${DOCKER_REGISTRY}/${DOCKER_ORG}") 61 | operator_install_args+=(--set "operator.boshDNSDockerImage=${DOCKER_REGISTRY}/${DOCKER_ORG}/coredns:0.1.0-1.6.7-bp152.1.2") 62 | operator_install_args+=(--set "createWatchNamespace=false") 63 | operator_install_args+=(--set "quarks-job.createWatchNamespace=false") 64 | operator_install_args+=(--set "global.singleNamespace.create=false") 65 | operator_install_args+=(--set "quarks-job.singleNamespace.createNamespace=false") 66 | operator_install_args+=(--set "quarks-job.global.singleNamespace.create=false") 67 | fi 68 | 69 | 70 | echo "Installing CFO from: ${OPERATOR_CHART_URL}" 71 | 72 | kubectl create namespace cf-operator || true 73 | # Install the operator 74 | 75 | helm_install cf-operator "${OPERATOR_CHART_URL}" --namespace cf-operator \ 76 | "${operator_install_args[@]}" 77 | 78 | info "Wait for cf-operator to be ready" 79 | 80 | wait_for_cf-operator 81 | 82 | ok "cf-operator ready" 83 | 84 | helm_install susecf-scf ${SCF_CHART} \ 85 | --namespace scf \ 86 | --values scf-config-values.yaml 87 | 88 | sleep 540 89 | 90 | wait_for_kubecf 91 | 92 | ok "KubeCF deployed successfully" 93 | -------------------------------------------------------------------------------- /modules/kubecf/klog.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . 
.envrc 6 | 7 | curl -Lo klog.sh "$SCF_REPO"/raw/"$SCF_BRANCH"/dev/kube/klog.sh 8 | chmod +x klog.sh 9 | mv klog.sh bin/ 10 | 11 | HOME=${BUILD_DIR} klog.sh -f ${KUBECF_NAMESPACE} 12 | -------------------------------------------------------------------------------- /modules/kubecf/login.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | if [ -n "$EKCP_PROXY" ]; then 9 | export https_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 10 | fi 11 | 12 | domain=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 13 | admin_pass=$(kubectl get secret --namespace scf \ 14 | var-cf-admin-password \ 15 | -o jsonpath='{.data.password}' | base64 --decode) 16 | 17 | mkdir -p "$CF_HOME" 18 | 19 | # It might take some time for external DNS records to update so make a few attempts to login before bailing out. 20 | n=0 21 | until [ $n -ge 20 ] 22 | do 23 | set +e 24 | cf login --skip-ssl-validation -a https://api."$domain" -u admin -p "$admin_pass" -o system 25 | exit_code=$? 26 | set -e 27 | if [ $exit_code -eq 0 ]; then 28 | 29 | cf create-space tmp 30 | cf target -s tmp 31 | 32 | ok "Logged in to KubeCF" 33 | break 34 | fi 35 | 36 | n=$[$n+1] 37 | sleep 60 38 | done 39 | 40 | if [ $exit_code -ne 0 ] ; then 41 | err "Could not log into KubeCF" 42 | exit $exit_code 43 | fi 44 | -------------------------------------------------------------------------------- /modules/kubecf/minibroker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | # delete previous deployments 8 | if cf service-brokers 2>/dev/null | grep -qi minibroker ; then 9 | cf delete-service-broker minibroker -f 10 | fi 11 | if helm_ls 2>/dev/null | grep -qi minibroker ; then 12 | helm_delete minibroker --namespace minibroker 13 | fi 14 | if kubectl get namespaces 2>/dev/null | grep -qi minibroker ; then 15 | kubectl delete --ignore-not-found namespace minibroker 16 | fi 17 | 18 | ORG=$(cf target | grep "org:" | tr -s " " | cut -d " " -f 2) 19 | 20 | kubectl create namespace minibroker 21 | helm repo add suse https://kubernetes-charts.suse.com/ && helm repo update 22 | helm_install minibroker suse/minibroker --namespace minibroker --set "defaultNamespace=minibroker" 23 | 24 | wait_ns minibroker 25 | 26 | # username and password are dummies 27 | cf create-service-broker minibroker username password http://minibroker-minibroker.minibroker.svc.cluster.local 28 | 29 | cf service-brokers 30 | 31 | info "Listing services and plans that the minibroker service has access to:" 32 | cf service-access -b minibroker 33 | 34 | info "Enabling postgresql service" 35 | cf enable-service-access postgresql -b minibroker -p 11-6-0 36 | echo > postgresql.json '[{ "protocol": "tcp", "destination": "10.0.0.0/8", "ports": "5432", "description": "Allow PostgreSQL traffic" }]' 37 | cf create-security-group postgresql_networking postgresql.json 38 | cf bind-security-group postgresql_networking $ORG 39 | 40 | info "Enabling redis service" 41 | cf enable-service-access redis -b minibroker -p 5-0-7 42 | echo > redis.json '[{ "protocol": "tcp", "destination": "10.0.0.0/8", "ports": "6379", "description": "Allow Redis traffic" }]' 43 | cf create-security-group redis_networking redis.json 44 | cf bind-security-group redis_networking $ORG 45 | 46 | info "Create postgresql service" 47 | cf create-service 
postgresql 11-6-0 postgresql-service 48 | wait_ns minibroker 49 | 50 | info "Create redis service" 51 | cf create-service redis 5-0-7 redis-service 52 | wait_ns minibroker 53 | 54 | ok "Deployed minibroker and services successfully" 55 | -------------------------------------------------------------------------------- /modules/kubecf/precheck.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | curl -Lo kube-ready-state-check.sh "$SCF_REPO"/raw/"$SCF_BRANCH"/bin/dev/kube-ready-state-check.sh 9 | chmod +x kube-ready-state-check.sh 10 | mv kube-ready-state-check.sh bin/ 11 | 12 | kube-ready-state-check.sh kube 13 | -------------------------------------------------------------------------------- /modules/kubecf/purge.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | # Purging it's a best-effort action 7 | set +e 8 | 9 | info "Purging all apps, buildpacks and services from the CF instance" 10 | 11 | # Delete leftover apps 12 | for app in $(cf apps | gawk '{print $1}'); do cf delete -f $app; done 13 | 14 | # Delete all buildpacks (in case there are leftovers) 15 | for buildpack in $(cf buildpacks | tail -n +4 | gawk '{print $1}'); do cf delete-buildpack -f $buildpack; done 16 | 17 | if [ -n "$CF_STACK" ]; then 18 | for buildpack in $(cf buildpacks | tail -n +4 | gawk '{print $1}'); do cf delete-buildpack -f $buildpack -s "$CF_STACK"; done 19 | fi 20 | 21 | # Delete all services 22 | for service in $(cf services | tail -n +4 | gawk '{print $1}'); do cf delete-service -f $service; done 23 | 24 | ok "Purge completed" 25 | -------------------------------------------------------------------------------- /modules/kubecf/stemcell_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | 7 | [ ! -d "bosh-linux-stemcell-builder" ] && \ 8 | git clone https://github.com/SUSE/bosh-linux-stemcell-builder.git 9 | 10 | pushd bosh-linux-stemcell-builder || exit 11 | git checkout devel 12 | make all 13 | popd || exit 14 | -------------------------------------------------------------------------------- /modules/kubecf/upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . 
16 | helm_upgrade cf-operator "${OPERATOR_DIR}/" \ 17 | --namespace cf-operator \ 18 | --set "global.singleNamespace.name=scf" 19 | 20 | info "Wait for cf-operator to be ready" 21 | 22 | wait_for_cf-operator 23 | 24 | ok "cf-operator ready" 25 | helm list -A 26 | 27 | info "Upgrading KubeCF…" 28 | helm list -A 29 | 30 | helm_upgrade susecf-scf kubecf/ \ 31 | --namespace scf \ 32 | --values scf-config-values.yaml 33 | sleep 10 34 | 35 | wait_for_kubecf 36 | 37 | ok "KubeCF deployment upgraded successfully" 38 | helm list -A 39 | -------------------------------------------------------------------------------- /modules/metrics/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | .PHONY: clean 4 | clean: 5 | ./clean.sh 6 | 7 | .PHONY: chart 8 | chart: 9 | ./chart.sh 10 | 11 | .PHONY: gen-config 12 | gen-config: 13 | ./gen-config.sh 14 | 15 | .PHONY: install 16 | install: 17 | ./install.sh 18 | 19 | .PHONY: upgrade 20 | upgrade: 21 | ./upgrade.sh 22 | 23 | .PHONY: all 24 | all: clean chart gen-config install 25 | -------------------------------------------------------------------------------- /modules/metrics/chart.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | # remove old uncompressed chart 8 | rm -rf metrics 9 | 10 | if [ "$METRICS_CHART" = "latest" ]; then 11 | warn "No metrics chart url given - using latest public release from kubernetes-charts.suse.com" 12 | HELM_REPO="https://kubernetes-charts.suse.com/" 13 | HELM_REPO_NAME="suse" 14 | 15 | helm_init_client 16 | helm repo add "$HELM_REPO_NAME" "$HELM_REPO" 17 | helm repo update 18 | helm fetch "$HELM_REPO_NAME"/metrics 19 | tar -xvf metrics-* 20 | rm metrics-*.tgz 21 | METRICS_CHART_NAME=$(grep imageTag metrics/values.yaml | cut -d " " -f2) 22 | else 23 | if echo "$METRICS_CHART" | grep -q "http"; then 24 | # curl -L "$METRICS_CHART" -o stratos-metrics-chart 25 | err "METRICS_CHART download needs authentication, please download manually" 26 | exit 1 27 | else 28 | cp -rfv "$METRICS_CHART" stratos-metrics-chart 29 | fi 30 | 31 | if echo "$METRICS_CHART" | grep -q "tgz"; then 32 | tar -xvf stratos-metrics-chart -C ./ 33 | else 34 | unzip -o stratos-metrics-chart 35 | fi 36 | rm stratos-metrics-chart 37 | METRICS_CHART_NAME="$METRICS_CHART" 38 | fi 39 | 40 | # save METRICS_CHART_NAME on cap-values configmap 41 | kubectl patch -n kube-system configmap cap-values -p $'data:\n metrics-chart: "'$METRICS_CHART_NAME'"'
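42 | # NOTE: $'...' is bash ANSI-C quoting: the \n expands to a real newline, so kubectl 43 | # receives a two-line YAML merge patch that stores the chart version under data:.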
44 | 45 | ok "Stratos-metrics chart uncompressed" 46 | -------------------------------------------------------------------------------- /modules/metrics/clean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | if helm_ls 2>/dev/null | grep -qi susecf-metrics ; then 7 | helm_delete susecf-metrics -n stratos-metrics 8 | fi 9 | if kubectl get namespaces 2>/dev/null | grep -qi metrics ; then 10 | kubectl delete namespace stratos-metrics 11 | fi 12 | 13 | # delete METRICS_CHART on cap-values configmap 14 | if [[ -n "$(kubectl get -o json -n kube-system configmap cap-values | jq -r '.data["metrics-chart"] // empty')" ]]; then 15 | kubectl patch -n kube-system configmap cap-values --type json -p '[{"op": "remove", "path": "/data/metrics-chart"}]' 16 | fi 17 | 18 | rm -rf metrics stratos-metrics-values.yaml scf-config-values-for-metrics.yaml 19 | 20 | ok "Stratos-metrics removed" 21 | -------------------------------------------------------------------------------- /modules/metrics/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Metrics options 4 | ################# 5 | 6 | METRICS_CHART="${METRICS_CHART:-latest}" 7 | -------------------------------------------------------------------------------- /modules/metrics/gen-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | info "Generating stratos-metrics config values" 7 | 8 | KUBE_API_ENDPOINT=$(kubectl config view -o json | jq -r '.clusters[].cluster.server') 9 | DOMAIN=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 10 | UAAADMINCLIENTSECRET=$(kubectl get secret var-uaa-admin-client-secret -n scf --output jsonpath="{.data['password']}" | base64 --decode) 11 | 12 | cp scf-config-values-for-stratos.yaml scf-config-values-for-metrics.yaml 13 | 14 | cat <<EOF > op.yml 15 | - op: add 16 | path: /prometheus 17 | value: 18 | imagePullSecrets: 19 | - name: regsecret 20 | - op: replace 21 | path: /kube/registry/hostname 22 | value: 23 | "${DOCKER_REGISTRY}" 24 | - op: replace 25 | path: /kube/registry/username 26 | value: 27 | "${DOCKER_USERNAME}" 28 | - op: replace 29 | path: /kube/registry/password 30 | value: 31 | "${DOCKER_PASSWORD}" 32 | - op: replace 33 | path: /kube/organization 34 | value: 35 | "${DOCKER_ORG}" 36 | EOF 37 | 38 | yamlpatch op.yml scf-config-values-for-metrics.yaml 39 | 40 | cat <<HEREDOC > stratos-metrics-values.yaml 41 | --- 42 | kubernetes: 43 | apiEndpoint: "${KUBE_API_ENDPOINT}" 44 | cloudFoundry: 45 | apiEndpoint: "api.${DOMAIN}" 46 | uaaAdminClient: admin 47 | uaaAdminClientSecret: "${UAAADMINCLIENTSECRET}" 48 | skipSslVerification: "true" 49 | prometheus: 50 | kubeStateMetrics: 51 | enabled: true 52 | HEREDOC 53 | 54 | ok "Stratos-metrics config values generated" 55 | -------------------------------------------------------------------------------- /modules/metrics/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | info "Deploying stratos-metrics" 8 | 9 | if [[ "$HELM_VERSION" == v3* ]]; then 10 | kubectl create namespace "stratos-metrics" 11 | fi
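12 | # helm 3 does not create the release namespace on install (helm 2 did), hence 13 | # the explicit kubectl create before installing the chart.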
14 | helm_install susecf-metrics ./metrics \ 15 | --namespace stratos-metrics \ 16 | --values scf-config-values-for-metrics.yaml \ 17 | --values stratos-metrics-values.yaml 18 | 19 | wait_ns metrics 20 | 21 | kubectl get service susecf-metrics-metrics-nginx --namespace stratos-metrics 22 | 23 | ok "Stratos-metrics deployed successfully" 24 | -------------------------------------------------------------------------------- /modules/metrics/upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | info "Upgrading stratos-metrics" 8 | 9 | METRICS_CHART_NAME=$(grep imageTag metrics/values.yaml | cut -d " " -f2) 10 | # save METRICS_CHART on cap-values configmap 11 | kubectl patch -n kube-system configmap cap-values -p $'data:\n metrics-chart: "'$METRICS_CHART_NAME'"' 12 | 13 | helm_upgrade susecf-metrics ./metrics \ 14 | --namespace stratos-metrics \ 15 | --values scf-config-values-for-metrics.yaml \ 16 | --values stratos-metrics-values.yaml 17 | 18 | wait_ns metrics 19 | 20 | ok "Stratos-metrics deployment upgraded successfully" 21 | -------------------------------------------------------------------------------- /modules/scf/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | .PHONY: clean 4 | clean: 5 | ./clean.sh 6 | 7 | .PHONY: chart 8 | chart: 9 | ./chart.sh 10 | 11 | .PHONY: gen-config 12 | gen-config: 13 | ./gen_config.sh 14 | 15 | .PHONY: install 16 | install: 17 | ./install.sh 18 | 19 | .PHONY: login 20 | login: 21 | ./login.sh 22 | 23 | .PHONY: purge 24 | purge: 25 | ./purge.sh 26 | 27 | .PHONY: all 28 | all: clean chart gen-config install 29 | 30 | ## one-offs: 31 | 32 | .PHONY: scf-precheck 33 | scf-precheck: 34 | ./precheck.sh 35 | 36 | .PHONY: scf-brats-setup 37 | scf-brats-setup: 38 | ./brats_setup.sh 39 | 40 | .PHONY: upgrade 41 | upgrade: 42 | ./upgrade.sh 43 | 44 | .PHONY: build-scf-from-source 45 | build-scf-from-source: 46 | ./build.sh 47 | 48 | .PHONY: scf-klog 49 | scf-klog: 50 | ./klog.sh 51 | 52 | .PHONY: stemcell_build 53 | stemcell_build: 54 | ./stemcell_build.sh 55 | 56 | .PHONY: minibroker 57 | minibroker: 58 | ./minibroker.sh 59 | -------------------------------------------------------------------------------- /modules/scf/brats_setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | .
.envrc 5 | 6 | 7 | domain=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 8 | public_ip=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["public-ip"]') 9 | mapfile -t aux_external_ips < <(kubectl get nodes -o json | jq -r '.items[].status.addresses[] | select(.type == "InternalIP").address') 10 | external_ips+="\"$public_ip\"" 11 | for (( i=0; i < ${#aux_external_ips[@]}; i++ )); do 12 | external_ips+=", \"${aux_external_ips[$i]}\"" 13 | done 14 | 15 | cat > nginx_proxy_deployment.yaml < nginx_proxy_service.yaml < securitygroup.json < scf_chart_url 58 | else 59 | cp -rfv "$SCF_CHART" chart 60 | fi 61 | 62 | if echo "$SCF_CHART" | grep -q "tgz"; then 63 | tar -xvf chart -C ./ 64 | else 65 | unzip -o chart 66 | fi 67 | 68 | ok "Chart uncompressed" 69 | -------------------------------------------------------------------------------- /modules/scf/clean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc || exit 0 6 | 7 | # if no kubeconfig, no cf. Exit 8 | [ -f "$KUBECONFIG" ] || exit 0 9 | 10 | if [ "$EMBEDDED_UAA" != "true" ]; then 11 | if helm_ls 2>/dev/null | grep -qi susecf-uaa ; then 12 | helm_delete susecf-uaa --namespace uaa 13 | fi 14 | if kubectl get namespaces 2>/dev/null | grep -qi uaa ; then 15 | kubectl delete --ignore-not-found namespace uaa 16 | fi 17 | fi 18 | 19 | if helm_ls 2>/dev/null | grep -qi susecf-scf ; then 20 | helm_delete susecf-scf --namespace scf 21 | fi 22 | if kubectl get namespaces 2>/dev/null | grep -qi scf ; then 23 | kubectl delete --ignore-not-found namespace scf 24 | fi 25 | 26 | if kubectl get psp 2>/dev/null | grep -qi susecf-scf ; then 27 | kubectl delete --ignore-not-found psp susecf-scf-default 28 | fi 29 | 30 | if helm_ls 2>/dev/null | grep -qi cf-operator ; then 31 | helm_delete cf-operator --namespace cf-operator 32 | fi 33 | if kubectl get namespaces 2>/dev/null | grep -qi cf-operator ; then 34 | kubectl delete --ignore-not-found namespace cf-operator 35 | fi 36 | 37 | if [[ "$ENABLE_EIRINI" == true ]] ; then 38 | if kubectl get namespaces 2>/dev/null | grep -qi eirini ; then 39 | kubectl delete --ignore-not-found namespace eirini 40 | fi 41 | if helm_ls 2>/dev/null | grep -qi metrics-server ; then 42 | helm_delete metrics-server 43 | fi 44 | fi 45 | 46 | rm -rf scf-config-values.yaml chart helm kube "$CF_HOME"/.cf kube-ready-state-check.sh 47 | 48 | ok "Cleaned up scf from the k8s cluster" 49 | -------------------------------------------------------------------------------- /modules/scf/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # SCF options 4 | ############# 5 | 6 | # scf-chart relevant: 7 | 8 | CHART_URL="${CHART_URL:-}" # FIXME deprecated, used in SCF_CHART 9 | SCF_CHART="${SCF_CHART:-$CHART_URL}" 10 | 11 | SCF_HELM_VERSION="${SCF_HELM_VERSION:-}" 12 | OPERATOR_CHART_URL="${OPERATOR_CHART_URL:-latest}" 13 | 14 | # scf-gen-config relevant: 15 | 16 | SCF_SERVICES="${SCF_SERVICES:-lb}" # lb, ingress 17 | GARDEN_ROOTFS_DRIVER="${GARDEN_ROOTFS_DRIVER:-overlay-xfs}" 18 | DIEGO_SIZING="${DIEGO_SIZING:-$SIZING}" 19 | STORAGECLASS="${STORAGECLASS:-persistent}" 20 | AUTOSCALER="${AUTOSCALER:-false}" 21 | 22 | EMBEDDED_UAA="${EMBEDDED_UAA:-false}" 23 | 24 | HA="${HA:-false}" 25 | if [ "$HA" = "true" ]; then 26 | SIZING="${SIZING:-2}" 27 | else 28 | SIZING="${SIZING:-1}" 29 | fi 30 | 31 |
UAA_UPGRADE="${UAA_UPGRADE:-true}" 32 | 33 | OVERRIDE="${OVERRIDE:-}" 34 | CONFIG_OVERRIDE="${CONFIG_OVERRIDE:-$OVERRIDE}" 35 | 36 | BRAIN_VERBOSE="${BRAIN_VERBOSE:-false}" 37 | BRAIN_INORDER="${BRAIN_INORDER:-false}" 38 | BRAIN_INCLUDE="${BRAIN_INCLUDE:-}" 39 | BRAIN_EXCLUDE="${BRAIN_EXCLUDE:-}" 40 | 41 | CATS_NODES="${CATS_NODES:-1}" 42 | CATS_FLAKE_ATTEMPTS="${CATS_FLAKE_ATTEMPTS:-5}" 43 | CATS_TIMEOUT_SCALE="${CATS_TIMEOUT_SCALE:-3.0}" 44 | 45 | 46 | # scf-build relevant: 47 | 48 | SCF_LOCAL="${SCF_LOCAL:-}" 49 | 50 | # relevant to several: 51 | 52 | HELM_VERSION="${HELM_VERSION:-v3.1.1}" 53 | 54 | SCF_REPO="${SCF_REPO:-https://github.com/SUSE/scf}" 55 | SCF_BRANCH="${SCF_BRANCH:-develop}" 56 | -------------------------------------------------------------------------------- /modules/scf/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | domain=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 9 | services=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["services"]') 10 | 11 | if [[ $ENABLE_EIRINI == true ]] ; then 12 | # [ ! -f "helm/cf/templates/eirini-namespace.yaml" ] && kubectl create namespace eirini 13 | if ! helm_ls 2>/dev/null | grep -qi metrics-server ; then 14 | helm_install metrics-server stable/metrics-server\ 15 | --set args[0]="--kubelet-preferred-address-types=InternalIP" \ 16 | --set args[1]="--kubelet-insecure-tls" || true 17 | fi 18 | 19 | echo "Waiting for metrics server to come up..." 20 | wait_ns default 21 | sleep 10 22 | fi 23 | 24 | if [ "${EMBEDDED_UAA}" != "true" ]; then 25 | 26 | kubectl create namespace "uaa" 27 | helm_install susecf-uaa helm/uaa --namespace uaa --values scf-config-values.yaml 28 | 29 | wait_ns uaa 30 | if [ "$services" == "lb" ]; then 31 | external_dns_annotate_uaa uaa "$domain" 32 | fi 33 | 34 | SECRET=$(kubectl get pods --namespace uaa \ 35 | -o jsonpath='{.items[?(.metadata.name=="uaa-0")].spec.containers[?(.name=="uaa")].env[?(.name=="INTERNAL_CA_CERT")].valueFrom.secretKeyRef.name}') 36 | 37 | CA_CERT="$(kubectl get secret "$SECRET" --namespace uaa \ 38 | -o jsonpath="{.data['internal-ca-cert']}" | base64 --decode -)" 39 | 40 | kubectl create namespace "scf" 41 | helm_install susecf-scf helm/cf --namespace scf \ 42 | --values scf-config-values.yaml \ 43 | --set "secrets.UAA_CA_CERT=${CA_CERT}" 44 | else 45 | kubectl create namespace "scf" 46 | helm_install susecf-scf helm/cf --namespace scf \ 47 | --values scf-config-values.yaml \ 48 | --set enable.uaa=true 49 | 50 | wait_ns uaa 51 | if [ "$services" == "lb" ]; then 52 | external_dns_annotate_uaa uaa "$domain" 53 | fi 54 | fi 55 | 56 | wait_ns scf 57 | if [ "$services" == "lb" ]; then 58 | external_dns_annotate_scf scf "$domain" 59 | fi 60 | 61 | ok "SCF deployed successfully" 62 | -------------------------------------------------------------------------------- /modules/scf/klog.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . 
.envrc 6 | 7 | curl -Lo klog.sh "$SCF_REPO"/raw/"$SCF_BRANCH"/container-host-files/opt/scf/bin/klog.sh 8 | chmod +x klog.sh 9 | mv klog.sh bin/ 10 | 11 | klog.sh 12 | -------------------------------------------------------------------------------- /modules/scf/login.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | if [ -n "$EKCP_PROXY" ]; then 9 | export https_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 10 | fi
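11 | # (EKCP-managed clusters are reachable only through a local SOCKS5 forward; the cf 12 | # CLI honors https_proxy, so API traffic is tunnelled via KUBEPROXY_PORT)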
13 | 14 | domain=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 15 | 16 | mkdir -p "$CF_HOME" 17 | 18 | # It might take some time for external DNS records to update so make a few attempts to log in before bailing out. 19 | n=0 20 | until [ $n -ge 20 ] 21 | do 22 | cf login --skip-ssl-validation -a https://api."$domain" -u admin -p "$CLUSTER_PASSWORD" -o system 23 | exit_code=$? 24 | if [ $exit_code -eq 0 ]; then 25 | 26 | cf create-space tmp 27 | cf target -s tmp 28 | 29 | ok "Logged in to SCF" 30 | break 31 | fi 32 | 33 | n=$((n+1)) 34 | sleep 60 35 | done 36 | 37 | if [ $exit_code -ne 0 ] ; then 38 | err "Could not log into SCF" 39 | exit $exit_code 40 | fi 41 | -------------------------------------------------------------------------------- /modules/scf/minibroker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | # delete previous deployments 8 | if cf service-brokers 2>/dev/null | grep -qi minibroker ; then 9 | cf delete-service-broker minibroker -f 10 | fi 11 | if helm_ls 2>/dev/null | grep -qi minibroker ; then 12 | helm_delete minibroker 13 | fi 14 | if kubectl get namespaces 2>/dev/null | grep -qi minibroker ; then 15 | kubectl delete --ignore-not-found namespace minibroker 16 | fi 17 | 18 | ORG=$(cf target | grep "org:" | tr -s " " | cut -d " " -f 2) 19 | 20 | helm_install minibroker suse/minibroker --namespace minibroker --set "defaultNamespace=minibroker" 21 | 22 | wait_ns minibroker 23 | 24 | # username and password are dummies 25 | cf create-service-broker minibroker username password http://minibroker-minibroker.minibroker.svc.cluster.local 26 | 27 | cf service-brokers 28 | 29 | info "Listing services and plans that the minibroker service has access to:" 30 | cf service-access -b minibroker 31 | 32 | info "Enabling postgresql service" 33 | cf enable-service-access postgresql -b minibroker -p 11-6-0 34 | echo > postgresql.json '[{ "protocol": "tcp", "destination": "10.0.0.0/8", "ports": "5432", "description": "Allow PostgreSQL traffic" }]' 35 | cf create-security-group postgresql_networking postgresql.json 36 | cf bind-security-group postgresql_networking $ORG 37 | 38 | info "Enabling redis service" 39 | cf enable-service-access redis -b minibroker -p 5-0-7 40 | echo > redis.json '[{ "protocol": "tcp", "destination": "10.0.0.0/8", "ports": "6379", "description": "Allow Redis traffic" }]' 41 | cf create-security-group redis_networking redis.json 42 | cf bind-security-group redis_networking $ORG 43 | 44 | info "Create postgresql service" 45 | cf create-service postgresql 11-6-0 postgresql-service 46 | wait_ns minibroker 47 | 48 | info "Create redis service" 49 | cf create-service redis 5-0-7 redis-service 50 | wait_ns minibroker 51 | 52 | ok "Deployed minibroker and services successfully" 53 | -------------------------------------------------------------------------------- /modules/scf/precheck.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | curl -Lo kube-ready-state-check.sh "$SCF_REPO"/raw/"$SCF_BRANCH"/bin/dev/kube-ready-state-check.sh 9 | chmod +x kube-ready-state-check.sh 10 | mv kube-ready-state-check.sh bin/ 11 | 12 | kube-ready-state-check.sh kube 13 | -------------------------------------------------------------------------------- /modules/scf/purge.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | # Purging is a best-effort action 7 | set +e 8 | 9 | info "Purging all apps, buildpacks and services from the CF instance" 10 | 11 | # Delete leftover apps 12 | for app in $(cf apps | gawk '{print $1}'); do cf delete -f $app; done 13 | 14 | # Delete all buildpacks (in case there are leftovers) 15 | for buildpack in $(cf buildpacks | tail -n +4 | gawk '{print $1}'); do cf delete-buildpack -f $buildpack; done 16 | 17 | if [ -n "$CF_STACK" ]; then 18 | for buildpack in $(cf buildpacks | tail -n +4 | gawk '{print $1}'); do cf delete-buildpack -f $buildpack -s "$CF_STACK"; done 19 | fi 20 | 21 | # Delete all services 22 | for service in $(cf services | tail -n +4 | gawk '{print $1}'); do cf delete-service -f $service; done 23 | 24 | ok "Purge completed" 25 | -------------------------------------------------------------------------------- /modules/scf/stemcell_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | 7 | [ ! -d "bosh-linux-stemcell-builder" ] && \ 8 | git clone https://github.com/SUSE/bosh-linux-stemcell-builder.git 9 | 10 | pushd bosh-linux-stemcell-builder || exit 11 | git checkout devel 12 | make all 13 | popd || exit 14 | -------------------------------------------------------------------------------- /modules/scf/upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | if [ -n "$SCF_CHART" ]; then 9 | # save SCF_CHART on cap-values configmap 10 | kubectl patch -n kube-system configmap cap-values -p $'data:\n chart: "'$SCF_CHART'"' 11 | fi 12 |
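13 | # Recover UAA's internal CA: look up which secret backs the INTERNAL_CA_CERT env var 14 | # on the uaa-0 pod, then pull and decode that cert so both charts keep trusting UAA.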
15 | SECRET=$(kubectl get pods --namespace uaa \ 16 | -o jsonpath='{.items[?(.metadata.name=="uaa-0")].spec.containers[?(.name=="uaa")].env[?(.name=="INTERNAL_CA_CERT")].valueFrom.secretKeyRef.name}') 17 | 18 | CA_CERT="$(kubectl get secret "$SECRET" --namespace uaa \ 19 | -o jsonpath="{.data['internal-ca-cert']}" | base64 --decode -)" 20 | 21 | if [ "$UAA_UPGRADE" == true ]; then 22 | helm_upgrade susecf-uaa helm/uaa/ --values scf-config-values.yaml \ 23 | --set "secrets.UAA_CA_CERT=${CA_CERT}" 24 | wait_ns uaa 25 | fi 26 | 27 | helm_upgrade susecf-scf helm/cf/ --values scf-config-values.yaml \ 28 | --set "secrets.UAA_CA_CERT=${CA_CERT}" 29 | 30 | wait_ns scf 31 | 32 | ok "SCF deployment upgraded successfully" 33 | -------------------------------------------------------------------------------- /modules/stratos/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | .PHONY: clean 4 | clean: 5 | ./clean.sh 6 | 7 | .PHONY: chart 8 | chart: 9 | ./chart.sh 10 | 11 | .PHONY: gen-config 12 | gen-config: 13 | ./gen-config.sh 14 | 15 | .PHONY: install 16 | install: 17 | ./install.sh 18 | 19 | .PHONY: upgrade 20 | upgrade: 21 | ./upgrade.sh 22 | 23 | .PHONY: reachable 24 | reachable: 25 | ./reachable.sh 26 | 27 | .PHONY: all 28 | all: clean chart gen-config install 29 | -------------------------------------------------------------------------------- /modules/stratos/chart.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | # remove old uncompressed chart 8 | rm -rf console 9 | 10 | if [ "$STRATOS_CHART" = "latest" ]; then 11 | warn "No stratos chart url given - using latest public release from kubernetes-charts.suse.com" 12 | HELM_REPO="https://kubernetes-charts.suse.com/" 13 | HELM_REPO_NAME="suse" 14 | 15 | helm_init_client 16 | helm repo add "$HELM_REPO_NAME" "$HELM_REPO" 17 | helm repo update 18 | helm fetch "$HELM_REPO_NAME"/console 19 | tar -xvf console-* 20 | rm console-*.tgz 21 | STRATOS_CHART_NAME=$(grep consoleVersion console/values.yaml | cut -d " " -f2) 22 | else 23 | if echo "$STRATOS_CHART" | grep -q "http"; then 24 | curl -L "$STRATOS_CHART" -o stratos-chart 25 | else 26 | cp -rfv "$STRATOS_CHART" stratos-chart 27 | fi 28 | 29 | if echo "$STRATOS_CHART" | grep -q "tgz"; then 30 | tar -xvf stratos-chart -C ./ 31 | else 32 | unzip -o stratos-chart 33 | fi 34 | rm stratos-chart 35 | STRATOS_CHART_NAME="$STRATOS_CHART" 36 | fi 37 | 38 | # save STRATOS_CHART_NAME on cap-values configmap 39 | kubectl patch -n kube-system configmap cap-values -p $'data:\n stratos-chart: "'$STRATOS_CHART_NAME'"' 40 | 41 | ok "Stratos chart uncompressed" 42 | -------------------------------------------------------------------------------- /modules/stratos/clean.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ../../include/common.sh 4 | .
.envrc 5 | 6 | if helm_ls 2>/dev/null | grep -qi susecf-console ; then 7 | helm_delete susecf-console 8 | fi 9 | kubectl delete --ignore-not-found namespace stratos 10 | 11 | # delete STRATOS_CHART on cap-values configmap 12 | if [[ -n "$(kubectl get -o json -n kube-system configmap cap-values | jq -r '.data["stratos-chart"] // empty')" ]]; then 13 | kubectl patch -n kube-system configmap cap-values --type json -p '[{"op": "remove", "path": "/data/stratos-chart"}]' 14 | fi 15 | 16 | rm -rf console scf-config-values-for-stratos.yaml 17 | 18 | ok "Stratos removed" 19 | -------------------------------------------------------------------------------- /modules/stratos/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # stratos module options 4 | ######################## 5 | 6 | STRATOS_CHART="${STRATOS_CHART:-latest}" 7 | -------------------------------------------------------------------------------- /modules/stratos/gen-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | info "Generating stratos config values from scf values" 7 | 8 | cp scf-config-values.yaml scf-config-values-for-stratos.yaml 9 | public_ip=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["public-ip"]') 10 | domain=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 11 | 12 | cat <<EOF > op.yml 13 | - op: add 14 | path: /console 15 | value: 16 | service: 17 | externalIPs: ["${public_ip}"] 18 | servicePort: 8443 19 | ingress: 20 | enabled: true 21 | host: ${domain} 22 | - op: replace 23 | path: /kube/registry/hostname 24 | value: 25 | "${DOCKER_REGISTRY}" 26 | - op: replace 27 | path: /kube/registry/username 28 | value: 29 | "${DOCKER_USERNAME}" 30 | - op: replace 31 | path: /kube/registry/password 32 | value: 33 | "${DOCKER_PASSWORD}" 34 | - op: replace 35 | path: /kube/organization 36 | value: 37 | "${DOCKER_ORG}" 38 | - op: add 39 | path: /services 40 | value: 41 | loadbalanced: true 42 | EOF
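43 | # op.yml is a JSON-Patch-style (RFC 6902) list of ops; yamlpatch, a catapult helper 44 | # (presumably from include/func.sh), applies them to the copied values file below.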
45 | 46 | yamlpatch op.yml scf-config-values-for-stratos.yaml 47 | 48 | ok "Stratos config values generated" 49 | -------------------------------------------------------------------------------- /modules/stratos/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | info "Deploying stratos" 8 | 9 | if [[ "$HELM_VERSION" == v3* ]]; then 10 | kubectl create namespace "stratos" 11 | fi 12 | helm_install suse-console ./console \ 13 | --namespace stratos \ 14 | --values scf-config-values-for-stratos.yaml 15 | 16 | wait_ns stratos 17 | 18 | kubectl get services suse-console-ui-ext -n stratos 19 | 20 | domain=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 21 | services=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["services"]') 22 | 23 | if [ "$services" == "lb" ]; then 24 | external_dns_annotate_stratos stratos "$domain" 25 | fi 26 | 27 | ok "Stratos deployed successfully" 28 | -------------------------------------------------------------------------------- /modules/stratos/reachable.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | if [ -n "$EKCP_PROXY" ]; then 9 | export https_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 10 | fi 11 | 12 | domain=$(kubectl get configmap -n kube-system cap-values -o jsonpath='{.data.domain}') 13 | external_ip=$(kubectl get services suse-console-ui-ext -n stratos -o jsonpath='{.status.loadBalancer.ingress[0].ip}') 14 | port=$(kubectl get services suse-console-ui-ext -n stratos -o jsonpath='{.spec.ports[0].port}') 15 | 16 | # check if stratos is reachable via the ip 17 | n=0 18 | exit_code=1 19 | until [ $n -ge 20 ] 20 | do 21 | curl -k https://"${external_ip}":"${port}" | grep "SUSE Stratos Console" 22 | exit_code=$? 23 | if [ $exit_code -eq 0 ]; then 24 | ok "Reachable via IP" 25 | break 26 | fi 27 | 28 | n=$((n+1)) 29 | sleep 60 30 | done 31 | 32 | if [ $exit_code -ne 0 ] ; then 33 | err "Not reachable via IP" 34 | fi 35 | 36 | # check via the domain name 37 | # It might take some time for external DNS records to update so make a few attempts to reach it before bailing out. 38 | n=0 39 | exit_code=1 40 | until [ $n -ge 20 ] 41 | do 42 | curl -k https://console."${domain}":"${port}" | grep "SUSE Stratos Console" 43 | exit_code=$? 44 | if [ $exit_code -eq 0 ]; then 45 | ok "Reachable via hostname https://console.${domain}:${port}" 46 | break 47 | fi 48 | 49 | n=$((n+1)) 50 | sleep 60 51 | done 52 | 53 | if [ $exit_code -ne 0 ] ; then 54 | fail "Not reachable via hostname https://console.${domain}:${port}" 55 | fi 56 | -------------------------------------------------------------------------------- /modules/stratos/upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | # save STRATOS_CHART on cap-values configmap 7 | STRATOS_CHART_NAME=$(grep consoleVersion console/values.yaml | cut -d " " -f2) 8 | kubectl patch -n kube-system configmap cap-values -p $'data:\n stratos-chart: "'$STRATOS_CHART_NAME'"' 9 | 10 | helm_upgrade suse-console ./console \ 11 | --namespace stratos \ 12 | --values scf-config-values-for-stratos.yaml 13 | 14 | wait_ns stratos 15 | 16 | ok "Stratos deployment upgraded successfully" 17 | -------------------------------------------------------------------------------- /modules/tests/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | .PHONY: smoke 4 | smoke: 5 | ./smoke.sh 6 | 7 | .PHONY: smoke-kube 8 | smoke-kube: 9 | ./kubesmokes.sh 10 | 11 | .PHONY: kubecats 12 | kubecats: 13 | ./kubecats.sh 14 | 15 | .PHONY: brats 16 | brats: 17 | ./brats.sh 18 | 19 | .PHONY: test-eirini-persi 20 | test-eirini-persi: 21 | ./eirini_persi.sh 22 | 23 | .PHONY: smoke-scf 24 | smoke-scf: 25 | ./smoke_scf.sh 26 | 27 | .PHONY: cats 28 | cats: 29 | ./cats.sh 30 | 31 | .PHONY: cats-scf 32 | cats-scf: 33 | ./cats_scf.sh 34 | 35 | .PHONY: stress-benchmark 36 | stress-benchmark: 37 | ./stress-benchmark.sh 38 | 39 | .PHONY: sample 40 | sample: 41 | ./sample.sh 42 | 43 | .PHONY: sample-ticking 44 | sample-ticking: 45 | ./sample-ticking.sh 46 | 47 | .PHONY: autoscaler 48 | autoscaler: 49 | ./autoscaler.sh 50 | 51 | .PHONY: kubecf 52 | kubecf: 53 | ./kubecf-test.sh 54 | 55 | # test all the things! 56 | .PHONY: all 57 | # Enable only reliable ones: 58 | all: smoke 59 | -------------------------------------------------------------------------------- /modules/tests/autoscaler.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | .
../../include/common.sh 5 | . .envrc 6 | 7 | # install the autoscaler cf cli plugin 8 | cf add-plugin-repo "CF-Community" "https://plugins.cloudfoundry.org" 9 | cf install-plugin -r CF-Community app-autoscaler-plugin -f 10 | 11 | # clone sample app 12 | SAMPLE_FOLDER=autoscaled-app 13 | [ ! -d "$SAMPLE_FOLDER" ] && git clone --recurse-submodules "$SAMPLE_APP_REPO" "$SAMPLE_FOLDER" 14 | pushd "$SAMPLE_FOLDER" || exit 15 | if [ -n "$EKCP_PROXY" ]; then 16 | export https_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 17 | fi 18 | 19 | # push app without starting 20 | cf push autoscaled-app --no-start 21 | 22 | # bind the service instance to the app and attach policy 23 | cat > autoscaler-policy.json <=", 33 | "cool_down_secs": 300, 34 | "adjustment": "+1" 35 | }] 36 | } 37 | EOF 38 | cf attach-autoscaling-policy autoscaled-app autoscaler-policy.json 39 | 40 | # start app 41 | cf start autoscaled-app 42 | -------------------------------------------------------------------------------- /modules/tests/brats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | # defaults.sh needs CLUSTER_PASSWORD: 5 | . "$ROOT_DIR"/modules/tests/defaults.sh 6 | . .envrc 7 | 8 | 9 | DOMAIN=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 10 | public_ip=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["public-ip"]') 11 | DEPLOYED_CHART=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["chart"]') 12 | admin_pass=$(kubectl get secret --namespace scf \ 13 | var-cf-admin-password \ 14 | -o jsonpath='{.data.password}' | base64 --decode) 15 | 16 | info 17 | info "@@@@@@@@@" 18 | info "Running BRATs on deployed chart $DEPLOYED_CHART" 19 | info "@@@@@@@@@" 20 | info 21 | 22 | kubectl create namespace catapult || true 23 | kubectl delete pod brats -n catapult || true 24 | kubectl create -f "$ROOT_DIR"/kube/dind.yaml -n catapult || true 25 | 26 | export BRATS_CF_HOST="api.$DOMAIN" 27 | export PROXY_HOST="$public_ip" 28 | export PROXY_SCHEME="$PROXY_SCHEME" 29 | export BRATS_CF_USERNAME="$BRATS_CF_USERNAME" 30 | export BRATS_CF_PASSWORD="$admin_pass" 31 | export PROXY_PORT="$PROXY_PORT" 32 | export PROXY_USERNAME="$PROXY_USERNAME" 33 | export PROXY_PASSWORD="$PROXY_PASSWORD" 34 | export BRATS_TEST_SUITE="$BRATS_TEST_SUITE" 35 | export CF_STACK="$CF_STACK" 36 | export GINKGO_ATTEMPTS="$GINKGO_ATTEMPTS" 37 | export BRATS_BUILDPACK="$BRATS_BUILDPACK" 38 | export BRATS_BUILDPACK_URL="$BRATS_BUILDPACK_URL" 39 | export BRATS_BUILDPACK_VERSION="$BRATS_BUILDPACK_VERSION" 40 | 41 | pod_definition=$(erb "$ROOT_DIR"/kube/brats/pod.yaml.erb) 42 | redacted_pod_definition=$(echo -e "$pod_definition" | sed -e '/COMPOSER/,+1d') 43 | cat < artifacts/"$(date +'%H:%M-%Y-%m-%d')"_brats.log 57 | status="$(container_status "catapult" "brats")" 58 | kubectl delete pod -n catapult brats 59 | exit "$status" 60 | -------------------------------------------------------------------------------- /modules/tests/cats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | 7 | DOMAIN=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 8 | 9 | if [ "${DEFAULT_STACK}" = "from_chart" ]; then 10 | DEFAULT_STACK=$(helm inspect helm/cf/ | grep DEFAULT_STACK | sed 's~DEFAULT_STACK:~~g' | sed 's~"~~g' | sed 's~\s~~g') 11 | export DEFAULT_STACK 12 | fi 13 | 14 | [ ! 
-d "cf-acceptance-tests" ] && git clone https://github.com/cloudfoundry/cf-acceptance-tests 15 | 16 | pushd cf-acceptance-tests || exit 17 | cat > config.json < 'tests-registry-credentials'] 39 | puts obj.to_json 40 | EOF 41 | } 42 | 43 | if kubectl get secrets -n scf 2>/dev/null | grep -qi tests-registry-credentials; then 44 | kubectl delete secret tests-registry-credentials -n scf 45 | fi 46 | if kubectl get pods -n scf 2>/dev/null | grep -qi acceptance-tests; then 47 | kubectl delete pod acceptance-tests -n scf 48 | fi 49 | 50 | SECRETS_FILE=${SECRETS_FILE:-"$ROOT_DIR"/../cloudfoundry/secure/concourse-secrets.yml.gpg} 51 | # Create secret for the imagePullSecrets we renamed in the scf images: 52 | kubectl create secret docker-registry tests-registry-credentials \ 53 | --namespace scf \ 54 | --docker-server="$(grep "docker-internal-registry:" <<< "$(gpg --decrypt --batch "$SECRETS_FILE")" | cut -d ' ' -f 3- )" \ 55 | --docker-username="$(grep "docker-internal-username:" <<< "$(gpg --decrypt --batch "$SECRETS_FILE")" | cut -d ' ' -f 3- )" \ 56 | --docker-password="$(grep "docker-internal-password:" <<< "$(gpg --decrypt --batch "$SECRETS_FILE")" | cut -d ' ' -f 3- )" 57 | 58 | image=$(gawk '$1 == "image:" { gsub(/"/, "", $2); print $2 }' kube/cf/bosh-task/acceptance-tests.yaml) 59 | kubectl run \ 60 | --namespace scf \ 61 | --attach \ 62 | --restart=Never \ 63 | --image="$image" \ 64 | --overrides="$(kube_overrides)" \ 65 | "smoke-tests" ||: 66 | 67 | wait_container_attached "scf" "acceptance-tests" 68 | 69 | mkdir -p artifacts 70 | kubectl logs -f acceptance-tests -n scf > artifacts/"$(date +'%H:%M-%Y-%m-%d')"_acceptance-tests.log 71 | 72 | exit "$(container_status "scf" "acceptance-tests")" 73 | -------------------------------------------------------------------------------- /modules/tests/defaults.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # SMOKES option 4 | ############### 5 | 6 | SMOKES_REPO="${SMOKES_REPO:-https://github.com/cloudfoundry/cf-smoke-tests}" 7 | 8 | # CATS options 9 | ############## 10 | 11 | CATS_REPO="${CATS_REPO:-https://github.com/cloudfoundry/cf-acceptance-tests}" 12 | 13 | # BRATS options 14 | ############### 15 | 16 | PROXY_SCHEME="${PROXY_SCHEME:-http}" 17 | BRATS_CF_USERNAME="${BRATS_CF_USERNAME:-admin}" 18 | PROXY_PORT="${PROXY_PORT:-9002}" 19 | PROXY_USERNAME="${PROXY_USERNAME:-username}" 20 | PROXY_PASSWORD="${PROXY_PASSWORD:-password}" 21 | BRATS_TEST_SUITE="${BRATS_TEST_SUITE:-brats}" 22 | CF_STACK="${CF_STACK:-sle15}" 23 | GINKGO_ATTEMPTS="${GINKGO_ATTEMPTS:-3}" 24 | 25 | BRATS_BUILDPACK="${BRATS_BUILDPACK}" 26 | BRATS_BUILDPACK_URL="${BRATS_BUILDPACK_URL}" 27 | BRATS_BUILDPACK_VERSION="${BRATS_BUILDPACK_VERSION}" 28 | 29 | # Sample app options 30 | #################### 31 | 32 | SAMPLE_APP_REPO="${SAMPLE_APP_REPO:-https://github.com/cloudfoundry-samples/cf-sample-app-nodejs}" 33 | 34 | # KubeCF tests options 35 | ###################### 36 | 37 | KUBECF_CHECKOUT="${KUBECF_CHECKOUT:-}" 38 | KUBECF_TEST_SUITE="${KUBECF_TEST_SUITE:-smokes}" # smokes, sits, brain, cats, cats-internetless 39 | KUBECF_DEPLOYMENT_NAME="${KUBECF_DEPLOYMENT_NAME:-susecf-scf}" 40 | KUBECF_NAMESPACE="${KUBECF_NAMESPACE:-scf}" 41 | 42 | # Stress test options 43 | ##################### 44 | 45 | STRESS_TEST_REPO="${STRESS_TEST_REPO:-https://github.com/mudler/cf-benchmark-tools}" 46 | TEST_CONCURRENCY="${TEST_CONCURRENCY:-2}" 47 | TEST_RATE="${TEST_RATE:-2}" 48 | TEST_DURATION="${TEST_DURATION:-20m}" 49 | 
-------------------------------------------------------------------------------- /modules/tests/eirini_persi.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | # Don't touch original copy 7 | cp -rfv ../contrib/samples/eirini-persi-test ./ 8 | 9 | pushd eirini-persi-test || exit 10 | 11 | go build -o persi-test main.go 12 | 13 | if [ -n "$EKCP_PROXY" ]; then 14 | export https_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 15 | export http_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 16 | fi 17 | 18 | 19 | cf delete-service -f eirini-persi-mount || true 20 | cf delete -f persitest || true 21 | 22 | cf service-brokers 23 | cf marketplace -s eirini-persi 24 | cf enable-service-access eirini-persi 25 | cf create-service eirini-persi default eirini-persi-mount 26 | kubectl get pvc -n eirini || true 27 | cf push --no-start 28 | cf bind-service persitest eirini-persi-mount 29 | cf start persitest 30 | url=http://"$(cf a | grep "persitest" | gawk '{ print $6 }')" 31 | [[ $(curl "$url") == "1" ]] || exit 1 32 | cf restage persitest 33 | [[ $(curl "$url") == "0" ]] || exit 1 34 | -------------------------------------------------------------------------------- /modules/tests/kubecats.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | 8 | DOMAIN=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 9 | export DOMAIN 10 | DEPLOYED_CHART=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["chart"]') 11 | 12 | info 13 | info "@@@@@@@@@" 14 | info "Running CATs on deployed chart $DEPLOYED_CHART" 15 | info "@@@@@@@@@" 16 | info 17 | 18 | kubectl create namespace catapult || true 19 | kubectl delete pod cats -n catapult || true 20 | 21 | if [ "${DEFAULT_STACK}" = "from_chart" ]; then 22 | DEFAULT_STACK=$(helm inspect helm/cf/ | grep DEFAULT_STACK | sed 's~DEFAULT_STACK:~~g' | sed 's~"~~g' | sed 's~\s~~g') 23 | export DEFAULT_STACK 24 | fi 25 | 26 | export CATS_REPO=$CATS_REPO 27 | pod_definition=$(erb "$ROOT_DIR"/kube/cats/pod.yaml.erb) 28 | cat < artifacts/"$(date +'%H:%M-%Y-%m-%d')"_cats.log 42 | status="$(container_status "catapult" "cats")" 43 | kubectl delete pod -n catapult cats 44 | exit "$status" 45 | -------------------------------------------------------------------------------- /modules/tests/kubesmokes.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . 
.envrc 6 | 7 | 8 | DOMAIN=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 9 | export DOMAIN 10 | DEPLOYED_CHART=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["chart"]') 11 | 12 | info 13 | info "@@@@@@@@@" 14 | info "Running Smoke tests on deployed chart $DEPLOYED_CHART" 15 | info "@@@@@@@@@" 16 | info 17 | 18 | kubectl create namespace catapult || true 19 | kubectl delete pod smokes -n catapult || true 20 | 21 | export SMOKES_REPO=$SMOKES_REPO 22 | pod_definition=$(erb "$ROOT_DIR"/kube/smokes/pod.yaml.erb) 23 | cat < artifacts/"$(date +'%H:%M-%Y-%m-%d')"_smokes.log 37 | status="$(container_status "catapult" "smokes")" 38 | kubectl delete pod -n catapult smokes 39 | exit "$status" 40 | -------------------------------------------------------------------------------- /modules/tests/sample-ticking.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | # Don't touch original copy 7 | cp -rfv ../contrib/samples/ticking_app ./ 8 | 9 | pushd ticking_app || exit 10 | 11 | go build -o log_producing_app main.go 12 | 13 | if [ -n "$EKCP_PROXY" ]; then 14 | export https_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 15 | fi 16 | 17 | cf push 18 | -------------------------------------------------------------------------------- /modules/tests/sample.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | SAMPLE_FOLDER=$(basename "$SAMPLE_APP_REPO") 8 | 9 | [ ! -d "$SAMPLE_FOLDER" ] && git clone --recurse-submodules "$SAMPLE_APP_REPO" "$SAMPLE_FOLDER" 10 | 11 | # if we have a Java Spring app we need to build it before pushing 12 | if [ -f "${SAMPLE_FOLDER}/gradlew" ]; then 13 | pushd "$SAMPLE_FOLDER" || exit 1 14 | ./gradlew clean assemble 15 | popd || exit 1 16 | fi 17 | 18 | pushd "$SAMPLE_FOLDER" || exit 19 | 20 | if [ -n "$EKCP_PROXY" ]; then 21 | export https_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 22 | fi 23 | 24 | cf push 25 | -------------------------------------------------------------------------------- /modules/tests/smoke.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ../../include/common.sh 4 | . .envrc 5 | 6 | DOMAIN=$(kubectl get configmap -n kube-system cap-values -o json | jq -r '.data["domain"]') 7 | 8 | [ !
-d "cf-smoke-tests" ] && git clone https://github.com/cloudfoundry/cf-smoke-tests 9 | 10 | pushd cf-smoke-tests || exit 11 | cat > config.json < 'tests-registry-credentials'] 39 | puts obj.to_json 40 | EOF 41 | } 42 | 43 | if kubectl get secrets -n scf 2>/dev/null | grep -qi tests-registry-credentials; then 44 | kubectl delete secret tests-registry-credentials -n scf 45 | fi 46 | if kubectl get pods -n scf 2>/dev/null | grep -qi smoke-tests; then 47 | kubectl delete pod smoke-tests -n scf 48 | fi 49 | 50 | SECRETS_FILE=${SECRETS_FILE:-"$ROOT_DIR"/../cloudfoundry/secure/concourse-secrets.yml.gpg} 51 | # Create secret for the imagePullSecrets we renamed in the scf images: 52 | kubectl create secret docker-registry tests-registry-credentials \ 53 | --namespace scf \ 54 | --docker-server="$(grep "docker-internal-registry:" <<< "$(gpg --decrypt --batch "$SECRETS_FILE")" | cut -d ' ' -f 3- )" \ 55 | --docker-username="$(grep "docker-internal-username:" <<< "$(gpg --decrypt --batch "$SECRETS_FILE")" | cut -d ' ' -f 3- )" \ 56 | --docker-password="$(grep "docker-internal-password:" <<< "$(gpg --decrypt --batch "$SECRETS_FILE")" | cut -d ' ' -f 3- )" 57 | 58 | image=$(gawk '$1 == "image:" { gsub(/"/, "", $2); print $2 }' kube/cf/bosh-task/smoke-tests.yaml) 59 | kubectl run \ 60 | --namespace scf \ 61 | --attach \ 62 | --restart=Never \ 63 | --image="$image" \ 64 | --overrides="$(kube_overrides)" \ 65 | "smoke-tests" ||: 66 | 67 | wait_container_attached "scf" "smoke-tests" 68 | 69 | mkdir -p artifacts 70 | kubectl logs -f smoke-tests -n scf > artifacts/"$(date +'%H:%M-%Y-%m-%d')"_smoke-tests.log 71 | 72 | exit "$(container_status "scf" "smoke-tests")" 73 | -------------------------------------------------------------------------------- /modules/tests/stress-benchmark.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . ./defaults.sh 4 | . ../../include/common.sh 5 | . .envrc 6 | 7 | STRESS_FOLDER=$(basename "$STRESS_TEST_REPO") 8 | 9 | [ ! -d "$STRESS_FOLDER" ] && git clone --recurse-submodules "$STRESS_TEST_REPO" "$STRESS_FOLDER" 10 | 11 | pushd "$STRESS_FOLDER" || exit 12 | 13 | if [ -n "$EKCP_PROXY" ]; then 14 | export https_proxy=socks5://127.0.0.1:${KUBEPROXY_PORT} 15 | fi 16 | 17 | 18 | export TEST_CONCURRENCY TEST_RATE TEST_DURATION 19 | ROOT_DIR=$BUILD_DIR/cf-benchmark-tools bash ./start_benchmark.sh 20 | 21 | ok "stressbenchmark finished, results in $BUILD_DIR/cf-benchmark-tools/bench*" 22 | -------------------------------------------------------------------------------- /scripts/image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | . include/versioning.sh 4 | 5 | TAG=${TAG:-$ARTIFACT_VERSION} 6 | DOCKER_IMAGE=${DOCKER_IMAGE:-${DOCKER_ORG}catapult:${TAG}} 7 | 8 | docker build --rm --no-cache -t ${DOCKER_IMAGE} . 
9 | -------------------------------------------------------------------------------- /tests/.gitignore: -------------------------------------------------------------------------------- 1 | shunit2/ -------------------------------------------------------------------------------- /tests/Makefile: -------------------------------------------------------------------------------- 1 | .DEFAULT_GOAL := all 2 | 3 | .PHONY: integration-tests 4 | integration-tests: 5 | ./integration_tests.sh 6 | 7 | .PHONY: unit-tests 8 | unit-tests: 9 | ./unit_tests.sh 10 | 11 | .PHONY: lint 12 | lint: 13 | ./lint.sh 14 | 15 | .PHONY: all 16 | all: lint unit-tests integration-tests 17 | -------------------------------------------------------------------------------- /tests/integration_tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ROOT_DIR="$(git rev-parse --show-toplevel)" 4 | export CLUSTER_NAME=test 5 | export BACKEND=kind 6 | export ENABLE_EIRINI=false 7 | 8 | [ ! -d "shunit2" ] && git clone https://github.com/kward/shunit2.git 9 | 10 | setUp() { 11 | ROOT_DIR="$(git rev-parse --show-toplevel)" 12 | export ROOT_DIR 13 | pushd "$ROOT_DIR" || exit 14 | } 15 | 16 | tearDown() { 17 | ROOT_DIR="$(git rev-parse --show-toplevel)" 18 | export ROOT_DIR 19 | pushd "$ROOT_DIR" || exit 20 | rm -rf buildtest 21 | } 22 | 23 | # # Tests creation and deletion of build directory 24 | # testDeployment() { 25 | # rm -rf buildtest 26 | # make scf-deploy 27 | # deployst=$? 28 | # pushd buildtest || exit 29 | # source .envrc 30 | # popd || exit 31 | # assertTrue 'create buildir' "[ -d 'buildtest' ]" 32 | # PODS="$(kubectl get pods -n scf)" 33 | # SVCS="$(kubectl get svc -n scf)" 34 | # assertContains 'contains scf-cc-worker-v1 pod' "$PODS" 'scf-cc-worker-v1' 35 | # assertContains 'contains cf-operator-webhook svc' "$SVCS" 'cf-operator-webhook' 36 | # assertContains 'contains scf-api svc' "$SVCS" 'scf-api' 37 | # assertTrue 'deploys successfully' "[ \"$deployst\" == \"0\"]" 38 | # make tests-smoke-kube 39 | # kubert=$? 40 | # assertTrue 'smoke pass successfully' "[ \"$kubert\" == \"0\"]" 41 | # make clean 42 | # assertTrue 'clean buildir' "[ ! -d 'buildtest' ]" 43 | # } 44 | 45 | testKind() { 46 | rm -rf buildtest 47 | BACKEND=kind DOWNLOAD_CATAPULT_DEPS=true make k8s 48 | deployst=$? 49 | # with DOWNLOAD_CATAPULT_DEPS=true the following needs to happen: 50 | assertTrue 'kind binary is present' "[ -f 'buildtest/bin/kind' ]" 51 | echo "DEPLOYS: $deployst" 52 | assertTrue 'create buildir' "[ -d 'buildtest' ]" 53 | assertEquals 'deploys successfully' "$deployst" "0" 54 | make scf-chart 55 | assertTrue 'helm folder is present' "[ -d 'buildtest/helm' ]" 56 | AUTOSCALER=true make scf-gen-config 57 | VALUES_FILE=$(cat "$ROOT_DIR"/buildtest/scf-config-values.yaml) 58 | assertContains 'generates correctly AUTOSCALER' "$VALUES_FILE" "autoscaler: true" 59 | make module-extra-ingress 60 | deployst=$? 61 | assertEquals 'deploys ingress successfully' "$deployst" "0" 62 | echo "#!/bin/bash" > buildtest/test.sh 63 | echo "set -ex" >> buildtest/test.sh 64 | echo "echo 'test'" >> buildtest/test.sh 65 | chmod +x buildtest/test.sh 66 | TASK_SCRIPT="$PWD/buildtest/test.sh" make module-extra-task 67 | taskst=$? 68 | assertEquals 'Executes task successfully' "$taskst" "0" 69 | make clean 70 | assertTrue 'clean buildir' "[ ! -d 'buildtest' ]" 71 | } 72 | 73 | # Load shUnit2. 74 | .
./shunit2/shunit2 75 | -------------------------------------------------------------------------------- /tests/lint.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | . ../include/common.sh 4 | 5 | retcode=0 6 | BUILDDIR_REGEXP="^$ROOT_DIR/build" 7 | 8 | info "Linting shell scripts" && debug_mode 9 | SH_FILES=$(find "$ROOT_DIR" -type f -name '*.sh' -o -name '*.ksh' -o -name '*.bash' | grep -v "shunit2" | grep -v "$BUILDDIR_REGEXP" ) 10 | # SC1090 we are building paths with $BACKEND 11 | shellcheck --severity=warning -e SC1090 $SH_FILES || retcode=1 12 | 13 | info "Linting yamls" && debug_mode 14 | YML_FILES=$(find "$ROOT_DIR" -type f -name '*.yaml' -o -name '*.yml' | grep -v "shunit2" | grep -v "$BUILDDIR_REGEXP") 15 | yamllint -d "{extends: relaxed, rules: {line-length: {max: 120}}}" --strict $YML_FILES || retcode=1 16 | 17 | if [[ $retcode == 1 ]] ; then 18 | err "Linting failed" && exit 1 19 | else 20 | ok "Linting passed" 21 | fi 22 | -------------------------------------------------------------------------------- /tests/mocks/helm: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -Eeuo pipefail 4 | 5 | echo "Using helmmock" 6 | 7 | exit 0 -------------------------------------------------------------------------------- /tests/mocks/kubectl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -Eeuo pipefail 4 | 5 | SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" 6 | 7 | if [ "$1" = get ] && [ "$2" = configmap ]; then 8 | cat "$SCRIPTPATH"/kubectl_output_get_configmap.json 9 | fi 10 | 11 | if [ "$1" = get ] && [ "$2" = nodes ]; then 12 | cat "$SCRIPTPATH"/kubectl_output_get_nodes.json 13 | fi 14 | 15 | if [ "$1" = get ] && [ "$2" = pods ]; then 16 | cat "$SCRIPTPATH"/kubectl_output_get_pods 17 | fi 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /tests/mocks/kubectl_output_get_configmap.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "data": { 4 | "chart": "https://github.com/SUSE/scf/releases/download/2.19.1/scf-sle-2.19.1%2Bcf9.5.0.0.gd8c18ede.zip", 5 | "domain": "172.17.0.2.omg.howdoi.website", 6 | "platform": "kind", 7 | "public-ip": "172.17.0.2" 8 | }, 9 | "kind": "ConfigMap", 10 | "metadata": { 11 | "creationTimestamp": "2019-12-06T14:09:55Z", 12 | "name": "cap-values", 13 | "namespace": "kube-system", 14 | "resourceVersion": "808", 15 | "selfLink": "/api/v1/namespaces/kube-system/configmaps/cap-values", 16 | "uid": "8902e8ff-1eab-4235-8422-8eca812f040e" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /tests/mocks/ssh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -Eeuo pipefail 4 | 5 | echo "Using helmmock ssh" >&2 6 | echo "1.1.1.1" 7 | 8 | exit 0 9 | -------------------------------------------------------------------------------- /tests/mocks/terraform: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -Eeuo pipefail 4 | 5 | echo "Using terraform helmmock" 6 | 7 | exit 0 8 | --------------------------------------------------------------------------------
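Note: the executables under tests/mocks are thin stand-ins for kubectl, helm, ssh and terraform that print canned output and exit 0, so catapult's scripts can be exercised without a live cluster. A minimal sketch of how a test can consume them, in the same style as the scripts above (the PATH wiring is an assumption for illustration; the real setup lives in tests/unit_tests.sh, which is not part of this listing): 1 | # resolve the mocks ahead of the real binaries 2 | export PATH="$ROOT_DIR/tests/mocks:$PATH" 3 | kubectl get configmap cap-values # prints the canned cap-values JSON fixture 4 | helm ls # prints "Using helmmock" and exits 0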