├── .adr-dir ├── .envrc ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── pull_request_template.md └── workflows │ ├── check-links.yml │ └── tests.yml ├── .gitignore ├── CONTRIBUTING.md ├── LICENSE ├── NOTICE ├── README.md ├── build ├── build.sh ├── generate-kbld-config.sh ├── kbld.yml └── routecontroller-image.yml ├── ci ├── README.md ├── cf-k8s-networking-images.md ├── cf-k8s-networking-images.yml ├── cf-k8s-networking-istio-upgrade.md ├── cf-k8s-networking-istio-upgrade.yml ├── cf-k8s-networking.md ├── cf-k8s-networking.yml ├── cf-k8s-pr.yml ├── cf-k8s-upgrade.yml ├── dockerfiles │ ├── deploy │ │ └── Dockerfile │ ├── test │ │ └── Dockerfile │ └── upgrade │ │ ├── Dockerfile │ │ ├── roll.sh │ │ └── setup.sh ├── github-actions │ └── publish-docker-image ├── istio-upgrade.yml ├── reconfigure ├── scaling.yml ├── tasks │ ├── build-image │ │ ├── task.sh │ │ └── task.yml │ ├── cf4k8s │ │ ├── generate-github-release.sh │ │ ├── generate-github-release.yml │ │ ├── generate-integration-config.sh │ │ ├── generate-integration-config.yml │ │ ├── push-app.sh │ │ ├── push-app.yml │ │ ├── run-upgrade-uptime-test.sh │ │ └── run-upgrade-uptime-test.yml │ ├── docker │ │ ├── add-updated-digest.sh │ │ ├── add-updated-digest.yml │ │ ├── annotate.sh │ │ ├── annotate.yml │ │ ├── update-image-digest-in-cf-for-k8s.sh │ │ └── update-image-digest-in-cf-for-k8s.yml │ ├── helpers.sh │ ├── istio │ │ ├── deploy-istio.sh │ │ ├── deploy-istio.yml │ │ ├── enable-sidecar-injection.sh │ │ ├── enable-sidecar-injection.yml │ │ ├── install-grafana-dashboard.sh │ │ └── install-grafana-dashboard.yml │ ├── k8s │ │ ├── kubectl-apply.sh │ │ └── kubectl-apply.yml │ ├── scale │ │ ├── pave-cf-for-scale-tests.sh │ │ ├── pave-cf-for-scale-tests.yml │ │ ├── run-scale-tests.sh │ │ └── run-scale-tests.yml │ ├── team │ │ ├── create-community-chore.sh │ │ ├── create-community-chore.yml │ │ ├── create-istio-bump-story.sh │ │ ├── create-istio-bump-story.yml │ │ ├── create-istio-osm-story.sh │ │ └── create-istio-osm-story.yml │ └── tests │ │ ├── run-networking-acceptance-gke.sh │ │ ├── run-networking-acceptance-gke.yml │ │ ├── run-routecontroller-integration-tests.yml │ │ ├── run-routecontroller-units.yml │ │ └── stress │ │ ├── run-stress-tests.sh │ │ └── run-stress-tests.yml └── team-helpers.yml ├── code-of-conduct.md ├── config ├── crd │ └── networking.cloudfoundry.org_routes.yaml ├── routecontroller │ ├── cluster-role-binding.yaml │ ├── cluster-role.yaml │ ├── routecontroller-configmap.yaml │ ├── routecontroller.yaml │ └── service-account.yaml └── values │ ├── _defaults.yml │ └── images.yml ├── doc ├── access-logs.md ├── architecture-decisions │ ├── 0001-record-architecture-decisions.md │ ├── 0002-directly-create-istio-resources.md │ ├── 0003-tagging-and-publishing-docker-images.md │ ├── 0004-strategy-for-securing-network-traffic.md │ ├── 0005-networking-acceptance-tests.md │ ├── 0006-rewrite-http-liveness-readiness-probes-for-healthchecks.md │ ├── 0007-maintain-generated-istio.md │ ├── 0008-implement-workarounds-for-capi-and-log-cache-to-unblock-global-strict-mtls.md │ ├── 0009-kubebuilder-controllers-dynamic-client-over-generated-clients.md │ ├── 0010-route-crd-and-kubebuilder-instead-of-metacontroller.md │ ├── 0011-use-kind-clusters-for-routecontroller-integration.md │ ├── 0012-routecontroller-route-deletion-finalizer.md │ ├── 0013-rename-master-branch.md │ ├── 0014-ingress-gateway-as-a-daemon-set.md │ ├── 0015-app-access-logs-from-ingressgateway.md │ ├── 0016-job-for-upgrading-istio-sidecars-on-workloads.md │ 
├── 0017-moving-istio-configuration-out-of-this-repo.md │ ├── 0018-create-policy-server.md │ └── 0019-route-crd-and-contour-controller.md ├── assets │ ├── architecture.png │ ├── duration-flamegraph.jpg │ ├── ingress-gateway-topology-directly-to-worker-nodes.jpg │ ├── ingress-gateway-topology-external-lb.jpg │ ├── ingress-gateway-topology-lb-service.jpg │ ├── ingress-routing-no-lb.png │ ├── ingress-to-sys-non-tls.jpg │ ├── ingress-to-sys-tls.jpg │ ├── liveness-probe-adr-1.png │ ├── liveness-probe-adr-2.png │ ├── liveness-probe-adr-3.png │ ├── network-configuration.png │ ├── network-egress.png │ ├── network-envoy-tap.sh │ ├── network-envoy.png │ ├── network-ksniff-wireshark.png │ ├── network-simple-envoy.yaml │ ├── network-traffic.png │ ├── routecontroller-data-flow-diagram.png │ └── routecontroller-design.png ├── ingress-routing-topology.md ├── metrics │ ├── README.md │ └── indicators.yml ├── network-stack.md └── update-istio.md ├── hack └── cf4k8s │ ├── README.md │ ├── create-and-deploy.sh │ ├── create-huge.sh │ ├── destroy.sh │ ├── fetch-acceptance-values.sh │ ├── methods.sh │ └── redeploying-acceptance.md ├── routecontroller ├── .gitignore ├── Dockerfile ├── Makefile ├── PROJECT ├── apis │ ├── istio │ │ └── networking │ │ │ └── v1alpha3 │ │ │ ├── groupversion_info.go │ │ │ ├── virtualservice_types.go │ │ │ └── zz_generated.deepcopy.go │ └── networking │ │ └── v1alpha1 │ │ ├── groupversion_info.go │ │ ├── route_types.go │ │ └── zz_generated.deepcopy.go ├── cfg │ ├── cfg_suite_test.go │ ├── config.go │ └── config_test.go ├── config │ ├── certmanager │ │ ├── certificate.yaml │ │ ├── kustomization.yaml │ │ └── kustomizeconfig.yaml │ ├── crd │ │ ├── bases │ │ │ ├── _.yaml │ │ │ └── networking.cloudfoundry.org_routes.yaml │ │ ├── kustomization.yaml │ │ ├── kustomizeconfig.yaml │ │ └── patches │ │ │ ├── cainjection_in_routes.yaml │ │ │ └── webhook_in_routes.yaml │ ├── default │ │ ├── kustomization.yaml │ │ ├── manager_auth_proxy_patch.yaml │ │ ├── manager_webhook_patch.yaml │ │ └── webhookcainjection_patch.yaml │ ├── manager │ │ ├── kustomization.yaml │ │ └── manager.yaml │ ├── prometheus │ │ ├── kustomization.yaml │ │ └── monitor.yaml │ ├── rbac │ │ ├── auth_proxy_role.yaml │ │ ├── auth_proxy_role_binding.yaml │ │ ├── auth_proxy_service.yaml │ │ ├── kustomization.yaml │ │ ├── leader_election_role.yaml │ │ ├── leader_election_role_binding.yaml │ │ ├── role.yaml │ │ ├── role_binding.yaml │ │ ├── route_editor_role.yaml │ │ └── route_viewer_role.yaml │ ├── samples │ │ ├── route-with-no-destinations.yaml │ │ └── route-with-single-destination.yaml │ └── webhook │ │ ├── kustomization.yaml │ │ ├── kustomizeconfig.yaml │ │ ├── manifests.yaml │ │ └── service.yaml ├── controllers │ └── networking │ │ └── route_controller.go ├── go.mod ├── go.sum ├── hack │ └── boilerplate.go.txt ├── integration │ ├── README.md │ ├── fixtures │ │ ├── context-path-route-for-single-fqdn1.yaml │ │ ├── context-path-route-for-single-fqdn2.yaml │ │ ├── istio-virtual-service.yaml │ │ ├── multiple-routes-with-different-fqdn.yaml │ │ ├── multiple-routes-with-same-fqdn.yaml │ │ ├── route-without-any-destination.yaml │ │ ├── route.yaml │ │ ├── single-route-with-multiple-destinations.yaml │ │ ├── single-route-with-no-destination.yaml │ │ ├── single-route-with-single-destination.yaml │ │ └── single-route-with-updated-single-destination.yaml │ ├── integration_suite_test.go │ └── integration_test.go ├── main.go ├── resourcebuilders │ ├── resourcebuilders_suite_test.go │ ├── service_builder.go │ ├── service_builder_test.go │ ├── 
virtual_service_builder.go │ └── virtual_service_builder_test.go ├── scripts │ ├── integration │ ├── stress │ └── test └── stress │ ├── README.md │ ├── fixtures │ ├── cluster.yml │ ├── route_template.yml │ └── service.yml │ ├── matchers │ └── exit_and_log_matcher.go │ ├── stress_suite_test.go │ └── stress_test.go ├── scripts └── vendir-sync-local ├── test ├── acceptance │ ├── README.md │ ├── acceptance_test.go │ ├── assets │ │ ├── allow-ingress-from-apps-network-policy.yaml │ │ ├── app │ │ │ └── index.html │ │ ├── outbound-network-request-app │ │ │ ├── go.mod │ │ │ └── main.go │ │ └── system-component.yml │ ├── bin │ │ └── test_local │ ├── cfg │ │ └── cfg.go │ ├── go.mod │ ├── go.sum │ ├── mtls_test.go │ ├── policy_test.go │ └── startup_connectivity_test.go ├── scale │ ├── README.md │ ├── go.mod │ ├── go.sum │ ├── internal │ │ └── collector │ │ │ └── route_mapper.go │ ├── scale_suite_test.go │ └── scale_test.go └── uptime │ ├── README.md │ ├── control_plane_uptime_test.go │ ├── data_plane_uptime_test.go │ ├── go.mod │ ├── go.sum │ ├── internal │ ├── checker │ │ └── upgrade.go │ ├── collector │ │ └── request.go │ └── uptime │ │ ├── control_plane_results.go │ │ └── data_plane_results.go │ └── uptime_tests_suite_test.go └── version /.adr-dir: -------------------------------------------------------------------------------- 1 | doc/architecture-decisions 2 | -------------------------------------------------------------------------------- /.envrc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/.envrc -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Summary 11 | _In your own words, describe the issue._ 12 | 13 | ### Deployment Configuration 14 | * **cf-for-k8s version:** `[cf-for-k8s sha / tag hyperlink here]` 15 | * **cf-k8s-networking version:** `[cf-k8s-networking sha / tag hyperlink here (check vendir.yml file in cf-for-k8s)]` 16 | * **Deploy command:** [Please include the `kapp deploy...` command, including all `config-optional` files] 17 | * **Kubernetes CLI and API version:** [`kubectl version`] 18 | * **IaaS:** [IaaS name(s) here (GKE, AKS, EKS, PKS, minikube, kind, etc)] 19 | 20 | [Describe any other special configuration here] 21 | 22 | ### Reproduction Steps 23 | _What steps/actions led to the issue?_ 24 | 25 | ### Logs 26 | _It's helpful to include snippets of the error response or logs output_ 27 | 28 | ### Expected behavior 29 | _What was the expected result?_ 30 | 31 | ### Additional context 32 | _Add any other context about the problem here._ 33 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "[REQUEST]" 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ### Is your feature request related to a problem? Please describe. 
11 | _A clear and concise description of what the problem is._ 12 | 13 | ### Describe the solution you'd like 14 | _A clear and concise description of what you want to happen._ 15 | 16 | ### Describe alternatives you've considered 17 | _A clear and concise description of any alternative solutions or features you've considered._ 18 | 19 | ### Additional context 20 | _Add any other context or documents about the feature request here. For larger feature requests, we found using Google docs to collaborate is effective._ 21 | 22 | ### Contributions 23 | _Are you able to contribute the changes to make this feature work?_ 24 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | > Thanks for contributing to cf-k8s-networking! 4 | > 5 | > We've designed this PR template to speed up the PR review and merge process - please use it. 6 | 7 | Checkout the [contributing guidelines](https://github.com/cloudfoundry/cf-k8s-networking/blob/develop/CONTRIBUTING.md) 8 | 9 | ### Summary of changes 10 | _Please describe the change here._ 11 | 12 | ### Related Issue(s) 13 | _Please link the corresponding issue to this PR._ 14 | 15 | ### Additional Context 16 | -_Include any links to related PRs, stories, slack discussions, etc... that will help establish context._ 17 | -_Is there anything else of note that the reviewers should know about this change?_ 18 | 19 | ### Acceptance Steps 20 | _Please provide a series of instructions (eg kubectl or cf cli commands) for how our Product Manager can verify that your changes were properly integrated_ 21 | 22 | 23 | _Tag your pair, your PM, and/or team_ 24 | 25 | > _It's helpful to tag a few other folks on your team or your team alias in case we need to follow up later._ 26 | 27 | -------------------------------------------------------------------------------- /.github/workflows/check-links.yml: -------------------------------------------------------------------------------- 1 | name: Check Dead Links 2 | 3 | on: push 4 | 5 | jobs: 6 | markdown-link-check: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@master 10 | - uses: gaurav-nelson/github-action-markdown-link-check@v1 11 | with: 12 | base-branch: develop 13 | check-modified-files-only: 'yes' 14 | -------------------------------------------------------------------------------- /.github/workflows/tests.yml: -------------------------------------------------------------------------------- 1 | name: Test and Tag 2 | on: [push] 3 | jobs: 4 | routecontroller_tests: 5 | name: Route Controller Tests 6 | runs-on: ubuntu-latest 7 | container: 8 | image: gcr.io/cf-networking-images/cfroutesync-integration-test-env 9 | steps: 10 | - name: Set up Go 11 | uses: actions/setup-go@v1 12 | with: 13 | go-version: 1.16 14 | - name: Check out code 15 | uses: actions/checkout@v1 16 | - name: Run tests 17 | working-directory: routecontroller 18 | run: scripts/test 19 | routecontroller_docker_push: 20 | name: Route Controller Docker Push 21 | needs: [routecontroller_tests] 22 | runs-on: ubuntu-latest 23 | container: 24 | image: "concourse/docker-image-resource:ubuntu" 25 | steps: 26 | - name: Check out code 27 | uses: actions/checkout@v1 28 | - name: Create Docker Image 29 | run: ci/github-actions/publish-docker-image 30 | env: 31 | GCR_SERVICE_ACCOUNT_JSON: ${{ secrets.GCR_SERVICE_ACCOUNT_JSON }} 32 | IMAGE_REPO: 
gcr.io/cf-networking-images/cf-k8s-networking/routecontroller 33 | DOCKERFILE_PATH: routecontroller/Dockerfile 34 | WORKING_DIR: routecontroller 35 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | bin/*.sh 2 | !bin/test 3 | !bin/env 4 | !bin/go 5 | /pkg 6 | *.test 7 | *.swp 8 | /tmp 9 | *.iml 10 | .idea/ 11 | tags 12 | .DS_Store 13 | *.coverprofile 14 | cfroutesync/integration/.kube 15 | .kube 16 | 17 | routecontroller/stress/results.json 18 | build/sources/ 19 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2019-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. 2 | 3 | This project is licensed to you under the Apache License, Version 2.0 (the "License"). 4 | You may not use this project except in compliance with the License. 5 | 6 | This project may include a number of subcomponents with separate copyright notices 7 | and license terms. Your use of these subcomponents is subject to the terms and 8 | conditions of the subcomponent's license, as noted in the LICENSE file. 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | cf-k8s-networking 2 | --- 3 | Routing and networking for Cloud Foundry running on Kubernetes. 4 | 5 | ## Deploying 6 | 7 | CF-K8s-Networking is a component of CF-for-K8s. To deploy CF-for-K8s reference 8 | the following documentation: 9 | 10 | * [Deploy Cloud Foundry on 11 | Kubernetes](https://github.com/cloudfoundry/cf-for-k8s/blob/master/docs/deploy.md) 12 | * [Deploy Cloud Foundry 13 | Locally](https://github.com/cloudfoundry/cf-for-k8s/blob/6e4ba5cc0514481a0675ea83731449c752b1dcad/docs/deploy-local.md) 14 | 15 | ## Architecture 16 | 17 | ![Architecture Diagram of 18 | CF-K8s-Networking](doc/assets/routecontroller-data-flow-diagram.png) 19 | 20 | * **RouteController:** Watches the Kubernetes API for Route CRs and translates 21 | the Route CRs into Istio Virtual Service CRs and Kubernetes Services 22 | accordingly to enable routing to applications deployed by Cloud Foundry. 23 | 24 | * **Istio:** CF-K8s-Networking currently depends on [Istio](https://istio.io/). 25 | * Istio serves as both our gateway router for ingress networking, replacing 26 | the role of the Gorouters in CF for VMs, and service mesh for (eventually) 27 | container-to-container networking policy enforcement. 28 | * We provide a manifest for installing our custom configuration for Istio, 29 | [here](https://github.com/cloudfoundry/cf-for-k8s/blob/master/config/istio/istio-generated/xxx-generated-istio.yaml). 30 | * Istio provides us with security features out of the box, such as: 31 | * Automatic Envoy sidecar injection for system components and application workloads 32 | * `Sidecar` Kubernetes resources that can limit egress traffic from workload `Pod`s 33 | * Transparent mutual TLS (mTLS) everywhere 34 | * (Eventually) app identity certificates using [SPIFFE](https://spiffe.io/) issued by Istio Citadel 35 | * Istio should be treated as an "implementation detail" of the platform and 36 | our reliance on it is subject to change 37 | * Istio config is located in [cf-for-k8s](https://github.com/cloudfoundry/cf-for-k8s) and it's managed by the cf-k8s-networking team. 
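For illustration, the kind of `Route` resource that RouteController watches and reconciles looks roughly like the sketch below. The field names here are an approximation, not the authoritative schema; see `config/crd/networking.cloudfoundry.org_routes.yaml` and the samples under `routecontroller/config/samples/` in this repo for real examples.

```bash
# Hypothetical sketch only -- field names are illustrative.
# The real schema lives in config/crd/networking.cloudfoundry.org_routes.yaml.
kubectl apply -f - <<EOF
apiVersion: networking.cloudfoundry.org/v1alpha1
kind: Route
metadata:
  name: example-route-guid
  namespace: cf-workloads
spec:
  host: my-app
  path: /
  domain:
    name: apps.example.com
    internal: false
  destinations:
  - guid: example-destination-guid
    port: 8080
    app:
      guid: example-app-guid
      process:
        type: web
EOF
```

From a resource like this, RouteController builds the corresponding Istio `VirtualService` and Kubernetes `Service` so that traffic for `my-app.apps.example.com` can reach the app's Pods.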
38 | 39 | ## Contributing 40 | For information about how to contribute, develop against our codebase, and run 41 | our various test suites, check out our [Contributing guidelines](CONTRIBUTING.md). 42 | 43 | -------------------------------------------------------------------------------- /build/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" 6 | KBLD_CONFIG_DIR="$(mktemp -d)" 7 | KBLD_LOCK_FILE="${SCRIPT_DIR}/kbld.lock.yml" 8 | 9 | function cleanup() { 10 | echo "Cleaning up..." 11 | rm -rf "${KBLD_CONFIG_DIR}" 12 | } 13 | 14 | trap cleanup EXIT 15 | 16 | pushd "${SCRIPT_DIR}" > /dev/null 17 | "${SCRIPT_DIR}/generate-kbld-config.sh" "${KBLD_CONFIG_DIR}/kbld.yml" 18 | 19 | kbld -f "${KBLD_CONFIG_DIR}" -f "${SCRIPT_DIR}/routecontroller-image.yml" --lock-output "${KBLD_LOCK_FILE}" 20 | 21 | popd > /dev/null 22 | -------------------------------------------------------------------------------- /build/generate-kbld-config.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | 3 | set -euo pipefail 4 | 5 | SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 6 | 7 | function generate_kbld_config() { 8 | local kbld_config_path="${1}" 9 | 10 | local source_path 11 | source_path="${SCRIPT_DIR}/.." 12 | 13 | pushd "${source_path}" > /dev/null 14 | local git_ref 15 | git_ref=$(git rev-parse HEAD) 16 | popd > /dev/null 17 | 18 | echo "Creating CF Networking release kbld config with ytt..." 19 | local kbld_config_values 20 | kbld_config_values=$(cat < "${kbld_config_path}" 29 | } 30 | 31 | function main() { 32 | local kbld_config_path="${1}" 33 | 34 | generate_kbld_config "${kbld_config_path}" 35 | } 36 | 37 | main "$@" 38 | -------------------------------------------------------------------------------- /build/kbld.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: kbld.k14s.io/v1alpha1 4 | kind: Config 5 | minimumRequiredVersion: 0.28.0 6 | sources: 7 | - imageRepo: cloudfoundry/routecontroller 8 | path: ../routecontroller 9 | pack: 10 | build: 11 | builder: paketobuildpacks/builder:tiny 12 | buildpacks: 13 | - gcr.io/paketo-buildpacks/go 14 | rawOptions: 15 | - --env 16 | - #@ "BP_OCI_REVISION={}".format(data.values.git_ref) 17 | - --env 18 | - #@ "BP_OCI_SOURCE={}".format(data.values.git_url) 19 | 20 | destinations: 21 | - imageRepo: cloudfoundry/routecontroller 22 | newImage: index.docker.io/cloudfoundry/routecontroller 23 | -------------------------------------------------------------------------------- /build/routecontroller-image.yml: -------------------------------------------------------------------------------- 1 | # NOTE: There is no reference in the templates for this image. This file is a token to trigger the kbld process. 2 | --- 3 | image: cloudfoundry/routecontroller 4 | 5 | -------------------------------------------------------------------------------- /ci/README.md: -------------------------------------------------------------------------------- 1 | ## CF K8s Networking pipeline 2 | 3 | The main pipeline for cf-k8s-networking lives in the `ci` directory and can be viewed [here](https://release-integration.ci.cf-app.com/teams/main/pipelines/cf-k8s-networking). 
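To set or update this pipeline from a local checkout, the repo ships a `ci/reconfigure` wrapper around `fly`. A minimal usage sketch of the equivalent commands (the `cf-k8s` Concourse target name is the script's default and is assumed here):

```bash
# Roughly equivalent to running ./ci/reconfigure cf-k8s-networking
fly -t cf-k8s login -b                # only needed if not already logged in
fly -t cf-k8s set-pipeline -p cf-k8s-networking -c ci/cf-k8s-networking.yml
```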
-------------------------------------------------------------------------------- /ci/cf-k8s-networking-images.md: -------------------------------------------------------------------------------- 1 | # cf-k8s-networking-images 2 | 3 | ## Purpose 4 | This pipeline builds some of the images used by cf-for-k8s networking and the images used in the validation of networking in cf-for-k8s. Once built, the images are pushed to repositories in the cloudfoundry Dockerhub organization. 5 | 6 | ## build-and-annotate-fluent-bit-image 7 | Builds the Docker image for the fluentbit sidecar that we colocate with the Istio Ingressgateway Pods. This sidecar is responsible for getting access logs for apps into the logging system. It automatically commits an image bump to the `develop` branch of cf-for-k8s which is tested by CI. 8 | 9 | Disclaimer: This does not actually annotate the image. 10 | 11 | ## build-upgrade-sidecars-job-image 12 | Builds the Docker image for the upgrade-sidecars Job that we run after upgrading Istio. This Job is responsible for rolling out apps and system components so that they have an up to date sidecar proxy. It automatically commits an image bump to the `develop` branch of cf-for-k8s which is tested by CI. 13 | 14 | ## build-httpbin-image 15 | This builds a modified version of the [httpbin](https://httpbin.org) app that the networking acceptance tests use internally for testing. 16 | 17 | ## build-proxy-image 18 | This builds the [CF "proxy" app](https://github.com/cf-routing/proxy) that the networking acceptance tests use internally for testing Pod to Pod connectivity. 19 | -------------------------------------------------------------------------------- /ci/cf-k8s-networking-istio-upgrade.md: -------------------------------------------------------------------------------- 1 | # cf-k8s-networking-istio-upgrade 2 | 3 | ## Purpose 4 | This pipeline is designed to test istio upgrades as new patch releases become available within a specified minor version line. Upon validation, the release update is pushed to the `istio-version-bump` branch on `cf-for-k8s`. 5 | 6 | ## Updating 7 | To update the minor version line being tested by the pipeline, update the `tag_filter` field of the `istio-release` resource in the pipeline template. 8 | 9 | ## Test Environment 10 | The pipeline uses a gke cluster from the cf-for-k8s cluster pool. See the k8s-pool-management pipeline and the terraform templates in `cf-for-k8s-deploy/gke` for more information on those environments. 11 | 12 | ## Validation 13 | To validate the new version, the pipeline deploys the current version of istio as part of a fresh installation of cf-for-k8s. It then bumps the istio version using uptimer to measure availability of an app and the cf api server during the upgrade. Subsequently, we run the cf-k8s-networking acceptance tests to validate the behavior of networking once the upgrade is complete. 14 | 15 | ## Disclaimer 16 | This pipeline is not currently committing back to cf-for-k8s, so bumping istio requires a pr be opened manually against the `istio-version-bump` branch or a clone of it. -------------------------------------------------------------------------------- /ci/cf-k8s-networking.md: -------------------------------------------------------------------------------- 1 | # cf-k8s-networking-images 2 | 3 | ## Purpose 4 | This pipeline builds and tests the `routecontroller`, a controller responsible for the reconciliation of cf Routes in cf-for-k8s. 
Once built the image is pushed to a repository in the cloudfoundry Dockerhub organization. 5 | 6 | Once the new image is available it is validated against cf-for-k8s and subsequently bumped on the cf-k8s-networking release-candidate branch. This triggers a CI pipeline of the cf-for-k8s repo itself, which integrates the new release. 7 | 8 | ## Test Environment 9 | The pipeline uses a gke cluster from the cf-for-k8s cluster pool. See the k8s-pool-management pipeline and the terraform templates in `cf-for-k8s-deploy/gke` for more information on those environments. 10 | 11 | ## Validation 12 | To validate routecontroller, we run [cf-for-k8s smoke-tests](https://github.com/cloudfoundry/cf-for-k8s/tree/develop/tests/smoke), [a subset of cf-acceptance-tests](https://github.com/cloudfoundry/cf-for-k8s/blob/develop/ci/tasks/run-cats/task.sh), and [cf-k8s-networking-acceptance-tests](https://github.com/cloudfoundry/cf-k8s-networking/tree/develop/test/acceptance). 13 | -------------------------------------------------------------------------------- /ci/dockerfiles/deploy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM cloudfoundry/cf-deployment-concourse-tasks 2 | MAINTAINER https://github.com/cloudfoundry/cf-k8s-networking 3 | 4 | ENV HELM_VERSION 2.12.3 5 | ENV KAPP_VERSION "v0.33.0" 6 | ENV KAPP_CHECKSUM "2a3328c9eca9f43fe639afb524501d9d119feeea52c8a913639cfb96e38e93d1" 7 | ENV YTT_VERSION "v0.30.0" 8 | ENV YTT_CHECKSUM "456e58c70aef5cd4946d29ed106c2b2acbb4d0d5e99129e526ecb4a859a36145" 9 | ENV KBLD_VERSION "v0.25.0" 10 | ENV KBLD_CHECKSUM "e998d54944d3b0915d4c78c3fa604163c89b9951ac1dcbdc380075cfd5aead29" 11 | 12 | RUN \ 13 | apt update && \ 14 | apt -y install --fix-missing \ 15 | docker \ 16 | htop \ 17 | libpython-dev \ 18 | lsof \ 19 | psmisc \ 20 | python \ 21 | strace \ 22 | wget \ 23 | libfontconfig1-dev libfreetype6 libssl-dev libpng-dev libjpeg-dev \ 24 | jq \ 25 | ruby-all-dev \ 26 | vim \ 27 | zip \ 28 | python-pip \ 29 | && \ 30 | apt clean 31 | 32 | 33 | # Temp fix to get around apt-key issues with canonical 34 | RUN chmod 1777 /tmp 35 | 36 | # gcloud SDK 37 | RUN export CLOUD_SDK_REPO="cloud-sdk-$(lsb_release -c -s)" && \ 38 | echo "deb http://packages.cloud.google.com/apt $CLOUD_SDK_REPO main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list && \ 39 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ 40 | apt-get update -y && apt-get install google-cloud-sdk -y 41 | 42 | # install kubectl 43 | RUN apt-get install kubectl 44 | 45 | # Get Helm 46 | RUN wget https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz -P /tmp && \ 47 | tar -xvf /tmp/helm-v${HELM_VERSION}-linux-amd64.tar.gz -C /tmp && \ 48 | mv /tmp/linux-amd64/helm /usr/local/bin/helm 49 | 50 | # install k14s utils (ytt, kapp, kbld) 51 | RUN wget -O- https://github.com/k14s/ytt/releases/download/${YTT_VERSION}/ytt-linux-amd64 > /tmp/ytt && \ 52 | echo "${YTT_CHECKSUM} /tmp/ytt" | shasum -c - && \ 53 | mv /tmp/ytt /usr/local/bin/ytt && \ 54 | chmod +x /usr/local/bin/ytt 55 | 56 | RUN wget -O- https://github.com/k14s/kapp/releases/download/${KAPP_VERSION}/kapp-linux-amd64 > /tmp/kapp && \ 57 | echo "${KAPP_CHECKSUM} /tmp/kapp" | shasum -c - && \ 58 | mv /tmp/kapp /usr/local/bin/kapp && \ 59 | chmod +x /usr/local/bin/kapp 60 | 61 | RUN wget -O- https://github.com/k14s/kbld/releases/download/${KBLD_VERSION}/kbld-linux-amd64 > /tmp/kbld && \ 62 | echo "${KBLD_CHECKSUM} /tmp/kbld" | shasum -c - && \ 63 | mv 
/tmp/kbld /usr/local/bin/kbld && \ 64 | chmod +x /usr/local/bin/kbld 65 | 66 | # Clean up 67 | RUN apt-get remove -y python-dev apt-transport-https && \ 68 | apt-get -y clean && apt-get -y autoremove --purge && rm -rf /etc/apt/ && \ 69 | rm -rf /tmp/* && \ 70 | find /var/lib/apt/lists -type f | xargs rm -f && \ 71 | find /var/cache/debconf -type f -name '*-old' | xargs rm -f && \ 72 | find /var/log -type f -user root | xargs rm -rf && \ 73 | for file in $(find /var/log -type f -user syslog); do echo > $file; done 74 | -------------------------------------------------------------------------------- /ci/dockerfiles/test/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM cloudfoundry/cflinuxfs3 2 | 3 | ENV GO_VERSION 1.13.1 4 | 5 | RUN \ 6 | apt update && \ 7 | apt -y install --fix-missing \ 8 | build-essential \ 9 | curl \ 10 | docker \ 11 | git \ 12 | netcat-openbsd \ 13 | htop \ 14 | libpython-dev \ 15 | lsof \ 16 | psmisc \ 17 | python \ 18 | strace \ 19 | libreadline6-dev\ 20 | lsb-core \ 21 | wget \ 22 | unzip \ 23 | libfontconfig1-dev libfreetype6 libssl-dev libpng-dev libjpeg-dev \ 24 | jq \ 25 | libssl-dev \ 26 | libssl1.0.0 \ 27 | libxml2-dev \ 28 | libxslt-dev \ 29 | libyaml-dev \ 30 | openssl \ 31 | vim \ 32 | zip \ 33 | python-pip \ 34 | zlib1g-dev \ 35 | && \ 36 | apt clean 37 | 38 | # Temp fix to get around apt-key issues with canonical 39 | RUN chmod 1777 /tmp 40 | 41 | # https://kubernetes.io/docs/tasks/tools/install-kubectl/ 42 | RUN echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | tee -a /etc/apt/sources.list.d/kubernetes.list && \ 43 | curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ 44 | apt-get update -y && apt-get install kubectl -y 45 | 46 | # Install go 47 | RUN curl "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xvz -C /usr/local/ 48 | 49 | # Add golang environment variables 50 | ENV HOME /root 51 | ENV GOPATH /root/go 52 | ENV PATH "${PATH}:/usr/local/go/bin:/root/bin:${GOPATH}/bin" 53 | 54 | # Install ginkgo 55 | RUN go get github.com/onsi/ginkgo/ginkgo 56 | 57 | # Install controller-gen for integration tests 58 | RUN mkdir -p /tmp/controller-gen && \ 59 | cd /tmp/controller-gen && \ 60 | go mod init tmp && \ 61 | go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.4 && \ 62 | rm -rf /tmp/controller-gen 63 | 64 | # Install docker 65 | RUN sudo curl -sSL https://get.docker.com/ | sh 66 | 67 | # Install k14s 68 | RUN curl -L https://k14s.io/install.sh | bash 69 | -------------------------------------------------------------------------------- /ci/dockerfiles/upgrade/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM cloudfoundry/cflinuxfs3 2 | 3 | COPY setup.sh . 4 | RUN ./setup.sh 5 | COPY roll.sh . 6 | ENTRYPOINT ./roll.sh 7 | -------------------------------------------------------------------------------- /ci/dockerfiles/upgrade/roll.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | while true; do 4 | sleep 1 5 | deposets=$(kubectl get daemonsets,pods,deployments -n istio-system -l "cloudfoundry.org/istio_version notin ($ISTIO_VERSION)" | wc -l) 6 | if [[ $deposets == 0 ]]; then 7 | break 8 | fi 9 | echo "Didn't quite find it this time... 
will try again in a sec" 10 | done 11 | 12 | kubectl -n cf-workloads rollout restart statefulset 13 | kubectl -n cf-workloads delete jobs -l "cloudfoundry.org/istio_version notin ($ISTIO_VERSION)" 14 | -------------------------------------------------------------------------------- /ci/dockerfiles/upgrade/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # install kubectl 4 | curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl 5 | chmod +x ./kubectl 6 | mv ./kubectl /usr/local/bin/kubectl 7 | -------------------------------------------------------------------------------- /ci/github-actions/publish-docker-image: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${GITHUB_SHA:?}" 7 | : "${GITHUB_REF:?}" 8 | : "${GCR_SERVICE_ACCOUNT_JSON:?}" 9 | : "${IMAGE_REPO:?}" 10 | : "${DOCKERFILE_PATH:?}" 11 | : "${WORKING_DIR:?}" 12 | 13 | # To extract the branch name from refs/heads/feature-branch-1 14 | # https://stackoverflow.com/a/58034787 15 | branch_name=${GITHUB_REF##*/} 16 | 17 | if [ ${branch_name} == "develop" ]; then 18 | echo "Pushes to develop are handled by Concourse" 19 | exit 0 20 | fi 21 | 22 | # docker login 23 | echo "${GCR_SERVICE_ACCOUNT_JSON}" | docker login -u _json_key --password-stdin https://gcr.io 24 | 25 | # build image 26 | img=$(docker build -q -f "${DOCKERFILE_PATH}" "${WORKING_DIR}") 27 | 28 | docker tag ${img} ${IMAGE_REPO}:${GITHUB_SHA} 29 | docker push ${IMAGE_REPO}:${GITHUB_SHA} 30 | 31 | echo "Tagging and pushing image for branch ${branch_name}" 32 | docker tag ${img} ${IMAGE_REPO}:${branch_name} 33 | docker push ${IMAGE_REPO}:${branch_name} 34 | 35 | -------------------------------------------------------------------------------- /ci/reconfigure: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e -u 4 | export PIPELINE=${1:-cf-k8s-pipeline} 5 | export TARGET=cf-k8s 6 | 7 | echo "setting pipeline \"${PIPELINE}\"" 8 | 9 | fly -t $TARGET sync 10 | fly -t $TARGET status || fly -t $TARGET login -b 11 | 12 | ciDir="$(cd $(dirname $0); pwd)" 13 | fly -t $TARGET \ 14 | set-pipeline -p $PIPELINE \ 15 | -c "$ciDir/$PIPELINE.yml" 16 | -------------------------------------------------------------------------------- /ci/scaling.yml: -------------------------------------------------------------------------------- 1 | --- 2 | resource_types: [] 3 | resources: 4 | - name: cf-for-k8s 5 | type: git 6 | icon: github 7 | source: 8 | uri: git@github.com:cloudfoundry/cf-for-k8s 9 | private_key: ((github_private_key.private_key)) 10 | branch: scale-istio-1.6.9 11 | ignore_paths: 12 | - ci/** 13 | 14 | - name: cf-k8s-networking 15 | type: git 16 | icon: github 17 | source: 18 | uri: git@github.com:cloudfoundry/cf-k8s-networking.git 19 | private_key: ((github_private_key.private_key)) 20 | branch: develop 21 | ignore_paths: 22 | - config/values/images.yml # Do not want resource to trigger on image digest updates 23 | 24 | - name: cf-k8s-networking-ci 25 | type: git 26 | icon: github 27 | source: 28 | uri: git@github.com:cloudfoundry/cf-k8s-networking.git 29 | private_key: ((github_private_key.private_key)) 30 | branch: develop 31 | paths: 32 | - ci 33 | - config 34 | groups: 35 | - name: scale-testing 36 | jobs: 37 | - scale-test 38 | - 
manually-delete-gke-cluster 39 | 40 | # Weekly Scale Tests 41 | jobs: 42 | - name: scale-test 43 | serial: true 44 | serial_groups: [scale-test] 45 | plan: 46 | - in_parallel: 47 | - get: cf-for-k8s 48 | - get: cf-k8s-networking-ci 49 | - get: cf-k8s-networking 50 | - task: create-gke-cluster 51 | file: cf-k8s-networking-ci/ci/tasks/cf4k8s/create-gke-cluster.yml 52 | params: 53 | CLUSTER_NAME: &scale-testing-cluster-name ci-scale-testing-cluster 54 | GCP_SERVICE_ACCOUNT_KEY: ((shared_gcp_account_creds)) 55 | ENABLE_IP_ALIAS: true 56 | MACHINE_TYPE: "n1-standard-8" 57 | NUM_NODES: 34 58 | REGIONAL_CLUSTER: ®ional-cluster true 59 | - task: install-cf 60 | file: cf-k8s-networking-ci/ci/tasks/cf4k8s/install-cf-for-k8s.yml 61 | params: 62 | CF_DOMAIN: &scale-testing-domain "ci-scale-testing.routing.lol" 63 | CLUSTER_NAME: *scale-testing-cluster-name 64 | GCP_SERVICE_ACCOUNT_KEY: ((shared_gcp_account_creds)) 65 | KPACK_GCR_ACCOUNT_KEY: ((gcp_gcr_service_account_key)) 66 | KAPP_TIMEOUT: "45m" 67 | REGIONAL_CLUSTER: *regional-cluster 68 | - task: pave-cf-for-scale-tests 69 | file: cf-k8s-networking-ci/ci/tasks/scale/pave-cf-for-scale-tests.yml 70 | params: 71 | NUMBER_OF_APPS: 1000 72 | - task: run-scale-tests 73 | file: cf-k8s-networking-ci/ci/tasks/scale/run-scale-tests.yml 74 | params: 75 | NUMBER_OF_APPS: 1000 76 | 77 | - name: manually-delete-gke-cluster 78 | serial_groups: [scale-test] 79 | plan: 80 | - in_parallel: 81 | - get: cf-k8s-networking-ci 82 | - task: destroy-cluster 83 | file: cf-k8s-networking-ci/ci/tasks/cf4k8s/destroy-cluster.yml 84 | params: 85 | CF_DOMAIN: *scale-testing-domain 86 | CLUSTER_NAME: *scale-testing-cluster-name 87 | GCP_SERVICE_ACCOUNT_KEY: ((shared_gcp_account_creds)) 88 | -------------------------------------------------------------------------------- /ci/tasks/build-image/task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | trap "pkill dockerd" EXIT 5 | 6 | start-docker & 7 | echo 'until docker info; do sleep 5; done' >/usr/local/bin/wait_for_docker 8 | chmod +x /usr/local/bin/wait_for_docker 9 | timeout 300 wait_for_docker 10 | 11 | <<<"$DOCKERHUB_PASSWORD" docker login --username "$DOCKERHUB_USERNAME" --password-stdin 12 | 13 | cf-k8s-networking/build/build.sh 14 | 15 | image_ref="$(yq -r '.overrides[] | select(.image | test("/routecontroller")).newImage' cf-k8s-networking/build/kbld.lock.yml)" 16 | sed -i'' -e "s| routecontroller:.*| routecontroller: \"$image_ref\"|" cf-k8s-networking/config/values/images.yml 17 | 18 | pushd cf-k8s-networking > /dev/null 19 | git config user.name "${GIT_COMMIT_USERNAME}" 20 | git config user.email "${GIT_COMMIT_EMAIL}" 21 | git add config/values/images.yml 22 | 23 | # dont make a commit if there aren't new images 24 | if ! git diff --cached --exit-code; then 25 | echo "committing!" 26 | git commit -m "images.yml updated by CI" 27 | else 28 | echo "no changes to images, not bothering with a commit" 29 | fi 30 | popd > /dev/null 31 | 32 | cp -R cf-k8s-networking/. 
updated-cf-k8s-networking/ 33 | -------------------------------------------------------------------------------- /ci/tasks/build-image/task.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: cloudfoundry/cf-for-k8s-dind 7 | 8 | params: 9 | DOCKERHUB_USERNAME: ((dockerhub.username)) 10 | DOCKERHUB_PASSWORD: ((dockerhub.password)) 11 | GIT_COMMIT_EMAIL: cf-release-integration@pivotal.io 12 | GIT_COMMIT_USERNAME: "relint-ci" 13 | 14 | inputs: 15 | - name: cf-k8s-networking 16 | 17 | outputs: 18 | - name: updated-cf-k8s-networking 19 | 20 | run: 21 | path: cf-k8s-networking/ci/tasks/build-image/task.sh 22 | -------------------------------------------------------------------------------- /ci/tasks/cf4k8s/generate-github-release.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | function write_release_name() { 6 | mkdir -p release-text 7 | version=$(cat version/version) 8 | 9 | echo "v${version}" > release-text/name 10 | } 11 | 12 | function write_release_body() { 13 | tmp_dir=$(mktemp -d) 14 | 15 | # Generate git diff 16 | pushd cf-k8s-networking > /dev/null 17 | from_ref=$(git tag --sort=version:refname | egrep "^v[0-9]+\.[0-9]+\.[0-9]+$" | tail -2 | head -1) 18 | to_ref=$(git tag --sort=version:refname | egrep "^v[0-9]+\.[0-9]+\.[0-9]+$" | tail -1) 19 | 20 | # During ship-what job we want to compare version since last tag. Since 21 | # the new version tag hasn't been committed we can key off that to 22 | # understand if we are in ship-what 23 | if [[ "${to_ref}" != "v${version}" ]]; then 24 | from_ref=$(git tag --sort=version:refname | egrep "^v[0-9]+\.[0-9]+\.[0-9]+$" | tail -1) 25 | to_ref="HEAD" 26 | fi 27 | 28 | diff_string="${from_ref}...${to_ref}" 29 | echo "comparing ${diff_string}:" 30 | git log "${diff_string}" | { egrep -o '\[\#([0-9]+)' || true; } | cut -d# -f2 | sort | uniq > "${tmp_dir}/stories.raw" 31 | popd > /dev/null 32 | 33 | # Iterate through the found story links 34 | while read -r story_id 35 | do 36 | curl -s "https://www.pivotaltracker.com/services/v5/stories/${story_id}" 37 | done < "${tmp_dir}/stories.raw" > "${tmp_dir}/stories.json" 38 | 39 | cat "${tmp_dir}/stories.json" | \ 40 | jq -r 'select(.current_state == "accepted") | "- ["+.name+"]("+.url+")"' \ 41 | > release-text/body.md 42 | } 43 | 44 | function main() { 45 | write_release_name 46 | write_release_body 47 | 48 | cat release-text/body.md 49 | } 50 | 51 | main 52 | -------------------------------------------------------------------------------- /ci/tasks/cf4k8s/generate-github-release.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: cloudfoundry/cf-for-k8s-ci 6 | 7 | inputs: 8 | - name: cf-k8s-networking 9 | - name: version 10 | - name: cf-k8s-networking-ci 11 | 12 | outputs: 13 | - name: release-text 14 | 15 | run: 16 | path: cf-k8s-networking-ci/ci/tasks/cf4k8s/generate-github-release.sh 17 | -------------------------------------------------------------------------------- /ci/tasks/cf4k8s/generate-integration-config.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | function write_cats_config() { 6 | admin_password=$(cat env-metadata/cf-admin-password.txt) 7 | 
dns_domain=$(cat env-metadata/dns-domain.txt) 8 | mkdir -p integration-config 9 | cat <<- EOF > "integration-config/config.json" 10 | { 11 | "api": "api.${dns_domain}", 12 | "admin_user": "admin", 13 | "admin_password": "${admin_password}", 14 | "apps_domain": "apps.${dns_domain}" 15 | } 16 | EOF 17 | } 18 | 19 | function main() { 20 | write_cats_config 21 | } 22 | 23 | main 24 | -------------------------------------------------------------------------------- /ci/tasks/cf4k8s/generate-integration-config.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: cloudfoundry/cf-for-k8s-ci 6 | 7 | inputs: 8 | - name: cf-k8s-networking-ci 9 | - name: env-metadata 10 | 11 | outputs: 12 | - name: integration-config 13 | 14 | run: 15 | path: cf-k8s-networking-ci/ci/tasks/cf4k8s/generate-integration-config.sh 16 | -------------------------------------------------------------------------------- /ci/tasks/cf4k8s/push-app.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${APP_NAME:?}" 7 | : "${ORG_NAME:?}" 8 | : "${SPACE_NAME:?}" 9 | : "${INSTANCES:?}" 10 | 11 | ROOT="$(cd "$(dirname "${0}")/../../../.." && pwd)" 12 | 13 | function target_cf() { 14 | local cf_domain=$(cat "${ROOT}/cf-install-values/cf-install-values.yml" | \ 15 | grep system_domain | awk '{print $2}' | tr -d '"') 16 | 17 | cf_with_retry api --skip-ssl-validation "https://api.${cf_domain}" 18 | local password=$(cat "${ROOT}/cf-install-values/cf-install-values.yml" | \ 19 | grep cf_admin_password | awk '{print $2}') 20 | cf_with_retry auth "admin" "${password}" 21 | } 22 | 23 | function create_org_and_space() { 24 | cf_with_retry create-org "${ORG_NAME}" 25 | cf_with_retry create-space -o "${ORG_NAME}" "${SPACE_NAME}" 26 | } 27 | 28 | function deploy_app() { 29 | local name="${1}" 30 | cf_with_retry push "${name}" -o "cfrouting/httpbin" -i "${INSTANCES}" 31 | } 32 | 33 | function cf_with_retry() { 34 | cf_command=$* 35 | 36 | set +euo pipefail 37 | 38 | for i in {1..3} 39 | do 40 | echo "Running cf ${cf_command}..." 
41 | cf $cf_command && set -euo pipefail && return 42 | sleep 10 43 | done 44 | 45 | echo "cf_with_retry command has failed 3 times" 46 | exit 47 | } 48 | 49 | function main() { 50 | target_cf 51 | create_org_and_space 52 | cf_with_retry target -o "${ORG_NAME}" -s "${SPACE_NAME}" 53 | cf_with_retry enable-feature-flag diego_docker 54 | deploy_app "${APP_NAME}" 55 | } 56 | 57 | main 58 | -------------------------------------------------------------------------------- /ci/tasks/cf4k8s/push-app.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: gcr.io/cf-routing/cf-k8s-networking/k8s-deploy 6 | 7 | inputs: 8 | - name: cf-k8s-networking-ci 9 | - name: cf-install-values 10 | 11 | run: 12 | path: cf-k8s-networking-ci/ci/tasks/cf4k8s/push-app.sh 13 | 14 | params: 15 | APP_NAME: 16 | ORG_NAME: 17 | SPACE_NAME: 18 | INSTANCES: 1 19 | -------------------------------------------------------------------------------- /ci/tasks/cf4k8s/run-upgrade-uptime-test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | source cf-k8s-networking-ci/ci/tasks/helpers.sh 6 | 7 | function run_upgrade_uptime_tests() { 8 | pushd "cf-k8s-networking/test/uptime" 9 | ginkgo -v -r -p . 10 | popd 11 | } 12 | 13 | function main() { 14 | target_k8s_cluster #from helpers.sh 15 | INSTALL_VALUES_FILEPATH=cf-install-values/cf-install-values.yml target_cf_with_install_values #from helpers.sh 16 | run_upgrade_uptime_tests 17 | } 18 | 19 | main 20 | -------------------------------------------------------------------------------- /ci/tasks/cf4k8s/run-upgrade-uptime-test.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: gcr.io/cf-routing/cf-k8s-networking/k8s-deploy 6 | 7 | inputs: 8 | - name: cf-for-k8s 9 | - name: cf-k8s-networking-ci 10 | - name: cf-k8s-networking 11 | - name: cf-install-values 12 | 13 | run: 14 | path: cf-k8s-networking-ci/ci/tasks/cf4k8s/run-upgrade-uptime-test.sh 15 | 16 | params: 17 | # Required for performing the test 18 | CF_APP_DOMAIN: 19 | DATA_PLANE_APP_NAME: 20 | CONTROL_PLANE_APP_NAME: 21 | 22 | UPGRADE_DISCOVERY_TIMEOUT: 1m 23 | DATA_PLANE_SLO_PERCENTAGE: 0.95 24 | DATA_PLANE_SLO_MAX_REQUEST_LATENCY: 100ms 25 | CONTROL_PLANE_SLO_PERCENTAGE: 0.95 26 | CONTROL_PLANE_SLO_MAX_ROUTE_PROPAGATION_TIME: 15s 27 | CONTROL_PLANE_SLO_DATA_PLANE_AVAILABILITY_PERCENTAGE: 0.99 28 | CONTROL_PLANE_SLO_DATA_PLANE_MAX_REQUEST_LATENCY: 200ms 29 | CONTROL_PLANE_SLO_SAMPLE_CAPTURE_TIME: 10s 30 | 31 | # required for targeting the test cluster and CF 32 | GCP_SERVICE_ACCOUNT_KEY: 33 | CLUSTER_NAME: 34 | GCP_PROJECT: 35 | GCP_REGION: 36 | 37 | TARGET_ORG: 38 | TARGET_SPACE: 39 | -------------------------------------------------------------------------------- /ci/tasks/docker/add-updated-digest.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${COMPONENT_NAME:?}" 7 | 8 | pushd image-resource > /dev/null 9 | digest="$(cat digest)" 10 | popd 11 | 12 | pushd cf-k8s-networking 13 | sed -i "s/cloudfoundry\/$COMPONENT_NAME@.*/cloudfoundry\/$COMPONENT_NAME@$digest/" config/values/images.yml 14 | 15 | git config user.name "${GIT_COMMIT_USERNAME}" 16 | git config user.email "${GIT_COMMIT_EMAIL}" 
17 | 18 | if [[ -n $(git status --porcelain) ]]; then 19 | echo "changes detected, will commit..." 20 | git add config/values/images.yml 21 | git commit -m "Update ${COMPONENT_NAME} image digest to ${digest}" 22 | 23 | git log -1 --color | cat 24 | else 25 | echo "no changes in repo, no commit necessary" 26 | fi 27 | popd 28 | 29 | shopt -s dotglob 30 | cp -r cf-k8s-networking/* cf-k8s-networking-modified 31 | -------------------------------------------------------------------------------- /ci/tasks/docker/add-updated-digest.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: cloudfoundry/cf-for-k8s-ci 7 | 8 | inputs: 9 | - name: cf-k8s-networking 10 | - name: cf-for-k8s-ci 11 | - name: image-resource 12 | 13 | outputs: 14 | - name: cf-k8s-networking-modified 15 | 16 | run: 17 | path: cf-k8s-networking-ci/ci/tasks/docker/add-updated-digest.sh 18 | 19 | params: 20 | COMPONENT_NAME: # e.g. routecontroller 21 | GIT_COMMIT_USERNAME: "relint-ci" 22 | GIT_COMMIT_EMAIL: "cf-release-integration@pivotal.io" 23 | -------------------------------------------------------------------------------- /ci/tasks/docker/annotate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | if [[ -d input-image-tar ]]; then 6 | deplab --image-tar input-image-tar/image.tar \ 7 | --git repository \ 8 | --output-tar output-image/image.tar 9 | elif [[ -d input-image-name ]]; then 10 | deplab --image "$(cat input-image-name/name.txt)" \ 11 | --git repository \ 12 | --output-tar output-image/image.tar 13 | else 14 | echo "When using this task, you must specify EITHER input-image-tar OR input-image-name" 15 | fi 16 | 17 | -------------------------------------------------------------------------------- /ci/tasks/docker/annotate.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: registry-image 5 | source: 6 | # Deplab is a tool to annotate Docker images with additional metadata, such 7 | # as SHA of the source code used to build an image, the list of used packages 8 | # in the image, etc. 
9 | repository: dev.registry.pivotal.io/navcon/deplab-task 10 | tag: dev 11 | username: pivotal-cf-networking@pivotal.io 12 | password: ((pivotal_cf_networking_pivnet_password)) 13 | 14 | inputs: 15 | # When using this task, you must specify EITHER input-image-tar OR input-image-name 16 | # input-image-tar should contain an image.tar file 17 | - name: input-image-tar 18 | optional: true 19 | # input-image-name should contain a name.txt file with the name of the image 20 | - name: input-image-name 21 | optional: true 22 | # repository is the git respository containing the source code for the image 23 | - name: repository 24 | - name: cf-k8s-networking-ci 25 | 26 | outputs: 27 | # output-image will contain a new annotated image under image.tar 28 | - name: output-image 29 | 30 | run: 31 | path: cf-k8s-networking-ci/ci/tasks/docker/annotate.sh 32 | -------------------------------------------------------------------------------- /ci/tasks/docker/update-image-digest-in-cf-for-k8s.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | # ENV 5 | : "${COMPONENT_NAME:?}" 6 | : "${TARGET_FILE:?}" 7 | 8 | pushd image-resource > /dev/null 9 | digest="$(cat digest)" 10 | popd 11 | 12 | pushd cf-for-k8s-develop 13 | sed -r -i "s|(cloudfoundry/${COMPONENT_NAME})@sha256:[a-f0-9]+|\1@${digest}|" "${TARGET_FILE}" 14 | ./build/istio/build.sh 15 | 16 | git config user.name "${GIT_COMMIT_USERNAME}" 17 | git config user.email "${GIT_COMMIT_EMAIL}" 18 | 19 | if [[ -n $(git status --porcelain) ]]; then 20 | echo "changes detected, will commit..." 21 | git add "${TARGET_FILE}" 22 | git add "config/istio/istio-generated" 23 | git commit -m "Update ${COMPONENT_NAME} image digest to ${digest}" 24 | 25 | git log -1 --color | cat 26 | else 27 | echo "no changes in repo, no commit necessary" 28 | fi 29 | popd 30 | 31 | # include dot files in * globing 32 | shopt -s dotglob 33 | cp -r cf-for-k8s-develop/* cf-for-k8s-modified/ 34 | -------------------------------------------------------------------------------- /ci/tasks/docker/update-image-digest-in-cf-for-k8s.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: cloudfoundry/cf-for-k8s-ci 7 | 8 | inputs: 9 | - name: cf-k8s-networking-ci 10 | - name: cf-for-k8s-develop 11 | - name: image-resource 12 | 13 | outputs: 14 | - name: cf-for-k8s-modified 15 | 16 | run: 17 | path: cf-k8s-networking-ci/ci/tasks/docker/update-image-digest-in-cf-for-k8s.sh 18 | 19 | params: 20 | TARGET_FILE: # e.g. config/values.yml 21 | COMPONENT_NAME: # e.g. routecontroller 22 | GIT_COMMIT_USERNAME: "relint-ci" 23 | GIT_COMMIT_EMAIL: "cf-release-integration@pivotal.io" 24 | -------------------------------------------------------------------------------- /ci/tasks/helpers.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # NOTE: Because these are designed to be used in concourse tasks, they use 4 | # exit 1. In my experience, this means that running them locally will cause 5 | # your terminal, tmux session, or ssh session to exit. 6 | 7 | function target_cf_with_install_values() { 8 | if [ -z "${INSTALL_VALUES_FILEPATH}" ]; then 9 | echo "INSTALL_VALUES_FILEPATH is empty. 
Defaulting to use \"cf-install-values/cf-install-values.yml\" file" 10 | INSTALL_VALUES_FILEPATH="cf-install-values/cf-install-values.yml" 11 | fi 12 | 13 | if [ -z "${TARGET_ORG}" ]; then 14 | echo "TARGET_ORG is empty. Please supply the org to target" 15 | exit 1 16 | fi 17 | 18 | if [ -z "${TARGET_SPACE}" ]; then 19 | echo "TARGET_SPACE is empty. Please supply the space to target" 20 | exit 1 21 | fi 22 | 23 | local cf_domain=$(cat "${INSTALL_VALUES_FILEPATH}" | \ 24 | grep system_domain | awk '{print $2}' | tr -d '"') 25 | 26 | cf api --skip-ssl-validation "https://api.${cf_domain}" 27 | local password=$(cat "${INSTALL_VALUES_FILEPATH}" | \ 28 | grep cf_admin_password | awk '{print $2}') 29 | cf auth "admin" "${password}" 30 | 31 | cf target -o "${TARGET_ORG}" -s "${TARGET_SPACE}" 32 | } 33 | 34 | function target_k8s_cluster() { 35 | if [ -z "${CLUSTER_NAME}" ]; then 36 | echo "CLUSTER_NAME is empty. Please supply the name of the cluster you wish to target." 37 | exit 1 38 | fi 39 | 40 | if [ -z "${GCP_SERVICE_ACCOUNT_KEY}" ]; then 41 | echo "GCP_SERVICE_ACCOUNT_KEY is empty. Please supply the GCP Service Account Key to access \"${CLUSTER_NAME}\". Note, this is the actual key, not a filepath to the key." 42 | exit 1 43 | fi 44 | 45 | if [ -z "${GCP_PROJECT}" ]; then 46 | echo "GCP_PROJECT is empty. Please supply the GCP project that ${CLUSTER_NAME} is part of." 47 | exit 1 48 | fi 49 | 50 | if [ -z "${GCP_REGION}" ]; then 51 | echo "GCP_REGION is empty. Please supply the GCP region that ${CLUSTER_NAME} is part of." 52 | exit 1 53 | fi 54 | 55 | gcloud auth activate-service-account --key-file=<(echo "${GCP_SERVICE_ACCOUNT_KEY}") --project="${GCP_PROJECT}" 1>/dev/null 2>&1 56 | gcloud container clusters get-credentials ${CLUSTER_NAME} --region="${GCP_REGION}" 1>/dev/null 2>&1 57 | } 58 | 59 | function initialize_gke_env_vars() { 60 | if [ -f "gke-env-metadata/cluster_name" ]; then 61 | export CLUSTER_NAME="$(cat gke-env-metadata/cluster_name)" 62 | fi 63 | if [ -f "gke-env-metadata/cf_domain" ]; then 64 | export CF_DOMAIN="$(cat gke-env-metadata/cf_domain)" 65 | fi 66 | # TODO initialize other env vars if necessary 67 | } 68 | -------------------------------------------------------------------------------- /ci/tasks/istio/deploy-istio.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${KUBECONFIG_CONTEXT:?}" 7 | : "${SHARED_DNS_ZONE_NAME:?}" 8 | : "${DNS_DOMAIN:?}" 9 | : "${GCP_DNS_SERVICE_ACCOUNT_KEY:?}" 10 | : "${GCP_PROJECT_ID:?}" 11 | 12 | function install_istio() { 13 | workspace=${PWD} 14 | export KUBECONFIG="${PWD}/kubeconfig/config" 15 | generate_script="${PWD}/cf-k8s-networking/config/istio/generate.sh" 16 | 17 | kubectl config use-context ${KUBECONFIG_CONTEXT} 18 | 19 | # Install Istio with its dependencies (--dangerous-allow-all-symlink-destinations is required for process substitution on Linux) 20 | # fixed in https://github.com/k14s/ytt/commit/7e1876698b4ea633ac44368168b43f51d55f5645 21 | # removed when ytt is upgraded 22 | ytt --dangerous-allow-all-symlink-destinations \ 23 | -f istio.yaml=<("${generate_script}" --set values.grafana.enabled=true) \ 24 | | kubectl apply -f - 25 | 26 | } 27 | 28 | function configure_dns() { 29 | tmp_dir="$(mktemp -d /tmp/deploy-istio.XXXXXXXX)" 30 | service_key_path="${tmp_dir}/gcp.json" 31 | 32 | echo "${GCP_DNS_SERVICE_ACCOUNT_KEY}" > "${service_key_path}" 33 | gcloud auth activate-service-account --key-file="${service_key_path}" 34 | gcloud 
config set project "${GCP_PROJECT_ID}" 35 | 36 | echo "Discovering Istio Gateway LB IP" 37 | external_static_ip="" 38 | while [ -z $external_static_ip ]; do 39 | sleep 10 40 | external_static_ip=$(kubectl get services/istio-ingressgateway -n istio-system --output="jsonpath={.status.loadBalancer.ingress[0].ip}") 41 | done 42 | 43 | echo "Configuring DNS for external IP: ${external_static_ip}" 44 | gcloud dns record-sets transaction start --zone="${SHARED_DNS_ZONE_NAME}" 45 | gcp_records_json="$( gcloud dns record-sets list --zone "${SHARED_DNS_ZONE_NAME}" --name "*.${DNS_DOMAIN}" --format=json )" 46 | record_count="$( echo "${gcp_records_json}" | jq 'length' )" 47 | if [ "${record_count}" != "0" ]; then 48 | existing_record_ip="$( echo "${gcp_records_json}" | jq -r '.[0].rrdatas | join(" ")' )" 49 | gcloud dns record-sets transaction remove --name "*.${DNS_DOMAIN}" --type=A --zone="${SHARED_DNS_ZONE_NAME}" --ttl=300 "${existing_record_ip}" --verbosity=debug 50 | fi 51 | gcloud dns record-sets transaction add --name "*.${DNS_DOMAIN}" --type=A --zone="${SHARED_DNS_ZONE_NAME}" --ttl=300 "${external_static_ip}" --verbosity=debug 52 | 53 | echo "Contents of transaction.yaml:" 54 | cat transaction.yaml 55 | gcloud dns record-sets transaction execute --zone="${SHARED_DNS_ZONE_NAME}" --verbosity=debug 56 | } 57 | 58 | function main() { 59 | install_istio 60 | configure_dns 61 | } 62 | 63 | main 64 | -------------------------------------------------------------------------------- /ci/tasks/istio/deploy-istio.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: gcr.io/cf-routing/cf-k8s-networking/k8s-deploy 7 | 8 | inputs: 9 | - name: cf-k8s-networking 10 | - name: cf-k8s-networking-ci 11 | - name: kubeconfig 12 | 13 | run: 14 | path: cf-k8s-networking-ci/ci/tasks/istio/deploy-istio.sh 15 | 16 | params: 17 | KUBECONFIG_CONTEXT: 18 | # - kubectl k8s context to use from kubeconfig 19 | SHARED_DNS_ZONE_NAME: 20 | DNS_DOMAIN: 21 | GCP_DNS_SERVICE_ACCOUNT_KEY: 22 | GCP_PROJECT_ID: 23 | -------------------------------------------------------------------------------- /ci/tasks/istio/enable-sidecar-injection.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${KUBECONFIG_CONTEXT:?}" 7 | 8 | function enabled_sidecar_injection() { 9 | workspace=${PWD} 10 | export KUBECONFIG="${PWD}/kubeconfig/config" 11 | 12 | # Enable Istio Sidecar Injection for app workloads 13 | kubectl label namespace cf-workloads istio-injection=enabled --overwrite=true 14 | kubectl label namespace cf-system istio-injection=enabled --overwrite=true 15 | } 16 | 17 | function main() { 18 | enabled_sidecar_injection 19 | } 20 | 21 | main 22 | -------------------------------------------------------------------------------- /ci/tasks/istio/enable-sidecar-injection.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: gcr.io/cf-routing/cf-k8s-networking/k8s-deploy 7 | 8 | inputs: 9 | - name: cf-k8s-networking 10 | - name: cf-k8s-networking-ci 11 | - name: kubeconfig 12 | 13 | run: 14 | path: cf-k8s-networking-ci/ci/tasks/istio/enable-sidecar-injection.sh 15 | 16 | params: 17 | KUBECONFIG_CONTEXT: 18 | # - kubectl k8s context to use from kubeconfig 19 | 
-------------------------------------------------------------------------------- /ci/tasks/istio/install-grafana-dashboard.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${KUBECONFIG_CONTEXT:?}" 7 | 8 | function install_grafana_dashboard() { 9 | export KUBECONFIG="${PWD}/kubeconfig/config" 10 | kubectl config use-context ${KUBECONFIG_CONTEXT} 11 | 12 | dashboard_file="${PWD}/cf-k8s-networking/doc/metrics/dashboard.json" 13 | 14 | jq -n '{ "dashboard": input }' $dashboard_file | jq '.dashboard.id = null' | jq '.dashboard.uid = "indicators"' -c > "/tmp/dashboard.json" 15 | 16 | kubectl proxy --port=8080 & 17 | proxy_pid=$! 18 | 19 | # Allow proxy to come up 20 | sleep 5 21 | 22 | # Delete old dashboard 23 | curl -H 'Accept: application/json' -XDELETE http://localhost:8080/api/v1/namespaces/istio-system/services/grafana:http/proxy/api/dashboards/uid/indicators 24 | # Create dashboard 25 | curl -H 'Content-Type: application/json' -H 'Accept: application/json' -XPOST http://localhost:8080/api/v1/namespaces/istio-system/services/grafana:http/proxy/api/dashboards/db -d "@/tmp/dashboard.json" 26 | 27 | kill ${proxy_pid} 28 | } 29 | 30 | 31 | function main() { 32 | install_grafana_dashboard 33 | } 34 | 35 | main 36 | -------------------------------------------------------------------------------- /ci/tasks/istio/install-grafana-dashboard.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: gcr.io/cf-routing/cf-k8s-networking/k8s-deploy 7 | 8 | inputs: 9 | - name: cf-k8s-networking 10 | - name: cf-k8s-networking-ci 11 | - name: kubeconfig 12 | 13 | run: 14 | path: cf-k8s-networking-ci/ci/tasks/istio/install-grafana-dashboard.sh 15 | 16 | params: 17 | KUBECONFIG_CONTEXT: 18 | # - kubectl k8s context to use from kubeconfig 19 | -------------------------------------------------------------------------------- /ci/tasks/k8s/kubectl-apply.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${KUBECONFIG_CONTEXT:?}" 7 | : "${FILES_TO_APPLY:?}" 8 | 9 | function kubectl_apply_all() { 10 | workspace=${PWD} 11 | export KUBECONFIG="${workspace}/kubeconfig/config" 12 | 13 | pushd k8s-config-dir > /dev/null 14 | kubectl config use-context ${KUBECONFIG_CONTEXT} 15 | 16 | for file in ${FILES_TO_APPLY} 17 | do 18 | echo "Applying ${file}" 19 | kubectl apply -f $file 20 | sleep 5 # give k8s time to converge 21 | done 22 | popd 23 | } 24 | 25 | function main() { 26 | kubectl_apply_all 27 | } 28 | 29 | main 30 | -------------------------------------------------------------------------------- /ci/tasks/k8s/kubectl-apply.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: gcr.io/cf-routing/cf-k8s-networking/k8s-deploy 7 | 8 | inputs: 9 | - name: cf-k8s-networking 10 | - name: cf-k8s-networking-ci 11 | - name: k8s-config-dir 12 | - name: kubeconfig 13 | 14 | run: 15 | path: cf-k8s-networking-ci/ci/tasks/k8s/kubectl-apply.sh 16 | 17 | params: 18 | KUBECONFIG_CONTEXT: 19 | # - kubectl k8s context to use from kubeconfig 20 | FILES_TO_APPLY: 21 | # - List of k8s yaml files to be applied 22 | # - Quoted and space-separated 23 | # - Files will be applied in the order they're 
listed 24 | # - Paths are relative to root of the `k8s-config-dir` input 25 | 26 | -------------------------------------------------------------------------------- /ci/tasks/scale/pave-cf-for-scale-tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | : "${NUMBER_OF_APPS:?}" 6 | 7 | # don't change this without also changing scale_suite_test.go 8 | # must be power of 10 (1, 100, 1000, etc) 9 | APPS_PER_SPACE=10 10 | 11 | function login() { 12 | cf api --skip-ssl-validation "https://api.$(cat env-metadata/dns-domain.txt)" 13 | CF_USERNAME=admin CF_PASSWORD=$(cat env-metadata/cf-admin-password.txt) cf auth 14 | } 15 | 16 | function prepare_cf_foundation() { 17 | cf enable-feature-flag diego_docker 18 | cf update-quota default -r 3000 -m 3000G 19 | } 20 | 21 | function deploy_apps() { 22 | org_name_prefix="scale-tests" 23 | space_name_prefix="scale-tests" 24 | 25 | # we subtract 1 here because `seq` is inclusive on both sides 26 | number_of_org_spaces="$((NUMBER_OF_APPS / APPS_PER_SPACE - 1))" 27 | number_of_apps_per_org_space="$((NUMBER_OF_APPS / number_of_org_spaces - 1))" 28 | 29 | for n in $(seq 0 ${number_of_org_spaces}) 30 | do 31 | org_name="${org_name_prefix}-${n}" 32 | space_name="${space_name_prefix}-${n}" 33 | cf create-org "${org_name}" 34 | cf create-space -o "${org_name}" "${space_name}" 35 | cf target -o "${org_name}" -s "${space_name}" 36 | 37 | for i in $(seq 0 ${number_of_apps_per_org_space}) 38 | do 39 | name="bin-$((n * APPS_PER_SPACE + i))" 40 | echo $name 41 | cf push $name -o cfrouting/proxy -m 128M -k 256M -i 2 & 42 | # let's give CF time to push an app, sometimes it uses the next org/space if 43 | # don't give enough time 44 | sleep 5 45 | done 46 | wait 47 | done 48 | } 49 | 50 | function main() { 51 | sleep 10 52 | # hopefully wait for til it works? 53 | curl -vvv --retry 300 -k "https://api.$(cat env-metadata/dns-domain.txt)" 54 | 55 | login 56 | prepare_cf_foundation 57 | deploy_apps 58 | } 59 | 60 | main 61 | -------------------------------------------------------------------------------- /ci/tasks/scale/pave-cf-for-scale-tests.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: cloudfoundry/cf-for-k8s-ci 6 | 7 | inputs: 8 | - name: env-metadata 9 | - name: cf-k8s-networking-ci 10 | 11 | run: 12 | path: cf-k8s-networking-ci/ci/tasks/scale/pave-cf-for-scale-tests.sh 13 | 14 | params: 15 | NUMBER_OF_APPS: 16 | -------------------------------------------------------------------------------- /ci/tasks/scale/run-scale-tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | : "${NUMBER_OF_APPS:?}" 6 | 7 | function login_and_target() { 8 | cf api --skip-ssl-validation "https://api.$(cat env-metadata/dns-domain.txt)" 9 | CF_USERNAME=admin CF_PASSWORD=$(cat env-metadata/cf-admin-password.txt) cf auth 10 | } 11 | 12 | function run_scale_test() { 13 | export DOMAIN="apps.ci-scale-testing.routing.lol" 14 | export CLEANUP="true" #Remove when we run these tests regularly after they start to pass 15 | export NUMBER_OF_APPS=${NUMBER_OF_APPS} 16 | 17 | pushd cf-k8s-networking/test/scale 18 | ginkgo -v . 
19 | popd 20 | } 21 | 22 | function main() { 23 | login_and_target 24 | run_scale_test 25 | } 26 | 27 | main 28 | -------------------------------------------------------------------------------- /ci/tasks/scale/run-scale-tests.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: gcr.io/cf-routing/cf-k8s-networking/k8s-deploy 6 | 7 | inputs: 8 | - name: env-metadata 9 | - name: cf-k8s-networking-ci 10 | - name: cf-k8s-networking 11 | 12 | run: 13 | path: cf-k8s-networking-ci/ci/tasks/scale/run-scale-tests.sh 14 | 15 | params: 16 | NUMBER_OF_APPS: 17 | -------------------------------------------------------------------------------- /ci/tasks/team/create-community-chore.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${TRACKER_TOKEN:?}" 7 | 8 | project_id=2407973 9 | 10 | # echo "Fetching Template Story..." 11 | template_story=`curl -s -X GET -H "X-TrackerToken: $TRACKER_TOKEN" "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories/173205458" | sed -E "s|YYYY/MM/DD|$(date '+%Y/%m/%d')|" | jq '. + {"current_state": "started"}'` 12 | template_tasks=`curl -s -X GET -H "X-TrackerToken: $TRACKER_TOKEN" "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories/173205458/tasks"` 13 | 14 | echo "Creating story..." 15 | 16 | story_id=`curl -s -X POST -H "X-TrackerToken: $TRACKER_TOKEN" -H "Content-Type: application/json" -d "$template_story" "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories" | jq .id` 17 | curl -s -X PUT -H "X-TrackerToken: $TRACKER_TOKEN" -H "Content-Type: application/json" -d '{"current_state":"unstarted"}' "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories/$story_id" > /dev/null 18 | echo $template_tasks | jq -c '(.[])' | xargs -n1 -I{} curl -s -X POST -H "X-TrackerToken: $TRACKER_TOKEN" -H "Content-Type: application/json" -d '{}' "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories/$story_id/tasks" > /dev/null 19 | 20 | echo "Created Story id $story_id" 21 | 22 | if [[ $story_id == "null" ]]; then 23 | exit 1 24 | fi 25 | -------------------------------------------------------------------------------- /ci/tasks/team/create-community-chore.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: cloudfoundry/cf-for-k8s-ci 6 | 7 | inputs: 8 | - name: cf-k8s-networking 9 | 10 | run: 11 | path: cf-k8s-networking/ci/tasks/team/create-community-chore.sh 12 | 13 | params: 14 | TRACKER_TOKEN: 15 | -------------------------------------------------------------------------------- /ci/tasks/team/create-istio-bump-story.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${TRACKER_TOKEN:?}" 7 | 8 | project_id=2407973 9 | 10 | ISTIO_VERSION="$(cat istio-release/tag)" 11 | 12 | # echo "Fetching Template Story..." 13 | template_story=`curl -s -X GET -H "X-TrackerToken: $TRACKER_TOKEN" "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories/174831722" | sed -E "s|X\.X\.X|${ISTIO_VERSION}|" | jq '. + {"current_state": "started"}'` 14 | 15 | echo "Creating story..." 
16 | 17 | story_id=`curl -s -X POST -H "X-TrackerToken: $TRACKER_TOKEN" -H "Content-Type: application/json" -d "$template_story" "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories" | jq .id` 18 | curl -s -X PUT -H "X-TrackerToken: $TRACKER_TOKEN" -H "Content-Type: application/json" -d '{"current_state":"unstarted"}' "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories/$story_id" > /dev/null 19 | 20 | echo "Created Story id $story_id" 21 | 22 | if [[ $story_id == "null" ]]; then 23 | exit 1 24 | fi 25 | -------------------------------------------------------------------------------- /ci/tasks/team/create-istio-bump-story.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: cloudfoundry/cf-for-k8s-ci 6 | 7 | inputs: 8 | - name: cf-k8s-networking-ci 9 | - name: istio-release 10 | 11 | run: 12 | path: cf-k8s-networking-ci/ci/tasks/team/create-istio-bump-story.sh 13 | 14 | params: 15 | TRACKER_TOKEN: 16 | -------------------------------------------------------------------------------- /ci/tasks/team/create-istio-osm-story.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | # ENV 6 | : "${TRACKER_TOKEN:?}" 7 | 8 | project_id=2382853 9 | 10 | ISTIO_VERSION="$(cat istio-release/tag)" 11 | 12 | # echo "Fetching Template Story..." 13 | template_story=`curl -s -X GET -H "X-TrackerToken: $TRACKER_TOKEN" "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories/174851272" | sed -E "s|X\.X\.X|${ISTIO_VERSION}|" | jq '. + {"current_state": "started"}'` 14 | 15 | echo "Creating story..." 16 | 17 | story_id=`curl -s -X POST -H "X-TrackerToken: $TRACKER_TOKEN" -H "Content-Type: application/json" -d "$template_story" "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories" | jq .id` 18 | curl -s -X PUT -H "X-TrackerToken: $TRACKER_TOKEN" -H "Content-Type: application/json" -d '{"current_state":"unstarted"}' "https://www.pivotaltracker.com/services/v5/projects/$project_id/stories/$story_id" > /dev/null 19 | 20 | echo "Created Story id $story_id" 21 | 22 | if [[ $story_id == "null" ]]; then 23 | exit 1 24 | fi 25 | -------------------------------------------------------------------------------- /ci/tasks/team/create-istio-osm-story.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: cloudfoundry/cf-for-k8s-ci 6 | 7 | inputs: 8 | - name: cf-k8s-networking-ci 9 | - name: istio-release 10 | 11 | run: 12 | path: cf-k8s-networking-ci/ci/tasks/team/create-istio-osm-story.sh 13 | 14 | params: 15 | TRACKER_TOKEN: 16 | -------------------------------------------------------------------------------- /ci/tasks/tests/run-networking-acceptance-gke.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euo pipefail 4 | 5 | source cf-for-k8s-ci/ci/helpers/gke.sh 6 | 7 | function main() { 8 | local cluster_name 9 | cluster_name="$(cat pool-lock/name)" 10 | gcloud_auth "${cluster_name}" 11 | 12 | local config="${PWD}/integration-config/${INTEGRATION_CONFIG_FILE}" 13 | local kube_config="${PWD}/kube-config.yml" 14 | 15 | pushd cf-k8s-networking/test/acceptance > /dev/null 16 | ./bin/test_local "${config}" "${kube_config}" 17 | popd 18 | } 19 | 20 | main 21 | 
-------------------------------------------------------------------------------- /ci/tasks/tests/run-networking-acceptance-gke.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: cloudfoundry/cf-for-k8s-ci 7 | 8 | inputs: 9 | - name: cf-for-k8s-ci 10 | - name: cf-k8s-networking 11 | - name: cf-k8s-networking-ci 12 | - name: integration-config 13 | - name: pool-lock 14 | 15 | params: 16 | CONFIG_KEEP_CLUSTER: 17 | # - Optional 18 | # - Set to non empty value to keep changes done on Kubernetes cluster after the test run 19 | CONFIG_KEEP_CF: 20 | # - Optional 21 | # - Set to non empty value to keep changes done on CF after the test run 22 | FLAKE_ATTEMPTS: 23 | GCP_PROJECT_NAME: 24 | GCP_PROJECT_ZONE: 25 | GCP_SERVICE_ACCOUNT_JSON: 26 | INTEGRATION_CONFIG_FILE: "config.json" 27 | # - JSON file with configurations 28 | 29 | run: 30 | path: cf-k8s-networking-ci/ci/tasks/tests/run-networking-acceptance-gke.sh 31 | -------------------------------------------------------------------------------- /ci/tasks/tests/run-routecontroller-integration-tests.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: cloudfoundry/cf-for-k8s-dind 7 | tag: cf-k8s-networking-integration 8 | 9 | inputs: 10 | - name: cf-k8s-networking 11 | - name: concourse-dcind 12 | 13 | # concourse-dcind/entrypoint.sh starts the docker daemon 14 | run: 15 | path: concourse-dcind/entrypoint.sh 16 | args: 17 | - cf-k8s-networking/routecontroller/scripts/integration 18 | 19 | -------------------------------------------------------------------------------- /ci/tasks/tests/run-routecontroller-units.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | 3 | image_resource: 4 | type: docker-image 5 | source: 6 | repository: cloudfoundry/cf-for-k8s-dind 7 | tag: cf-k8s-networking-integration 8 | 9 | inputs: 10 | - name: cf-k8s-networking 11 | 12 | run: 13 | path: cf-k8s-networking/routecontroller/scripts/test 14 | args: 15 | - "local" 16 | 17 | -------------------------------------------------------------------------------- /ci/tasks/tests/stress/run-stress-tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euox pipefail 4 | 5 | echo "Starting stress tests..." 6 | 7 | cp routecontroller-stress-results/results.json cf-k8s-networking/routecontroller/stress/ 8 | 9 | concourse-dcind/entrypoint.sh cf-k8s-networking/routecontroller/scripts/stress 10 | 11 | cp cf-k8s-networking/routecontroller/stress/results.json routecontroller-stress-results/results.json 12 | 13 | pushd cf-k8s-networking > /dev/null 14 | git_sha="$(cat .git/ref)" 15 | popd 16 | 17 | pushd routecontroller-stress-results 18 | git config user.name "${GIT_COMMIT_USERNAME}" 19 | git config user.email "${GIT_COMMIT_EMAIL}" 20 | git add . 
21 | git commit -m "Stress test results for cf-k8s-networking commit SHA ${git_sha}" 22 | popd 23 | 24 | shopt -s dotglob 25 | cp -r routecontroller-stress-results/* routecontroller-stress-results-modified 26 | 27 | -------------------------------------------------------------------------------- /ci/tasks/tests/stress/run-stress-tests.yml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: gcr.io/cf-networking-images/cf-k8s-networking/kind-integration-test-env 6 | inputs: 7 | - name: cf-k8s-networking 8 | - name: cf-k8s-networking-ci 9 | - name: concourse-dcind 10 | - name: routecontroller-stress-results 11 | outputs: 12 | - name: routecontroller-stress-results-modified 13 | params: 14 | GIT_COMMIT_USERNAME: "CF Networking Team CI Bot" 15 | GIT_COMMIT_EMAIL: "CF-Networking@pivotal.io" 16 | run: 17 | path: cf-k8s-networking-ci/ci/tasks/tests/stress/run-stress-tests.sh 18 | -------------------------------------------------------------------------------- /ci/team-helpers.yml: -------------------------------------------------------------------------------- 1 | resources: 2 | - name: cf-k8s-networking 3 | type: git 4 | icon: github 5 | source: 6 | uri: git@github.com:cloudfoundry/cf-k8s-networking 7 | private_key: ((github_private_key.private_key)) 8 | branch: develop 9 | - name: before-the-workday-starts 10 | type: time 11 | icon: clock 12 | source: 13 | start: 12:00 AM 14 | stop: 8:00 AM 15 | days: [Monday, Tuesday, Wednesday, Thursday, Friday] 16 | location: America/Los_Angeles 17 | 18 | jobs: 19 | - name: create-community-chore 20 | plan: 21 | - in_parallel: 22 | - get: before-the-workday-starts 23 | trigger: true 24 | - get: cf-k8s-networking 25 | - task: do_it 26 | file: cf-k8s-networking/ci/tasks/team/create-community-chore.yml 27 | params: 28 | TRACKER_TOKEN: ((tracker_api_token)) 29 | -------------------------------------------------------------------------------- /code-of-conduct.md: -------------------------------------------------------------------------------- 1 | # Cloud Foundry Community Code of Conduct 2 | 3 | Please refer to our [Community Code of Conduct](https://www.cloudfoundry.org/code-of-conduct/) 4 | -------------------------------------------------------------------------------- /config/routecontroller/cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: ClusterRoleBinding 6 | metadata: 7 | name: routecontroller 8 | namespace: #@ data.values.systemNamespace 9 | labels: 10 | app.kubernetes.io/name: routecontroller 11 | app.kubernetes.io/component: cf-networking 12 | app.kubernetes.io/part-of: cloudfoundry 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: routecontroller 17 | subjects: 18 | - kind: ServiceAccount 19 | name: routecontroller 20 | namespace: #@ data.values.systemNamespace 21 | -------------------------------------------------------------------------------- /config/routecontroller/cluster-role.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: rbac.authorization.k8s.io/v1 5 | kind: ClusterRole 6 | metadata: 7 | name: routecontroller 8 | namespace: #@ data.values.systemNamespace 9 | labels: 10 | app.kubernetes.io/name: routecontroller 11 | 
app.kubernetes.io/component: cf-networking 12 | app.kubernetes.io/part-of: cloudfoundry 13 | rules: 14 | - apiGroups: ["networking.cloudfoundry.org"] 15 | resources: ["routes", "routes/status"] 16 | verbs: ["create", "delete", "get", "update", "list", "watch"] 17 | - apiGroups: ["networking.istio.io"] 18 | resources: ["virtualservices"] 19 | verbs: ["create", "delete", "get", "update", "list", "watch"] 20 | - apiGroups: ["coordination.k8s.io"] 21 | resources: ["leases"] 22 | verbs: ["create", "delete", "get", "update", "list", "watch"] 23 | - apiGroups: [""] 24 | resources: ["services"] 25 | verbs: ["create", "delete", "get", "update", "list", "watch"] 26 | - apiGroups: [""] 27 | resources: ["events"] 28 | verbs: ["create"] 29 | - apiGroups: [""] 30 | resources: ["configmaps"] 31 | verbs: ["create", "delete", "get", "update"] 32 | -------------------------------------------------------------------------------- /config/routecontroller/routecontroller-configmap.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | --- 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: routecontroller-config 7 | namespace: #@ data.values.systemNamespace 8 | annotations: 9 | kapp.k14s.io/versioned: "" 10 | kapp.k14s.io/num-versions: "2" 11 | labels: 12 | app.kubernetes.io/name: routecontroller-config 13 | app.kubernetes.io/component: cf-networking 14 | app.kubernetes.io/part-of: cloudfoundry 15 | data: 16 | LEADER_ELECTION_NAMESPACE: #@ data.values.systemNamespace 17 | ISTIO_GATEWAY_NAME: #@ data.values.systemNamespace + "/istio-ingressgateway" 18 | RESYNC_INTERVAL: "900" 19 | -------------------------------------------------------------------------------- /config/routecontroller/routecontroller.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | name: routecontroller 8 | namespace: #@ data.values.systemNamespace 9 | labels: 10 | app: routecontroller 11 | app.kubernetes.io/name: routecontroller 12 | app.kubernetes.io/component: cf-networking 13 | app.kubernetes.io/part-of: cloudfoundry 14 | spec: 15 | replicas: 1 16 | selector: 17 | matchLabels: 18 | app: routecontroller 19 | template: 20 | metadata: 21 | namespace: #@ data.values.systemNamespace 22 | annotations: 23 | prometheus.io/path: /metrics 24 | prometheus.io/port: "8080" 25 | prometheus.io/scrape: "true" 26 | labels: 27 | app: routecontroller 28 | app.kubernetes.io/name: routecontroller 29 | app.kubernetes.io/component: cf-networking 30 | app.kubernetes.io/part-of: cloudfoundry 31 | spec: 32 | containers: 33 | - name: routecontroller 34 | image: #@ data.values.images.routecontroller 35 | args: ["--enable-leader-election=true"] 36 | resources: 37 | limits: 38 | cpu: 100m 39 | memory: 10Gi 40 | requests: 41 | cpu: 100m 42 | memory: 20Mi 43 | envFrom: 44 | - configMapRef: 45 | name: routecontroller-config 46 | terminationGracePeriodSeconds: 10 47 | serviceAccountName: routecontroller 48 | -------------------------------------------------------------------------------- /config/routecontroller/service-account.yaml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:data", "data") 2 | 3 | --- 4 | apiVersion: v1 5 | kind: ServiceAccount 6 | metadata: 7 | name: routecontroller 8 | namespace: #@ data.values.systemNamespace 9 | labels: 10 | app.kubernetes.io/name: routecontroller 11 | 
app.kubernetes.io/component: cf-networking 12 | app.kubernetes.io/part-of: cloudfoundry 13 | -------------------------------------------------------------------------------- /config/values/_defaults.yml: -------------------------------------------------------------------------------- 1 | #@data/values 2 | --- 3 | #! Default values for cf-k8s-networking. 4 | #! This is a YTT-formatted file. 5 | systemNamespace: cf-system 6 | workloadsNamespace: cf-workloads 7 | 8 | service: 9 | externalPort: 80 10 | -------------------------------------------------------------------------------- /config/values/images.yml: -------------------------------------------------------------------------------- 1 | #@ load("@ytt:overlay", "overlay") 2 | #@data/values 3 | --- 4 | #@overlay/match missing_ok=True 5 | images: 6 | routecontroller: "index.docker.io/cloudfoundry/routecontroller@sha256:72db6b8d2d5ceeee7084f2263409ec2e9c0ad14b5aafa43ab2d68283cd663667" 7 | -------------------------------------------------------------------------------- /doc/access-logs.md: -------------------------------------------------------------------------------- 1 | ## Access Logs 2 | 3 | This documentation has been moved to [cf-for-k8s/docs/platform_operators/gateway-access-logs.md](https://github.com/cloudfoundry/cf-for-k8s/blob/master/docs/platform_operators/gateway-access-logs.md). 4 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0001-record-architecture-decisions.md: -------------------------------------------------------------------------------- 1 | # 1. Record architecture decisions 2 | 3 | Date: 2019-10-22 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | We need to record the architectural decisions made on this project. 12 | 13 | ## Decision 14 | 15 | We will use Architecture Decision Records, as [described by Michael Nygard](http://thinkrelevance.com/blog/2011/11/15/documenting-architecture-decisions). 16 | 17 | ## Consequences 18 | 19 | See Michael Nygard's article, linked above. For a lightweight ADR toolset, see Nat Pryce's [adr-tools](https://github.com/npryce/adr-tools). 20 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0002-directly-create-istio-resources.md: -------------------------------------------------------------------------------- 1 | # 2. Directly Create Istio Resources 2 | 3 | Date: 2019-10-22 4 | 5 | ## Status 6 | 7 | Superseded by [ADR 10](0010-route-crd-and-kubebuilder-instead-of-metacontroller.md) 8 | 9 | ## Context 10 | 11 | In the [original proposal](https://docs.google.com/document/d/1EYRBVuQedU1r0zexgi8oMSOEFgaMNzM8JWBje3XuweU) for basic http 12 | ingress routing for CF on Kubernetes, we proposed writing a controller that wrote custom Route resources to the Kubernetes 13 | API. Additionally we would develop a second controller that read the Route CRDs and would create k8s Services and Istio 14 | VirtualServices. 15 | 16 | We discovered several issues with this design. First, we realized that we must have a single VirtualService per FQDN. 17 | While multiple VirtualServices for the same FQDN are [technically permitted by Istio](https://istio.io/docs/ops/traffic-management/deploy-guidelines/#multiple-virtual-services-and-destination-rules-for-the-same-host), 18 | the order in which the match rules for the paths are applied is non-deterministic. In CF we expect that the longest path 19 | prefix is matched first, so this behavior did not suit our needs. 
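To make this concrete, here is a purely illustrative sketch of a single per-FQDN VirtualService whose match rules are ordered longest-prefix-first (the hostname, Service names, and gateway reference below are hypothetical, not taken from this repo):

```yaml
# Illustrative only: one VirtualService owns all routes for this FQDN, so the
# longest path prefix can be listed, and therefore matched, first.
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: example-app.apps.example.com       # hypothetical
spec:
  hosts:
  - example-app.apps.example.com
  gateways:
  - cf-system/istio-ingressgateway          # hypothetical gateway reference
  http:
  - match:
    - uri:
        prefix: /api/v2                     # longer prefix listed first
    route:
    - destination:
        host: service-for-route-2           # hypothetical k8s Service
  - match:
    - uri:
        prefix: /api
    route:
    - destination:
        host: service-for-route-1
```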
20 | 21 | Since we had to aggregate multiple Route resources to construct a single VirtualService, this meant we could not use 22 | Metacontroller for our second controller. Having multiple "parent" Routes for a single set of "children" VirtualServices would 23 | violate Metacontroller's assumptions. 24 | 25 | While we could build a custom second controller using Kubebuilder, we decided that for simplicity and expediency we could 26 | just omit the creation of the Route CRDs for the time being. 27 | 28 | ## Decision 29 | 30 | * CF Route Syncer will directly create k8s Services and Istio VirtualServices instead of creating intermediate Route CRDs 31 | 32 | ## Consequences 33 | 34 | * We will be able to implement a demo-able MVP more rapidly 35 | * We will only have to maintain a single metacontroller webhook rather than 36 | implement a second controller that aggregates Route CRDs 37 | * We will no longer have a close-representation of Cloud Controller routes in 38 | the k8s API 39 | * This will couple us more tightly with Istio, but we believe we can easily undo 40 | this decision 41 | 42 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0003-tagging-and-publishing-docker-images.md: -------------------------------------------------------------------------------- 1 | # 3. Tagging and Publishing Dev/CI Images 2 | 3 | Date: 2019-10-31 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | (🎃 Happy Halloween 👻) 11 | 12 | We need to have a way of creating and deploying container images of our software for development and CI use. 13 | 14 | We didn't want to break the `latest` image tag every time we did local development and we need to have CI deploy a consistent image between runs. 15 | 16 | ## Decision 17 | 18 | ### Local development 19 | Each development environment pipeline deploys off a dedicated docker tag. e.g. `eirini-dev-1` environment deploys the 20 | docker image tagged `eirini-dev-1` so when developing locally (i.e. without pushing to Git) 21 | we can tag and push images with that dedicated tag and redeploy easily. 22 | 23 | Example Workflow: 24 | ```bash 25 | environment_name=eirini-dev-1 26 | docker tag $img gcr.io/cf-routing/cf-k8s-networking/cfroutesync:$environment_name 27 | docker push gcr.io/cf-routing/cf-k8s-networking/cfroutesync:$environment_name 28 | ``` 29 | 30 | ### Branch development 31 | A github action will trigger on pushes to all branches and publish a Docker image tagged with the git SHA and the branch name. 32 | 33 | #### Develop branch 34 | When we push to the `develop` branch we will tag the image with the git SHA, branch name, and `latest`. 35 | 36 | ## Consequences 37 | 38 | We will likely be producing a significant amount of images with this workflow (one for each push) so eventually we will need to figure out a way of pruning old ones. 39 | For now though our images are pretty small so we feel we can defer this work. 40 | 41 | ## Addendum 42 | 2020-06-26: Replaced "master" branch with "develop" branch as described in this 43 | [ADR](./0013-rename-master-branch.md). 44 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0005-networking-acceptance-tests.md: -------------------------------------------------------------------------------- 1 | # 5. 
Networking Acceptance Tests 2 | 3 | Date: 2020-02-04 4 | Updated: 2020-06-05 5 | 6 | ## Status 7 | 8 | Accepted 9 | 10 | ## Context 11 | 12 | We wrote a set of [networking-acceptance-tests](../../test/acceptance) that require a CF for Kubernetes 13 | environment set up correctly in order to test networking behavior in an integrated environment. 14 | 15 | ## Decision 16 | 17 | These tests are different from [integration tests](../../routecontroller/integration) since they require an integrated environment setup. 18 | These tests are different from CATs in that they test specialized networking setup in CF for Kubernetes. 19 | 20 | We've decided to keep these acceptance tests in this repository because it is simple 21 | and they are run in CI and rely on this 22 | [script](../../ci/tasks/tests/run-networking-acceptance-gke.sh). 23 | 24 | Tests should be included in [networking-acceptance-tests](../../test/acceptance) if they require a CF for 25 | Kubernetes environment and test the setup of networking. 26 | 27 | ## Addendum 28 | 2020-06-19: Updated the "integration tests" link to point to the 29 | `routecontroller` directory and updated the "script" link as per [ADR 30 | 010](./0010-route-crd-and-kubebuilder-instead-of-metacontroller.md) 31 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0006-rewrite-http-liveness-readiness-probes-for-healthchecks.md: -------------------------------------------------------------------------------- 1 | # 6. Rewrite HTTP Liveness and Readiness Probes for Healthchecks 2 | 3 | Date: 2020-02-05 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | With Istio auto mTLS enabled in `STRICT` mode, [http liveness and readiness 11 | probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/) no longer work because the `kubelet`, which makes the http requests, does not have Istio issued certificates. 12 | 13 | Istio [supports rewriting `http` probes](https://istio.io/docs/ops/configuration/mesh/app-health-check/#enable-globally-via-install-option) during the sidecar injection process. 14 | 15 | #### Figure 1 16 | _Liveness probe flow when Istio mTLS is disabled or `PERMISSIVE`. Probe `GET` request regularly travels through the Envoy sidecar to the app._ 17 | 18 | ![No mTLS/PERMISSIVE mTLS mode liveness probe diagram](../assets/liveness-probe-adr-1.png) 19 | 20 | #### Figure 2 21 | _Liveness probe flow when Istio mTLS is `STRICT` and the probe is not rewritten. Probe `GET` request fails at the Envoy sidecar because it does not include the correct certificates._ 22 | 23 | ![STRICT mTLS liveness probe diagram with no probe rewrite](../assets/liveness-probe-adr-2.png) 24 | 25 | #### Figure 3 26 | _Liveness probe flow when Istio mTLS is `STRICT` and the probe **is rewritten by Istio**. Probe `GET` request bypasses the sidecar and goes through the Istio `pilot-agent` instead. The `pilot-agent` is configured to direct the request to the app._ 27 | 28 | ![STRICT mTLS liveness probe diagram with probe rewrite](../assets/liveness-probe-adr-3.png) 29 | 30 | 31 | ## Decision 32 | We have decided to install Istio with 33 | 34 | `--set values.sidecarInjectorWebhook.rewriteAppHTTPProbe=true` 35 | 36 | This will rewrite the liveness and readiness probes on any app pods when injecting a sidecar into the app pod. Any namespaces that have the label `istio-injection=enabled` will have their liveness and readiness probes rewritten, as shown in Figure 3. 
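As a hedged illustration of the effect (the path and port below are Istio defaults at the time of writing and can vary by version; the container name is hypothetical), an app-defined probe such as `GET /healthz` on port 8080 ends up pointed at the `pilot-agent` rather than at the app port:

```yaml
# Hedged sketch: the rewritten probe targets the pilot-agent, which forwards
# the health check to the app inside the pod.
livenessProbe:
  httpGet:
    path: /app-health/my-app/livez   # "my-app" is the app container's name
    port: 15020                      # pilot-agent status port
```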
37 | 38 | ## Consequences 39 | * We believe that rewriting the liveness and readiness probes will be compatible with the [health check configuration](https://docs.cloudfoundry.org/devguide/deploy-apps/healthchecks.html) that app devs can currently use in Cloud Foundry 40 | * In the future, if CF app devs want more complex http healthchecks that involve more than checking the response code of the healthcheck endpoint, we may have to do some significant work. 41 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0007-maintain-generated-istio.md: -------------------------------------------------------------------------------- 1 | # 7. Maintain Generated Istio 2 | 3 | Date: 2020-02-19 4 | 5 | ## Status 6 | 7 | Partially superseded by [ADR 8 | 17](./0017-moving-istio-configuration-out-of-this-repo.md). To look at the files 9 | at the moment this ADR was written you can browse files at [3f55af5 10 | commit](https://github.com/cloudfoundry/cf-k8s-networking/tree/3f55af54912a527de16a8f70645018e4f13f9dba). 11 | 12 | ## Context 🤔 13 | Cf-k8s-networking was designed to be integrated with 14 | [cf-for-k8s](https://github.com/cloudfoundry/cf-for-k8s/). The Istio 15 | installation used to be maintained by 16 | [cf-for-k8s](https://github.com/cloudfoundry/cf-for-k8s/), but the networking 17 | team needed to be able to easily make changes to [Istio](https://istio.io/) 18 | configuration to enable more networking features for [Cloud 19 | Foundry](https://www.cloudfoundry.org/). 20 | 21 | 22 | ## Decision 23 | We decided to move the scripts to build Istio configuration, and maintain a 24 | generated Istio configuration within the cf-k8s-networking repository. 25 | 26 | The build scripts and `ytt` overlays for Istio live in this repo (links removed 27 | as they are no longer relevant or accurate). **UPDATE** This configuration has 28 | moved as a result of [ADR 29 | 017](./0017-moving-istio-configuration-out-of-this-repo.md). 30 | 31 | ## Consequences 32 | When making changes to anything related to the Istio installation (build scripts, `ytt` overlays, Istio configuration), developers need to also generate the new corresponding Istio yaml following the doc [doc/update-istio.md](../update-istio.md) 33 | 34 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0008-implement-workarounds-for-capi-and-log-cache-to-unblock-global-strict-mtls.md: -------------------------------------------------------------------------------- 1 | # 8. Implement Workarounds for CAPI and Log-Cache to Unblock Global STRICT mTLS 2 | 3 | Date: 2020-02-24 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | We need to turn on STRICT mTLS for all components on the mesh. However, some 12 | components are currently incompatible with this mode. 13 | 14 | CAPI is incompatible because it uses an init container to run migrations. This 15 | init container comes up before the sidecar, so it is unable to establish an mTLS 16 | connection with the capi database. This causes the init container to fail and 17 | prevents capi from coming up. See [this 18 | issue](https://github.com/cloudfoundry/capi-k8s-release/issues/12) in capi. 19 | 20 | Log-cache is incompatible because it is configured to establish its own tls 21 | connection, which is incompatible with the mTLS the sidecars are attempting to 22 | establish. 
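For orientation, a workaround of the kind described in the decision below usually takes the shape of a narrowly scoped mTLS exception. The following is only a hedged sketch using the newer `PeerAuthentication` API (names and labels are illustrative; the Istio version in use at the time may have required the older `Policy` resource instead):

```yaml
# Hedged sketch, not the exact resource shipped in cf-for-k8s: relax mTLS to
# PERMISSIVE for one problematic workload while the rest of the mesh stays STRICT.
apiVersion: security.istio.io/v1beta1
kind: PeerAuthentication
metadata:
  name: log-cache-permissive    # illustrative name
  namespace: cf-system
spec:
  selector:
    matchLabels:
      app: log-cache
  mtls:
    mode: PERMISSIVE
```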
23 | 24 | ## Decision 25 | 26 | We have provided configuration workarounds in the form of Policies, which were 27 | placed in the cf-for-k8s repo to be owned by the respective teams that manage 28 | the troublesome components. 29 | 30 | [Pull Request](https://github.com/cloudfoundry/cf-for-k8s/pull/35) 31 | 32 | 33 | ## Consequences 34 | 35 | These components will accept plain text communication. We don't consider this to 36 | be a significant issue because both already implement encryption in some form on 37 | their own. It would be best in the long run if they could stop being exceptions 38 | though. 39 | 40 | The log-cache and capi teams now have to care about Istio configuration, and 41 | will eventually need to make changes to their components to eliminate these 42 | workarounds. However, our work is no longer blocked on their changes, so we 43 | consider this an absolute win. 44 | 45 | This is the way. 🗞🙃 46 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0009-kubebuilder-controllers-dynamic-client-over-generated-clients.md: -------------------------------------------------------------------------------- 1 | # 9. Kubebuilder Controllers: Use the Controller Runtime Dynamic Client over Generated Clients 2 | 3 | Date: 2020-04-17 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | ### Kubebuilder uses dynamic controller-runtime clients by default 12 | Kubebuilder uses the 13 | [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) 14 | library. Controller Runtime has a dynamic 15 | [client](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/client) that 16 | is used by Kubebuilder controllers by default, rather than a generated 17 | [client](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/generating-clientset.md). 18 | When you `kubebuilder create api`, it creates the api types and, in order to 19 | interact with these api types, the controller is supplied a controller-runtime 20 | client in its controller scaffolding so you can CRUD that api type in the 21 | Kubernetes API. 22 | 23 | ### Problems with generated clients 24 | 25 | Using third party generated clients can also be problematic because of the 26 | transitive dependency on the Kubernetes 27 | [client-go](https://github.com/kubernetes/client-go) library and our own 28 | dependency on client-go. When our controllers want to use a newer version of 29 | the client-go library, this can cause problems for our third party generated 30 | clients because they will use a different version of client-go. This doesn't 31 | cause problems if client-go keeps the same interface, but we have seen newer 32 | versions of client-go break its public interface causing compilation issues. 33 | 34 | If the third party libraries ensured they updated their libraries to use the 35 | same version of client-go in a timely manner, this could be less of a 36 | problem. However, this puts a dependency on these third party libraries to 37 | keep their client-go libraries up-to-date. 38 | 39 | ## Decision 40 | 41 | We will only use the controller-runtime client to interact with Kubernetes 42 | API objects instead of generated clients. This limits our dependency on third 43 | party libraries that can cause conflicts with the client-go library. 
44 | 45 | ## Consequences 46 | 47 | - To interact with Istio objects we won't use the istio/client-go library and 48 | instead use the controller-runtime client with the istio/api library 49 | directly. This does require us to wrap the istio/api objects in our own 50 | Kubernetes specific API structs. 51 | - Updating our version of client-go won't require us to bump a plethora of 52 | third party libraries that also use client-go. -------------------------------------------------------------------------------- /doc/architecture-decisions/0011-use-kind-clusters-for-routecontroller-integration.md: -------------------------------------------------------------------------------- 1 | # 11. Use KIND Clusters for Routecontroller Integration 2 | 3 | Date: 2020-05-05 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | We are working to get up and running quickly with our new routecontroller 12 | refactor. Much of our work using kubebuilder is informed by what the 13 | ingress-router team learned during their work with it. 14 | 15 | The cfroutesync integration tests used GKE and were very fast; however, they were 16 | unwieldy and difficult to reason about, as they'd been written to test a very 17 | specific set of circumstances. Since we were not happy with the way those 18 | integration tests worked, we had an opportunity to rethink our tests, and since there 19 | was a model available from ingress-router, KIND seemed like the best option. 20 | 21 | ## Decision 22 | 23 | We decided to use KIND as it is a full and lightweight Kubernetes environment 24 | that creates clusters within Docker containers. 25 | 26 | ## Consequences 27 | 28 | * Our tests run a bit slower because we're creating a KIND cluster before each test 29 | * Our tests cannot pollute each other because a new cluster is used each time 30 | * Test setup is far less complicated, no more provisioning a GKE cluster before 31 | you can run them 32 | * Tests are easier to reason about and write because there are no surprise 33 | resources on it like with the cfroutesync tests 34 | * You can run `ginkgo .` and run all the tests, locally, without any setup, in 35 | under 10 minutes 36 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0012-routecontroller-route-deletion-finalizer.md: -------------------------------------------------------------------------------- 1 | # 12. Routecontroller Route Deletion Finalizer 2 | 3 | Date: 2020-05-11 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | A [finalizer](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#finalizers) allows you to write asynchronous pre-delete logic, such as deleting associated resources. Here's an [example](https://book.kubebuilder.io/reference/using-finalizers.html) of how finalizers can be used with Kubebuilder. 11 | 12 | Without a finalizer, we could rely on cascading deletion of child objects when Routes are deleted and `RequeueAfter` to rebuild the virtual services and services from their routes every `ResyncInterval` seconds. `ResyncInterval` is currently set to 30 seconds. 13 | 14 | However, this doesn’t meet our SLO to handle changes within 10 seconds. [Cascading deletes don’t work within 10 seconds](https://github.com/kubernetes/kubernetes/blob/af67408c172630d59996207a2f3587ea88c96572/test/integration/garbagecollector/garbage_collector_test.go#L385-L392), so meeting the SLO would require `ResyncInterval` to be less than 10 seconds, which seems unreasonable. 
15 | 16 | Cascading deletion alone also doesn’t handle the case of a virtual service being owned by >1 Route. This means a cascading delete cannot update the virtual service’s contents to not include the paths related to that deleted route. So we would need to rely on `RequeueAfter` on a different route with the same FQDN for those updates, which would be slow, and a strange behavior to support. 17 | 18 | 19 | ## Decision 20 | 21 | In order to handle all of the cases: deleting services, deleting virtual services owned by only that route, and updating virtual services owned by many routes, we rely on a finalizer, so we can have a “fast path” to all of these cases. 22 | 23 | Finalizers do a “soft delete” to keep the route in the K8s API while handling deletion/updates to the route’s child objects. 24 | 25 | Using finalizers allows us to implement all of the cases in our route deletion logic. This helps us meet our SLO of having 95% of route changes being reflected within 10 seconds. `RequeueAfter` serves as a “sync” to handle disaster recovery scenarios when unexpected operations outside of normal controller reconciliation happen (i.e. child resources are deleted in etcd). 26 | 27 | ## Consequences 28 | 29 | * We can meet our SLO of having 95% of route changes being reflected within 10 seconds when routes are deleted 30 | * Routes will fail to be deleted if there is no routecontroller available to resolve the finalizer 31 | * As a result, cf-for-k8s will now delete the workload namespaces first when deleting a CF deployment 32 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0013-rename-master-branch.md: -------------------------------------------------------------------------------- 1 | # 13. Rename cf-k8s-networking master branch to develop 2 | 3 | Date: 2020-06-26 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | A [proposal](https://lists.cloudfoundry.org/g/cf-dev/topic/75070528#9059) was 12 | made on the cf-dev mailing list to rename our `master` branch to `main` to make 13 | the CF community a more welcoming and inclusive environment. 14 | 15 | ## Decision 16 | 17 | We are going to rename our `master` branch to `develop`. `develop` was chosen 18 | instead of `main` by team consensus because it better describes the use of the 19 | branch. 20 | 21 | ## Consequences 22 | 23 | - Better description of what the branch is used for 24 | - May not be consistent with other teams in the foundation. However, it was also 25 | decided that if consistency is more valuable, it is a quick change to use 26 | whatever the foundation consensus is. 27 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0014-ingress-gateway-as-a-daemon-set.md: -------------------------------------------------------------------------------- 1 | # 14. Ingress Gateway as a daemon set instead of a deployment 2 | 3 | Date: 2020-06-26 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | By default the Istio Ingress Gateway is deployed as a Kubernetes Deployment, 12 | along with a Kubernetes Load Balancer Service. This is fine for clusters that 13 | support Load Balancer Services. For clusters that do not, it takes more effort 14 | to configure the Istio Ingress Gateway in a way that is accessible from outside 15 | the cluster while also using the well-known http/https ports (80/443). 
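One way around this, and the approach taken in the decision below, is to skip the LoadBalancer Service entirely and publish the gateway on well-known ports of every node. A hedged sketch of the relevant portion of such a DaemonSet (image and container port numbers are illustrative):

```yaml
# Hedged sketch: a DaemonSet pod publishing the gateway on each node's
# ports 80/443 via hostPorts, so no LoadBalancer Service is needed.
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: istio-ingressgateway
  namespace: istio-system
spec:
  selector:
    matchLabels:
      app: istio-ingressgateway
  template:
    metadata:
      labels:
        app: istio-ingressgateway
    spec:
      containers:
      - name: istio-proxy
        image: docker.io/istio/proxyv2   # tag omitted; illustrative
        ports:
        - containerPort: 8080
          hostPort: 80                   # HTTP on every node
        - containerPort: 8443
          hostPort: 443                  # HTTPS on every node
```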
16 | 17 | ## Decision 18 | 19 | CF-K8s-Networking changes the Istio Ingress Gateway to be deployed as a Daemon 20 | Set to make it easier for users that can't use a Kubernetes Load Balancer 21 | Service on their clusters to try cf-for-k8s. By deploying a Daemon Set we can 22 | bind ports 80 and 443 on each Node to the Istio Ingress Gateway directly. This 23 | allows a user to send traffic to each node on ports 80 and 443 without 24 | needing a Kubernetes Service. 25 | 26 | ## Consequences 27 | 28 | - Easier for an operator to get started with cf-for-k8s. 29 | - Less control over the number of Istio Ingress Gateways for the cluster. There 30 | are performance concerns with having a large number of gateways on a large 31 | cluster. 32 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0016-job-for-upgrading-istio-sidecars-on-workloads.md: -------------------------------------------------------------------------------- 1 | # 16. Job for Upgrading Istio Sidecars on Workloads 2 | 3 | Date: 2020-08-11 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | Istio's service mesh capabilities are facilitated via sidecars injected into 12 | workload pods. These sidecars run an Istio-patched version of Envoy that is tied 13 | to the version of Istio that injects them. 14 | 15 | Typically when new versions of Istio are released, new versions of the sidecars 16 | are released as well. Istio has been good so far about supporting older versions 17 | of sidecars that were deployed before Istio was upgraded, but it is still 18 | [documented best practice](https://istio.io/latest/docs/setup/upgrade/) to roll 19 | all the pods after an Istio upgrade. 20 | 21 | As an additional constraint, the operators of cf-for-k8s clusters expect to be 22 | able to perform upgrades in one `kapp deploy`, with no post-install hooks or 23 | other bash scripts. This limits our options considerably. See this [Slack 24 | thread](https://cloudfoundry.slack.com/archives/CH9LF6V1P/p1592521879117400) on 25 | that constraint. 26 | 27 | ## Decision 28 | 29 | We will use the Kubernetes 30 | [Job](https://kubernetes.io/docs/concepts/workloads/controllers/job/) resource 31 | to run the kubectl command needed to roll workload pods, after waiting for the 32 | new Istio control plane to be up and healthy. 33 | 34 | To that end, we will add the necessary minimal `ServiceAccounts` and `Roles` 35 | needed to list resources in the `istio-system` namespace, and restart resources 36 | in the configured workload namespace. We will also build and maintain a 37 | container image that contains the Job's logic. 38 | 39 | All Istio components will be tagged with their Istio version so that the job can 40 | positively determine that the correct version of control plane components are 41 | alive and healthy. We will also name the job according to its Istio version, so 42 | that we can take advantage of the inherent immutability of `Jobs` in cases where a 43 | cf-for-k8s upgrade does not contain a new Istio version (pushing the same job 44 | again will not cause it to rerun, preventing workloads from rolling 45 | unnecessarily). Subsequent jobs will clean up previous ones. 
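A hedged sketch of the shape of such a Job (the image, ServiceAccount name, and exact wait/restart commands are illustrative, not the shipped implementation):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  # Named per Istio version: re-pushing the same Job is a no-op, so workloads
  # only roll when the Istio version actually changes.
  name: istio-sidecar-upgrade-1-7-3              # illustrative version suffix
  namespace: cf-workloads                        # the configured workload namespace
spec:
  template:
    spec:
      serviceAccountName: istio-sidecar-upgrader # minimal RBAC described above
      restartPolicy: OnFailure
      containers:
      - name: roll-workloads
        image: bitnami/kubectl                   # any image that provides kubectl
        command:
        - /bin/sh
        - -c
        - |
          # wait for the new control plane, then roll workload pods so they
          # pick up the matching sidecar version
          kubectl rollout status deployment/istiod -n istio-system --timeout=10m
          kubectl rollout restart statefulset -n cf-workloads
```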
46 | 47 | ## Consequences 48 | 49 | * Apps will always have the current version of Istio's side car 50 | * Apps deployed with a single instance will experience downtime during upgrades 51 | * This may break some uptime testing that other teams are doing, but 52 | deploying 2 instances should fix them without requiring significant 53 | additional resources 54 | * A completed job will hang around in the configured workload namespace, but 55 | only platform operators will see that 56 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0017-moving-istio-configuration-out-of-this-repo.md: -------------------------------------------------------------------------------- 1 | # 17. Moving Istio and Related Configuration to CF-for-K8s Repo 2 | 3 | Date: 2020-09-15 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | This ADR partially reverts the decision made in [ADR # 7. Maintain Generated 12 | Istio](./0007-maintain-generated-istio.md). 13 | 14 | The networking config and related Istio config is spread widely throughout both 15 | cf-for-k8s and cf-k8s-networking. Having the config in both places has made 16 | processes such as updating networking config, versioning routecontroller, 17 | upgrading Istio, deciding where some networking config should exist (in this repo or 18 | in the cf-for-k8s repo), and so on complicated. 19 | 20 | 21 | ## Decision 22 | 23 | We will move Istio configuration to cf-for-k8s repo to reduce the overhead of 24 | having incompatibility between cf-k8s-networking and cf-for-k8s. The CF K8s 25 | Networking team will remain responsible for the Istio config in the 26 | cf-for-k8s repo. We are going to keep the network acceptance tests (NATs) in 27 | this repo and run in our 28 | [Concourse CI](https://networking.ci.cf-app.com/teams/cf-k8s/pipelines/cf-k8s-pipeline). 29 | 30 | More details on this decision: 31 | 32 | * move Istio config generation and overlays folder `istio-install` to 33 | [cf-for-k8s/build/istio](https://github.com/cloudfoundry/cf-for-k8s/tree/master/build/istio) 34 | * move Istio generated and other networking config folders `config/istio`, 35 | `config/istio-generated` to [cf-for-k8s/config/istio](https://github.com/cloudfoundry/cf-for-k8s/tree/master/config/istio) 36 | * overlays directly related to Istio installation should be created in 37 | cf-for-k8s/build/istio 38 | * Istio values should not be created in cf-for-k8s values config but via 39 | starlark functions in [cf-for-k8s/config/istio](https://github.com/cloudfoundry/cf-for-k8s/tree/master/config/istio), e.g. for `istio_version` value: 40 | ``` 41 | #@ def build_version(): 42 | #@ return "1.6.4" 43 | #@ end 44 | ``` 45 | * when contributing to networking in cf-for-k8s open PR and tag it with 46 | `networking` tag to differentiate those PRs in our CI. 47 | * create CI job to run acceptance tests upon new networking PRs in cf-for-k8s 48 | * update the documentation to reflect the change 49 | * update CI jobs depending on Istio config in this repo (such istio-upgrade, 50 | images, scaling, etc) 51 | 52 | 53 | ## Consequences 54 | 55 | * cf-k8s-networking now mostly only contains routecontroller, CI and tests. 56 | * Istio config now lives in the [cf-for-k8s 57 | repo](https://github.com/cloudfoundry/cf-for-k8s/tree/master/config/istio) and 58 | whenever need to make changes to Istio config, we do so through a PR to 59 | cf-for-k8s. 
60 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0018-create-policy-server.md: -------------------------------------------------------------------------------- 1 | # 18. Create a Policy Server to Manage Network Policy 2 | 3 | Date: 2020-10-27 4 | 5 | ## Status 6 | 7 | Accepted 8 | 9 | ## Context 10 | 11 | Well implemented service oriented apps typically include backend services who 12 | never serve requests from users or clients outside the foundation. 13 | 14 | Currently, for an app to reach a backend service, the backend service must 15 | expose itself through the ingress gateway, and the app must hairpin through the 16 | ingress gateway to reach it. This is a security concern, backend apps should not 17 | be accessible outside the foundation. 18 | 19 | CF for VMs provides an API mechanism for configuring which apps are permitted to 20 | communicate with which other apps, called [Network 21 | Policy](https://docs.cloudfoundry.org/devguide/deploy-apps/cf-networking.html#create-policies). 22 | The job that provides this API is called policy-server, it has its own database 23 | and API endpoint that the CLI communicates with. 24 | 25 | Kubernetes provides the [NetworkPolicy Resource](https://kubernetes.io/docs/concepts/services-networking/network-policies/) 26 | which serves similar outcomes. 27 | 28 | Our objective is to create Kubernetes NetworkPolicy from CF Network Policy. 29 | 30 | For more background, see the [exploration 31 | document](https://docs.google.com/document/d/1qAYy737uB7orT8St56wg5MbnlrbuA9NoJib8dv4mNK0/edit#) 32 | 33 | ## Decision 34 | 35 | We will write a new stateless component which implements the existing CF Network 36 | Policy API endpoint. It will read and write Kubernetes NetworkPolicy directly to 37 | the API server and use it as its store, rather than maintaining its own 38 | database. 39 | 40 | For specific implementation details, example commands, and example resources, 41 | see [the exploration 42 | document](https://docs.google.com/document/d/1qAYy737uB7orT8St56wg5MbnlrbuA9NoJib8dv4mNK0/edit#heading=h.pb0he04m6fbf). 43 | 44 | ## Consequences 45 | 46 | - The API Server will be the source of truth for CF Network Policy, effectively 47 | making CF Network Policy and Kubernetes NetworkPolicy one and the same. 48 | - As this uses entirely built-in Kubernetes resources, we do not add any 49 | external dependencies. 50 | - This component may also need to write or modify Sidecar resources on 51 | foundations using Istio sidecars, as sidecars are not currently configured 52 | for app to app communication. 53 | - There will be no path to import a cf-for-vms policy-server database into 54 | cf-for-k8s 55 | - Our new component will need to conform to the new observability initiative 56 | (incl the distributed tracing work) that CAPI is doing so that this configuration 57 | can be traced as well. 58 | -------------------------------------------------------------------------------- /doc/architecture-decisions/0019-route-crd-and-contour-controller.md: -------------------------------------------------------------------------------- 1 | # 19. Allow Alternative Ingress Solution Provider in RouteController 2 | 3 | Date: 2020-10-26 4 | 5 | ## Status 6 | 7 | Proposal 8 | 9 | ## Context 10 | In our efforts to allow more optionality in ingress solutions for CF-for-K8s, we 11 | want to allow the use of Contour as a potential alternative to Istio. 12 | 13 | ### Proposed Design 14 | 15 | 1. 
Add a configuration option `ingress_solution_provider` to CF-for-K8s, with 16 | the potential values `istio` or `contour`. 17 | 2. Extend routecontroller to respect the configured `ingress_solution_provider` value. It will create the appropriate resources based on the chosen provider. 18 | 19 | #### Why not make a separate controller altogether? 20 | 21 | We are opting to extend routecontroller instead of making a separate one because 22 | we believe it to be simpler. Best practice is to have one controller reconciling 23 | objects of one type. Because routecontroller only watches for Route CRs, it 24 | doesn't break that best practice. Whether VirtualServices or HTTPProxies are 25 | created as a result does not matter (illustrative examples of both resource shapes appear at the end of this document). 26 | 27 | It seems overbearing to maintain a separate controller and all the boilerplate 28 | around it when all we really need is a separate resource builder. 29 | 30 | We plan to make routecontroller only create one type of resource or the other, 31 | never both. This will prevent the confusing situation of Istio resources 32 | existing in the cluster when Contour is the selected ingress solution provider, 33 | or vice versa. 34 | 35 | ### Open Questions 36 | 1. What do we do about the config? Does the Contour config live in cf-k8s-networking 37 | and eventually move to cf-for-k8s? 38 | * The config will live in cf-for-k8s. 39 | 2. If an operator wants to change their ingress solution provider, 40 | will they have to redeploy CF-for-K8s? Is that a big deal? 41 | * Yes, they will. 42 | 3. It looks like Contour deploys Envoys as a DaemonSet. Is that going to be a problem? 43 | * This is just part of the quickstart.yaml for learning Contour; we can 44 | change it to a Deployment if we need to. 45 | 46 | ## Decision 47 | Waiting on Review 48 | 49 | ## Consequences 50 | * RouteController will only be able to create resources for one type of ingress 51 | solution at a time. When an operator makes a decision by 52 | configuring `ingress_solution_provider`, only resources related to the 53 | specified provider will be created. 54 | * The available networking abilities are limited to those of the selected ingress 55 | solution provider. Outcomes achievable only with Istio will not be available 56 | if Contour is selected. 57 | 58 | #### Using Kubebuilder 59 | * Provides community buy-in; the `kubebuilder` framework is the encouraged way to engineer a CRD 60 | * Provides built-in best practices for writing a controller, including shared caching, retries, back-offs, leader election for high-availability deployments, etc. 61 | 62 | #### For Reference 63 | The proposal and discussion for the Route CRD and design can be found [here](https://docs.google.com/document/d/1DF7eTBut1I74w_sVaQ4eeF74iQes1nG3iUv7iJ7E35U/edit?usp=sharing).
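For illustration, the sketch below shows the two resource shapes routecontroller could build for the same Route depending on the configured `ingress_solution_provider`. None of it is taken from the proposal document: the object names, the gateway reference, the backing Service name, and the port are all assumptions.

```
# Hypothetical output for ingress_solution_provider: istio
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: hostname.apps.example.com   # assumed naming scheme based on the route FQDN
  namespace: cf-workloads
spec:
  hosts:
  - hostname.apps.example.com
  gateways:
  - cf-system/some-gateway          # from ISTIO_GATEWAY_NAME; the value here is illustrative
  http:
  - route:
    - destination:
        host: s-destination-guid-1  # Service assumed to back the Route destination
        port:
          number: 8080
---
# Hypothetical output for ingress_solution_provider: contour
apiVersion: projectcontour.io/v1
kind: HTTPProxy
metadata:
  name: hostname.apps.example.com
  namespace: cf-workloads
spec:
  virtualhost:
    fqdn: hostname.apps.example.com
  routes:
  - services:
    - name: s-destination-guid-1    # same assumed Service as above
      port: 8080
```

Whichever provider is selected, only one of these two objects would exist in a given cluster, which is what keeps the single-controller approach unambiguous.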
64 | 65 | -------------------------------------------------------------------------------- /doc/assets/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/architecture.png -------------------------------------------------------------------------------- /doc/assets/duration-flamegraph.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/duration-flamegraph.jpg -------------------------------------------------------------------------------- /doc/assets/ingress-gateway-topology-directly-to-worker-nodes.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/ingress-gateway-topology-directly-to-worker-nodes.jpg -------------------------------------------------------------------------------- /doc/assets/ingress-gateway-topology-external-lb.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/ingress-gateway-topology-external-lb.jpg -------------------------------------------------------------------------------- /doc/assets/ingress-gateway-topology-lb-service.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/ingress-gateway-topology-lb-service.jpg -------------------------------------------------------------------------------- /doc/assets/ingress-routing-no-lb.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/ingress-routing-no-lb.png -------------------------------------------------------------------------------- /doc/assets/ingress-to-sys-non-tls.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/ingress-to-sys-non-tls.jpg -------------------------------------------------------------------------------- /doc/assets/ingress-to-sys-tls.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/ingress-to-sys-tls.jpg -------------------------------------------------------------------------------- /doc/assets/liveness-probe-adr-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/liveness-probe-adr-1.png -------------------------------------------------------------------------------- /doc/assets/liveness-probe-adr-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/liveness-probe-adr-2.png 
-------------------------------------------------------------------------------- /doc/assets/liveness-probe-adr-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/liveness-probe-adr-3.png -------------------------------------------------------------------------------- /doc/assets/network-configuration.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/network-configuration.png -------------------------------------------------------------------------------- /doc/assets/network-egress.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/doc/assets/network-egress.png -------------------------------------------------------------------------------- /doc/assets/network-envoy-tap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ################################################################# 4 | # This script creates a rudimentary EnvoyFilter for 5 | # tapping into (inbound) HTTP traffc of a CF app. 6 | # One file per http request is created unter /etc/istio/proxy/ 7 | ################################################################# 8 | 9 | 10 | set -eo pipefail 11 | 12 | if [ -z "$KUBECONFIG" ]; then 13 | echo "KUBECONFIG not set." 14 | exit 1 15 | fi 16 | 17 | APP_NAME=$1 18 | 19 | if [ -z "$APP_NAME" ]; then 20 | echo "Usage: $0 " 21 | exit 1 22 | fi 23 | 24 | APP_GUID=$(cf app "$APP_NAME" --guid) 25 | 26 | kubectl -n cf-workloads apply -f - < /dev/null; then 16 | echo "Deleting cluster: ${CLUSTER_NAME} ..." 17 | gcloud container clusters delete ${CLUSTER_NAME} --project ${GCP_PROJECT} --zone us-west1-a 18 | else 19 | echo "${CLUSTER_NAME} already deleted! Continuing..." 
20 | fi 21 | } 22 | 23 | function delete_dns() { 24 | echo "Deleting DNS for: *.${CF_DOMAIN}" 25 | gcloud dns record-sets transaction start --project ${GCP_PROJECT} --zone="${SHARED_DNS_ZONE_NAME}" 26 | gcp_records_json="$( gcloud dns record-sets list --project ${GCP_PROJECT} --zone "${SHARED_DNS_ZONE_NAME}" --name "*.${CF_DOMAIN}" --format=json )" 27 | record_count="$( echo "${gcp_records_json}" | jq 'length' )" 28 | if [ "${record_count}" != "0" ]; then 29 | existing_record_ip="$( echo "${gcp_records_json}" | jq -r '.[0].rrdatas | join(" ")' )" 30 | gcloud dns record-sets transaction remove --name "*.${CF_DOMAIN}" --type=A --project ${GCP_PROJECT} --zone="${SHARED_DNS_ZONE_NAME}" --ttl=300 "${existing_record_ip}" --verbosity=debug 31 | fi 32 | 33 | echo "Contents of transaction.yaml:" 34 | cat transaction.yaml 35 | gcloud dns record-sets transaction execute --project ${GCP_PROJECT} --zone="${SHARED_DNS_ZONE_NAME}" --verbosity=debug 36 | } 37 | 38 | function cleanup() { 39 | rm -rf /tmp/${CF_DOMAIN}* 40 | } 41 | 42 | function main() { 43 | delete_dns 44 | delete_cluster 45 | cleanup 46 | } 47 | 48 | main 49 | -------------------------------------------------------------------------------- /hack/cf4k8s/fetch-acceptance-values.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p /tmp/good-acceptance/ 4 | 5 | gsutil cp gs://cf-k8s-networking/good-acceptance/cf-install-values.yml /tmp/good-acceptance/cf-values.yml 6 | 7 | echo "You can now find the values for good-acceptance in /tmp/good-acceptance/cf-values.yml" 8 | -------------------------------------------------------------------------------- /hack/cf4k8s/redeploying-acceptance.md: -------------------------------------------------------------------------------- 1 | ## Redeploying the Acceptance Environment 2 | 3 | 0. Let the deploy finish 4 | 1. In this directory, run `./fetch-acceptance-values.sh` 5 | 1. Check out the commit SHA of cf-for-k8s-master in your local cf-for-k8s: 6 | - Click `get: cf-for-k8s-master` and copy the value for `commit` 7 | - `cd ~/workspace/cf-for-k8s/` 8 | - `git checkout ` 9 | 1. Make any changes desired to `/tmp/good-acceptance/cf-values.yml` 10 | 1. Run `./create-and-deploy.sh good-acceptance` 11 | 12 | You're done! 
13 | -------------------------------------------------------------------------------- /routecontroller/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | 10 | # Test binary, build with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Kubernetes Generated files - skip generated files, except for vendored files 17 | 18 | !vendor/**/zz_generated.* 19 | 20 | # editor and IDE paraphernalia 21 | .idea 22 | *.swp 23 | *.swo 24 | *~ 25 | -------------------------------------------------------------------------------- /routecontroller/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.15 AS build 2 | 3 | COPY ./ /go/src/routecontroller/ 4 | WORKDIR /go/src/routecontroller/ 5 | RUN go install 6 | 7 | FROM cloudfoundry/run:tiny 8 | COPY --from=build /go/bin/routecontroller /routecontroller/ 9 | WORKDIR /routecontroller 10 | ENTRYPOINT ["/routecontroller/routecontroller"] 11 | -------------------------------------------------------------------------------- /routecontroller/Makefile: -------------------------------------------------------------------------------- 1 | 2 | # Image URL to use all building/pushing image targets 3 | IMG ?= controller:latest 4 | # Produce CRDs that work back to Kubernetes 1.11 (no version conversion) 5 | CRD_OPTIONS ?= "crd:trivialVersions=true" 6 | 7 | # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) 8 | ifeq (,$(shell go env GOBIN)) 9 | GOBIN=$(shell go env GOPATH)/bin 10 | else 11 | GOBIN=$(shell go env GOBIN) 12 | endif 13 | 14 | all: manager 15 | 16 | # Run tests 17 | test: generate fmt vet manifests 18 | go test ./... -coverprofile cover.out 19 | 20 | # Build manager binary 21 | manager: generate fmt vet 22 | go build -o bin/manager main.go 23 | 24 | # Run against the configured Kubernetes cluster in ~/.kube/config 25 | run: generate fmt vet manifests 26 | go run ./main.go 27 | 28 | # Install CRDs into a cluster 29 | install: manifests 30 | kustomize build config/crd | kubectl apply -f - 31 | 32 | # Uninstall CRDs from a cluster 33 | uninstall: manifests 34 | kustomize build config/crd | kubectl delete -f - 35 | 36 | # Deploy controller in the configured Kubernetes cluster in ~/.kube/config 37 | deploy: manifests 38 | cd config/manager && kustomize edit set image controller=${IMG} 39 | kustomize build config/default | kubectl apply -f - 40 | 41 | # Generate manifests e.g. CRD, RBAC etc. 42 | manifests: controller-gen 43 | $(CONTROLLER_GEN) $(CRD_OPTIONS) rbac:roleName=manager-role webhook paths="./..." output:crd:artifacts:config=config/crd/bases 44 | cp config/crd/bases/networking.cloudfoundry.org_routes.yaml ../config/crd/networking.cloudfoundry.org_routes.yaml 45 | 46 | # Run go fmt against code 47 | fmt: 48 | go fmt ./... 49 | 50 | # Run go vet against code 51 | vet: 52 | go vet ./... 53 | 54 | # Generate code 55 | generate: controller-gen 56 | $(CONTROLLER_GEN) object:headerFile=./hack/boilerplate.go.txt paths="./..." 57 | 58 | # Build the docker image 59 | docker-build: test 60 | docker build . 
-t ${IMG} 61 | 62 | # Push the docker image 63 | docker-push: 64 | docker push ${IMG} 65 | 66 | # find or download controller-gen 67 | # download controller-gen if necessary 68 | controller-gen: 69 | ifeq (, $(shell which controller-gen)) 70 | @{ \ 71 | set -e ;\ 72 | CONTROLLER_GEN_TMP_DIR=$$(mktemp -d) ;\ 73 | cd $$CONTROLLER_GEN_TMP_DIR ;\ 74 | go mod init tmp ;\ 75 | go get sigs.k8s.io/controller-tools/cmd/controller-gen@v0.2.4 ;\ 76 | rm -rf $$CONTROLLER_GEN_TMP_DIR ;\ 77 | } 78 | CONTROLLER_GEN=$(GOBIN)/controller-gen 79 | else 80 | CONTROLLER_GEN=$(shell which controller-gen) 81 | endif 82 | -------------------------------------------------------------------------------- /routecontroller/PROJECT: -------------------------------------------------------------------------------- 1 | domain: cloudfoundry.org 2 | multigroup: true 3 | repo: code.cloudfoundry.org/cf-k8s-networking/routecontroller 4 | resources: 5 | - group: apps 6 | kind: Route 7 | version: v1alpha1 8 | - group: networking 9 | kind: VirtualService 10 | version: v1alpha3 11 | version: "2" 12 | -------------------------------------------------------------------------------- /routecontroller/apis/istio/networking/v1alpha3/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | // Package v1alpha3 contains API Schema definitions for the networking v1alpha3 API group 17 | // +kubebuilder:object:generate=true 18 | // +groupName=networking.cloudfoundry.org 19 | package v1alpha3 20 | 21 | import ( 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "sigs.k8s.io/controller-runtime/pkg/scheme" 24 | ) 25 | 26 | var ( 27 | // GroupVersion is group version used to register these objects 28 | GroupVersion = schema.GroupVersion{Group: "networking.istio.io", Version: "v1alpha3"} 29 | 30 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 31 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 32 | 33 | // AddToScheme adds the types in this group-version to the given scheme. 34 | AddToScheme = SchemeBuilder.AddToScheme 35 | ) 36 | -------------------------------------------------------------------------------- /routecontroller/apis/istio/networking/v1alpha3/virtualservice_types.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | // +kubebuilder:skip 17 | package v1alpha3 18 | 19 | import ( 20 | "bufio" 21 | "bytes" 22 | 23 | "github.com/gogo/protobuf/jsonpb" 24 | 25 | istiov1alpha3 "istio.io/api/networking/v1alpha3" 26 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 27 | ) 28 | 29 | // VirtualServiceSpec defines the desired state of VirtualService 30 | type VirtualServiceSpec struct { 31 | // Important: Run "make" to regenerate code after modifying this file 32 | istiov1alpha3.VirtualService `json:",inline"` 33 | } 34 | 35 | // VirtualServiceStatus defines the observed state of VirtualService 36 | type VirtualServiceStatus struct { 37 | // INSERT ADDITIONAL STATUS FIELD - define observed state of cluster 38 | // Important: Run "make" to regenerate code after modifying this file 39 | } 40 | 41 | // +kubebuilder:object:root=true 42 | 43 | // VirtualService is the Schema for the virtualservices API 44 | type VirtualService struct { 45 | metav1.TypeMeta `json:",inline"` 46 | metav1.ObjectMeta `json:"metadata,omitempty"` 47 | 48 | Spec VirtualServiceSpec `json:"spec,omitempty"` 49 | Status VirtualServiceStatus `json:"status,omitempty"` 50 | } 51 | 52 | // +kubebuilder:object:root=true 53 | 54 | // VirtualServiceList contains a list of VirtualService 55 | type VirtualServiceList struct { 56 | metav1.TypeMeta `json:",inline"` 57 | metav1.ListMeta `json:"metadata,omitempty"` 58 | Items []VirtualService `json:"items"` 59 | } 60 | 61 | func init() { 62 | SchemeBuilder.Register(&VirtualService{}, &VirtualServiceList{}) 63 | } 64 | 65 | func (p *VirtualServiceSpec) MarshalJSON() ([]byte, error) { 66 | buffer := bytes.Buffer{} 67 | writer := bufio.NewWriter(&buffer) 68 | marshaler := jsonpb.Marshaler{} 69 | err := marshaler.Marshal(writer, &p.VirtualService) 70 | if err != nil { 71 | return nil, err 72 | } 73 | 74 | writer.Flush() 75 | return buffer.Bytes(), nil 76 | } 77 | 78 | func (p *VirtualServiceSpec) UnmarshalJSON(b []byte) error { 79 | reader := bytes.NewReader(b) 80 | unmarshaler := jsonpb.Unmarshaler{} 81 | err := unmarshaler.Unmarshal(reader, &p.VirtualService) 82 | if err != nil { 83 | return err 84 | } 85 | return nil 86 | } 87 | -------------------------------------------------------------------------------- /routecontroller/apis/networking/v1alpha1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | // Package v1alpha1 contains API Schema definitions for the networking v1alpha1 API group 17 | // +kubebuilder:object:generate=true 18 | // +groupName=networking.cloudfoundry.org 19 | package v1alpha1 20 | 21 | import ( 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "sigs.k8s.io/controller-runtime/pkg/scheme" 24 | ) 25 | 26 | var ( 27 | // GroupVersion is group version used to register these objects 28 | GroupVersion = schema.GroupVersion{Group: "networking.cloudfoundry.org", Version: "v1alpha1"} 29 | 30 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 31 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 32 | 33 | // AddToScheme adds the types in this group-version to the given scheme. 34 | AddToScheme = SchemeBuilder.AddToScheme 35 | ) 36 | -------------------------------------------------------------------------------- /routecontroller/cfg/cfg_suite_test.go: -------------------------------------------------------------------------------- 1 | package cfg_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | func TestCfg(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Cfg Suite") 13 | } 14 | -------------------------------------------------------------------------------- /routecontroller/cfg/config.go: -------------------------------------------------------------------------------- 1 | package cfg 2 | 3 | import ( 4 | "errors" 5 | "fmt" 6 | "os" 7 | "time" 8 | ) 9 | 10 | type Config struct { 11 | ResyncInterval time.Duration 12 | Istio struct { 13 | // The Istio Gateway the route controller applies to 14 | Gateway string 15 | } 16 | LeaderElectionNamespace string 17 | } 18 | 19 | func Load() (*Config, error) { 20 | c := &Config{} 21 | var exists bool 22 | c.Istio.Gateway, exists = os.LookupEnv("ISTIO_GATEWAY_NAME") 23 | 24 | if !exists { 25 | return nil, errors.New("ISTIO_GATEWAY_NAME not configured") 26 | } 27 | 28 | c.LeaderElectionNamespace, exists = os.LookupEnv("LEADER_ELECTION_NAMESPACE") 29 | 30 | if !exists { 31 | return nil, errors.New("LEADER_ELECTION_NAMESPACE not configured") 32 | } 33 | 34 | var err error 35 | resync_interval, exists := os.LookupEnv("RESYNC_INTERVAL") 36 | 37 | if exists { 38 | c.ResyncInterval, err = time.ParseDuration(fmt.Sprintf("%ss", resync_interval)) 39 | if err != nil { 40 | return nil, errors.New("could not parse the RESYNC_INTERVAL duration") 41 | } 42 | } else { 43 | c.ResyncInterval = 30 * time.Second 44 | } 45 | 46 | return c, nil 47 | } 48 | -------------------------------------------------------------------------------- /routecontroller/cfg/config_test.go: -------------------------------------------------------------------------------- 1 | package cfg_test 2 | 3 | import ( 4 | "os" 5 | "time" 6 | 7 | "code.cloudfoundry.org/cf-k8s-networking/routecontroller/cfg" 8 | . "github.com/onsi/ginkgo" 9 | . 
"github.com/onsi/gomega" 10 | ) 11 | 12 | var _ = Describe("Config", func() { 13 | Describe("Load", func() { 14 | BeforeEach(func() { 15 | err := os.Setenv("ISTIO_GATEWAY_NAME", "some-gateway") 16 | Expect(err).NotTo(HaveOccurred()) 17 | err = os.Setenv("RESYNC_INTERVAL", "15") 18 | Expect(err).NotTo(HaveOccurred()) 19 | err = os.Setenv("LEADER_ELECTION_NAMESPACE", "my-good-namespace") 20 | Expect(err).NotTo(HaveOccurred()) 21 | }) 22 | 23 | It("loads the config", func() { 24 | config, err := cfg.Load() 25 | Expect(err).NotTo(HaveOccurred()) 26 | 27 | Expect(config.Istio.Gateway).To(Equal("some-gateway")) 28 | Expect(config.ResyncInterval).To(Equal(15 * time.Second)) 29 | Expect(config.LeaderElectionNamespace).To(Equal("my-good-namespace")) 30 | }) 31 | 32 | Context("when the ISTIO_GATEWAY_NAME env var is not set", func() { 33 | BeforeEach(func() { 34 | err := os.Unsetenv("ISTIO_GATEWAY_NAME") 35 | Expect(err).NotTo(HaveOccurred()) 36 | }) 37 | 38 | It("returns an error", func() { 39 | _, err := cfg.Load() 40 | Expect(err).To(MatchError("ISTIO_GATEWAY_NAME not configured")) 41 | }) 42 | }) 43 | 44 | Context("when the LEADER_ELECTION_NAMESPACE env var is not set", func() { 45 | BeforeEach(func() { 46 | err := os.Unsetenv("LEADER_ELECTION_NAMESPACE") 47 | Expect(err).NotTo(HaveOccurred()) 48 | }) 49 | 50 | It("returns an error", func() { 51 | _, err := cfg.Load() 52 | Expect(err).To(MatchError("LEADER_ELECTION_NAMESPACE not configured")) 53 | }) 54 | }) 55 | 56 | Context("when the RESYNC_INTERVAL env var is not set", func() { 57 | BeforeEach(func() { 58 | err := os.Unsetenv("RESYNC_INTERVAL") 59 | Expect(err).NotTo(HaveOccurred()) 60 | }) 61 | 62 | It("defaults to 30 seconds", func() { 63 | config, err := cfg.Load() 64 | Expect(err).NotTo(HaveOccurred()) 65 | Expect(config.ResyncInterval).To(Equal(30 * time.Second)) 66 | }) 67 | }) 68 | }) 69 | }) 70 | -------------------------------------------------------------------------------- /routecontroller/config/certmanager/certificate.yaml: -------------------------------------------------------------------------------- 1 | # The following manifests contain a self-signed issuer CR and a certificate CR. 
2 | # More document can be found at https://docs.cert-manager.io 3 | # WARNING: Targets CertManager 0.11 check https://docs.cert-manager.io/en/latest/tasks/upgrading/index.html for breaking changes 4 | apiVersion: cert-manager.io/v1alpha2 5 | kind: Issuer 6 | metadata: 7 | name: selfsigned-issuer 8 | namespace: system 9 | spec: 10 | selfSigned: {} 11 | --- 12 | apiVersion: cert-manager.io/v1alpha2 13 | kind: Certificate 14 | metadata: 15 | name: serving-cert # this name should match the one appeared in kustomizeconfig.yaml 16 | namespace: system 17 | spec: 18 | # $(SERVICE_NAME) and $(SERVICE_NAMESPACE) will be substituted by kustomize 19 | dnsNames: 20 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc 21 | - $(SERVICE_NAME).$(SERVICE_NAMESPACE).svc.cluster.local 22 | issuerRef: 23 | kind: Issuer 24 | name: selfsigned-issuer 25 | secretName: webhook-server-cert # this secret will not be prefixed, since it's not managed by kustomize 26 | -------------------------------------------------------------------------------- /routecontroller/config/certmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - certificate.yaml 3 | 4 | configurations: 5 | - kustomizeconfig.yaml 6 | -------------------------------------------------------------------------------- /routecontroller/config/certmanager/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This configuration is for teaching kustomize how to update name ref and var substitution 2 | nameReference: 3 | - kind: Issuer 4 | group: cert-manager.io 5 | fieldSpecs: 6 | - kind: Certificate 7 | group: cert-manager.io 8 | path: spec/issuerRef/name 9 | 10 | varReference: 11 | - kind: Certificate 12 | group: cert-manager.io 13 | path: spec/commonName 14 | - kind: Certificate 15 | group: cert-manager.io 16 | path: spec/dnsNames 17 | -------------------------------------------------------------------------------- /routecontroller/config/crd/bases/_.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | controller-gen.kubebuilder.io/version: v0.2.4 8 | creationTimestamp: null 9 | spec: 10 | group: "" 11 | names: 12 | kind: "" 13 | plural: "" 14 | scope: "" 15 | status: 16 | acceptedNames: 17 | kind: "" 18 | plural: "" 19 | conditions: null 20 | storedVersions: null 21 | -------------------------------------------------------------------------------- /routecontroller/config/crd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # This kustomization.yaml is not intended to be run by itself, 2 | # since it depends on service name and namespace that are out of this kustomize package. 3 | # It should be run by config/default 4 | resources: 5 | - bases/networking.cloudfoundry.org_routes.yaml 6 | # +kubebuilder:scaffold:crdkustomizeresource 7 | 8 | patchesStrategicMerge: 9 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 10 | # patches here are for enabling the conversion webhook for each CRD 11 | #- patches/webhook_in_routes.yaml 12 | # +kubebuilder:scaffold:crdkustomizewebhookpatch 13 | 14 | # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. 
15 | # patches here are for enabling the CA injection for each CRD 16 | #- patches/cainjection_in_routes.yaml 17 | # +kubebuilder:scaffold:crdkustomizecainjectionpatch 18 | 19 | # the following config is for teaching kustomize how to do kustomization for CRDs. 20 | configurations: 21 | - kustomizeconfig.yaml 22 | -------------------------------------------------------------------------------- /routecontroller/config/crd/kustomizeconfig.yaml: -------------------------------------------------------------------------------- 1 | # This file is for teaching kustomize how to substitute name and namespace reference in CRD 2 | nameReference: 3 | - kind: Service 4 | version: v1 5 | fieldSpecs: 6 | - kind: CustomResourceDefinition 7 | group: apiextensions.k8s.io 8 | path: spec/conversion/webhookClientConfig/service/name 9 | 10 | namespace: 11 | - kind: CustomResourceDefinition 12 | group: apiextensions.k8s.io 13 | path: spec/conversion/webhookClientConfig/service/namespace 14 | create: false 15 | 16 | varReference: 17 | - path: metadata/annotations 18 | -------------------------------------------------------------------------------- /routecontroller/config/crd/patches/cainjection_in_routes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch adds a directive for certmanager to inject CA into the CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | annotations: 7 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 8 | name: routes.networking.cloudfoundry.org 9 | -------------------------------------------------------------------------------- /routecontroller/config/crd/patches/webhook_in_routes.yaml: -------------------------------------------------------------------------------- 1 | # The following patch enables conversion webhook for CRD 2 | # CRD conversion requires k8s 1.13 or later. 3 | apiVersion: apiextensions.k8s.io/v1beta1 4 | kind: CustomResourceDefinition 5 | metadata: 6 | name: routes.networking.cloudfoundry.org 7 | spec: 8 | conversion: 9 | strategy: Webhook 10 | webhookClientConfig: 11 | # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, 12 | # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) 13 | caBundle: Cg== 14 | service: 15 | namespace: system 16 | name: webhook-service 17 | path: /convert 18 | -------------------------------------------------------------------------------- /routecontroller/config/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: routecontroller-system 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: routecontroller- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | bases: 16 | - ../crd 17 | - ../rbac 18 | - ../manager 19 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml 20 | #- ../webhook 21 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 
'WEBHOOK' components are required. 22 | #- ../certmanager 23 | # [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. 24 | #- ../prometheus 25 | 26 | patchesStrategicMerge: 27 | # Protect the /metrics endpoint by putting it behind auth. 28 | # Only one of manager_auth_proxy_patch.yaml and 29 | # manager_prometheus_metrics_patch.yaml should be enabled. 30 | - manager_auth_proxy_patch.yaml 31 | # If you want your controller-manager to expose the /metrics 32 | # endpoint w/o any authn/z, uncomment the following line and 33 | # comment manager_auth_proxy_patch.yaml. 34 | # Only one of manager_auth_proxy_patch.yaml and 35 | # manager_prometheus_metrics_patch.yaml should be enabled. 36 | #- manager_prometheus_metrics_patch.yaml 37 | 38 | # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in crd/kustomization.yaml 39 | #- manager_webhook_patch.yaml 40 | 41 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 42 | # Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. 43 | # 'CERTMANAGER' needs to be enabled to use ca injection 44 | #- webhookcainjection_patch.yaml 45 | 46 | # the following config is for teaching kustomize how to do var substitution 47 | vars: 48 | # [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. 49 | #- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR 50 | # objref: 51 | # kind: Certificate 52 | # group: cert-manager.io 53 | # version: v1alpha2 54 | # name: serving-cert # this name should match the one in certificate.yaml 55 | # fieldref: 56 | # fieldpath: metadata.namespace 57 | #- name: CERTIFICATE_NAME 58 | # objref: 59 | # kind: Certificate 60 | # group: cert-manager.io 61 | # version: v1alpha2 62 | # name: serving-cert # this name should match the one in certificate.yaml 63 | #- name: SERVICE_NAMESPACE # namespace of the service 64 | # objref: 65 | # kind: Service 66 | # version: v1 67 | # name: webhook-service 68 | # fieldref: 69 | # fieldpath: metadata.namespace 70 | #- name: SERVICE_NAME 71 | # objref: 72 | # kind: Service 73 | # version: v1 74 | # name: webhook-service 75 | -------------------------------------------------------------------------------- /routecontroller/config/default/manager_auth_proxy_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch inject a sidecar container which is a HTTP proxy for the controller manager, 2 | # it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: controller-manager 7 | namespace: system 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: kube-rbac-proxy 13 | image: gcr.io/kubebuilder/kube-rbac-proxy:v0.4.1 14 | args: 15 | - "--secure-listen-address=0.0.0.0:8443" 16 | - "--upstream=http://127.0.0.1:8080/" 17 | - "--logtostderr=true" 18 | - "--v=10" 19 | ports: 20 | - containerPort: 8443 21 | name: https 22 | - name: manager 23 | args: 24 | - "--metrics-addr=127.0.0.1:8080" 25 | - "--enable-leader-election" 26 | -------------------------------------------------------------------------------- /routecontroller/config/default/manager_webhook_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: controller-manager 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | - name: manager 11 | ports: 12 | - containerPort: 9443 13 | name: webhook-server 14 | protocol: TCP 15 | volumeMounts: 16 | - mountPath: /tmp/k8s-webhook-server/serving-certs 17 | name: cert 18 | readOnly: true 19 | volumes: 20 | - name: cert 21 | secret: 22 | defaultMode: 420 23 | secretName: webhook-server-cert 24 | -------------------------------------------------------------------------------- /routecontroller/config/default/webhookcainjection_patch.yaml: -------------------------------------------------------------------------------- 1 | # This patch add annotation to admission webhook config and 2 | # the variables $(CERTIFICATE_NAMESPACE) and $(CERTIFICATE_NAME) will be substituted by kustomize. 3 | apiVersion: admissionregistration.k8s.io/v1beta1 4 | kind: MutatingWebhookConfiguration 5 | metadata: 6 | name: mutating-webhook-configuration 7 | annotations: 8 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 9 | --- 10 | apiVersion: admissionregistration.k8s.io/v1beta1 11 | kind: ValidatingWebhookConfiguration 12 | metadata: 13 | name: validating-webhook-configuration 14 | annotations: 15 | cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) 16 | -------------------------------------------------------------------------------- /routecontroller/config/manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manager.yaml 3 | -------------------------------------------------------------------------------- /routecontroller/config/manager/manager.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: controller-manager 12 | namespace: system 13 | labels: 14 | control-plane: controller-manager 15 | spec: 16 | selector: 17 | matchLabels: 18 | control-plane: controller-manager 19 | replicas: 1 20 | template: 21 | metadata: 22 | labels: 23 | control-plane: controller-manager 24 | spec: 25 | containers: 26 | - command: 27 | - /manager 28 | args: 29 | - --enable-leader-election 30 | image: controller:latest 31 | name: manager 32 | resources: 33 | limits: 34 | cpu: 100m 35 | memory: 30Mi 36 | requests: 37 | cpu: 100m 38 | memory: 20Mi 39 | terminationGracePeriodSeconds: 10 40 | -------------------------------------------------------------------------------- /routecontroller/config/prometheus/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | resources: 2 | - monitor.yaml 3 | -------------------------------------------------------------------------------- /routecontroller/config/prometheus/monitor.yaml: -------------------------------------------------------------------------------- 1 | 2 | # Prometheus Monitor Service (Metrics) 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: ServiceMonitor 5 | metadata: 6 | labels: 7 | control-plane: controller-manager 8 | name: controller-manager-metrics-monitor 9 | namespace: system 10 | spec: 11 | endpoints: 12 | - path: /metrics 13 | port: https 14 | selector: 15 | control-plane: controller-manager 16 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/auth_proxy_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: proxy-role 5 | rules: 6 | - apiGroups: ["authentication.k8s.io"] 7 | resources: 8 | - tokenreviews 9 | verbs: ["create"] 10 | - apiGroups: ["authorization.k8s.io"] 11 | resources: 12 | - subjectaccessreviews 13 | verbs: ["create"] 14 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/auth_proxy_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: proxy-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: proxy-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/auth_proxy_service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | control-plane: controller-manager 6 | name: controller-manager-metrics-service 7 | namespace: system 8 | spec: 9 | ports: 10 | - name: https 11 | port: 8443 12 | targetPort: https 13 | selector: 14 | control-plane: controller-manager 15 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - role.yaml 3 | - role_binding.yaml 4 | - leader_election_role.yaml 5 | - leader_election_role_binding.yaml 6 | # Comment the following 3 lines if you want to disable 7 | # the auth proxy (https://github.com/brancz/kube-rbac-proxy) 8 | # which protects your /metrics endpoint. 9 | - auth_proxy_service.yaml 10 | - auth_proxy_role.yaml 11 | - auth_proxy_role_binding.yaml 12 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do leader election. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - events 31 | verbs: 32 | - create 33 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | name: leader-election-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: Role 8 | name: leader-election-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/role.yaml: -------------------------------------------------------------------------------- 1 | 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | creationTimestamp: null 7 | name: manager-role 8 | rules: 9 | - apiGroups: 10 | - networking.cloudfoundry.org 11 | resources: 12 | - routes 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - networking.cloudfoundry.org 19 | resources: 20 | - routes/status 21 | verbs: 22 | - get 23 | - patch 24 | - update 25 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: manager-rolebinding 5 | roleRef: 6 | apiGroup: rbac.authorization.k8s.io 7 | kind: ClusterRole 8 | name: manager-role 9 | subjects: 10 | - kind: ServiceAccount 11 | name: default 12 | namespace: system 13 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/route_editor_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do edit routes. 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: route-editor-role 6 | rules: 7 | - apiGroups: 8 | - networking.cloudfoundry.org 9 | resources: 10 | - routes 11 | verbs: 12 | - create 13 | - delete 14 | - get 15 | - list 16 | - patch 17 | - update 18 | - watch 19 | - apiGroups: 20 | - networking.cloudfoundry.org 21 | resources: 22 | - routes/status 23 | verbs: 24 | - get 25 | - patch 26 | - update 27 | -------------------------------------------------------------------------------- /routecontroller/config/rbac/route_viewer_role.yaml: -------------------------------------------------------------------------------- 1 | # permissions to do viewer routes. 
2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: route-viewer-role 6 | rules: 7 | - apiGroups: 8 | - networking.cloudfoundry.org 9 | resources: 10 | - routes 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: 16 | - networking.cloudfoundry.org 17 | resources: 18 | - routes/status 19 | verbs: 20 | - get 21 | -------------------------------------------------------------------------------- /routecontroller/config/samples/route-with-no-destinations.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Route with no destinations 3 | apiVersion: networking.cloudfoundry.org/v1alpha1 4 | kind: Route 5 | metadata: 6 | labels: 7 | app.kubernetes.io/component: cf-networking 8 | app.kubernetes.io/managed-by: cloudfoundry 9 | app.kubernetes.io/name: 7390d59b-f5f1-4c3c-9cb6-c1e2c5c3cf84 # route guid 10 | app.kubernetes.io/part-of: cloudfoundry 11 | app.kubernetes.io/version: 0.0.0 12 | cloudfoundry.org/domain_guid: 23bb47a0-b042-4087-8e55-97ec4b69b43a 13 | cloudfoundry.org/org_guid: b7ab8526-b63b-4156-90b7-2cacfd686a8b 14 | cloudfoundry.org/route_guid: 7390d59b-f5f1-4c3c-9cb6-c1e2c5c3cf84 15 | cloudfoundry.org/space_guid: d4a93829-fed3-497a-bcba-00bb2d454681 16 | name: 7390d59b-f5f1-4c3c-9cb6-c1e2c5c3cf84 # route guid 17 | namespace: cf-workloads 18 | spec: 19 | destinations: [] 20 | domain: 21 | internal: false 22 | name: apps.example.com 23 | host: destinationless 24 | path: "/some-path" 25 | url: "destinationless.apps.example.com/some-path" -------------------------------------------------------------------------------- /routecontroller/config/samples/route-with-single-destination.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Route with a single destination 3 | apiVersion: networking.cloudfoundry.org/v1alpha1 4 | kind: Route 5 | metadata: 6 | labels: 7 | app.kubernetes.io/component: cf-networking 8 | app.kubernetes.io/managed-by: cloudfoundry 9 | app.kubernetes.io/name: 7390d59b-f5f1-4c3c-9cb6-c1e2c5c3cf84 # route guid 10 | app.kubernetes.io/part-of: cloudfoundry 11 | app.kubernetes.io/version: 0.0.0 12 | cloudfoundry.org/domain_guid: 23bb47a0-b042-4087-8e55-97ec4b69b43a 13 | cloudfoundry.org/org_guid: b7ab8526-b63b-4156-90b7-2cacfd686a8b 14 | cloudfoundry.org/route_guid: 7390d59b-f5f1-4c3c-9cb6-c1e2c5c3cf84 15 | cloudfoundry.org/space_guid: d4a93829-fed3-497a-bcba-00bb2d454681 16 | name: 7390d59b-f5f1-4c3c-9cb6-c1e2c5c3cf84 # route guid 17 | namespace: cf-workloads 18 | spec: 19 | destinations: 20 | - app: 21 | guid: be261513-3ccd-4000-b9d8-0023bbb08fbf 22 | process: 23 | type: web 24 | guid: 9363095c-6be5-4982-a7db-a493e74af2f4 # destination guid 25 | port: 8080 26 | selector: 27 | matchLabels: 28 | cloudfoundry.org/app_guid: be261513-3ccd-4000-b9d8-0023bbb08fbf 29 | cloudfoundry.org/process_type: web 30 | domain: 31 | internal: false 32 | name: apps.example.com 33 | host: catnip 34 | path: "" 35 | url: catnip.apps.example.com -------------------------------------------------------------------------------- /routecontroller/config/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | - manifests.yaml 3 | - service.yaml 4 | 5 | configurations: 6 | - kustomizeconfig.yaml 7 | -------------------------------------------------------------------------------- /routecontroller/config/webhook/kustomizeconfig.yaml: 
-------------------------------------------------------------------------------- 1 | # the following config is for teaching kustomize where to look at when substituting vars. 2 | # It requires kustomize v2.1.0 or newer to work properly. 3 | nameReference: 4 | - kind: Service 5 | version: v1 6 | fieldSpecs: 7 | - kind: MutatingWebhookConfiguration 8 | group: admissionregistration.k8s.io 9 | path: webhooks/clientConfig/service/name 10 | - kind: ValidatingWebhookConfiguration 11 | group: admissionregistration.k8s.io 12 | path: webhooks/clientConfig/service/name 13 | 14 | namespace: 15 | - kind: MutatingWebhookConfiguration 16 | group: admissionregistration.k8s.io 17 | path: webhooks/clientConfig/service/namespace 18 | create: true 19 | - kind: ValidatingWebhookConfiguration 20 | group: admissionregistration.k8s.io 21 | path: webhooks/clientConfig/service/namespace 22 | create: true 23 | 24 | varReference: 25 | - path: metadata/annotations 26 | -------------------------------------------------------------------------------- /routecontroller/config/webhook/manifests.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cloudfoundry/cf-k8s-networking/49d6047a2270a79e266ea70bb0cab40de0f55633/routecontroller/config/webhook/manifests.yaml -------------------------------------------------------------------------------- /routecontroller/config/webhook/service.yaml: -------------------------------------------------------------------------------- 1 | 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webhook-service 6 | namespace: system 7 | spec: 8 | ports: 9 | - port: 443 10 | targetPort: 9443 11 | selector: 12 | control-plane: controller-manager 13 | -------------------------------------------------------------------------------- /routecontroller/go.mod: -------------------------------------------------------------------------------- 1 | module code.cloudfoundry.org/cf-k8s-networking/routecontroller 2 | 3 | go 1.15 4 | 5 | require ( 6 | github.com/go-logr/logr v0.3.0 7 | github.com/gogo/protobuf v1.3.1 8 | github.com/onsi/ginkgo v1.14.1 9 | github.com/onsi/gomega v1.10.2 10 | github.com/prometheus/client_model v0.2.0 11 | github.com/prometheus/prom2json v1.3.0 12 | github.com/sirupsen/logrus v1.6.0 13 | istio.io/api v0.0.0-20200410141105-715a3039a0b5 14 | k8s.io/api v0.20.4 15 | k8s.io/apimachinery v0.20.4 16 | k8s.io/client-go v0.20.4 17 | sigs.k8s.io/controller-runtime v0.8.3 18 | sigs.k8s.io/kind v0.10.0 19 | ) 20 | -------------------------------------------------------------------------------- /routecontroller/hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ -------------------------------------------------------------------------------- /routecontroller/integration/README.md: -------------------------------------------------------------------------------- 1 | # RouteController Integration Tests 2 | 3 | ### Requirements 4 | 5 | * go 6 | * ginkgo 7 | * docker 8 | * kubectl 9 | * kind 10 | * kustomize 11 | 12 | ### To Run 13 | 14 | ``` 15 | ginkgo -r integration/ 16 | ``` -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/context-path-route-for-single-fqdn1.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: context-path-route-guid-1 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: context-path-route-guid-1 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: context-path-route-guid-1 16 | spec: 17 | host: hostname 18 | path: /hello 19 | url: hostname.apps.example.com/hello 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: 24 | - weight: 100 25 | port: 8080 26 | guid: destination-guid-1 27 | selector: 28 | matchLabels: 29 | cloudfoundry.org/app_guid: cc-app1-guid 30 | cloudfoundry.org/process_type: web 31 | app: 32 | guid: cc-app1-guid 33 | process: 34 | type: web 35 | -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/context-path-route-for-single-fqdn2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: context-path-route-guid-2 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: context-path-route-guid-2 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: context-path-route-guid-2 16 | spec: 17 | host: hostname 18 | path: /hello/world 19 | url: hostname.apps.example.com/hello/world 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: 24 | - weight: 100 25 | port: 8080 26 | guid: destination-guid-2 27 | selector: 28 | matchLabels: 29 | cloudfoundry.org/app_guid: cc-app2-guid 30 | cloudfoundry.org/process_type: web 31 | app: 32 | guid: cc-app2-guid 33 | process: 34 | type: web 35 | -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/istio-virtual-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | name: virtualservices.networking.istio.io 5 | labels: 6 | app: istio-pilot 7 | chart: istio 8 | heritage: Tiller 9 | release: istio 10 | annotations: 11 | "helm.sh/resource-policy": keep 12 | spec: 13 | group: networking.istio.io 14 | names: 15 
| kind: VirtualService 16 | listKind: VirtualServiceList 17 | plural: virtualservices 18 | singular: virtualservice 19 | shortNames: 20 | - vs 21 | categories: 22 | - istio-io 23 | - networking-istio-io 24 | scope: Namespaced 25 | versions: 26 | - name: v1alpha3 27 | served: true 28 | storage: true 29 | additionalPrinterColumns: 30 | - JSONPath: .spec.gateways 31 | description: The names of gateways and sidecars that should apply these routes 32 | name: Gateways 33 | type: string 34 | - JSONPath: .spec.hosts 35 | description: The destination hosts to which traffic is being sent 36 | name: Hosts 37 | type: string 38 | - JSONPath: .metadata.creationTimestamp 39 | description: |- 40 | CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. 41 | 42 | Populated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata 43 | name: Age 44 | type: date 45 | -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/multiple-routes-with-different-fqdn.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.cloudfoundry.org/v1alpha1 3 | kind: Route 4 | metadata: 5 | name: cc-route-guid-1 6 | annotations: {} 7 | labels: 8 | app.kubernetes.io/name: cc-route-guid-1 9 | app.kubernetes.io/version: cloud-controller-api-version 10 | app.kubernetes.io/managed-by: cloudfoundry 11 | app.kubernetes.io/component: cf-networking 12 | app.kubernetes.io/part-of: cloudfoundry 13 | cloudfoundry.org/org_guid: cc-org-guid 14 | cloudfoundry.org/space_guid: cc-space-guid 15 | cloudfoundry.org/domain_guid: cc-domain-guid 16 | cloudfoundry.org/route_guid: cc-route-guid-1 17 | spec: 18 | host: hostname-1 19 | path: /some/path 20 | url: hostname-1.apps.example.com/some/path 21 | domain: 22 | name: apps.example.com 23 | internal: false 24 | destinations: 25 | - weight: 100 26 | port: 8080 27 | guid: destination-guid-1 28 | selector: 29 | matchLabels: 30 | cloudfoundry.org/app_guid: cc-app1-guid 31 | cloudfoundry.org/process_type: web 32 | app: 33 | guid: cc-app1-guid 34 | process: 35 | type: web 36 | --- 37 | apiVersion: networking.cloudfoundry.org/v1alpha1 38 | kind: Route 39 | metadata: 40 | name: cc-route-guid-2 41 | annotations: {} 42 | labels: 43 | app.kubernetes.io/name: cc-route-guid-2 44 | app.kubernetes.io/version: cloud-controller-api-version 45 | app.kubernetes.io/managed-by: cloudfoundry 46 | app.kubernetes.io/component: cf-networking 47 | app.kubernetes.io/part-of: cloudfoundry 48 | cloudfoundry.org/org_guid: cc-org-guid 49 | cloudfoundry.org/space_guid: cc-space-guid 50 | cloudfoundry.org/domain_guid: cc-domain-guid 51 | cloudfoundry.org/route_guid: cc-route-guid-2 52 | spec: 53 | host: hostname-2 54 | path: /some/different/path 55 | url: hostname-2.apps.example.com/some/different/path 56 | domain: 57 | name: apps.example.com 58 | internal: false 59 | destinations: 60 | - weight: 100 61 | port: 8080 62 | guid: destination-guid-2 63 | selector: 64 | matchLabels: 65 | cloudfoundry.org/app_guid: cc-app2-guid 66 | cloudfoundry.org/process_type: web 67 | app: 68 | guid: cc-app2-guid 69 | process: 70 | type: web 71 | -------------------------------------------------------------------------------- 
/routecontroller/integration/fixtures/multiple-routes-with-same-fqdn.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: cc-route-guid-1 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: cc-route-guid-1 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: cc-route-guid-1 16 | spec: 17 | host: hostname 18 | path: /some/path 19 | url: hostname.apps.example.com/some/path 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: 24 | - weight: 50 25 | port: 8080 26 | guid: destination-guid-1 27 | selector: 28 | matchLabels: 29 | cloudfoundry.org/app_guid: cc-app1-guid 30 | cloudfoundry.org/process_type: web 31 | app: 32 | guid: cc-app1-guid 33 | process: 34 | type: web 35 | - weight: 50 36 | port: 9090 37 | guid: additional-destination-for-route-1 38 | selector: 39 | matchLabels: 40 | cloudfoundry.org/app_guid: cc-app1-guid 41 | cloudfoundry.org/process_type: web 42 | app: 43 | guid: cc-app1-guid 44 | process: 45 | type: web 46 | --- 47 | apiVersion: networking.cloudfoundry.org/v1alpha1 48 | kind: Route 49 | metadata: 50 | name: cc-route-guid-2 51 | annotations: {} 52 | labels: 53 | app.kubernetes.io/name: cc-route-guid-2 54 | app.kubernetes.io/version: cloud-controller-api-version 55 | app.kubernetes.io/managed-by: cloudfoundry 56 | app.kubernetes.io/component: cf-networking 57 | app.kubernetes.io/part-of: cloudfoundry 58 | cloudfoundry.org/org_guid: cc-org-guid 59 | cloudfoundry.org/space_guid: cc-space-guid 60 | cloudfoundry.org/domain_guid: cc-domain-guid 61 | cloudfoundry.org/route_guid: cc-route-guid-2 62 | spec: 63 | host: hostname 64 | path: /some/different/path 65 | url: hostname.apps.example.com/some/different/path 66 | domain: 67 | name: apps.example.com 68 | internal: false 69 | destinations: 70 | - weight: 100 71 | port: 8080 72 | guid: destination-guid-2 73 | selector: 74 | matchLabels: 75 | cloudfoundry.org/app_guid: cc-app2-guid 76 | cloudfoundry.org/process_type: web 77 | app: 78 | guid: cc-app2-guid 79 | process: 80 | type: web 81 | -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/route-without-any-destination.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: cc-route-guid 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: cc-route-guid 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: cc-route-guid 16 | spec: 17 | host: hostname 18 | path: /some/path 19 | url: hostname.apps.example.com/some/path 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: [] 24 | -------------------------------------------------------------------------------- 
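`route-without-any-destination.yaml` above declares `destinations: []`; since the service builder shown further down emits one Service per destination, a Route with no destinations should produce no Services. A hedged spot-check under the same assumed local setup as the sketch above (the label value comes from the fixture's own labels):

```bash
# Apply the destination-less Route, then confirm no Services exist for it.
kubectl apply -f fixtures/route-without-any-destination.yaml
kubectl get services -l cloudfoundry.org/route_guid=cc-route-guid
# Expected: "No resources found" while destinations stays empty.
```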
/routecontroller/integration/fixtures/route.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: cc-route-guid 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: cc-route-guid 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: cc-route-guid 16 | spec: 17 | host: hostname 18 | path: /some/path 19 | url: hostname.apps.example.com/some/path # CAPI guarantees this is unique 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: 24 | - weight: 80 # weights must add to 100 25 | port: 8080 26 | guid: destination-guid-1 27 | selector: 28 | matchLabels: 29 | cloudfoundry.org/app_guid: cc-app1-guid 30 | cloudfoundry.org/process_type: web 31 | app: 32 | guid: cc-app1-guid 33 | process: 34 | type: web 35 | - weight: 20 # weights must add to 100 36 | port: 9000 37 | guid: destination-guid-2 38 | selector: 39 | matchLabels: 40 | cloudfoundry.org/app_guid: cc-app1-guid 41 | cloudfoundry.org/process_type: other-web 42 | app: 43 | guid: cc-app2-guid 44 | process: 45 | type: other-web 46 | status: 47 | conditions: 48 | type: Ready 49 | status: False # starts out False 50 | # only changed by the Istio Routing Controller after it 51 | # completes the reconciliation (creates child objects) 52 | # maybe it also waits for children to become "Ready"? 53 | -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/single-route-with-multiple-destinations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: cc-route-guid-1 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: cc-route-guid 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: cc-route-guid 16 | spec: 17 | host: hostname 18 | path: /some/path 19 | url: hostname.apps.example.com/some/path 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: 24 | - weight: 80 25 | port: 8080 26 | guid: destination-guid-1 27 | selector: 28 | matchLabels: 29 | cloudfoundry.org/app_guid: cc-app1-guid 30 | cloudfoundry.org/process_type: web 31 | app: 32 | guid: cc-app1-guid 33 | process: 34 | type: web 35 | - weight: 20 36 | port: 9000 37 | guid: destination-guid-2 38 | selector: 39 | matchLabels: 40 | cloudfoundry.org/app_guid: cc-app2-guid 41 | cloudfoundry.org/process_type: other-web 42 | app: 43 | guid: cc-app2-guid 44 | process: 45 | type: other-web 46 | -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/single-route-with-no-destination.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 
| metadata: 4 | name: cc-route-guid-1 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: cc-route-guid 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: cc-route-guid 16 | spec: 17 | host: hostname 18 | path: /some/path 19 | url: hostname.apps.example.com/some/path 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: [] 24 | -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/single-route-with-single-destination.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: cc-route-guid-1 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: cc-route-guid 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: cc-route-guid 16 | spec: 17 | host: hostname 18 | path: /some/path 19 | url: hostname.apps.example.com/some/path 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: 24 | - weight: 100 25 | port: 8080 26 | guid: destination-guid-1 27 | selector: 28 | matchLabels: 29 | cloudfoundry.org/app_guid: cc-app1-guid 30 | cloudfoundry.org/process_type: web 31 | app: 32 | guid: cc-app1-guid 33 | process: 34 | type: web 35 | -------------------------------------------------------------------------------- /routecontroller/integration/fixtures/single-route-with-updated-single-destination.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: cc-route-guid-1 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: cc-route-guid 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: cc-route-guid 16 | spec: 17 | host: hostname 18 | path: /some/path 19 | url: hostname.apps.example.com/some/path 20 | domain: 21 | name: apps.example.com 22 | internal: false 23 | destinations: 24 | - weight: 100 25 | port: 9090 26 | guid: destination-guid-1 27 | selector: 28 | matchLabels: 29 | cloudfoundry.org/app_guid: cc-app1-guid 30 | cloudfoundry.org/process_type: web 31 | app: 32 | guid: cc-app1-guid 33 | process: 34 | type: web 35 | -------------------------------------------------------------------------------- /routecontroller/integration/integration_suite_test.go: -------------------------------------------------------------------------------- 1 | package integration_test 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | "github.com/onsi/gomega/gexec" 8 | 9 | . 
"github.com/onsi/ginkgo" 10 | . "github.com/onsi/gomega" 11 | ) 12 | 13 | func TestIntegration(t *testing.T) { 14 | RegisterFailHandler(Fail) 15 | RunSpecs(t, "Integration Suite") 16 | } 17 | 18 | var ( 19 | routeControllerBinaryPath string 20 | ) 21 | 22 | const ( 23 | defaultTimeout = 30 * time.Second 24 | defaultPollingInterval = 1 * time.Second 25 | ) 26 | 27 | var _ = SynchronizedBeforeSuite(func() []byte { 28 | binPath, err := gexec.Build( 29 | "code.cloudfoundry.org/cf-k8s-networking/routecontroller", 30 | "--race", 31 | ) 32 | Expect(err).NotTo(HaveOccurred()) 33 | 34 | SetDefaultEventuallyTimeout(defaultTimeout) 35 | SetDefaultEventuallyPollingInterval(defaultPollingInterval) 36 | SetDefaultConsistentlyDuration(defaultTimeout) 37 | SetDefaultConsistentlyPollingInterval(defaultPollingInterval) 38 | 39 | return []byte(binPath) 40 | }, func(data []byte) { 41 | routeControllerBinaryPath = string(data) 42 | }) 43 | 44 | var _ = SynchronizedAfterSuite(func() {}, func() { 45 | gexec.CleanupBuildArtifacts() 46 | }) 47 | -------------------------------------------------------------------------------- /routecontroller/resourcebuilders/resourcebuilders_suite_test.go: -------------------------------------------------------------------------------- 1 | package resourcebuilders_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | "github.com/sirupsen/logrus" 9 | ) 10 | 11 | func TestResourcebuilders(t *testing.T) { 12 | logrus.SetOutput(GinkgoWriter) 13 | RegisterFailHandler(Fail) 14 | RunSpecs(t, "Resourcebuilders Suite") 15 | } 16 | -------------------------------------------------------------------------------- /routecontroller/resourcebuilders/service_builder.go: -------------------------------------------------------------------------------- 1 | package resourcebuilders 2 | 3 | import ( 4 | networkingv1alpha1 "code.cloudfoundry.org/cf-k8s-networking/routecontroller/apis/networking/v1alpha1" 5 | corev1 "k8s.io/api/core/v1" 6 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" 8 | ) 9 | 10 | type ServiceBuilder struct{} 11 | 12 | func (b *ServiceBuilder) BuildMutateFunction(actualService, desiredService *corev1.Service) controllerutil.MutateFn { 13 | return func() error { 14 | actualService.ObjectMeta.Labels = desiredService.ObjectMeta.Labels 15 | actualService.ObjectMeta.Annotations = desiredService.ObjectMeta.Annotations 16 | actualService.ObjectMeta.OwnerReferences = desiredService.ObjectMeta.OwnerReferences 17 | actualService.Spec.Selector = desiredService.Spec.Selector 18 | actualService.Spec.Ports = desiredService.Spec.Ports 19 | return nil 20 | } 21 | } 22 | 23 | func (b *ServiceBuilder) Build(route *networkingv1alpha1.Route) []corev1.Service { 24 | const httpPortName = "http" 25 | services := []corev1.Service{} 26 | for _, dest := range route.Spec.Destinations { 27 | service := corev1.Service{ 28 | ObjectMeta: metav1.ObjectMeta{ 29 | OwnerReferences: []metav1.OwnerReference{routeToOwnerRef(route)}, 30 | Name: serviceName(dest), 31 | Namespace: route.ObjectMeta.Namespace, 32 | Labels: map[string]string{}, 33 | Annotations: map[string]string{}, 34 | }, 35 | Spec: corev1.ServiceSpec{ 36 | Selector: dest.Selector.MatchLabels, 37 | Ports: []corev1.ServicePort{ 38 | { 39 | Port: int32(*dest.Port), 40 | Name: httpPortName, 41 | }}, 42 | }, 43 | } 44 | service.ObjectMeta.Labels["cloudfoundry.org/app_guid"] = dest.App.Guid 45 | 
service.ObjectMeta.Labels["cloudfoundry.org/process_type"] = dest.App.Process.Type 46 | service.ObjectMeta.Labels["cloudfoundry.org/route_guid"] = route.ObjectMeta.Name 47 | service.ObjectMeta.Annotations["cloudfoundry.org/route-fqdn"] = route.FQDN() 48 | services = append(services, service) 49 | } 50 | return services 51 | } 52 | 53 | func routeToOwnerRef(r *networkingv1alpha1.Route) metav1.OwnerReference { 54 | return metav1.OwnerReference{ 55 | APIVersion: networkingv1alpha1.SchemeBuilder.GroupVersion.String(), 56 | Kind: r.TypeMeta.Kind, 57 | Name: r.ObjectMeta.Name, 58 | UID: r.ObjectMeta.UID, 59 | } 60 | } 61 | 62 | func boolPtr(x bool) *bool { 63 | return &x 64 | } 65 | -------------------------------------------------------------------------------- /routecontroller/scripts/integration: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | function printStatus { 6 | if [ $? -eq 0 ]; then 7 | echo -e "\nSWEET SUITE SUCCESS" 8 | else 9 | echo -e "\nSUITE FAILURE" 10 | fi 11 | } 12 | 13 | trap printStatus EXIT 14 | 15 | 16 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 17 | cd "${script_dir}/.." 18 | 19 | # btrfs fix: Concourse with btrfs doesn't automatically expose the /dev/loop0 20 | # device on task containers. When running KIND this is necessary because 21 | # Kubelet attempts to stat the device mounts and will fail if this device 22 | # doesn't exist. 23 | # 24 | # More info: 25 | # https://github.com/vmware-tanzu/cross-cluster-connectivity/blob/old-hamlet/doc/adr/0002-kind-concourse-and-btrfs.md 26 | if [[ "$(df -Th / | grep btrfs | wc -l)" -eq 1 ]] && [[ ! -e /dev/loop0 ]]; then 27 | echo "Detected btrfs. Making /dev/loop0." 28 | mknod /dev/loop0 b 7 0 29 | fi 30 | 31 | set -x 32 | # TODO make these runnable in parallel 33 | ginkgo -keepGoing -trace -progress -failOnPending -randomizeAllSpecs -race -slowSpecThreshold 45 integration 34 | set +x 35 | -------------------------------------------------------------------------------- /routecontroller/scripts/stress: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | function printStatus { 6 | if [ $? -eq 0 ]; then 7 | echo -e "\nSWEET SUITE SUCCESS" 8 | else 9 | echo -e "\nSUITE FAILURE" 10 | fi 11 | } 12 | 13 | trap printStatus EXIT 14 | 15 | 16 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 17 | cd "${script_dir}/.." 18 | 19 | set -x 20 | ginkgo -v -trace -progress -slowSpecThreshold 600 stress 21 | set +x 22 | -------------------------------------------------------------------------------- /routecontroller/scripts/test: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | function printStatus { 6 | if [ $? -eq 0 ]; then 7 | echo -e "\nSWEET SUITE SUCCESS" 8 | else 9 | echo -e "\nSUITE FAILURE" 10 | fi 11 | } 12 | 13 | trap printStatus EXIT 14 | 15 | 16 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 17 | cd "${script_dir}/.." 18 | 19 | set -x 20 | 21 | go vet ./... 22 | go fmt ./... 23 | go build -o /dev/null -v . 
24 | 25 | ginkgo -keepGoing -trace -progress -r -failOnPending -randomizeAllSpecs -p -race -skipPackage integration,stress 26 | 27 | # TODO run integration tests 28 | -------------------------------------------------------------------------------- /routecontroller/stress/README.md: -------------------------------------------------------------------------------- 1 | # Route Controller Stress Tests 2 | 3 | ## Proposed Metrics 4 | 5 | - Create 1000 routes, then deploy RouteController and measure how long it takes 6 | for all of the VirtualServices and Services to be created. 7 | - Once there are 1000 routes (and their corresponding VirtualServices and 8 | Services), if we quickly add 100 more routes, how long does it take to create 9 | the new VirtualServices and Services? 10 | - Given I have 1100 routes, how long does it take to modify 100 of the routes as 11 | individual updates? 12 | - Given I have 1100 routes, how long does it take to modify 1000 of them in bulk? 13 | - Given I have 1100 routes, how long does it take to remove 100 routes? 14 | - Given I have 1000 routes, how long does it take to remove all of the routes? 15 | 16 | For each of these six metrics, we measure the time each change takes to fully 17 | propagate by observing modifications to VirtualServices and Services, and we require 18 | the average propagation time for 95% of the changed routes to stay under a baseline. If we fall outside 19 | of some tolerance range of the baseline, the test fails. 20 | 21 | ## Architectural Details 22 | - runs on KIND 23 | - only deploys the Route CRD and the VirtualService CRD from Istio 24 | - no full Istio installation 25 | - no deployment of cf-for-k8s 26 | - no AIs (app instances) 27 | - deploys RouteController as part of each batch of tests 28 | 29 | ## Run the tests 30 | ```bash 31 | cd cf-k8s-networking/routecontroller/stress 32 | ginkgo .
33 | ``` 34 | -------------------------------------------------------------------------------- /routecontroller/stress/fixtures/cluster.yml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | extraPortMappings: 6 | - containerPort: 30080 7 | hostPort: 30080 8 | -------------------------------------------------------------------------------- /routecontroller/stress/fixtures/route_template.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.cloudfoundry.org/v1alpha1 2 | kind: Route 3 | metadata: 4 | name: {{.Name}} 5 | annotations: {} 6 | labels: 7 | app.kubernetes.io/name: cc-route-guid 8 | app.kubernetes.io/version: cloud-controller-api-version 9 | app.kubernetes.io/managed-by: cloudfoundry 10 | app.kubernetes.io/component: cf-networking 11 | app.kubernetes.io/part-of: cloudfoundry 12 | cloudfoundry.org/org_guid: cc-org-guid 13 | cloudfoundry.org/space_guid: cc-space-guid 14 | cloudfoundry.org/domain_guid: cc-domain-guid 15 | cloudfoundry.org/route_guid: cc-route-guid 16 | tag: {{.Tag}} 17 | spec: 18 | host: {{.Host}} 19 | path: {{.Path}} 20 | url: {{.Host}}.{{.Domain}}{{.Path}} 21 | domain: 22 | name: {{.Domain}} 23 | internal: false 24 | destinations: 25 | - weight: 100 26 | port: 8080 27 | guid: {{.DestinationGUID}} 28 | selector: 29 | matchLabels: 30 | cloudfoundry.org/app_guid: {{.AppGUID}} 31 | cloudfoundry.org/process_type: web 32 | app: 33 | guid: {{.AppGUID}} 34 | process: 35 | type: web 36 | -------------------------------------------------------------------------------- /routecontroller/stress/fixtures/service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: routecontroller-8080 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: routecontroller 9 | ports: 10 | - protocol: TCP 11 | port: 8080 12 | nodePort: 30080 13 | -------------------------------------------------------------------------------- /routecontroller/stress/matchers/exit_and_log_matcher.go: -------------------------------------------------------------------------------- 1 | package matchers 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/onsi/gomega/format" 7 | "github.com/onsi/gomega/gexec" 8 | ) 9 | 10 | // Copied from gexec.exit_matcher 11 | 12 | /* 13 | The Exit matcher operates on a session: 14 | 15 | Expect(session).Should(ExitSuccessfully()) 16 | 17 | Exit passes if the session has already exited and has exited with code 0. 18 | 19 | Note that the process must have already exited. 
To wait for a process to exit, use Eventually: 20 | 21 | Eventually(session, 3).Should(ExitSuccessfully()) 22 | */ 23 | func ExitSuccessfully() *exitMatcher { 24 | return &exitMatcher{} 25 | } 26 | 27 | type exitMatcher struct { 28 | actualExitCode int 29 | } 30 | 31 | type Exiter interface { 32 | ExitCode() int 33 | } 34 | 35 | func (m *exitMatcher) Match(actual interface{}) (success bool, err error) { 36 | exiter, ok := actual.(Exiter) 37 | if !ok { 38 | return false, fmt.Errorf("ExitSuccessfully must be passed a gexec.Exiter (Missing method ExitCode() int) Got:\n%s", format.Object(actual, 1)) 39 | } 40 | 41 | m.actualExitCode = exiter.ExitCode() 42 | 43 | return 0 == m.actualExitCode, nil 44 | } 45 | 46 | func (m *exitMatcher) FailureMessage(actual interface{}) (message string) { 47 | session, ok := actual.(*gexec.Session) 48 | if !ok { 49 | panic("ExitSuccessfully must be passed a gexec.Session") 50 | } 51 | 52 | if m.actualExitCode == -1 { 53 | return "Expected process to exit. It did not." 54 | } 55 | stdout := string(session.Out.Contents()) 56 | stderr := string(session.Err.Contents()) 57 | 58 | return format.Message(m.actualExitCode, fmt.Sprintf("to match exit code 0.\nSTDOUT:\n%s\nSTDERR:\n%s\n", stdout, stderr)) 59 | } 60 | 61 | func (m *exitMatcher) NegatedFailureMessage(actual interface{}) (message string) { 62 | return format.Message(m.actualExitCode, "not to match exit code 0") 63 | } 64 | 65 | func (m *exitMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { 66 | session, ok := actual.(*gexec.Session) 67 | if ok { 68 | return session.ExitCode() == -1 69 | } 70 | return true 71 | } 72 | -------------------------------------------------------------------------------- /scripts/vendir-sync-local: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pushd ~/workspace/cf-for-k8s > /dev/null 4 | vendir sync -d config/networking/_ytt_lib/cf-k8s-networking=~/workspace/cf-k8s-networking 5 | popd > /dev/null 6 | -------------------------------------------------------------------------------- /test/acceptance/README.md: -------------------------------------------------------------------------------- 1 | # Networking Acceptance Tests 2 | 3 | ## Requirements 4 | 5 | To run tests you need to have the following installed: 6 | 7 | * [kapp](https://k14s.io/) 8 | 9 | ```bash 10 | $ wget -O- https://k14s.io/install.sh | bash 11 | ``` 12 | 13 | * [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) 14 | 15 | Other requirements: 16 | 17 | * You should have `kubectl` config with the access to system and workload namespaces to GET/POST/PUT/PATCH pods, services, service accounts and execute commands in pods. 
18 | 19 | * JSON-formatted configuration file with the following required fields: 20 | 21 | ``` 22 | { 23 | "kubeconfig_path": "/Users/user/.kube/config", 24 | "api": "api.example.com", 25 | "admin_user": "admin", 26 | "admin_password": "PASSWORD", 27 | "apps_domain": "apps.example.com" 28 | } 29 | ``` 30 | 31 | * `diego_docker` feature flag enabled in your CF deployment: 32 | 33 | ```bash 34 | cf enable-feature-flag diego_docker 35 | ``` 36 | 37 | ## Run 38 | 39 | ```bash 40 | # make sure you have targeted your cluster before executing this 41 | cd test/acceptance 42 | ./bin/test_local <path to test config JSON> [path to kube config] 43 | ``` 44 | 45 | 46 | ## Configuration 47 | 48 | As mentioned above, the [configuration file](cfg/cfg.go) is a subset of the [CATS config file](https://github.com/cloudfoundry/cf-acceptance-tests#test-configuration) with some additions. 49 | 50 | There are a few environment variables that can be used to control test setup: 51 | 52 | * `CONFIG_KEEP_CLUSTER=1` to skip destroying deployed pods and services after the tests; helpful for debugging in CI 53 | * `CONFIG_KEEP_CF=1` to skip reverting changes in CF after the tests; helpful for debugging in CI 54 | -------------------------------------------------------------------------------- /test/acceptance/assets/allow-ingress-from-apps-network-policy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: NetworkPolicy 4 | metadata: 5 | name: allow-ingress-from-apps 6 | namespace: cf-system 7 | spec: 8 | podSelector: {} 9 | policyTypes: 10 | - Ingress 11 | ingress: 12 | - from: 13 | - namespaceSelector: 14 | matchLabels: 15 | cf-for-k8s.cloudfoundry.org/cf-workloads-ns: "" 16 | -------------------------------------------------------------------------------- /test/acceptance/assets/app/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Staticfile 6 | 7 | 8 |

click me bruce

9 | 10 | 11 | 12 | -------------------------------------------------------------------------------- /test/acceptance/assets/outbound-network-request-app/go.mod: -------------------------------------------------------------------------------- 1 | module example 2 | 3 | go 1.16 4 | -------------------------------------------------------------------------------- /test/acceptance/assets/outbound-network-request-app/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "net/http" 7 | "os" 8 | ) 9 | 10 | func main() { 11 | resp, err := http.Get("http://example.com/") 12 | if err != nil { 13 | fmt.Println("=== Network is not available upon start: FAILED === ") 14 | panic(err) 15 | } 16 | defer resp.Body.Close() 17 | _, err = ioutil.ReadAll(resp.Body) 18 | if err != nil { 19 | panic(err) 20 | } 21 | fmt.Println("=== Network is available upon start: SUCCEEDED === ") 22 | fmt.Println("listening...") 23 | err = http.ListenAndServe(":"+os.Getenv("PORT"), nil) 24 | if err != nil { 25 | panic(err) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /test/acceptance/assets/system-component.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: system-component-sa 5 | --- 6 | apiVersion: v1 7 | kind: Pod 8 | metadata: 9 | name: test-system-component 10 | labels: 11 | app: test-system-component 12 | spec: 13 | serviceAccountName: system-component-sa 14 | containers: 15 | - image: cfrouting/httpbin:latest 16 | name: system-component 17 | ports: 18 | - containerPort: 8080 19 | --- 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | labels: 24 | app: test-system-component 25 | name: test-system-component 26 | spec: 27 | ports: 28 | - name: http 29 | port: 80 30 | protocol: TCP 31 | targetPort: 8080 32 | selector: 33 | app: test-system-component 34 | type: ClusterIP 35 | -------------------------------------------------------------------------------- /test/acceptance/bin/test_local: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -euo pipefail 4 | 5 | FLAKE_ATTEMPTS="${FLAKE_ATTEMPTS:-0}" 6 | 7 | script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" 8 | cd "${script_dir}/.." 
9 | 10 | set +u 11 | if [[ -z $1 ]]; then 12 | echo "Usage: ./bin/test_local [kube_config_path]" 13 | exit 1 14 | fi 15 | set -u 16 | 17 | kube_config_path=${2:-"${HOME}/.kube/config"} 18 | test_config_path="$1" 19 | 20 | export KUBECONFIG=$kube_config_path 21 | kubectl cluster-info 22 | 23 | export CGO_ENABLED=1 24 | 25 | CONFIG="${test_config_path}" ginkgo \ 26 | -trace \ 27 | -progress \ 28 | -r \ 29 | -randomizeAllSpecs \ 30 | -race \ 31 | -v \ 32 | -flakeAttempts="${FLAKE_ATTEMPTS}" 33 | -------------------------------------------------------------------------------- /test/acceptance/go.mod: -------------------------------------------------------------------------------- 1 | module code.cloudfoundry.org/cf-k8s-networking/acceptance 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/cloudfoundry-incubator/cf-test-helpers v1.0.0 7 | github.com/kr/pretty v0.1.0 // indirect 8 | github.com/onsi/ginkgo v1.11.0 9 | github.com/onsi/gomega v1.8.1 10 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859 // indirect 11 | golang.org/x/sync v0.0.0-20190423024810-112230192c58 // indirect 12 | golang.org/x/sys v0.0.0-20190412213103-97732733099d // indirect 13 | gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect 14 | ) 15 | -------------------------------------------------------------------------------- /test/acceptance/startup_connectivity_test.go: -------------------------------------------------------------------------------- 1 | package acceptance_test 2 | 3 | import ( 4 | "github.com/cloudfoundry-incubator/cf-test-helpers/cf" 5 | "github.com/cloudfoundry-incubator/cf-test-helpers/generator" 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | "github.com/onsi/gomega/gexec" 9 | ) 10 | 11 | var _ = Describe("Outbound network connectivity during app startup", func() { 12 | var ( 13 | app1name string 14 | ) 15 | 16 | BeforeEach(func() { 17 | app1name = generator.PrefixedRandomName("ACCEPTANCE", "outbound-network-app") 18 | }) 19 | 20 | AfterEach(func() { 21 | session := cf.Cf("delete", app1name, "-f") 22 | Expect(session.Wait(TestConfig.DefaultTimeoutDuration())).To(gexec.Exit(0), "expected cf delete to succeed") 23 | }) 24 | 25 | Context("pushing the app", func() { 26 | It("succeeds", func() { 27 | session := cf.Cf("push", 28 | app1name, 29 | "-p", "assets/outbound-network-request-app", 30 | ) 31 | Expect(session.Wait(TestConfig.CfPushTimeoutDuration())).To(gexec.Exit(0), "expected app to start successfully") 32 | }) 33 | }) 34 | }) 35 | -------------------------------------------------------------------------------- /test/scale/README.md: -------------------------------------------------------------------------------- 1 | ## Control Plane Latency Test 2 | 3 | ### Prerequisites 4 | #### Cluster Requirements 5 | 6 | These scale tests are meant to be run against a large kubernetes cluster. We run 7 | these tests in CI on GKE clusters with 100 `n1-standard-8` nodes with [ip 8 | aliasing](https://cloud.google.com/sdk/gcloud/reference/beta/container/clusters/create#--enable-ip-alias) 9 | and [network policy 10 | enabled](https://cloud.google.com/sdk/gcloud/reference/beta/container/clusters/create#--enable-network-policy). 11 | 12 | #### Environment Setup 13 | 14 | After deploying an appropriate size GKE cluster, CI will deploy cf-for-k8s and 15 | push 2000 app instances (1000 apps, 2 instances per app) with 1000 routes (1 route 16 | per app). 
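Both sections above assume a GKE cluster of that shape already exists. For reference, a hedged sketch of a `gcloud` invocation matching the stated requirements; the cluster name and zone are placeholders, and the flags should be checked against the gcloud documentation linked above:

```bash
# Illustrative only: a GKE cluster sized like the one the scale tests expect.
gcloud container clusters create scale-test-cluster \
  --zone us-central1-a \
  --num-nodes 100 \
  --machine-type n1-standard-8 \
  --enable-ip-alias \
  --enable-network-policy
```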
17 | 18 | ### Tests 19 | #### Steady State Test 20 | 21 | The steady state test runs once the environment has been set up with 2000 app 22 | instances and 1000 routes. This test is "steady state" because it keeps the 23 | number of routes constant at 1000, by deleting one route every time it maps a 24 | new route. We chose to use a steady state test because we want to keep the 25 | number of routes constant to measure the control plane latency under the desired 26 | load. 27 | 28 | For each route the test maps, the test measures the latency from when the route 29 | is mapped until when that route is reachable. The test asserts that this latency 30 | is under 10 seconds for 95% of the `map-route` requests. 31 | 32 | ### CI 33 | Currently we run scale tests as defined in the [scaling 34 | pipeline](../../ci/scaling.yml). 35 | -------------------------------------------------------------------------------- /test/scale/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/cf-k8s-networking/ci/scale 2 | 3 | go 1.14 4 | 5 | require ( 6 | github.com/cloudfoundry-incubator/cf-test-helpers v1.0.0 7 | github.com/montanaflynn/stats v0.6.3 8 | github.com/onsi/ginkgo v1.14.1 9 | github.com/onsi/gomega v1.10.2 10 | ) 11 | -------------------------------------------------------------------------------- /test/scale/scale_suite_test.go: -------------------------------------------------------------------------------- 1 | package scale_test 2 | 3 | import ( 4 | "os" 5 | "strconv" 6 | 7 | . "github.com/onsi/ginkgo" 8 | . "github.com/onsi/gomega" 9 | 10 | "testing" 11 | ) 12 | 13 | func TestScale(t *testing.T) { 14 | RegisterFailHandler(Fail) 15 | RunSpecs(t, "Scale Suite") 16 | } 17 | 18 | var ( 19 | domain string 20 | cleanup bool 21 | numApps int 22 | numAppsPerSpace int 23 | orgNamePrefix string 24 | spaceNamePrefix string 25 | ) 26 | 27 | var _ = BeforeSuite(func() { 28 | var found bool 29 | var err error 30 | 31 | orgNamePrefix = "scale-tests" 32 | spaceNamePrefix = "scale-tests" 33 | 34 | domain, found = os.LookupEnv("DOMAIN") 35 | Expect(found).To(BeTrue(), "DOMAIN environment variable required but not set") 36 | 37 | cleanupStr := os.Getenv("CLEANUP") 38 | cleanup = cleanupStr == "true" || cleanupStr == "1" 39 | 40 | numAppsStr, found := os.LookupEnv("NUMBER_OF_APPS") 41 | Expect(found).To(BeTrue(), "NUMBER_OF_APPS environment variable required but not set") 42 | numApps, err = strconv.Atoi(numAppsStr) 43 | Expect(err).NotTo(HaveOccurred(), "NUMBER_OF_APPS environment variable malformed") 44 | 45 | // don't change this without also changing pave-cf-for-scale-tests.sh 46 | // must be power of 10 (1, 100, 1000, etc) 47 | numAppsPerSpace = 10 48 | }) 49 | -------------------------------------------------------------------------------- /test/uptime/README.md: -------------------------------------------------------------------------------- 1 | # Uptime Test 2 | 3 | ## Global Configuration 4 | 5 | UPGRADE_DISCOVERY_TIMEOUT is the amount of time given to discover an upgrade is 6 | happening using `kapp app-change ls -a cf`. 7 | 8 | ## Data Plane Service Level Measurements 9 | 10 | Our SLO is defined as: 11 | 12 | 95% (X) of GET Requests to a Route succeed in less than 100 (Y) milliseconds. 13 | 14 | The SLI used to measure our SLO is request latency. 
15 | 16 | Request Latency is measured by the following: 17 | 18 | Given an app 'A' deployed on the platform with route 'r', the SLI times how long it 19 | takes to make an HTTP GET request to 'r' for 'A' and receive a response. 20 | 21 | ### Description 22 | CF_APP_DOMAIN is the app domain. This is currently used to map new routes to 23 | test control plane uptime. 24 | 25 | DATA_PLANE_APP_NAME: Name of the app used for the data plane SLI. 26 | 27 | 28 | ### Configuration 29 | 30 | X: DATA_PLANE_SLO_PERCENTAGE 31 | Y: DATA_PLANE_SLO_MAX_REQUEST_LATENCY 32 | r: DATA_PLANE_SLI_APP_ROUTE_URL 33 | 34 | ## Control Plane Service Level Measurements 35 | 36 | Our SLO is defined as: 37 | 38 | 95% (X) of routes that get mapped become available in less than 15 (Y) seconds. 39 | 40 | 1. Every 5 seconds, map a route 41 | 2. Sleep for the propagation time (Y) 42 | 3. Send requests to the route for the sample capture time (U), and record their latency and response 43 | code 44 | 4. If fewer than 95% (Z) of those requests return a 200 response within the latency SLO 45 | (W), consider that route a failure 46 | 5. If fewer than 95% (X) of the mapped routes succeed, fail the test 47 | 48 | ### Description 49 | CONTROL_PLANE_SLO_PERCENTAGE (X): Percentage of routes that we expect to get mapped 50 | and become available in less than the number of seconds defined by 51 | `CONTROL_PLANE_SLO_MAX_ROUTE_PROPAGATION_TIME`. Defaults to 95%. 52 | 53 | CONTROL_PLANE_SLO_MAX_ROUTE_PROPAGATION_TIME (Y): Time we wait before checking whether 54 | a route is live, defaults to 15 seconds. 55 | 56 | CONTROL_PLANE_SLO_DATA_PLANE_AVAILABILITY_PERCENTAGE (Z): Percentage of requests to a newly 57 | mapped route that must succeed within 58 | `CONTROL_PLANE_SLO_DATA_PLANE_MAX_REQUEST_LATENCY` for that route to count as available. Defaults to 95%. 59 | 60 | CONTROL_PLANE_SLO_DATA_PLANE_MAX_REQUEST_LATENCY (W): Maximum acceptable response time from a 61 | mapped route, defaults to 200ms. 62 | 63 | CONTROL_PLANE_SLO_SAMPLE_CAPTURE_TIME (U): How long we keep sending requests to each newly mapped 64 | route. Defaults to 10 seconds. 65 | 66 | CONTROL_PLANE_APP_NAME: Name of the app to map routes to. Defaults to 67 | `upgrade-control-plane-sli`. 68 | 69 | -------------------------------------------------------------------------------- /test/uptime/control_plane_uptime_test.go: -------------------------------------------------------------------------------- 1 | package uptime_test 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "code.cloudfoundry.org/cf-k8s-networking/test/uptime/internal/checker" 8 | "code.cloudfoundry.org/cf-k8s-networking/test/uptime/internal/collector" 9 | "github.com/cloudfoundry-incubator/cf-test-helpers/cf" 10 | . "github.com/onsi/ginkgo" 11 | .
"github.com/onsi/gomega" 12 | ) 13 | 14 | var _ = Describe("Control Plane Uptime", func() { 15 | var ( 16 | upgradeChecker *checker.Upgrade 17 | requestCollector *collector.Request 18 | startTime time.Time 19 | count int 20 | ) 21 | 22 | BeforeEach(func() { 23 | upgradeChecker = &checker.Upgrade{ 24 | PollInterval: 1 * time.Second, 25 | } 26 | requestCollector = &collector.Request{ 27 | DataPlaneSLOMaxRequestLatency: controlPlaneSLODataPlaneMaxRequestLatency, 28 | ControlPlaneSLODataPlaneAvailabilityPercentage: controlPlaneSLODataPlaneAvailabilityPercentage, 29 | Client: httpClient, 30 | } 31 | 32 | upgradeChecker.Start() 33 | startTime = time.Now() 34 | count = 0 35 | }) 36 | 37 | AfterEach(func() { 38 | upgradeChecker.Stop() 39 | 40 | for i := 0; i < count; i++ { 41 | cf.Cf("delete-route", "-f", cfAppDomain, "--hostname", fmt.Sprintf("host-%d", i)).Wait(30 * time.Second) 42 | } 43 | }) 44 | 45 | It("measures the control plane uptime", func() { 46 | By("checking whether X% of requests are successful within the acceptable response time during an upgrade", func() { 47 | for { 48 | if !upgradeChecker.HasFoundUpgrade() && time.Since(startTime) > upgradeDiscoveryTimeout { 49 | Fail(fmt.Sprintf("failed to find cf upgrade in %s", upgradeDiscoveryTimeout.String())) 50 | } 51 | 52 | // if the upgrade is finished (learned by checking the "finished at" in 53 | // kapp app-change ls), stop running the test 54 | if upgradeChecker.HasFoundUpgrade() && upgradeChecker.IsUpgradeFinished() { 55 | break 56 | } 57 | 58 | routeHost := fmt.Sprintf("host-%d", count) 59 | cf.Cf("map-route", controlPlaneAppName, cfAppDomain, "--hostname", routeHost) 60 | 61 | route := fmt.Sprintf("http://%s.%s", routeHost, cfAppDomain) 62 | requestCollector.Request(route, controlPlaneSLORoutePropagationTime, controlPlaneSLOSampleCaptureTime) 63 | 64 | count++ 65 | time.Sleep(5 * time.Second) 66 | } 67 | 68 | requestCollector.Wait() 69 | 70 | results := requestCollector.GetResults() 71 | results.PrintResults() 72 | 73 | Expect(results.SuccessPercentage()).To(BeNumerically(">=", controlPlaneSLOPercentage)) 74 | Expect(true).To(BeTrue()) 75 | }) 76 | }) 77 | }) 78 | -------------------------------------------------------------------------------- /test/uptime/data_plane_uptime_test.go: -------------------------------------------------------------------------------- 1 | package uptime_test 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "time" 7 | 8 | "code.cloudfoundry.org/cf-k8s-networking/test/uptime/internal/checker" 9 | "code.cloudfoundry.org/cf-k8s-networking/test/uptime/internal/uptime" 10 | . "github.com/onsi/ginkgo" 11 | . 
"github.com/onsi/gomega" 12 | ) 13 | 14 | var _ = Describe("Data Plane Uptime", func() { 15 | var ( 16 | results *uptime.DataPlaneResults 17 | upgradeChecker *checker.Upgrade 18 | startTime time.Time 19 | ) 20 | 21 | BeforeEach(func() { 22 | results = &uptime.DataPlaneResults{} 23 | upgradeChecker = &checker.Upgrade{ 24 | PollInterval: 1 * time.Second, 25 | } 26 | upgradeChecker.Start() 27 | 28 | startTime = time.Now() 29 | }) 30 | 31 | AfterEach(func() { 32 | upgradeChecker.Stop() 33 | }) 34 | 35 | It("measures the data plane uptime", func() { 36 | By("checking whether X% of requests are successful within the acceptable response time during an upgrade", func() { 37 | for { 38 | if !upgradeChecker.HasFoundUpgrade() && time.Since(startTime) > upgradeDiscoveryTimeout { 39 | Fail(fmt.Sprintf("failed to find cf upgrade in %s", upgradeDiscoveryTimeout.String())) 40 | } 41 | 42 | // if the upgrade is finished (learned by checking the "finished at" in 43 | // kapp app-change ls), and we've run for at least 15 minutes, stop running the test 44 | if upgradeChecker.HasFoundUpgrade() && upgradeChecker.IsUpgradeFinished() { 45 | break 46 | } 47 | 48 | resp, err, requestLatency := timeGetRequest(dataPlaneSLIAppRouteURL) 49 | if err != nil { 50 | results.RecordError(err) 51 | continue 52 | } 53 | 54 | hasStatusOK := resp.StatusCode == http.StatusOK 55 | hasMetRequestLatencySLO := requestLatency < dataPlaneSLOMaxRequestLatency 56 | hasPassedSLI := hasStatusOK && hasMetRequestLatencySLO 57 | 58 | results.Record(hasPassedSLI, 59 | hasStatusOK, 60 | hasMetRequestLatencySLO, 61 | requestLatency) 62 | 63 | } 64 | 65 | results.PrintResults() 66 | 67 | Expect(results.SuccessPercentage()).To(BeNumerically(">=", dataPlaneSLOPercentage)) 68 | }) 69 | }) 70 | }) 71 | -------------------------------------------------------------------------------- /test/uptime/go.mod: -------------------------------------------------------------------------------- 1 | module code.cloudfoundry.org/cf-k8s-networking/test/uptime 2 | 3 | go 1.14 4 | 5 | require ( 6 | code.cloudfoundry.org/cf-k8s-networking/uptime-tests v0.0.0-20200605171322-192317052474 7 | github.com/cloudfoundry-incubator/cf-test-helpers v1.0.0 8 | github.com/fsnotify/fsnotify v1.4.9 // indirect 9 | github.com/onsi/ginkgo v1.14.0 10 | github.com/onsi/gomega v1.10.1 11 | golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 // indirect 12 | golang.org/x/text v0.3.2 // indirect 13 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 // indirect 14 | gopkg.in/yaml.v2 v2.3.0 // indirect 15 | ) 16 | -------------------------------------------------------------------------------- /test/uptime/internal/checker/upgrade.go: -------------------------------------------------------------------------------- 1 | package checker 2 | 3 | import ( 4 | "encoding/json" 5 | "os/exec" 6 | "sync" 7 | "time" 8 | 9 | . 
"github.com/onsi/ginkgo" 10 | ) 11 | 12 | type Upgrade struct { 13 | PollInterval time.Duration 14 | 15 | upgradeName string 16 | stopChan chan bool 17 | upgradeDone bool 18 | waitGroup sync.WaitGroup 19 | mutex sync.Mutex 20 | } 21 | 22 | type kappResponse struct { 23 | Tables []struct { 24 | Rows []struct { 25 | Name string `json:"name"` 26 | FinishedAt string `json:"finished_at"` 27 | } 28 | } 29 | } 30 | 31 | func (u *Upgrade) Start() { 32 | u.stopChan = make(chan bool) 33 | 34 | u.waitGroup.Add(1) 35 | go func() { 36 | for { 37 | select { 38 | case <-u.stopChan: 39 | u.waitGroup.Done() 40 | return 41 | case <-time.After(u.PollInterval): 42 | u.checkUpgrade() 43 | } 44 | } 45 | }() 46 | } 47 | 48 | func (u *Upgrade) Stop() { 49 | close(u.stopChan) 50 | u.waitGroup.Wait() 51 | } 52 | 53 | func (u *Upgrade) HasFoundUpgrade() bool { 54 | u.mutex.Lock() 55 | defer u.mutex.Unlock() 56 | 57 | return u.upgradeName != "" 58 | } 59 | 60 | func (u *Upgrade) IsUpgradeFinished() bool { 61 | u.mutex.Lock() 62 | defer u.mutex.Unlock() 63 | 64 | return u.upgradeDone 65 | } 66 | 67 | func (u *Upgrade) checkUpgrade() { 68 | u.mutex.Lock() 69 | defer u.mutex.Unlock() 70 | 71 | if u.upgradeName == "" { 72 | u.upgradeName = u.discoverUpgradeName() 73 | } else { 74 | if u.checkIfUpgradeHasFinished(u.upgradeName) { 75 | u.upgradeDone = true 76 | } 77 | } 78 | } 79 | 80 | func (u *Upgrade) checkIfUpgradeHasFinished(upgradeName string) bool { 81 | resp := u.kappAppChangeLS() 82 | 83 | for _, table := range resp.Tables { 84 | for _, row := range table.Rows { 85 | if row.Name == upgradeName { 86 | return row.FinishedAt != "" 87 | } 88 | } 89 | } 90 | 91 | return false 92 | } 93 | 94 | func (u *Upgrade) discoverUpgradeName() string { 95 | resp := u.kappAppChangeLS() 96 | 97 | for _, table := range resp.Tables { 98 | for _, row := range table.Rows { 99 | if row.FinishedAt == "" { 100 | return row.Name 101 | } 102 | } 103 | } 104 | 105 | return "" 106 | } 107 | 108 | func (u *Upgrade) kappAppChangeLS() kappResponse { 109 | args := []string{"app-change", "ls", "-a", "cf", "--json"} 110 | cmd := exec.Command("kapp", args...) 
111 | 112 | cmd.Stderr = GinkgoWriter 113 | 114 | output, err := cmd.Output() 115 | if err != nil { 116 | panic(err) 117 | } 118 | 119 | resp := &kappResponse{} 120 | 121 | err = json.Unmarshal(output, resp) 122 | if err != nil { 123 | panic(err) 124 | } 125 | 126 | return *resp 127 | } 128 | -------------------------------------------------------------------------------- /test/uptime/internal/collector/request.go: -------------------------------------------------------------------------------- 1 | package collector 2 | 3 | import ( 4 | "net/http" 5 | "sync" 6 | "time" 7 | 8 | "code.cloudfoundry.org/cf-k8s-networking/test/uptime/internal/uptime" 9 | ) 10 | 11 | type Request struct { 12 | DataPlaneSLOMaxRequestLatency time.Duration 13 | ControlPlaneSLODataPlaneAvailabilityPercentage float64 14 | Client http.Client 15 | 16 | results *uptime.ControlPlaneResults 17 | waitGroup sync.WaitGroup 18 | mutex sync.Mutex 19 | } 20 | 21 | func (r *Request) Request(route string, delay time.Duration, sampleTime time.Duration) { 22 | r.waitGroup.Add(1) 23 | go func() { 24 | time.Sleep(delay) 25 | 26 | startTime := time.Now() 27 | var result uptime.DataPlaneResults 28 | for { 29 | if time.Since(startTime) > sampleTime { 30 | r.addResult(result) 31 | r.waitGroup.Done() 32 | return 33 | } 34 | 35 | resp, err, requestLatency := r.timeGetRequest(route) 36 | if err != nil { 37 | result.RecordError(err) 38 | continue 39 | } 40 | 41 | hasStatusOK := resp.StatusCode == http.StatusOK 42 | hasMetRequestLatencySLO := requestLatency < r.DataPlaneSLOMaxRequestLatency 43 | hasPassedSLI := hasStatusOK && hasMetRequestLatencySLO 44 | 45 | result.Record(hasPassedSLI, 46 | hasStatusOK, 47 | hasMetRequestLatencySLO, 48 | requestLatency) 49 | } 50 | }() 51 | } 52 | 53 | func (r *Request) Wait() { 54 | r.waitGroup.Wait() 55 | } 56 | 57 | func (r *Request) GetResults() *uptime.ControlPlaneResults { 58 | r.mutex.Lock() 59 | defer r.mutex.Unlock() 60 | 61 | return r.results 62 | } 63 | 64 | func (r *Request) addResult(result uptime.DataPlaneResults) { 65 | r.mutex.Lock() 66 | defer r.mutex.Unlock() 67 | 68 | if r.results == nil { 69 | r.results = &uptime.ControlPlaneResults{ 70 | ControlPlaneSLODataPlaneAvailabilityPercentage: r.ControlPlaneSLODataPlaneAvailabilityPercentage, 71 | } 72 | } 73 | 74 | r.results.AddResult(&result) 75 | } 76 | 77 | func (r *Request) timeGetRequest(requestURL string) (*http.Response, error, time.Duration) { 78 | start := time.Now() 79 | resp, err := r.Client.Get(requestURL) 80 | requestLatency := time.Since(start) 81 | return resp, err, requestLatency 82 | } 83 | -------------------------------------------------------------------------------- /test/uptime/internal/uptime/control_plane_results.go: -------------------------------------------------------------------------------- 1 | package uptime 2 | 3 | import "fmt" 4 | 5 | type ControlPlaneResults struct { 6 | ControlPlaneSLODataPlaneAvailabilityPercentage float64 7 | 8 | collectorResults []*DataPlaneResults 9 | 10 | dataReady bool 11 | sliPassCount int 12 | sliFailCount int 13 | successPercentage float64 14 | } 15 | 16 | func (c *ControlPlaneResults) AddResult(result *DataPlaneResults) { 17 | c.collectorResults = append(c.collectorResults, result) 18 | c.dataReady = false 19 | } 20 | 21 | func (c *ControlPlaneResults) PrintResults() { 22 | c.prepareResults() 23 | 24 | fmt.Println("Control Plane Uptime SLI:") 25 | fmt.Printf("\tPass: %d\n", c.sliPassCount) 26 | fmt.Printf("\tFail: %d\n", c.sliFailCount) 27 | fmt.Printf("\tSuccess Percentage: 
%.2f%%\n", c.successPercentage*100) 28 | fmt.Println("Request Samples:") 29 | for i, result := range c.collectorResults { 30 | fmt.Printf("\tSample %d: Total Samples: %d\tErrors: %d\t Non-200 Codes: %d\t Exceeded Request Latency: %d\n", 31 | i, 32 | c.sliPassCount+c.sliFailCount, 33 | len(result.Errors), 34 | result.NumberOfUnexpectedResponseCodes, 35 | result.NumberOfExceededSLORequestLatencies, 36 | ) 37 | } 38 | } 39 | 40 | func (c *ControlPlaneResults) SuccessPercentage() float64 { 41 | c.prepareResults() 42 | 43 | return c.successPercentage 44 | } 45 | 46 | func (c *ControlPlaneResults) prepareResults() { 47 | if !c.dataReady { 48 | for _, result := range c.collectorResults { 49 | if result.SuccessPercentage() > c.ControlPlaneSLODataPlaneAvailabilityPercentage { 50 | c.sliPassCount++ 51 | } else { 52 | c.sliFailCount++ 53 | } 54 | } 55 | 56 | c.successPercentage = float64(c.sliPassCount) / float64((c.sliPassCount + c.sliFailCount)) 57 | c.dataReady = true 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /test/uptime/internal/uptime/data_plane_results.go: -------------------------------------------------------------------------------- 1 | package uptime 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | type DataPlaneResults struct { 9 | SliPassCount int 10 | SliFailCount int 11 | NumberOfUnexpectedResponseCodes int 12 | NumberOfExceededSLORequestLatencies int 13 | Errors []error 14 | RequestLatencies []time.Duration 15 | } 16 | 17 | func (u *DataPlaneResults) PrintResults() { 18 | fmt.Println("Data Plane Uptime SLI:") 19 | fmt.Printf("\tPass: %d\n", u.SliPassCount) 20 | fmt.Printf("\tFail: %d\n", u.SliFailCount) 21 | fmt.Printf("\tSuccess Percentage: %.2f%%\n", u.SuccessPercentage()*100) 22 | fmt.Printf("Number Of Request Errors: %d\n", len(u.Errors)) 23 | fmt.Printf("Number Of Unexpected Response Codes: %d\n", u.NumberOfUnexpectedResponseCodes) 24 | fmt.Printf("Number Of Exceeded SLO Request Latencies: %d\n", u.NumberOfExceededSLORequestLatencies) 25 | fmt.Println("Response Time:") 26 | low, high, avg := u.calculateRequestLatencyStats() 27 | fmt.Printf("\tLowest: %s\n", low.String()) 28 | fmt.Printf("\tHighest: %s\n", high.String()) 29 | fmt.Printf("\tAverage: %s\n", avg.String()) 30 | if len(u.Errors) > 0 { 31 | fmt.Println("Errors:") 32 | for errString, count := range u.uniqErrors() { 33 | fmt.Printf("\t%dx: %s\n", count, errString) 34 | } 35 | } 36 | } 37 | 38 | func (u *DataPlaneResults) SuccessPercentage() float64 { 39 | return float64(u.SliPassCount) / float64(u.SliFailCount+u.SliPassCount) 40 | } 41 | 42 | func (u *DataPlaneResults) RecordError(err error) { 43 | u.SliFailCount++ 44 | fmt.Printf("HAD AN ERROR: %+v\n", err) 45 | u.Errors = append(u.Errors, err) 46 | } 47 | 48 | func (u *DataPlaneResults) Record(hasPassedSLI, hasStatusOK, hasMetRequestLatencySLO bool, requestLatency time.Duration) { 49 | if hasPassedSLI { 50 | u.SliPassCount++ 51 | } else { 52 | u.SliFailCount++ 53 | } 54 | 55 | if !hasStatusOK { 56 | u.NumberOfUnexpectedResponseCodes++ 57 | } 58 | 59 | if !hasMetRequestLatencySLO { 60 | u.NumberOfExceededSLORequestLatencies++ 61 | } 62 | 63 | u.RequestLatencies = append(u.RequestLatencies, requestLatency) 64 | } 65 | 66 | func (u *DataPlaneResults) uniqErrors() map[string]int { 67 | uniqErrs := map[string]int{} 68 | for _, err := range u.Errors { 69 | uniqErrs[err.Error()] = uniqErrs[err.Error()] + 1 70 | } 71 | 72 | return uniqErrs 73 | } 74 | 75 | func (u *DataPlaneResults) calculateRequestLatencyStats() 
(time.Duration, time.Duration, time.Duration) { 76 | var low, high, avg, accumulator time.Duration 77 | if len(u.RequestLatencies) == 0 { 78 | return low, high, avg 79 | } 80 | 81 | low = u.RequestLatencies[0] 82 | for _, time := range u.RequestLatencies { 83 | if low > time { 84 | low = time 85 | } 86 | if high < time { 87 | high = time 88 | } 89 | 90 | accumulator += time 91 | } 92 | 93 | avg = time.Duration(int64(accumulator) / int64(len(u.RequestLatencies))) 94 | 95 | return low, high, avg 96 | } 97 | -------------------------------------------------------------------------------- /version: -------------------------------------------------------------------------------- 1 | 0.6.0 2 | --------------------------------------------------------------------------------