├── .dependabot └── config.yml ├── .editorconfig ├── .gitattributes ├── .gitignore ├── .secrets.baseline ├── .terraform-version ├── Brewfile ├── CODEOWNERS ├── LICENSE ├── README-old.md ├── README.md ├── charts ├── gsp-cluster │ ├── .helmignore │ ├── Chart.yaml │ ├── README.md │ ├── charts │ │ ├── cert-manager-v0.11.0.tgz │ │ ├── cluster-autoscaler-7.1.0.tgz │ │ ├── concourse-11.4.0.tgz │ │ ├── fluentd-cloudwatch │ │ │ ├── Chart.yaml │ │ │ ├── README.md │ │ │ ├── templates │ │ │ │ ├── NOTES.txt │ │ │ │ ├── _helpers.tpl │ │ │ │ ├── clusterrole.yaml │ │ │ │ ├── clusterrolebinding.yaml │ │ │ │ ├── configmap.yaml │ │ │ │ ├── daemonset.yaml │ │ │ │ ├── psp-clusterrole.yaml │ │ │ │ ├── psp-clusterrolebinding.yaml │ │ │ │ ├── psp.yaml │ │ │ │ ├── secrets.yaml │ │ │ │ └── serviceaccount.yaml │ │ │ └── values.yaml │ │ └── gsp-monitoring │ │ │ ├── Chart.yaml │ │ │ ├── charts │ │ │ └── prometheus-operator-8.15.6.tgz │ │ │ ├── dashboards │ │ │ ├── concourse-internal.json │ │ │ ├── pod-availability.json │ │ │ └── sli.json │ │ │ ├── requirements.lock │ │ │ ├── requirements.yaml │ │ │ ├── templates │ │ │ ├── concourse-internal-dashboard.yaml │ │ │ ├── pod-availability-dashboard.yaml │ │ │ ├── rules-general.yaml │ │ │ ├── rules-kubernetes-system.yaml │ │ │ ├── rules-slis.yaml │ │ │ └── sli-dashboard.yaml │ │ │ └── values.yaml │ ├── dashboards │ │ └── concourse.json │ ├── policies │ │ ├── README.md │ │ ├── digests-on-images │ │ │ ├── src.rego │ │ │ └── src_test.rego │ │ ├── isolate-tenant-istio-resources │ │ │ ├── src.rego │ │ │ └── src_test.rego │ │ └── restrict-special-nodes │ │ │ ├── src.rego │ │ │ └── src_test.rego │ ├── requirements.lock │ ├── requirements.yaml │ ├── templates │ │ ├── 00-aws-auth │ │ │ ├── auditor-cluster-role-binding.yaml │ │ │ ├── auditor-cluster-role.yaml │ │ │ ├── aws-auth.yaml │ │ │ ├── aws-vpc-cni.yaml │ │ │ ├── cert-manager-crds.yaml │ │ │ ├── coredns.yaml │ │ │ ├── default-storage-class.yaml │ │ │ ├── gsp-default-psp-cluster-role.yaml │ │ │ ├── 
gsp-default-psp-default-role-binding.yaml │ │ │ ├── gsp-default-psp.yaml │ │ │ ├── gsp-priorityclass.yaml │ │ │ ├── gsp-system-namespace.yaml │ │ │ ├── istio-namespace.yaml │ │ │ ├── kube-proxy-config.yaml │ │ │ ├── kube-proxy.yaml │ │ │ ├── kube-system-namespace.yaml │ │ │ ├── main-team-namespace.yaml │ │ │ ├── managed-namespaces.yaml │ │ │ ├── operator-cluster-role.yaml │ │ │ └── psp-system-bindings.yaml │ │ ├── 01-aws-system │ │ │ ├── aws-ssm-agent-daemonset.yaml │ │ │ └── kiam-service-entry.yaml │ │ ├── 02-gsp-system │ │ │ ├── calico.yaml │ │ │ ├── cloudhsm-service-entry.yaml │ │ │ ├── cloudhsm-service.yaml │ │ │ ├── concourse-destinationrule.yaml │ │ │ ├── concourse-service-monitor.yaml │ │ │ ├── concourse-web-virtual-service.yaml │ │ │ ├── egress-networkpolicy.yaml │ │ │ ├── egress-safelist.yaml │ │ │ ├── external-dns-clusterrole.yaml │ │ │ ├── gatekeeper │ │ │ │ ├── constraints │ │ │ │ │ ├── digests-on-internal-registry.yaml │ │ │ │ │ ├── isolate-tenant-istio-resources.yaml │ │ │ │ │ └── tolerate-special-nodes.yaml │ │ │ │ ├── gatekeeper.yaml │ │ │ │ └── templates │ │ │ │ │ ├── digests-on-internal-registry-images.yaml │ │ │ │ │ ├── isolate-tenant-istio-resources.yaml │ │ │ │ │ └── tolerate-special-nodes.yaml │ │ │ ├── grafana-concourse-dashboard.yaml │ │ │ ├── grafana-destinationrule.yaml │ │ │ ├── grafana-google-oauth-client-secret.yaml │ │ │ ├── grafana-virtual-service.yaml │ │ │ ├── ingress-certificate.yaml │ │ │ ├── ingress-gateways.yaml │ │ │ ├── kubernetes-dashboard.yaml │ │ │ ├── leader_election_role.yaml │ │ │ ├── leader_election_role_binding.yaml │ │ │ ├── letsencrypt-r53.yaml │ │ │ ├── main-gateway.yaml │ │ │ ├── metrics-server-apiservice.yaml │ │ │ ├── metrics-server-auth-delegator.yaml │ │ │ ├── metrics-server-auth-reader.yaml │ │ │ ├── metrics-server-deployment.yaml │ │ │ ├── metrics-server-resource-reader.yaml │ │ │ ├── metrics-server-service.yaml │ │ │ ├── pipeline-operator-pipeline-crd.yaml │ │ │ ├── pipeline-operator-role-binding.yaml │ │ │ 
├── pipeline-operator-role.yaml │ │ │ ├── pipeline-operator-sa.yaml │ │ │ ├── pipeline-operator-secret.yaml │ │ │ ├── pipeline-operator-svc.yaml │ │ │ ├── pipeline-operator-team-crd.yaml │ │ │ ├── pipeline-operator-webhook-secret.yaml │ │ │ ├── pipeline-operator.yaml │ │ │ ├── route53-service-entry.yaml │ │ │ ├── sealed-secrets-controller.yaml │ │ │ ├── sealed-secrets-crd.yaml │ │ │ ├── sealed-secrets-secret.yaml │ │ │ ├── service-operator │ │ │ │ ├── crd │ │ │ │ │ ├── access.govsvc.uk_principals.yaml │ │ │ │ │ ├── database.govsvc.uk_postgres.yaml │ │ │ │ │ ├── database.govsvc.uk_redis.yaml │ │ │ │ │ ├── queue.govsvc.uk_sqs.yaml │ │ │ │ │ ├── storage.govsvc.uk_imagerepositories.yaml │ │ │ │ │ └── storage.govsvc.uk_s3buckets.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ ├── role-binding.yaml │ │ │ │ ├── role.yaml │ │ │ │ ├── service-account.yaml │ │ │ │ └── service.yaml │ │ │ └── sts-service-entry.yaml │ │ ├── 03-namespaces │ │ │ ├── external-dns.yaml │ │ │ └── namespace.yaml │ │ ├── 04-main-team-pipelines │ │ │ ├── canary-image-repository.yaml │ │ │ ├── canary-principal.yaml │ │ │ └── cd-smoke-test-pipeline.yaml │ │ └── _helpers.tpl │ └── values.yaml └── gsp-istio │ ├── .helmignore │ ├── Chart.yaml │ ├── charts │ ├── istio-1.5.8.tgz │ ├── istio-cni-1.5.8.tgz │ └── istio-init-1.5.8.tgz │ ├── requirements.lock │ ├── requirements.yaml │ └── values.yaml ├── components ├── aws-node-lifecycle-hook │ ├── go.mod │ ├── go.sum │ ├── main.go │ └── pkg │ │ ├── awsclient │ │ ├── client.go │ │ ├── fakeawsclient │ │ │ └── fake_awsclient.go │ │ └── suite_test.go │ │ ├── k8sclient │ │ ├── client.go │ │ ├── fakek8sclient │ │ │ ├── fake_corev1.go │ │ │ ├── fake_k8sclient.go │ │ │ └── fake_nodeinterface.go │ │ └── suite_test.go │ │ ├── k8sdrainer │ │ ├── drain.go │ │ ├── fakek8sdrainer │ │ │ └── fake_drainer.go │ │ └── suite_test.go │ │ ├── lifecycle │ │ ├── handler.go │ │ ├── handler_test.go │ │ ├── heartbeat.go │ │ └── suite_test.go │ │ └── tools │ │ └── tools.go ├── aws-ssm-agent │ ├── 
Dockerfile │ └── README ├── canary │ ├── Dockerfile │ ├── Makefile │ ├── README.md │ ├── chart │ │ ├── .helmignore │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── _helpers.tpl │ │ │ ├── deployment.yaml │ │ │ ├── gateway.yaml │ │ │ ├── ingress-certificate.yaml │ │ │ ├── prometheus-rules.yaml │ │ │ ├── service-monitor.yaml │ │ │ ├── service.yaml │ │ │ └── virtual-service.yaml │ │ └── values.yaml │ └── main.go ├── cloudhsm-client-test │ ├── Dockerfile │ └── cloudhsm-test-deployment.yaml ├── concourse-github-resource │ ├── .gitignore │ ├── Dockerfile │ ├── LICENSE │ ├── README.md │ ├── assets │ │ ├── check │ │ ├── in │ │ └── out │ ├── test-stringy.json │ └── test.json ├── concourse-operator │ ├── .gitignore │ ├── Dockerfile │ ├── Gopkg.lock │ ├── Gopkg.toml │ ├── Makefile │ ├── PROJECT │ ├── README.md │ ├── cmd │ │ └── manager │ │ │ └── main.go │ ├── config │ │ ├── crds │ │ │ ├── concourse_v1beta1_pipeline.yaml │ │ │ └── concourse_v1beta1_team.yaml │ │ ├── default │ │ │ ├── kustomization.yaml │ │ │ └── manager_image_patch.yaml │ │ ├── manager │ │ │ └── manager.yaml │ │ ├── rbac │ │ │ ├── rbac_role.yaml │ │ │ └── rbac_role_binding.yaml │ │ └── samples │ │ │ ├── concourse_v1beta1_pipeline.yaml │ │ │ ├── concourse_v1beta1_pipeline_BAD.yaml │ │ │ └── concourse_v1beta1_team.yaml │ ├── hack │ │ └── boilerplate.go.txt │ └── pkg │ │ ├── apis │ │ ├── addtoscheme_concourse_v1beta1.go │ │ ├── apis.go │ │ └── concourse │ │ │ ├── group.go │ │ │ └── v1beta1 │ │ │ ├── doc.go │ │ │ ├── pipeline_types.go │ │ │ ├── pipeline_types_test.go │ │ │ ├── register.go │ │ │ ├── team_types.go │ │ │ ├── team_types_test.go │ │ │ ├── v1beta1_suite_test.go │ │ │ └── zz_generated.deepcopy.go │ │ ├── controller │ │ ├── add_pipeline.go │ │ ├── add_team.go │ │ ├── client.go │ │ ├── controller.go │ │ ├── pipeline │ │ │ ├── pipeline_controller.go │ │ │ ├── pipeline_controller_suite_test.go │ │ │ └── pipeline_controller_test.go │ │ └── team │ │ │ ├── team_controller.go │ │ │ ├── team_controller_suite_test.go 
│ │ │ └── team_controller_test.go │ │ └── webhook │ │ ├── add_default_server.go │ │ ├── default_server │ │ ├── add_validating_pipeline.go │ │ ├── pipeline │ │ │ └── validating │ │ │ │ ├── create_update_webhook.go │ │ │ │ ├── pipeline_create_update_handler.go │ │ │ │ ├── pipeline_create_update_handler_test.go │ │ │ │ └── webhooks.go │ │ └── server.go │ │ └── webhook.go ├── concourse-task-toolbox │ ├── Dockerfile │ ├── README.md │ ├── VERSION │ └── bin │ │ ├── aws-assume-role │ │ ├── determine-platform-version.py │ │ ├── findCVEs.py │ │ └── setup-kube-deployer ├── concourse-terraform-resource │ ├── Dockerfile │ ├── README.md │ └── VERSION └── service-operator │ ├── .dockerignore │ ├── .gitignore │ ├── Dockerfile │ ├── Makefile │ ├── PROJECT │ ├── README.md │ ├── apis │ ├── access │ │ └── v1beta1 │ │ │ ├── groupversion_info.go │ │ │ ├── principal_types.go │ │ │ ├── principal_types_test.go │ │ │ ├── suite_test.go │ │ │ └── zz_generated.deepcopy.go │ ├── database │ │ └── v1beta1 │ │ │ ├── groupversion_info.go │ │ │ ├── postgres_types.go │ │ │ ├── postgres_types_test.go │ │ │ ├── redis_types.go │ │ │ ├── redis_types_test.go │ │ │ ├── suite_test.go │ │ │ └── zz_generated.deepcopy.go │ ├── queue │ │ └── v1beta1 │ │ │ ├── groupversion_info.go │ │ │ ├── sqs_types.go │ │ │ ├── sqs_types_test.go │ │ │ ├── suite_test.go │ │ │ └── zz_generated.deepcopy.go │ └── storage │ │ └── v1beta1 │ │ ├── groupversion_info.go │ │ ├── image_repository_types.go │ │ ├── image_repository_types_test.go │ │ ├── s3_types.go │ │ ├── s3_types_test.go │ │ ├── suite_test.go │ │ └── zz_generated.deepcopy.go │ ├── config │ ├── crd │ │ └── serviceentries.yaml │ └── rbac │ │ └── role-not-patch.yaml │ ├── controllers │ ├── image_repository_cloudformation.go │ ├── image_repository_cloudformation_test.go │ ├── postgres_cloudformation.go │ ├── postgres_cloudformation_test.go │ ├── principal_cloudformation.go │ ├── principal_cloudformation_test.go │ ├── redis_cloudformation.go │ ├── redis_cloudformation_test.go 
│ ├── s3_cloudformation.go │ ├── s3_cloudformation_test.go │ ├── serviceaccount.go │ ├── serviceaccount_test.go │ ├── sqs_cloudformation.go │ ├── sqs_cloudformation_test.go │ ├── suite_test.go │ └── types.go │ ├── examples │ ├── image-respoitory.yaml │ ├── postgres.yaml │ ├── principal.yaml │ ├── s3.yaml │ └── sqs.yaml │ ├── go.mod │ ├── go.sum │ ├── hack │ ├── boilerplate.go.txt │ └── test_integration.sh │ ├── internal │ ├── aws │ │ ├── cloudformation │ │ │ ├── client.go │ │ │ ├── client_test.go │ │ │ ├── cloudformationfakes │ │ │ │ └── fake_stack.go │ │ │ ├── controller.go │ │ │ ├── ecr_policy.go │ │ │ ├── iam.go │ │ │ ├── suite_test.go │ │ │ └── types.go │ │ ├── ecr │ │ │ └── login.go │ │ ├── policy_types.go │ │ └── sdk │ │ │ ├── client.go │ │ │ ├── client_test.go │ │ │ ├── sdk_suite_test.go │ │ │ └── sdkfakes │ │ │ ├── fake_client.go │ │ │ ├── fake_client_happy.go │ │ │ └── fake_error.go │ ├── env │ │ ├── environment.go │ │ ├── environment_test.go │ │ └── suite_test.go │ ├── istio │ │ └── schemebuilder.go │ └── object │ │ ├── finalizers.go │ │ ├── finalizers_test.go │ │ ├── status.go │ │ ├── status_test.go │ │ ├── suite_test.go │ │ ├── types.go │ │ └── zz_generated.deepcopy.go │ ├── main.go │ ├── redisandstunnel │ ├── Dockerfile │ └── stunnel.conf │ └── tools │ └── tools.go ├── docs ├── README.md ├── architecture │ ├── adr │ │ ├── ADR000-template.md │ │ ├── ADR001-support-model.md │ │ ├── ADR002-containers.md │ │ ├── ADR003-container-orchestration.md │ │ ├── ADR004-tenant-isolation.md │ │ ├── ADR005-ingress.md │ │ ├── ADR006-authentication-method.md │ │ ├── ADR007-identity-provider.md │ │ ├── ADR008-continuous-delivery-workflow.md │ │ ├── ADR009-multitenant-ci-cd.md │ │ ├── ADR010-placement-of-ci-cd-tools.md │ │ ├── ADR011-build-artefacts.md │ │ ├── ADR012-docker-image-repositories.md │ │ ├── ADR013-ci-cd-tools.md │ │ ├── ADR014-sealed-secrets.md │ │ ├── ADR015-aws-iam-authentication.md │ │ ├── ADR016-code-verification.md │ │ ├── 
ADR017-vendor-provided-container-orchestration.md │ │ ├── ADR018-local-development.md │ │ ├── ADR019-service-mesh.md │ │ ├── ADR020-metrics.md │ │ ├── ADR021-alerting.md │ │ ├── ADR022-logging.md │ │ ├── ADR023-cluster-authentication.md │ │ ├── ADR024-soft-multitenancy.md │ │ ├── ADR025-ingress.md │ │ ├── ADR028-container-tools.md │ │ ├── ADR029-continuous-delivery-tools.md │ │ ├── ADR030-aws-service-operator.md │ │ ├── ADR031-postgres.md │ │ ├── ADR032-sre-permissions.md │ │ ├── ADR033-nlb-for-mtls.md │ │ ├── ADR034-one-service-operator-different-resource-kinds.md │ │ ├── ADR035-aurora-postgres.md │ │ ├── ADR036-hsm-isolation-in-detail.md │ │ ├── ADR037-per-namespace-gateways.md │ │ ├── ADR038-sre-permissions-istio.md │ │ ├── ADR039-cloudhsm-namespace-network-policy.md │ │ ├── ADR040-cluster-stability-node-replacement.md │ │ ├── ADR041-service-operated-policies.md │ │ ├── ADR042-static-ingress-ip-workaround.md │ │ ├── ADR043-k8s-resource-access.md │ │ ├── ADR044-security-improvements.md │ │ ├── ADR045-dev-namespaces.md │ │ ├── ADR046-replace-harbor-ecr.md │ │ ├── ADR047-replace-kiam-with-iam-roles-for-service-accounts.md │ │ └── README.md │ ├── diagrams │ │ ├── gsp-architecture-continuous-delivery.svg │ │ ├── gsp-architecture-infrastructure-1.svg │ │ ├── gsp-architecture-overview-1.svg │ │ └── gsp-architecture-overview-2.svg │ ├── gsp-architecture-cloud-infrastructure.md │ ├── gsp-architecture-continuous-deployment.md │ ├── gsp-architecture-local.md │ ├── gsp-architecture-overview.md │ ├── gsp-architecture.md │ └── notes │ │ └── global-accelerator-spike.md ├── assets │ ├── gsp.png │ └── paas-spectrum.png ├── docs_scoping.md ├── gds-supported-platform │ ├── accessing-concourse.md │ ├── accessing-dashboard.md │ ├── bootstrapping-clusters.md │ ├── external-dns.md │ ├── grafana.md │ ├── gsp-service-operator.md │ ├── helm-chart-best-practices.md │ ├── internal-images-require-digests.md │ ├── per-namespace-gateway.md │ ├── permissions.md │ ├── 
prometheus_alert_manager_grafana.md │ ├── recover-gatekeeper-outage.md │ ├── resizing-persistent-volumes.md │ ├── sealing-secrets.md │ ├── setting-up-hsm.md │ ├── tls-certificates.md │ └── updating-EKS.md └── incident-reports │ ├── 20190204-kiam-breaks-harbor.md │ └── 20190227-control-plane-and-harbor-outage.md ├── hack ├── Vagrantfile ├── hsm │ └── .gitkeep ├── lint-terraform-values-output.sh ├── set-deployer-pipeline.sh ├── set-release-pipeline.sh ├── validate-deployer-pipeline.sh └── validate-release-pipeline.sh ├── modules ├── gsp-cluster │ ├── cert-manager.tf │ ├── cluster-autoscaler.tf │ ├── data │ │ └── values.yaml │ ├── external-dns.tf │ ├── grafana.tf │ ├── main.tf │ ├── monitoring-system.tf │ ├── nlb.tf │ ├── outputs.tf │ ├── secrets-system.tf │ ├── service-operator.tf │ ├── splunk-system.tf │ ├── values.tf │ └── variables.tf ├── gsp-domain │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── gsp-network │ ├── network.tf │ ├── outputs.tf │ └── variables.tf ├── gsp-subnet │ ├── outputs.tf │ ├── private.tf │ ├── public.tf │ └── variables.tf ├── gsp-user │ ├── iam.tf │ └── variables.tf ├── k8s-cluster │ ├── aws-node-lifecycle-hook.tf │ ├── data │ │ ├── kubeconfig │ │ ├── nodegroup-v2.yaml │ │ └── nodegroup.yaml │ ├── iam.tf │ ├── main.tf │ ├── outputs.tf │ ├── security.tf │ └── variables.tf └── lambda_splunk_forwarder │ ├── cyber-cloudwatch-fluentd-to-hec.zip │ ├── iam.tf │ ├── lambda.tf │ └── variables.tf ├── pipelines ├── README.md ├── deployer │ ├── deployer.defaults.yaml │ ├── deployer.tf │ ├── deployer.yaml │ └── main.tf ├── examples │ ├── clusters │ │ └── sandbox.yaml │ ├── namespaces │ │ └── sandbox-canary │ │ │ ├── deployment-keys.yaml │ │ │ └── namespace.yaml │ └── users │ │ ├── chris.farmiloe.yaml │ │ ├── daniel.blair.yaml │ │ ├── sam.crang.yaml │ │ └── stephen.ford.yaml ├── release │ └── release.yaml └── tasks │ ├── bump-semver.yaml │ └── generate-trusted-contributors.yaml └── templates ├── managed-namespaces-gateways.yaml └── 
managed-namespaces-zones.tf /.dependabot/config.yml: -------------------------------------------------------------------------------- 1 | version: 1 2 | 3 | # This controls how dependabot creates PRs for dependencies: 4 | # See: https://dependabot.com/docs/config-file/ 5 | 6 | update_configs: 7 | 8 | - package_manager: "docker" 9 | directory: /components/service-operator 10 | update_schedule: daily 11 | - package_manager: "go:modules" 12 | directory: /components/service-operator 13 | update_schedule: daily 14 | 15 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | end_of_line = lf 5 | insert_final_newline = true 6 | charset = utf-8 7 | 8 | [*.{yml,yaml}] 9 | indent_style = space 10 | indent_size = 2 11 | 12 | [*.sh] 13 | indent_style = tab 14 | indent_size = 4 15 | 16 | [Makefile] 17 | indent_style = tab 18 | 19 | [*.{tf,tfvars}] 20 | indent_size = 2 21 | indent_style = space 22 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | go.sum linguist-generated=true 2 | zz_generated.deepcopy.go linguist-generated=true 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | **/terraform.tfstate* 3 | manifests/ 4 | 5 | # ignore vendor dirs 6 | components/concourse-operator/vendor 7 | 8 | # ignore cloudhsm setup files 9 | hack/hsm/* 10 | hack/.vagrant 11 | hack/*log 12 | 13 | # generated files 14 | output/ 15 | *.coverprofile 16 | .idea/ 17 | modules/k8s-cluster/aws-node-lifecycle-hook.zip 18 | pipelines/deployer/managed-namespaces-zones.tf 19 | -------------------------------------------------------------------------------- /.terraform-version: 
-------------------------------------------------------------------------------- 1 | 0.12.12 2 | -------------------------------------------------------------------------------- /Brewfile: -------------------------------------------------------------------------------- 1 | tap "homebrew/core" 2 | 3 | brew "kubernetes-cli" 4 | brew "kubernetes-helm" 5 | brew "minikube" 6 | brew "opa" 7 | 8 | if OS.mac? 9 | brew "hyperkit" 10 | brew "docker-machine-driver-hyperkit" 11 | end 12 | -------------------------------------------------------------------------------- /CODEOWNERS: -------------------------------------------------------------------------------- 1 | # See https://help.github.com/en/articles/about-code-owners 2 | * @alphagov/re-autom8 @alphagov/re-gsp !@issyl0 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2018 Crown Copyright (Government Digital Service) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /charts/gsp-cluster/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /charts/gsp-cluster/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A chart for gsp curated components 4 | name: gsp-cluster 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /charts/gsp-cluster/README.md: -------------------------------------------------------------------------------- 1 | # How to upgrade 2 | 3 | In general copy the [instructions provided by AWS](https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html). 
4 | 5 | It all nails down to: 6 | 7 | ```sh 8 | DOWNLOAD_URL=$(curl --silent "https://api.github.com/repos/kubernetes-incubator/metrics-server/releases/latest" | jq -r .tarball_url) 9 | DOWNLOAD_VERSION=$(grep -o '[^/v]*$' <<< $DOWNLOAD_URL) 10 | curl -Ls $DOWNLOAD_URL -o metrics-server-$DOWNLOAD_VERSION.tar.gz 11 | mkdir metrics-server-$DOWNLOAD_VERSION 12 | tar -xzf metrics-server-$DOWNLOAD_VERSION.tar.gz --directory metrics-server-$DOWNLOAD_VERSION --strip-components 1 13 | ``` 14 | 15 | With additional command of: 16 | 17 | ```sh 18 | mv metrics-server-$DOWNLOAD_VERSION/deploy/1.8+ ./metrics-server 19 | ``` 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/cert-manager-v0.11.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/charts/gsp-cluster/charts/cert-manager-v0.11.0.tgz -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/cluster-autoscaler-7.1.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/charts/gsp-cluster/charts/cluster-autoscaler-7.1.0.tgz -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/concourse-11.4.0.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/charts/gsp-cluster/charts/concourse-11.4.0.tgz -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | name: fluentd-cloudwatch 3 | version: 0.13.0 4 | appVersion: 
v1.7.3-debian-cloudwatch-1.0 5 | description: A Fluentd CloudWatch Helm chart for Kubernetes. 6 | home: https://www.fluentd.org/ 7 | icon: https://raw.githubusercontent.com/fluent/fluentd-docs/master/public/logo/Fluentd_square.png 8 | keywords: 9 | - fluentd 10 | - cloudwatch 11 | - logging 12 | sources: 13 | - https://github.com/kubernetes/charts 14 | - https://github.com/fluent/fluentd-kubernetes-daemonset 15 | maintainers: 16 | - name: jmcarp 17 | email: jm.carp@gmail.com 18 | - name: icereval 19 | email: michael.haselton@gmail.com 20 | - name: kenden 21 | email: quentin.nerden@stylight.com 22 | engine: gotpl 23 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/NOTES.txt: -------------------------------------------------------------------------------- 1 | To verify that Fluentd Cloudwatch has started, run: 2 | 3 | kubectl --namespace={{ .Release.Namespace }} get pods -l "app={{ template "fluentd-cloudwatch.name" . }},release={{ .Release.Name }}" 4 | 5 | THIS APPLICATION CAPTURES ALL CONSOLE OUTPUT AND FORWARDS IT TO AWS CLOUDWATCH. Anything that might be identifying, 6 | including things like IP addresses, container images, and object names will NOT be anonymized. 7 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "fluentd-cloudwatch.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 
13 | */}} 14 | {{- define "fluentd-cloudwatch.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 29 | */}} 30 | {{- define "fluentd-cloudwatch.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRole 4 | metadata: 5 | name: {{ template "fluentd-cloudwatch.fullname" . }} 6 | labels: 7 | app: {{ template "fluentd-cloudwatch.name" . }} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | rules: 12 | - apiGroups: [""] 13 | resources: ["namespaces", "pods"] 14 | verbs: ["get", "list", "watch"] 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.rbac.create }} 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "fluentd-cloudwatch.fullname" . }} 6 | labels: 7 | app: {{ template "fluentd-cloudwatch.name" . 
}} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | subjects: 12 | - kind: ServiceAccount 13 | name: {{ template "fluentd-cloudwatch.fullname" . }} 14 | namespace: {{ .Release.Namespace }} 15 | roleRef: 16 | kind: ClusterRole 17 | name: {{ template "fluentd-cloudwatch.fullname" . }} 18 | apiGroup: rbac.authorization.k8s.io 19 | {{ end }} 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: {{ template "fluentd-cloudwatch.fullname" . }} 5 | labels: 6 | app: {{ template "fluentd-cloudwatch.name" . }} 7 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 8 | heritage: {{ .Release.Service | quote }} 9 | release: {{ .Release.Name | quote }} 10 | data: 11 | {{ toYaml .Values.data | indent 2 }} 12 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/psp-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | metadata: 5 | name: {{ template "fluentd-cloudwatch.fullname" . }}-psp 6 | labels: 7 | app: {{ template "fluentd-cloudwatch.name" . }} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | heritage: "{{ .Release.Service }}" 10 | release: "{{ .Release.Name }}" 11 | rules: 12 | - apiGroups: ['extensions'] 13 | resources: ['podsecuritypolicies'] 14 | verbs: ['use'] 15 | resourceNames: 16 | - {{ template "fluentd-cloudwatch.fullname" . 
}} 17 | {{- end }} 18 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/psp-clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: {{ template "fluentd-cloudwatch.fullname" . }}-psp 6 | labels: 7 | app: {{ template "fluentd-cloudwatch.name" . }} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | heritage: "{{ .Release.Service }}" 10 | release: "{{ .Release.Name }}" 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: {{ template "fluentd-cloudwatch.fullname" . }}-psp 15 | subjects: 16 | - kind: ServiceAccount 17 | name: {{ if .Values.rbac.create }}{{ template "fluentd-cloudwatch.fullname" . }}{{ else }}"{{ .Values.rbac.serviceAccountName }}"{{ end }} 18 | namespace: {{ .Release.Namespace }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/psp.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.pspEnabled }} 2 | {{- if or (.Capabilities.APIVersions.Has "policy/v1beta1") (.Values.avoidExtensionsV1Beta1ApiVersion) }} 3 | apiVersion: policy/v1beta1 4 | {{- else }} 5 | apiVersion: extensions/v1beta1 6 | {{- end }} 7 | kind: PodSecurityPolicy 8 | metadata: 9 | name: {{ template "fluentd-cloudwatch.fullname" . }} 10 | labels: 11 | app: {{ template "fluentd-cloudwatch.name" . 
}} 12 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 13 | heritage: "{{ .Release.Service }}" 14 | release: "{{ .Release.Name }}" 15 | spec: 16 | allowedCapabilities: 17 | - '*' 18 | fsGroup: 19 | rule: RunAsAny 20 | privileged: true 21 | runAsUser: 22 | rule: RunAsAny 23 | seLinux: 24 | rule: RunAsAny 25 | supplementalGroups: 26 | rule: RunAsAny 27 | volumes: 28 | - '*' 29 | hostPID: true 30 | hostIPC: true 31 | hostNetwork: true 32 | hostPorts: 33 | - min: 1 34 | max: 65536 35 | {{- end }} 36 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/secrets.yaml: -------------------------------------------------------------------------------- 1 | {{- if and (not .Values.awsRole) (and .Values.awsAccessKeyId .Values.awsSecretAccessKey) }} 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ template "fluentd-cloudwatch.fullname" . }} 6 | labels: 7 | app: {{ template "fluentd-cloudwatch.name" . }} 8 | chart: {{ .Chart.Name }}-{{ .Chart.Version }} 9 | heritage: {{ .Release.Service }} 10 | release: {{ .Release.Name }} 11 | type: Opaque 12 | data: 13 | {{- if .Values.awsAccessKeyId }} 14 | AWS_ACCESS_KEY_ID: {{ .Values.awsAccessKeyId | b64enc }} 15 | {{- end }} 16 | {{- if .Values.awsSecretAccessKey }} 17 | AWS_SECRET_ACCESS_KEY: {{ .Values.awsSecretAccessKey | b64enc }} 18 | {{- end }} 19 | {{- end }} 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/fluentd-cloudwatch/templates/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.rbac.create }} 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: {{ template "fluentd-cloudwatch.fullname" . }} 6 | labels: 7 | app: {{ template "fluentd-cloudwatch.name" . 
}} 8 | chart: "{{ .Chart.Name }}-{{ .Chart.Version }}" 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | {{- end }} 12 | {{- if .Values.rbac.serviceAccountAnnotations }} 13 | annotations: 14 | {{ toYaml .Values.rbac.serviceAccountAnnotations | nindent 4 }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/gsp-monitoring/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: Monitoring components for a GSP cluster 4 | name: gsp-monitoring 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/gsp-monitoring/charts/prometheus-operator-8.15.6.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/charts/gsp-cluster/charts/gsp-monitoring/charts/prometheus-operator-8.15.6.tgz -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/gsp-monitoring/requirements.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: prometheus-operator 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 8.15.6 5 | digest: sha256:3f4bd028e55b603bb4b301a5e49cfd33efc9e2a9c68fcc39e0872fd5e7e22d61 6 | generated: "2020-06-25T12:22:36.893073665+01:00" 7 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/gsp-monitoring/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: "prometheus-operator" 3 | version: 8.15.6 4 | repository: https://kubernetes-charts.storage.googleapis.com/ 5 | 
-------------------------------------------------------------------------------- /charts/gsp-cluster/charts/gsp-monitoring/templates/concourse-internal-dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: '{{ .Release.Name }}-grafana-concourse-internal-dashboard' 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | grafana_dashboard: "1" 9 | data: 10 | concourse-internal.json: |- 11 | {{ .Files.Get "dashboards/concourse-internal.json" | indent 4 }} 12 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/gsp-monitoring/templates/pod-availability-dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: '{{ .Release.Name }}-grafana-pod-availability-dashboard' 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | grafana_dashboard: "1" 9 | data: 10 | pod-availability.json: |- 11 | {{ .Files.Get "dashboards/pod-availability.json" | indent 4 }} 12 | -------------------------------------------------------------------------------- /charts/gsp-cluster/charts/gsp-monitoring/templates/sli-dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: '{{ .Release.Name }}-grafana-sli-dashboard' 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | grafana_dashboard: "1" 9 | data: 10 | sli.json: |- 11 | {{ .Files.Get "dashboards/sli.json" | indent 4 }} 12 | -------------------------------------------------------------------------------- /charts/gsp-cluster/policies/README.md: -------------------------------------------------------------------------------- 1 | # OPA policies 2 | 3 | ## Running Tests 4 | 5 | All together: 6 | 7 | ``` 8 | $ opa test policies 9 | PASS: 21/21 10 | ``` 11 | 12 | 
Or, individually: 13 | 14 | ``` 15 | $ opa test policies/digests-on-images 16 | PASS: 5/5 17 | ``` 18 | ``` 19 | $ opa test policies/restrict-special-nodes 20 | PASS: 11/11 21 | ``` 22 | ``` 23 | $ opa test policies/isolate-tenant-istio-resources 24 | PASS: 5/5 25 | ``` 26 | -------------------------------------------------------------------------------- /charts/gsp-cluster/policies/digests-on-images/src.rego: -------------------------------------------------------------------------------- 1 | package digests_on_images 2 | 3 | violation[{"msg": msg}] { 4 | image := input.review.object.spec.containers[_].image 5 | aws_account_id := input.parameters.aws_account_id 6 | registry = sprintf("%s.dkr.ecr.eu-west-2.amazonaws.com", [aws_account_id]) 7 | 8 | startswith(image, registry) 9 | not re_match("^.*@sha256:[a-f0-9]{64}$", image) 10 | 11 | msg := sprintf("images from ecr must use digest (https://github.com/alphagov/gsp/blob/master/docs/gds-supported-platform/internal-images-require-digests.md): %v", [image]) 12 | } 13 | -------------------------------------------------------------------------------- /charts/gsp-cluster/policies/isolate-tenant-istio-resources/src.rego: -------------------------------------------------------------------------------- 1 | package isolate_tenant_istio_resources 2 | 3 | violation[{"msg": msg}] { 4 | not input.review.object.spec.exportTo 5 | input.review.object.metadata.namespace != "istio-system" 6 | msg := "exportTo should be present" 7 | } 8 | 9 | violation[{"msg": msg}] { 10 | not is_array(input.review.object.spec.exportTo) 11 | input.review.object.metadata.namespace != "istio-system" 12 | msg := "exportTo should be a list" 13 | } 14 | 15 | violation[{"msg": msg}] { 16 | exportToCount := count(input.review.object.spec.exportTo) 17 | exportToCount != 1 18 | input.review.object.metadata.namespace != "istio-system" 19 | msg := sprintf("exportTo should be a list of size 1: %v", [exportToCount]) 20 | } 21 | 22 | violation[{"msg": msg}] { 23 | 
exportToValue := input.review.object.spec.exportTo[0] 24 | exportToValue != "." 25 | input.review.object.metadata.namespace != "istio-system" 26 | msg := sprintf("exportTo should be set to '.': '%v'", [exportToValue]) 27 | } 28 | -------------------------------------------------------------------------------- /charts/gsp-cluster/requirements.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: cluster-autoscaler 3 | repository: https://kubernetes-charts.storage.googleapis.com/ 4 | version: 7.1.0 5 | - name: concourse 6 | repository: https://concourse-charts.storage.googleapis.com/ 7 | version: 11.4.0 8 | - name: cert-manager 9 | repository: https://charts.jetstack.io/ 10 | version: v0.11.0 11 | digest: sha256:7a1f4daadebb067bd9173642e3347bc05b023f3531e46b0d2099b3cbd71d419f 12 | generated: "2020-08-04T13:50:37.912398679+01:00" 13 | -------------------------------------------------------------------------------- /charts/gsp-cluster/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: "cluster-autoscaler" 3 | version: 7.1.0 4 | repository: https://kubernetes-charts.storage.googleapis.com/ 5 | condition: global.runningOnAws 6 | - name: "concourse" 7 | version: 11.4.0 8 | repository: https://concourse-charts.storage.googleapis.com/ 9 | condition: global.concourse.enabled 10 | - name: "cert-manager" 11 | version: v0.11.0 12 | repository: https://charts.jetstack.io/ 13 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/auditor-cluster-role-binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: auditor 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: auditor 10 | subjects: 11 | 
- kind: Group 12 | name: "aws-iam:authenticated-users" 13 | apiGroup: rbac.authorization.k8s.io 14 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/aws-auth.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: aws-auth 5 | namespace: kube-system 6 | data: 7 | mapRoles: | 8 | {{- range .Values.bootstrapRoleARNs }} 9 | - rolearn: {{ . | quote }} 10 | username: system:node:{{ "{{" }}EC2PrivateDNSName{{ "}}" }} 11 | groups: 12 | - system:bootstrappers 13 | - system:nodes 14 | {{- end }} 15 | {{- range .Values.adminRoleARNs }} 16 | - rolearn: {{ . | quote }} 17 | username: admin:{{ "{{" }}SessionName{{ "}}" }} 18 | groups: 19 | - system:masters 20 | {{- end }} 21 | {{- range .Values.users }} 22 | - rolearn: {{ .roleARN | quote }} 23 | username: {{ .name }} 24 | groups: 25 | - aws-iam:authenticated-users 26 | {{- range .roles }} 27 | {{- if eq .account $.Values.global.account.name }} 28 | {{- if .namespace }} 29 | {{- if eq "operator" .role }} 30 | - {{ .namespace }}-operators 31 | {{- end }} 32 | {{- else }} 33 | {{- if eq "admin" .role }} 34 | - system:masters 35 | {{- end }} 36 | {{- end }} 37 | {{- end }} 38 | {{- end }} 39 | {{- end }} 40 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/default-storage-class.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.global.runningOnAws }} 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | name: gp2 8 | parameters: 9 | fsType: ext4 10 | type: gp2 11 | provisioner: kubernetes.io/aws-ebs 12 | reclaimPolicy: Delete 13 | volumeBindingMode: WaitForFirstConsumer 14 | allowVolumeExpansion: true 15 | {{ end }} 16 | 
-------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/gsp-default-psp-cluster-role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: gsp-default-psp 6 | rules: 7 | - apiGroups: 8 | - policy 9 | resourceNames: 10 | - gsp-default 11 | resources: 12 | - podsecuritypolicies 13 | verbs: 14 | - use 15 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/gsp-default-psp-default-role-binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: gsp-default-psp 6 | namespace: default 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: gsp-default-psp 11 | subjects: 12 | - kind: Group 13 | apiGroup: rbac.authorization.k8s.io 14 | name: system:authenticated 15 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/gsp-default-psp.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: policy/v1beta1 3 | kind: PodSecurityPolicy 4 | metadata: 5 | name: gsp-default 6 | annotations: 6 | seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' 7 | spec: 8 | privileged: false 9 | allowPrivilegeEscalation: true 10 | allowedCapabilities: 11 | - '*' 12 | volumes: 13 | - 'configMap' 14 | - 'emptyDir' 15 | - 'projected' 16 | - 'secret' 17 | - 'downwardAPI' 18 | # Assume that persistentVolumes set up by the cluster admin are safe to use. 
19 | - 'persistentVolumeClaim' 20 | hostNetwork: false 21 | hostIPC: false 22 | hostPID: false 23 | runAsUser: 24 | rule: 'RunAsAny' 25 | seLinux: 26 | rule: 'RunAsAny' 27 | supplementalGroups: 28 | rule: 'RunAsAny' 29 | fsGroup: 30 | rule: 'RunAsAny' 31 | readOnlyRootFilesystem: false 32 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/gsp-priorityclass.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scheduling.k8s.io/v1 2 | kind: PriorityClass 3 | metadata: 4 | name: gsp-critical 5 | # This PriorityClass exists because the admission controller won't let 6 | # you use `system-cluster-critical` in non-`kube-system` namespaces, 7 | # but we still have pods we want to run in gsp-system at high priority 8 | # level. We create a priority class with the highest-allowed 9 | # userspace priority value (which is still lower than 10 | # system-cluster-critical) as a workaround. 11 | # See also https://github.com/kubernetes/kubernetes/issues/60596 12 | description: | 13 | Used for GSP critical pods that must run in the cluster. 
14 | value: 1000000000 15 | 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/gsp-system-namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: gsp-system 6 | labels: 7 | namespace: gsp-system 8 | istio-injection: disabled 9 | control-plane: "true" 10 | talksToAWSMetadataService: "true" 11 | {{ if .Values.global.cloudHsm.public }} 12 | talksToHsm: "true" 13 | {{ end }} 14 | --- 15 | apiVersion: v1 16 | kind: Secret 17 | metadata: 18 | name: dockerhubpull 19 | namespace: gsp-system 20 | data: 21 | .dockerconfigjson: {{ .Values.global.dockerHubCredentials }} 22 | type: kubernetes.io/dockerconfigjson 23 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/istio-namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: istio-system 6 | labels: 7 | namespace: istio-system 8 | istio-injection: disabled 9 | istio: system 10 | --- 11 | apiVersion: v1 12 | kind: Secret 13 | metadata: 14 | name: dockerhubpull 15 | namespace: istio-system 16 | data: 17 | .dockerconfigjson: {{ .Values.global.dockerHubCredentials }} 18 | type: kubernetes.io/dockerconfigjson 19 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/kube-proxy-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | labels: 5 | eks.amazonaws.com/component: kube-proxy 6 | k8s-app: kube-proxy 7 | name: kube-proxy-config 8 | namespace: kube-system 9 | data: 10 | config: |- 11 | apiVersion: kubeproxy.config.k8s.io/v1alpha1 12 | bindAddress: 0.0.0.0 13 | clientConnection: 14 | 
acceptContentTypes: "" 15 | burst: 10 16 | contentType: application/vnd.kubernetes.protobuf 17 | kubeconfig: /var/lib/kube-proxy/kubeconfig 18 | qps: 5 19 | clusterCIDR: "" 20 | configSyncPeriod: 15m0s 21 | conntrack: 22 | max: 0 23 | maxPerCore: 32768 24 | min: 131072 25 | tcpCloseWaitTimeout: 1h0m0s 26 | tcpEstablishedTimeout: 24h0m0s 27 | enableProfiling: false 28 | healthzBindAddress: 0.0.0.0:10256 29 | hostnameOverride: "" 30 | iptables: 31 | masqueradeAll: false 32 | masqueradeBit: 14 33 | minSyncPeriod: 0s 34 | syncPeriod: 30s 35 | ipvs: 36 | excludeCIDRs: null 37 | minSyncPeriod: 0s 38 | scheduler: "" 39 | syncPeriod: 30s 40 | kind: KubeProxyConfiguration 41 | metricsBindAddress: 0.0.0.0:10249 42 | mode: "iptables" 43 | nodePortAddresses: null 44 | oomScoreAdj: -998 45 | portRange: "" 46 | resourceContainer: "" 47 | udpIdleTimeout: 250ms 48 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/kube-system-namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: kube-system 6 | labels: 7 | namespace: kube-system 8 | istio-injection: disabled 9 | kube-system: "true" 10 | control-plane: "true" 11 | --- 12 | apiVersion: v1 13 | kind: Secret 14 | metadata: 15 | name: dockerhubpull 16 | namespace: kube-system 17 | data: 18 | .dockerconfigjson: {{ .Values.global.dockerHubCredentials }} 19 | type: kubernetes.io/dockerconfigjson 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/main-team-namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: {{ .Values.global.cluster.name }}-main 6 | labels: 7 | namespace: {{ .Values.global.cluster.name }}-main 8 | 
-------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/managed-namespaces.yaml: -------------------------------------------------------------------------------- 1 | {{- range .Values.namespaces }} 2 | --- 3 | apiVersion: v1 4 | kind: Namespace 5 | metadata: 6 | name: {{ .name }} 7 | labels: 8 | namespace: {{ .name }} 9 | {{- if .talksToHsm }} 10 | talksToHsm: "true" 11 | {{- end }} 12 | {{- if .talksToPsn }} 13 | talksToPsn: "true" 14 | {{- end }} 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/00-aws-auth/psp-system-bindings.yaml: -------------------------------------------------------------------------------- 1 | # based on https://docs.aws.amazon.com/eks/latest/userguide/pod-security-policy.html 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: RoleBinding 5 | metadata: 6 | name: eks:podsecuritypolicy:authenticated 7 | namespace: kube-system 8 | roleRef: 9 | apiGroup: rbac.authorization.k8s.io 10 | kind: ClusterRole 11 | name: eks:podsecuritypolicy:privileged 12 | subjects: 13 | - kind: Group 14 | apiGroup: rbac.authorization.k8s.io 15 | name: system:authenticated 16 | --- 17 | apiVersion: rbac.authorization.k8s.io/v1 18 | kind: RoleBinding 19 | metadata: 20 | name: eks:podsecuritypolicy:authenticated 21 | namespace: gsp-system 22 | roleRef: 23 | apiGroup: rbac.authorization.k8s.io 24 | kind: ClusterRole 25 | name: eks:podsecuritypolicy:privileged 26 | subjects: 27 | - kind: Group 28 | apiGroup: rbac.authorization.k8s.io 29 | name: system:authenticated 30 | --- 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | kind: RoleBinding 33 | metadata: 34 | name: eks:podsecuritypolicy:authenticated 35 | namespace: istio-system 36 | roleRef: 37 | apiGroup: rbac.authorization.k8s.io 38 | kind: ClusterRole 39 | name: eks:podsecuritypolicy:privileged 40 | subjects: 41 | - kind: Group 42 | apiGroup: 
rbac.authorization.k8s.io 43 | name: system:authenticated 44 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/01-aws-system/kiam-service-entry.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.global.runningOnAws }} 2 | apiVersion: networking.istio.io/v1alpha3 3 | kind: ServiceEntry 4 | metadata: 5 | name: aws-metadata-service 6 | spec: 7 | hosts: 8 | - aws-metadata.somedomain # not used 9 | addresses: 10 | - 169.254.169.254/32 11 | ports: 12 | - number: 80 13 | name: http 14 | protocol: HTTP 15 | location: MESH_EXTERNAL 16 | resolution: STATIC 17 | endpoints: 18 | - address: 169.254.169.254 19 | {{ end }} 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/concourse-destinationrule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.istio.io/v1alpha3 3 | kind: DestinationRule 4 | metadata: 5 | name: {{ include "gsp-cluster.fullname" . }}-concourse 6 | labels: 7 | app.kubernetes.io/name: {{ include "gsp-cluster.name" . }}-concourse 8 | helm.sh/chart: {{ include "gsp-cluster.chart" . 
}} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | spec: 12 | host: "gsp-concourse-web.gsp-system.svc.cluster.local" 13 | trafficPolicy: 14 | tls: 15 | mode: DISABLE 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/concourse-service-monitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.concourse.monitor.create }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ .Release.Namespace }}-concourse-monitor 6 | spec: 7 | selector: 8 | matchLabels: 9 | release: {{ .Release.Name }} 10 | endpoints: 11 | - port: prometheus 12 | {{- end }} 13 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/concourse-web-virtual-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.istio.io/v1alpha3 3 | kind: VirtualService 4 | metadata: 5 | name: {{ .Release.Name }}-concourse-web 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | chart: {{ .Chart.Name }} 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | spec: 12 | hosts: 13 | - "ci.{{ .Values.global.cluster.domain }}" 14 | gateways: 15 | - {{ include "gsp-cluster.fullname" . 
}}-ingress 16 | http: 17 | - route: 18 | - destination: 19 | host: gsp-concourse-web 20 | port: 21 | number: 8080 22 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/egress-networkpolicy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: crd.projectcalico.org/v1 2 | kind: GlobalNetworkPolicy 3 | metadata: 4 | name: {{ .Release.Name }}-egress 5 | spec: 6 | order: 100 7 | types: 8 | - Egress 9 | egress: 10 | - action: Deny 11 | protocol: TCP 12 | source: 13 | selector: talksToHsm != 'true' 14 | destination: 15 | nets: 16 | - {{ .Values.global.cloudHsm.ip }}/32 17 | ports: 18 | - 2223 19 | - 2224 20 | - 2225 21 | - action: Deny 22 | protocol: TCP 23 | source: 24 | namespaceSelector: talksToHsm != 'true' 25 | destination: 26 | nets: 27 | - {{ .Values.global.cloudHsm.ip }}/32 28 | ports: 29 | - 2223 30 | - 2224 31 | - 2225 32 | - action: Deny 33 | protocol: TCP 34 | source: 35 | selector: talksToAWSMetadataService != 'true' 36 | destination: 37 | nets: 38 | - 169.254.169.254/32 39 | - action: Deny 40 | protocol: TCP 41 | source: 42 | namespaceSelector: talksToAWSMetadataService != 'true' 43 | destination: 44 | nets: 45 | - 169.254.169.254/32 46 | - action: Allow 47 | 48 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/egress-safelist.yaml: -------------------------------------------------------------------------------- 1 | {{- range $egress := .Values.egressSafelist }} 2 | --- 3 | apiVersion: networking.istio.io/v1alpha3 4 | kind: ServiceEntry 5 | metadata: 6 | name: {{ $.Release.Name }}-{{ $egress.name }} 7 | namespace: {{ $.Release.Namespace }} 8 | spec: 9 | {{- $egress.service | toYaml | nindent 2 }} 10 | {{- end }} 11 | -------------------------------------------------------------------------------- 
/charts/gsp-cluster/templates/02-gsp-system/external-dns-clusterrole.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1beta1 2 | kind: ClusterRole 3 | metadata: 4 | name: {{ .Release.Name }}-external-dns 5 | labels: 6 | app.kubernetes.io/name: external-dns 7 | helm.sh/chart: {{ .Chart.Name }}-{{ .Chart.Version }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | rules: 11 | - apiGroups: 12 | - "" 13 | resources: 14 | - services 15 | - pods 16 | - nodes 17 | verbs: 18 | - get 19 | - list 20 | - watch 21 | - apiGroups: 22 | - extensions 23 | - "networking.k8s.io" # k8s 1.14+ 24 | resources: 25 | - ingresses 26 | verbs: 27 | - get 28 | - list 29 | - watch 30 | - apiGroups: 31 | - networking.istio.io 32 | resources: 33 | - gateways 34 | verbs: 35 | - get 36 | - list 37 | - watch 38 | - apiGroups: 39 | - externaldns.k8s.io 40 | resources: 41 | - dnsendpoints 42 | verbs: 43 | - get 44 | - list 45 | - watch 46 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/gatekeeper/constraints/digests-on-internal-registry.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.gatekeeper.enabled }} 2 | --- 3 | apiVersion: constraints.gatekeeper.sh/v1beta1 4 | kind: RequireImageDigest 5 | metadata: 6 | name: digests-on-internal-registry 7 | spec: 8 | enforcementAction: deny 9 | match: 10 | kinds: 11 | - apiGroups: [""] 12 | kinds: ["Pod"] 13 | parameters: 14 | aws_account_id: {{ .Values.global.account.id | quote }} 15 | {{ end }} 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/gatekeeper/constraints/isolate-tenant-istio-resources.yaml: -------------------------------------------------------------------------------- 1 | {{ if 
.Values.gatekeeper.enabled }} 2 | --- 3 | apiVersion: constraints.gatekeeper.sh/v1beta1 4 | kind: IsolateTenantIstioResources 5 | metadata: 6 | name: isolate-tenant-istio-resources 7 | spec: 8 | enforcementAction: deny 9 | match: 10 | excludedNamespaces: 11 | - "kube-system" 12 | - "gsp-system" 13 | - "istio-system" 14 | kinds: 15 | - apiGroups: 16 | - "networking.istio.io" 17 | kinds: 18 | - "DestinationRule" 19 | - "ServiceEntry" 20 | - "VirtualService" 21 | {{ end }} 22 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/gatekeeper/constraints/tolerate-special-nodes.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.gatekeeper.enabled }} 2 | --- 3 | apiVersion: constraints.gatekeeper.sh/v1beta1 4 | kind: TolerateSpecialNodes 5 | metadata: 6 | name: tolerate-special-nodes 7 | spec: 8 | enforcementAction: deny 9 | match: 10 | kinds: 11 | - apiGroups: [""] 12 | kinds: ["Pod"] 13 | parameters: 14 | restricted_roles: 15 | - node.kubernetes.io/cluster-management 16 | {{ end }} 17 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/gatekeeper/templates/digests-on-internal-registry-images.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.gatekeeper.enabled }} 2 | --- 3 | apiVersion: templates.gatekeeper.sh/v1beta1 4 | kind: ConstraintTemplate 5 | metadata: 6 | name: requireimagedigest 7 | spec: 8 | crd: 9 | spec: 10 | names: 11 | kind: RequireImageDigest 12 | listKind: RequireImageDigestList 13 | plural: requireimagedigests 14 | singular: requireimagedigest 15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | {{ .Files.Get "policies/digests-on-images/src.rego" | indent 8 }} 19 | {{ end }} 20 | -------------------------------------------------------------------------------- 
/charts/gsp-cluster/templates/02-gsp-system/gatekeeper/templates/isolate-tenant-istio-resources.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.gatekeeper.enabled }} 2 | --- 3 | apiVersion: templates.gatekeeper.sh/v1beta1 4 | kind: ConstraintTemplate 5 | metadata: 6 | name: isolatetenantistioresources 7 | spec: 8 | crd: 9 | spec: 10 | names: 11 | kind: IsolateTenantIstioResources 12 | listKind: IsolateTenantIstioResourcesList 13 | plural: isolatetenantistioresources 14 | singular: isolatetenantistioresources 15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | {{ .Files.Get "policies/isolate-tenant-istio-resources/src.rego" | indent 8 }} 19 | {{ end }} 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/gatekeeper/templates/tolerate-special-nodes.yaml: -------------------------------------------------------------------------------- 1 | {{ if .Values.gatekeeper.enabled }} 2 | --- 3 | apiVersion: templates.gatekeeper.sh/v1beta1 4 | kind: ConstraintTemplate 5 | metadata: 6 | name: toleratespecialnodes 7 | spec: 8 | crd: 9 | spec: 10 | names: 11 | kind: TolerateSpecialNodes 12 | listKind: TolerateSpecialnodesList 13 | plural: toleratespecialnodes 14 | singular: toleratespecialnodes 15 | targets: 16 | - target: admission.k8s.gatekeeper.sh 17 | rego: | 18 | {{ .Files.Get "policies/restrict-special-nodes/src.rego" | indent 8 }} 19 | {{ end }} 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/grafana-concourse-dashboard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: '{{ .Release.Name }}-grafana-concourse-dashboard' 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | grafana_dashboard: "1" 9 | data: 10 | concourse.json: |- 11 | 
{{ .Files.Get "dashboards/concourse.json" | indent 4 }} 12 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/grafana-destinationrule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.istio.io/v1alpha3 3 | kind: DestinationRule 4 | metadata: 5 | name: {{ include "gsp-cluster.fullname" . }}-grafana 6 | labels: 7 | app.kubernetes.io/name: {{ include "gsp-cluster.name" . }}-grafana 8 | helm.sh/chart: {{ include "gsp-cluster.chart" . }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | spec: 12 | host: "gsp-grafana.gsp-system.svc.cluster.local" 13 | trafficPolicy: 14 | tls: 15 | mode: DISABLE 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/grafana-google-oauth-client-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: grafana 6 | namespace: gsp-system 7 | data: 8 | GF_AUTH_GOOGLE_CLIENT_ID: {{ $.Values.googleOauthClientId | b64enc }} 9 | GF_AUTH_GOOGLE_CLIENT_SECRET: {{ $.Values.googleOauthClientSecret | b64enc }} 10 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/grafana-virtual-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.istio.io/v1alpha3 3 | kind: VirtualService 4 | metadata: 5 | name: {{ .Release.Name }}-grafana 6 | namespace: {{ .Release.Namespace }} 7 | labels: 8 | chart: {{ .Chart.Name }} 9 | release: "{{ .Release.Name }}" 10 | heritage: "{{ .Release.Service }}" 11 | spec: 12 | hosts: 13 | - "grafana.{{ .Values.global.cluster.domain }}" 14 | gateways: 15 | - {{ include "gsp-cluster.fullname" . 
}}-ingress 16 | http: 17 | - route: 18 | - destination: 19 | host: gsp-grafana 20 | port: 21 | number: 80 22 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/ingress-certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1alpha2 2 | kind: Certificate 3 | metadata: 4 | name: {{ .Release.Name }}-ingress 5 | namespace: {{ .Release.Namespace }} 6 | spec: 7 | secretName: {{ .Release.Name }}-ingress-certificate 8 | dnsNames: 9 | - "ci.{{ .Values.global.cluster.domain }}" 10 | - "grafana.{{ .Values.global.cluster.domain }}" 11 | issuerRef: 12 | name: letsencrypt-r53 13 | kind: ClusterIssuer 14 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/ingress-gateways.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: {{ include "gsp-cluster.fullname" . }}-ingress 5 | labels: 6 | app.kubernetes.io/name: {{ include "gsp-cluster.name" . }} 7 | helm.sh/chart: {{ include "gsp-cluster.chart" . 
}} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | annotations: 11 | externaldns.k8s.io/namespace: {{ .Release.Namespace }} 12 | spec: 13 | selector: 14 | istio: {{ .Release.Namespace }}-ingressgateway 15 | servers: 16 | - port: 17 | number: 80 18 | name: http 19 | protocol: HTTP 20 | tls: 21 | httpsRedirect: true 22 | hosts: 23 | - "ci.{{ .Values.global.cluster.domain }}" 24 | - "grafana.{{ .Values.global.cluster.domain }}" 25 | - port: 26 | number: 443 27 | name: https 28 | protocol: HTTPS 29 | tls: 30 | mode: SIMPLE 31 | serverCertificate: sds 32 | privateKey: sds 33 | credentialName: {{ .Release.Name }}-ingress-certificate 34 | hosts: 35 | - "ci.{{ .Values.global.cluster.domain }}" 36 | - "grafana.{{ .Values.global.cluster.domain }}" 37 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/leader_election_role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: leader-election-role 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - configmaps 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - configmaps/status 23 | verbs: 24 | - get 25 | - update 26 | - patch 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - events 31 | verbs: 32 | - create 33 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/leader_election_role_binding.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: RoleBinding 4 | metadata: 5 | name: leader-election-rolebinding 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: Role 9 | name: 
leader-election-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: default 13 | namespace: {{ .Release.Namespace }} 14 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/letsencrypt-r53.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1alpha2 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-r53 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | email: automate-team@digital.cabinet-office.gov.uk 9 | privateKeySecretRef: 10 | name: letsencrypt-r53 11 | solvers: 12 | - selector: {} 13 | dns01: 14 | route53: 15 | region: eu-west-2 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/main-gateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: {{ include "gsp-cluster.fullname" . }} 5 | labels: 6 | app.kubernetes.io/name: {{ include "gsp-cluster.name" . }} 7 | helm.sh/chart: {{ include "gsp-cluster.chart" . 
}} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | selector: 12 | istio: ingressgateway 13 | servers: 14 | - port: 15 | number: 80 16 | name: http 17 | protocol: HTTP 18 | hosts: 19 | - "*" 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/metrics-server-apiservice.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiregistration.k8s.io/v1beta1 3 | kind: APIService 4 | metadata: 5 | name: v1beta1.metrics.k8s.io 6 | spec: 7 | service: 8 | name: metrics-server 9 | namespace: kube-system 10 | group: metrics.k8s.io 11 | version: v1beta1 12 | insecureSkipTLSVerify: true 13 | groupPriorityMinimum: 100 14 | versionPriority: 100 15 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/metrics-server-auth-delegator.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: metrics-server:system:auth-delegator 6 | namespace: kube-system 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: system:auth-delegator 11 | subjects: 12 | - kind: ServiceAccount 13 | name: metrics-server 14 | namespace: kube-system 15 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/metrics-server-auth-reader.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1beta1 3 | kind: RoleBinding 4 | metadata: 5 | name: metrics-server-auth-reader 6 | namespace: kube-system 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: Role 10 | name: extension-apiserver-authentication-reader 11 
| subjects: 12 | - kind: ServiceAccount 13 | name: metrics-server 14 | namespace: kube-system 15 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/metrics-server-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | --- 8 | apiVersion: apps/v1 9 | kind: Deployment 10 | metadata: 11 | name: metrics-server 12 | namespace: kube-system 13 | labels: 14 | k8s-app: metrics-server 15 | spec: 16 | selector: 17 | matchLabels: 18 | k8s-app: metrics-server 19 | template: 20 | metadata: 21 | name: metrics-server 22 | labels: 23 | k8s-app: metrics-server 24 | spec: 25 | serviceAccountName: metrics-server 26 | volumes: 27 | # mount in tmp so we can safely use from-scratch images and/or read-only containers 28 | - name: tmp-dir 29 | emptyDir: {} 30 | containers: 31 | - name: metrics-server 32 | image: k8s.gcr.io/metrics-server-amd64:v0.3.0 33 | imagePullPolicy: Always 34 | volumeMounts: 35 | - name: tmp-dir 36 | mountPath: /tmp 37 | 38 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/metrics-server-resource-reader.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRole 4 | metadata: 5 | name: system:metrics-server 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | - nodes 12 | - nodes/stats 13 | - namespaces 14 | verbs: 15 | - get 16 | - list 17 | - watch 18 | - apiGroups: 19 | - "extensions" 20 | resources: 21 | - deployments 22 | verbs: 23 | - get 24 | - list 25 | - watch 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: ClusterRoleBinding 29 | metadata: 30 | name: system:metrics-server 31 | roleRef: 32 | apiGroup: 
rbac.authorization.k8s.io 33 | kind: ClusterRole 34 | name: system:metrics-server 35 | subjects: 36 | - kind: ServiceAccount 37 | name: metrics-server 38 | namespace: kube-system 39 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/metrics-server-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | labels: 8 | kubernetes.io/name: "Metrics-server" 9 | spec: 10 | selector: 11 | k8s-app: metrics-server 12 | ports: 13 | - port: 443 14 | protocol: TCP 15 | targetPort: 443 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/pipeline-operator-pipeline-crd.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | controller-tools.k8s.io: "1.0" 7 | name: pipelines.concourse.govsvc.uk 8 | spec: 9 | group: concourse.govsvc.uk 10 | names: 11 | kind: Pipeline 12 | plural: pipelines 13 | scope: Namespaced 14 | validation: 15 | openAPIV3Schema: 16 | properties: 17 | apiVersion: 18 | type: string 19 | kind: 20 | type: string 21 | metadata: 22 | type: object 23 | spec: 24 | properties: 25 | exposed: 26 | type: boolean 27 | paused: 28 | type: boolean 29 | config: 30 | type: object 31 | pipelineString: 32 | type: string 33 | type: object 34 | status: 35 | type: object 36 | version: v1beta1 37 | status: 38 | acceptedNames: 39 | kind: "" 40 | plural: "" 41 | conditions: [] 42 | storedVersions: [] 43 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/pipeline-operator-role-binding.yaml: 
--------------------------------------------------------------------------------
# Binds the cluster-wide pipeline-operator ClusterRole (defined in
# pipeline-operator-role.yaml) to the operator's ServiceAccount in the
# release namespace.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: pipeline-operator-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: pipeline-operator-role
subjects:
- kind: ServiceAccount
  name: {{ template "pipelineOperator.serviceAccountName" . }}
  namespace: {{ .Release.Namespace }}
-------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/pipeline-operator-role.yaml: --------------------------------------------------------------------------------
# Permissions for the Concourse pipeline operator: full lifecycle control of
# the concourse.govsvc.uk Pipeline/Team custom resources (plus Team status
# updates), of admission webhook configurations, and of the core Secrets and
# Services the operator provisions.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: pipeline-operator-role
rules:
- apiGroups:
  - concourse.govsvc.uk
  resources:
  - pipelines
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - concourse.govsvc.uk
  resources:
  - teams
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
# Status subresource only needs read/update, not create/delete.
- apiGroups:
  - concourse.govsvc.uk
  resources:
  - teams/status
  verbs:
  - get
  - update
  - patch
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - mutatingwebhookconfigurations
  - validatingwebhookconfigurations
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
# NOTE(review): cluster-wide create/update/delete on core Secrets and
# Services is broad — confirm the operator needs this beyond the namespaces
# it manages.
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
-------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/pipeline-operator-sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: {{ template "pipelineOperator.serviceAccountName" . }} 5 | labels: 6 | app: "pipeline-operator" 7 | chart: {{ .Chart.Name }} 8 | release: "{{ .Release.Name }}" 9 | heritage: "{{ .Release.Service }}" 10 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/pipeline-operator-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ .Release.Name }}-pipeline-operator 6 | labels: 7 | app.kubernetes.io/name: "pipeline-operator" 8 | helm.sh/chart: {{ .Chart.Name }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | control-plane: concourse-operator 12 | controller-tools.k8s.io: "1.0" 13 | data: 14 | concourse_password: {{ .Values.pipelineOperator.concoursePassword | b64enc }} 15 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/pipeline-operator-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ .Release.Name }}-pipeline-operator 5 | labels: 6 | app.kubernetes.io/name: {{ .Release.Name }}-pipeline-operator 7 | helm.sh/chart: {{ .Chart.Name }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | control-plane: concourse-operator 11 | controller-tools.k8s.io: "1.0" 12 | spec: 13 | ports: 14 | - port: {{ .Values.pipelineOperator.service.port }} 15 | selector: 16 | app.kubernetes.io/name: 
"pipeline-operator" 17 | app.kubernetes.io/instance: {{ .Release.Name }} 18 | control-plane: concourse-operator 19 | controller-tools.k8s.io: "1.0" 20 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/pipeline-operator-webhook-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: {{ .Release.Name }}-concourse-operator-webhook 6 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/route53-service-entry.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.runningOnAws }} 2 | apiVersion: networking.istio.io/v1alpha3 3 | kind: ServiceEntry 4 | metadata: 5 | name: {{ include "gsp-cluster.fullname" . }}-route53 6 | labels: 7 | app.kubernetes.io/name: {{ include "gsp-cluster.name" . }}-route53 8 | helm.sh/chart: {{ include "gsp-cluster.chart" . 
}} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | spec: 12 | hosts: 13 | - route53.amazonaws.com 14 | ports: 15 | - name: https 16 | number: 443 17 | protocol: TLS 18 | location: MESH_EXTERNAL 19 | resolution: DNS 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/sealed-secrets-crd.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apiextensions.k8s.io/v1beta1 3 | kind: CustomResourceDefinition 4 | metadata: 5 | name: sealedsecrets.bitnami.com 6 | spec: 7 | group: bitnami.com 8 | names: 9 | kind: SealedSecret 10 | listKind: SealedSecretList 11 | plural: sealedsecrets 12 | singular: sealedsecret 13 | scope: Namespaced 14 | version: v1alpha1 15 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/sealed-secrets-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | type: kubernetes.io/tls 5 | metadata: 6 | name: {{ .Release.Name }}-sealed-secrets-key 7 | data: 8 | tls.crt: {{ .Values.secrets.public_certificate | quote }} 9 | tls.key: {{ .Values.secrets.private_key | quote }} 10 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/service-operator/role-binding.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.runningOnAws }} 2 | --- 3 | apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | name: {{ .Release.Name }}-service-operator-rolebinding 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: {{ .Release.Name }}-service-operator-role 11 | subjects: 12 | - kind: ServiceAccount 13 | name: 
{{ template "serviceOperator.serviceAccountName" . }} 14 | namespace: {{ .Release.Namespace }} 15 | {{ end }} 16 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/service-operator/service-account.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.runningOnAws }} 2 | --- 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: {{ template "serviceOperator.serviceAccountName" . }} 7 | annotations: 8 | eks.amazonaws.com/role-arn: {{ .Values.serviceOperator.roleARN }} 9 | labels: 10 | app.kubernetes.io/name: "{{ .Release.Name }}-service-operator" 11 | helm.sh/chart: {{ .Chart.Name }} 12 | app.kubernetes.io/instance: {{ .Release.Name }} 13 | app.kubernetes.io/managed-by: {{ .Release.Service }} 14 | control-plane: {{ .Release.Name }}-service-operator 15 | controller-tools.k8s.io: "1.0" 16 | {{ end }} 17 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/service-operator/service.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.runningOnAws }} 2 | --- 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | annotations: 7 | prometheus.io/port: "8443" 8 | prometheus.io/scheme: https 9 | prometheus.io/scrape: "true" 10 | labels: 11 | control-plane: {{ .Release.Name }}-service-operator 12 | name: {{ .Release.Name }}-service-operator-metrics-service 13 | spec: 14 | ports: 15 | - name: https 16 | port: 8443 17 | targetPort: https 18 | selector: 19 | control-plane: {{ .Release.Name }}-service-operator 20 | {{ end }} 21 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/02-gsp-system/sts-service-entry.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.global.runningOnAws }} 2 | 
apiVersion: networking.istio.io/v1alpha3 3 | kind: ServiceEntry 4 | metadata: 5 | name: {{ include "gsp-cluster.fullname" . }}-aws-sts 6 | labels: 7 | app.kubernetes.io/name: {{ include "gsp-cluster.name" . }}-aws-sts 8 | helm.sh/chart: {{ include "gsp-cluster.chart" . }} 9 | app.kubernetes.io/instance: {{ .Release.Name }} 10 | app.kubernetes.io/managed-by: {{ .Release.Service }} 11 | spec: 12 | hosts: 13 | - sts.amazonaws.com 14 | - sts.eu-west-2.amazonaws.com 15 | ports: 16 | - name: https 17 | number: 443 18 | protocol: TLS 19 | location: MESH_EXTERNAL 20 | resolution: DNS 21 | {{- end }} 22 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/04-main-team-pipelines/canary-image-repository.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.govsvc.uk/v1beta1 3 | kind: ImageRepository 4 | metadata: 5 | name: canary 6 | namespace: {{ .Values.global.cluster.name }}-main 7 | labels: 8 | group.access.govsvc.uk: canary 9 | spec: 10 | aws: {} 11 | -------------------------------------------------------------------------------- /charts/gsp-cluster/templates/04-main-team-pipelines/canary-principal.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: access.govsvc.uk/v1beta1 3 | kind: Principal 4 | metadata: 5 | name: canary 6 | namespace: {{ .Values.global.cluster.name }}-main 7 | labels: 8 | group.access.govsvc.uk: canary 9 | -------------------------------------------------------------------------------- /charts/gsp-istio/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /charts/gsp-istio/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: gsp-istio 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /charts/gsp-istio/charts/istio-1.5.8.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/charts/gsp-istio/charts/istio-1.5.8.tgz -------------------------------------------------------------------------------- /charts/gsp-istio/charts/istio-cni-1.5.8.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/charts/gsp-istio/charts/istio-cni-1.5.8.tgz -------------------------------------------------------------------------------- /charts/gsp-istio/charts/istio-init-1.5.8.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/charts/gsp-istio/charts/istio-init-1.5.8.tgz -------------------------------------------------------------------------------- /charts/gsp-istio/requirements.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: istio 3 | repository: https://storage.googleapis.com/istio-release/releases/1.5.8/charts/ 4 | version: 1.5.8 5 | - name: istio-init 6 | repository: 
https://storage.googleapis.com/istio-release/releases/1.5.8/charts/ 7 | version: 1.5.8 8 | - name: istio-cni 9 | repository: https://storage.googleapis.com/istio-release/releases/1.5.8/charts/ 10 | version: 1.5.8 11 | digest: sha256:4b27c395ed49289a07cf2c0961dbcfe67a61735df3f620df7e42fec7ef5d03b0 12 | generated: "2020-07-23T17:47:08.820500402+01:00" 13 | -------------------------------------------------------------------------------- /charts/gsp-istio/requirements.yaml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: istio 3 | version: 1.5.8 4 | repository: https://storage.googleapis.com/istio-release/releases/1.5.8/charts/ 5 | - name: istio-init 6 | version: 1.5.8 7 | repository: https://storage.googleapis.com/istio-release/releases/1.5.8/charts/ 8 | - name: istio-cni 9 | version: 1.5.8 10 | repository: https://storage.googleapis.com/istio-release/releases/1.5.8/charts/ 11 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/alphagov/gsp/components/aws-node-lifecycle-hook 2 | 3 | go 1.12 4 | 5 | require ( 6 | github.com/aws/aws-lambda-go v1.13.2 7 | github.com/aws/aws-sdk-go v1.25.29 8 | github.com/gofrs/flock v0.7.1 // indirect 9 | github.com/imdario/mergo v0.3.8 // indirect 10 | github.com/kubernetes-sigs/aws-iam-authenticator v0.4.0 11 | github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 12 | github.com/onsi/ginkgo v1.10.1 13 | github.com/onsi/gomega v1.7.0 14 | github.com/pkg/errors v0.8.1 15 | github.com/sirupsen/logrus v1.4.2 16 | golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586 // indirect 17 | golang.org/x/time v0.0.0-20191024005414-555d28b269f0 // indirect 18 | golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72 // indirect 19 | k8s.io/api v0.0.0-20191107030003-665c8a257c1a 20 | k8s.io/apimachinery 
v0.0.0-20191107105744-2c7f8d2b0fd8 21 | k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90 22 | k8s.io/kubectl v0.0.0-20190918164019-21692a0861df 23 | k8s.io/utils v0.0.0-20191030222137-2b95a09bc58d // indirect 24 | ) 25 | 26 | // fix broken upstream 27 | // https://github.com/dominikh/go-tools/issues/658 28 | replace honnef.co/go/tools => github.com/dominikh/go-tools v0.0.0-20190102054323-c2f93a96b099 29 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | 8 | "github.com/alphagov/gsp/components/aws-node-lifecycle-hook/pkg/awsclient" 9 | "github.com/alphagov/gsp/components/aws-node-lifecycle-hook/pkg/k8sclient" 10 | "github.com/alphagov/gsp/components/aws-node-lifecycle-hook/pkg/k8sdrainer" 11 | "github.com/alphagov/gsp/components/aws-node-lifecycle-hook/pkg/lifecycle" 12 | "github.com/aws/aws-lambda-go/lambda" 13 | ) 14 | 15 | // Start configures a lifecycle handler and registers it with the lambda handler. 
16 | func Start() error { 17 | awsClient, err := awsclient.New() 18 | if err != nil { 19 | return fmt.Errorf("failed to configure aws client: %s", err) 20 | } 21 | clusterName := os.Getenv("CLUSTER_NAME") 22 | if clusterName == "" { 23 | return fmt.Errorf("CLUSTER_NAME environment variable is required") 24 | } 25 | k8sClient, err := k8sclient.New(clusterName) 26 | if err != nil { 27 | return err 28 | } 29 | h := lifecycle.Handler{ 30 | AWSClient: awsClient, 31 | KubernetesClient: k8sClient, 32 | Drainer: k8sdrainer.DefaultDrainer, 33 | } 34 | lambda.Start(h.HandleEvent) 35 | return nil 36 | } 37 | 38 | func main() { 39 | if err := Start(); err != nil { 40 | log.Fatalf("failed to startup: %s", err) 41 | } 42 | } 43 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/pkg/awsclient/client.go: -------------------------------------------------------------------------------- 1 | package awsclient 2 | 3 | import ( 4 | "context" 5 | 6 | "github.com/aws/aws-sdk-go/aws/request" 7 | "github.com/aws/aws-sdk-go/aws/session" 8 | "github.com/aws/aws-sdk-go/service/autoscaling" 9 | ) 10 | 11 | //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -o ./fakeawsclient/fake_awsclient.go . 
Client 12 | 13 | type Client interface { 14 | CompleteLifecycleActionWithContext(context.Context, *autoscaling.CompleteLifecycleActionInput, ...request.Option) (*autoscaling.CompleteLifecycleActionOutput, error) 15 | RecordLifecycleActionHeartbeatWithContext(context.Context, *autoscaling.RecordLifecycleActionHeartbeatInput, ...request.Option) (*autoscaling.RecordLifecycleActionHeartbeatOutput, error) 16 | } 17 | 18 | func New() (Client, error) { 19 | return autoscaling.New(session.New()), nil 20 | } 21 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/pkg/awsclient/suite_test.go: -------------------------------------------------------------------------------- 1 | package awsclient_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | ) 9 | 10 | func TestLifecycleHandler(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "AWSClientSuite") 13 | } 14 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/pkg/k8sclient/suite_test.go: -------------------------------------------------------------------------------- 1 | package k8sclient_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestLifecycleHandler(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "KubernetesClientSuite") 13 | } 14 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/pkg/k8sdrainer/drain.go: -------------------------------------------------------------------------------- 1 | package k8sdrainer 2 | 3 | import ( 4 | "os" 5 | "time" 6 | 7 | "github.com/alphagov/gsp/components/aws-node-lifecycle-hook/pkg/k8sclient" 8 | v1 "k8s.io/api/core/v1" 9 | "k8s.io/kubectl/pkg/drain" 10 | ) 11 | 12 | const ( 13 | CORDON = true 14 | ) 15 | 16 | //go:generate go run github.com/maxbrunsfeld/counterfeiter/v6 -o ./fakek8sdrainer/fake_drainer.go . Drainer 17 | 18 | type Drainer interface { 19 | Cordon(k8sclient.Client, *v1.Node) error 20 | Drain(k8sclient.Client, *v1.Node) error 21 | } 22 | 23 | var DefaultDrainer Drainer = &DrainHandler{} 24 | 25 | type DrainHandler struct{} 26 | 27 | func (d *DrainHandler) Drain(c k8sclient.Client, node *v1.Node) error { 28 | cfg := &drain.Helper{ 29 | Client: c, 30 | Force: true, 31 | GracePeriodSeconds: 120, 32 | IgnoreAllDaemonSets: true, 33 | Timeout: time.Minute * 9, 34 | DeleteLocalData: true, 35 | Out: os.Stdout, 36 | ErrOut: os.Stderr, 37 | } 38 | return drain.RunNodeDrain(cfg, node.ObjectMeta.Name) 39 | } 40 | 41 | func (d *DrainHandler) Cordon(c k8sclient.Client, node *v1.Node) error { 42 | cfg := &drain.Helper{ 43 | Client: c, 44 | Timeout: time.Minute * 9, 45 | Out: os.Stdout, 46 | ErrOut: os.Stderr, 47 | } 48 | return drain.RunCordonOrUncordon(cfg, node, CORDON) 49 | } 50 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/pkg/k8sdrainer/suite_test.go: -------------------------------------------------------------------------------- 1 | package k8sdrainer_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestLifecycleHandler(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "KubernetesNodeDrainerSuite") 13 | } 14 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/pkg/lifecycle/heartbeat.go: -------------------------------------------------------------------------------- 1 | package lifecycle 2 | 3 | import ( 4 | "context" 5 | "log" 6 | "time" 7 | 8 | "github.com/alphagov/gsp/components/aws-node-lifecycle-hook/pkg/awsclient" 9 | "github.com/aws/aws-sdk-go/service/autoscaling" 10 | ) 11 | 12 | func heartbeat(ctx context.Context, asgClient awsclient.Client, asgEvent ASGLifecycleEventDetail, interval time.Duration) { 13 | log.Printf("starting heartbeat every %v for %s", interval, asgEvent.EC2InstanceId) 14 | if interval == 0 { 15 | interval = time.Second * 60 16 | } 17 | for { 18 | select { 19 | case <-ctx.Done(): 20 | log.Printf("stopping heartbeat for %s", asgEvent.EC2InstanceId) 21 | return 22 | case <-time.After(interval): 23 | _, err := asgClient.RecordLifecycleActionHeartbeatWithContext(ctx, &autoscaling.RecordLifecycleActionHeartbeatInput{ 24 | AutoScalingGroupName: &asgEvent.AutoScalingGroupName, 25 | InstanceId: &asgEvent.EC2InstanceId, 26 | LifecycleHookName: &asgEvent.LifecycleHookName, 27 | LifecycleActionToken: &asgEvent.LifecycleActionToken, 28 | }) 29 | if err != nil { 30 | log.Printf("heartbeat failed for %s: %s", asgEvent.EC2InstanceId, err) 31 | } 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/pkg/lifecycle/suite_test.go: -------------------------------------------------------------------------------- 1 | package lifecycle_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestLifecycleHandler(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "LifecycleHandlerSuite") 13 | } 14 | -------------------------------------------------------------------------------- /components/aws-node-lifecycle-hook/pkg/tools/tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | package tools 4 | 5 | import ( 6 | _ "github.com/maxbrunsfeld/counterfeiter/v6" 7 | ) 8 | 9 | // This file imports packages that are used when running go generate, or used 10 | // during the development process but not otherwise depended on by built code. 11 | -------------------------------------------------------------------------------- /components/aws-ssm-agent/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM amazonlinux:2 2 | 3 | RUN yum update -y && \ 4 | yum install -y systemd curl tar sudo && \ 5 | yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm 6 | 7 | WORKDIR /opt/amazon/ssm/ 8 | 9 | CMD ["amazon-ssm-agent", "start"] 10 | -------------------------------------------------------------------------------- /components/aws-ssm-agent/README: -------------------------------------------------------------------------------- 1 | This directory just holds a Dockerfile used to build the AWS SSM agent container we run inside EKS 2 | -------------------------------------------------------------------------------- /components/canary/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang@sha256:9fdb74150f8d8b07ee4b65a4f00ca007e5ede5481fa06e9fd33710890a624331 as builder 2 | 3 | ADD . /go/src/github.com/alphagov/gsp-canary 4 | WORKDIR /go/src/github.com/alphagov/gsp-canary 5 | 6 | RUN go get ./... && \ 7 | CGO_ENABLED=0 GOOS=linux go build -o canary -ldflags "-X main.BuildTimestamp=`date +%s`" . 
8 | 9 | FROM alpine@sha256:08d6ca16c60fe7490c03d10dc339d9fd8ea67c6466dea8d558526b1330a85930 10 | RUN adduser -S -D -H -h /app appuser 11 | USER appuser 12 | COPY --from=builder /go/src/github.com/alphagov/gsp-canary/canary /app/ 13 | WORKDIR /app 14 | EXPOSE 8081 15 | CMD ["./canary"] 16 | -------------------------------------------------------------------------------- /components/canary/Makefile: -------------------------------------------------------------------------------- 1 | CANARY_IMAGE_NAME = govsvc/gsp-canary 2 | 3 | .PHONY: all 4 | all: canary 5 | 6 | canary: 7 | docker build -t $(CANARY_IMAGE_NAME) . 8 | 9 | -------------------------------------------------------------------------------- /components/canary/README.md: -------------------------------------------------------------------------------- 1 | # gsp-canary 2 | 3 | The canary is a monitoring tool which is continuously changed, built, and 4 | deployed to a Kubernetes cluster. 5 | 6 | The canary exposes a health check endpoint with metrics that can be gathered by 7 | Prometheus. 8 | 9 | The intention is to smoke out any problems with the pipeline. 10 | 11 | It should monitor itself wherever possible, including testing whether or not its 12 | own age has passed a threshold, which might indicate that a problem exists with 13 | the deployment process. 14 | -------------------------------------------------------------------------------- /components/canary/chart/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *~ 18 | # Various IDEs 19 | .project 20 | .idea/ 21 | *.tmproj 22 | -------------------------------------------------------------------------------- /components/canary/chart/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | appVersion: "1.0" 3 | description: A Helm chart for Kubernetes 4 | name: gsp-canary 5 | version: 0.1.0 6 | -------------------------------------------------------------------------------- /components/canary/chart/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* vim: set filetype=mustache: */}} 2 | {{/* 3 | Expand the name of the chart. 4 | */}} 5 | {{- define "gsp-canary.name" -}} 6 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} 7 | {{- end -}} 8 | 9 | {{/* 10 | Create a default fully qualified app name. 11 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 12 | If release name contains chart name it will be used as a full name. 13 | */}} 14 | {{- define "gsp-canary.fullname" -}} 15 | {{- if .Values.fullnameOverride -}} 16 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} 17 | {{- else -}} 18 | {{- $name := default .Chart.Name .Values.nameOverride -}} 19 | {{- if contains $name .Release.Name -}} 20 | {{- .Release.Name | trunc 63 | trimSuffix "-" -}} 21 | {{- else -}} 22 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} 23 | {{- end -}} 24 | {{- end -}} 25 | {{- end -}} 26 | 27 | {{/* 28 | Create chart name and version as used by the chart label. 
29 | */}} 30 | {{- define "gsp-canary.chart" -}} 31 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} 32 | {{- end -}} 33 | -------------------------------------------------------------------------------- /components/canary/chart/templates/gateway.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: Gateway 3 | metadata: 4 | name: {{ include "gsp-canary.fullname" . }}-ingress 5 | labels: 6 | app.kubernetes.io/name: {{ include "gsp-canary.name" . }} 7 | helm.sh/chart: {{ include "gsp-canary.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | annotations: 11 | externaldns.k8s.io/namespace: {{ .Release.Namespace }} 12 | spec: 13 | selector: 14 | istio: {{ .Release.Namespace }}-ingressgateway 15 | servers: 16 | - port: 17 | number: 80 18 | name: http 19 | protocol: HTTP 20 | tls: 21 | httpsRedirect: true 22 | hosts: 23 | - "canary.{{ .Release.Namespace }}.{{ .Values.global.cluster.domain }}" 24 | - port: 25 | number: 443 26 | name: https 27 | protocol: HTTPS 28 | tls: 29 | mode: SIMPLE 30 | serverCertificate: sds 31 | privateKey: sds 32 | credentialName: {{ include "gsp-canary.fullname" . }}-ingress-certificate 33 | hosts: 34 | - "canary.{{ .Release.Namespace }}.{{ .Values.global.cluster.domain }}" 35 | -------------------------------------------------------------------------------- /components/canary/chart/templates/ingress-certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1alpha2 2 | kind: Certificate 3 | metadata: 4 | name: {{ include "gsp-canary.fullname" . }}-ingress 5 | namespace: {{ .Release.Namespace }} 6 | spec: 7 | secretName: {{ include "gsp-canary.fullname" . 
}}-ingress-certificate 8 | dnsNames: 9 | - "canary.{{ .Release.Namespace }}.{{ .Values.global.cluster.domain }}" 10 | issuerRef: 11 | name: letsencrypt-r53 12 | kind: ClusterIssuer 13 | -------------------------------------------------------------------------------- /components/canary/chart/templates/prometheus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: {{ include "gsp-canary.fullname" . }} 5 | labels: 6 | app.kubernetes.io/name: {{ include "gsp-canary.name" . }} 7 | helm.sh/chart: {{ include "gsp-canary.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | groups: 12 | - name: {{ include "gsp-canary.fullname" . }} 13 | rules: 14 | - alert: CanaryRotationOverdue 15 | annotations: 16 | message: The Canary rotation is overdue. Check in-cluster concourse. 17 | expr: time() - max(canary_chart_commit_timestamp{namespace="{{ .Release.Namespace }}"}) without (pod) > 600 18 | for: 15m 19 | labels: 20 | severity: critical 21 | layer: cicd 22 | -------------------------------------------------------------------------------- /components/canary/chart/templates/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | {{- if .Values.service.monitor.create }} 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: {{ include "gsp-canary.fullname" . 
}} 6 | labels: 7 | release: {{ .Values.service.monitor.release }} 8 | spec: 9 | selector: 10 | matchLabels: 11 | app.kubernetes.io/instance: {{ .Release.Name }} 12 | endpoints: 13 | - port: {{ .Values.service.port_name }} 14 | scheme: https 15 | tlsConfig: 16 | caFile: /etc/prometheus/secrets/istio.gsp-prometheus-operator-prometheus/root-cert.pem 17 | certFile: /etc/prometheus/secrets/istio.gsp-prometheus-operator-prometheus/cert-chain.pem 18 | keyFile: /etc/prometheus/secrets/istio.gsp-prometheus-operator-prometheus/key.pem 19 | insecureSkipVerify: true # prometheus does not support secure naming. 20 | {{- end }} 21 | -------------------------------------------------------------------------------- /components/canary/chart/templates/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: {{ include "gsp-canary.fullname" . }} 5 | labels: 6 | app.kubernetes.io/name: {{ include "gsp-canary.name" . }} 7 | helm.sh/chart: {{ include "gsp-canary.chart" . }} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | type: {{ .Values.service.type }} 12 | ports: 13 | - port: {{ .Values.service.port }} 14 | targetPort: http 15 | protocol: TCP 16 | name: {{ .Values.service.port_name }} 17 | selector: 18 | app.kubernetes.io/name: {{ include "gsp-canary.name" . }} 19 | app.kubernetes.io/instance: {{ .Release.Name }} 20 | -------------------------------------------------------------------------------- /components/canary/chart/templates/virtual-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.istio.io/v1alpha3 2 | kind: VirtualService 3 | metadata: 4 | name: {{ include "gsp-canary.fullname" . }} 5 | labels: 6 | app.kubernetes.io/name: {{ include "gsp-canary.name" . }} 7 | helm.sh/chart: {{ include "gsp-canary.chart" . 
}} 8 | app.kubernetes.io/instance: {{ .Release.Name }} 9 | app.kubernetes.io/managed-by: {{ .Release.Service }} 10 | spec: 11 | hosts: 12 | - canary.{{ .Release.Namespace }}.{{ .Values.global.cluster.domain }} 13 | gateways: 14 | - {{ include "gsp-canary.fullname" . }}-ingress 15 | http: 16 | - route: 17 | - destination: 18 | host: {{ include "gsp-canary.fullname" . }} 19 | port: 20 | number: {{ .Values.service.port }} 21 | exportTo: 22 | - "." 23 | -------------------------------------------------------------------------------- /components/canary/chart/values.yaml: -------------------------------------------------------------------------------- 1 | # Default values for gsp-canary. 2 | # This is a YAML-formatted file. 3 | # Declare variables to be passed into your templates. 4 | 5 | global: 6 | cluster: 7 | domain: 8 | name: 9 | 10 | replicaCount: 1 11 | 12 | canary: 13 | image: 14 | repository: govsvc/gsp-canary 15 | tag: "1544111155" 16 | chartCommitTimestamp: "1544088812" 17 | 18 | nameOverride: "" 19 | fullnameOverride: "" 20 | 21 | service: 22 | type: ClusterIP 23 | port: 8081 24 | port_name: http 25 | monitor: 26 | create: true 27 | release: monitoring-system 28 | 29 | resources: {} 30 | # We usually recommend not to specify default resources and to leave this as a conscious 31 | # choice for the user. This also increases chances charts run on environments with little 32 | # resources, such as Minikube. If you do want to specify resources, uncomment the following 33 | # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
34 | # limits: 35 | # cpu: 100m 36 | # memory: 128Mi 37 | # requests: 38 | # cpu: 100m 39 | # memory: 128Mi 40 | 41 | nodeSelector: {} 42 | 43 | tolerations: [] 44 | 45 | affinity: {} 46 | -------------------------------------------------------------------------------- /components/canary/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "log" 5 | "net/http" 6 | "os" 7 | "strconv" 8 | 9 | "github.com/prometheus/client_golang/prometheus" 10 | "github.com/prometheus/client_golang/prometheus/promhttp" 11 | ) 12 | 13 | var BuildTimestamp string 14 | var BuildTimeUnixFloat float64 15 | var ChartCommitTimeUnixFloat float64 16 | 17 | func init() { 18 | var err error 19 | BuildTimeUnixFloat, err = strconv.ParseFloat(BuildTimestamp, 64) 20 | 21 | if err != nil { 22 | panic(err) 23 | } 24 | 25 | ChartCommitTimeUnixFloat, err = strconv.ParseFloat( 26 | os.Getenv("CHART_COMMIT_TIMESTAMP"), 64, 27 | ) 28 | 29 | if err != nil { 30 | panic(err) 31 | } 32 | } 33 | 34 | func main() { 35 | http.Handle("/metrics", promhttp.Handler()) 36 | 37 | buildTimeMetric := prometheus.NewGauge( 38 | prometheus.GaugeOpts{ 39 | Name: "canary_build_timestamp", 40 | }, 41 | ) 42 | buildTimeMetric.Set(BuildTimeUnixFloat) 43 | prometheus.DefaultRegisterer.MustRegister(buildTimeMetric) 44 | 45 | chartCommitTimeMetric := prometheus.NewGauge( 46 | prometheus.GaugeOpts{ 47 | Name: "canary_chart_commit_timestamp", 48 | }, 49 | ) 50 | chartCommitTimeMetric.Set(ChartCommitTimeUnixFloat) 51 | prometheus.DefaultRegisterer.MustRegister(chartCommitTimeMetric) 52 | 53 | log.Fatal(http.ListenAndServe(":8081", nil)) 54 | } 55 | -------------------------------------------------------------------------------- /components/cloudhsm-client-test/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:xenial 2 | 3 | RUN apt-get update && \ 4 | apt-get install -y curl \ 5 | netcat \ 6 | python3 
\ 7 | redis-tools \ 8 | telnet \ 9 | vim \ 10 | wget && \ 11 | wget https://s3.amazonaws.com/cloudhsmv2-software/CloudHsmClient/Xenial/cloudhsm-client_latest_amd64.deb && \ 12 | dpkg -i cloudhsm-client_latest_amd64.deb; apt-get install -f -y && \ 13 | apt-get clean 14 | -------------------------------------------------------------------------------- /components/cloudhsm-client-test/cloudhsm-test-deployment.yaml: -------------------------------------------------------------------------------- 1 | # Note: To actually use this to test CloudHSM connectivity, you will need to 2 | # customise this to have a "talksToHsm" label, as well as be inside a namespace 3 | # with its own "talksToHsm" label. 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | name: cloudhsm-test 8 | namespace: default 9 | labels: 10 | app: cloudhsm-test 11 | spec: 12 | replicas: 1 13 | selector: 14 | matchLabels: 15 | app: cloudhsm-test 16 | template: 17 | metadata: 18 | labels: 19 | app: cloudhsm-test 20 | spec: 21 | containers: 22 | - name: cloudhsm-test 23 | image: govsvc/cloudhsm-client-test:0.0.1561639958 24 | command: 25 | - /bin/sleep 26 | args: 27 | - "999999" 28 | -------------------------------------------------------------------------------- /components/concourse-github-resource/.gitignore: -------------------------------------------------------------------------------- 1 | tmp 2 | -------------------------------------------------------------------------------- /components/concourse-github-resource/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM concourse/git-resource:1.1.1 2 | 3 | RUN apk update 4 | 5 | RUN curl https://github.com/web-flow.gpg -o /tmp/web-flow.gpg && \ 6 | gpg --import /tmp/web-flow.gpg && \ 7 | rm /tmp/web-flow.gpg 8 | 9 | RUN mkdir -p /opt/resource/origin && \ 10 | mv /opt/resource/check /opt/resource/check.origin && \ 11 | mv /opt/resource/in /opt/resource/in.origin && \ 12 | mv /opt/resource/out 
/opt/resource/out.origin 13 | 14 | COPY ./assets/check /opt/resource/ 15 | COPY ./assets/in /opt/resource/ 16 | COPY ./assets/out /opt/resource/ 17 | -------------------------------------------------------------------------------- /components/concourse-github-resource/README.md: -------------------------------------------------------------------------------- 1 | # concourse-github-resource 2 | 3 | GitHub resource for Concourse that enforces a minimum number of GitHub approvals. This relies heavily on the [Concourse `git-resource`](https://github.com/concourse/git-resource). 4 | 5 | ## Source configuration 6 | 7 | All the required configuration for the Concourse `git-resource` will be required along with: 8 | 9 | * `organization` *Required.* The GitHub organization the repo is in. 10 | * `repository` *Required.* The repository name. 11 | * `github_api_token` *Required.* A GitHub API token. 12 | * `approvers`*Required.* A list GitHub usernames of approvers. 13 | * `required_approval_count` *Required.* The minimum number of approvals required to proceed. 14 | 15 | ## Run tests 16 | ``` 17 | rm -rf tmp && cat test.json | docker run -v $PWD/tmp:/mnt/myapp -i $(docker build -q .) 
#!/usr/bin/env bash
# Concourse resource `check` entrypoint.
#
# The Dockerfile moves the upstream git-resource check script to
# /opt/resource/check.origin; this wrapper simply delegates to it,
# forwarding all arguments (and stdin) unchanged.

set -eu

exec /opt/resource/check.origin "$@"
Test binary, build with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Kubernetes Generated files - skip generated files, except for vendored files 17 | 18 | !vendor/**/zz_generated.* 19 | 20 | # editor and IDE paraphernalia 21 | .idea 22 | *.swp 23 | *.swo 24 | *~ 25 | -------------------------------------------------------------------------------- /components/concourse-operator/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.13 as builder 2 | 3 | # install dep (required when not vendored) 4 | RUN wget https://github.com/golang/dep/releases/download/v0.5.3/dep-linux-amd64 \ 5 | && mv dep-linux-amd64 /bin/dep \ 6 | && chmod +x /bin/dep 7 | 8 | # install kubebuilder (required for tests) 9 | RUN wget https://github.com/kubernetes-sigs/kubebuilder/releases/download/v1.0.7/kubebuilder_1.0.7_linux_amd64.tar.gz \ 10 | && tar xvzf kubebuilder_1.0.7_linux_amd64.tar.gz \ 11 | && mkdir -p /usr/local \ 12 | && mv kubebuilder_1.0.7_linux_amd64 /usr/local/kubebuilder 13 | ENV PATH="${PATH}:/usr/local/kubebuilder/bin" 14 | 15 | # setup context 16 | WORKDIR /go/src/github.com/alphagov/gsp/components/concourse-operator 17 | ENV CGO_ENABLED=0 18 | ENV GOOS=linux 19 | ENV GOARCH=amd64 20 | COPY . . 21 | 22 | # install dependencies 23 | RUN sh -c 'if [ -e ./vendor ]; then echo skipping dep ensure as found vendor dir 1>&2; else dep ensure -vendor-only; fi' 24 | 25 | # run unit tests 26 | ENV KUBEBUILDER_CONTROLPLANE_START_TIMEOUT=1m 27 | ENV KUBEBUILDER_CONTROLPLANE_STOP_TIMEOUT=1m 28 | RUN go test -v ./pkg/... ./cmd/... 
29 | 30 | # build manager 31 | RUN go build -a -o manager ./cmd/manager 32 | 33 | # CA certs 34 | FROM alpine:3.2 as certs 35 | RUN apk add ca-certificates --update 36 | 37 | # Minimal image for controller 38 | FROM alpine:3.2 39 | WORKDIR /root/ 40 | COPY --from=builder /go/src/github.com/alphagov/gsp/components/concourse-operator/manager /manager 41 | COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ 42 | ENTRYPOINT ["/manager"] 43 | -------------------------------------------------------------------------------- /components/concourse-operator/PROJECT: -------------------------------------------------------------------------------- 1 | version: "1" 2 | domain: k8s.io 3 | repo: github.com/alphagov/gsp/components/concourse-operator 4 | -------------------------------------------------------------------------------- /components/concourse-operator/config/crds/concourse_v1beta1_pipeline.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apiextensions.k8s.io/v1beta1 2 | kind: CustomResourceDefinition 3 | metadata: 4 | creationTimestamp: null 5 | labels: 6 | controller-tools.k8s.io: "1.0" 7 | name: pipelines.concourse.govsvc.uk 8 | spec: 9 | group: concourse.govsvc.uk 10 | names: 11 | kind: Pipeline 12 | plural: pipelines 13 | scope: Namespaced 14 | validation: 15 | openAPIV3Schema: 16 | properties: 17 | apiVersion: 18 | type: string 19 | kind: 20 | type: string 21 | metadata: 22 | type: object 23 | spec: 24 | properties: 25 | exposed: 26 | type: boolean 27 | paused: 28 | type: boolean 29 | config: 30 | type: object 31 | pipelineString: 32 | type: string 33 | type: object 34 | status: 35 | type: object 36 | version: v1beta1 37 | status: 38 | acceptedNames: 39 | kind: "" 40 | plural: "" 41 | conditions: [] 42 | storedVersions: [] 43 | -------------------------------------------------------------------------------- /components/concourse-operator/config/default/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | # Adds namespace to all resources. 2 | namespace: concourse-operator 3 | 4 | # Value of this field is prepended to the 5 | # names of all resources, e.g. a deployment named 6 | # "wordpress" becomes "alices-wordpress". 7 | # Note that it should also match with the prefix (text before '-') of the namespace 8 | # field above. 9 | namePrefix: concourse-operator- 10 | 11 | # Labels to add to all resources and selectors. 12 | #commonLabels: 13 | # someName: someValue 14 | 15 | # Each entry in this list must resolve to an existing 16 | # resource definition in YAML. These are the resource 17 | # files that kustomize reads, modifies and emits as a 18 | # YAML string, with resources separated by document 19 | # markers ("---"). 20 | resources: 21 | - ../rbac/rbac_role.yaml 22 | - ../rbac/rbac_role_binding.yaml 23 | - ../manager/manager.yaml 24 | 25 | patches: 26 | - manager_image_patch.yaml 27 | 28 | vars: 29 | - name: WEBHOOK_SECRET_NAME 30 | objref: 31 | kind: Secret 32 | name: concourse-operator-webhook 33 | apiVersion: v1 34 | -------------------------------------------------------------------------------- /components/concourse-operator/config/default/manager_image_patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: concourse-operator 5 | namespace: system 6 | spec: 7 | template: 8 | spec: 9 | containers: 10 | # Change the value of image field below to your controller image URL 11 | - image: govsvc/concourse-operator:latest 12 | name: manager 13 | -------------------------------------------------------------------------------- /components/concourse-operator/config/rbac/rbac_role.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | creationTimestamp: null 5 | 
name: manager-role 6 | rules: 7 | - apiGroups: 8 | - concourse.govsvc.uk 9 | resources: 10 | - pipelines 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - create 16 | - update 17 | - patch 18 | - delete 19 | - apiGroups: 20 | - concourse.govsvc.uk 21 | resources: 22 | - teams 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - create 28 | - update 29 | - patch 30 | - delete 31 | - apiGroups: 32 | - concourse.govsvc.uk 33 | resources: 34 | - teams/status 35 | verbs: 36 | - get 37 | - update 38 | - patch 39 | - apiGroups: 40 | - admissionregistration.k8s.io 41 | resources: 42 | - mutatingwebhookconfigurations 43 | - validatingwebhookconfigurations 44 | verbs: 45 | - get 46 | - list 47 | - watch 48 | - create 49 | - update 50 | - patch 51 | - delete 52 | - apiGroups: 53 | - "" 54 | resources: 55 | - secrets 56 | verbs: 57 | - get 58 | - list 59 | - watch 60 | - create 61 | - update 62 | - patch 63 | - delete 64 | - apiGroups: 65 | - "" 66 | resources: 67 | - services 68 | verbs: 69 | - get 70 | - list 71 | - watch 72 | - create 73 | - update 74 | - patch 75 | - delete 76 | -------------------------------------------------------------------------------- /components/concourse-operator/config/rbac/rbac_role_binding.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | creationTimestamp: null 5 | name: manager-rolebinding 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: manager-role 10 | subjects: 11 | - kind: ServiceAccount 12 | name: default 13 | namespace: system 14 | -------------------------------------------------------------------------------- /components/concourse-operator/config/samples/concourse_v1beta1_pipeline.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: concourse.govsvc.uk/v1beta1 2 | kind: Pipeline 3 | metadata: 4 | labels: 5 | 
controller-tools.k8s.io: "1.0" 6 | name: pipeline-sample 7 | spec: 8 | config: 9 | jobs: 10 | - name: hello-world 11 | plan: 12 | - task: hello-world 13 | config: 14 | platform: linux 15 | image_resource: 16 | type: docker-image 17 | source: {repository: busybox} 18 | run: 19 | path: echo 20 | args: 21 | - hello world 22 | 23 | -------------------------------------------------------------------------------- /components/concourse-operator/config/samples/concourse_v1beta1_pipeline_BAD.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: concourse.govsvc.uk/v1beta1 2 | kind: Pipeline 3 | metadata: 4 | labels: 5 | controller-tools.k8s.io: "1.0" 6 | name: pipeline-sample 7 | spec: 8 | config: 9 | resources: 10 | - name: bad 11 | type: bad-not-exist 12 | jobs: 13 | - name: hello-world 14 | plan: 15 | - task: hello-world 16 | config: 17 | platform: linux 18 | image_resource: 19 | type: docker-image 20 | source: {repository: busybox} 21 | run: 22 | path: echo 23 | args: 24 | - hello world 25 | 26 | -------------------------------------------------------------------------------- /components/concourse-operator/config/samples/concourse_v1beta1_team.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: concourse.govsvc.uk/v1beta1 2 | kind: Team 3 | metadata: 4 | labels: 5 | controller-tools.k8s.io: "1.0" 6 | name: team-sample 7 | spec: 8 | roles: 9 | - name: owner 10 | github: 11 | users: ["admin"] 12 | - name: member 13 | github: 14 | teams: ["org:team"] 15 | - name: viewer 16 | github: 17 | orgs: ["org"] 18 | local: 19 | users: ["visitor"] 20 | -------------------------------------------------------------------------------- /components/concourse-operator/hack/boilerplate.go.txt: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in 
compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/apis/addtoscheme_concourse_v1beta1.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | package apis 17 | 18 | import ( 19 | "github.com/alphagov/gsp/components/concourse-operator/pkg/apis/concourse/v1beta1" 20 | ) 21 | 22 | func init() { 23 | // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back 24 | AddToSchemes = append(AddToSchemes, v1beta1.SchemeBuilder.AddToScheme) 25 | } 26 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/apis/apis.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | // Generate deepcopy for apis 17 | //go:generate go run ../../vendor/k8s.io/code-generator/cmd/deepcopy-gen/main.go -O zz_generated.deepcopy -i ./... -h ../../hack/boilerplate.go.txt 18 | 19 | // Package apis contains Kubernetes API groups. 
20 | package apis 21 | 22 | import ( 23 | "k8s.io/apimachinery/pkg/runtime" 24 | ) 25 | 26 | // AddToSchemes may be used to add all resources defined in the project to a Scheme 27 | var AddToSchemes runtime.SchemeBuilder 28 | 29 | // AddToScheme adds all Resources to the Scheme 30 | func AddToScheme(s *runtime.Scheme) error { 31 | return AddToSchemes.AddToScheme(s) 32 | } 33 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/apis/concourse/group.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | // Package concourse contains concourse API versions 17 | package concourse 18 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/apis/concourse/v1beta1/doc.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | // Package v1beta1 contains API Schema definitions for the concourse v1beta1 API group 17 | // +k8s:openapi-gen=true 18 | // +k8s:deepcopy-gen=package,register 19 | // +k8s:conversion-gen=github.com/alphagov/gsp/components/concourse-operator/pkg/apis/concourse 20 | // +k8s:defaulter-gen=TypeMeta 21 | // +groupName=concourse.govsvc.uk 22 | package v1beta1 23 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/apis/concourse/v1beta1/v1beta1_suite_test.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | package v1beta1 17 | 18 | import ( 19 | "log" 20 | "os" 21 | "path/filepath" 22 | "testing" 23 | 24 | "k8s.io/client-go/kubernetes/scheme" 25 | "k8s.io/client-go/rest" 26 | "sigs.k8s.io/controller-runtime/pkg/client" 27 | "sigs.k8s.io/controller-runtime/pkg/envtest" 28 | ) 29 | 30 | var cfg *rest.Config 31 | var c client.Client 32 | 33 | func TestMain(m *testing.M) { 34 | t := &envtest.Environment{ 35 | CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "config", "crds")}, 36 | } 37 | 38 | err := SchemeBuilder.AddToScheme(scheme.Scheme) 39 | if err != nil { 40 | log.Fatal(err) 41 | } 42 | 43 | if cfg, err = t.Start(); err != nil { 44 | log.Fatal(err) 45 | } 46 | 47 | if c, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}); err != nil { 48 | log.Fatal(err) 49 | } 50 | 51 | code := m.Run() 52 | t.Stop() 53 | os.Exit(code) 54 | } 55 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/controller/add_pipeline.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package controller 17 | 18 | import ( 19 | "github.com/alphagov/gsp/components/concourse-operator/pkg/controller/pipeline" 20 | ) 21 | 22 | func init() { 23 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 
24 | AddToManagerFuncs = append(AddToManagerFuncs, pipeline.Add) 25 | } 26 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/controller/add_team.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package controller 17 | 18 | import ( 19 | "github.com/alphagov/gsp/components/concourse-operator/pkg/controller/team" 20 | ) 21 | 22 | func init() { 23 | // AddToManagerFuncs is a list of functions to create controllers and add them to a manager. 24 | AddToManagerFuncs = append(AddToManagerFuncs, team.Add) 25 | } 26 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/controller/controller.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package controller 17 | 18 | import ( 19 | "github.com/concourse/concourse/go-concourse/concourse" 20 | "sigs.k8s.io/controller-runtime/pkg/manager" 21 | ) 22 | 23 | // AddToManagerFuncs is a list of functions to add all Controllers to the Manager 24 | var AddToManagerFuncs []func(manager.Manager, func(team string) (concourse.Client, error)) error 25 | 26 | // AddToManager adds all Controllers to the Manager 27 | func AddToManager(m manager.Manager) error { 28 | clientFn := NewClientFromEnv 29 | for _, f := range AddToManagerFuncs { 30 | if err := f(m, clientFn); err != nil { 31 | return err 32 | } 33 | } 34 | return nil 35 | } 36 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/webhook/add_default_server.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package webhook 17 | 18 | import ( 19 | server "github.com/alphagov/gsp/components/concourse-operator/pkg/webhook/default_server" 20 | ) 21 | 22 | func init() { 23 | // AddToManagerFuncs is a list of functions to create webhook servers and add them to a manager. 
24 | AddToManagerFuncs = append(AddToManagerFuncs, server.Add) 25 | } 26 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/webhook/default_server/add_validating_pipeline.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package defaultserver 17 | 18 | import ( 19 | "fmt" 20 | 21 | "github.com/alphagov/gsp/components/concourse-operator/pkg/webhook/default_server/pipeline/validating" 22 | ) 23 | 24 | func init() { 25 | for k, v := range validating.Builders { 26 | _, found := builderMap[k] 27 | if found { 28 | log.V(1).Info(fmt.Sprintf( 29 | "conflicting webhook builder names in builder map: %v", k)) 30 | } 31 | builderMap[k] = v 32 | } 33 | for k, v := range validating.HandlerMap { 34 | _, found := HandlerMap[k] 35 | if found { 36 | log.V(1).Info(fmt.Sprintf( 37 | "conflicting webhook builder names in handler map: %v", k)) 38 | } 39 | _, found = builderMap[k] 40 | if !found { 41 | log.V(1).Info(fmt.Sprintf( 42 | "can't find webhook builder name %q in builder map", k)) 43 | continue 44 | } 45 | HandlerMap[k] = v 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/webhook/default_server/pipeline/validating/create_update_webhook.go: 
-------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package validating 17 | 18 | import ( 19 | concoursev1beta1 "github.com/alphagov/gsp/components/concourse-operator/pkg/apis/concourse/v1beta1" 20 | admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" 21 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission/builder" 22 | ) 23 | 24 | func init() { 25 | builderName := "validating-create-update-pipeline" 26 | Builders[builderName] = builder. 27 | NewWebhookBuilder(). 28 | Name(builderName+".k8s.io"). 29 | Path("/"+builderName). 30 | Validating(). 31 | Operations(admissionregistrationv1beta1.Create, admissionregistrationv1beta1.Update). 32 | FailurePolicy(admissionregistrationv1beta1.Fail). 33 | ForType(&concoursev1beta1.Pipeline{}) 34 | } 35 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/webhook/default_server/pipeline/validating/webhooks.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | package validating 17 | 18 | import ( 19 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission" 20 | "sigs.k8s.io/controller-runtime/pkg/webhook/admission/builder" 21 | ) 22 | 23 | var ( 24 | // Builders contain admission webhook builders 25 | Builders = map[string]*builder.WebhookBuilder{} 26 | // HandlerMap contains admission webhook handlers 27 | HandlerMap = map[string][]admission.Handler{} 28 | ) 29 | -------------------------------------------------------------------------------- /components/concourse-operator/pkg/webhook/webhook.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | package webhook 17 | 18 | import ( 19 | "sigs.k8s.io/controller-runtime/pkg/manager" 20 | ) 21 | 22 | // AddToManagerFuncs is a list of functions to add all Controllers to the Manager 23 | var AddToManagerFuncs []func(manager.Manager) error 24 | 25 | // AddToManager adds all Controllers to the Manager 26 | // +kubebuilder:rbac:groups=admissionregistration.k8s.io,resources=mutatingwebhookconfigurations;validatingwebhookconfigurations,verbs=get;list;watch;create;update;patch;delete 27 | // +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;patch;delete 28 | // +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch;create;update;patch;delete 29 | func AddToManager(m manager.Manager) error { 30 | for _, f := range AddToManagerFuncs { 31 | if err := f(m); err != nil { 32 | return err 33 | } 34 | } 35 | return nil 36 | } 37 | -------------------------------------------------------------------------------- /components/concourse-task-toolbox/README.md: -------------------------------------------------------------------------------- 1 | 2 | # govsvc/task-toolbox 3 | 4 | ## Overview 5 | 6 | Image containing tools for concourse task scripts. 7 | 8 | ## Versioning 9 | 10 | Please bump `VERSION` file 11 | 12 | ## Building 13 | 14 | ``` 15 | docker build -t govsvc/task-toolbox:$(cat VERSION) . 
16 | ``` 17 | 18 | ## Releasing 19 | 20 | ``` 21 | docker push govsvc/task-toolbox:$(cat VERSION) 22 | ``` 23 | -------------------------------------------------------------------------------- /components/concourse-task-toolbox/VERSION: -------------------------------------------------------------------------------- 1 | 1.5.0 2 | -------------------------------------------------------------------------------- /components/concourse-task-toolbox/bin/aws-assume-role: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | unset AWS_SESSION_TOKEN 6 | 7 | role_arn="$1" 8 | temp_role=$(aws sts assume-role \ 9 | --role-arn "${role_arn}" \ 10 | --role-session-name "concourse-task") 11 | 12 | # Emit eval-able exports. Use jq -r (raw output) rather than piping through xargs: 13 | # xargs mangles values containing quotes or backslashes; quote $temp_role to avoid word splitting. 14 | echo export AWS_ACCESS_KEY_ID=$(echo "$temp_role" | jq -r .Credentials.AccessKeyId) 15 | echo export AWS_SECRET_ACCESS_KEY=$(echo "$temp_role" | jq -r .Credentials.SecretAccessKey) 16 | echo export AWS_SESSION_TOKEN=$(echo "$temp_role" | jq -r .Credentials.SessionToken) 17 | -------------------------------------------------------------------------------- /components/concourse-task-toolbox/bin/determine-platform-version.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import collections 3 | import os 4 | import subprocess 5 | 6 | os.makedirs("platform-version", exist_ok=True) 7 | 8 | print("Picking platform version...") 9 | partial_repos = [ 10 | "platform", 11 | "aws-node-lifecycle-hook-source", 12 | "service-operator-source", 13 | "concourse-task-toolbox-source", 14 | "concourse-operator-source", 15 | "concourse-github-resource-source", 16 | "concourse-terraform-resource-source", 17 | "aws-ssm-agent-source" 18 | ] 19 | 20 | commit_map = collections.Counter() 21 | for partial_repo in partial_repos: 22 | with open(f"{partial_repo}/.git/ref") as f: 23 | commit = f.read() 24 | proc = subprocess.Popen( 25 | # This could probably use 'HEAD' instead of
reading .git/ref 26 | ['git', 'rev-list', '--count', commit.strip()], 27 | env={'GIT_DIR': f'{partial_repo}/.git'}, 28 | stdout=subprocess.PIPE 29 | ) 30 | stdoutdata, _ = proc.communicate() 31 | commit_map[commit] = int(stdoutdata) 32 | 33 | commit, _ = commit_map.most_common()[0] 34 | 35 | print(f"Picked {commit}") 36 | with open('platform-version/ref', 'w') as f: 37 | f.write(commit) 38 | -------------------------------------------------------------------------------- /components/concourse-task-toolbox/bin/setup-kube-deployer: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit \ 4 | -o nounset \ 5 | -o pipefail 6 | 7 | echo "configuring kubectl for deployer" 8 | # quote the jq filter: unquoted .["ca.crt"] undergoes shell quote removal and glob expansion, so jq never sees the intended filter 9 | echo "${KUBERNETES_SERVICE_ACCOUNT}" | jq -r '.["ca.crt"]' > ca.crt 10 | kubectl config set-cluster self --server=https://kubernetes.default --certificate-authority=ca.crt 11 | kubectl config set-credentials deployer --token "${KUBERNETES_TOKEN}" 12 | kubectl config set-context deployer --user deployer --cluster self 13 | kubectl config use-context deployer 14 | -------------------------------------------------------------------------------- /components/concourse-terraform-resource/Dockerfile: -------------------------------------------------------------------------------- 1 | # Terraform 0.12.12 2 | FROM ljfranklin/terraform-resource@sha256:15eee04112da38c0fcbdb9edb86a6b5acff4a800f21cb29b4e30dc58b27e5d0d 3 | 4 | # we need the aws tools and git in the box for some of the local-exec scripts 5 | RUN apk add --update jq python3 py3-pip git terraform zip && \ 6 | pip3 install --upgrade pip && \ 7 | pip3 install awscli && \ 8 | rm /var/cache/apk/* && \ 9 | git config --system credential.helper '!aws codecommit credential-helper $@' && \ 10 | git config --system credential.UseHttpPath true 11 | -------------------------------------------------------------------------------- /components/concourse-terraform-resource/README.md:
-------------------------------------------------------------------------------- 1 | 2 | # govsvc/terraform-resource 3 | 4 | ## Overview 5 | 6 | Concourse resource for running `terraform apply`. 7 | 8 | Extends the [upstream terraform resource](https://github.com/ljfranklin/terraform-resource) to include `awscli`, `git` and `zip` binaries required by `local-exec` scripts. 9 | 10 | ## Versioning 11 | 12 | Please bump `VERSION` file. 13 | 14 | ## Building 15 | 16 | ``` 17 | docker build -t govsvc/terraform-resource:$(cat VERSION) . 18 | ``` 19 | 20 | ## Releasing 21 | 22 | ``` 23 | docker push govsvc/terraform-resource:$(cat VERSION) 24 | ``` 25 | -------------------------------------------------------------------------------- /components/concourse-terraform-resource/VERSION: -------------------------------------------------------------------------------- 1 | 0.14.0 2 | -------------------------------------------------------------------------------- /components/service-operator/.dockerignore: -------------------------------------------------------------------------------- 1 | vendor/ 2 | bin/ 3 | -------------------------------------------------------------------------------- /components/service-operator/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Binaries for programs and plugins 3 | *.exe 4 | *.exe~ 5 | *.dll 6 | *.so 7 | *.dylib 8 | bin 9 | 10 | # Test binary, build with `go test -c` 11 | *.test 12 | 13 | # Output of the go coverage tool, specifically when used with LiteIDE 14 | *.out 15 | 16 | # Kubernetes Generated files - skip generated files, except for vendored files 17 | 18 | !vendor/**/zz_generated.* 19 | 20 | # editor and IDE paraphernalia 21 | .idea 22 | *.swp 23 | *.swo 24 | *~ 25 | 26 | # vendor 27 | vendor/ 28 | 29 | # ignore manifests (they are used to generate the chart) 30 | /config/crd/bases 31 | /config/webhook 32 | 
-------------------------------------------------------------------------------- /components/service-operator/PROJECT: -------------------------------------------------------------------------------- 1 | version: "2" 2 | domain: govsvc.uk 3 | repo: github.com/alphagov/gsp/components/service-operator 4 | multigroup: true 5 | resources: 6 | - group: database 7 | version: v1beta1 8 | kind: Postgres 9 | - group: queue 10 | version: v1beta1 11 | kind: SQS 12 | - group: access 13 | version: v1beta1 14 | kind: Principal 15 | -------------------------------------------------------------------------------- /components/service-operator/apis/access/v1beta1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | // Package v1beta1 contains API Schema definitions for the access v1beta1 API group 17 | // +kubebuilder:object:generate=true 18 | // +groupName=access.govsvc.uk 19 | package v1beta1 20 | 21 | import ( 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "sigs.k8s.io/controller-runtime/pkg/scheme" 24 | ) 25 | 26 | var ( 27 | // GroupVersion is group version used to register these objects 28 | GroupVersion = schema.GroupVersion{Group: "access.govsvc.uk", Version: "v1beta1"} 29 | 30 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 31 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 32 | 33 | // AddToScheme adds the types in this group-version to the given scheme. 34 | AddToScheme = SchemeBuilder.AddToScheme 35 | ) 36 | -------------------------------------------------------------------------------- /components/service-operator/apis/access/v1beta1/suite_test.go: -------------------------------------------------------------------------------- 1 | package v1beta1_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/envtest" 10 | ) 11 | 12 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 13 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 14 | 15 | func TestTypesSuite(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | 18 | RunSpecsWithDefaultAndCustomReporters(t, 19 | "Suite", 20 | []Reporter{envtest.NewlineReporter{}}) 21 | } 22 | -------------------------------------------------------------------------------- /components/service-operator/apis/database/v1beta1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 
5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | // Package v1beta1 contains API Schema definitions for the database v1beta1 API group 17 | // +kubebuilder:object:generate=true 18 | // +groupName=database.govsvc.uk 19 | package v1beta1 20 | 21 | import ( 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "sigs.k8s.io/controller-runtime/pkg/scheme" 24 | ) 25 | 26 | var ( 27 | // GroupVersion is group version used to register these objects 28 | GroupVersion = schema.GroupVersion{Group: "database.govsvc.uk", Version: "v1beta1"} 29 | 30 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 31 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 32 | 33 | // AddToScheme adds the types in this group-version to the given scheme. 34 | AddToScheme = SchemeBuilder.AddToScheme 35 | ) 36 | -------------------------------------------------------------------------------- /components/service-operator/apis/database/v1beta1/suite_test.go: -------------------------------------------------------------------------------- 1 | package v1beta1_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/envtest" 10 | ) 11 | 12 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 13 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
14 | 15 | func TestTypesSuite(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | 18 | RunSpecsWithDefaultAndCustomReporters(t, 19 | "Suite", 20 | []Reporter{envtest.NewlineReporter{}}) 21 | } 22 | -------------------------------------------------------------------------------- /components/service-operator/apis/queue/v1beta1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ 15 | 16 | // Package v1beta1 contains API Schema definitions for the queue v1beta1 API group 17 | // +kubebuilder:object:generate=true 18 | // +groupName=queue.govsvc.uk 19 | package v1beta1 20 | 21 | import ( 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "sigs.k8s.io/controller-runtime/pkg/scheme" 24 | ) 25 | 26 | var ( 27 | // GroupVersion is group version used to register these objects 28 | GroupVersion = schema.GroupVersion{Group: "queue.govsvc.uk", Version: "v1beta1"} 29 | 30 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 31 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 32 | 33 | // AddToScheme adds the types in this group-version to the given scheme. 
34 | AddToScheme = SchemeBuilder.AddToScheme 35 | ) 36 | -------------------------------------------------------------------------------- /components/service-operator/apis/queue/v1beta1/suite_test.go: -------------------------------------------------------------------------------- 1 | package v1beta1_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/envtest" 10 | ) 11 | 12 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 13 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 14 | 15 | func TestTypesSuite(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | 18 | RunSpecsWithDefaultAndCustomReporters(t, 19 | "Suite", 20 | []Reporter{envtest.NewlineReporter{}}) 21 | } 22 | -------------------------------------------------------------------------------- /components/service-operator/apis/storage/v1beta1/groupversion_info.go: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | */ 15 | 16 | // Package v1beta1 contains API Schema definitions for the queue v1beta1 API group 17 | // +kubebuilder:object:generate=true 18 | // +groupName=storage.govsvc.uk 19 | package v1beta1 20 | 21 | import ( 22 | "k8s.io/apimachinery/pkg/runtime/schema" 23 | "sigs.k8s.io/controller-runtime/pkg/scheme" 24 | ) 25 | 26 | var ( 27 | // GroupVersion is group version used to register these objects 28 | GroupVersion = schema.GroupVersion{Group: "storage.govsvc.uk", Version: "v1beta1"} 29 | 30 | // SchemeBuilder is used to add go types to the GroupVersionKind scheme 31 | SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} 32 | 33 | // AddToScheme adds the types in this group-version to the given scheme. 34 | AddToScheme = SchemeBuilder.AddToScheme 35 | ) 36 | -------------------------------------------------------------------------------- /components/service-operator/apis/storage/v1beta1/suite_test.go: -------------------------------------------------------------------------------- 1 | package v1beta1_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/envtest" 10 | ) 11 | 12 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 13 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
14 | 15 | func TestTypesSuite(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | 18 | RunSpecsWithDefaultAndCustomReporters(t, 19 | "Suite", 20 | []Reporter{envtest.NewlineReporter{}}) 21 | } 22 | -------------------------------------------------------------------------------- /components/service-operator/config/rbac/role-not-patch.yaml: -------------------------------------------------------------------------------- 1 | - apiGroups: 2 | - "" 3 | resources: 4 | - configmaps 5 | - events 6 | - secrets 7 | - serviceaccounts 8 | verbs: 9 | - create 10 | - delete 11 | - get 12 | - list 13 | - patch 14 | - update 15 | - watch 16 | - apiGroups: 17 | - networking.istio.io 18 | resources: 19 | - serviceentries 20 | verbs: 21 | - create 22 | - delete 23 | - get 24 | - list 25 | - patch 26 | - update 27 | - watch 28 | -------------------------------------------------------------------------------- /components/service-operator/controllers/image_repository_cloudformation.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | access "github.com/alphagov/gsp/components/service-operator/apis/access/v1beta1" 5 | storage "github.com/alphagov/gsp/components/service-operator/apis/storage/v1beta1" 6 | "github.com/alphagov/gsp/components/service-operator/internal/aws/cloudformation" 7 | "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" 8 | ) 9 | 10 | func ImageRepositoryCloudFormationController(c sdk.Client) Controller { 11 | return &cloudformation.Controller{ 12 | Kind: &storage.ImageRepository{}, 13 | PrincipalListKind: &access.PrincipalList{}, 14 | CloudFormationClient: &cloudformation.Client{ 15 | Client: c, 16 | }, 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /components/service-operator/controllers/postgres_cloudformation.go: -------------------------------------------------------------------------------- 1 | package controllers 2 
| 3 | import ( 4 | access "github.com/alphagov/gsp/components/service-operator/apis/access/v1beta1" 5 | database "github.com/alphagov/gsp/components/service-operator/apis/database/v1beta1" 6 | "github.com/alphagov/gsp/components/service-operator/internal/aws/cloudformation" 7 | "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" 8 | "github.com/alphagov/gsp/components/service-operator/internal/env" 9 | "github.com/aws/aws-sdk-go/aws" 10 | ) 11 | 12 | // PostgresCloudFormationController creates a Controller instance for provision 13 | // Postgres with cloudformation. 14 | func PostgresCloudFormationController(c sdk.Client) Controller { 15 | return &cloudformation.Controller{ 16 | Kind: &database.Postgres{}, 17 | PrincipalListKind: &access.PrincipalList{}, 18 | CloudFormationClient: &cloudformation.Client{ 19 | Client: c, 20 | }, 21 | Parameters: []*cloudformation.Parameter{ 22 | { 23 | ParameterKey: aws.String(database.VPCSecurityGroupIDParameterName), 24 | ParameterValue: aws.String(env.AWSRDSSecurityGroupID()), 25 | }, 26 | { 27 | ParameterKey: aws.String(database.DBSubnetGroupNameParameterName), 28 | ParameterValue: aws.String(env.AWSRDSSubnetGroupName()), 29 | }, 30 | }, 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /components/service-operator/controllers/redis_cloudformation.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | access "github.com/alphagov/gsp/components/service-operator/apis/access/v1beta1" 5 | cache "github.com/alphagov/gsp/components/service-operator/apis/database/v1beta1" 6 | "github.com/alphagov/gsp/components/service-operator/internal/aws/cloudformation" 7 | "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" 8 | "github.com/alphagov/gsp/components/service-operator/internal/env" 9 | "github.com/aws/aws-sdk-go/aws" 10 | ) 11 | 12 | // RedisCloudFormationController creates a 
Controller instance for provision 13 | // an ElastiCache ReplicationGroup with cloudformation. 14 | func RedisCloudFormationController(c sdk.Client) Controller { 15 | return &cloudformation.Controller{ 16 | Kind: &cache.Redis{}, 17 | PrincipalListKind: &access.PrincipalList{}, 18 | CloudFormationClient: &cloudformation.Client{ 19 | Client: c, 20 | }, 21 | Parameters: []*cloudformation.Parameter{ 22 | { 23 | ParameterKey: aws.String(cache.RedisVPCSecurityGroupIDParameterName), 24 | ParameterValue: aws.String(env.AWSRedisSecurityGroupID()), 25 | }, 26 | { 27 | ParameterKey: aws.String(cache.CacheSubnetGroupParameterName), 28 | ParameterValue: aws.String(env.AWSRedisSubnetGroupName()), 29 | }, 30 | }, 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /components/service-operator/controllers/s3_cloudformation.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | access "github.com/alphagov/gsp/components/service-operator/apis/access/v1beta1" 5 | storage "github.com/alphagov/gsp/components/service-operator/apis/storage/v1beta1" 6 | "github.com/alphagov/gsp/components/service-operator/internal/aws/cloudformation" 7 | "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" 8 | ) 9 | 10 | func S3CloudFormationController(c sdk.Client) Controller { 11 | return &cloudformation.Controller{ 12 | Kind: &storage.S3Bucket{}, 13 | PrincipalListKind: &access.PrincipalList{}, 14 | CloudFormationClient: &cloudformation.Client{ 15 | Client: c, 16 | }, 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /components/service-operator/controllers/sqs_cloudformation.go: -------------------------------------------------------------------------------- 1 | package controllers 2 | 3 | import ( 4 | access "github.com/alphagov/gsp/components/service-operator/apis/access/v1beta1" 5 | queue 
"github.com/alphagov/gsp/components/service-operator/apis/queue/v1beta1" 6 | "github.com/alphagov/gsp/components/service-operator/internal/aws/cloudformation" 7 | "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" 8 | ) 9 | 10 | func SQSCloudFormationController(c sdk.Client) Controller { 11 | return &cloudformation.Controller{ 12 | Kind: &queue.SQS{}, 13 | PrincipalListKind: &access.PrincipalList{}, 14 | CloudFormationClient: &cloudformation.Client{ 15 | Client: c, 16 | }, 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /components/service-operator/examples/image-respoitory.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.govsvc.uk/v1beta1 3 | kind: ImageRepository 4 | metadata: 5 | name: sample 6 | labels: 7 | group.access.govsvc.uk: gsp.examples.test 8 | spec: 9 | aws: {} 10 | -------------------------------------------------------------------------------- /components/service-operator/examples/postgres.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: database.govsvc.uk/v1beta1 3 | kind: Postgres 4 | metadata: 5 | name: postgres-sample 6 | labels: 7 | group.access.govsvc.uk: gsp.examples.test 8 | spec: 9 | aws: 10 | instanceType: db.t3.medium 11 | instanceCount: 1 12 | -------------------------------------------------------------------------------- /components/service-operator/examples/principal.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: access.govsvc.uk/v1beta1 3 | kind: Principal 4 | metadata: 5 | name: principal-sample 6 | labels: 7 | group.access.govsvc.uk: gsp.examples.test 8 | -------------------------------------------------------------------------------- /components/service-operator/examples/s3.yaml: -------------------------------------------------------------------------------- 1 | --- 2 
| apiVersion: storage.govsvc.uk/v1beta1 3 | kind: S3Bucket 4 | metadata: 5 | name: s3-bucket-sample 6 | labels: 7 | group.access.govsvc.uk: gsp.examples.test 8 | spec: 9 | aws: {} 10 | -------------------------------------------------------------------------------- /components/service-operator/examples/sqs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: queue.govsvc.uk/v1beta1 3 | kind: SQS 4 | metadata: 5 | name: sqs-sample 6 | labels: 7 | group.access.govsvc.uk: gsp.examples.test 8 | spec: 9 | aws: 10 | maximumMessageSize: 1024 11 | messageRetentionPeriod: 3600 12 | -------------------------------------------------------------------------------- /components/service-operator/go.mod: -------------------------------------------------------------------------------- 1 | module github.com/alphagov/gsp/components/service-operator 2 | 3 | go 1.13 4 | 5 | require ( 6 | github.com/aws/aws-sdk-go v1.30.13 7 | github.com/awslabs/goformation/v4 v4.8.0 8 | github.com/go-logr/logr v0.1.0 9 | github.com/imdario/mergo v0.3.8 // indirect 10 | github.com/maxbrunsfeld/counterfeiter/v6 v6.2.3 11 | github.com/onsi/ginkgo v1.12.0 12 | github.com/onsi/gomega v1.9.0 13 | github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522 14 | istio.io/istio v0.0.0-20190925083542-b158283f0728 15 | k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b 16 | k8s.io/apimachinery v0.0.0-20190404173353-6a84e37a896d 17 | k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible 18 | sigs.k8s.io/controller-runtime v0.2.0-beta.4 19 | ) 20 | 21 | // avoid jsonpatch v2.0.0 which was yanked and republished and so has 22 | // two different hashes floating around 23 | // https://github.com/gomodules/jsonpatch/issues/21 24 | replace gomodules.xyz/jsonpatch/v2 => gomodules.xyz/jsonpatch/v2 v2.0.1 25 | -------------------------------------------------------------------------------- /components/service-operator/hack/boilerplate.go.txt: 
-------------------------------------------------------------------------------- 1 | /* 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | */ -------------------------------------------------------------------------------- /components/service-operator/hack/test_integration.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | AWS_ACCOUNT_ID="$(aws sts get-caller-identity | jq -r .Account)" 4 | AWS_RDS_SECURITY_GROUP_ID=$(aws ec2 describe-security-groups | jq -r '.SecurityGroups[] | select(.GroupName == "sandbox_rds_from_worker") | .GroupId') 5 | AWS_REDIS_SECURITY_GROUP_ID=$(aws ec2 describe-security-groups | jq -r '.SecurityGroups[] | select(.GroupName == "sandbox_redis_from_worker") | .GroupId') 6 | 7 | docker build \ 8 | --network host \ 9 | --build-arg AWS_INTEGRATION=true \ 10 | --build-arg AWS_ACCESS_KEY_ID \ 11 | --build-arg AWS_SECRET_ACCESS_KEY \ 12 | --build-arg AWS_SESSION_TOKEN \ 13 | --build-arg AWS_RDS_SECURITY_GROUP_ID=$AWS_RDS_SECURITY_GROUP_ID \ 14 | --build-arg AWS_RDS_SUBNET_GROUP_NAME=sandbox-private \ 15 | --build-arg AWS_REDIS_SECURITY_GROUP_ID=$AWS_REDIS_SECURITY_GROUP_ID \ 16 | --build-arg AWS_REDIS_SUBNET_GROUP_NAME=sandbox-private \ 17 | --build-arg AWS_PRINCIPAL_PERMISSIONS_BOUNDARY_ARN=arn:aws:iam::${AWS_ACCOUNT_ID}:policy/sandbox-service-operator-managed-role-permissions-boundary \ 18 | --build-arg AWS_ROLE_ARN=arn:aws:iam::${AWS_ACCOUNT_ID}:role/admin \ 19 | 
--build-arg AWS_OIDC_PROVIDER_ARN=arn:aws:iam::${AWS_ACCOUNT_ID}:oidc-provider/oidc.eks.eu-west-2.amazonaws.com/id/D4AF693862F6BE27DFD2FCA407D8990D \ 20 | --build-arg AWS_OIDC_PROVIDER_URL=oidc.eks.eu-west-2.amazonaws.com/id/D4AF693862F6BE27DFD2FCA407D8990D \ 21 | . 22 | -------------------------------------------------------------------------------- /components/service-operator/internal/aws/cloudformation/ecr_policy.go: -------------------------------------------------------------------------------- 1 | package cloudformation 2 | 3 | const ( 4 | ECRLifecycleMoreThan = "imageCountMoreThan" 5 | ECRLifecyclePolicyExpire = "expire" 6 | ) 7 | 8 | type ECRLifecyclePolicySelection struct { 9 | TagStatus string `json:"tagStatus,omitempty"` 10 | CountType string `json:"countType,omitempty"` 11 | CountNumber int `json:"countNumber,omitempty"` 12 | } 13 | 14 | type ECRLifecyclePolicyAction struct { 15 | Type string `json:"type,omitempty"` 16 | } 17 | 18 | type ECRLifecyclePolicyRule struct { 19 | RulePriority int64 `json:"rulePriority"` 20 | Description string `json:"description,omitempty"` 21 | Selection ECRLifecyclePolicySelection `json:"selection,omitempty"` 22 | Action ECRLifecyclePolicyAction `json:"action,omitempty"` 23 | } 24 | 25 | type ECRLifecyclePolicy struct { 26 | Rules []ECRLifecyclePolicyRule `json:"rules,omitempty"` 27 | } 28 | -------------------------------------------------------------------------------- /components/service-operator/internal/aws/cloudformation/suite_test.go: -------------------------------------------------------------------------------- 1 | package cloudformation_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/envtest" 10 | ) 11 | 12 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 13 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
14 | 15 | func TestCloudformationPackage(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | 18 | RunSpecsWithDefaultAndCustomReporters(t, 19 | "Cloudformation Suite", 20 | []Reporter{envtest.NewlineReporter{}}) 21 | } 22 | -------------------------------------------------------------------------------- /components/service-operator/internal/aws/ecr/login.go: -------------------------------------------------------------------------------- 1 | package ecr 2 | 3 | import ( 4 | "context" 5 | "encoding/base64" 6 | "errors" 7 | "fmt" 8 | "strings" 9 | 10 | "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" 11 | "github.com/aws/aws-sdk-go/aws" 12 | "github.com/aws/aws-sdk-go/service/ecr" 13 | ) 14 | 15 | type ECRCredentials struct { 16 | Username string 17 | Password string 18 | Endpoint string 19 | } 20 | 21 | func GetECRCredentials(ctx context.Context, c sdk.Client) (*ECRCredentials, error) { 22 | res, err := c.GetAuthorizationTokenWithContext(ctx, &ecr.GetAuthorizationTokenInput{}) 23 | if err != nil { 24 | return nil, err 25 | } 26 | if len(res.AuthorizationData) == 0 { 27 | return nil, fmt.Errorf("GetECRCredentials: no credentials returned") 28 | } else if len(res.AuthorizationData) != 1 { 29 | return nil, fmt.Errorf("GetECRCredentials: unexpected number of credentials returns") 30 | } 31 | data := res.AuthorizationData[0] 32 | token := aws.StringValue(data.AuthorizationToken) 33 | decodedToken, err := base64.StdEncoding.DecodeString(token) 34 | if err != nil { 35 | return nil, err 36 | } 37 | decodedTokenParts := strings.SplitN(string(decodedToken), ":", 2) 38 | if len(decodedTokenParts) != 2 { 39 | return nil, errors.New("GetECRCredentials: invalid credential data") 40 | } 41 | creds := &ECRCredentials{ 42 | Username: "AWS", 43 | Password: decodedTokenParts[1], 44 | Endpoint: aws.StringValue(data.ProxyEndpoint), 45 | } 46 | return creds, nil 47 | 48 | } 49 | -------------------------------------------------------------------------------- 
/components/service-operator/internal/aws/policy_types.go: -------------------------------------------------------------------------------- 1 | package aws 2 | 3 | type StackPolicyDocument struct { 4 | Statement []StatementEntry 5 | } 6 | 7 | type StatementEntry struct { 8 | Effect string 9 | Action []string 10 | Principal string 11 | Resource string 12 | } 13 | -------------------------------------------------------------------------------- /components/service-operator/internal/aws/sdk/client_test.go: -------------------------------------------------------------------------------- 1 | package sdk_test 2 | 3 | import ( 4 | "github.com/alphagov/gsp/components/service-operator/internal/aws/sdk" 5 | . "github.com/onsi/ginkgo" 6 | ) 7 | 8 | var _ = Describe("Client", func() { 9 | 10 | It("should return a valid aws client", func() { 11 | var _ sdk.Client = sdk.NewClient() 12 | }) 13 | 14 | }) 15 | -------------------------------------------------------------------------------- /components/service-operator/internal/aws/sdk/sdk_suite_test.go: -------------------------------------------------------------------------------- 1 | package sdk_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . 
"github.com/onsi/gomega" 8 | ) 9 | 10 | func TestSDK(t *testing.T) { 11 | RegisterFailHandler(Fail) 12 | RunSpecs(t, "Aws Suite") 13 | } 14 | -------------------------------------------------------------------------------- /components/service-operator/internal/aws/sdk/sdkfakes/fake_error.go: -------------------------------------------------------------------------------- 1 | package sdkfakes 2 | 3 | import "github.com/aws/aws-sdk-go/aws/awserr" 4 | 5 | var _ awserr.Error = &MockAWSError{} 6 | 7 | type MockAWSError struct { 8 | C string 9 | M string 10 | O error 11 | } 12 | 13 | func (err *MockAWSError) Code() string { 14 | return err.C 15 | } 16 | func (err *MockAWSError) Error() string { 17 | return err.C 18 | } 19 | func (err *MockAWSError) Message() string { 20 | return err.M 21 | } 22 | func (err *MockAWSError) OrigErr() error { 23 | return err.O 24 | } 25 | 26 | var ResourceNotFoundException awserr.Error = &MockAWSError{ 27 | C: "ResourceNotFoundException", 28 | M: "fake version of error returned when no stack", 29 | } 30 | 31 | var NoUpdateRequiredException awserr.Error = &MockAWSError{ 32 | C: "No updates", 33 | M: "No updates", 34 | } 35 | -------------------------------------------------------------------------------- /components/service-operator/internal/env/suite_test.go: -------------------------------------------------------------------------------- 1 | package env_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/envtest" 10 | ) 11 | 12 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 13 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
14 | 15 | func TestCloudformationPackage(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | 18 | RunSpecsWithDefaultAndCustomReporters(t, 19 | "Suite", 20 | []Reporter{envtest.NewlineReporter{}}) 21 | } 22 | -------------------------------------------------------------------------------- /components/service-operator/internal/istio/schemebuilder.go: -------------------------------------------------------------------------------- 1 | package istio 2 | 3 | import ( 4 | istiocrd "istio.io/istio/pilot/pkg/config/kube/crd" 5 | istioschemas "istio.io/istio/pkg/config/schemas" 6 | meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" 7 | "k8s.io/apimachinery/pkg/runtime" 8 | k8sschema "k8s.io/apimachinery/pkg/runtime/schema" 9 | ) 10 | 11 | func AddToScheme(scheme *runtime.Scheme) error { 12 | istioSchemeBuilder := runtime.NewSchemeBuilder( 13 | func(scheme *runtime.Scheme) error { 14 | gv := k8sschema.GroupVersion{Group: "networking.istio.io", Version: "v1alpha3"} 15 | st := istiocrd.KnownTypes[istioschemas.ServiceEntry.Type] 16 | scheme.AddKnownTypes(gv, st.Object, st.Collection) 17 | meta_v1.AddToGroupVersion(scheme, gv) 18 | return nil 19 | }) 20 | return istioSchemeBuilder.AddToScheme(scheme) 21 | } 22 | -------------------------------------------------------------------------------- /components/service-operator/internal/object/finalizers.go: -------------------------------------------------------------------------------- 1 | package object 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | ) 6 | 7 | // HasFinalizer is a helper for checking if finalizer exists 8 | func HasFinalizer(o metav1.Object, finalizer string) bool { 9 | finalizers := o.GetFinalizers() 10 | return contains(finalizers, finalizer) 11 | } 12 | 13 | // SetFinalizer adds finalizer to object if not exists 14 | func SetFinalizer(o metav1.Object, finalizer string) { 15 | finalizers := o.GetFinalizers() 16 | if !contains(finalizers, finalizer) { 17 | o.SetFinalizers(append(finalizers, 
finalizer)) 18 | } 19 | } 20 | 21 | // RemoveFinalizer removes finalizer from object if it exists 22 | func RemoveFinalizer(o metav1.Object, finalizer string) { 23 | finalizers := o.GetFinalizers() 24 | o.SetFinalizers(remove(finalizers, finalizer)) 25 | } 26 | 27 | func contains(slice []string, s string) bool { 28 | for _, item := range slice { 29 | if item == s { 30 | return true 31 | } 32 | } 33 | return false 34 | } 35 | 36 | func remove(slice []string, s string) []string { 37 | result := []string{} 38 | for _, item := range slice { 39 | if item == s { 40 | continue 41 | } 42 | result = append(result, item) 43 | } 44 | return result 45 | } 46 | -------------------------------------------------------------------------------- /components/service-operator/internal/object/finalizers_test.go: -------------------------------------------------------------------------------- 1 | package object_test 2 | 3 | import ( 4 | "github.com/alphagov/gsp/components/service-operator/internal/object" 5 | . "github.com/onsi/ginkgo" 6 | . 
"github.com/onsi/gomega" 7 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 8 | ) 9 | 10 | var _ = Describe("Finalizers", func() { 11 | 12 | type Object struct { 13 | metav1.TypeMeta `json:",inline"` 14 | metav1.ObjectMeta `json:"metadata,omitempty"` 15 | } 16 | 17 | var o *Object 18 | var f string 19 | 20 | BeforeEach(func() { 21 | o = &Object{} 22 | f = "my-finalizer" 23 | }) 24 | 25 | It("should add/remove finalizers", func() { 26 | Expect(o.GetFinalizers()).To(HaveLen(0)) 27 | Expect(object.HasFinalizer(o, f)).To(BeFalse()) 28 | object.SetFinalizer(o, f) 29 | Expect(o.GetFinalizers()).To(ContainElement("my-finalizer")) 30 | Expect(object.HasFinalizer(o, f)).To(BeTrue()) 31 | object.RemoveFinalizer(o, "my-finalizer") 32 | Expect(o.GetFinalizers()).ToNot(ContainElement("my-finalizer")) 33 | Expect(object.HasFinalizer(o, f)).To(BeFalse()) 34 | }) 35 | 36 | It("should not duplicate finalizers", func() { 37 | object.SetFinalizer(o, f) 38 | object.SetFinalizer(o, f) 39 | object.SetFinalizer(o, f) 40 | Expect(o.GetFinalizers()).To(HaveLen(1)) 41 | }) 42 | 43 | }) 44 | -------------------------------------------------------------------------------- /components/service-operator/internal/object/suite_test.go: -------------------------------------------------------------------------------- 1 | package object_test 2 | 3 | import ( 4 | "testing" 5 | 6 | . "github.com/onsi/ginkgo" 7 | . "github.com/onsi/gomega" 8 | 9 | "sigs.k8s.io/controller-runtime/pkg/envtest" 10 | ) 11 | 12 | // These tests use Ginkgo (BDD-style Go testing framework). Refer to 13 | // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
14 | 15 | func TestCloudformationPackage(t *testing.T) { 16 | RegisterFailHandler(Fail) 17 | 18 | RunSpecsWithDefaultAndCustomReporters(t, 19 | "Suite", 20 | []Reporter{envtest.NewlineReporter{}}) 21 | } 22 | -------------------------------------------------------------------------------- /components/service-operator/internal/object/types.go: -------------------------------------------------------------------------------- 1 | package object 2 | 3 | import ( 4 | metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 5 | "k8s.io/apimachinery/pkg/runtime" 6 | "k8s.io/apimachinery/pkg/runtime/schema" 7 | ) 8 | 9 | // SecretNamer names a Secret to hold sensitive details 10 | type SecretNamer interface { 11 | GetSecretName() string 12 | } 13 | 14 | // Service is the interface shared by all service resources 15 | type Service interface { 16 | runtime.Object 17 | metav1.Object 18 | schema.ObjectKind 19 | StatusReader 20 | StatusWriter 21 | } 22 | 23 | // StatusReader can fetch a status 24 | type StatusReader interface { 25 | GetStatus() Status 26 | GetState() State 27 | } 28 | 29 | // StatusWriter can set status fields 30 | type StatusWriter interface { 31 | SetStatus(Status) 32 | SetState(State) 33 | } 34 | 35 | // PrincipalLister declares that a type can return a list of principals 36 | type PrincipalLister interface { 37 | runtime.Object 38 | GetPrincipals() []Principal 39 | } 40 | 41 | // Principal is the interface shared by all principal types 42 | type Principal interface { 43 | GetRoleName() string 44 | StatusReader 45 | } 46 | -------------------------------------------------------------------------------- /components/service-operator/redisandstunnel/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis 2 | RUN apt-get update && apt-get install -y stunnel && apt-get purge && apt-get autoremove 3 | COPY stunnel.conf /etc/stunnel/redis-cli.conf 4 | 
-------------------------------------------------------------------------------- /components/service-operator/redisandstunnel/stunnel.conf: -------------------------------------------------------------------------------- 1 | fips = no 2 | setuid = root 3 | setgid = root 4 | pid = /var/run/stunnel.pid 5 | debug = 7 6 | options = NO_SSLv2 7 | options = NO_SSLv3 8 | [redis-cli] 9 | client = yes 10 | accept = 127.0.0.1:6379 11 | -------------------------------------------------------------------------------- /components/service-operator/tools/tools.go: -------------------------------------------------------------------------------- 1 | // +build tools 2 | 3 | package tools 4 | 5 | import ( 6 | _ "github.com/maxbrunsfeld/counterfeiter/v6" 7 | ) 8 | 9 | // This file imports packages that are used when running go generate, or used 10 | // during the development process but not otherwise depended on by built code. 11 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Documentation 2 | 3 | ## Index 4 | - [Accessing Concourse](gds-supported-platform/accessing-concourse.md) 5 | - [Accessing Dashboard](gds-supported-platform/accessing-dashboard.md) 6 | - [Architecture Decision Records](/docs/architecture/adr) 7 | - [Bootstrapping GSP on-demand clusters](gds-supported-platform/bootstrapping-clusters.md) 8 | - [GSP Architecture](/docs/architecture/gsp-architecture.md) 9 | - [GSP Architecture Continuous Deployment](/docs/architecture/gsp-architecture-continuous-deployment.md) 10 | - [GSP Architecture Cloud Infrastructure](/docs/architecture/gsp-architecture-cloud-infrastructure.md) 11 | - [GSP Architecture Overview](/docs/architecture/gsp-architecture-overview.md) 12 | - [Incident Reports](incident-reports) 13 | - [Permissions](gds-supported-platform/permissions.md) 14 | - [Per-namespace istio 
gateways](gds-supported-platform/per-namespace-gateway.md) 15 | - [Public DNS](gds-supported-platform/external-dns.md) 16 | - [Public TLS Certificates](gds-supported-platform/tls-certificates.md) 17 | - [Recover from gatekeeper outage](gds-supported-platform/recover-gatekeeper-outage.md) 18 | - [Sealing Secrets](gds-supported-platform/sealing-secrets.md) 19 | - [Set up monitoring and alerting with Prometheus and Alertmanager](gds-supported-platform/prometheus_alert_manager_grafana.md) 20 | - [Updating EKS](gds-supported-platform/updating-EKS.md) 21 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR000-template.md: -------------------------------------------------------------------------------- 1 | # ADRXXX: Title 2 | 3 | ## Status 4 | 5 | Accepted | Pending | Superseded by [ADR000](ADR000-template.md) 6 | 7 | ## Context 8 | 9 | ## Decision 10 | 11 | We will ... 12 | 13 | ## Consequences 14 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR002-containers.md: -------------------------------------------------------------------------------- 1 | # ADR002: Containers 2 | 3 | ## Status 4 | 5 | Accepted 6 | 7 | ## Context 8 | 9 | At the time of writing the infrastructure/deployment landscape is: 10 | 11 | * Many service teams are deploying applications to Virtual Machines (AWS EC2, VMWare, etc) 12 | * Some service teams are deploying applications as containers (AWS ECS, GOV.UK PaaS, Docker) 13 | * Few service teams are deploying applications as functions (AWS Lambda) 14 | 15 | There is a mix of target infrastructure/providers in use, but there is a gradual migration towards hosting on AWS. 16 | 17 | ## Decision 18 | 19 | We will focus on providing the primitives to run stateless containerised workloads. 
20 | 21 | ## Consequences 22 | 23 | * Some applications that were previously being deployed to Virtual Machines may require significant modification before they are suitable for running within a container. 24 | * Some of the isolation guarantees present with Virtual Machines are not available to containers, which may limit our options when thinking about multi-tenancy or multi-environment architectures. 25 | * An initial lack of solution to deploying event-based or Function-as-a-Service architectures may not be attractive to teams who are already experimenting in this area (however such systems _could_ be implemented on top of a container-based system) 26 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR007-identity-provider.md: -------------------------------------------------------------------------------- 1 | # ADR007: Identity Provider 2 | 3 | ## Status 4 | 5 | Superseded by [ADR023](ADR023-cluster-authentication.md) 6 | 7 | ## Context 8 | 9 | We need to provide a way to authenticate users who will interact with our Kubernetes clusters. 10 | 11 | We do not have an organisation-wide identity provider. Virtually everyone will have a Google account. Many people will have a GitHub account. 12 | 13 | People working on GitHub repositories are likely the same people who are deploying to a cluster. Access to repositories likely indicates which users should have access to a cluster. We can reuse this user:team mapping in order to control access to clusters. 14 | 15 | ## Decision 16 | 17 | We will use GitHub as our identity provider. 18 | 19 | ## Consequences 20 | 21 | - It may be non-trivial for non-technical people to authenticate if they have to create a GitHub account first. 22 | - Misconfiguration (accidental or malicious) in GitHub of users and organisations will allow/disallow cluster access. 23 | - Granularity of permissions is limited by GitHub's permission model. 
24 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR008-continuous-delivery-workflow.md: -------------------------------------------------------------------------------- 1 | # ADR008: Continuous delivery workflow 2 | 3 | ## Status 4 | 5 | Accepted 6 | 7 | ## Context 8 | 9 | If a Reliability Engineer on support has to make a change to a Service Team's deployment or 10 | cluster to resolve an issue or perform a critical upgrade, they need to know 11 | how to perform a release, where to look for the code and need confidence that 12 | any changes they make have not broken the deployment. 13 | 14 | Traditionally this problem has been addressed with the use of "Team Manuals" 15 | that document release processes, locations of project repositories and who 16 | should have access. These manuals can get out of sync with processes, and 17 | processes often differ significantly between teams. 18 | 19 | 20 | ## Decision 21 | 22 | We will provide the tools and guidance for teams to practice [continuous delivery](https://en.wikipedia.org/wiki/Continuous_delivery). 23 | 24 | We expect this to improve the efficiency of supporting multiple services by: 25 | 26 | * promoting a consistent pattern for deployment across teams (merging PRs) 27 | * giving confidence to those making changes to deployments (culture of testing/staging releases) 28 | * giving confidence that the desired state of deployment is what is committed to git 29 | 30 | ## Consequences 31 | 32 | * We will likely have to run and maintain CI/CD tools 33 | * CI/CD tooling is an area where people have quite strong opinions 34 | * Some teams may have complicated "gated" pipelines where human approval is required 35 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR009-multitenant-ci-cd.md: -------------------------------------------------------------------------------- 1 | # ADR009: Multi-tenancy for CI and 
CD 2 | 3 | ## Status 4 | 5 | Superseded by [ADR029](ADR029-continuous-delivery-tools.md) 6 | 7 | ## Context 8 | 9 | Two models have been proposed concerning CI and CD tool sets: 10 | 11 | 1. Multi-tenant: all tenants, including Reliability Engineering, share that same CI and CD instance 12 | 2. Per-tenant: each tenant has their own CI and CD cluster 13 | 14 | ## Decision 15 | 16 | - There will be a single CI and CD toolset used by all tenants of the new service 17 | 18 | ## Consequences 19 | 20 | - With a single CI and CD toolset used by all tenants, there will be a need for those tools to have strong RBAC to prevent cross-tenant pollution 21 | - The decision to use a single tool for all tenants could easily be adapted in the future to allow per-tenant CI and CD tool deployments should requirements change 22 | - Authentication for the CI and CD toolsets should align with [ADR007](ADR007-identity-provider.md) 23 | - It is believed that by having the CI and CD toolset sitting within a kubernetes cluster, rather than on puppetised and terraformed EC2 instances, it will allow for easier updates and upgrades to occur, and reduce the need for extra technology to handle configuration management (Puppet, Chef or Ansible) and infrastructure as code tools (Terraform) 24 | - By having a single CI and CD toolset used by all tenants, it will enable Reliability Engineering to centrally manage and upgrade the toolset, rather than having tenants own the management and maintenance responsibility 25 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR010-placement-of-ci-cd-tools.md: -------------------------------------------------------------------------------- 1 | # ADR010: Placement of CI and CD Tools 2 | 3 | ## Status 4 | 5 | Superseded by [ADR029](ADR029-continuous-delivery-tools.md) 6 | 7 | ## Context 8 | 9 | The placement of the CI and CD toolset, either within or external to the control cluster and / or tenant cluster, 
determines most aspects of the build and deployment toolset and influences architectural decisions. 10 | 11 | 12 | ## Decision 13 | 14 | - The CI and CD tools will run separately from the control cluster 15 | - The CI and CD tools will run within their own kubernetes cluster 16 | 17 | 18 | ## Consequences 19 | 20 | - The separation of the CI and CD tool sets from the control cluster will ensure the control cluster remains as lean as possible and allows us to make it as secure as possible. 21 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR013-ci-cd-tools.md: -------------------------------------------------------------------------------- 1 | # ADR013: CI & CD Tool 2 | 3 | ## Status 4 | 5 | Accepted 6 | 7 | ## Context 8 | 9 | We need to choose which tool or tools to use for CI and CD. Different tools suit different purposes, however some crossover exists which could allow the use of a single tool to do both CI and CD. 10 | 11 | ## Decision 12 | 13 | We will use [Concourse](https://concourse-ci.org/) for both CI and CD. 
- Research to discern whether Concourse is still an appropriate choice will occur as the team becomes more familiar with the technology
21 | * A per-cluster key pair will reduce blast area of a leaked key. 22 | * A per-cluster key pair may lead to more complex deployment charts that require selecting secrets based on environment. 23 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR015-aws-iam-authentication.md: -------------------------------------------------------------------------------- 1 | # ADR015: AWS IAM Authentication (for admins) 2 | 3 | ## Status 4 | 5 | Superseded by [ADR023](ADR023-cluster-authentication.md) 6 | 7 | ## Context 8 | 9 | IAM Roles that can be assumed by authorised infrastructure engineers currently do not give access to the clusters via kubectl. We do not want to have to manage two sets of admins. 10 | 11 | ## Decision 12 | 13 | We will enable any admin-like roles within the cluster only to those who can authenticate via the [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator) assuming an appropriate role within the AWS account. 
* Auditing of authentication attempts and more in CloudTrail
* Reduced cost of hosting the control plane
and a graduated project of the [Cloud Native Computing Foundation][CNCF].
12 | 13 | ## Decision 14 | 15 | We will route alerts to a separately hosted shared [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/) to handle platform alert routing 16 | 17 | ## Consequences 18 | 19 | - Using an external alertmanager will require additional configuration in each cluster 20 | - We will be unable to take advantage of the [automated configuration](https://coreos.com/operators/prometheus/docs/latest/user-guides/alerting.html) and custom resources from the prometheus operator 21 | - We will be able to use Pagerduty 22 | -------------------------------------------------------------------------------- /docs/architecture/adr/ADR023-cluster-authentication.md: -------------------------------------------------------------------------------- 1 | # ADR023: Cluster Authentication 2 | 3 | ## Status 4 | 5 | Accepted 6 | 7 | ## Context 8 | 9 | We need to provide a secure way for users to authenticate to interact with cluster resources. 10 | 11 | There are four different roles identified based on need: 12 | 13 | 14 | | Cluster Role | Need | 15 | |---|---| 16 | | deployer | ability to make changes to cluster and full access to AWS resources (for CI) | 17 | | admin | ability to make changes to cluster resources, and restricted access to AWS resources | 18 | | sre | read only access to all cluster resources | 19 | | dev | read only access to resources potentially scoped to a namespace | 20 | 21 | ## Decision 22 | 23 | We will authenticate all users to IAM roles via the [aws-iam-authenticator](https://github.com/kubernetes-sigs/aws-iam-authenticator) and map those IAM roles to [ClusterRoles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/) within the GSP cluster. 24 | 25 | We will store the mapping of IAM user ARN to Cluster Role in Github so that it can be verified. 
* Maintain clear separation of programme specific policies and risk assessments by not forcing all users to adhere to the strictest rules
[Virtual Service]: https://istio.io/docs/reference/config/networking/v1alpha3/virtual-service/
This means any certificates will have to be manually rotated unless we decide to start running `cert-manager` again.
We will continue with Aurora as we don't have any specific requirements not to and can benefit from the solution.
26 | -------------------------------------------------------------------------------- /docs/architecture/gsp-architecture-local.md: -------------------------------------------------------------------------------- 1 | # GDS Supported Platform Local Environment 2 | -------------------------------------------------------------------------------- /docs/architecture/gsp-architecture.md: -------------------------------------------------------------------------------- 1 | # GDS Supported Platform Architecture 2 | 3 | The architecture of the GDS Supported Platform 4 | 5 | 1. [Overview](gsp-architecture-overview.md) 6 | 1. [GSP Cloud Infrastructure](gsp-architecture-cloud-infrastructure.md) 7 | 1. [Continuous Deployment](gsp-architecture-continuous-deployment.md) 8 | 1. [Architecture Decision Records](adr) 9 | -------------------------------------------------------------------------------- /docs/assets/gsp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/docs/assets/gsp.png -------------------------------------------------------------------------------- /docs/assets/paas-spectrum.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/docs/assets/paas-spectrum.png -------------------------------------------------------------------------------- /docs/gds-supported-platform/accessing-concourse.md: -------------------------------------------------------------------------------- 1 | # Accessing Concourse 2 | 3 | You use [Concourse](https://concourse-ci.org) to build, test and deploy apps on the GDS Supported Platform (GSP). You can access Concourse through either the [`fly` CLI](https://concourse-ci.org/fly.html) or through a browser. 
4 | 5 | ## Accessing Concourse using `fly` 6 | 7 | To access Concourse using [`fly`](https://concourse-ci.org/fly.html), your version of `fly` must match your version of Concourse. 8 | 9 | Visit your Concourse to check your `fly` version. Use the links in the bottom right of your Concourse page to upgrade your `fly` if necessary. You can find an example Concourse at http://ci.london.verify.govsvc.uk/. 10 | 11 | Refer to the [`fly` documentation](https://concourse-ci.org/fly.html) for more information on accessing Concourse using `fly`. 12 | -------------------------------------------------------------------------------- /docs/gds-supported-platform/accessing-dashboard.md: -------------------------------------------------------------------------------- 1 | # Accessing the Kubernetes Dashboard 2 | 3 | ## Using the GDS CLI 4 | 5 | 1. [Install the GDS CLI](https://github.com/alphagov/gds-cli/#installation) 6 | 1. `gds sandbox dashboard` 7 | 1. Copy and paste the provided token into the dashboard login form 8 | 9 | ## Without using the GDS CLI 10 | 11 | 1. `aws-vault exec sandbox -- kubectl port-forward --namespace kube-system svc/kubernetes-dashboard 8443:443` 12 | 1. `open https://127.0.0.1:8443` 13 | 1. `aws-vault exec sandbox -- aws eks get-token --cluster-name sandbox | jq -r .status.token` 14 | 1. Copy and paste the provided token into the dashboard login form 15 | -------------------------------------------------------------------------------- /docs/gds-supported-platform/grafana.md: -------------------------------------------------------------------------------- 1 | # Grafana inside GSP 2 | 3 | ## Where to find Grafana 4 | 5 | Browse to https://grafana.london.{name of cluster}.govsvc.uk/ 6 | 7 | ## Login via Google 8 | 9 | When your cluster is created you should provide the deployer pipeline the Google OAuth client ID and secret in `google-oauth-client-id` and `google-oauth-client-secret`. 
gds {name of cluster} kubectl get -n gsp-system secret gsp-grafana -o json | jq -r '.data["admin-password"]' | base64 --decode -
#!/bin/bash

# Sets (or updates) the Concourse deployer pipeline for a single GSP cluster
# described by the config file at ${CLUSTER_CONFIG}, then exposes it.
#
# Required environment:
#   CLUSTER_CONFIG - path to the cluster's YAML config file
# Optional environment:
#   FLY_BIN        - path to the fly binary (defaults to "fly" on PATH)
#
# Any additional arguments are passed through to `fly set-pipeline`.

set -eu -o pipefail

: "${CLUSTER_CONFIG:?}"

FLY_BIN=${FLY_BIN:-fly}
# Quote the config path in the redirections so a path containing spaces or
# glob characters does not break the command (previously unquoted).
CLUSTER_NAME=$(yq -r '.["cluster-name"]' < "${CLUSTER_CONFIG}")
PIPELINE_NAME=$(yq -r '.["concourse-pipeline-name"]' < "${CLUSTER_CONFIG}")

# The previous message ("generating approvers for ...") described what
# set-release-pipeline.sh does, not this script; approvers here are fixed.
echo "setting deployer pipeline for ${CLUSTER_NAME}..."

$FLY_BIN -t cd-gsp sync

$FLY_BIN -t cd-gsp set-pipeline -p "${PIPELINE_NAME}" \
    --config "pipelines/deployer/deployer.yaml" \
    --load-vars-from "pipelines/deployer/deployer.defaults.yaml" \
    --load-vars-from "${CLUSTER_CONFIG}" \
    --yaml-var 'config-approvers=[alphagov]' \
    --yaml-var 'config-approval-count=0' \
    --check-creds "$@"

$FLY_BIN -t cd-gsp expose-pipeline -p "${PIPELINE_NAME}"
# IAM policy allowing the in-cluster Kubernetes cluster-autoscaler to
# discover and resize this cluster's auto scaling groups.
data "aws_iam_policy_document" "cluster_autoscaler_policy" {
  # Read-only discovery of ASGs, launch configurations and launch template
  # versions. These Describe* actions do not support resource-level
  # permissions, so "*" is required here.
  statement {
    effect = "Allow"

    actions = [
      "autoscaling:DescribeAutoScalingGroups",
      "autoscaling:DescribeAutoScalingInstances",
      "autoscaling:DescribeLaunchConfigurations",
      "autoscaling:DescribeTags",
      "ec2:DescribeLaunchTemplateVersions",
    ]

    resources = ["*"]
  }

  # Scaling actions are limited by the Null condition to ASGs carrying the
  # k8s.io/cluster-autoscaler/<cluster_name> tag, i.e. only groups that have
  # opted in to autoscaling for this cluster.
  statement {
    effect = "Allow"

    actions = [
      "autoscaling:SetDesiredCapacity",
      "autoscaling:TerminateInstanceInAutoScalingGroup",
    ]

    condition {
      test     = "Null"
      variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${var.cluster_name}"
      values   = ["false"]
    }

    resources = ["*"]
  }
}

resource "aws_iam_policy" "cluster-autoscaler" {
  name        = "${var.cluster_name}-cluster-autoscaler"
  description = "Policy for the cluster autoscaler"
  policy      = data.aws_iam_policy_document.cluster_autoscaler_policy.json
}

# NOTE: aws_iam_role_policy_attachment replaces the previous
# aws_iam_policy_attachment. aws_iam_policy_attachment manages ALL
# attachments of the policy exclusively and silently detaches the policy
# from any role/user/group attached outside this resource, which the
# provider documentation warns against. Only one role was attached, so the
# behaviour here is unchanged for this module.
resource "aws_iam_role_policy_attachment" "cluster-autoscaler-mgmt" {
  role       = module.k8s-cluster.kiam-server-node-instance-role-name
  policy_arn = aws_iam_policy.cluster-autoscaler.arn
}
# Optional network load balancer for clusters that need to terminate TLS
# (e.g. mTLS) inside the cluster: an NLB forwards unaltered TCP, which the
# default ALB cannot do (see ADR033). All resources are created only when
# var.enable_nlb == "1".
resource "aws_lb" "ingress-nlb" {
  count = var.enable_nlb == "1" ? 1 : 0

  name               = "${var.cluster_name}-ingress-nlb"
  load_balancer_type = "network"

  # One mapping per public subnet; exactly the first three subnets are used,
  # matching the previous three hand-written subnet_mapping blocks.
  dynamic "subnet_mapping" {
    for_each = slice(var.public_subnet_ids, 0, 3)
    content {
      subnet_id = subnet_mapping.value
    }
  }

  tags = {
    "Name" = "${var.cluster_name}-ingress"
  }
}

resource "aws_lb_listener" "ingress-nlb" {
  count = var.enable_nlb == "1" ? 1 : 0

  # Direct reference instead of "${...}": wrapping a single expression in an
  # interpolation string is redundant and deprecated in Terraform 0.12+
  # syntax, which this module already uses elsewhere.
  load_balancer_arn = aws_lb.ingress-nlb[0].arn
  protocol          = "TCP"
  port              = "443"

  # Forward raw TCP to the worker nodes' TCP target group so workloads in
  # the cluster can terminate TLS themselves.
  default_action {
    type             = "forward"
    target_group_arn = module.k8s-cluster.worker_tcp_target_group_arn
  }
}

# Alias record nlb.<cluster domain> pointing at the NLB.
resource "aws_route53_record" "ingress-nlb" {
  count = var.enable_nlb == "1" ? 1 : 0

  zone_id = var.cluster_domain_id
  name    = "nlb.${var.cluster_domain}."
  type    = "A"

  alias {
    name                   = aws_lb.ingress-nlb[0].dns_name
    zone_id                = aws_lb.ingress-nlb[0].zone_id
    evaluate_target_health = true
  }
}
| "digital_signature", 20 | ] 21 | } 22 | 23 | -------------------------------------------------------------------------------- /modules/gsp-domain/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | } 3 | 4 | provider "aws" { 5 | alias = "apex" 6 | region = "eu-west-2" 7 | } 8 | 9 | data "aws_route53_zone" "apex" { 10 | provider = aws.apex 11 | name = var.existing_zone 12 | } 13 | 14 | resource "aws_route53_zone" "subdomain" { 15 | name = var.delegated_zone 16 | force_destroy = true 17 | } 18 | 19 | resource "aws_route53_record" "ns" { 20 | provider = aws.apex 21 | zone_id = data.aws_route53_zone.apex.zone_id 22 | name = var.delegated_zone 23 | type = "NS" 24 | ttl = "30" 25 | 26 | records = [ 27 | aws_route53_zone.subdomain.name_servers[0], 28 | aws_route53_zone.subdomain.name_servers[1], 29 | aws_route53_zone.subdomain.name_servers[2], 30 | aws_route53_zone.subdomain.name_servers[3], 31 | ] 32 | } 33 | 34 | -------------------------------------------------------------------------------- /modules/gsp-domain/outputs.tf: -------------------------------------------------------------------------------- 1 | output "zone_id" { 2 | value = aws_route53_zone.subdomain.zone_id 3 | } 4 | 5 | output "name" { 6 | value = aws_route53_zone.subdomain.name 7 | } 8 | 9 | -------------------------------------------------------------------------------- /modules/gsp-domain/variables.tf: -------------------------------------------------------------------------------- 1 | variable "existing_zone" { 2 | description = "the FQDN of the existing root zone to delegate a subdomain from" 3 | type = string 4 | } 5 | 6 | variable "delegated_zone" { 7 | description = "the FQDN of the new zone delegated from the existing_zone" 8 | type = string 9 | } 10 | 11 | -------------------------------------------------------------------------------- /modules/gsp-network/outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | value = aws_vpc.network.id 3 | } 4 | 5 | output "private_subnet_ids" { 6 | value = [ 7 | module.subnet-0.private_subnet_id, 8 | module.subnet-1.private_subnet_id, 9 | module.subnet-2.private_subnet_id, 10 | ] 11 | } 12 | 13 | output "public_subnet_ids" { 14 | value = [ 15 | module.subnet-0.public_subnet_id, 16 | module.subnet-1.public_subnet_id, 17 | module.subnet-2.public_subnet_id, 18 | ] 19 | } 20 | 21 | output "egress_ips" { 22 | value = [ 23 | module.subnet-0.egress_ip, 24 | module.subnet-1.egress_ip, 25 | module.subnet-2.egress_ip, 26 | ] 27 | } 28 | 29 | output "ingress_ips" { 30 | value = [ 31 | module.subnet-0.ingress_ip, 32 | module.subnet-1.ingress_ip, 33 | module.subnet-2.ingress_ip, 34 | ] 35 | } 36 | 37 | output "cidr_block" { 38 | description = "CIDR IPv4 range of the VPC" 39 | value = aws_vpc.network.cidr_block 40 | } 41 | 42 | output "availability_zones" { 43 | value = [ 44 | module.subnet-0.availability_zone, 45 | module.subnet-1.availability_zone, 46 | module.subnet-2.availability_zone, 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /modules/gsp-network/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | type = string 3 | } 4 | 5 | variable "netnum" { 6 | description = "network number (0-255) for assigned 10.x.0.0/16 cidr, preferably unique per persistant cluster" 7 | default = "0" 8 | } 9 | 10 | -------------------------------------------------------------------------------- /modules/gsp-subnet/outputs.tf: -------------------------------------------------------------------------------- 1 | output "egress_ip" { 2 | value = aws_eip.egress.public_ip 3 | } 4 | 5 | output "egress_id" { 6 | value = aws_eip.egress.id 7 | } 8 | 9 | output "ingress_ip" { 10 | value = aws_eip.ingress.public_ip 11 | } 12 | 13 | output 
"ingress_id" { 14 | value = aws_eip.ingress.id 15 | } 16 | 17 | output "private_subnet_id" { 18 | value = aws_subnet.private.id 19 | } 20 | 21 | output "private_subnet_cidr" { 22 | value = aws_subnet.private.cidr_block 23 | } 24 | 25 | output "public_subnet_id" { 26 | value = aws_subnet.public.id 27 | } 28 | 29 | output "availability_zone" { 30 | value = var.availability_zone 31 | } 32 | -------------------------------------------------------------------------------- /modules/gsp-subnet/private.tf: -------------------------------------------------------------------------------- 1 | resource "aws_subnet" "private" { 2 | vpc_id = var.vpc_id 3 | availability_zone = var.availability_zone 4 | cidr_block = var.private_cidr_block 5 | map_public_ip_on_launch = false 6 | 7 | tags = { 8 | "Name" = "${var.cluster_name}-private-${var.availability_zone}" 9 | "kubernetes.io/cluster/${var.cluster_name}" = "shared" 10 | "kubernetes.io/role/internal-elb" = "1" 11 | } 12 | } 13 | 14 | resource "aws_route_table" "private" { 15 | vpc_id = var.vpc_id 16 | 17 | route { 18 | cidr_block = "0.0.0.0/0" 19 | nat_gateway_id = aws_nat_gateway.egress.id 20 | } 21 | 22 | tags = { 23 | "Name" = "${var.cluster_name}-private-${var.availability_zone}" 24 | } 25 | } 26 | 27 | resource "aws_route_table_association" "private" { 28 | route_table_id = aws_route_table.private.id 29 | subnet_id = aws_subnet.private.id 30 | } 31 | 32 | -------------------------------------------------------------------------------- /modules/gsp-subnet/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | type = string 3 | } 4 | 5 | variable "private_cidr_block" { 6 | description = "CIDR IPv4 range for private subnet" 7 | type = string 8 | } 9 | 10 | variable "public_cidr_block" { 11 | description = "CIDR IPv4 range for public subnet" 12 | type = string 13 | } 14 | 15 | variable "vpc_id" { 16 | description = "VPC ID" 17 | } 18 | 19 | variable 
"availability_zone" { 20 | description = "The availability zone for this subnet" 21 | } 22 | 23 | variable "internet_gateway_id" { 24 | description = "gateway id for public subnet" 25 | } 26 | 27 | -------------------------------------------------------------------------------- /modules/gsp-user/variables.tf: -------------------------------------------------------------------------------- 1 | variable "role_prefix" { 2 | description = "prefix string given to role" 3 | default = "user" 4 | } 5 | 6 | variable "user_name" { 7 | description = "unique name for the user" 8 | } 9 | 10 | variable "user_arn" { 11 | description = "IAM user arn that will assume this role" 12 | } 13 | 14 | variable "cluster_name" { 15 | description = "cluster name to scope this role to" 16 | } 17 | 18 | variable "source_cidrs" { 19 | description = "Source CIDRs that are allowed to perform the assume role" 20 | type = list(string) 21 | 22 | default = [ 23 | "213.86.153.212/32", 24 | "213.86.153.213/32", 25 | "213.86.153.214/32", 26 | "213.86.153.235/32", 27 | "213.86.153.236/32", 28 | "213.86.153.237/32", 29 | "85.133.67.244/32", 30 | ] 31 | } 32 | 33 | -------------------------------------------------------------------------------- /modules/k8s-cluster/data/kubeconfig: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Config 3 | clusters: 4 | - name: ${name} 5 | cluster: 6 | server: ${apiserver_endpoint} 7 | certificate-authority-data: ${ca_cert} 8 | users: 9 | - name: ${name} 10 | user: 11 | exec: 12 | apiVersion: client.authentication.k8s.io/v1alpha1 13 | command: aws-iam-authenticator 14 | args: 15 | - "token" 16 | - "-i" 17 | - "${cluster_id}" 18 | contexts: 19 | - name: ${name} 20 | context: 21 | cluster: ${name} 22 | user: ${name} 23 | current-context: ${name} 24 | -------------------------------------------------------------------------------- /modules/k8s-cluster/outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "kubeconfig" { 2 | value = data.template_file.kubeconfig.rendered 3 | } 4 | 5 | output "kiam-server-node-instance-role-arn" { 6 | value = aws_cloudformation_stack.kiam-server-nodes.outputs["NodeInstanceRole"] 7 | } 8 | 9 | output "kiam-server-node-instance-role-name" { 10 | value = replace(data.aws_arn.kiam-server-nodes-role.resource, "role/", "") 11 | } 12 | 13 | output "bootstrap_role_arns" { 14 | value = [ 15 | aws_cloudformation_stack.worker-nodes.outputs["NodeInstanceRole"], 16 | aws_cloudformation_stack.kiam-server-nodes.outputs["NodeInstanceRole"], 17 | ] 18 | } 19 | 20 | output "worker_tcp_target_group_arn" { 21 | value = aws_cloudformation_stack.worker-nodes.outputs["TCPTargetGroup"] 22 | } 23 | 24 | output "eks-log-group-arn" { 25 | value = aws_cloudwatch_log_group.eks.arn 26 | } 27 | 28 | output "eks-log-group-name" { 29 | value = aws_cloudwatch_log_group.eks.name 30 | } 31 | 32 | output "worker_security_group_id" { 33 | value = aws_security_group.worker.id 34 | } 35 | 36 | output "oidc_provider_url" { 37 | value = aws_iam_openid_connect_provider.eks.url 38 | } 39 | 40 | output "oidc_provider_arn" { 41 | value = aws_iam_openid_connect_provider.eks.arn 42 | } 43 | 44 | output "aws_node_lifecycle_hook_role_arn" { 45 | value = aws_iam_role.aws-node-lifecycle-hook.arn 46 | } 47 | -------------------------------------------------------------------------------- /modules/k8s-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vpc_id" { 2 | type = string 3 | } 4 | 5 | variable "public_subnet_ids" { 6 | type = list(string) 7 | } 8 | 9 | variable "private_subnet_ids" { 10 | type = list(string) 11 | } 12 | 13 | variable "cluster_name" { 14 | type = string 15 | } 16 | 17 | variable "apiserver_allowed_cidrs" { 18 | type = list(string) 19 | } 20 | 21 | variable "eks_version" { 22 | type = string 23 | } 24 | 25 | 
variable "worker_eks_version" { 26 | type = string 27 | } 28 | 29 | variable "worker_generation_timestamp" { 30 | type = string 31 | default = "none" 32 | } 33 | 34 | variable "minimum_workers_per_az_count" { 35 | type = string 36 | default = "1" 37 | } 38 | 39 | variable "desired_workers_per_az_map" { 40 | type = map(number) 41 | default = {} 42 | } 43 | 44 | variable "maximum_workers_per_az_count" { 45 | type = string 46 | default = "5" 47 | } 48 | 49 | variable "worker_on_demand_base_capacity" { 50 | type = string # fixed: quoted type constraint ("string") is deprecated in Terraform 0.12 and an error from 0.13; matches the unquoted style used by every other variable in this file 51 | default = "1" 52 | } 53 | 54 | variable "worker_on_demand_percentage_above_base" { 55 | type = string # fixed: quoted type constraint, as above 56 | default = "100" 57 | } 58 | -------------------------------------------------------------------------------- /modules/lambda_splunk_forwarder/cyber-cloudwatch-fluentd-to-hec.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/alphagov/gsp/a9f80de52b39d460d4c8d00ae5fd9f7a4c9da383/modules/lambda_splunk_forwarder/cyber-cloudwatch-fluentd-to-hec.zip -------------------------------------------------------------------------------- /modules/lambda_splunk_forwarder/iam.tf: -------------------------------------------------------------------------------- 1 | resource "aws_iam_role" "lambda_log_forwarder" { 2 | count = var.enabled == "0" ? 0 : 1 3 | name = "${var.cluster_name}_${var.name}_lambda_log_forwarder" 4 | assume_role_policy = data.aws_iam_policy_document.lambda_log_forwarder_assume_role_policy.json 5 | } 6 | 7 | resource "aws_iam_policy_attachment" "lambda_log_forwarder" { 8 | count = var.enabled == "0" ? 0 : 1 9 | name = "${var.cluster_name}_${var.name}_lambda_log_forwarder_attachment" 10 | roles = [aws_iam_role.lambda_log_forwarder[0].name] 11 | policy_arn = aws_iam_policy.lambda_log_forwarder[0].arn 12 | } 13 | 14 | resource "aws_iam_policy" "lambda_log_forwarder" { 15 | count = var.enabled == "0" ?
0 : 1 16 | name = "${var.cluster_name}_${var.name}_lambda_log_forwarder" 17 | description = "Policy for Lambda log forwarding function" 18 | policy = data.aws_iam_policy_document.lambda_log_forwarder.json 19 | } 20 | 21 | data "aws_iam_policy_document" "lambda_log_forwarder" { 22 | statement { 23 | effect = "Allow" 24 | 25 | actions = [ 26 | "logs:CreateLogGroup", 27 | "logs:CreateLogStream", 28 | "logs:PutLogEvents", 29 | ] 30 | 31 | resources = ["*"] 32 | } 33 | } 34 | 35 | data "aws_iam_policy_document" "lambda_log_forwarder_assume_role_policy" { 36 | statement { 37 | effect = "Allow" 38 | actions = ["sts:AssumeRole"] 39 | 40 | principals { 41 | type = "Service" 42 | identifiers = ["lambda.amazonaws.com"] 43 | } 44 | } 45 | } 46 | 47 | -------------------------------------------------------------------------------- /modules/lambda_splunk_forwarder/variables.tf: -------------------------------------------------------------------------------- 1 | variable "enabled" { 2 | default = 1 # NOTE(review): compared against the string "0" in iam.tf — a caller passing numeric 0 will NOT disable the module; consider type = string 3 | } 4 | 5 | variable "name" { 6 | description = "A unique (within the cluster) name" 7 | type = string 8 | } 9 | 10 | variable "cloudwatch_log_group_arn" { 11 | description = "The ARN of the cloudwatch log group to ship to Splunk" 12 | type = string 13 | } 14 | 15 | variable "cloudwatch_log_group_name" { 16 | description = "The name of the cloudwatch log group to ship to Splunk" 17 | type = string 18 | } 19 | 20 | variable "cluster_name" { 21 | type = string 22 | } 23 | 24 | variable "splunk_hec_token" { 25 | description = "Splunk HTTP event collector token for authentication" 26 | type = string 27 | } 28 | 29 | variable "splunk_hec_url" { 30 | description = "Splunk HTTP event collector URL to send logs to" 31 | type = string 32 | } 33 | 34 | variable "splunk_index" { 35 | description = "Name of index to be added as metadata to logs for use in splunk" 36 | type = string 37 | } 38 | 39 | --------------------------------------------------------------------------------
/pipelines/README.md: -------------------------------------------------------------------------------- 1 | # Deployer pipeline 2 | 3 | Example for pushing a deployer pipeline to provision a sandbox cluster can be found in `./hack/set-deployer-pipeline.sh`: 4 | 5 | ``` 6 | CLUSTER_CONFIG=./pipelines/examples/clusters/sandbox.yaml ./hack/set-deployer-pipeline.sh 7 | ``` 8 | -------------------------------------------------------------------------------- /pipelines/deployer/deployer.defaults.yaml: -------------------------------------------------------------------------------- 1 | eks-version: "1.16" 2 | worker-eks-version: "1.16" 3 | 4 | config-trigger: true 5 | config-version: "master" 6 | 7 | platform-uri: "https://github.com/alphagov/gsp.git" 8 | platform-organization: "alphagov" 9 | platform-repository: "gsp" 10 | platform-trigger: true 11 | platform-pre-release: false 12 | platform-version: master 13 | platform-resource-type: github-release 14 | platform-tag-filter: ^gsp-v(\d+\.\d+\.\d+)$ 15 | 16 | users-uri: "git@github.com:alphagov/gds-trusted-developers.git" 17 | users-organization: "alphagov" 18 | users-repository: "gds-trusted-developers" 19 | users-trigger: true 20 | 21 | disable-destroy: true 22 | 23 | config-approvers: [] 24 | config-approval-count: 2 25 | config-resource-type: github 26 | 27 | minimum-workers-per-az-count: 1 28 | maximum-workers-per-az-count: 5 29 | worker-on-demand-base-capacity: 1 30 | worker-on-demand-percentage-above-base: 100 31 | 32 | task-toolbox-image: govsvc/task-toolbox 33 | task-toolbox-tag: latest 34 | 35 | github-resource-image: govsvc/concourse-github-resource 36 | github-resource-tag: latest 37 | 38 | terraform-resource-image: govsvc/terraform-resource 39 | terraform-resource-tag: latest 40 | 41 | cls-destination-enabled: false 42 | -------------------------------------------------------------------------------- /pipelines/deployer/main.tf: -------------------------------------------------------------------------------- 1 
| terraform { 2 | backend "s3" { 3 | } 4 | } 5 | 6 | variable "aws_account_role_arn" { 7 | type = string 8 | } 9 | 10 | provider "aws" { 11 | region = "eu-west-2" 12 | 13 | version = "~> 2.37" 14 | 15 | assume_role { 16 | role_arn = var.aws_account_role_arn 17 | } 18 | } 19 | 20 | -------------------------------------------------------------------------------- /pipelines/examples/clusters/sandbox.yaml: -------------------------------------------------------------------------------- 1 | account-id: "011571571136" 2 | account-name: "sandbox" 3 | account-role-arn: "arn:aws:iam::011571571136:role/deployer" 4 | cluster-name: "example" 5 | cluster-number: "101" 6 | cluster-domain: "london.example.govsvc.uk" 7 | concourse-team: "gsp" 8 | concourse-username: "gsp" 9 | concourse-url: "https://cd.gds-reliability.engineering" 10 | concourse-pipeline-name: "example-deployer" 11 | splunk-enabled: "0" 12 | splunk-hec-url: "NOTAURL" 13 | k8s-splunk-hec-token: "NOTATOKEN" 14 | k8s-splunk-index: "NOTAURL" 15 | hsm-splunk-hec-token: "NOTATOKEN" 16 | hsm-splunk-index: "NOTAURL" 17 | vpc-flow-log-splunk-hec-token: "NOTATOKEN" 18 | vpc-flow-log-splunk-index: "NOTAURL" 19 | github-client-secret: "NOTASECRET" 20 | github-client-id: "NOTID" 21 | google-oauth-client-id: "NOTASECRET" 22 | google-oauth-client-secret: "NOTID" 23 | eks-version: "1.14" 24 | config-approval-count: 0 25 | config-approvers: ["chrisfarms"] 26 | disable-destroy: false 27 | worker-instance-type: t3.medium 28 | minimum-workers-per-az-count: 1 29 | config-uri: "https://github.com/alphagov/gsp.git" 30 | config-organization: "alphagov" 31 | config-repository: "gsp" 32 | config-path: "pipelines/examples" 33 | config-trigger: false 34 | users-trigger: false 35 | enable-nlb: true 36 | cls-destination-enabled: false 37 | -------------------------------------------------------------------------------- /pipelines/examples/namespaces/sandbox-canary/deployment-keys.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Secret 4 | type: Opaque 5 | metadata: 6 | name: ci-deploy-key 7 | data: 8 | private_key: "RklYTUUK" 9 | 10 | --- 11 | apiVersion: v1 12 | kind: ConfigMap 13 | metadata: 14 | name: ci-deploy-key 15 | data: 16 | public_key: "RklYTUUK" 17 | -------------------------------------------------------------------------------- /pipelines/examples/namespaces/sandbox-canary/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: sandbox-canary 6 | labels: 7 | namespace: sandbox-canary 8 | -------------------------------------------------------------------------------- /pipelines/examples/users/chris.farmiloe.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: chris.farmiloe 3 | email: chris.farmiloe@digital.cabinet-office.gov.uk 4 | ARN: arn:aws:iam::622626885786:user/chris.farmiloe@digital.cabinet-office.gov.uk 5 | roles: 6 | - sandbox-sre 7 | - sandbox-admin 8 | - samcrang-admin 9 | hardware: 10 | id: 9599175 11 | type: yubikey 12 | github: chrisfarms 13 | teams: 14 | - re-gsp 15 | - team-government-paas-people 16 | -------------------------------------------------------------------------------- /pipelines/examples/users/daniel.blair.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: daniel.blair 3 | email: daniel.blair@digital.cabinet-office.gov.uk 4 | ARN: arn:aws:iam::622626885786:user/daniel.blair@digital.cabinet-office.gov.uk 5 | roles: 6 | - verify-admin 7 | - sandbox-sre 8 | - sandbox-admin 9 | - portfolio-admin 10 | hardware: 11 | id: 9607848 12 | type: yubikey 13 | github: blairboy362 14 | teams: 15 | - re-gsp 16 | -------------------------------------------------------------------------------- /pipelines/examples/users/sam.crang.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | name: sam.crang 3 | email: sam.crang@digital.cabinet-office.gov.uk 4 | ARN: arn:aws:iam::622626885786:user/sam.crang@digital.cabinet-office.gov.uk 5 | roles: 6 | - sandbox-admin 7 | - samcrang-admin 8 | hardware: 9 | type: yubikey 10 | personal: true 11 | github: samcrang 12 | teams: 13 | - re-gsp 14 | -------------------------------------------------------------------------------- /pipelines/examples/users/stephen.ford.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: stephen.ford 3 | email: stephen.ford@digital.cabinet-office.gov.uk 4 | ARN: arn:aws:iam::622626885786:user/stephen.ford@digital.cabinet-office.gov.uk 5 | roles: 6 | - verify-admin 7 | - sandbox-sre 8 | - sandbox-admin 9 | - portfolio-sre 10 | hardware: 11 | id: 9599499 12 | type: yubikey 13 | github: smford 14 | teams: 15 | - re-gsp 16 | -------------------------------------------------------------------------------- /pipelines/tasks/bump-semver.yaml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: govsvc/task-toolbox 6 | tag: "1.5.0" 7 | params: {DEST_TAG_PREFIX: ""} 8 | inputs: 9 | - name: release 10 | outputs: 11 | - name: version 12 | run: 13 | path: /bin/bash 14 | args: 15 | - -eu 16 | - -c 17 | - | 18 | echo "bumping release number..." 19 | CURRENT_TAG=$(cat release/tag) 20 | sed -r 's/.*([0-9]+\.[0-9]+\.[0-9]+).*/\1/' <<< "${CURRENT_TAG}" | awk -F. '/[0-9]+\./{$NF++;print}' OFS=. 
| sed "s/^/${DEST_TAG_PREFIX}/" > version/tag 21 | NEW_TAG=$(cat version/tag) 22 | echo "${NEW_TAG}" > version/name 23 | cat version/name 24 | -------------------------------------------------------------------------------- /pipelines/tasks/generate-trusted-contributors.yaml: -------------------------------------------------------------------------------- 1 | platform: linux 2 | image_resource: 3 | type: docker-image 4 | source: 5 | repository: govsvc/task-toolbox 6 | tag: "1.5.0" 7 | params: 8 | ACCOUNT_NAME: gds 9 | CLUSTER_PUBLIC_KEY: 10 | inputs: 11 | - name: users 12 | outputs: 13 | - name: trusted-contributors 14 | run: 15 | path: /bin/bash 16 | args: 17 | - -euo 18 | - pipefail 19 | - -c 20 | - | 21 | echo "preparing keyring to verify user release..." 22 | echo "${CLUSTER_PUBLIC_KEY}" > key 23 | gpg --import key 24 | gpg --verify "users/${ACCOUNT_NAME}-trusted-developers.yaml.asc" 25 | echo "generating list of pipeline approvers..." 26 | trusted_approvers="trusted-contributors/github.vars.yaml" 27 | echo -n "config-approvers: " > "${trusted_approvers}" 28 | yq '.[]' "users/${ACCOUNT_NAME}-trusted-developers.yaml" \ 29 | | jq -c -s "[.[] | .github] | unique | sort" \ 30 | >> "${trusted_approvers}" 31 | cat "${trusted_approvers}" 32 | --------------------------------------------------------------------------------